-rw-r--r--  MAINTAINERS  2
-rw-r--r--  sys/amd64/conf/GENERIC  1
-rw-r--r--  sys/amd64/conf/NOTES  4
-rw-r--r--  sys/arm64/conf/NOTES  2
-rw-r--r--  sys/conf/files.amd64  46
-rw-r--r--  sys/conf/files.arm64  46
-rw-r--r--  sys/contrib/dev/ice/LICENSE  41
-rw-r--r--  sys/contrib/dev/ice/README  197
-rw-r--r--  sys/contrib/dev/ice/ice-1.3.9.0.pkg  bin 0 -> 639236 bytes
-rw-r--r--  sys/dev/ice/ice_adminq_cmd.h  2968
-rw-r--r--  sys/dev/ice/ice_alloc.h  50
-rw-r--r--  sys/dev/ice/ice_bitops.h  407
-rw-r--r--  sys/dev/ice/ice_common.c  4895
-rw-r--r--  sys/dev/ice/ice_common.h  302
-rw-r--r--  sys/dev/ice/ice_common_sysctls.h  100
-rw-r--r--  sys/dev/ice/ice_common_txrx.h  424
-rw-r--r--  sys/dev/ice/ice_controlq.c  1227
-rw-r--r--  sys/dev/ice/ice_controlq.h  125
-rw-r--r--  sys/dev/ice/ice_dcb.c  1697
-rw-r--r--  sys/dev/ice/ice_dcb.h  271
-rw-r--r--  sys/dev/ice/ice_devids.h  78
-rw-r--r--  sys/dev/ice/ice_drv_info.h  195
-rw-r--r--  sys/dev/ice/ice_features.h  91
-rw-r--r--  sys/dev/ice/ice_flex_pipe.c  5630
-rw-r--r--  sys/dev/ice/ice_flex_pipe.h  131
-rw-r--r--  sys/dev/ice/ice_flex_type.h  734
-rw-r--r--  sys/dev/ice/ice_flow.c  2228
-rw-r--r--  sys/dev/ice/ice_flow.h  383
-rw-r--r--  sys/dev/ice/ice_hw_autogen.h  9480
-rw-r--r--  sys/dev/ice/ice_iflib.h  294
-rw-r--r--  sys/dev/ice/ice_iflib_recovery_txrx.c  188
-rw-r--r--  sys/dev/ice/ice_iflib_sysctls.h  45
-rw-r--r--  sys/dev/ice/ice_iflib_txrx.c  401
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h  2355
-rw-r--r--  sys/dev/ice/ice_lib.c  8000
-rw-r--r--  sys/dev/ice/ice_lib.h  811
-rw-r--r--  sys/dev/ice/ice_nvm.c  1303
-rw-r--r--  sys/dev/ice/ice_nvm.h  143
-rw-r--r--  sys/dev/ice/ice_opts.h  48
-rw-r--r--  sys/dev/ice/ice_osdep.c  409
-rw-r--r--  sys/dev/ice/ice_osdep.h  521
-rw-r--r--  sys/dev/ice/ice_protocol_type.h  307
-rw-r--r--  sys/dev/ice/ice_resmgr.c  228
-rw-r--r--  sys/dev/ice/ice_resmgr.h  111
-rw-r--r--  sys/dev/ice/ice_rss.h  117
-rw-r--r--  sys/dev/ice/ice_sbq_cmd.h  121
-rw-r--r--  sys/dev/ice/ice_sched.c  5541
-rw-r--r--  sys/dev/ice/ice_sched.h  225
-rw-r--r--  sys/dev/ice/ice_sriov.c  193
-rw-r--r--  sys/dev/ice/ice_sriov.h  48
-rw-r--r--  sys/dev/ice/ice_status.h  75
-rw-r--r--  sys/dev/ice/ice_strings.c  1034
-rw-r--r--  sys/dev/ice/ice_switch.c  4126
-rw-r--r--  sys/dev/ice/ice_switch.h  468
-rw-r--r--  sys/dev/ice/ice_type.h  1072
-rw-r--r--  sys/dev/ice/if_ice_iflib.c  2874
-rw-r--r--  sys/dev/ice/virtchnl.h  923
-rw-r--r--  sys/dev/ice/virtchnl_inline_ipsec.h  449
-rw-r--r--  sys/modules/Makefile  9
-rw-r--r--  sys/modules/ice/Makefile  15
-rw-r--r--  sys/modules/ice_ddp/Makefile  24
-rw-r--r--  tools/kerneldoc/subsys/Doxyfile-dev_ice  21
62 files changed, 64254 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 58e09bbf72bd..904418bfaade 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -99,10 +99,12 @@ sys/compat/linuxkpi hselasky If in doubt, ask.
#x11 phabricator group.
(to avoid drm graphics drivers
impact)
+sys/contrib/dev/ice erj Pre-commit phabricator review requested.
sys/contrib/ipfilter cy Pre-commit review requested.
sys/dev/e1000 erj Pre-commit phabricator review requested.
sys/dev/ixgbe erj Pre-commit phabricator review requested.
sys/dev/ixl erj Pre-commit phabricator review requested.
+sys/dev/ice erj Pre-commit phabricator review requested.
sys/dev/sound/usb hselasky If in doubt, ask.
sys/dev/usb hselasky If in doubt, ask.
sys/dev/xen royger Pre-commit review recommended.
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 64e35c379d8e..57fcadc3283c 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -245,6 +245,7 @@ device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
+device ice # Intel 800 Series Physical Function
device vmx # VMware VMXNET3 Ethernet
# PCI Ethernet NICs.
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index 5d5b14d81c43..b8fa9dc505f2 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -291,6 +291,8 @@ device cpufreq
# bxe: Broadcom NetXtreme II (BCM5771X/BCM578XX) PCIe 10Gb Ethernet
# adapters.
+# ice: Intel 800 Series Physical Function
+# Requires the ice_ddp module for full functionality
# ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter
# Requires the ipw firmware module
# iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters
@@ -316,6 +318,8 @@ device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs.
device iwn # Intel 4965/1000/5000/6000 wireless NICs.
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
+device ice # Intel 800 Series Physical Function
+device ice_ddp # Intel 800 Series DDP Package
device mthca # Mellanox HCA InfiniBand
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand
diff --git a/sys/arm64/conf/NOTES b/sys/arm64/conf/NOTES
index 75129b9e284a..45d94a4c67b2 100644
--- a/sys/arm64/conf/NOTES
+++ b/sys/arm64/conf/NOTES
@@ -83,6 +83,8 @@ device vnic # Cavium ThunderX NIC
device al_eth # Annapurna Alpine Ethernet NIC
device dwc_rk # Rockchip Designware
device dwc_socfpga # Altera SOCFPGA Ethernet MAC
+device ice # Intel 800 Series Physical Function
+device ice_ddp # Intel 800 Series DDP Package
# Etherswitch devices
device e6000sw # Marvell mv88e6085 based switches
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index e6d91e96b423..6a829ae6dfdd 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -144,6 +144,52 @@ dev/agp/agp_via.c optional agp
dev/amdgpio/amdgpio.c optional amdgpio
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
+dev/ice/if_ice_iflib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_lib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_osdep.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_resmgr.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_strings.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_recovery_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_common.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_controlq.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_dcb.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flex_pipe.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flow.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_nvm.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sched.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sriov.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_switch.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+ice_ddp.c optional ice_ddp \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
+ no-implicit-rule before-depend local \
+ clean "ice_ddp.c"
+ice_ddp.fwo optional ice_ddp \
+ dependency "ice_ddp.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "ice_ddp.fwo"
+ice_ddp.fw optional ice_ddp \
+ dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
+ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
+ no-obj no-implicit-rule \
+ clean "ice_ddp.fw"
dev/ioat/ioat.c optional ioat pci
dev/ioat/ioat_test.c optional ioat pci
dev/ixl/if_ixl.c optional ixl pci \
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index aa87dd4a5b23..deafaaa5486a 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -240,6 +240,52 @@ dev/axgbe/xgbe-dev.c optional axgbe
dev/axgbe/xgbe-drv.c optional axgbe
dev/axgbe/xgbe-mdio.c optional axgbe
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
+dev/ice/if_ice_iflib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_lib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_osdep.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_resmgr.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_strings.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_recovery_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_common.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_controlq.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_dcb.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flex_pipe.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flow.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_nvm.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sched.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sriov.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_switch.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+ice_ddp.c optional ice_ddp \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
+ no-implicit-rule before-depend local \
+ clean "ice_ddp.c"
+ice_ddp.fwo optional ice_ddp \
+ dependency "ice_ddp.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "ice_ddp.fwo"
+ice_ddp.fw optional ice_ddp \
+ dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
+ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
+ no-obj no-implicit-rule \
+ clean "ice_ddp.fw"
dev/iicbus/sy8106a.c optional sy8106a fdt
dev/iicbus/twsi/mv_twsi.c optional twsi fdt
dev/iicbus/twsi/a10_twsi.c optional twsi fdt
diff --git a/sys/contrib/dev/ice/LICENSE b/sys/contrib/dev/ice/LICENSE
new file mode 100644
index 000000000000..7daf627fc7a0
--- /dev/null
+++ b/sys/contrib/dev/ice/LICENSE
@@ -0,0 +1,41 @@
+Copyright (c) 2006-2018, Intel Corporation.
+All rights reserved.
+
+Redistribution. Redistribution and use in binary form, without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions must reproduce the above copyright notice and the
+ following disclaimer in the documentation and/or other materials
+ provided with the distribution.
+* Neither the name of Intel Corporation nor the names of its suppliers
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+* No reverse engineering, decompilation, or disassembly of this software
+ is permitted.
+
+Limited patent license. Intel Corporation grants a world-wide,
+royalty-free, non-exclusive license under patents it now or hereafter
+owns or controls to make, have made, use, import, offer to sell and
+sell ("Utilize") this software, but solely to the extent that any
+such patent is necessary to Utilize the software alone, or in
+combination with an operating system licensed under an approved Open
+Source license as listed by the Open Source Initiative at
+http://opensource.org/licenses. The patent license shall not apply to
+any other combinations which include this software. No hardware per
+se is licensed hereunder.
+
+DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+
diff --git a/sys/contrib/dev/ice/README b/sys/contrib/dev/ice/README
new file mode 100644
index 000000000000..f60055de8f35
--- /dev/null
+++ b/sys/contrib/dev/ice/README
@@ -0,0 +1,197 @@
+Dynamic Device Personalization (DDP) Package
+============================================
+February 21, 2020
+
+
+Contents
+========
+- Overview
+- Safe Mode
+- Notes
+- Installation & Troubleshooting
+- Legal
+
+
+Overview
+========
+Adapters based on the Intel(R) Ethernet Controller 800 Series require a Dynamic
+Device Personalization (DDP) package file to enable advanced features (such as
+dynamic tunneling, Flow Director, RSS, and ADQ).
+
+DDP allows you to change the packet processing pipeline of a device by applying
+a profile package to the device at runtime. Profiles can be used to, for
+example, add support for new protocols, change existing protocols, or change
+default settings. DDP profiles can also be rolled back without rebooting the
+system.
+
+The DDP package loads during device initialization. The driver checks to see if
+the DDP package is present and compatible. If this file exists, the driver will
+load it into the device. If the DDP package file is missing or incompatible
+with the driver, the driver will go into Safe Mode where it will use the
+configuration contained in the device's NVM. See "Safe Mode" later in this
+README for more information.
+
+A general purpose, OS-default DDP package is automatically installed with all
+supported Intel Ethernet Controller 800 Series drivers on Microsoft* Windows*,
+ESX*, FreeBSD*, and Linux* operating systems. Additional DDP packages are
+available to address needs for specific market segments. For example, a
+telecommunications (Comms) DDP package is available to support certain
+market-specific protocols in addition to the protocols in the OS-default
+package.
+
+The OS-default DDP package supports the following:
+- MAC
+- EtherType
+- VLAN
+- IPv4
+- IPv6
+- TCP
+- ARP
+- UDP
+- SCTP
+- ICMP
+- ICMPv6
+- CTRL
+- LLDP
+- VXLAN-GPE
+- VXLAN (non-GPE)
+- Geneve
+- GRE
+- NVGRE
+- RoCEv2
+
+
+Safe Mode
+=========
+Safe Mode disables advanced and performance features, and supports only basic
+traffic and minimal functionality, such as updating the NVM or downloading a
+new driver or DDP package.
+
+See the Intel(R) Ethernet Adapters and Devices User Guide for more details on
+DDP and Safe Mode.
+
+
+Notes
+=====
+- You cannot update the DDP package if any PF drivers are already loaded. To
+overwrite a package, unload all PFs and then reload the driver with the new
+package.
+
+- Except for Linux, you can only use one DDP package per driver, even if you
+have more than one device installed that uses the driver.
+
+- Only the first loaded PF per device can download a package for that device.
+
+- If you are using DPDK, see the DPDK documentation at https://www.dpdk.org/
+for installation instructions and more information.
+
+
+Installation and Troubleshooting
+================================
+
+Microsoft* Windows*
+-------------------
+The DDP package is installed as part of the driver binary. You don't need to
+take additional steps to install the DDP package file.
+
+If you encounter issues with the DDP package file, download the latest driver.
+
+
+ESX
+---
+The DDP package is installed as part of the driver binary. You don't need to
+take additional steps to install the DDP package file.
+
+If you encounter issues with the DDP package file, download the latest driver.
+
+
+FreeBSD
+-------
+The FreeBSD driver automatically installs the default DDP package file during
+driver installation. See the ice driver README for general installation and
+building instructions.
+
+The DDP package loads during device initialization. The driver looks for the
+ice_ddp module and checks that it contains a valid DDP package file.
+
+If you encounter issues with the DDP package file, you may need to download an
+updated driver or ice_ddp module. See the log messages for more information.
+
+NOTE: It's important to do 'make install' during initial ice driver
+installation so that the driver loads the DDP package automatically.
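
[Editor's sketch, not part of this commit's code: one way a FreeBSD driver can pull the DDP image out of the ice_ddp firmware module using the firmware(9) KPI. The module name "ice_ddp" matches the fw_stub entries added to sys/conf/files.amd64 and files.arm64 above; the function name and error handling are illustrative assumptions.]

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker.h>
#include <sys/firmware.h>

static int
example_fetch_ddp_pkg(void)
{
        const struct firmware *pkg;

        pkg = firmware_get("ice_ddp");
        if (pkg == NULL)
                return (ENOENT);        /* driver would fall back to Safe Mode */

        /* pkg->data / pkg->datasize hold the raw ice-1.3.9.0.pkg contents */
        firmware_put(pkg, FIRMWARE_UNLOAD);
        return (0);
}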
+
+
+Linux
+-----
+The Linux driver automatically installs the default DDP package file during
+driver installation. See the ice driver README for general installation and
+building instructions.
+
+The DDP package loads during device initialization. The driver looks for
+intel/ice/ddp/ice.pkg in your firmware root (typically /lib/firmware/ or
+/lib/firmware/updates/) and checks that it contains a valid DDP package file.
+The ice.pkg file is a symbolic link to the default DDP package file installed
+by the linux-firmware software package or the ice out-of-tree driver
+installation.
+
+If you encounter issues with the DDP package file, you may need to download an
+updated driver or DDP package file. See the log messages for more information.
+
+You can install specific DDP package files for different physical devices in
+the same system. To install a specific DDP package:
+
+1. Download the DDP package file (ice-x.x.x.x.zip) you want for your device. In
+addition to licensing information and this README, this zip file contains the
+following files:
+ ice-x.x.x.x.pkg
+ ice.pkg
+
+NOTE: The ice.pkg file is a Linux symbolic link file pointing to
+ice-x.x.x.x.pkg (in the same path).
+
+2. Rename the ice-x.x.x.x.pkg file as ice-xxxxxxxxxxxxxxxx.pkg, where
+'xxxxxxxxxxxxxxxx' is the unique 64-bit PCI Express device serial number (in
+hex) of the device you want the package downloaded on. The filename must
+include the complete serial number (including leading zeros) and be all
+lowercase. For example, if the 64-bit serial number is b887a3ffffca0568, then
+the file name would be ice-b887a3ffffca0568.pkg.
+
+To find the serial number from the PCI bus address, you can use the following
+command:
+
+# lspci -vv -s af:00.0 | grep -i Serial
+Capabilities: [150 v1] Device Serial Number b8-87-a3-ff-ff-ca-05-68
+
+You can use the following command to format the serial number without the
+dashes:
+
+# lspci -vv -s af:00.0 | grep -i Serial | awk '{print $7}' | sed s/-//g
+b887a3ffffca0568
+
+3. Copy the renamed DDP package file to /lib/firmware/updates/intel/ice/ddp/.
+If the directory does not yet exist, create it before copying the file.
+
+4. Unload all of the PFs on the device.
+
+5. Reload the driver with the new package.
+
+NOTE: The presence of a device-specific DDP package file overrides the loading
+of the default DDP package file (ice.pkg).
+
+
+Legal / Disclaimers
+===================
+Copyright (c) 2019 - 2020, Intel Corporation.
+
+Intel and the Intel logo are trademarks of Intel Corporation or its
+subsidiaries in the U.S. and/or other countries.
+
+*Other names and brands may be claimed as the property of others.
+
+This software and the related documents are Intel copyrighted materials, and
+your use of them is governed by the express license under which they were
+provided to you ("License"). Unless the License provides otherwise, you may not
+use, modify, copy, publish, distribute, disclose or transmit this software or
+the related documents without Intel's prior written permission.
+This software and the related documents are provided as is, with no express or
+implied warranties, other than those that are expressly stated in the License.
diff --git a/sys/contrib/dev/ice/ice-1.3.9.0.pkg b/sys/contrib/dev/ice/ice-1.3.9.0.pkg
new file mode 100644
index 000000000000..74bce6da3e26
--- /dev/null
+++ b/sys/contrib/dev/ice/ice-1.3.9.0.pkg
Binary files differ
diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h
new file mode 100644
index 000000000000..5ac7c9a03fa4
--- /dev/null
+++ b/sys/dev/ice/ice_adminq_cmd.h
@@ -0,0 +1,2968 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_ADMINQ_CMD_H_
+#define _ICE_ADMINQ_CMD_H_
+
+/* This header file defines the Admin Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+#define ICE_MAX_VSI 768
+#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
+#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+
+struct ice_aqc_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get version (direct 0x0001) */
+struct ice_aqc_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
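
[Editor's sketch: issuing Get Version (direct 0x0001) with the shared-code helpers this commit adds in ice_common.c and ice_controlq.c (ice_fill_dflt_direct_cmd_desc(), ice_aq_send_cmd()). For a direct command the response comes back in the descriptor itself. The wrapper name example_read_fw_ver is hypothetical and assumes the driver's usual type definitions.]

static enum ice_status
example_read_fw_ver(struct ice_hw *hw)
{
        struct ice_aqc_get_ver *resp;
        struct ice_aq_desc desc;
        enum ice_status status;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
        status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
        if (status)
                return status;

        /* direct command: read the response fields out of the descriptor */
        resp = &desc.params.get_ver;
        ice_debug(hw, ICE_DBG_INIT, "FW %u.%u.%u, API %u.%u\n",
                  resp->fw_major, resp->fw_minor, resp->fw_patch,
                  resp->api_major, resp->api_minor);
        return ICE_SUCCESS;
}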
+
+/* Send driver version (indirect 0x0002) */
+struct ice_aqc_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Queue Shutdown (direct 0x0003) */
+struct ice_aqc_q_shutdown {
+ u8 driver_unloading;
+#define ICE_AQC_DRIVER_UNLOADING BIT(0)
+ u8 reserved[15];
+};
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ice_aqc_get_exp_err {
+ __le32 reason;
+#define ICE_AQC_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ice_aqc_req_res {
+ __le16 res_id;
+#define ICE_AQC_RES_ID_NVM 1
+#define ICE_AQC_RES_ID_SDP 2
+#define ICE_AQC_RES_ID_CHNG_LOCK 3
+#define ICE_AQC_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define ICE_AQC_RES_ACCESS_READ 1
+#define ICE_AQC_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define ICE_AQ_RES_GLBL_SUCCESS 0
+#define ICE_AQ_RES_GLBL_IN_PROG 1
+#define ICE_AQ_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
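
[Editor's sketch of the ownership protocol described above, using the ice_acquire_res()/ice_release_res() wrappers this commit adds in ice_common.c: acquire the resource, finish the work before the FW-provided timeout expires, then release it. The surrounding function name is hypothetical.]

static enum ice_status
example_read_nvm_locked(struct ice_hw *hw)
{
        enum ice_status status;

        status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
                                 ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS);
        if (status)
                return status;

        /* ... NVM reads via the Admin Queue would go here ... */

        ice_release_res(hw, ICE_NVM_RES_ID);
        return ICE_SUCCESS;
}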
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ice_aqc_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ice_aqc_list_caps_elem {
+ __le16 cap;
+#define ICE_AQC_CAPS_SWITCHING_MODE 0x0001
+#define ICE_AQC_CAPS_MANAGEABILITY_MODE 0x0002
+#define ICE_AQC_CAPS_OS2BMC 0x0004
+#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
+#define ICE_AQC_MAX_VALID_FUNCTIONS 0x8
+#define ICE_AQC_CAPS_ALTERNATE_RAM 0x0006
+#define ICE_AQC_CAPS_WOL_PROXY 0x0008
+#define ICE_AQC_CAPS_SRIOV 0x0012
+#define ICE_AQC_CAPS_VF 0x0013
+#define ICE_AQC_CAPS_802_1QBG 0x0015
+#define ICE_AQC_CAPS_802_1BR 0x0016
+#define ICE_AQC_CAPS_VSI 0x0017
+#define ICE_AQC_CAPS_DCB 0x0018
+#define ICE_AQC_CAPS_RSVD 0x0021
+#define ICE_AQC_CAPS_ISCSI 0x0022
+#define ICE_AQC_CAPS_RSS 0x0040
+#define ICE_AQC_CAPS_RXQS 0x0041
+#define ICE_AQC_CAPS_TXQS 0x0042
+#define ICE_AQC_CAPS_MSIX 0x0043
+#define ICE_AQC_CAPS_MAX_MTU 0x0047
+#define ICE_AQC_CAPS_NVM_VER 0x0048
+#define ICE_AQC_CAPS_CEM 0x00F2
+#define ICE_AQC_CAPS_IWARP 0x0051
+#define ICE_AQC_CAPS_LED 0x0061
+#define ICE_AQC_CAPS_SDP 0x0062
+#define ICE_AQC_CAPS_WR_CSR_PROT 0x0064
+#define ICE_AQC_CAPS_NO_DROP_POLICY 0x0065
+#define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073
+#define ICE_AQC_CAPS_SKU 0x0074
+#define ICE_AQC_CAPS_PORT_MAP 0x0075
+#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
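
[Editor's sketch: walking the array of capability elements returned by Get Device/Function Capabilities. 'buf' and 'cap_count' are assumed to come from a prior ice_aq_send_cmd() using struct ice_aqc_list_caps as the command descriptor; the function name and locals are illustrative, and LE16_TO_CPU/LE32_TO_CPU come from the driver's osdep layer.]

static void
example_parse_caps(struct ice_aqc_list_caps_elem *buf, u32 cap_count)
{
        u32 num_vsi = 0, rss_table_size = 0;
        u32 i;

        for (i = 0; i < cap_count; i++) {
                struct ice_aqc_list_caps_elem *cap = &buf[i];

                switch (LE16_TO_CPU(cap->cap)) {
                case ICE_AQC_CAPS_VSI:
                        num_vsi = LE32_TO_CPU(cap->number);
                        break;
                case ICE_AQC_CAPS_RSS:
                        rss_table_size = LE32_TO_CPU(cap->number);
                        break;
                default:
                        /* unknown capabilities are simply skipped */
                        break;
                }
        }
        (void)num_vsi; (void)rss_table_size;
}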
+
+/* Manage MAC address, read command - indirect (0x0107)
+ * This struct is also used for the response
+ */
+struct ice_aqc_manage_mac_read {
+ __le16 flags; /* Zeroed by device driver */
+#define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4)
+#define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5)
+#define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6)
+#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7)
+#define ICE_AQC_MAN_MAC_MC_MAG_EN BIT(8)
+#define ICE_AQC_MAN_MAC_WOL_PRESERVE_ON_PFR BIT(9)
+#define ICE_AQC_MAN_MAC_READ_S 4
+#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S)
+ u8 rsvd[2];
+ u8 num_addr; /* Used in response */
+ u8 rsvd1[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response buffer format for manage MAC read command */
+struct ice_aqc_manage_mac_read_resp {
+ u8 lport_num;
+ u8 addr_type;
+#define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0
+#define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Manage MAC address, write command - direct (0x0108) */
+struct ice_aqc_manage_mac_write {
+ u8 rsvd;
+ u8 flags;
+#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
+#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
+#define ICE_AQC_MAN_MAC_WR_S 6
+#define ICE_AQC_MAN_MAC_WR_M MAKEMASK(3, ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_UPDATE_LAA 0
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S)
+ /* byte stream in network order */
+ u8 mac_addr[ETH_ALEN];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct ice_aqc_clear_pxe {
+ u8 rx_cnt;
+#define ICE_AQC_CLEAR_PXE_RX_CNT 0x2
+ u8 reserved[15];
+};
+
+/* Configure No-Drop Policy Command (direct 0x0112) */
+struct ice_aqc_config_no_drop_policy {
+ u8 opts;
+#define ICE_AQC_FORCE_NO_DROP BIT(0)
+ u8 rsvd[15];
+};
+
+/* Get switch configuration (0x0200) */
+struct ice_aqc_get_sw_cfg {
+ /* Reserved for command and copy of request flags for response */
+ __le16 flags;
+ /* First desc in case of command and next_elem in case of response
+ * In case of response, if it is not zero, means all the configuration
+ * was not returned and new command shall be sent with this value in
+ * the 'first desc' field
+ */
+ __le16 element;
+ /* Reserved for command, only used for response */
+ __le16 num_elems;
+ __le16 rsvd;
+ __le32 addr_high;
+ __le32 addr_low;
+};
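
[Editor's sketch of the continuation scheme described above: re-issue Get Switch Configuration, feeding the returned 'element' cookie back in as the 'first desc', until it comes back zero. ice_aq_get_sw_cfg() stands in for a wrapper around ice_aq_send_cmd(); its exact signature is an assumption, not part of this header.]

static enum ice_status
example_walk_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
                    u16 buf_size)
{
        enum ice_status status;
        u16 req_desc = 0;
        u16 num_elems;

        do {
                status = ice_aq_get_sw_cfg(hw, buf, buf_size, &req_desc,
                                           &num_elems, NULL);
                if (status)
                        return status;
                /* process num_elems entries of buf->elements[] here */
        } while (req_desc);

        return ICE_SUCCESS;
}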
+
+/* Each entry in the response buffer is of the following type: */
+struct ice_aqc_get_sw_cfg_resp_elem {
+ /* VSI/Port Number */
+ __le16 vsi_port_num;
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \
+ (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
+#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0
+#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1
+#define ICE_AQC_GET_SW_CONF_RESP_VSI 2
+
+ /* SWID VSI/Port belongs to */
+ __le16 swid;
+
+ /* Bit 14..0 : PF/VF number VSI belongs to
+ * Bit 15 : VF indication bit
+ */
+ __le16 pf_vf_num;
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \
+ (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
+};
+
+/* The response buffer is as follows. Note that the length of the
+ * elements array varies with the length of the command response.
+ */
+struct ice_aqc_get_sw_cfg_resp {
+ struct ice_aqc_get_sw_cfg_resp_elem elements[1];
+};
+
+/* Set Port parameters, (direct, 0x0203) */
+struct ice_aqc_set_port_params {
+ __le16 cmd_flags;
+#define ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS BIT(0)
+#define ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS BIT(1)
+#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
+ __le16 bad_frame_vsi;
+#define ICE_AQC_SET_P_PARAMS_VSI_S 0
+#define ICE_AQC_SET_P_PARAMS_VSI_M (0x3FF << ICE_AQC_SET_P_PARAMS_VSI_S)
+#define ICE_AQC_SET_P_PARAMS_VSI_VALID BIT(15)
+ __le16 swid;
+#define ICE_AQC_SET_P_PARAMS_SWID_S 0
+#define ICE_AQC_SET_P_PARAMS_SWID_M (0xFF << ICE_AQC_SET_P_PARAMS_SWID_S)
+#define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S 8
+#define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_M \
+ (0x3F << ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S)
+#define ICE_AQC_SET_P_PARAMS_IS_LOGI_PORT BIT(14)
+#define ICE_AQC_SET_P_PARAMS_SWID_VALID BIT(15)
+ u8 reserved[10];
+};
+
+/* These resource type defines are used for all switch resource
+ * commands where a resource type is required, such as:
+ * Get Resource Allocation command (indirect 0x0204)
+ * Allocate Resources command (indirect 0x0208)
+ * Free Resources command (indirect 0x0209)
+ * Get Allocated Resource Descriptors Command (indirect 0x020A)
+ */
+#define ICE_AQC_RES_TYPE_VEB_COUNTER 0x00
+#define ICE_AQC_RES_TYPE_VLAN_COUNTER 0x01
+#define ICE_AQC_RES_TYPE_MIRROR_RULE 0x02
+#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
+#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_RECIPE 0x05
+#define ICE_AQC_RES_TYPE_PROFILE 0x06
+#define ICE_AQC_RES_TYPE_SWID 0x07
+#define ICE_AQC_RES_TYPE_VSI 0x08
+#define ICE_AQC_RES_TYPE_FLU 0x09
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_1 0x0A
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_2 0x0B
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_4 0x0C
+#define ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH 0x20
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
+#define ICE_AQC_RES_TYPE_FLEX_DESC_PROG 0x30
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID 0x48
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+/* Resource types 0x62-67 are reserved for Hash profile builder */
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID 0x68
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM 0x69
+
+#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
+#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
+#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+
+#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
+
+#define ICE_AQC_RES_TYPE_S 0
+#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S)
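
[Editor's sketch: composing the 16-bit resource type field used by the allocate/free/get commands listed above, combining a type code with the shared-vs-dedicated flag. The helper name is illustrative.]

static inline u16
example_res_type(u16 type, bool shared)
{
        u16 res = (type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M;

        return res | (shared ? ICE_AQC_RES_TYPE_FLAG_SHARED :
                               ICE_AQC_RES_TYPE_FLAG_DEDICATED);
}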
+
+/* Get Resource Allocation command (indirect 0x0204) */
+struct ice_aqc_get_res_alloc {
+ __le16 resp_elem_num; /* Used in response, reserved in command */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Resource Allocation Response Buffer per response */
+struct ice_aqc_get_res_resp_elem {
+ __le16 res_type; /* Types defined above cmd 0x0204 */
+ __le16 total_capacity; /* Resources available to all PF's */
+ __le16 total_function; /* Resources allocated for a PF */
+ __le16 total_shared; /* Resources allocated as shared */
+ __le16 total_free; /* Resources un-allocated/not reserved by any PF */
+};
+
+/* Buffer for Get Resource command */
+struct ice_aqc_get_res_resp {
+ /* Number of resource entries to be calculated using
+ * datalen/sizeof(struct ice_aqc_cmd_resp)).
+ * Value of 'datalen' gets updated as part of response.
+ */
+ struct ice_aqc_get_res_resp_elem elem[1];
+};
+
+/* Allocate Resources command (indirect 0x0208)
+ * Free Resources command (indirect 0x0209)
+ */
+struct ice_aqc_alloc_free_res_cmd {
+ __le16 num_entries; /* Number of Resource entries */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Resource descriptor */
+struct ice_aqc_res_elem {
+ union {
+ __le16 sw_resp;
+ __le16 flu_resp;
+ } e;
+};
+
+/* Buffer for Allocate/Free Resources commands */
+struct ice_aqc_alloc_free_res_elem {
+ __le16 res_type; /* Types defined above cmd 0x0204 */
+#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8
+#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
+ (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
+ __le16 num_elems;
+ struct ice_aqc_res_elem elem[1];
+};
+
+/* Get Allocated Resource Descriptors Command (indirect 0x020A) */
+struct ice_aqc_get_allocd_res_desc {
+ union {
+ struct {
+ __le16 res; /* Types defined above cmd 0x0204 */
+ __le16 first_desc;
+ __le32 reserved;
+ } cmd;
+ struct {
+ __le16 res;
+ __le16 next_desc;
+ __le16 num_desc;
+ __le16 reserved;
+ } resp;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_allocd_res_desc_resp {
+ struct ice_aqc_res_elem elem[1];
+};
+
+/* Add VSI (indirect 0x0210)
+ * Update VSI (indirect 0x0211)
+ * Get VSI (indirect 0x0212)
+ * Free VSI (indirect 0x0213)
+ */
+struct ice_aqc_add_get_update_free_vsi {
+ __le16 vsi_num;
+#define ICE_AQ_VSI_NUM_S 0
+#define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S)
+#define ICE_AQ_VSI_IS_VALID BIT(15)
+ __le16 cmd_flags;
+#define ICE_AQ_VSI_KEEP_ALLOC 0x1
+ u8 vf_id;
+ u8 reserved;
+ __le16 vsi_flags;
+#define ICE_AQ_VSI_TYPE_S 0
+#define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S)
+#define ICE_AQ_VSI_TYPE_VF 0x0
+#define ICE_AQ_VSI_TYPE_VMDQ2 0x1
+#define ICE_AQ_VSI_TYPE_PF 0x2
+#define ICE_AQ_VSI_TYPE_EMP_MNG 0x3
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response descriptor for:
+ * Add VSI (indirect 0x0210)
+ * Update VSI (indirect 0x0211)
+ * Free VSI (indirect 0x0213)
+ */
+struct ice_aqc_add_update_free_vsi_resp {
+ __le16 vsi_num;
+ __le16 ext_status;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_vsi_resp {
+ __le16 vsi_num;
+ u8 vf_id;
+ /* The vsi_flags field uses the ICE_AQ_VSI_TYPE_* defines for values.
+ * These are found above in struct ice_aqc_add_get_update_free_vsi.
+ */
+ u8 vsi_flags;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_vsi_props {
+ __le16 valid_sections;
+#define ICE_AQ_VSI_PROP_SW_VALID BIT(0)
+#define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1)
+#define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2)
+#define ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3)
+#define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4)
+#define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5)
+#define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6)
+#define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7)
+#define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8)
+#define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11)
+#define ICE_AQ_VSI_PROP_PASID_VALID BIT(12)
+ /* switch section */
+ u8 sw_id;
+ u8 sw_flags;
+#define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5)
+#define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6)
+#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
+ u8 sw_flags2;
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
+ (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
+#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
+ u8 veb_stat_id;
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
+ /* security section */
+ u8 sec_flags;
+#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
+#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ u8 pvlan_reserved[2];
+ u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
+#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_VLAN_EMOD_S 3
+#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+ u8 pvlan_reserved2[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* outer tags section */
+ __le16 outer_tag;
+ u8 outer_tag_flags;
+#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
+#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
+#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
+#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
+ u8 outer_tag_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+ __le16 q_mapping[16];
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+ __le16 tc_mapping[8];
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+ /* queueing option section */
+ u8 q_opt_rss;
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+ u8 q_opt_tc;
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+ u8 q_opt_flags;
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+ u8 q_opt_reserved[3];
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ /* section 10 */
+ __le16 sect_10_reserved;
+ /* flow director section */
+ __le16 fd_options;
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+ __le16 max_fd_fltr_dedicated;
+ __le16 max_fd_fltr_shared;
+ __le16 fd_def_q;
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+ __le16 fd_report_opt;
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+ /* PASID section */
+ __le32 pasid_id;
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+ u8 reserved[24];
+};
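
[Editor's sketch: packing one tc_mapping[] entry from the queue mapping section above. Per the masks, the low bits carry the first queue offset for the TC and the upper bits carry the queue count as a power-of-two exponent (an assumption based on how the shared code uses these fields); the helper name is illustrative and CPU_TO_LE16 comes from the osdep layer.]

static inline __le16
example_tc_qmap(u16 q_offset, u16 pow2_qcount)
{
        u16 v;

        v = (q_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M;
        v |= (pow2_qcount << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M;
        return CPU_TO_LE16(v);
}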
+
+/* Add/update mirror rule - direct (0x0260) */
+#define ICE_AQC_RULE_ID_VALID_S 7
+#define ICE_AQC_RULE_ID_VALID_M (0x1 << ICE_AQC_RULE_ID_VALID_S)
+#define ICE_AQC_RULE_ID_S 0
+#define ICE_AQC_RULE_ID_M (0x3F << ICE_AQC_RULE_ID_S)
+
+/* Following defines to be used while processing caller specified mirror list
+ * of VSI indexes.
+ */
+/* Action: Byte.bit (1.7)
+ * 0 = Remove VSI from mirror rule
+ * 1 = Add VSI to mirror rule
+ */
+#define ICE_AQC_RULE_ACT_S 15
+#define ICE_AQC_RULE_ACT_M (0x1 << ICE_AQC_RULE_ACT_S)
+/* Action: 1.2:0.0 = Mirrored VSI */
+#define ICE_AQC_RULE_MIRRORED_VSI_S 0
+#define ICE_AQC_RULE_MIRRORED_VSI_M (0x7FF << ICE_AQC_RULE_MIRRORED_VSI_S)
+
+/* This is to be used by add/update mirror rule Admin Queue command.
+ * In case of add mirror rule - if rule ID is specified as
+ * INVAL_MIRROR_RULE_ID, new rule ID is allocated from shared pool.
+ * If specified rule_id is valid, then it is used. If specified rule_id
+ * is in use then new mirroring rule is added.
+ */
+#define ICE_INVAL_MIRROR_RULE_ID 0xFFFF
+
+struct ice_aqc_add_update_mir_rule {
+ __le16 rule_id;
+
+ __le16 rule_type;
+#define ICE_AQC_RULE_TYPE_S 0
+#define ICE_AQC_RULE_TYPE_M (0x7 << ICE_AQC_RULE_TYPE_S)
+ /* VPORT ingress/egress */
+#define ICE_AQC_RULE_TYPE_VPORT_INGRESS 0x1
+#define ICE_AQC_RULE_TYPE_VPORT_EGRESS 0x2
+ /* Physical port ingress mirroring.
+ * All traffic received by this port
+ */
+#define ICE_AQC_RULE_TYPE_PPORT_INGRESS 0x6
+ /* Physical port egress mirroring. All traffic sent by this port */
+#define ICE_AQC_RULE_TYPE_PPORT_EGRESS 0x7
+
+ /* Number of mirrored entries.
+ * The values are in the command buffer
+ */
+ __le16 num_entries;
+
+ /* Destination VSI */
+ __le16 dest;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Delete mirror rule - direct(0x0261) */
+struct ice_aqc_delete_mir_rule {
+ __le16 rule_id;
+ __le16 rsvd;
+
+ /* Byte.bit: 20.0 = Keep allocation. If set VSI stays part of
+ * the PF allocated resources, otherwise it is returned to the
+ * shared pool
+ */
+#define ICE_AQC_FLAG_KEEP_ALLOCD_S 0
+#define ICE_AQC_FLAG_KEEP_ALLOCD_M (0x1 << ICE_AQC_FLAG_KEEP_ALLOCD_S)
+ __le16 flags;
+
+ u8 reserved[10];
+};
+
+/* Set/Get storm config - (direct 0x0280, 0x0281) */
+/* This structure holds get storm configuration response and same structure
+ * is used to perform set_storm_cfg
+ */
+struct ice_aqc_storm_cfg {
+ __le32 bcast_thresh_size;
+ __le32 mcast_thresh_size;
+ /* Bit 18:0 - Traffic upper threshold size
+ * Bit 31:19 - Reserved
+ */
+#define ICE_AQ_THRESHOLD_S 0
+#define ICE_AQ_THRESHOLD_M (0x7FFFF << ICE_AQ_THRESHOLD_S)
+
+ __le32 storm_ctrl_ctrl;
+ /* Bit 0: MDIPW - Drop Multicast packets in previous window
+ * Bit 1: MDICW - Drop multicast packets in current window
+ * Bit 2: BDIPW - Drop broadcast packets in previous window
+ * Bit 3: BDICW - Drop broadcast packets in current window
+ */
+#define ICE_AQ_STORM_CTRL_MDIPW_DROP_MULTICAST BIT(0)
+#define ICE_AQ_STORM_CTRL_MDICW_DROP_MULTICAST BIT(1)
+#define ICE_AQ_STORM_CTRL_BDIPW_DROP_MULTICAST BIT(2)
+#define ICE_AQ_STORM_CTRL_BDICW_DROP_MULTICAST BIT(3)
+ /* Bit 7:5 : Reserved */
+ /* Bit 27:8 : Interval - BSC/MSC Time-interval specification: The
+ * interval size for applying ingress broadcast or multicast storm
+ * control.
+ */
+#define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S 8
+#define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_M \
+ (0xFFFFF << ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S)
+ __le32 reserved;
+};
+
+#define ICE_MAX_NUM_RECIPES 64
+
+/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
+ */
+struct ice_aqc_sw_rules {
+ /* ops: add switch rules, referring the number of rules.
+ * ops: update switch rules, referring the number of filters
+ * ops: remove switch rules, referring the entry index.
+ * ops: get switch rules, referring to the number of filters.
+ */
+ __le16 num_rules_fltr_entry_index;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+#pragma pack(1)
+/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
+ * This structures describes the lookup rules and associated actions. "index"
+ * is returned as part of a response to a successful Add command, and can be
+ * used to identify the rule for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_lkup_rx_tx {
+ __le16 recipe_id;
+#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
+ /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
+ __le16 src;
+ __le32 act;
+
+ /* Bit 0:1 - Action type */
+#define ICE_SINGLE_ACT_TYPE_S 0x00
+#define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S)
+
+ /* Bit 2 - Loop back enable
+ * Bit 3 - LAN enable
+ */
+#define ICE_SINGLE_ACT_LB_ENABLE BIT(2)
+#define ICE_SINGLE_ACT_LAN_ENABLE BIT(3)
+
+ /* Action type = 0 - Forward to VSI or VSI list */
+#define ICE_SINGLE_ACT_VSI_FORWARDING 0x0
+
+#define ICE_SINGLE_ACT_VSI_ID_S 4
+#define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S)
+#define ICE_SINGLE_ACT_VSI_LIST_ID_S 4
+#define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S)
+ /* This bit needs to be set if action is forward to VSI list */
+#define ICE_SINGLE_ACT_VSI_LIST BIT(14)
+#define ICE_SINGLE_ACT_VALID_BIT BIT(17)
+#define ICE_SINGLE_ACT_DROP BIT(18)
+
+ /* Action type = 1 - Forward to Queue of Queue group */
+#define ICE_SINGLE_ACT_TO_Q 0x1
+#define ICE_SINGLE_ACT_Q_INDEX_S 4
+#define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S)
+#define ICE_SINGLE_ACT_Q_REGION_S 15
+#define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S)
+#define ICE_SINGLE_ACT_Q_PRIORITY BIT(18)
+
+ /* Action type = 2 - Prune */
+#define ICE_SINGLE_ACT_PRUNE 0x2
+#define ICE_SINGLE_ACT_EGRESS BIT(15)
+#define ICE_SINGLE_ACT_INGRESS BIT(16)
+#define ICE_SINGLE_ACT_PRUNET BIT(17)
+ /* Bit 18 should be set to 0 for this action */
+
+ /* Action type = 2 - Pointer */
+#define ICE_SINGLE_ACT_PTR 0x2
+#define ICE_SINGLE_ACT_PTR_VAL_S 4
+#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S)
+ /* Bit 18 should be set to 1 */
+#define ICE_SINGLE_ACT_PTR_BIT BIT(18)
+
+ /* Action type = 3 - Other actions. Last two bits
+ * are other action identifier
+ */
+#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
+#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
+#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
+ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+
+ /* Bit 17:18 - Defines other actions */
+ /* Other action = 0 - Mirror VSI */
+#define ICE_SINGLE_OTHER_ACT_MIRROR 0
+#define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4
+#define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \
+ (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S)
+
+ /* Other action = 3 - Set Stat count */
+#define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3
+#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4
+#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \
+ (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S)
+
+ __le16 index; /* The index of the rule in the lookup table */
+ /* Length and values of the header to be matched per recipe or
+ * lookup-type
+ */
+ __le16 hdr_len;
+ u8 hdr[1];
+};
+#pragma pack()
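
[Editor's sketch: building the 32-bit 'act' word of a lookup rule that forwards matching packets to a single VSI, with the LAN-enable and valid bits set, following the ICE_SINGLE_ACT_* defines above. The helper name is illustrative and CPU_TO_LE32 comes from the osdep layer.]

static inline __le32
example_fwd_to_vsi_act(u16 hw_vsi_id)
{
        u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
                  ICE_SINGLE_ACT_LAN_ENABLE;

        act |= ((u32)hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
               ICE_SINGLE_ACT_VSI_ID_M;
        return CPU_TO_LE32(act);
}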
+
+/* Add/Update/Remove large action command/response entry
+ * "index" is returned as part of a response to a successful Add command, and
+ * can be used to identify the action for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_lg_act {
+ __le16 index; /* Index in large action table */
+ __le16 size;
+ __le32 act[1]; /* array of size for actions */
+ /* Max number of large actions */
+#define ICE_MAX_LG_ACT 4
+ /* Bit 0:1 - Action type */
+#define ICE_LG_ACT_TYPE_S 0
+#define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S)
+
+ /* Action type = 0 - Forward to VSI or VSI list */
+#define ICE_LG_ACT_VSI_FORWARDING 0
+#define ICE_LG_ACT_VSI_ID_S 3
+#define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S)
+#define ICE_LG_ACT_VSI_LIST_ID_S 3
+#define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S)
+ /* This bit needs to be set if action is forward to VSI list */
+#define ICE_LG_ACT_VSI_LIST BIT(13)
+
+#define ICE_LG_ACT_VALID_BIT BIT(16)
+
+ /* Action type = 1 - Forward to Queue of Queue group */
+#define ICE_LG_ACT_TO_Q 0x1
+#define ICE_LG_ACT_Q_INDEX_S 3
+#define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S)
+#define ICE_LG_ACT_Q_REGION_S 14
+#define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S)
+#define ICE_LG_ACT_Q_PRIORITY_SET BIT(17)
+
+ /* Action type = 2 - Prune */
+#define ICE_LG_ACT_PRUNE 0x2
+#define ICE_LG_ACT_EGRESS BIT(14)
+#define ICE_LG_ACT_INGRESS BIT(15)
+#define ICE_LG_ACT_PRUNET BIT(16)
+
+ /* Action type = 3 - Mirror VSI */
+#define ICE_LG_OTHER_ACT_MIRROR 0x3
+#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
+#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
+
+ /* Action type = 5 - Generic Value */
+#define ICE_LG_ACT_GENERIC 0x5
+#define ICE_LG_ACT_GENERIC_VALUE_S 3
+#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
+#define ICE_LG_ACT_GENERIC_OFFSET_S 19
+#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
+#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
+#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
+
+ /* Action = 7 - Set Stat count */
+#define ICE_LG_ACT_STAT_COUNT 0x7
+#define ICE_LG_ACT_STAT_COUNT_S 3
+#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+};
+
+/* Add/Update/Remove VSI list command/response entry
+ * "index" is returned as part of a response to a successful Add command, and
+ * can be used to identify the VSI list for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_vsi_list {
+ __le16 index; /* Index of VSI/Prune list */
+ __le16 number_vsi;
+ __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+};
+
+#pragma pack(1)
+/* Query VSI list command/response entry */
+struct ice_sw_rule_vsi_list_query {
+ __le16 index;
+ ice_declare_bitmap(vsi_list, ICE_MAX_VSI);
+};
+#pragma pack()
+
+#pragma pack(1)
+/* Add switch rule response:
+ * Content of return buffer is same as the input buffer. The status field and
+ * LUT index are updated as part of the response
+ */
+struct ice_aqc_sw_rules_elem {
+ __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
+ __le16 status;
+ union {
+ struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
+ struct ice_sw_rule_lg_act lg_act;
+ struct ice_sw_rule_vsi_list vsi_list;
+ struct ice_sw_rule_vsi_list_query vsi_list_query;
+ } pdata;
+};
+
+#pragma pack()
+
+/* PFC Ignore (direct 0x0301)
+ * The command and response use the same descriptor structure
+ */
+struct ice_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 cmd_flags; /* unused in response */
+#define ICE_AQC_PFC_IGNORE_SET BIT(7)
+#define ICE_AQC_PFC_IGNORE_CLEAR 0
+ u8 reserved[14];
+};
+
+/* Set PFC Mode (direct 0x0303)
+ * Query PFC Mode (direct 0x0302)
+ */
+struct ice_aqc_set_query_pfc_mode {
+ u8 pfc_mode;
+/* For Set Command response, reserved in all other cases */
+#define ICE_AQC_PFC_NOT_CONFIGURED 0
+/* For Query Command response, reserved in all other cases */
+#define ICE_AQC_DCB_DIS 0
+#define ICE_AQC_PFC_VLAN_BASED_PFC 1
+#define ICE_AQC_PFC_DSCP_BASED_PFC 2
+ u8 rsvd[15];
+};
+
+/* Set DCB Parameters (direct 0x0306) */
+struct ice_aqc_set_dcb_params {
+ u8 cmd_flags; /* unused in response */
+#define ICE_AQC_LINK_UP_DCB_CFG BIT(0)
+ u8 valid_flags; /* unused in response */
+#define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0)
+ u8 rsvd[14];
+};
+
+/* Get Default Topology (indirect 0x0400) */
+struct ice_aqc_get_topo {
+ u8 port_num;
+ u8 num_branches;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Update TSE (indirect 0x0403)
+ * Get TSE (indirect 0x0404)
+ * Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
+ */
+struct ice_aqc_sched_elem_cmd {
+ __le16 num_elem_req; /* Used by commands */
+ __le16 num_elem_resp; /* Used by responses */
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the buffer for:
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
+ */
+struct ice_aqc_suspend_resume_elem {
+ __le32 teid[1];
+};
+
+struct ice_aqc_txsched_move_grp_info_hdr {
+ __le32 src_parent_teid;
+ __le32 dest_parent_teid;
+ __le16 num_elems;
+ __le16 reserved;
+};
+
+struct ice_aqc_move_elem {
+ struct ice_aqc_txsched_move_grp_info_hdr hdr;
+ __le32 teid[1];
+};
+
+struct ice_aqc_elem_info_bw {
+ __le16 bw_profile_idx;
+ __le16 bw_alloc;
+};
+
+struct ice_aqc_txsched_elem {
+ u8 elem_type; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0
+#define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1
+#define ICE_AQC_ELEM_TYPE_TC 0x2
+#define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3
+#define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4
+#define ICE_AQC_ELEM_TYPE_LEAF 0x5
+#define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6
+ u8 valid_sections;
+#define ICE_AQC_ELEM_VALID_GENERIC BIT(0)
+#define ICE_AQC_ELEM_VALID_CIR BIT(1)
+#define ICE_AQC_ELEM_VALID_EIR BIT(2)
+#define ICE_AQC_ELEM_VALID_SHARED BIT(3)
+ u8 generic;
+#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
+#define ICE_AQC_ELEM_GENERIC_SP_S 0x4
+#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \
+ (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
+ u8 flags; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1
+ struct ice_aqc_elem_info_bw cir_bw;
+ struct ice_aqc_elem_info_bw eir_bw;
+ __le16 srl_id;
+ __le16 reserved2;
+};
+
+struct ice_aqc_txsched_elem_data {
+ __le32 parent_teid;
+ __le32 node_teid;
+ struct ice_aqc_txsched_elem data;
+};
+
+struct ice_aqc_txsched_topo_grp_info_hdr {
+ __le32 parent_teid;
+ __le16 num_elems;
+ __le16 reserved2;
+};
+
+struct ice_aqc_add_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_conf_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_get_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_get_topo_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ struct ice_aqc_txsched_elem_data
+ generic[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
+struct ice_aqc_delete_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ __le32 teid[1];
+};
+
+/* Query Port ETS (indirect 0x040E)
+ *
+ * This indirect command is used to query port TC node configuration.
+ */
+struct ice_aqc_query_port_ets {
+ __le32 port_teid;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_port_ets_elem {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ /* 3 bits for UP per TC 0-7, 4th byte reserved */
+ __le32 up2tc;
+ u8 tc_bw_share[8];
+ __le32 port_eir_prof_id;
+ __le32 port_cir_prof_id;
+ /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
+ __le32 tc_node_prio;
+#define ICE_TC_NODE_PRIO_S 0x4
+ u8 reserved1[4];
+ __le32 tc_node_teid[8]; /* Used for response, reserved in command */
+};
+
+/* Rate limiting profile for
+ * Add RL profile (indirect 0x0410)
+ * Query RL profile (indirect 0x0411)
+ * Remove RL profile (indirect 0x0415)
+ * These indirect commands act on one or more
+ * RL profiles with the specified data.
+ */
+struct ice_aqc_rl_profile {
+ __le16 num_profiles;
+ __le16 num_processed; /* Only for response. Reserved in Command. */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_rl_profile_elem {
+ u8 level;
+ u8 flags;
+#define ICE_AQC_RL_PROFILE_TYPE_S 0x0
+#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S)
+#define ICE_AQC_RL_PROFILE_TYPE_CIR 0
+#define ICE_AQC_RL_PROFILE_TYPE_EIR 1
+#define ICE_AQC_RL_PROFILE_TYPE_SRL 2
+/* The following flag is used for Query RL Profile Data */
+#define ICE_AQC_RL_PROFILE_INVAL_S 0x7
+#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S)
+
+ __le16 profile_id;
+ __le16 max_burst_size;
+ __le16 rl_multiply;
+ __le16 wake_up_calc;
+ __le16 rl_encode;
+};
+
+struct ice_aqc_rl_profile_generic_elem {
+ struct ice_aqc_rl_profile_elem generic[1];
+};
+
+/* Configure L2 Node CGD (indirect 0x0414)
+ * This indirect command allows configuring a congestion domain for given L2
+ * node TEIDs in the scheduler topology.
+ */
+struct ice_aqc_cfg_l2_node_cgd {
+ __le16 num_l2_nodes;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_cfg_l2_node_cgd_elem {
+ __le32 node_teid;
+ u8 cgd;
+ u8 reserved[3];
+};
+
+struct ice_aqc_cfg_l2_node_cgd_data {
+ struct ice_aqc_cfg_l2_node_cgd_elem elem[1];
+};
+
+/* Query Scheduler Resource Allocation (indirect 0x0412)
+ * This indirect command retrieves the scheduler resources allocated by
+ * EMP Firmware to the given PF.
+ */
+struct ice_aqc_query_txsched_res {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_generic_sched_props {
+ __le16 phys_levels;
+ __le16 logical_levels;
+ u8 flattening_bitmap;
+ u8 max_device_cgds;
+ u8 max_pf_cgds;
+ u8 rsvd0;
+ __le16 rdma_qsets;
+ u8 rsvd1[22];
+};
+
+struct ice_aqc_layer_props {
+ u8 logical_layer;
+ u8 chunk_size;
+ __le16 max_device_nodes;
+ __le16 max_pf_nodes;
+ u8 rsvd0[4];
+ __le16 max_sibl_grp_sz;
+ __le16 max_cir_rl_profiles;
+ __le16 max_eir_rl_profiles;
+ __le16 max_srl_profiles;
+ u8 rsvd1[14];
+};
+
+struct ice_aqc_query_txsched_res_resp {
+ struct ice_aqc_generic_sched_props sched_props;
+ struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
+/* Query Node to Root Topology (indirect 0x0413)
+ * This command uses ice_aqc_get_elem as its data buffer.
+ */
+struct ice_aqc_query_node_to_root {
+ __le32 teid;
+ __le32 num_nodes; /* Response only */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ice_aqc_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define ICE_AQC_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.2 : Report mode
+ * 00b - Report NVM capabilities
+ * 01b - Report topology capabilities
+ * 10b - Report SW configured
+ */
+#define ICE_AQC_REPORT_MODE_S 1
+#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S)
+#define ICE_AQC_REPORT_NVM_CAP 0
+#define ICE_AQC_REPORT_TOPO_CAP BIT(1)
+#define ICE_AQC_REPORT_SW_CFG BIT(2)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
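+
+/* Illustrative sketch (not part of the driver): filling the Get PHY
+ * Capabilities command to report topology capabilities and qualified
+ * modules. Assumes the CPU_TO_LE16() byte-order helper from ice_osdep.h;
+ * the helper name is hypothetical, and descriptor/buffer plumbing is
+ * omitted.
+ */
+static inline void
+ice_example_fill_get_phy_caps(struct ice_aqc_get_phy_caps *cmd, u8 lport)
+{
+ cmd->lport_num = lport;
+ /* report topology capabilities and include the qualified module list */
+ cmd->param0 = CPU_TO_LE16(ICE_AQC_GET_PHY_RQM | ICE_AQC_REPORT_TOPO_CAP);
+}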
+
+/* PHY type defines (extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define ICE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define ICE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define ICE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define ICE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define ICE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define ICE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define ICE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define ICE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define ICE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define ICE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define ICE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define ICE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define ICE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define ICE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define ICE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define ICE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define ICE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define ICE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define ICE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define ICE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define ICE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define ICE_PHY_TYPE_LOW_40GBASE_CR4 BIT_ULL(30)
+#define ICE_PHY_TYPE_LOW_40GBASE_SR4 BIT_ULL(31)
+#define ICE_PHY_TYPE_LOW_40GBASE_LR4 BIT_ULL(32)
+#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
+#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
+#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
+#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41)
+#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42)
+#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43)
+#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45)
+#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48)
+#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49)
+#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52)
+#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56)
+#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57)
+#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60)
+#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62)
+#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63)
+#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
+/* The second set of defines is for phy_type_high. */
+#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+
+struct ice_aqc_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define ICE_AQC_PHY_LOW_POWER_MODE BIT(2)
+#define ICE_AQC_PHY_EN_LINK BIT(3)
+#define ICE_AQC_PHY_AN_MODE BIT(4)
+#define ICE_AQC_PHY_EN_MOD_QUAL BIT(5)
+#define ICE_AQC_PHY_EN_LESM BIT(6)
+#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
+#define ICE_AQC_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1)
+#define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2)
+#define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0)
+#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1)
+#define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2)
+#define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6)
+#define ICE_AQC_PHY_EEE_EN_50GBASE_KR2 BIT(7)
+#define ICE_AQC_PHY_EEE_EN_50GBASE_KR_PAM4 BIT(8)
+#define ICE_AQC_PHY_EEE_EN_100GBASE_KR4 BIT(9)
+#define ICE_AQC_PHY_EEE_EN_100GBASE_KR2_PAM4 BIT(10)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3)
+#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define ICE_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
+#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define ICE_AQC_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ice_aqc_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ice_aqc_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define ICE_AQ_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
+#define ICE_AQ_PHY_ENA_LINK BIT(3)
+#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define ICE_AQ_PHY_ENA_LESM BIT(6)
+#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
+ __le16 eeer_value;
+ u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct ice_aqc_set_mac_cfg {
+ __le16 max_frame_size;
+ u8 params;
+#define ICE_AQ_SET_MAC_PACE_S 3
+#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S)
+#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7)
+#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0
+#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M
+ u8 tx_tmr_priority;
+ __le16 tx_tmr_value;
+ __le16 fc_refresh_threshold;
+ u8 drop_opts;
+#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0)
+#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0
+#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0)
+ u8 reserved[7];
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ice_aqc_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1)
+#define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ice_aqc_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define ICE_AQ_LSE_M 0x3
+#define ICE_AQ_LSE_NOP 0x0
+#define ICE_AQ_LSE_DIS 0x2
+#define ICE_AQ_LSE_ENA 0x3
+ /* only response uses this flag */
+#define ICE_AQ_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ice_aqc_get_link_status_data {
+ u8 topo_media_conflict;
+#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
+#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1)
+#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2)
+#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define ICE_AQ_LINK_CFG_ERR BIT(0)
+ u8 link_info;
+#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
+#define ICE_AQ_LINK_FAULT BIT(1)
+#define ICE_AQ_LINK_FAULT_TX BIT(2)
+#define ICE_AQ_LINK_FAULT_RX BIT(3)
+#define ICE_AQ_LINK_FAULT_REMOTE BIT(4)
+#define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define ICE_AQ_MEDIA_AVAILABLE BIT(6)
+#define ICE_AQ_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define ICE_AQ_AN_COMPLETED BIT(0)
+#define ICE_AQ_LP_AN_ABILITY BIT(1)
+#define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define ICE_AQ_FEC_EN BIT(3)
+#define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define ICE_AQ_LINK_PAUSE_TX BIT(5)
+#define ICE_AQ_LINK_PAUSE_RX BIT(6)
+#define ICE_AQ_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
+#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define ICE_AQ_LINK_TX_S 2
+#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
+#define ICE_AQ_LINK_TX_ACTIVE 0
+#define ICE_AQ_LINK_TX_DRAINED 1
+#define ICE_AQ_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define ICE_AQ_LINK_LB_PHY_LCL BIT(0)
+#define ICE_AQ_LINK_LB_PHY_RMT BIT(1)
+#define ICE_AQ_LINK_LB_MAC_LCL BIT(2)
+#define ICE_AQ_LINK_LB_PHY_IDX_S 3
+#define ICE_AQ_LINK_LB_PHY_IDX_M (0x7 << ICE_AQ_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0)
+#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1)
+#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2)
+#define ICE_AQ_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define ICE_AQ_CFG_PACING_S 3
+#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S)
+#define ICE_AQ_CFG_PACING_TYPE_M BIT(7)
+#define ICE_AQ_CFG_PACING_TYPE_AVG 0
+#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define ICE_AQ_PWR_CLASS_M 0x3
+#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0
+#define ICE_AQ_LINK_PWR_BASET_HIGH 1
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define ICE_AQ_LINK_SPEED_M 0x7FF
+#define ICE_AQ_LINK_SPEED_10MB BIT(0)
+#define ICE_AQ_LINK_SPEED_100MB BIT(1)
+#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
+#define ICE_AQ_LINK_SPEED_2500MB BIT(3)
+#define ICE_AQ_LINK_SPEED_5GB BIT(4)
+#define ICE_AQ_LINK_SPEED_10GB BIT(5)
+#define ICE_AQ_LINK_SPEED_20GB BIT(6)
+#define ICE_AQ_LINK_SPEED_25GB BIT(7)
+#define ICE_AQ_LINK_SPEED_40GB BIT(8)
+#define ICE_AQ_LINK_SPEED_50GB BIT(9)
+#define ICE_AQ_LINK_SPEED_100GB BIT(10)
+#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
+ __le32 reserved3; /* Aligns next field to 8-byte boundary */
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+};
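+
+/* Illustrative sketch (not part of the driver): mapping the link_speed
+ * bit reported in struct ice_aqc_get_link_status_data to a speed in Mb/s.
+ * Assumes the LE16_TO_CPU() byte-order helper from ice_osdep.h; the helper
+ * name is hypothetical. Returns 0 for ICE_AQ_LINK_SPEED_UNKNOWN or any
+ * unrecognized value.
+ */
+static inline u32
+ice_example_aq_speed_to_mbps(__le16 link_speed)
+{
+ switch (LE16_TO_CPU(link_speed)) {
+ case ICE_AQ_LINK_SPEED_10MB:   return 10;
+ case ICE_AQ_LINK_SPEED_100MB:  return 100;
+ case ICE_AQ_LINK_SPEED_1000MB: return 1000;
+ case ICE_AQ_LINK_SPEED_2500MB: return 2500;
+ case ICE_AQ_LINK_SPEED_5GB:    return 5000;
+ case ICE_AQ_LINK_SPEED_10GB:   return 10000;
+ case ICE_AQ_LINK_SPEED_20GB:   return 20000;
+ case ICE_AQ_LINK_SPEED_25GB:   return 25000;
+ case ICE_AQ_LINK_SPEED_40GB:   return 40000;
+ case ICE_AQ_LINK_SPEED_50GB:   return 50000;
+ case ICE_AQ_LINK_SPEED_100GB:  return 100000;
+ default:                       return 0;
+ }
+}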
+
+/* Set event mask command (direct 0x0613) */
+struct ice_aqc_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define ICE_AQ_LINK_EVENT_UPDOWN BIT(1)
+#define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2)
+#define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3)
+#define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7)
+#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define ICE_AQ_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define ICE_AQ_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+ u8 reserved1[6];
+};
+
+/* Set PHY Loopback command (direct 0x0619) */
+struct ice_aqc_set_phy_lb {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQ_PHY_LB_PORT_NUM_VALID BIT(0)
+ u8 phy_index;
+ u8 lb_mode;
+#define ICE_AQ_PHY_LB_EN BIT(0)
+#define ICE_AQ_PHY_LB_TYPE_M BIT(1)
+#define ICE_AQ_PHY_LB_TYPE_LOCAL 0
+#define ICE_AQ_PHY_LB_TYPE_REMOTE ICE_AQ_PHY_LB_TYPE_M
+#define ICE_AQ_PHY_LB_LEVEL_M BIT(2)
+#define ICE_AQ_PHY_LB_LEVEL_PMD 0
+#define ICE_AQ_PHY_LB_LEVEL_PCS ICE_AQ_PHY_LB_LEVEL_M
+ u8 reserved2[12];
+};
+
+/* Set MAC Loopback command (direct 0x0620) */
+struct ice_aqc_set_mac_lb {
+ u8 lb_mode;
+#define ICE_AQ_MAC_LB_EN BIT(0)
+#define ICE_AQ_MAC_LB_OSC_CLK BIT(1)
+ u8 reserved[15];
+};
+
+/* DNL Get Status command (indirect 0x680)
+ * Structure used for the response; the command uses the generic
+ * ice_aqc_generic struct to pass a buffer address to the FW.
+ */
+struct ice_aqc_dnl_get_status {
+ u8 ctx;
+ u8 status;
+#define ICE_AQ_DNL_STATUS_IDLE 0x0
+#define ICE_AQ_DNL_STATUS_RESERVED 0x1
+#define ICE_AQ_DNL_STATUS_STOPPED 0x2
+#define ICE_AQ_DNL_STATUS_FATAL 0x3 /* Fatal DNL engine error */
+#define ICE_AQ_DNL_SRC_S 3
+#define ICE_AQ_DNL_SRC_M (0x3 << ICE_AQ_DNL_SRC_S)
+#define ICE_AQ_DNL_SRC_NVM (0x0 << ICE_AQ_DNL_SRC_S)
+#define ICE_AQ_DNL_SRC_NVM_SCRATCH (0x1 << ICE_AQ_DNL_SRC_S)
+ u8 stack_ptr;
+#define ICE_AQ_DNL_ST_PTR_S 0x0
+#define ICE_AQ_DNL_ST_PTR_M (0x7 << ICE_AQ_DNL_ST_PTR_S)
+ u8 engine_flags;
+#define ICE_AQ_DNL_FLAGS_ERROR BIT(2)
+#define ICE_AQ_DNL_FLAGS_NEGATIVE BIT(3)
+#define ICE_AQ_DNL_FLAGS_OVERFLOW BIT(4)
+#define ICE_AQ_DNL_FLAGS_ZERO BIT(5)
+#define ICE_AQ_DNL_FLAGS_CARRY BIT(6)
+#define ICE_AQ_DNL_FLAGS_JUMP BIT(7)
+ __le16 pc;
+ __le16 activity_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_dnl_get_status_data {
+ __le16 activity_err_code;
+ __le16 act_err_code;
+#define ICE_AQ_DNL_ACT_ERR_SUCCESS 0x0000 /* no error */
+#define ICE_AQ_DNL_ACT_ERR_PARSE 0x8001 /* NVM parse error */
+#define ICE_AQ_DNL_ACT_ERR_UNSUPPORTED 0x8002 /* unsupported action */
+#define ICE_AQ_DNL_ACT_ERR_NOT_FOUND 0x8003 /* activity not found */
+#define ICE_AQ_DNL_ACT_ERR_BAD_JUMP 0x8004 /* an illegal jump */
+#define ICE_AQ_DNL_ACT_ERR_PSTO_OVER 0x8005 /* persistent store overflow */
+#define ICE_AQ_DNL_ACT_ERR_ST_OVERFLOW 0x8006 /* stack overflow */
+#define ICE_AQ_DNL_ACT_ERR_TIMEOUT 0x8007 /* activity timeout */
+#define ICE_AQ_DNL_ACT_ERR_BREAK 0x0008 /* stopped at breakpoint */
+#define ICE_AQ_DNL_ACT_ERR_INVAL_ARG 0x0101 /* invalid action argument */
+ __le32 execution_time; /* in nanoseconds */
+ __le16 lib_ver;
+ u8 psto_local_sz;
+ u8 psto_global_sz;
+ u8 stack_sz;
+#define ICE_AQ_DNL_STACK_SZ_S 0
+#define ICE_AQ_DNL_STACK_SZ_M (0xF << ICE_AQ_DNL_STACK_SZ_S)
+ u8 port_count;
+#define ICE_AQ_DNL_PORT_CNT_S 0
+#define ICE_AQ_DNL_PORT_CNT_M (0x1F << ICE_AQ_DNL_PORT_CNT_S)
+ __le16 act_cache_cntr;
+ u32 i2c_clk_cntr;
+ u32 mdio_clk_cntr;
+ u32 sb_iosf_clk_cntr;
+};
+
+/* DNL run command (direct 0x681) */
+struct ice_aqc_dnl_run_command {
+ u8 reserved0;
+ u8 command;
+#define ICE_AQ_DNL_CMD_S 0
+#define ICE_AQ_DNL_CMD_M (0x7 << ICE_AQ_DNL_CMD_S)
+#define ICE_AQ_DNL_CMD_RESET 0x0
+#define ICE_AQ_DNL_CMD_RUN 0x1
+#define ICE_AQ_DNL_CMD_STEP 0x3
+#define ICE_AQ_DNL_CMD_ABORT 0x4
+#define ICE_AQ_DNL_CMD_SET_PC 0x7
+#define ICE_AQ_DNL_CMD_SRC_S 3
+#define ICE_AQ_DNL_CMD_SRC_M (0x3 << ICE_AQ_DNL_CMD_SRC_S)
+#define ICE_AQ_DNL_CMD_SRC_DNL 0x0
+#define ICE_AQ_DNL_CMD_SRC_SCRATCH 0x1
+ __le16 new_pc;
+ u8 reserved1[12];
+};
+
+/* DNL call command (indirect 0x682)
+ * The struct is used for both the command and the response.
+ */
+struct ice_aqc_dnl_call_command {
+ u8 ctx; /* Used in command, reserved in response */
+ u8 reserved;
+ __le16 activity_id;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* DNL call command/response buffer (indirect 0x682) */
+struct ice_aqc_dnl_call {
+ __le32 stores[4];
+};
+
+/* Used for both commands:
+ * DNL read sto command (indirect 0x683)
+ * DNL write sto command (indirect 0x684)
+ */
+struct ice_aqc_dnl_read_write_command {
+ u8 ctx;
+ u8 sto_sel; /* STORE select */
+#define ICE_AQC_DNL_STORE_SELECT_STORE 0x0
+#define ICE_AQC_DNL_STORE_SELECT_PSTO 0x1
+#define ICE_AQC_DNL_STORE_SELECT_STACK 0x2
+ __le16 offset;
+ __le32 data; /* Used for write sto only */
+ __le32 addr_high; /* Used for read sto only */
+ __le32 addr_low; /* Used for read sto only */
+};
+
+/* Used for both command responses:
+ * DNL read sto response (indirect 0x683)
+ * DNL write sto response (indirect 0x684)
+ */
+struct ice_aqc_dnl_read_write_response {
+ u8 reserved;
+ u8 status; /* Reserved for read command */
+ __le16 size; /* Reserved for write command */
+ __le32 data; /* Reserved for write command */
+ __le32 addr_high; /* Reserved for write command */
+ __le32 addr_low; /* Reserved for write command */
+};
+
+/* DNL set breakpoints command (indirect 0x686) */
+struct ice_aqc_dnl_set_breakpoints_command {
+ __le32 reserved[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* DNL set breakpoints data buffer structure (indirect 0x686) */
+struct ice_aqc_dnl_set_breakpoints {
+ u8 ctx;
+ u8 ena; /* 0- disabled, 1- enabled */
+ __le16 offset;
+ __le16 activity_id;
+};
+
+/* DNL read log data command (indirect 0x687) */
+struct ice_aqc_dnl_read_log_command {
+ __le16 reserved0;
+ __le16 offset;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* DNL read log data response (indirect 0x687) */
+struct ice_aqc_dnl_read_log_response {
+ __le16 reserved;
+ __le16 size;
+ __le32 data;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_link_topo_addr {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S)
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
+ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
+#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2
+#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
+ u8 index;
+ __le16 handle;
+#define ICE_AQC_LINK_TOPO_HANDLE_S 0
+#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ice_aqc_get_link_topo {
+ struct ice_aqc_link_topo_addr addr;
+ u8 node_part_num;
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ice_aqc_get_link_topo_pin {
+ struct ice_aqc_link_topo_addr addr;
+ u8 input_io_params;
+#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S 0
+#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_M \
+ (0x1F << ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S)
+#define ICE_AQC_LINK_TOPO_IO_FUNC_GPIO 0
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RESET_N 1
+#define ICE_AQC_LINK_TOPO_IO_FUNC_INT_N 2
+#define ICE_AQC_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define ICE_AQC_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define ICE_AQC_LINK_TOPO_IO_FUNC_LPMODE 6
+#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RS0 9
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RS1 10
+#define ICE_AQC_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define ICE_AQC_LINK_TOPO_IO_FUNC_LED 12
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RED_LED 12
+#define ICE_AQC_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define ICE_AQC_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S 5
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_M \
+ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
+/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S 0
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_M \
+ (0x1F << ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S)
+/* Use ICE_AQC_LINK_TOPO_IO_FUNC_* for the non-numerical options */
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_S 5
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_M \
+ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
+/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S 0
+#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_M \
+ (0x7 << ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S)
+#define ICE_AQC_LINK_TOPO_OUTPUT_INT_S 3
+#define ICE_AQC_LINK_TOPO_OUTPUT_INT_M \
+ (0x3 << ICE_AQC_LINK_TOPO_OUTPUT_INT_S)
+#define ICE_AQC_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define ICE_AQC_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define ICE_AQC_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ice_aqc_i2c {
+ struct ice_aqc_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define ICE_AQC_I2C_DATA_SIZE_S 0
+#define ICE_AQC_I2C_DATA_SIZE_M (0xF << ICE_AQC_I2C_DATA_SIZE_S)
+#define ICE_AQC_I2C_ADDR_TYPE_M BIT(4)
+#define ICE_AQC_I2C_ADDR_TYPE_7BIT 0
+#define ICE_AQC_I2C_ADDR_TYPE_10BIT ICE_AQC_I2C_ADDR_TYPE_M
+#define ICE_AQC_I2C_DATA_OFFSET_S 5
+#define ICE_AQC_I2C_DATA_OFFSET_M (0x3 << ICE_AQC_I2C_DATA_OFFSET_S)
+#define ICE_AQC_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define ICE_AQC_I2C_ADDR_7BIT_MASK 0x7F
+#define ICE_AQC_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ice_aqc_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ice_aqc_mdio {
+ struct ice_aqc_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define ICE_AQC_MDIO_DEV_S 0
+#define ICE_AQC_MDIO_DEV_M (0x1F << ICE_AQC_MDIO_DEV_S)
+#define ICE_AQC_MDIO_CLAUSE_22 BIT(5)
+#define ICE_AQC_MDIO_CLAUSE_45 BIT(6)
+ u8 rsvd;
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ice_aqc_gpio_by_func {
+ struct ice_aqc_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define ICE_AQC_GPIO_FUNC_S 0
+#define ICE_AQC_GPIO_FUNC_M (0x1F << ICE_AQC_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define ICE_AQC_GPIO_ON BIT(0)
+#define ICE_AQC_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+/* Set LED (direct, 0x06E8) */
+struct ice_aqc_set_led {
+ struct ice_aqc_link_topo_addr topo_addr;
+ u8 color_and_blink;
+#define ICE_AQC_LED_COLOR_S 0
+#define ICE_AQC_LED_COLOR_M (0x7 << ICE_AQC_LED_COLOR_S)
+#define ICE_AQC_LED_COLOR_SKIP 0
+#define ICE_AQC_LED_COLOR_RED 1
+#define ICE_AQC_LED_COLOR_ORANGE 2
+#define ICE_AQC_LED_COLOR_YELLOW 3
+#define ICE_AQC_LED_COLOR_GREEN 4
+#define ICE_AQC_LED_COLOR_BLUE 5
+#define ICE_AQC_LED_COLOR_PURPLE 6
+#define ICE_AQC_LED_BLINK_S 3
+#define ICE_AQC_LED_BLINK_M (0x7 << ICE_AQC_LED_BLINK_S)
+#define ICE_AQC_LED_BLINK_NONE 0
+#define ICE_AQC_LED_BLINK_SLOW 1
+#define ICE_AQC_LED_BLINK_SLOW_MAC 2
+#define ICE_AQC_LED_BLINK_SLOW_FLTR 3
+#define ICE_AQC_LED_BLINK_FAST 5
+#define ICE_AQC_LED_BLINK_FAST_MAC 6
+#define ICE_AQC_LED_BLINK_FAST_FLTR 7
+ u8 rsvd[9];
+};
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ice_aqc_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0)
+#define ICE_AQC_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_PORT_OPT_PORT_NUM_VALID BIT(0)
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_S 0
+#define ICE_AQC_PORT_OPT_COUNT_M (0xF << ICE_AQC_PORT_OPT_COUNT_S)
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_S 0
+#define ICE_AQC_PORT_OPT_ACTIVE_M (0xF << ICE_AQC_PORT_OPT_ACTIVE_S)
+#define ICE_AQC_PORT_OPT_FORCED BIT(6)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+ u8 rsvd[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_S 0
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M (0xF << ICE_AQC_PORT_OPT_PMD_COUNT_S)
+#define ICE_AQC_PORT_OPT_PMD_WIDTH_S 4
+#define ICE_AQC_PORT_OPT_PMD_WIDTH_M (0xF << ICE_AQC_PORT_OPT_PMD_WIDTH_S)
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_S 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_M (0xF << ICE_AQC_PORT_OPT_MAX_LANE_S)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+ u8 global_scid[2];
+ u8 phy_scid[2];
+};
+
+/* The buffer for command 0x06EA contains port_options_count entries
+ * in the option array.
+ */
+struct ice_aqc_get_port_options_data {
+ struct ice_aqc_get_port_options_elem option[1];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SET_PORT_OPT_PORT_NUM_VALID BIT(0)
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ice_aqc_gpio {
+ __le16 gpio_ctrl_handle;
+#define ICE_AQC_GPIO_HANDLE_S 0
+#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ice_aqc_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F
+#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF
+#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10)
+#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0
+#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S)
+#define ICE_AQC_SFF_NO_PAGE_CHANGE 0
+#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1
+#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2
+#define ICE_AQC_SFF_IS_WRITE BIT(15)
+ __le16 i2c_mem_addr;
+ __le16 eeprom_page;
+#define ICE_AQC_SFF_EEPROM_BANK_S 0
+#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S)
+#define ICE_AQC_SFF_EEPROM_PAGE_S 8
+#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S)
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ice_aqc_nvm {
+#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define ICE_AQC_NVM_LAST_CMD BIT(0)
+#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define ICE_AQC_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
+#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4)
+#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6)
+#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
+ __le16 module_typeid;
+ __le16 length;
+#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
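+
+/* Illustrative sketch (not part of the driver): splitting a 24-bit flash
+ * offset (bounded by ICE_AQC_NVM_MAX_OFFSET) across the offset_low and
+ * offset_high fields of struct ice_aqc_nvm. Assumes the CPU_TO_LE16()
+ * byte-order helper from ice_osdep.h; the helper name is hypothetical.
+ */
+static inline void
+ice_example_nvm_set_offset(struct ice_aqc_nvm *cmd, u32 offset)
+{
+ /* callers are expected to pass offset <= ICE_AQC_NVM_MAX_OFFSET */
+ cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+}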
+
+/* NVM module type IDs, along with the offsets and read lengths
+ * needed for struct ice_aqc_nvm.
+ */
+#define ICE_AQC_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define ICE_AQC_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_EMP_SR_PTR_OFFSET 0x90
+#define ICE_AQC_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_S 15
+#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define ICE_AQC_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define ICE_AQC_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+/* The result of a netlist NVM read comes in TLV format. The actual data
+ * (the netlist header) starts at word offset 1 (byte 2). The FW strips
+ * the type field out of the TLV header, so all netlist field offsets
+ * must be adjusted by 1 word (2 bytes) to map to their correct
+ * location.
+ */
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
+#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
+#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ice_aqc_nvm_cfg {
+ u8 cmd_flags;
+#define ICE_AQC_ANVM_MULTIPLE_ELEMS BIT(0)
+#define ICE_AQC_ANVM_IMMEDIATE_FIELD BIT(1)
+#define ICE_AQC_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ice_aqc_nvm_checksum {
+ u8 flags;
+#define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0)
+#define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/* Send to PF command (indirect 0x0801); the ID field is used only by the PF.
+ *
+ * Send to VF command (indirect 0x0802); the ID field is used only by the PF.
+ */
+struct ice_aqc_pf_vf_msg {
+ __le32 id;
+ u32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ice_aqc_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ice_aqc_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Done Alternate Write (direct 0x0904) */
+struct ice_aqc_done_alt_write {
+ u8 flags;
+#define ICE_AQC_CMD_UEFI_BIOS_MODE BIT(0)
+#define ICE_AQC_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ice_aqc_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+/* Get LLDP MIB (indirect 0x0A00)
+ * Note: This is also used by the LLDP MIB Change Event (0x0A01)
+ * as the format is the same.
+ */
+struct ice_aqc_lldp_get_mib {
+ u8 type;
+#define ICE_AQ_LLDP_MIB_TYPE_S 0
+#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
+#define ICE_AQ_LLDP_MIB_LOCAL 0
+#define ICE_AQ_LLDP_MIB_REMOTE 1
+#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
+#define ICE_AQ_LLDP_BRID_TYPE_S 2
+#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
+#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
+#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
+/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
+#define ICE_AQ_LLDP_TX_S 0x4
+#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
+#define ICE_AQ_LLDP_TX_ACTIVE 0
+#define ICE_AQ_LLDP_TX_SUSPENDED 1
+#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* The following bytes are reserved in the Get LLDP MIB command (0x0A00)
+ * and in the LLDP MIB Change Event (0x0A01); they are valid only in the
+ * Get LLDP MIB (0x0A00) response.
+ */
+ u8 reserved1;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Configure LLDP MIB Change Event (direct 0x0A01) */
+/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
+struct ice_aqc_lldp_set_mib_change {
+ u8 command;
+#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+ u8 reserved[15];
+};
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct ice_aqc_lldp_add_delete_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct ice_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Stop LLDP (direct 0x0A05) */
+struct ice_aqc_lldp_stop {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
+#define ICE_AQ_LLDP_AGENT_STOP 0x0
+#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
+#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
+ u8 reserved[15];
+};
+
+/* Start LLDP (direct 0x0A06) */
+struct ice_aqc_lldp_start {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_START BIT(0)
+#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
+ u8 reserved[15];
+};
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * The command uses the generic descriptor struct and
+ * returns the struct below as an indirect response.
+ */
+struct ice_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define ICE_AQC_CEE_APP_FCOE_S 0
+#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
+#define ICE_AQC_CEE_APP_ISCSI_S 3
+#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
+#define ICE_AQC_CEE_APP_FIP_S 8
+#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
+ __le32 tlv_status;
+#define ICE_AQC_CEE_PG_STATUS_S 0
+#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
+#define ICE_AQC_CEE_PFC_STATUS_S 3
+#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
+#define ICE_AQC_CEE_FCOE_STATUS_S 8
+#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
+#define ICE_AQC_CEE_ISCSI_STATUS_S 11
+#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
+#define ICE_AQC_CEE_FIP_STATUS_S 16
+#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
+ u8 reserved[12];
+};
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBX.
+ */
+struct ice_aqc_lldp_set_local_mib {
+ u8 type;
+#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
+#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
+#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
+#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
+#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_lldp_set_local_mib_resp {
+ u8 status;
+#define SET_LOCAL_MIB_RESP_EVENT_M BIT(0)
+#define SET_LOCAL_MIB_RESP_MIB_CHANGE_SILENT 0
+#define SET_LOCAL_MIB_RESP_MIB_CHANGE_EVENT SET_LOCAL_MIB_RESP_EVENT_M
+ u8 reserved[15];
+};
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBX.
+ * The same structure is used for the response, with the command field
+ * being used as the status field.
+ */
+struct ice_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+#define ICE_AQC_START_STOP_AGENT_M BIT(0)
+#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
+#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
+ u8 reserved[15];
+};
+
+/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
+struct ice_aqc_get_set_rss_key {
+#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
+#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0
+#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
+#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
+
+/**
+ * struct ice_aqc_get_set_rss_keys - Get/Set RSS hash key command buffer
+ * @standard_rss_key: 40 most significant bytes of hash key
+ * @extended_hash_key: 12 least significant bytes of hash key
+ *
+ * Set/Get 40 byte hash key using standard_rss_key field, and set
+ * extended_hash_key field to zero. Set/Get 52 byte hash key using
+ * standard_rss_key field for 40 most significant bytes and the
+ * extended_hash_key field for the 12 least significant bytes of hash key.
+ */
+struct ice_aqc_get_set_rss_keys {
+ u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
+};
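+
+/* Illustrative sketch (not part of the driver): programming a standard
+ * 40-byte RSS hash key. Per the comment above, extended_hash_key is zeroed
+ * when only the 40-byte key is used. The helper name and the caller-provided
+ * key buffer are hypothetical.
+ */
+static inline void
+ice_example_fill_rss_key(struct ice_aqc_get_set_rss_keys *buf,
+   const u8 key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE])
+{
+ u8 i;
+
+ for (i = 0; i < ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; i++)
+  buf->standard_rss_key[i] = key[i];
+ for (i = 0; i < ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE; i++)
+  buf->extended_hash_key[i] = 0;
+}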
+
+/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
+struct ice_aqc_get_set_rss_lut {
+#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+ __le16 vsi_id;
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
+ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
+ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
+
+#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
+#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \
+ (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S)
+
+ __le16 flags;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Add Tx LAN Queues (indirect 0x0C30) */
+struct ice_aqc_add_txqs {
+ u8 num_qgrps;
+ u8 reserved[3];
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Add Tx LAN Queues
+ * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
+ */
+struct ice_aqc_add_txqs_perq {
+ __le16 txq_id;
+ u8 rsvd[2];
+ __le32 q_teid;
+ u8 txq_ctx[22];
+ u8 rsvd2[2];
+ struct ice_aqc_txsched_elem info;
+};
+
+/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
+ * is an array of the following structs. Note that the length of
+ * each struct ice_aqc_add_tx_qgrp varies with the number of queues
+ * in each group.
+ */
+struct ice_aqc_add_tx_qgrp {
+ __le32 parent_teid;
+ u8 num_txqs;
+ u8 rsvd[3];
+ struct ice_aqc_add_txqs_perq txqs[1];
+};
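+
+/* Illustrative sketch (not part of the driver): computing the size of one
+ * variable-length queue group in the Add Tx LAN Queues buffer. txqs[1] is a
+ * variable-length array, so num_txqs - 1 additional per-queue entries follow
+ * the fixed portion. The helper name is hypothetical and assumes
+ * num_txqs >= 1.
+ */
+static inline u32
+ice_example_add_tx_qgrp_size(u8 num_txqs)
+{
+ return (u32)(sizeof(struct ice_aqc_add_tx_qgrp) +
+       (num_txqs - 1) * sizeof(struct ice_aqc_add_txqs_perq));
+}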
+
+/* Disable Tx LAN Queues (indirect 0x0C31) */
+struct ice_aqc_dis_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_DIS_CMD_S 0
+#define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3)
+ u8 num_entries;
+ __le16 vmvf_and_timeout;
+#define ICE_AQC_Q_DIS_VMVF_NUM_S 0
+#define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S)
+#define ICE_AQC_Q_DIS_TIMEOUT_S 10
+#define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
+ * contains the following structures, arrayed one after the
+ * other.
+ * Note: Since the q_id is 16 bits wide, if the
+ * number of queues is even, then 2 bytes of alignment MUST be
+ * added before the start of the next group, to allow correct
+ * alignment of the parent_teid field.
+ */
+struct ice_aqc_dis_txq_item {
+ __le32 parent_teid;
+ u8 num_qs;
+ u8 rsvd;
+ /* The length of the q_id array varies according to num_qs */
+ __le16 q_id[1];
+ /* This only applies from F8 onward */
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
+ (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
+ (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+};
+
+struct ice_aqc_dis_txq {
+ struct ice_aqc_dis_txq_item qgrps[1];
+};
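+
+/* Illustrative sketch (not part of the driver): sizing one entry of the
+ * Disable Tx LAN Queues buffer, including the 2 bytes of padding required
+ * after an even number of 16-bit queue IDs so that the next entry's
+ * parent_teid stays 4-byte aligned (see the note above). The helper name is
+ * hypothetical and assumes num_qs >= 1.
+ */
+static inline u16
+ice_example_dis_txq_item_size(u8 num_qs)
+{
+ u16 size = (u16)(sizeof(struct ice_aqc_dis_txq_item) +
+     (num_qs - 1) * sizeof(__le16));
+
+ /* pad so the next group's parent_teid remains 32-bit aligned */
+ if (!(num_qs % 2))
+  size += 2;
+ return size;
+}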
+
+/* Tx LAN Queues Cleanup Event (0x0C31) */
+struct ice_aqc_txqs_cleanup {
+ __le16 caller_opc;
+ __le16 cmd_tag;
+ u8 reserved[12];
+};
+
+/* Move / Reconfigure Tx Queues (indirect 0x0C32) */
+struct ice_aqc_move_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_CMD_TYPE_S 0
+#define ICE_AQC_Q_CMD_TYPE_M (0x3 << ICE_AQC_Q_CMD_TYPE_S)
+#define ICE_AQC_Q_CMD_TYPE_MOVE 1
+#define ICE_AQC_Q_CMD_TYPE_TC_CHANGE 2
+#define ICE_AQC_Q_CMD_TYPE_MOVE_AND_TC 3
+#define ICE_AQC_Q_CMD_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_CMD_FLUSH_PIPE BIT(3)
+ u8 num_qs;
+ u8 rsvd;
+ u8 timeout;
+#define ICE_AQC_Q_CMD_TIMEOUT_S 2
+#define ICE_AQC_Q_CMD_TIMEOUT_M (0x3F << ICE_AQC_Q_CMD_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Per-queue data buffer for the Move Tx LAN Queues command/response */
+struct ice_aqc_move_txqs_elem {
+ __le16 txq_id;
+ u8 q_cgd;
+ u8 rsvd;
+ __le32 q_teid;
+};
+
+/* Indirect data buffer for the Move Tx LAN Queues command/response */
+struct ice_aqc_move_txqs_data {
+ __le32 src_teid;
+ __le32 dest_teid;
+ struct ice_aqc_move_txqs_elem txqs[1];
+};
+
+/* Download Package (indirect 0x0C40) */
+/* Also used for Update Package (indirect 0x0C42) */
+struct ice_aqc_download_pkg {
+ u8 flags;
+#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
+ u8 reserved[3];
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_download_pkg_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Package Info List (indirect 0x0C43) */
+struct ice_aqc_get_pkg_info_list {
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Version format for packages */
+struct ice_pkg_ver {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define ICE_PKG_NAME_SIZE 32
+#define ICE_SEG_NAME_SIZE 28
+
+struct ice_aqc_get_pkg_info {
+ struct ice_pkg_ver ver;
+ char name[ICE_SEG_NAME_SIZE];
+ __le32 track_id;
+ u8 is_in_nvm;
+ u8 is_active;
+ u8 is_active_at_boot;
+ u8 is_modified;
+};
+
+/* Get Package Info List response buffer format (0x0C43) */
+struct ice_aqc_get_pkg_info_resp {
+ __le32 count;
+ struct ice_aqc_get_pkg_info pkg_info[1];
+};
+
+/* Driver Shared Parameters (direct, 0x0C90) */
+struct ice_aqc_driver_shared_params {
+ u8 set_or_get_op;
+#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
+#define ICE_AQC_DRIVER_PARAM_SET 0
+#define ICE_AQC_DRIVER_PARAM_GET 1
+ u8 param_indx;
+#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
+ u8 rsvd[2];
+ __le32 param_val;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct ice_aqc_event_lan_overflow {
+ __le32 prtdcb_ruptq;
+ __le32 qtx_ctl;
+ u8 reserved[8];
+};
+
+/**
+ * struct ice_aq_desc - Admin Queue (AQ) descriptor
+ * @flags: ICE_AQ_FLAG_* flags
+ * @opcode: AQ command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts on the Admin Transmit Queue
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Receive Queue (ARQ) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ice_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ice_aqc_generic generic;
+ struct ice_aqc_get_ver get_ver;
+ struct ice_aqc_driver_ver driver_ver;
+ struct ice_aqc_q_shutdown q_shutdown;
+ struct ice_aqc_get_exp_err exp_err;
+ struct ice_aqc_req_res res_owner;
+ struct ice_aqc_manage_mac_read mac_read;
+ struct ice_aqc_manage_mac_write mac_write;
+ struct ice_aqc_clear_pxe clear_pxe;
+ struct ice_aqc_config_no_drop_policy no_drop;
+ struct ice_aqc_add_update_mir_rule add_update_rule;
+ struct ice_aqc_delete_mir_rule del_rule;
+ struct ice_aqc_list_caps get_cap;
+ struct ice_aqc_get_phy_caps get_phy;
+ struct ice_aqc_set_phy_cfg set_phy;
+ struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_dnl_get_status get_status;
+ struct ice_aqc_dnl_run_command dnl_run;
+ struct ice_aqc_dnl_call_command dnl_call;
+ struct ice_aqc_dnl_read_write_command dnl_read_write;
+ struct ice_aqc_dnl_read_write_response dnl_read_write_resp;
+ struct ice_aqc_dnl_set_breakpoints_command dnl_set_brk;
+ struct ice_aqc_dnl_read_log_command dnl_read_log;
+ struct ice_aqc_dnl_read_log_response dnl_read_log_resp;
+ struct ice_aqc_i2c read_write_i2c;
+ struct ice_aqc_mdio read_write_mdio;
+ struct ice_aqc_gpio_by_func read_write_gpio_by_func;
+ struct ice_aqc_gpio read_write_gpio;
+ struct ice_aqc_set_led set_led;
+ struct ice_aqc_sff_eeprom read_write_sff_param;
+ struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
+ struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_set_port_params set_port_params;
+ struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_storm_cfg storm_conf;
+ struct ice_aqc_get_topo get_topo;
+ struct ice_aqc_sched_elem_cmd sched_elem_cmd;
+ struct ice_aqc_query_txsched_res query_sched_res;
+ struct ice_aqc_query_node_to_root query_node_to_root;
+ struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd;
+ struct ice_aqc_query_port_ets port_ets;
+ struct ice_aqc_rl_profile rl_profile;
+ struct ice_aqc_nvm nvm;
+ struct ice_aqc_nvm_cfg nvm_cfg;
+ struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_read_write_alt_direct read_write_alt_direct;
+ struct ice_aqc_read_write_alt_indirect read_write_alt_indirect;
+ struct ice_aqc_done_alt_write done_alt_write;
+ struct ice_aqc_clear_port_alt_write clear_port_alt_write;
+ struct ice_aqc_pfc_ignore pfc_ignore;
+ struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
+ struct ice_aqc_set_dcb_params set_dcb_params;
+ struct ice_aqc_lldp_get_mib lldp_get_mib;
+ struct ice_aqc_lldp_set_mib_change lldp_set_event;
+ struct ice_aqc_lldp_add_delete_tlv lldp_add_delete_tlv;
+ struct ice_aqc_lldp_update_tlv lldp_update_tlv;
+ struct ice_aqc_lldp_stop lldp_stop;
+ struct ice_aqc_lldp_start lldp_start;
+ struct ice_aqc_lldp_set_local_mib lldp_set_mib;
+ struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
+ struct ice_aqc_get_set_rss_lut get_set_rss_lut;
+ struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_add_txqs add_txqs;
+ struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_move_txqs move_txqs;
+ struct ice_aqc_txqs_cleanup txqs_cleanup;
+ struct ice_aqc_add_get_update_free_vsi vsi_cmd;
+ struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+ struct ice_aqc_get_vsi_resp get_vsi_resp;
+ struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_get_pkg_info_list get_pkg_info_list;
+ struct ice_aqc_driver_shared_params drv_shared_params;
+ struct ice_aqc_set_mac_lb set_mac_lb;
+ struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
+ struct ice_aqc_get_res_alloc get_res;
+ struct ice_aqc_get_allocd_res_desc get_res_desc;
+ struct ice_aqc_set_mac_cfg set_mac_cfg;
+ struct ice_aqc_set_event_mask set_event_mask;
+ struct ice_aqc_get_link_status get_link_status;
+ struct ice_aqc_event_lan_overflow lan_overflow;
+ struct ice_aqc_get_link_topo get_link_topo;
+ } params;
+};
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define ICE_AQ_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define ICE_AQ_FLAG_DD_S 0
+#define ICE_AQ_FLAG_CMP_S 1
+#define ICE_AQ_FLAG_ERR_S 2
+#define ICE_AQ_FLAG_VFE_S 3
+#define ICE_AQ_FLAG_LB_S 9
+#define ICE_AQ_FLAG_RD_S 10
+#define ICE_AQ_FLAG_VFC_S 11
+#define ICE_AQ_FLAG_BUF_S 12
+#define ICE_AQ_FLAG_SI_S 13
+#define ICE_AQ_FLAG_EI_S 14
+#define ICE_AQ_FLAG_FE_S 15
+
+#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
+#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
+#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
+#define ICE_AQ_FLAG_VFE BIT(ICE_AQ_FLAG_VFE_S) /* 0x8 */
+#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
+#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
+#define ICE_AQ_FLAG_VFC BIT(ICE_AQ_FLAG_VFC_S) /* 0x800 */
+#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
+#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
+#define ICE_AQ_FLAG_EI BIT(ICE_AQ_FLAG_EI_S) /* 0x4000 */
+#define ICE_AQ_FLAG_FE BIT(ICE_AQ_FLAG_FE_S) /* 0x8000 */
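+
+/* Illustrative sketch (not part of the driver): initializing a descriptor
+ * for a direct command. The caller zeroes the descriptor, sets the opcode,
+ * and sets ICE_AQ_FLAG_SI; an indirect command would additionally set
+ * ICE_AQ_FLAG_BUF (plus ICE_AQ_FLAG_RD when the buffer carries command
+ * data) along with datalen and the buffer address. Assumes the
+ * CPU_TO_LE16() byte-order helper from ice_osdep.h; the helper name is
+ * hypothetical.
+ */
+static inline void
+ice_example_init_direct_cmd(struct ice_aq_desc *desc, u16 opcode)
+{
+ *desc = (struct ice_aq_desc){ 0 };
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
+}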
+
+/* error codes */
+enum ice_aq_err {
+ ICE_AQ_RC_OK = 0, /* Success */
+ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ ICE_AQ_RC_ENOENT = 2, /* No such element */
+ ICE_AQ_RC_ESRCH = 3, /* Bad opcode */
+ ICE_AQ_RC_EINTR = 4, /* Operation interrupted */
+ ICE_AQ_RC_EIO = 5, /* I/O error */
+ ICE_AQ_RC_ENXIO = 6, /* No such resource */
+ ICE_AQ_RC_E2BIG = 7, /* Arg too long */
+ ICE_AQ_RC_EAGAIN = 8, /* Try again */
+ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
+ ICE_AQ_RC_EACCES = 10, /* Permission denied */
+ ICE_AQ_RC_EFAULT = 11, /* Bad address */
+ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ ICE_AQ_RC_EEXIST = 13, /* Object already exists */
+ ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
+ ICE_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ ICE_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ ICE_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ ICE_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ ICE_AQ_RC_EFBIG = 22, /* File too big */
+ ICE_AQ_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
+ ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
+ ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
+ ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ ICE_AQ_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Queue command opcodes */
+enum ice_adminq_opc {
+ /* AQ commands */
+ ice_aqc_opc_get_ver = 0x0001,
+ ice_aqc_opc_driver_ver = 0x0002,
+ ice_aqc_opc_q_shutdown = 0x0003,
+ ice_aqc_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ice_aqc_opc_req_res = 0x0008,
+ ice_aqc_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ice_aqc_opc_list_func_caps = 0x000A,
+ ice_aqc_opc_list_dev_caps = 0x000B,
+
+ /* manage MAC address */
+ ice_aqc_opc_manage_mac_read = 0x0107,
+ ice_aqc_opc_manage_mac_write = 0x0108,
+
+ /* PXE */
+ ice_aqc_opc_clear_pxe_mode = 0x0110,
+
+ ice_aqc_opc_config_no_drop_policy = 0x0112,
+
+ /* internal switch commands */
+ ice_aqc_opc_get_sw_cfg = 0x0200,
+ ice_aqc_opc_set_port_params = 0x0203,
+
+ /* Alloc/Free/Get Resources */
+ ice_aqc_opc_get_res_alloc = 0x0204,
+ ice_aqc_opc_alloc_res = 0x0208,
+ ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_get_allocd_res_desc = 0x020A,
+
+ /* VSI commands */
+ ice_aqc_opc_add_vsi = 0x0210,
+ ice_aqc_opc_update_vsi = 0x0211,
+ ice_aqc_opc_get_vsi_params = 0x0212,
+ ice_aqc_opc_free_vsi = 0x0213,
+
+ /* Mirroring rules - add/update, delete */
+ ice_aqc_opc_add_update_mir_rule = 0x0260,
+ ice_aqc_opc_del_mir_rule = 0x0261,
+
+ /* storm configuration */
+ ice_aqc_opc_set_storm_cfg = 0x0280,
+ ice_aqc_opc_get_storm_cfg = 0x0281,
+
+ /* switch rules population commands */
+ ice_aqc_opc_add_sw_rules = 0x02A0,
+ ice_aqc_opc_update_sw_rules = 0x02A1,
+ ice_aqc_opc_remove_sw_rules = 0x02A2,
+ ice_aqc_opc_get_sw_rules = 0x02A3,
+ ice_aqc_opc_clear_pf_cfg = 0x02A4,
+
+ /* DCB commands */
+ ice_aqc_opc_pfc_ignore = 0x0301,
+ ice_aqc_opc_query_pfc_mode = 0x0302,
+ ice_aqc_opc_set_pfc_mode = 0x0303,
+ ice_aqc_opc_set_dcb_params = 0x0306,
+
+ /* transmit scheduler commands */
+ ice_aqc_opc_get_dflt_topo = 0x0400,
+ ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_cfg_sched_elems = 0x0403,
+ ice_aqc_opc_get_sched_elems = 0x0404,
+ ice_aqc_opc_move_sched_elems = 0x0408,
+ ice_aqc_opc_suspend_sched_elems = 0x0409,
+ ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_query_port_ets = 0x040E,
+ ice_aqc_opc_delete_sched_elems = 0x040F,
+ ice_aqc_opc_add_rl_profiles = 0x0410,
+ ice_aqc_opc_query_rl_profiles = 0x0411,
+ ice_aqc_opc_query_sched_res = 0x0412,
+ ice_aqc_opc_query_node_to_root = 0x0413,
+ ice_aqc_opc_cfg_l2_node_cgd = 0x0414,
+ ice_aqc_opc_remove_rl_profiles = 0x0415,
+
+ /* PHY commands */
+ ice_aqc_opc_get_phy_caps = 0x0600,
+ ice_aqc_opc_set_phy_cfg = 0x0601,
+ ice_aqc_opc_set_mac_cfg = 0x0603,
+ ice_aqc_opc_restart_an = 0x0605,
+ ice_aqc_opc_get_link_status = 0x0607,
+ ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_dnl_get_status = 0x0680,
+ ice_aqc_opc_dnl_run = 0x0681,
+ ice_aqc_opc_dnl_call = 0x0682,
+ ice_aqc_opc_dnl_read_sto = 0x0683,
+ ice_aqc_opc_dnl_write_sto = 0x0684,
+ ice_aqc_opc_dnl_set_breakpoints = 0x0686,
+ ice_aqc_opc_dnl_read_log = 0x0687,
+ ice_aqc_opc_get_link_topo = 0x06E0,
+ ice_aqc_opc_get_link_topo_pin = 0x06E1,
+ ice_aqc_opc_read_i2c = 0x06E2,
+ ice_aqc_opc_write_i2c = 0x06E3,
+ ice_aqc_opc_read_mdio = 0x06E4,
+ ice_aqc_opc_write_mdio = 0x06E5,
+ ice_aqc_opc_set_gpio_by_func = 0x06E6,
+ ice_aqc_opc_get_gpio_by_func = 0x06E7,
+ ice_aqc_opc_set_led = 0x06E8,
+ ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
+ ice_aqc_opc_set_gpio = 0x06EC,
+ ice_aqc_opc_get_gpio = 0x06ED,
+ ice_aqc_opc_sff_eeprom = 0x06EE,
+
+ /* NVM commands */
+ ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_erase = 0x0702,
+ ice_aqc_opc_nvm_write = 0x0703,
+ ice_aqc_opc_nvm_cfg_read = 0x0704,
+ ice_aqc_opc_nvm_cfg_write = 0x0705,
+ ice_aqc_opc_nvm_checksum = 0x0706,
+ ice_aqc_opc_nvm_write_activate = 0x0707,
+ ice_aqc_opc_nvm_sr_dump = 0x0707,
+ ice_aqc_opc_nvm_save_factory_settings = 0x0708,
+ ice_aqc_opc_nvm_update_empr = 0x0709,
+
+ /* PF/VF mailbox commands */
+ ice_mbx_opc_send_msg_to_pf = 0x0801,
+ ice_mbx_opc_send_msg_to_vf = 0x0802,
+ /* Alternate Structure Commands */
+ ice_aqc_opc_write_alt_direct = 0x0900,
+ ice_aqc_opc_write_alt_indirect = 0x0901,
+ ice_aqc_opc_read_alt_direct = 0x0902,
+ ice_aqc_opc_read_alt_indirect = 0x0903,
+ ice_aqc_opc_done_alt_write = 0x0904,
+ ice_aqc_opc_clear_port_alt_write = 0x0906,
+ /* LLDP commands */
+ ice_aqc_opc_lldp_get_mib = 0x0A00,
+ ice_aqc_opc_lldp_set_mib_change = 0x0A01,
+ ice_aqc_opc_lldp_add_tlv = 0x0A02,
+ ice_aqc_opc_lldp_update_tlv = 0x0A03,
+ ice_aqc_opc_lldp_delete_tlv = 0x0A04,
+ ice_aqc_opc_lldp_stop = 0x0A05,
+ ice_aqc_opc_lldp_start = 0x0A06,
+ ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ ice_aqc_opc_lldp_set_local_mib = 0x0A08,
+ ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
+
+ /* RSS commands */
+ ice_aqc_opc_set_rss_key = 0x0B02,
+ ice_aqc_opc_set_rss_lut = 0x0B03,
+ ice_aqc_opc_get_rss_key = 0x0B04,
+ ice_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Tx queue handling commands/events */
+ ice_aqc_opc_add_txqs = 0x0C30,
+ ice_aqc_opc_dis_txqs = 0x0C31,
+ ice_aqc_opc_txqs_cleanup = 0x0C31,
+ ice_aqc_opc_move_recfg_txqs = 0x0C32,
+
+ /* package commands */
+ ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_upload_section = 0x0C41,
+ ice_aqc_opc_update_pkg = 0x0C42,
+ ice_aqc_opc_get_pkg_info_list = 0x0C43,
+
+ ice_aqc_opc_driver_shared_params = 0x0C90,
+
+ /* Standalone Commands/Events */
+ ice_aqc_opc_event_lan_overflow = 0x1001,
+};
+
+#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/sys/dev/ice/ice_alloc.h b/sys/dev/ice/ice_alloc.h
new file mode 100644
index 000000000000..1d9b9169eb02
--- /dev/null
+++ b/sys/dev/ice/ice_alloc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_ALLOC_H_
+#define _ICE_ALLOC_H_
+
+/* Memory types */
+enum ice_memset_type {
+ ICE_NONDMA_MEM = 0,
+ ICE_DMA_MEM
+};
+
+/* Memcpy types */
+enum ice_memcpy_type {
+ ICE_NONDMA_TO_NONDMA = 0,
+ ICE_NONDMA_TO_DMA,
+ ICE_DMA_TO_DMA,
+ ICE_DMA_TO_NONDMA
+};
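+
+/* These types are passed as the final argument to the ice_memset() and
+ * ice_memcpy() wrappers so the OS layer can tell whether DMA memory is
+ * involved, e.g. (as used elsewhere in this driver):
+ *
+ *	ice_memset(bmp, 0, len, ICE_NONDMA_MEM);
+ *	ice_memcpy(dst, src, len, ICE_NONDMA_TO_NONDMA);
+ */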
+
+#endif /* _ICE_ALLOC_H_ */
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
new file mode 100644
index 000000000000..c7b5ae5a6a56
--- /dev/null
+++ b/sys/dev/ice/ice_bitops.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_BITOPS_H_
+#define _ICE_BITOPS_H_
+
+/* Define the size of the bitmap chunk */
+typedef u32 ice_bitmap_t;
+
+/* Number of bits per bitmap chunk */
+#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
+/* Determine which chunk a bit belongs in */
+#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
+/* How many chunks are required to store this many bits */
+#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK)
+/* Which bit inside a chunk this bit corresponds to */
+#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
+/* How many bits are valid in the last chunk, assumes nr > 0 */
+#define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1)
+/* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */
+#define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \
+ (BITS_PER_CHUNK - LAST_CHUNK_BITS(nr)))
+
+#define ice_declare_bitmap(A, sz) \
+ ice_bitmap_t A[BITS_TO_CHUNKS(sz)]
+
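+/* Worked example: with 32-bit chunks, ice_declare_bitmap(bm, 40) declares a
+ * two-element array; LAST_CHUNK_BITS(40) == 8, so LAST_CHUNK_MASK(40) == 0xFF
+ * and only the low 8 bits of the second chunk are treated as part of the
+ * bitmap.
+ */
+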
+static inline bool ice_is_bit_set_internal(u16 nr, const ice_bitmap_t *bitmap)
+{
+ return !!(*bitmap & BIT(nr));
+}
+
+/*
+ * If atomic versions of the bitops are required, each specific OS
+ * implementation will need to implement OS/platform specific atomic
+ * version of the functions below:
+ *
+ * ice_clear_bit_internal
+ * ice_set_bit_internal
+ * ice_test_and_clear_bit_internal
+ * ice_test_and_set_bit_internal
+ *
+ * and define the macro ICE_ATOMIC_BITOPS to override the default non-atomic
+ * implementation.
+ */
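+
+/* A minimal sketch of such an override on FreeBSD (hypothetical, not part of
+ * this driver) could build on <machine/atomic.h>, for example:
+ *
+ *	static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+ *	{
+ *		atomic_set_32((volatile uint32_t *)bitmap, BIT(nr));
+ *	}
+ *
+ * with ice_clear_bit_internal() using atomic_clear_32() in the same way.
+ */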
+static inline void ice_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+{
+ *bitmap &= ~BIT(nr);
+}
+
+static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+{
+ *bitmap |= BIT(nr);
+}
+
+static inline bool ice_test_and_clear_bit_internal(u16 nr,
+ ice_bitmap_t *bitmap)
+{
+ if (ice_is_bit_set_internal(nr, bitmap)) {
+ ice_clear_bit_internal(nr, bitmap);
+ return true;
+ }
+ return false;
+}
+
+static inline bool ice_test_and_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+{
+ if (ice_is_bit_set_internal(nr, bitmap))
+ return true;
+
+ ice_set_bit_internal(nr, bitmap);
+ return false;
+}
+
+/**
+ * ice_is_bit_set - Check state of a bit in a bitmap
+ * @bitmap: the bitmap to check
+ * @nr: the bit to check
+ *
+ * Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr
+ * is less than the size of the bitmap.
+ */
+static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr)
+{
+ return ice_is_bit_set_internal(BIT_IN_CHUNK(nr),
+ &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_clear_bit - Clear a bit in a bitmap
+ * @bitmap: the bitmap to change
+ * @nr: the bit to change
+ *
+ * Clears the bit nr in bitmap. Assumes that nr is less than the size of the
+ * bitmap.
+ */
+static inline void ice_clear_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_set_bit - Set a bit in a bitmap
+ * @bitmap: the bitmap to change
+ * @nr: the bit to change
+ *
+ * Sets the bit nr in bitmap. Assumes that nr is less than the size of the
+ * bitmap.
+ */
+static inline void ice_set_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_test_and_clear_bit - Atomically clear a bit and return the old bit value
+ * @nr: the bit to change
+ * @bitmap: the bitmap to change
+ *
+ * Check and clear the bit nr in bitmap. Assumes that nr is less than the size
+ * of the bitmap.
+ */
+static inline bool
+ice_test_and_clear_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr),
+ &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_test_and_set_bit - Atomically set a bit and return the old bit value
+ * @nr: the bit to change
+ * @bitmap: the bitmap to change
+ *
+ * Check and set the bit nr in bitmap. Assumes that nr is less than the size of
+ * the bitmap.
+ */
+static inline bool
+ice_test_and_set_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr),
+ &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_zero_bitmap - set bits of bitmap to zero.
+ * @bmp: bitmap to set zeros
+ * @size: Size of the bitmaps in bits
+ *
+ * Set all of the bits in a bitmap to zero. Note that this function assumes it
+ * operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It
+ * will zero every bit in the last chunk, even if those bits are beyond the
+ * size.
+ */
+static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
+{
+ ice_memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t),
+ ICE_NONDMA_MEM);
+}
+
+/**
+ * ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
+ * @dst: Destination bitmap that receives the result of the operation
+ * @bmp1: The first bitmap to intersect
+ * @bmp2: The second bitmap to intersect with the first
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise AND on two "source" bitmaps of the same size
+ * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
+ * size as the "source" bitmaps to avoid buffer overflows. This function returns
+ * a non-zero value if at least one bit location from both "source" bitmaps is
+ * non-zero.
+ */
+static inline int
+ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t res = 0, mask;
+ u16 i;
+
+ /* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) {
+ dst[i] = bmp1[i] & bmp2[i];
+ res |= dst[i];
+ }
+
+ /* We want to take care not to modify any bits outside of the bitmap
+ * size, even in the destination bitmap. Thus, we won't directly
+ * assign the last bitmap, but instead use a bitmask to ensure we only
+ * modify bits which are within the size, and leave any bits above the
+ * size value alone.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask);
+ res |= dst[i] & mask;
+
+ return res != 0;
+}
+
+/**
+ * ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
+ * @dst: Destination bitmap that receives the result of the operation
+ * @bmp1: The first bitmap to OR
+ * @bmp2: The second bitmap to OR with the first
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise OR on two "source" bitmaps of the same size
+ * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
+ * size as the "source" bitmaps to avoid buffer overflows.
+ */
+static inline void
+ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+	/* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ dst[i] = bmp1[i] | bmp2[i];
+
+ /* We want to only OR bits within the size. Furthermore, we also do
+ * not want to modify destination bits which are beyond the specified
+ * size. Use a bitmask to ensure that we only modify the bits that are
+ * within the specified size.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask);
+}
+
+/**
+ * ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap
+ * @dst: Destination bitmap that receives the result of the operation
+ * @bmp1: The first bitmap of XOR operation
+ * @bmp2: The second bitmap to XOR with the first
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise XOR on two "source" bitmaps of the same size
+ * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
+ * size as the "source" bitmaps to avoid buffer overflows.
+ */
+static inline void
+ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+	/* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ dst[i] = bmp1[i] ^ bmp2[i];
+
+ /* We want to only XOR bits within the size. Furthermore, we also do
+ * not want to modify destination bits which are beyond the specified
+ * size. Use a bitmask to ensure that we only modify the bits that are
+ * within the specified size.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask);
+}
+
+/**
+ * ice_find_next_bit - Find the index of the next set bit of a bitmap
+ * @bitmap: the bitmap to scan
+ * @size: the size in bits of the bitmap
+ * @offset: the offset to start at
+ *
+ * Scans the bitmap and returns the index of the first set bit which is equal
+ * to or after the specified offset. Will return size if no bits are set.
+ */
+static inline u16
+ice_find_next_bit(const ice_bitmap_t *bitmap, u16 size, u16 offset)
+{
+ u16 i, j;
+
+ if (offset >= size)
+ return size;
+
+ /* Since the starting position may not be directly on a chunk
+ * boundary, we need to be careful to handle the first chunk specially
+ */
+ i = BIT_CHUNK(offset);
+ if (bitmap[i] != 0) {
+ u16 off = i * BITS_PER_CHUNK;
+
+ for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) {
+ if (ice_is_bit_set(bitmap, off + j))
+ return min(size, (u16)(off + j));
+ }
+ }
+
+ /* Now we handle the remaining chunks, if any */
+ for (i++; i < BITS_TO_CHUNKS(size); i++) {
+ if (bitmap[i] != 0) {
+ u16 off = i * BITS_PER_CHUNK;
+
+ for (j = 0; j < BITS_PER_CHUNK; j++) {
+ if (ice_is_bit_set(bitmap, off + j))
+ return min(size, (u16)(off + j));
+ }
+ }
+ }
+ return size;
+}
+
+/**
+ * ice_find_first_bit - Find the index of the first set bit of a bitmap
+ * @bitmap: the bitmap to scan
+ * @size: the size in bits of the bitmap
+ *
+ * Scans the bitmap and returns the index of the first set bit. Will return
+ * size if no bits are set.
+ */
+static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size)
+{
+ return ice_find_next_bit(bitmap, size, 0);
+}
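+
+/* Typical scan idiom built from the two helpers above (illustrative):
+ *
+ *	for (bit = ice_find_first_bit(bmp, size); bit < size;
+ *	     bit = ice_find_next_bit(bmp, size, bit + 1))
+ *		; /* bit indexes a set bit here */
+ */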
+
+/**
+ * ice_is_any_bit_set - Return true if any bit in the bitmap is set
+ * @bitmap: the bitmap to check
+ * @size: the size of the bitmap
+ *
+ * Equivalent to checking if ice_find_first_bit returns a value less than the
+ * bitmap size.
+ */
+static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size)
+{
+ return ice_find_first_bit(bitmap, size) < size;
+}
+
+/**
+ * ice_cp_bitmap - copy bitmaps.
+ * @dst: bitmap destination
+ * @src: bitmap to copy from
+ * @size: Size of the bitmaps in bits
+ *
+ * This function copies the bitmap from src to dst. Note that this function
+ * assumes it is operating on a bitmap declared using ice_declare_bitmap. It
+ * will copy the entire last chunk even if it contains bits beyond the size.
+ */
+static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size)
+{
+ ice_memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t),
+ ICE_NONDMA_TO_NONDMA);
+}
+
+/**
+ * ice_cmp_bitmap - compares two bitmaps.
+ * @bmp1: the bitmap to compare
+ * @bmp2: the bitmap to compare with bmp1
+ * @size: Size of the bitmaps in bits
+ *
+ * This function compares two bitmaps and returns true if they are equal in
+ * the first size bits, false otherwise.
+ */
+static inline bool
+ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+	/* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ if (bmp1[i] != bmp2[i])
+ return false;
+
+	/* We want to only compare bits within the size. */
+ mask = LAST_CHUNK_MASK(size);
+ if ((bmp1[i] & mask) != (bmp2[i] & mask))
+ return false;
+
+ return true;
+}
+
+#undef BIT_CHUNK
+#undef BIT_IN_CHUNK
+#undef LAST_CHUNK_BITS
+#undef LAST_CHUNK_MASK
+
+#endif /* _ICE_BITOPS_H_ */
diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c
new file mode 100644
index 000000000000..53f0e361b07d
--- /dev/null
+++ b/sys/dev/ice/ice_common.c
@@ -0,0 +1,4895 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_adminq_cmd.h"
+
+#include "ice_flow.h"
+#include "ice_switch.h"
+
+#define ICE_PF_RESET_WAIT_COUNT 300
+
+/**
+ * ice_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the MAC type of the adapter based on the
+ * vendor ID and device ID stored in the HW structure.
+ */
+enum ice_status ice_set_mac_type(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
+ return ICE_ERR_DEVICE_NOT_SUPPORTED;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
+ hw->mac_type = ICE_MAC_E810;
+ break;
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ hw->mac_type = ICE_MAC_GENERIC;
+ break;
+ default:
+ hw->mac_type = ICE_MAC_UNKNOWN;
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_clear_pf_cfg - Clear PF configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
+ */
+enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_manage_mac_read - manage MAC address read command
+ * @hw: pointer to the HW struct
+ * @buf: a virtual buffer to hold the manage MAC read response
+ * @buf_size: Size of the virtual buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function is used to return per PF station MAC address (0x0107).
+ * NOTE: Upon successful completion of this command, MAC address information
+ * is returned in user specified buffer. Please interpret user specified
+ * buffer as "manage_mac_read" response.
+ * MAC addresses from the response are stored in the HW struct (port.mac).
+ * ice_aq_discover_caps is expected to be called before this function is called.
+ */
+enum ice_status
+ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_manage_mac_read_resp *resp;
+ struct ice_aqc_manage_mac_read *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 flags;
+ u8 i;
+
+ cmd = &desc.params.mac_read;
+
+ if (buf_size < sizeof(*resp))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+
+ resp = (struct ice_aqc_manage_mac_read_resp *)buf;
+ flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
+
+ if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
+ ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* A single port can report up to two (LAN and WoL) addresses */
+ for (i = 0; i < cmd->num_addr; i++)
+ if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
+ ice_memcpy(hw->port_info->mac.lan_addr,
+ resp[i].mac_addr, ETH_ALEN,
+ ICE_DMA_TO_NONDMA);
+ ice_memcpy(hw->port_info->mac.perm_addr,
+ resp[i].mac_addr,
+ ETH_ALEN, ICE_DMA_TO_NONDMA);
+ break;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_get_phy_caps - returns PHY capabilities
+ * @pi: port information structure
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Returns the various PHY capabilities supported on the Port (0x0600)
+ */
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
+
+ cmd->param0 |= CPU_TO_LE16(report_mode);
+ status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+
+ if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
+ pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
+ pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_get_link_topo_handle - get link topology node return status
+ * @pi: port information structure
+ * @node_type: requested node type
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get link topology node return status for specified node type (0x06E0)
+ *
+ * Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present, then
+ * connection type is backplane or BASE-T.
+ */
+static enum ice_status
+ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_is_media_cage_present - check if a media cage is present
+ * @pi: port information structure
+ *
+ * Returns true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ice_is_media_cage_present(struct ice_port_info *pi)
+{
+ /* Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present then
+ * connection type is backplane or BASE-T.
+ */
+ return !ice_aq_get_link_topo_handle(pi,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
+ NULL);
+}
+
+/**
+ * ice_get_media_type - Gets media type
+ * @pi: port information structure
+ */
+static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
+{
+ struct ice_link_status *hw_link_info;
+
+ if (!pi)
+ return ICE_MEDIA_UNKNOWN;
+
+ hw_link_info = &pi->phy.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ICE_MEDIA_UNKNOWN;
+
+ if (hw_link_info->phy_type_low) {
+ switch (hw_link_info->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ return ICE_MEDIA_FIBER;
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ return ICE_MEDIA_BASET;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ return ICE_MEDIA_DA;
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ /* fall-through */
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ /* fall-through */
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ }
+ return ICE_MEDIA_UNKNOWN;
+}
+
+/**
+ * ice_aq_get_link_info
+ * @pi: port information structure
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Link Status (0x607). Returns the link status of the adapter.
+ */
+enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_link_status_data link_data = { 0 };
+ struct ice_aqc_get_link_status *resp;
+ struct ice_link_status *li_old, *li;
+ enum ice_media_type *hw_media_type;
+ struct ice_fc_info *hw_fc_info;
+ bool tx_pause, rx_pause;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u16 cmd_flags;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+
+ li_old = &pi->phy.link_info_old;
+ hw_media_type = &pi->phy.media_type;
+ li = &pi->phy.link_info;
+ hw_fc_info = &pi->fc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
+ cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = CPU_TO_LE16(cmd_flags);
+ resp->lport_num = pi->lport;
+
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
+
+ if (status != ICE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
+ *hw_media_type = ice_get_media_type(pi);
+ li->link_info = link_data.link_info;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
+ ICE_AQ_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ICE_FC_FULL;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
+ else
+ hw_fc_info->current_mode = ICE_FC_NONE;
+
+ li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
+
+ ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)li->phy_type_low);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)li->phy_type_high);
+ ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+ /* flag cleared so calling functions don't call AQ again */
+ pi->phy.get_link_info = false;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_set_mac_cfg
+ * @hw: pointer to the HW struct
+ * @max_frame_size: Maximum Frame Size to be supported
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603)
+ */
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
+{
+ u16 fc_threshold_val, tx_timer_val;
+ struct ice_aqc_set_mac_cfg *cmd;
+ struct ice_aq_desc desc;
+ u32 reg_val;
+
+ cmd = &desc.params.set_mac_cfg;
+
+ if (max_frame_size == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+
+ /* We read back the transmit timer and fc threshold value of
+ * LFC. Thus, we will use index =
+ * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
+ *
+	 * Also, because we are operating on transmit timer and fc
+ * threshold of LFC, we don't turn on any bit in tx_tmr_priority
+ */
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+
+ /* Retrieve the transmit timer */
+ reg_val = rd32(hw,
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+ tx_timer_val = reg_val &
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+
+ /* Retrieve the fc threshold */
+ reg_val = rd32(hw,
+ PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+ fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
+ cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_init_fltr_mgmt_struct - initializes filter management list and locks
+ * @hw: pointer to the HW struct
+ */
+static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw;
+
+ hw->switch_info = (struct ice_switch_info *)
+ ice_malloc(hw, sizeof(*hw->switch_info));
+
+ sw = hw->switch_info;
+
+ if (!sw)
+ return ICE_ERR_NO_MEMORY;
+
+ INIT_LIST_HEAD(&sw->vsi_list_map_head);
+
+ return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
+}
+
+/**
+ * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
+ * @hw: pointer to the HW struct
+ */
+static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_vsi_list_map_info *v_pos_map;
+ struct ice_vsi_list_map_info *v_tmp_map;
+ struct ice_sw_recipe *recps;
+ u8 i;
+
+ LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
+ ice_vsi_list_map_info, list_entry) {
+ LIST_DEL(&v_pos_map->list_entry);
+ ice_free(hw, v_pos_map);
+ }
+ recps = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
+
+ recps[i].root_rid = i;
+ LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
+ &recps[i].rg_list, ice_recp_grp_entry,
+ l_entry) {
+ LIST_DEL(&rg_entry->l_entry);
+ ice_free(hw, rg_entry);
+ }
+
+ if (recps[i].adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ ice_destroy_lock(&recps[i].filt_rule_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
+ LIST_DEL(&lst_itr->list_entry);
+ ice_free(hw, lst_itr->lkups);
+ ice_free(hw, lst_itr);
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ ice_destroy_lock(&recps[i].filt_rule_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ ice_fltr_mgmt_list_entry,
+ list_entry) {
+ LIST_DEL(&lst_itr->list_entry);
+ ice_free(hw, lst_itr);
+ }
+ }
+ if (recps[i].root_buf)
+ ice_free(hw, recps[i].root_buf);
+ }
+ ice_rm_all_sw_replay_rule_info(hw);
+ ice_free(hw, sw->recp_list);
+ ice_free(hw, sw);
+}
+
+/**
+ * ice_get_itr_intrl_gran
+ * @hw: pointer to the HW struct
+ *
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
+ * bandwidth according to the device's configuration during power-on.
+ */
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
+{
+ u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
+ GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
+ GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+
+ switch (max_agg_bw) {
+ case ICE_MAX_AGG_BW_200G:
+ case ICE_MAX_AGG_BW_100G:
+ case ICE_MAX_AGG_BW_50G:
+ hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
+ break;
+ case ICE_MAX_AGG_BW_25G:
+ hw->itr_gran = ICE_ITR_GRAN_MAX_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
+ break;
+ }
+}
+
+/**
+ * ice_print_rollback_msg - print FW rollback message
+ * @hw: pointer to the hardware structure
+ */
+void ice_print_rollback_msg(struct ice_hw *hw)
+{
+ char nvm_str[ICE_NVM_VER_LEN] = { 0 };
+ struct ice_nvm_info *nvm = &hw->nvm;
+ struct ice_orom_info *orom;
+
+ orom = &nvm->orom;
+
+ SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
+ nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
+ orom->build, orom->patch);
+ ice_warn(hw,
+ "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
+ nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
+}
+
+/**
+ * ice_init_hw - main hardware initialization routine
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw(struct ice_hw *hw)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ u16 mac_buf_len;
+ void *mac_buf;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Set MAC type based on DeviceID */
+ status = ice_set_mac_type(hw);
+ if (status)
+ return status;
+
+ hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
+ PF_FUNC_RID_FUNCTION_NUMBER_M) >>
+ PF_FUNC_RID_FUNCTION_NUMBER_S;
+
+ status = ice_reset(hw, ICE_RESET_PFR);
+ if (status)
+ return status;
+
+ ice_get_itr_intrl_gran(hw);
+
+ status = ice_create_all_ctrlq(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ status = ice_init_nvm(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
+ ice_print_rollback_msg(hw);
+
+ status = ice_clear_pf_cfg(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ ice_clear_pxe_mode(hw);
+
+ status = ice_get_caps(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ hw->port_info = (struct ice_port_info *)
+ ice_malloc(hw, sizeof(*hw->port_info));
+ if (!hw->port_info) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_cqinit;
+ }
+
+ /* set the back pointer to HW */
+ hw->port_info->hw = hw;
+
+ /* Initialize port_info struct with switch configuration data */
+ status = ice_get_initial_sw_cfg(hw);
+ if (status)
+ goto err_unroll_alloc;
+
+ hw->evb_veb = true;
+ /* Query the allocated resources for Tx scheduler */
+ status = ice_sched_query_res_alloc(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Failed to get scheduler allocated resources\n");
+ goto err_unroll_alloc;
+ }
+ ice_sched_get_psm_clk_freq(hw);
+
+ /* Initialize port_info struct with scheduler data */
+ status = ice_sched_init_port(hw->port_info);
+ if (status)
+ goto err_unroll_sched;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_sched;
+ }
+
+ /* Initialize port_info struct with PHY capabilities */
+ status = ice_aq_get_phy_caps(hw->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+ ice_free(hw, pcaps);
+ if (status)
+ goto err_unroll_sched;
+
+ /* Initialize port_info struct with link information */
+ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
+ if (status)
+ goto err_unroll_sched;
+ /* need a valid SW entry point to build a Tx tree */
+ if (!hw->sw_entry_point_layer) {
+ ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
+ status = ICE_ERR_CFG;
+ goto err_unroll_sched;
+ }
+ INIT_LIST_HEAD(&hw->agg_list);
+ /* Initialize max burst size */
+ if (!hw->max_burst_size)
+ ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
+
+ status = ice_init_fltr_mgmt_struct(hw);
+ if (status)
+ goto err_unroll_sched;
+
+ /* Get MAC information */
+ /* A single port can report up to two (LAN and WoL) addresses */
+ mac_buf = ice_calloc(hw, 2,
+ sizeof(struct ice_aqc_manage_mac_read_resp));
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
+
+ if (!mac_buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_fltr_mgmt_struct;
+ }
+
+ status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
+ ice_free(hw, mac_buf);
+
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ status = ice_init_hw_tbls(hw);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ ice_init_lock(&hw->tnl_lock);
+ return ICE_SUCCESS;
+
+err_unroll_fltr_mgmt_struct:
+ ice_cleanup_fltr_mgmt_struct(hw);
+err_unroll_sched:
+ ice_sched_cleanup_all(hw);
+err_unroll_alloc:
+ ice_free(hw, hw->port_info);
+ hw->port_info = NULL;
+err_unroll_cqinit:
+ ice_destroy_all_ctrlq(hw);
+ return status;
+}
+
+/**
+ * ice_deinit_hw - unroll initialization operations done by ice_init_hw
+ * @hw: pointer to the hardware structure
+ *
+ * This should be called only during nominal operation, not as a result of
+ * ice_init_hw() failing since ice_init_hw() will take care of unrolling
+ * applicable initializations if it fails for any reason.
+ */
+void ice_deinit_hw(struct ice_hw *hw)
+{
+ ice_cleanup_fltr_mgmt_struct(hw);
+
+ ice_sched_cleanup_all(hw);
+ ice_sched_clear_agg(hw);
+ ice_free_seg(hw);
+ ice_free_hw_tbls(hw);
+ ice_destroy_lock(&hw->tnl_lock);
+
+ if (hw->port_info) {
+ ice_free(hw, hw->port_info);
+ hw->port_info = NULL;
+ }
+
+ ice_destroy_all_ctrlq(hw);
+
+ /* Clear VSI contexts if not already cleared */
+ ice_clear_all_vsi_ctx(hw);
+}
+
+/**
+ * ice_check_reset - Check to see if a global reset is complete
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_check_reset(struct ice_hw *hw)
+{
+ u32 cnt, reg = 0, grst_delay, uld_mask;
+
+ /* Poll for Device Active state in case a recent CORER, GLOBR,
+ * or EMPR has occurred. The grst delay value is in 100ms units.
+ * Add 1sec for outstanding AQ commands that can take a long time.
+ */
+ grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
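+	/* e.g. GRSTDEL == 5 gives (5 + 10) * 100 ms = 1.5 s of polling below */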
+
+ for (cnt = 0; cnt < grst_delay; cnt++) {
+ ice_msec_delay(100, true);
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
+ break;
+ }
+
+ if (cnt == grst_delay) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset polling failed to complete.\n");
+ return ICE_ERR_RESET_FAILED;
+ }
+
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
+
+ /* Device is Active; check Global Reset processes are done */
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset processes done. %d\n", cnt);
+ break;
+ }
+ ice_msec_delay(10, true);
+ }
+
+ if (cnt == ICE_PF_RESET_WAIT_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
+ reg);
+ return ICE_ERR_RESET_FAILED;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * If a global reset has been triggered, this function checks
+ * for its completion and then issues the PF reset
+ */
+static enum ice_status ice_pf_reset(struct ice_hw *hw)
+{
+ u32 cnt, reg;
+
+ /* If at function entry a global reset was already in progress, i.e.
+ * state is not 'device active' or any of the reset done bits are not
+ * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
+ * global reset is done.
+ */
+ if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
+ (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
+ /* poll on global reset currently in progress until done */
+ if (ice_check_reset(hw))
+ return ICE_ERR_RESET_FAILED;
+
+ return ICE_SUCCESS;
+ }
+
+ /* Reset the PF */
+ reg = rd32(hw, PFGEN_CTRL);
+
+ wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
+
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, PFGEN_CTRL);
+ if (!(reg & PFGEN_CTRL_PFSWR_M))
+ break;
+
+ ice_msec_delay(1, true);
+ }
+
+ if (cnt == ICE_PF_RESET_WAIT_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "PF reset polling failed to complete.\n");
+ return ICE_ERR_RESET_FAILED;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_reset - Perform different types of reset
+ * @hw: pointer to the hardware structure
+ * @req: reset request
+ *
+ * This function triggers a reset as specified by the req parameter.
+ *
+ * Note:
+ * If anything other than a PF reset is triggered, PXE mode is restored.
+ * This has to be cleared using ice_clear_pxe_mode again, once the AQ
+ * interface has been restored in the rebuild flow.
+ */
+enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+{
+ u32 val = 0;
+
+ switch (req) {
+ case ICE_RESET_PFR:
+ return ice_pf_reset(hw);
+ case ICE_RESET_CORER:
+ ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
+ val = GLGEN_RTRIG_CORER_M;
+ break;
+ case ICE_RESET_GLOBR:
+ ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
+ val = GLGEN_RTRIG_GLOBR_M;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ val |= rd32(hw, GLGEN_RTRIG);
+ wr32(hw, GLGEN_RTRIG, val);
+ ice_flush(hw);
+
+ /* wait for the FW to be ready */
+ return ice_check_reset(hw);
+}
+
+/**
+ * ice_copy_rxq_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Copies rxq context from dense structure to HW register space
+ */
+static enum ice_status
+ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+{
+ u8 i;
+
+ if (!ice_rxq_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to HW */
+ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, QRX_CONTEXT(i, rxq_index),
+ *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
+ *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Rx Queue Context */
+static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
+ ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
+ ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
+ ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
+ ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
+ ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
+ ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
+ ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
+ ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
+ ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
+ ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
+ ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
+ ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
+ ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
+ ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
+ ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
+ ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
+ ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
+ ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
+ { 0 }
+};
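+
+/* Each ICE_CTX_STORE() entry above gives a field's width in bits and its LSB
+ * position in the dense HW image that ice_set_ctx() packs into; e.g. qlen
+ * occupies bits 89..101 of the Rx queue context written by
+ * ice_write_rxq_ctx().
+ */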
+
+/**
+ * ice_write_rxq_ctx
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the rxq context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Converts rxq context from sparse to dense structure and then writes
+ * it to HW register space and enables the hardware to prefetch descriptors
+ * instead of only fetching them on demand
+ */
+enum ice_status
+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+
+ if (!rlan_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ rlan_ctx->prefena = 1;
+
+ ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+}
+
+/**
+ * ice_clear_rxq_ctx
+ * @hw: pointer to the hardware structure
+ * @rxq_index: the index of the Rx queue to clear
+ *
+ * Clears rxq context in HW register space
+ */
+enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
+{
+ u8 i;
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Clear each dword register separately */
+ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
+ wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Tx Queue Context */
+const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
+ ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
+ ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
+ ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
+ ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
+ ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
+ ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
+ ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
+ ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
+ ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
+ ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
+ ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
+ ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
+ ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
+ ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
+ ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
+ ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
+ ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
+ ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
+ ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
+ ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
+ ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
+ ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
+ ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
+ ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
+ ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
+ ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
+ { 0 }
+};
+
+/**
+ * ice_copy_tx_cmpltnq_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
+ * @tx_cmpltnq_index: the index of the completion queue
+ *
+ * Copies Tx completion queue context from dense structure to HW register space
+ */
+static enum ice_status
+ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
+ u32 tx_cmpltnq_index)
+{
+ u8 i;
+
+ if (!ice_tx_cmpltnq_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to HW */
+ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
+ *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
+ *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Tx Completion Queue Context */
+static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
+ { 0 }
+};
+
+/**
+ * ice_write_tx_cmpltnq_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_cmpltnq_ctx: pointer to the completion queue context
+ * @tx_cmpltnq_index: the index of the completion queue
+ *
+ * Converts completion queue context from sparse to dense structure and then
+ * writes it to HW register space
+ */
+enum ice_status
+ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
+ struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
+ u32 tx_cmpltnq_index)
+{
+ u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
+
+ ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
+ return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
+}
+
+/**
+ * ice_clear_tx_cmpltnq_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_cmpltnq_index: the index of the completion queue to clear
+ *
+ * Clears Tx completion queue context in HW register space
+ */
+enum ice_status
+ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
+{
+ u8 i;
+
+ if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Clear each dword register separately */
+ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
+ wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_copy_tx_drbell_q_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
+ * @tx_drbell_q_index: the index of the doorbell queue
+ *
+ * Copies doorbell queue context from dense structure to HW register space
+ */
+static enum ice_status
+ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
+ u32 tx_drbell_q_index)
+{
+ u8 i;
+
+ if (!ice_tx_drbell_q_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to HW */
+ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
+ *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
+ *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Tx Doorbell Queue Context info */
+static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
+ { 0 }
+};
+
+/**
+ * ice_write_tx_drbell_q_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_drbell_q_ctx: pointer to the doorbell queue context
+ * @tx_drbell_q_index: the index of the doorbell queue
+ *
+ * Converts doorbell queue context from sparse to dense structure and then
+ * writes it to HW register space
+ */
+enum ice_status
+ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
+ struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
+ u32 tx_drbell_q_index)
+{
+ u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
+
+ ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
+ return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
+}
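+
+/*
+ * Illustrative, compiled-out sketch: programming a Tx doorbell queue context
+ * with the sparse-to-dense helpers above. The field values and the queue
+ * index are made-up examples, not values the driver actually programs;
+ * ice_set_ctx() packs each field into the dense buffer at the Width/LSB
+ * given in ice_tx_drbell_q_ctx_info[].
+ */
+#if 0
+static enum ice_status
+ice_example_write_drbell_ctx(struct ice_hw *hw)
+{
+	struct ice_tx_drbell_q_ctx ctx = { 0 };
+
+	ctx.base = 0x1000;	/* example value; address units are an assumption */
+	ctx.ring_len = 256;	/* example ring length */
+	ctx.db_q_en = 1;	/* enable the doorbell queue */
+
+	/* Write the context for doorbell queue 0 (example index) */
+	return ice_write_tx_drbell_q_ctx(hw, &ctx, 0);
+}
+#endif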
+
+/**
+ * ice_clear_tx_drbell_q_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_drbell_q_index: the index of the doorbell queue to clear
+ *
+ * Clears doorbell queue context in HW register space
+ */
+enum ice_status
+ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
+{
+ u8 i;
+
+ if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Clear each dword register separately */
+ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
+ wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
+
+ return ICE_SUCCESS;
+}
+
+/* FW Admin Queue command wrappers */
+
+/**
+ * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * Helper function to send FW Admin Queue commands to the FW Admin Queue.
+ */
+enum ice_status
+ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_fw_ver
+ * @hw: pointer to the HW struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the firmware version (0x0001) from the admin queue commands
+ */
+enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_ver *resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ resp = &desc.params.get_ver;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_send_driver_ver
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ * @cd: pointer to command details structure or NULL
+ *
+ * Send the driver version (0x0002) to the firmware
+ */
+enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_ver *cmd;
+ struct ice_aq_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
+}
+
+/**
+ * ice_aq_q_shutdown
+ * @hw: pointer to the HW struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well (0x0003).
+ */
+enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+{
+ struct ice_aqc_q_shutdown *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.q_shutdown;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_req_res
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests common resource using the admin queue commands (0x0008).
+ * When attempting to acquire the Global Config Lock, the driver can
+ * learn of three states:
+ * 1) ICE_SUCCESS - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
+ *
+ * Note that if the caller is in an acquire-lock, perform-action, release-lock
+ * phase of operation, it is possible that the FW may detect a timeout and issue
+ * a CORER. In this case, the driver will receive a CORER interrupt and will
+ * have to determine its cause. The calling thread that is handling this flow
+ * will likely get an error propagated back to it indicating the Download
+ * Package, Update Package or the Release Resource AQ commands timed out.
+ */
+static enum ice_status
+ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_req_res *cmd_resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd_resp = &desc.params.res_owner;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
+
+ cmd_resp->res_id = CPU_TO_LE16(res);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->res_number = CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ */
+
+ /* Global config lock response utilizes an additional status field.
+ *
+ * If the Global config lock resource is held by some other driver, the
+ * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * and the timeout field indicates the maximum time the current owner
+ * of the resource has to free it.
+ */
+ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
+ if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+ return ICE_SUCCESS;
+ } else if (LE16_TO_CPU(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_IN_PROG) {
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+ return ICE_ERR_AQ_ERROR;
+ } else if (LE16_TO_CPU(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_DONE) {
+ return ICE_ERR_AQ_NO_WORK;
+ }
+
+ /* invalid FW response, force a timeout immediately */
+ *timeout = 0;
+ return ICE_ERR_AQ_ERROR;
+ }
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ice_aq_release_res
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ * @cd: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands (0x0009)
+ */
+static enum ice_status
+ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_req_res *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.res_owner;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
+
+ cmd->res_id = CPU_TO_LE16(res);
+ cmd->res_number = CPU_TO_LE32(sdp_number);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_acquire_res
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * This function will attempt to acquire the ownership of a resource.
+ */
+enum ice_status
+ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u32 timeout)
+{
+#define ICE_RES_POLLING_DELAY_MS 10
+ u32 delay = ICE_RES_POLLING_DELAY_MS;
+ u32 time_left = timeout;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+ /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == ICE_ERR_AQ_NO_WORK)
+ goto ice_acquire_res_exit;
+
+ if (status)
+ ice_debug(hw, ICE_DBG_RES,
+ "resource %d acquire type %d failed.\n", res, access);
+
+	/* If necessary, poll until the current lock owner times out */
+ timeout = time_left;
+ while (status && timeout && time_left) {
+ ice_msec_delay(delay, true);
+ timeout = (timeout > delay) ? timeout - delay : 0;
+ status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+ if (status == ICE_ERR_AQ_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+ if (status && status != ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
+
+ice_acquire_res_exit:
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ if (access == ICE_RES_WRITE)
+ ice_debug(hw, ICE_DBG_RES,
+ "resource indicates no work to do.\n");
+ else
+ ice_debug(hw, ICE_DBG_RES,
+ "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ }
+ return status;
+}
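+
+/*
+ * Illustrative, compiled-out sketch of the acquire/use/release pattern that
+ * the comments above describe. ICE_NVM_RES_ID and ICE_RES_READ are the enum
+ * values from ice_type.h; the 3000 ms timeout and the "do work" step are
+ * placeholders.
+ */
+#if 0
+static void ice_example_use_nvm_resource(struct ice_hw *hw)
+{
+	enum ice_status status;
+
+	/* Request read access, polling for up to 3000 ms if another
+	 * driver currently owns the lock.
+	 */
+	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
+	if (status == ICE_ERR_AQ_NO_WORK)
+		return;		/* another driver already did the work */
+	if (status)
+		return;		/* could not obtain the resource */
+
+	/* ... access the shared resource here ... */
+
+	ice_release_res(hw, ICE_NVM_RES_ID);
+}
+#endif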
+
+/**
+ * ice_release_res
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * This function will release a resource using the proper Admin Command.
+ */
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
+{
+ enum ice_status status;
+ u32 total_delay = 0;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_aq_release_res(hw, res, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin queue timeout, so handle them correctly
+ */
+ while ((status == ICE_ERR_AQ_TIMEOUT) &&
+ (total_delay < hw->adminq.sq_cmd_timeout)) {
+ ice_msec_delay(1, true);
+ status = ice_aq_release_res(hw, res, 0, NULL);
+ total_delay++;
+ }
+}
+
+/**
+ * ice_aq_alloc_free_res - command to allocate/free resources
+ * @hw: pointer to the HW struct
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @opc: pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Helper function to allocate/free resources using the admin queue commands
+ */
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_alloc_free_res_cmd *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.sw_res_ctrl;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = CPU_TO_LE16(num_entries);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @btm: allocate from bottom
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = ice_struct_size(buf, elem, num - 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to allocate resource. */
+ buf->num_elems = CPU_TO_LE16(num);
+ buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
+ ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
+ if (btm)
+ buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto ice_alloc_res_exit;
+
+ ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
+ ICE_NONDMA_TO_NONDMA);
+
+ice_alloc_res_exit:
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated HW resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = ice_struct_size(buf, elem, num - 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to free resource. */
+ buf->num_elems = CPU_TO_LE16(num);
+ buf->res_type = CPU_TO_LE16(type);
+ ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+ ice_free(hw, buf);
+ return status;
+}
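+
+/*
+ * Illustrative, compiled-out sketch: allocating and later freeing a HW
+ * resource with the helpers above. The resource type and count are
+ * placeholders; real callers pass one of the ICE_AQC_RES_TYPE_* values
+ * defined in ice_adminq_cmd.h.
+ */
+#if 0
+static enum ice_status ice_example_alloc_hw_res(struct ice_hw *hw, u16 type)
+{
+	enum ice_status status;
+	u16 res_ids[2];
+
+	/* Allocate two resources of the given type, scanning from the top */
+	status = ice_alloc_hw_res(hw, type, 2, false, res_ids);
+	if (status)
+		return status;
+
+	/* ... use res_ids[0] and res_ids[1] ... */
+
+	/* Give the resources back when done */
+	return ice_free_hw_res(hw, type, 2, res_ids);
+}
+#endif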
+
+/**
+ * ice_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ */
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
+ ICE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
+
+/**
+ * ice_print_led_caps - print LED capabilities
+ * @hw: pointer to the ice_hw instance
+ * @caps: pointer to common caps instance
+ * @prefix: string to prefix when printing
+ * @debug: set to indicate debug print
+ */
+static void
+ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ char const *prefix, bool debug)
+{
+ u8 i;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
+ caps->led_pin_num);
+ else
+ ice_info(hw, "%s: led_pin_num = %d\n", prefix,
+ caps->led_pin_num);
+
+ for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
+ if (!caps->led[i])
+ continue;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
+ prefix, i, caps->led[i]);
+ else
+ ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
+ caps->led[i]);
+ }
+}
+
+/**
+ * ice_print_sdp_caps - print SDP capabilities
+ * @hw: pointer to the ice_hw instance
+ * @caps: pointer to common caps instance
+ * @prefix: string to prefix when printing
+ * @debug: set to indicate debug print
+ */
+static void
+ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ char const *prefix, bool debug)
+{
+ u8 i;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
+ caps->sdp_pin_num);
+ else
+ ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
+ caps->sdp_pin_num);
+
+ for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
+ if (!caps->sdp[i])
+ continue;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
+ prefix, i, caps->sdp[i]);
+ else
+ ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
+ i, caps->sdp[i]);
+ }
+}
+
+/**
+ * ice_parse_caps - parse function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: pointer to a buffer containing function/device capability records
+ * @cap_count: number of capability records in the list
+ * @opc: type of capabilities list to parse
+ *
+ * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ */
+static void
+ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
+ enum ice_adminq_opc opc)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ struct ice_hw_func_caps *func_p = NULL;
+ struct ice_hw_dev_caps *dev_p = NULL;
+ struct ice_hw_common_caps *caps;
+ char const *prefix;
+ u32 i;
+
+ if (!buf)
+ return;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ if (opc == ice_aqc_opc_list_dev_caps) {
+ dev_p = &hw->dev_caps;
+ caps = &dev_p->common_cap;
+ prefix = "dev cap";
+ } else if (opc == ice_aqc_opc_list_func_caps) {
+ func_p = &hw->func_caps;
+ caps = &func_p->common_cap;
+ prefix = "func cap";
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
+ return;
+ }
+
+ for (i = 0; caps && i < cap_count; i++, cap_resp++) {
+ u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
+ u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
+ u32 number = LE32_TO_CPU(cap_resp->number);
+ u16 cap = LE16_TO_CPU(cap_resp->cap);
+
+ switch (cap) {
+ case ICE_AQC_CAPS_SWITCHING_MODE:
+ caps->switching_mode = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: switching_mode = %d\n", prefix,
+ caps->switching_mode);
+ break;
+ case ICE_AQC_CAPS_MANAGEABILITY_MODE:
+ caps->mgmt_mode = number;
+ caps->mgmt_protocols_mctp = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: mgmt_mode = %d\n", prefix,
+ caps->mgmt_mode);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: mgmt_protocols_mctp = %d\n", prefix,
+ caps->mgmt_protocols_mctp);
+ break;
+ case ICE_AQC_CAPS_OS2BMC:
+ caps->os2bmc = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: os2bmc = %d\n", prefix, caps->os2bmc);
+ break;
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: valid_functions (bitmap) = %d\n", prefix,
+ caps->valid_functions);
+
+ /* store func count for resource management purposes */
+ if (dev_p)
+ dev_p->num_funcs = ice_hweight32(number);
+ break;
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: sr_iov_1_1 = %d\n", prefix,
+ caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_VF:
+ if (dev_p) {
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_vfs_exposed = %d\n", prefix,
+ dev_p->num_vfs_exposed);
+ } else if (func_p) {
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_allocd_vfs = %d\n", prefix,
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: vf_base_id = %d\n", prefix,
+ func_p->vf_base_id);
+ }
+ break;
+ case ICE_AQC_CAPS_802_1QBG:
+ caps->evb_802_1_qbg = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: evb_802_1_qbg = %d\n", prefix, number);
+ break;
+ case ICE_AQC_CAPS_802_1BR:
+ caps->evb_802_1_qbh = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: evb_802_1_qbh = %d\n", prefix, number);
+ break;
+ case ICE_AQC_CAPS_VSI:
+ if (dev_p) {
+ dev_p->num_vsi_allocd_to_host = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_vsi_allocd_to_host = %d\n",
+ prefix,
+ dev_p->num_vsi_allocd_to_host);
+ } else if (func_p) {
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: guar_num_vsi (fw) = %d\n",
+ prefix, number);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: guar_num_vsi = %d\n",
+ prefix, func_p->guar_num_vsi);
+ }
+ break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: active_tc_bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d\n", prefix, caps->maxtc);
+ break;
+ case ICE_AQC_CAPS_ISCSI:
+ caps->iscsi = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: iscsi = %d\n", prefix, caps->iscsi);
+ break;
+ case ICE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_size = %d\n", prefix,
+ caps->rss_table_size);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_entry_width = %d\n", prefix,
+ caps->rss_table_entry_width);
+ break;
+ case ICE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_rxq = %d\n", prefix,
+ caps->num_rxq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rxq_first_id = %d\n", prefix,
+ caps->rxq_first_id);
+ break;
+ case ICE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_txq = %d\n", prefix,
+ caps->num_txq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: txq_first_id = %d\n", prefix,
+ caps->txq_first_id);
+ break;
+ case ICE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_msix_vectors = %d\n", prefix,
+ caps->num_msix_vectors);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: msix_vector_first_id = %d\n", prefix,
+ caps->msix_vector_first_id);
+ break;
+ case ICE_AQC_CAPS_NVM_VER:
+ break;
+ case ICE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: nvm_unified_update = %d\n", prefix,
+ caps->nvm_unified_update);
+ break;
+ case ICE_AQC_CAPS_CEM:
+ caps->mgmt_cem = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: mgmt_cem = %d\n", prefix,
+ caps->mgmt_cem);
+ break;
+ case ICE_AQC_CAPS_LED:
+ if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
+ caps->led[phys_id] = true;
+ caps->led_pin_num++;
+ }
+ break;
+ case ICE_AQC_CAPS_SDP:
+ if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
+ caps->sdp[phys_id] = true;
+ caps->sdp_pin_num++;
+ }
+ break;
+ case ICE_AQC_CAPS_WR_CSR_PROT:
+ caps->wr_csr_prot = number;
+ caps->wr_csr_prot |= (u64)logical_id << 32;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: wr_csr_prot = 0x%llX\n", prefix,
+ (unsigned long long)caps->wr_csr_prot);
+ break;
+ case ICE_AQC_CAPS_WOL_PROXY:
+ caps->num_wol_proxy_fltr = number;
+ caps->wol_proxy_vsi_seid = logical_id;
+ caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
+ caps->acpi_prog_mthd = !!(phys_id &
+ ICE_ACPI_PROG_MTHD_M);
+ caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_wol_proxy_fltr = %d\n", prefix,
+ caps->num_wol_proxy_fltr);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: wol_proxy_vsi_seid = %d\n", prefix,
+ caps->wol_proxy_vsi_seid);
+ break;
+ case ICE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ prefix, caps->max_mtu);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: unknown capability[%d]: 0x%x\n", prefix,
+ i, cap);
+ break;
+ }
+ }
+
+ ice_print_led_caps(hw, caps, prefix, true);
+ ice_print_sdp_caps(hw, caps, prefix, true);
+
+ /* Re-calculate capabilities that are dependent on the number of
+ * physical ports; i.e. some features are not supported or function
+ * differently on devices with more than 4 ports.
+ */
+ if (hw->dev_caps.num_funcs > 4) {
+ /* Max 4 TCs per port */
+ caps->maxtc = 4;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d (based on #ports)\n", prefix,
+ caps->maxtc);
+ }
+}
+
+/**
+ * ice_aq_discover_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a virtual buffer to hold the capabilities
+ * @buf_size: Size of the virtual buffer
+ * @cap_count: if the AQ returns ENOMEM, set to the number of capabilities needed
+ * @opc: capabilities type to discover - pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the function(0x000a)/device(0x000b) capabilities description from
+ * the firmware.
+ */
+enum ice_status
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_list_caps *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ice_aqc_opc_list_func_caps &&
+ opc != ice_aqc_opc_list_dev_caps)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status)
+ ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
+ else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+ *cap_count = LE32_TO_CPU(cmd->count);
+ return status;
+}
+
+/**
+ * ice_discover_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ * @opc: capabilities type to discover - pass in the command opcode
+ */
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
+{
+ enum ice_status status;
+ u32 cap_count;
+ u16 cbuf_len;
+ u8 retries;
+
+ /* The driver doesn't know how many capabilities the device will return
+ * so the buffer size required isn't known ahead of time. The driver
+ * starts with cbuf_len and if this turns out to be insufficient, the
+ * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
+ * The driver then allocates the buffer based on the count and retries
+ * the operation. So it follows that the retry count is 2.
+ */
+#define ICE_GET_CAP_BUF_COUNT 40
+#define ICE_GET_CAP_RETRY_COUNT 2
+
+ cap_count = ICE_GET_CAP_BUF_COUNT;
+ retries = ICE_GET_CAP_RETRY_COUNT;
+
+ do {
+ void *cbuf;
+
+ cbuf_len = (u16)(cap_count *
+ sizeof(struct ice_aqc_list_caps_elem));
+ cbuf = ice_malloc(hw, cbuf_len);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
+ opc, NULL);
+ ice_free(hw, cbuf);
+
+ if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
+ break;
+
+ /* If ENOMEM is returned, try again with bigger buffer */
+ } while (--retries);
+
+ return status;
+}
+
+/**
+ * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
+ * @hw: pointer to the hardware structure
+ */
+void ice_set_safe_mode_caps(struct ice_hw *hw)
+{
+ struct ice_hw_func_caps *func_caps = &hw->func_caps;
+ struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
+ u32 valid_func, rxq_first_id, txq_first_id;
+ u32 msix_vector_first_id, max_mtu;
+ u32 num_funcs;
+
+ /* cache some func_caps values that should be restored after memset */
+ valid_func = func_caps->common_cap.valid_functions;
+ txq_first_id = func_caps->common_cap.txq_first_id;
+ rxq_first_id = func_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
+ max_mtu = func_caps->common_cap.max_mtu;
+
+ /* unset func capabilities */
+ memset(func_caps, 0, sizeof(*func_caps));
+
+ /* restore cached values */
+ func_caps->common_cap.valid_functions = valid_func;
+ func_caps->common_cap.txq_first_id = txq_first_id;
+ func_caps->common_cap.rxq_first_id = rxq_first_id;
+ func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ func_caps->common_cap.max_mtu = max_mtu;
+
+ /* one Tx and one Rx queue in safe mode */
+ func_caps->common_cap.num_rxq = 1;
+ func_caps->common_cap.num_txq = 1;
+
+ /* two MSIX vectors, one for traffic and one for misc causes */
+ func_caps->common_cap.num_msix_vectors = 2;
+ func_caps->guar_num_vsi = 1;
+
+ /* cache some dev_caps values that should be restored after memset */
+ valid_func = dev_caps->common_cap.valid_functions;
+ txq_first_id = dev_caps->common_cap.txq_first_id;
+ rxq_first_id = dev_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
+ max_mtu = dev_caps->common_cap.max_mtu;
+ num_funcs = dev_caps->num_funcs;
+
+ /* unset dev capabilities */
+ memset(dev_caps, 0, sizeof(*dev_caps));
+
+ /* restore cached values */
+ dev_caps->common_cap.valid_functions = valid_func;
+ dev_caps->common_cap.txq_first_id = txq_first_id;
+ dev_caps->common_cap.rxq_first_id = rxq_first_id;
+ dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ dev_caps->common_cap.max_mtu = max_mtu;
+ dev_caps->num_funcs = num_funcs;
+
+ /* one Tx and one Rx queue per function in safe mode */
+ dev_caps->common_cap.num_rxq = num_funcs;
+ dev_caps->common_cap.num_txq = num_funcs;
+
+ /* two MSIX vectors per function */
+ dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
+}
+
+/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+ enum ice_status status;
+
+ status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
+ if (!status)
+ status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+
+ return status;
+}
+
+/**
+ * ice_aq_manage_mac_write - manage MAC address write command
+ * @hw: pointer to the HW struct
+ * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
+ * @flags: flags to control write behavior
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function is used to write MAC address to the NVM (0x0108).
+ */
+enum ice_status
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_manage_mac_write *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.mac_write;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
+
+ cmd->flags = flags;
+ ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_clear_pxe_mode
+ * @hw: pointer to the HW struct
+ *
+ * Tell the firmware that the driver is taking over from PXE (0x0110).
+ */
+static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
+ desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the HW struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ */
+void ice_clear_pxe_mode(struct ice_hw *hw)
+{
+ if (ice_check_sq_alive(hw, &hw->adminq))
+ ice_aq_clear_pxe_mode(hw);
+}
+
+/**
+ * ice_aq_set_port_params - set physical port parameters.
+ * @pi: pointer to the port info struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set double VLAN is enabled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Physical port parameters (0x0203)
+ */
+enum ice_status
+ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
+ bool save_bad_pac, bool pad_short_pac, bool double_vlan,
+ struct ice_sq_cd *cd)
+
+{
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+ u16 cmd_flags = 0;
+
+ cmd = &desc.params.set_port_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_get_link_speed_based_on_phy_type - returns link speed
+ * @phy_type_low: lower part of phy_type
+ * @phy_type_high: higher part of phy_type
+ *
+ * This helper function will convert an entry in PHY type structure
+ * [phy_type_low, phy_type_high] to its corresponding link speed.
+ * Note: Exactly one bit should be set in [phy_type_low, phy_type_high],
+ * as this function converts one PHY type to its speed.
+ * If no bit is set, or if more than one bit is set,
+ * ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
+ */
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+{
+ u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ switch (phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ switch (phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return speed_phy_type_low;
+ else
+ return speed_phy_type_high;
+}
+
+/**
+ * ice_update_phy_type
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @link_speeds_bitmap: targeted link speeds bitmap
+ *
+ * Note: For the format of link_speeds_bitmap, see
+ * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
+ * link_speeds_bitmap that includes multiple speeds.
+ *
+ * Each entry in the [phy_type_low, phy_type_high] structure represents a
+ * certain link speed. This helper function turns on the bits in
+ * [phy_type_low, phy_type_high] that correspond to the speeds set in the
+ * link_speeds_bitmap input parameter.
+ */
+void
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap)
+{
+ u64 pt_high;
+ u64 pt_low;
+ int index;
+ u16 speed;
+
+ /* We first check with low part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
+ pt_low = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_low |= BIT_ULL(index);
+ }
+
+ /* We then check with high part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
+ pt_high = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_high |= BIT_ULL(index);
+ }
+}
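+
+/*
+ * Illustrative, compiled-out sketch: converting a link-speed bitmap into
+ * phy_type_low/phy_type_high masks with ice_update_phy_type(). The chosen
+ * speeds are an example; the ICE_AQ_LINK_SPEED_* values come from
+ * ice_adminq_cmd.h.
+ */
+#if 0
+static void ice_example_speeds_to_phy_types(void)
+{
+	u64 phy_type_low = 0, phy_type_high = 0;
+	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;
+
+	/* Set every bit in the PHY type masks whose speed is in 'speeds' */
+	ice_update_phy_type(&phy_type_low, &phy_type_high, speeds);
+
+	/* phy_type_low/high would then be placed (little-endian) into a
+	 * struct ice_aqc_set_phy_cfg_data before ice_aq_set_phy_cfg().
+	 */
+}
+#endif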
+
+/**
+ * ice_aq_set_phy_cfg
+ * @hw: pointer to the HW struct
+ * @pi: port info structure of the interested logical port
+ * @cfg: structure with PHY configuration data to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters supported on the Port.
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters. This status will be indicated by the command response (0x0601).
+ */
+enum ice_status
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!cfg)
+ return ICE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
+ ice_debug(hw, ICE_DBG_PHY,
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ cfg->caps);
+
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
+ desc.params.set_phy.lport_num = pi->lport;
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
+ cfg->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+
+ status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+
+ if (!status)
+ pi->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
+
+/**
+ * ice_update_link_info - update status of the HW network link
+ * @pi: port info structure of the interested logical port
+ */
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
+{
+ struct ice_link_status *li;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ li = &pi->phy.link_info;
+
+ status = ice_aq_get_link_info(pi, true, NULL, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_hw *hw;
+
+ hw = pi->hw;
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ pcaps, NULL);
+ if (status == ICE_SUCCESS)
+ ice_memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ice_cache_phy_user_req
+ * @pi: port information structure
+ * @cache_data: PHY logging data
+ * @cache_mode: PHY logging mode
+ *
+ * Log the user request on (FC, FEC, SPEED) for later use.
+ */
+static void
+ice_cache_phy_user_req(struct ice_port_info *pi,
+ struct ice_phy_cache_mode_data cache_data,
+ enum ice_phy_cache_mode cache_mode)
+{
+ if (!pi)
+ return;
+
+ switch (cache_mode) {
+ case ICE_FC_MODE:
+ pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
+ break;
+ case ICE_SPEED_MODE:
+ pi->phy.curr_user_speed_req =
+ cache_data.data.curr_user_speed_req;
+ break;
+ case ICE_FEC_MODE:
+ pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_caps_to_fc_mode
+ * @caps: PHY capabilities
+ *
+ * Convert PHY FC capabilities to ice FC mode
+ */
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
+{
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
+ caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_FULL;
+
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
+ return ICE_FC_TX_PAUSE;
+
+ if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_RX_PAUSE;
+
+ return ICE_FC_NONE;
+}
+
+/**
+ * ice_caps_to_fec_mode
+ * @caps: PHY capabilities
+ * @fec_options: Link FEC options
+ *
+ * Convert PHY FEC capabilities to ice FEC mode
+ */
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
+{
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ return ICE_FEC_AUTO;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+ ICE_AQC_PHY_FEC_25G_KR_REQ))
+ return ICE_FEC_BASER;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
+ return ICE_FEC_RS;
+
+ return ICE_FEC_NONE;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @ena_auto_link_update: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_phy_cache_mode_data cache_data;
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ u8 pause_mask = 0x0;
+ struct ice_hw *hw;
+
+ if (!pi || !aq_failures)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+ *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
+
+ /* Cache user FC request */
+ cache_data.data.curr_user_fc_req = pi->fc.req_mode;
+ ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ switch (pi->fc.req_mode) {
+ case ICE_FC_AUTO:
+ /* Query the value of FC that both the NIC and attached media
+ * can do.
+ */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ pcaps, NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
+ goto out;
+ }
+
+ pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_FULL:
+ pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_RX_PAUSE:
+ pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_TX_PAUSE:
+ pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current PHY config */
+ ice_memset(pcaps, 0, sizeof(*pcaps), ICE_NONDMA_MEM);
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
+ goto out;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
+
+ /* clear the old pause settings */
+ cfg.caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg.caps |= pause_mask;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps->caps) {
+ int retry_count, retry_max = 10;
+
+ /* Auto restart link so settings take effect */
+ if (ena_auto_link_update)
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
+ goto out;
+ }
+
+ /* Update the link info
+ * It sometimes takes a really long time for link to
+ * come back from the atomic reset. Thus, we wait a
+ * little bit.
+ */
+ for (retry_count = 0; retry_count < retry_max; retry_count++) {
+ status = ice_update_link_info(pi);
+
+ if (status == ICE_SUCCESS)
+ break;
+
+ ice_msec_delay(100, true);
+ }
+
+ if (status)
+ *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
+ }
+
+out:
+ ice_free(hw, pcaps);
+ return status;
+}
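+
+/*
+ * Illustrative, compiled-out sketch: requesting full flow control through
+ * ice_set_fc(). The requested mode is an example; aq_failures reports which
+ * AQ step failed, if any.
+ */
+#if 0
+static enum ice_status ice_example_enable_flow_control(struct ice_port_info *pi)
+{
+	enum ice_status status;
+	u8 aq_failures = 0;
+
+	/* Record the desired mode, then apply it with an automatic link
+	 * restart so the new setting takes effect.
+	 */
+	pi->fc.req_mode = ICE_FC_FULL;
+	status = ice_set_fc(pi, &aq_failures, true);
+	if (status)
+		ice_debug(pi->hw, ICE_DBG_LINK,
+			  "set_fc failed, aq_failures = %d\n", aq_failures);
+
+	return status;
+}
+#endif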
+
+/**
+ * ice_phy_caps_equals_cfg
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match the PHY
+ * configuration
+ */
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
+ struct ice_aqc_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
+ ICE_AQC_PHY_EN_MOD_QUAL);
+ cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @pi: port information structure
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy AQC PHY get ability data to PHY set configuration
+ * data structure
+ */
+void
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ if (!pi || !caps || !cfg)
+ return;
+
+ ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+
+ if (ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ return;
+
+ if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
+ cfg->module_compliance_enforcement |=
+ ICE_LINK_OVERRIDE_STRICT_MODE;
+ }
+}
+
+/**
+ * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @pi: port information structure
+ * @cfg: PHY configuration data to set FEC mode
+ * @fec: FEC mode to configure
+ */
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw;
+
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
+
+ hw = pi->hw;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ switch (fec) {
+ case ICE_FEC_BASER:
+ /* Clear RS bits, and AND BASE-R ability
+ * bits and OR request bits.
+ */
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
+ break;
+ case ICE_FEC_RS:
+ /* Clear BASE-R bits, and AND RS ability
+ * bits and OR request bits.
+ */
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+ break;
+ case ICE_FEC_NONE:
+ /* Clear all FEC option bits. */
+ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
+ break;
+ case ICE_FEC_AUTO:
+ /* AND auto FEC bit, and all caps bits. */
+ cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+ cfg->link_fec_opt |= pcaps->link_fec_options;
+ break;
+ default:
+ status = ICE_ERR_PARAM;
+ break;
+ }
+
+ if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ goto out;
+
+ if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
+ (tlv.options & ICE_LINK_OVERRIDE_EN))
+ cfg->link_fec_opt = tlv.fec_options;
+ }
+
+out:
+ ice_free(hw, pcaps);
+
+ return status;
+}
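+
+/*
+ * Illustrative, compiled-out sketch: switching a port to RS-FEC using the
+ * helpers above. The FEC mode is an example, and pcaps is kept on the stack
+ * for brevity; the driver itself heap-allocates it with ice_malloc().
+ */
+#if 0
+static enum ice_status ice_example_set_rs_fec(struct ice_port_info *pi)
+{
+	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+	enum ice_status status;
+
+	/* Start from the currently active SW configuration */
+	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+				     &pcaps, NULL);
+	if (status)
+		return status;
+
+	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
+
+	/* Rewrite the FEC option bits for RS-FEC */
+	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
+	if (status)
+		return status;
+
+	/* Apply the new configuration with an automatic link restart */
+	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+	return ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
+}
+#endif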
+
+/**
+ * ice_get_link_status - get status of the HW network link
+ * @pi: port information structure
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up and false if it is down.
+ * Its value is invalid if the returned status is non-zero. As a result of
+ * this call, link status reporting becomes enabled.
+ */
+enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+{
+ struct ice_phy_info *phy_info;
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!pi || !link_up)
+ return ICE_ERR_PARAM;
+
+ phy_info = &pi->phy;
+
+ if (phy_info->get_link_info) {
+ status = ice_update_link_info(pi);
+
+ if (status)
+ ice_debug(pi->hw, ICE_DBG_LINK,
+ "get link status error, status = %d\n",
+ status);
+ }
+
+ *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
+
+ return status;
+}
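+
+/*
+ * Illustrative, compiled-out sketch: polling link state. Only the success
+ * path is shown; link_up is undefined when the call fails.
+ */
+#if 0
+static bool ice_example_link_is_up(struct ice_port_info *pi)
+{
+	bool link_up = false;
+
+	if (ice_get_link_status(pi, &link_up))
+		return false;	/* treat failure as link down (example policy) */
+
+	return link_up;
+}
+#endif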
+
+/**
+ * ice_aq_set_link_restart_an
+ * @pi: pointer to the port information structure
+ * @ena_link: if true: enable link, if false: disable link
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ */
+enum ice_status
+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_restart_an *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
+
+ cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = pi->lport;
+ if (ena_link)
+ cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_lb *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_lb;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
+ if (ena_lpbk)
+ cmd->lb_mode = ICE_AQ_MAC_LB_EN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_port_id_led
+ * @pi: pointer to the port information
+ * @is_orig_mode: is this LED set to original mode (by the netlist)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set LED value for the given port (0x06e9)
+ */
+enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_id_led *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
+
+ if (is_orig_mode)
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_sff_eeprom
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset. Lower 8 bits hold the address; the upper 8 bits must be zero.
+ * @page: QSFP page
+ * @set_page: set or ignore the page
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: 0 for read, 1 for write.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read/Write SFF EEPROM (0x06EE)
+ */
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_sff_eeprom *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || (mem_addr & 0xff00))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
+ ICE_AQC_SFF_I2CBUS_7BIT_M) |
+ ((set_page <<
+ ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
+ ICE_AQC_SFF_SET_EEPROM_PAGE_M));
+ cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
+ cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ if (write)
+ cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
+
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+ return status;
+}
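+
+/*
+ * Illustrative usage sketch (not part of this change): reading the first 16
+ * bytes of page 0 from a module at the default I2C address 0xA0 on logical
+ * port 0 might look roughly like the following; the local variables, the hw
+ * pointer in scope, and the choice to leave set_page at 0 are assumptions
+ * made only for this example.
+ *
+ *	u8 buf[16];
+ *	enum ice_status status;
+ *
+ *	status = ice_aq_sff_eeprom(hw, (1 << 8) | 0, 0xA0, 0, 0, 0,
+ *				   buf, sizeof(buf), false, NULL);
+ */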
+
+/**
+ * __ice_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: VSI FW index
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ * @glob_lut_idx: global LUT index
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
+ */
+static enum ice_status
+__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size, u8 glob_lut_idx, bool set)
+{
+ struct ice_aqc_get_set_rss_lut *cmd_resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 flags = 0;
+
+ cmd_resp = &desc.params.get_set_rss_lut;
+
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
+ }
+
+ cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
+ ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
+ ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
+ ICE_AQC_GSET_RSS_LUT_VSI_VALID);
+
+ switch (lut_type) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
+ flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
+ break;
+ default:
+ status = ICE_ERR_PARAM;
+ goto ice_aq_get_set_rss_lut_exit;
+ }
+
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
+ flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
+ ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
+
+ if (!set)
+ goto ice_aq_get_set_rss_lut_send;
+ } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ if (!set)
+ goto ice_aq_get_set_rss_lut_send;
+ } else {
+ goto ice_aq_get_set_rss_lut_send;
+ }
+
+ /* LUT size is only valid for Global and PF table types */
+ switch (lut_size) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ }
+ /* fall-through */
+ default:
+ status = ICE_ERR_PARAM;
+ goto ice_aq_get_set_rss_lut_exit;
+ }
+
+ice_aq_get_set_rss_lut_send:
+ cmd_resp->flags = CPU_TO_LE16(flags);
+ status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
+
+ice_aq_get_set_rss_lut_exit:
+ return status;
+}
+
+/**
+ * ice_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ */
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, false);
+}
+
+/**
+ * ice_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ */
+enum ice_status
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, true);
+}
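+
+/*
+ * Illustrative sketch (assumptions only, not taken from this commit): a
+ * typical caller reads the current PF LUT, adjusts entries, and writes the
+ * table back. A 512-byte LUT and a valid vsi_handle are assumed here,
+ * matching one of the sizes accepted by the size check in
+ * __ice_aq_get_set_rss_lut() above.
+ *
+ *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
+ *	enum ice_status status;
+ *
+ *	status = ice_aq_get_rss_lut(hw, vsi_handle,
+ *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
+ *				    lut, sizeof(lut));
+ *	if (!status) {
+ *		lut[0] = 0;
+ *		status = ice_aq_set_rss_lut(hw, vsi_handle,
+ *					    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
+ *					    lut, sizeof(lut));
+ *	}
+ */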
+
+/**
+ * __ice_aq_get_set_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_id: VSI FW index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get (0x0B04) or set (0x0B02) the RSS key per VSI
+ */
+static enum ice_status
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+			 struct ice_aqc_get_set_rss_keys *key, bool set)
+{
+ struct ice_aqc_get_set_rss_key *cmd_resp;
+ u16 key_size = sizeof(*key);
+ struct ice_aq_desc desc;
+
+ cmd_resp = &desc.params.get_set_rss_key;
+
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
+ }
+
+ cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
+ ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
+ ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
+ ICE_AQC_GSET_RSS_KEY_VSI_VALID);
+
+ return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
+}
+
+/**
+ * ice_aq_get_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ */
+enum ice_status
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *key)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ key, false);
+}
+
+/**
+ * ice_aq_set_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @keys: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ */
+enum ice_status
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *keys)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ keys, true);
+}
+
+/**
+ * ice_aq_add_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: Number of added queue groups
+ * @qg_list: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx LAN queue (0x0C30)
+ *
+ * NOTE:
+ * Prior to calling add Tx LAN queue, initialize the following as part of
+ * the Tx queue context: the Completion queue ID (if the queue uses a
+ * Completion queue), the Quanta profile, the Cache profile, and the Packet
+ * shaper profile.
+ *
+ * After the add Tx LAN queue AQ command completes, interrupts should be
+ * associated with specific queues. Associating a Tx queue with a Doorbell
+ * queue is not part of the Add LAN Tx queue flow.
+ */
+enum ice_status
+ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
+ struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ u16 i, sum_header_size, sum_q_size = 0;
+ struct ice_aqc_add_tx_qgrp *list;
+ struct ice_aqc_add_txqs *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.add_txqs;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
+
+ if (!qg_list)
+ return ICE_ERR_PARAM;
+
+ if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
+ return ICE_ERR_PARAM;
+
+ sum_header_size = num_qgrps *
+ (sizeof(*qg_list) - sizeof(*qg_list->txqs));
+
+ list = qg_list;
+ for (i = 0; i < num_qgrps; i++) {
+ struct ice_aqc_add_txqs_perq *q = list->txqs;
+
+ sum_q_size += list->num_txqs * sizeof(*q);
+ list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ }
+
+ if (buf_size != (sum_header_size + sum_q_size))
+ return ICE_ERR_PARAM;
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_qgrps = num_qgrps;
+
+ return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+}
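+
+/*
+ * Illustrative sketch (an assumption, not part of this change): for the
+ * single-group, single-queue case the size check above reduces to the group
+ * header plus one per-queue entry, i.e. sizeof(*qg_list) when the structure
+ * declares a one-element txqs[] array:
+ *
+ *	struct ice_aqc_add_tx_qgrp qg = { 0 };
+ *	enum ice_status status;
+ *
+ *	qg.num_txqs = 1;
+ *	status = ice_aq_add_lan_txq(hw, 1, &qg, sizeof(qg), NULL);
+ *
+ * ice_ena_vsi_txq() below drives this call with a fully initialized buffer.
+ */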
+
+/**
+ * ice_aq_dis_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: number of groups in the list
+ * @qg_list: the list of groups to disable
+ * @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the reset source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
+ * @cd: pointer to command details structure or NULL
+ *
+ * Disable LAN Tx queue (0x0C31)
+ */
+static enum ice_status
+ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
+ struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_dis_txqs *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 i, sz = 0;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ cmd = &desc.params.dis_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
+
+ /* qg_list can be NULL only in VM/VF reset flow */
+ if (!qg_list && !rst_src)
+ return ICE_ERR_PARAM;
+
+ if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
+ return ICE_ERR_PARAM;
+
+ cmd->num_entries = num_qgrps;
+
+ cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+ ICE_AQC_Q_DIS_TIMEOUT_M);
+
+ switch (rst_src) {
+ case ICE_VM_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+ cmd->vmvf_and_timeout |=
+ CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_VF_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+ /* In this case, FW expects vmvf_num to be absolute VF ID */
+ cmd->vmvf_and_timeout |=
+ CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_NO_RESET:
+ default:
+ break;
+ }
+
+ /* flush pipe on time out */
+ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
+ if (!qg_list)
+ goto do_aq;
+
+ /* set RD bit to indicate that command buffer is provided by the driver
+ * and it needs to be read by the firmware
+ */
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ for (i = 0; i < num_qgrps; ++i) {
+ /* Calculate the size taken up by the queue IDs in this group */
+ sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
+
+ /* Add the size of the group header */
+ sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+
+ /* If the num of queues is even, add 2 bytes of padding */
+ if ((qg_list[i].num_qs % 2) == 0)
+ sz += 2;
+ }
+
+ if (buf_size != sz)
+ return ICE_ERR_PARAM;
+
+do_aq:
+ status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+ if (status) {
+ if (!qg_list)
+ ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
+ vmvf_num, hw->adminq.sq_last_status);
+ else
+ ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
+ LE16_TO_CPU(qg_list[0].q_id[0]),
+ hw->adminq.sq_last_status);
+ }
+ return status;
+}
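+
+/*
+ * Worked example of the size check above (illustrative only): for one group
+ * containing a single queue, sz is one q_id (2 bytes) plus the group header
+ * (sizeof(qg_list[0]) minus the embedded q_id) with no padding, since the
+ * queue count is odd; that is exactly sizeof(struct ice_aqc_dis_txq_item),
+ * which is the buf_size that ice_dis_vsi_txq() passes below.
+ */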
+
+/**
+ * ice_aq_move_recfg_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qs: number of queues to move/reconfigure
+ * @is_move: true if this operation involves node movement
+ * @is_tc_change: true if this operation involves a TC change
+ * @subseq_call: true if this operation is a subsequent call
+ * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
+ * @timeout: timeout in units of 100 usec (valid values 0-50)
+ * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
+ * @buf: struct containing src/dest TEID and per-queue info
+ * @buf_size: size of buffer for indirect command
+ * @txqs_moved: out param, number of queues successfully moved
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move / Reconfigure Tx LAN queues (0x0C32)
+ */
+enum ice_status
+ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
+ bool is_tc_change, bool subseq_call, bool flush_pipe,
+ u8 timeout, u32 *blocked_cgds,
+ struct ice_aqc_move_txqs_data *buf, u16 buf_size,
+ u8 *txqs_moved, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_move_txqs *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.move_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
+
+#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
+ if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
+ return ICE_ERR_PARAM;
+
+ if (is_tc_change && !flush_pipe && !blocked_cgds)
+ return ICE_ERR_PARAM;
+
+ if (!is_move && !is_tc_change)
+ return ICE_ERR_PARAM;
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (is_move)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
+
+ if (is_tc_change)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
+
+ if (subseq_call)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
+
+ if (flush_pipe)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
+
+ cmd->num_qs = num_qs;
+ cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
+ ICE_AQC_Q_CMD_TIMEOUT_M);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+
+ if (!status && txqs_moved)
+ *txqs_moved = cmd->num_qs;
+
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
+ is_tc_change && !flush_pipe)
+ *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
+
+ return status;
+}
+
+/* End of FW Admin Queue command wrappers */
+
+/**
+ * ice_write_byte - write a byte to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_write_word - write a word to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u16 src_word, mask;
+ __le16 dest_word;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_write_dword - write a dword to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u32 src_dword, mask;
+ __le32 dest_dword;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 32 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 5 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = (u32)~0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_write_qword - write a qword to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u64 src_qword, mask;
+ __le64 dest_qword;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 64 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 6 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = (u64)~0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_set_ctx - set context bits in packed structure
+ * @src_ctx: pointer to a generic non-packed context structure
+ * @dest_ctx: pointer to memory for the packed structure
+ * @ce_info: a description of the structure to be transformed
+ */
+enum ice_status
+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width; f++) {
+ /* We have to deal with each element of the FW response
+ * using the correct size so that we are correct regardless
+ * of the endianness of the machine.
+ */
+ switch (ce_info[f].size_of) {
+ case sizeof(u8):
+ ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u16):
+ ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u32):
+ ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u64):
+ ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ default:
+ return ICE_ERR_INVAL_SIZE;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
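+
+/*
+ * Illustrative sketch (assumptions flagged, not part of this change): callers
+ * typically fill a host-order context structure and let ice_set_ctx() pack it
+ * field by field into the byte buffer handed to firmware, e.g. using the
+ * ice_tlan_ctx_info table for a Tx LAN queue context. The 22-byte buffer size
+ * and the q_len value standing in for the ring length are assumptions made
+ * only for this example.
+ *
+ *	struct ice_tlan_ctx tlan_ctx = { 0 };
+ *	enum ice_status status;
+ *	u8 ctx_buf[22];
+ *
+ *	tlan_ctx.qlen = q_len;
+ *	status = ice_set_ctx((u8 *)&tlan_ctx, ctx_buf, ice_tlan_ctx_info);
+ */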
+
+/**
+ * ice_read_byte - read context byte into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_read_word - read context word into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ __le16 src_word;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_read_dword - read context dword into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u32 dest_dword, mask;
+ __le32 src_dword;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 32 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 5 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = (u32)~0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_read_qword - read context qword into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u64 dest_qword, mask;
+ __le64 src_qword;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 64 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 6 bits, so the shift would do nothing
+	 */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = (u64)~0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= ~(CPU_TO_LE64(mask));
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_get_ctx - extract context bits from a packed structure
+ * @src_ctx: pointer to a generic packed context structure
+ * @dest_ctx: pointer to a generic non-packed context structure
+ * @ce_info: a description of the structure to be read from
+ */
+enum ice_status
+ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case 2:
+ ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case 4:
+ ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case 8:
+ ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ */
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ struct ice_q_ctx *q_ctx;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return NULL;
+ if (q_handle >= vsi->num_lan_q_entries[tc])
+ return NULL;
+ if (!vsi->lan_q_ctx[tc])
+ return NULL;
+ q_ctx = vsi->lan_q_ctx[tc];
+ return &q_ctx[q_handle];
+}
+
+/**
+ * ice_ena_vsi_txq
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ * @num_qgrps: Number of added queue groups
+ * @buf: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function adds one LAN queue
+ */
+enum ice_status
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_txsched_elem_data node = { 0 };
+ struct ice_sched_node *parent;
+ struct ice_q_ctx *q_ctx;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (num_qgrps > 1 || buf->num_txqs > 1)
+ return ICE_ERR_MAX_LIMIT;
+
+ hw = pi->hw;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
+ if (!q_ctx) {
+ ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
+ q_handle);
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
+ /* find a parent node */
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!parent) {
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
+ buf->parent_teid = parent->info.node_teid;
+ node.parent_teid = parent->info.node_teid;
+	/* Mark the values in the "generic" section as valid. The default
+	 * value in the "generic" section is zero. This means that:
+	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
+	 * - 0 priority among siblings, indicated by Bits 1-3.
+	 * - WFQ, indicated by Bit 4.
+	 * - 0 Adjustment value is used in the PSM credit update flow,
+	 *   indicated by Bits 5-6.
+	 * - Bit 7 is reserved.
+ * Without setting the generic section as valid in valid_sections, the
+ * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
+ */
+ buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+
+ /* add the LAN queue */
+ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
+ LE16_TO_CPU(buf->txqs[0].txq_id),
+ hw->adminq.sq_last_status);
+ goto ena_txq_exit;
+ }
+
+ node.node_teid = buf->txqs[0].q_teid;
+ node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ q_ctx->q_handle = q_handle;
+ q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
+
+ /* add a leaf node into scheduler tree queue layer */
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ if (!status)
+ status = ice_sched_replay_q_bw(pi, q_ctx);
+
+ena_txq_exit:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_dis_vsi_txq
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @num_queues: number of queues
+ * @q_handles: pointer to software queue handle array
+ * @q_ids: pointer to the q_id array
+ * @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the reset source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function removes queues and their corresponding nodes from the SW DB
+ */
+enum ice_status
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handles, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
+ struct ice_aqc_dis_txq_item qg_list;
+ struct ice_q_ctx *q_ctx;
+ u16 i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (!num_queues) {
+		/* if the queues are already disabled but the disable queue
+		 * command still has to be sent to complete the VF reset,
+		 * call ice_aq_dis_lan_txq without any queue information
+		 */
+ if (rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ vmvf_num, NULL);
+ return ICE_ERR_CFG;
+ }
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ for (i = 0; i < num_queues; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
+ if (!node)
+ continue;
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ if (!q_ctx) {
+			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
+ q_handles[i]);
+ continue;
+ }
+ if (q_ctx->q_handle != q_handles[i]) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ q_ctx->q_handle, q_handles[i]);
+ continue;
+ }
+ qg_list.parent_teid = node->info.parent_teid;
+ qg_list.num_qs = 1;
+ qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
+ sizeof(qg_list), rst_src, vmvf_num,
+ cd);
+
+ if (status != ICE_SUCCESS)
+ break;
+ ice_free_sched_node(pi, node);
+ q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
+ }
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @maxqs: max queues array per TC
+ * @owner: LAN or RDMA
+ *
+ * This function adds/updates the VSI queues per TC.
+ */
+static enum ice_status
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *maxqs, u8 owner)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u8 i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ ice_for_each_traffic_class(i) {
+ /* configuration is possible only if TC node is present */
+ if (!ice_sched_get_tc_node(pi, i))
+ continue;
+
+ status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
+ ice_is_tc_ena(tc_bitmap, i));
+ if (status)
+ break;
+ }
+
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_lan - configure VSI LAN queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @max_lanqs: max LAN queues array per TC
+ *
+ * This function adds/updates the VSI LAN queues per TC.
+ */
+enum ice_status
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_lanqs)
+{
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
+ ICE_SCHED_NODE_OWNER_LAN);
+}
+
+/**
+ * ice_replay_pre_init - replay pre initialization
+ * @hw: pointer to the HW struct
+ *
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
+ */
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+	/* Delete old entries from the replay filter list head, if any */
+	ice_rm_all_sw_replay_rule_info(hw);
+	/* At the start of replay, move entries into the replay_rules list;
+	 * this allows rule entries to be added back to the filt_rules list,
+	 * which is the operational list.
+	 */
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
+ LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
+ &sw->recp_list[i].filt_replay_rules);
+ ice_sched_replay_agg_vsi_preinit(hw);
+
+ return ice_sched_replay_tc_node_bw(hw->port_info);
+}
+
+/**
+ * ice_replay_vsi - replay VSI configuration
+ * @hw: pointer to the HW struct
+ * @vsi_handle: driver VSI handle
+ *
+ * Restore all VSI configuration after a reset. This function must be called
+ * with the main VSI first.
+ */
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Replay pre-initialization if there is any */
+ if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
+ status = ice_replay_pre_init(hw);
+ if (status)
+ return status;
+ }
+ /* Replay per VSI all RSS configurations */
+ status = ice_replay_rss_cfg(hw, vsi_handle);
+ if (status)
+ return status;
+ /* Replay per VSI all filters */
+ status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ if (!status)
+ status = ice_replay_vsi_agg(hw, vsi_handle);
+ return status;
+}
+
+/**
+ * ice_replay_post - post replay configuration cleanup
+ * @hw: pointer to the HW struct
+ *
+ * Post replay cleanup.
+ */
+void ice_replay_post(struct ice_hw *hw)
+{
+ /* Delete old entries from replay filter list head */
+ ice_rm_all_sw_replay_rule_info(hw);
+ ice_sched_replay_agg(hw);
+}
+
+/**
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: offset of 64 bit HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
+
+	/* device stats are not reset at PFR, so they likely will not be
+	 * zeroed when the driver starts. Thus, save the value from the first
+	 * read without adding to the statistic value so that we report stats
+	 * which count up from zero.
+	 */
+ if (!prev_stat_loaded) {
+ *prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
+ if (new_data >= *prev_stat)
+ *cur_stat += new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
+}
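+
+/*
+ * Worked example of the roll-over branch above (illustrative numbers only):
+ * with the 40-bit counter, if *prev_stat was 0xFFFFFFFFF0 and the register
+ * now reads 0x10, then (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, so 0x20
+ * counts are added to *cur_stat instead of a huge bogus delta.
+ */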
+
+/**
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: offset of HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+
+	/* device stats are not reset at PFR, so they likely will not be
+	 * zeroed when the driver starts. Thus, save the value from the first
+	 * read without adding to the statistic value so that we report stats
+	 * which count up from zero.
+	 */
+ if (!prev_stat_loaded) {
+ *prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
+ if (new_data >= *prev_stat)
+ *cur_stat += new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
+}
+
+/**
+ * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
+ * @hw: ptr to the hardware info
+ * @vsi_handle: VSI handle
+ * @prev_stat_loaded: bool to specify if the previous stat values are loaded
+ * @cur_stats: ptr to current stats structure
+ *
+ * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
+ * thus cannot be read using the normal ice_stat_update32 function.
+ *
+ * Read the GLV_REPC register associated with the given VSI, and update the
+ * rx_no_desc and rx_error values in the ice_eth_stats structure.
+ *
+ * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
+ * cleared each time it's read.
+ *
+ * Note that the GLV_RDPC register also counts the causes that would trigger
+ * GLV_REPC. However, it does not give the finer grained detail about why the
+ * packets are being dropped. The GLV_REPC values can be used to distinguish
+ * whether Rx packets are dropped due to errors or due to no available
+ * descriptors.
+ */
+void
+ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
+ struct ice_eth_stats *cur_stats)
+{
+ u16 vsi_num, no_desc, error_cnt;
+ u32 repc;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return;
+
+ vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ /* If we haven't loaded stats yet, just clear the current value */
+ if (!prev_stat_loaded) {
+ wr32(hw, GLV_REPC(vsi_num), 0);
+ return;
+ }
+
+ repc = rd32(hw, GLV_REPC(vsi_num));
+ no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
+ error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
+
+ /* Clear the count by writing to the stats register */
+ wr32(hw, GLV_REPC(vsi_num), 0);
+
+ cur_stats->rx_no_desc += no_desc;
+ cur_stats->rx_errors += error_cnt;
+}
+
+/**
+ * ice_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to the alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ */
+enum ice_status
+ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct ice_aqc_read_write_alt_direct *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
+ cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = CPU_TO_LE32(reg_val1);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * ice_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from the alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer
+ * is not passed, only the register at 'reg_addr0' is read.
+ */
+enum ice_status
+ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct ice_aqc_read_write_alt_direct *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
+ cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+
+ if (status == ICE_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ */
+enum ice_status
+ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
+{
+ struct ice_aqc_done_alt_write *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *reset_needed = (LE16_TO_CPU(cmd->flags) &
+ ICE_AQC_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
+
+/**
+ * ice_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ */
+enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * ice_sched_query_elem - query element information from HW
+ * @hw: pointer to the HW struct
+ * @node_teid: node TEID to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
+ buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status != ICE_SUCCESS || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
+
+/**
+ * ice_get_fw_mode - returns FW mode
+ * @hw: pointer to the HW struct
+ */
+enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
+{
+#define ICE_FW_MODE_DBG_M BIT(0)
+#define ICE_FW_MODE_REC_M BIT(1)
+#define ICE_FW_MODE_ROLLBACK_M BIT(2)
+ u32 fw_mode;
+
+ /* check the current FW mode */
+ fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
+
+ if (fw_mode & ICE_FW_MODE_DBG_M)
+ return ICE_FW_MODE_DBG;
+ else if (fw_mode & ICE_FW_MODE_REC_M)
+ return ICE_FW_MODE_REC;
+ else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
+ return ICE_FW_MODE_ROLLBACK;
+ else
+ return ICE_FW_MODE_NORMAL;
+}
+
+/**
+ * ice_get_cur_lldp_persist_status
+ * @hw: pointer to the HW struct
+ * @lldp_status: return value of LLDP persistent status
+ *
+ * Get the current LLDP persistent status
+ */
+enum ice_status
+ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret;
+ __le32 raw_data;
+ u32 data, mask;
+
+ if (!lldp_status)
+ return ICE_ERR_BAD_PTR;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
+ ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
+ ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
+ false, true, NULL);
+ if (!ret) {
+ data = LE32_TO_CPU(raw_data);
+ mask = ICE_AQC_NVM_LLDP_STATUS_M <<
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ data = data & mask;
+ *lldp_status = data >>
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ }
+
+ ice_release_nvm(hw);
+
+ return ret;
+}
+
+/**
+ * ice_get_dflt_lldp_persist_status
+ * @hw: pointer to the HW struct
+ * @lldp_status: return value of LLDP persistent status
+ *
+ * Get the default LLDP persistent status
+ */
+enum ice_status
+ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
+{
+ struct ice_port_info *pi = hw->port_info;
+ u32 data, mask, loc_data, loc_data_tmp;
+ enum ice_status ret;
+ __le16 loc_raw_data;
+ __le32 raw_data;
+
+ if (!lldp_status)
+ return ICE_ERR_BAD_PTR;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+
+ /* Read the offset of EMP_SR_PTR */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
+ ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
+ ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
+ &loc_raw_data, false, true, NULL);
+ if (ret)
+ goto exit;
+
+ loc_data = LE16_TO_CPU(loc_raw_data);
+ if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
+ loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
+ loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
+ } else {
+ loc_data *= ICE_AQC_NVM_WORD_UNIT;
+ }
+
+ /* Read the offset of LLDP configuration pointer */
+ loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
+ ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
+ false, true, NULL);
+ if (ret)
+ goto exit;
+
+ loc_data_tmp = LE16_TO_CPU(loc_raw_data);
+ loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
+ loc_data += loc_data_tmp;
+
+	/* We need to skip the LLDP configuration section length (2 bytes) */
+ loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
+
+	/* Read the LLDP default configuration */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
+ ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
+ true, NULL);
+ if (!ret) {
+ data = LE32_TO_CPU(raw_data);
+ mask = ICE_AQC_NVM_LLDP_STATUS_M <<
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ data = data & mask;
+ *lldp_status = data >>
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ }
+
+exit:
+ ice_release_nvm(hw);
+
+ return ret;
+}
+
+/**
+ * ice_get_netlist_ver_info
+ * @hw: pointer to the HW struct
+ *
+ * Get the netlist version information
+ */
+enum ice_status
+ice_get_netlist_ver_info(struct ice_hw *hw)
+{
+ struct ice_netlist_ver_info *ver = &hw->netlist_ver;
+ enum ice_status ret;
+ u32 id_blk_start;
+ __le16 raw_data;
+ u16 data, i;
+ u16 *buff;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+ buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
+ sizeof(*buff));
+ if (!buff) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto exit_no_mem;
+ }
+
+ /* read module length */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+
+ data = LE16_TO_CPU(raw_data);
+	/* exit if length is 0 */
+ if (!data)
+ goto exit_error;
+
+ /* read node count */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+ data = LE16_TO_CPU(raw_data);
+
+ /* netlist ID block starts from offset 4 + node count * 2 */
+ id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+ /* read the entire netlist ID block */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ id_blk_start * 2,
+ ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+ false, NULL);
+ if (ret)
+ goto exit_error;
+
+ for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+ buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
+
+ ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+ ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+ ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+ ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+	/* Read the leftmost 4 bytes of the SHA hash */
+ ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+ ice_free(hw, buff);
+exit_no_mem:
+ ice_release_nvm(hw);
+ return ret;
+}
+
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
+ hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_get_link_default_override
+ * @ldo: pointer to the link default override struct
+ * @pi: pointer to the port info struct
+ *
+ * Gets the link default override for a port
+ */
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi)
+{
+ u16 i, tlv, tlv_len, tlv_start, buf, offset;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
+ ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read link override TLV.\n");
+ return status;
+ }
+
+ /* Each port has its own config; calculate for our port */
+ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
+ ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
+
+ /* link options first */
+ status = ice_read_sr_word(hw, tlv_start, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
+ ICE_LINK_OVERRIDE_PHY_CFG_S;
+
+ /* link PHY config */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
+ status = ice_read_sr_word(hw, offset, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override phy config.\n");
+ return status;
+ }
+ ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
+
+ /* PHY types low */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_low |= ((u64)buf << (i * 16));
+ }
+
+ /* PHY types high */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
+ ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_high |= ((u64)buf << (i * 16));
+ }
+
+ return status;
+}
diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h
new file mode 100644
index 000000000000..1cff7efb8150
--- /dev/null
+++ b/sys/dev/ice/ice_common.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_COMMON_H_
+#define _ICE_COMMON_H_
+
+#include "ice_type.h"
+#include "ice_nvm.h"
+#include "ice_flex_pipe.h"
+#include "virtchnl.h"
+#include "ice_switch.h"
+
+enum ice_fw_modes {
+ ICE_FW_MODE_NORMAL,
+ ICE_FW_MODE_DBG,
+ ICE_FW_MODE_REC,
+ ICE_FW_MODE_ROLLBACK
+};
+
+/* prototypes for functions used for SW locks */
+void ice_free_list(struct LIST_HEAD_TYPE *list);
+void ice_init_lock(struct ice_lock *lock);
+void ice_acquire_lock(struct ice_lock *lock);
+void ice_release_lock(struct ice_lock *lock);
+void ice_destroy_lock(struct ice_lock *lock);
+
+void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
+void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
+
+void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+
+enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
+enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+enum ice_status ice_init_hw(struct ice_hw *hw);
+void ice_deinit_hw(struct ice_hw *hw);
+enum ice_status ice_check_reset(struct ice_hw *hw);
+enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_destroy_all_ctrlq(struct ice_hw *hw);
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending);
+enum ice_status
+ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status ice_update_link_info(struct ice_port_info *pi);
+enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_nvm(struct ice_hw *hw);
+enum ice_status
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
+ void *data, bool last_command, bool read_shadow_ram,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u32 timeout);
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+void ice_clear_pxe_mode(struct ice_hw *hw);
+
+enum ice_status ice_get_caps(struct ice_hw *hw);
+
+void ice_set_safe_mode_caps(struct ice_hw *hw);
+
+enum ice_status ice_set_mac_type(struct ice_hw *hw);
+
+/* Define a macro that will align a pointer to point to the next memory address
+ * that falls on the given power of 2 (e.g., 2, 4, 8, 16, 32, 64...). For
+ * example, given the variable pointer = 0x1006, then after the following call:
+ *
+ * pointer = ICE_ALIGN(pointer, 4)
+ *
+ * ... the value of pointer would equal 0x1008, since 0x1008 is the next
+ * address after 0x1006 which is divisible by 4.
+ */
+#define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1))
+
+enum ice_status
+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
+enum ice_status
+ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
+enum ice_status
+ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
+ struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
+ u32 tx_cmpltnq_index);
+enum ice_status
+ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
+enum ice_status
+ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
+ struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
+ u32 tx_drbell_q_index);
+
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
+ u16 lut_size);
+enum ice_status
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
+ u16 lut_size);
+enum ice_status
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *keys);
+enum ice_status
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *keys);
+enum ice_status
+ice_aq_add_lan_txq(struct ice_hw *hw, u8 count,
+ struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
+ bool is_tc_change, bool subseq_call, bool flush_pipe,
+ u8 timeout, u32 *blocked_cgds,
+ struct ice_aqc_move_txqs_data *buf, u16 buf_size,
+ u8 *txqs_moved, struct ice_sq_cd *cd);
+
+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
+extern const struct ice_ctx_ele ice_tlan_ctx_info[];
+enum ice_status
+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+
+enum ice_status
+ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd);
+enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
+ bool save_bad_pac, bool pad_short_pac, bool double_vlan,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+void
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap);
+enum ice_status
+ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
+ struct ice_sq_cd *cd);
+
+enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
+enum ice_status
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+bool ice_fw_supports_link_override(struct ice_hw *hw);
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi);
+
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
+ bool ena_auto_link_update);
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+void
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec);
+enum ice_status
+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info);
+enum ice_status
+__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
+enum ice_status
+__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data);
+enum ice_status
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handle, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_lanqs);
+enum ice_status
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+void ice_replay_post(struct ice_hw *hw);
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
+void ice_sched_replay_agg(struct ice_hw *hw);
+enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+void
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
+ struct ice_eth_stats *cur_stats);
+enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
+void ice_print_rollback_msg(struct ice_hw *hw);
+bool ice_is_generic_mac(struct ice_hw *hw);
+enum ice_status
+ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum ice_status
+ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum ice_status
+ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf);
+enum ice_status
+ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
+enum ice_status
+ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
+enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw);
+#endif /* _ICE_COMMON_H_ */
diff --git a/sys/dev/ice/ice_common_sysctls.h b/sys/dev/ice/ice_common_sysctls.h
new file mode 100644
index 000000000000..82ead3fcfecc
--- /dev/null
+++ b/sys/dev/ice/ice_common_sysctls.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_common_sysctls.h
+ * @brief driver-wide sysctls not related to the iflib stack
+ *
+ * Contains static sysctl variables that are driver-wide and configure all
+ * devices of the driver at once.
+ *
+ * Device-specific sysctls are set up by functions in ice_lib.c
+ */
+
+#ifndef _ICE_COMMON_SYSCTLS_H_
+#define _ICE_COMMON_SYSCTLS_H_
+
+#include <sys/sysctl.h>
+
+/**
+ * @var ice_enable_tx_fc_filter
+ * @brief boolean indicating if the Tx Flow Control filter should be enabled
+ *
+ * Global sysctl variable indicating whether the Tx Flow Control filters
+ * should be enabled. If true, Ethertype 0x8808 packets will be dropped if
+ * they come from non-HW sources. If false, packets coming from software will
+ * not be dropped. Leave this on unless you must send flow control frames
+ * (or other control frames) from software.
+ *
+ * @remark each PF has a separate sysctl which can override this value.
+ */
+bool ice_enable_tx_fc_filter = true;
+
+/**
+ * @var ice_enable_tx_lldp_filter
+ * @brief boolean indicating if the Tx LLDP filter should be enabled
+ *
+ * Global sysctl variable indicating whether the Tx LLDP filters should be
+ * enabled. If true, Ethertype 0x88cc packets will be dropped if they come
+ * from non-HW sources. If false, packets coming from software will not be
+ * dropped. Leave this on unless you must send LLDP frames from software.
+ *
+ * @remark each PF has a separate sysctl which can override this value.
+ */
+bool ice_enable_tx_lldp_filter = true;
+
+/* sysctls marked as tunable (i.e. with CTLFLAG_TUN set) will
+ * automatically load tunable values, without the need to manually create the
+ * TUNABLE definition.
+ *
+ * This works since at least FreeBSD 11, and was backported into FreeBSD 10
+ * before FreeBSD 10.1-RELEASE.
+ *
+ * If the tunable needs a custom loader, mark the SYSCTL as CTLFLAG_NOFETCH,
+ * and create the tunable manually.
+ */
+
+static SYSCTL_NODE(_hw, OID_AUTO, ice, CTLFLAG_RD, 0, "ICE driver parameters");
+
+static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0,
+ "ICE driver debug parameters");
+
+SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
+ &ice_enable_tx_fc_filter, 0,
+ "Drop Ethertype 0x8808 control frames originating from non-HW sources");
+
+SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN,
+ &ice_enable_tx_lldp_filter, 0,
+ "Drop Ethertype 0x88cc LLDP frames originating from non-HW sources");
+
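+/* For example, since these sysctls are created under the hw.ice.debug node
+ * with CTLFLAG_RDTUN, the filters can be disabled at boot time from
+ * /boot/loader.conf:
+ *
+ *   hw.ice.debug.enable_tx_fc_filter="0"
+ *   hw.ice.debug.enable_tx_lldp_filter="0"
+ */
+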
+#endif /* _ICE_COMMON_SYSCTLS_H_ */
diff --git a/sys/dev/ice/ice_common_txrx.h b/sys/dev/ice/ice_common_txrx.h
new file mode 100644
index 000000000000..8fb3d49288d8
--- /dev/null
+++ b/sys/dev/ice/ice_common_txrx.h
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_common_txrx.h
+ * @brief common Tx/Rx utility functions
+ *
+ * Contains common utility functions for the Tx/Rx hot path.
+ *
+ * The functions do depend on the if_pkt_info_t structure. A suitable
+ * implementation of this structure must be provided if these functions are to
+ * be used without the iflib networking stack.
+ */
+
+#ifndef _ICE_COMMON_TXRX_H_
+#define _ICE_COMMON_TXRX_H_
+
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+
+/**
+ * ice_tso_detect_sparse - detect TSO packets with too many segments
+ * @pi: packet information
+ *
+ * Hardware only transmits packets with a maximum of 8 descriptors. For TSO
+ * packets, hardware needs to be able to build the split packets using 8 or
+ * fewer descriptors. Additionally, the header must be contained within at
+ * most 3 descriptors.
+ *
+ * To verify this, we walk the headers to find out how many descriptors the
+ * headers require (usually 1). Then we ensure that, for each TSO segment, its
+ * data plus the headers are contained within 8 or fewer descriptors.
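+ *
+ * Returns 1 if the packet is too fragmented for hardware TSO, in which case
+ * the caller must fall back (for example by defragmenting the mbuf chain
+ * first), or 0 if the packet layout is acceptable.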
+ */
+static inline int
+ice_tso_detect_sparse(if_pkt_info_t pi)
+{
+ int count, curseg, i, hlen, segsz, seglen, tsolen, hdrs, maxsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ int nsegs = pi->ipi_nsegs;
+
+ curseg = hdrs = 0;
+
+ hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
+ tsolen = pi->ipi_len - hlen;
+
+ /* First, count the number of descriptors for the header.
+ * Additionally, make sure it does not span more than 3 segments.
+ */
+ i = 0;
+ curseg = segs[0].ds_len;
+ while (hlen > 0) {
+ hdrs++;
+ if (hdrs > ICE_MAX_TSO_HDR_SEGS)
+ return (1);
+ if (curseg == 0) {
+ i++;
+ if (__predict_false(i == nsegs))
+ return (1);
+
+ curseg = segs[i].ds_len;
+ }
+ seglen = min(curseg, hlen);
+ curseg -= seglen;
+ hlen -= seglen;
+ }
+
+ maxsegs = ICE_MAX_TX_SEGS - hdrs;
+
+ /* We must count the headers, in order to verify that they take up
+ * 3 or fewer descriptors. However, we don't need to check the data
+	 * if the total number of segments is small.
+ */
+ if (nsegs <= maxsegs)
+ return (0);
+
+ count = 0;
+
+ /* Now check the data to make sure that each TSO segment is made up of
+ * no more than maxsegs descriptors. This ensures that hardware will
+ * be capable of performing TSO offload.
+ */
+ while (tsolen > 0) {
+ segsz = pi->ipi_tso_segsz;
+ while (segsz > 0 && tsolen != 0) {
+ count++;
+ if (count > maxsegs) {
+ return (1);
+ }
+ if (curseg == 0) {
+ i++;
+ if (__predict_false(i == nsegs)) {
+ return (1);
+ }
+ curseg = segs[i].ds_len;
+ }
+ seglen = min(curseg, segsz);
+ segsz -= seglen;
+ curseg -= seglen;
+ tsolen -= seglen;
+ }
+ count = 0;
+ }
+
+ return (0);
+}
+
+/**
+ * ice_tso_setup - Setup a context descriptor to prepare for a TSO packet
+ * @txq: the Tx queue to use
+ * @pi: the packet info to prepare for
+ *
+ * Setup a context descriptor in preparation for sending a Tx packet that
+ * requires the TSO offload. Returns the index of the descriptor to use when
+ * encapsulating the Tx packet data into descriptors.
+ */
+static inline int
+ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi)
+{
+ struct ice_tx_ctx_desc *txd;
+ u32 cmd, mss, type, tsolen;
+ int idx;
+ u64 type_cmd_tso_mss;
+
+ idx = pi->ipi_pidx;
+ txd = (struct ice_tx_ctx_desc *)&txq->tx_base[idx];
+ tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
+
+ type = ICE_TX_DESC_DTYPE_CTX;
+ cmd = ICE_TX_CTX_DESC_TSO;
+ /* TSO MSS must not be less than 64 */
+ if (pi->ipi_tso_segsz < ICE_MIN_TSO_MSS) {
+ txq->stats.mss_too_small++;
+ pi->ipi_tso_segsz = ICE_MIN_TSO_MSS;
+ }
+ mss = pi->ipi_tso_segsz;
+
+ type_cmd_tso_mss = ((u64)type << ICE_TXD_CTX_QW1_DTYPE_S) |
+ ((u64)cmd << ICE_TXD_CTX_QW1_CMD_S) |
+ ((u64)tsolen << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+ ((u64)mss << ICE_TXD_CTX_QW1_MSS_S);
+ txd->qw1 = htole64(type_cmd_tso_mss);
+
+ txd->tunneling_params = htole32(0);
+ txq->tso++;
+
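+	/* Advance and wrap the descriptor index; the mask-based wrap relies
+	 * on desc_count being a power of two.
+	 */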
+ return ((idx + 1) & (txq->desc_count-1));
+}
+
+/**
+ * ice_tx_setup_offload - Setup register values for performing a Tx offload
+ * @txq: The Tx queue, used to track checksum offload stats
+ * @pi: the packet info to program for
+ * @cmd: the cmd register value to update
+ * @off: the off register value to update
+ *
+ * Based on the packet info provided, update the cmd and off values for
+ * enabling Tx offloads. This depends on the packet type and which offloads
+ * have been requested.
+ *
+ * We also track the total number of times that we've requested hardware
+ * offload a particular type of checksum for debugging purposes.
+ */
+static inline void
+ice_tx_setup_offload(struct ice_tx_queue *txq, if_pkt_info_t pi, u32 *cmd, u32 *off)
+{
+ u32 remaining_csum_flags = pi->ipi_csum_flags;
+
+ switch (pi->ipi_etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if (pi->ipi_csum_flags & ICE_CSUM_IP) {
+ *cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ txq->stats.cso[ICE_CSO_STAT_TX_IP4]++;
+ remaining_csum_flags &= ~CSUM_IP;
+ } else
+ *cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ *cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ /*
+ * This indicates that the IIPT flag was set to the IPV6 value;
+ * there's no checksum for IPv6 packets.
+ */
+ txq->stats.cso[ICE_CSO_STAT_TX_IP6]++;
+ break;
+#endif
+ default:
+ txq->stats.cso[ICE_CSO_STAT_TX_L3_ERR]++;
+ break;
+ }
+
+ *off |= (pi->ipi_ehdrlen >> 1) << ICE_TX_DESC_LEN_MACLEN_S;
+ *off |= (pi->ipi_ip_hlen >> 2) << ICE_TX_DESC_LEN_IPLEN_S;
+
+ if (!(remaining_csum_flags & ~ICE_RX_CSUM_FLAGS))
+ return;
+
+ switch (pi->ipi_ipproto) {
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & ICE_CSUM_TCP) {
+ *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+ *off |= (pi->ipi_tcp_hlen >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ txq->stats.cso[ICE_CSO_STAT_TX_TCP]++;
+ }
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & ICE_CSUM_UDP) {
+ *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+ *off |= (sizeof(struct udphdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ txq->stats.cso[ICE_CSO_STAT_TX_UDP]++;
+ }
+ break;
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & ICE_CSUM_SCTP) {
+ *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *off |= (sizeof(struct sctphdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ txq->stats.cso[ICE_CSO_STAT_TX_SCTP]++;
+ }
+ break;
+ default:
+ txq->stats.cso[ICE_CSO_STAT_TX_L4_ERR]++;
+ break;
+ }
+}
+
+/**
+ * ice_rx_checksum - verify hardware checksum is valid or not
+ * @rxq: the Rx queue structure
+ * @flags: checksum flags to update
+ * @data: checksum data to update
+ * @status0: descriptor status data
+ * @ptype: packet type
+ *
+ * Determine whether the hardware indicated that the Rx checksum is valid. If
+ * so, update the checksum flags and data, informing the stack of the status
+ * of the checksum so that it does not spend time verifying it manually.
+ */
+static void
+ice_rx_checksum(struct ice_rx_queue *rxq, uint32_t *flags, uint32_t *data,
+ u16 status0, u16 ptype)
+{
+ const u16 l3_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S));
+ const u16 l4_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
+ const u16 xsum_errors = (l3_error | l4_error |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S));
+ struct ice_rx_ptype_decoded decoded;
+ bool is_ipv4, is_ipv6;
+
+ /* No L3 or L4 checksum was calculated */
+ if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) {
+ return;
+ }
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+ *flags = 0;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ is_ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ is_ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ /* No checksum errors were reported */
+ if (!(status0 & xsum_errors)) {
+ if (is_ipv4)
+ *flags |= CSUM_L3_CALC | CSUM_L3_VALID;
+
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ *flags |= CSUM_L4_CALC | CSUM_L4_VALID;
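+			/* A csum_data value of 0xffff indicates to the stack
+			 * that the L4 checksum needs no further verification.
+			 */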
+ *data |= htons(0xffff);
+ break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ /*
+ * Certain IPv6 extension headers impact the validity of L4 checksums.
+ * If one of these headers exist, hardware will set the IPV6EXADD bit
+ * in the descriptor. If the bit is set then pretend like hardware
+ * didn't checksum this packet.
+ */
+ if (is_ipv6 && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))) {
+ rxq->stats.cso[ICE_CSO_STAT_RX_IP6_ERR]++;
+ return;
+ }
+
+ /*
+ * At this point, status0 must have at least one of the l3_error or
+ * l4_error bits set.
+ */
+
+ if (status0 & l3_error) {
+ if (is_ipv4) {
+ rxq->stats.cso[ICE_CSO_STAT_RX_IP4_ERR]++;
+ *flags |= CSUM_L3_CALC;
+ } else {
+ /* Hardware indicated L3 error but this isn't IPv4? */
+ rxq->stats.cso[ICE_CSO_STAT_RX_L3_ERR]++;
+ }
+ /* don't bother reporting L4 errors if we got an L3 error */
+ return;
+ } else if (is_ipv4) {
+ *flags |= CSUM_L3_CALC | CSUM_L3_VALID;
+ }
+
+ if (status0 & l4_error) {
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ rxq->stats.cso[ICE_CSO_STAT_RX_TCP_ERR]++;
+ *flags |= CSUM_L4_CALC;
+ break;
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ rxq->stats.cso[ICE_CSO_STAT_RX_UDP_ERR]++;
+ *flags |= CSUM_L4_CALC;
+ break;
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ rxq->stats.cso[ICE_CSO_STAT_RX_SCTP_ERR]++;
+ *flags |= CSUM_L4_CALC;
+ break;
+ default:
+ /*
+ * Hardware indicated L4 error, but this isn't one of
+ * the expected protocols.
+ */
+ rxq->stats.cso[ICE_CSO_STAT_RX_L4_ERR]++;
+ }
+ }
+}
+
+/**
+ * ice_ptype_to_hash - Convert packet type to a hash value
+ * @ptype: the packet type to convert
+ *
+ * Given the packet type, convert to a suitable hashtype to report to the
+ * upper stack via the iri_rsstype value of the if_rxd_info_t structure.
+ *
+ * If the hash type is unknown we'll report M_HASHTYPE_OPAQUE.
+ */
+static inline int
+ice_ptype_to_hash(u16 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+
+ if (ptype >= ARRAY_SIZE(ice_ptype_lkup))
+ return M_HASHTYPE_OPAQUE;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return M_HASHTYPE_OPAQUE;
+
+ if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
+ return M_HASHTYPE_OPAQUE;
+
+ /* Note: anything that gets to this point is IP */
+ if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6) {
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV6;
+ default:
+ return M_HASHTYPE_RSS_IPV6;
+ }
+ }
+ if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4) {
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV4;
+ default:
+ return M_HASHTYPE_RSS_IPV4;
+ }
+ }
+
+ /* We should never get here!! */
+ return M_HASHTYPE_OPAQUE;
+}
+#endif
diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c
new file mode 100644
index 000000000000..0346b81409a2
--- /dev/null
+++ b/sys/dev/ice/ice_controlq.c
@@ -0,0 +1,1227 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+
+#define ICE_CQ_INIT_REGS(qinfo, prefix) \
+do { \
+ (qinfo)->sq.head = prefix##_ATQH; \
+ (qinfo)->sq.tail = prefix##_ATQT; \
+ (qinfo)->sq.len = prefix##_ATQLEN; \
+ (qinfo)->sq.bah = prefix##_ATQBAH; \
+ (qinfo)->sq.bal = prefix##_ATQBAL; \
+ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
+ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
+ (qinfo)->rq.head = prefix##_ARQH; \
+ (qinfo)->rq.tail = prefix##_ARQT; \
+ (qinfo)->rq.len = prefix##_ARQLEN; \
+ (qinfo)->rq.bah = prefix##_ARQBAH; \
+ (qinfo)->rq.bal = prefix##_ARQBAL; \
+ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
+ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
+} while (0)
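+
+/* For example, ICE_CQ_INIT_REGS(qinfo, PF_FW) pastes the prefix onto each
+ * register name, e.g. (qinfo)->sq.head = PF_FW_ATQH, filling in the AdminQ
+ * register offsets; the mailbox queue uses the PF_MBX prefix instead.
+ */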
+
+/**
+ * ice_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_adminq_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->adminq;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ICE_CQ_INIT_REGS(cq, PF_FW);
+}
+
+/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+ ICE_CQ_INIT_REGS(cq, PF_MBX);
+}
+
+/**
+ * ice_check_sq_alive
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Returns true if Queue is enabled else false.
+ */
+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ /* check both queue-length and queue-enable fields */
+ if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
+ return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
+ cq->sq.len_ena_mask)) ==
+ (cq->num_sq_entries | cq->sq.len_ena_mask);
+
+ return false;
+}
+
+/**
+ * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+
+ cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
+ if (!cq->sq.desc_buf.va)
+ return ICE_ERR_NO_MEMORY;
+
+ cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
+ sizeof(struct ice_sq_cd));
+ if (!cq->sq.cmd_buf) {
+ ice_free_dma_mem(hw, &cq->sq.desc_buf);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+
+ cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
+ if (!cq->rq.desc_buf.va)
+ return ICE_ERR_NO_MEMORY;
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_cq_ring - Free control queue ring
+ * @hw: pointer to the hardware structure
+ * @ring: pointer to the specific control queue ring
+ *
+ * This assumes the posted buffers have already been cleaned
+ * and de-allocated
+ */
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
+{
+ ice_free_dma_mem(hw, &ring->desc_buf);
+}
+
+/**
+ * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+ cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
+ sizeof(cq->rq.desc_buf));
+ if (!cq->rq.dma_head)
+ return ICE_ERR_NO_MEMORY;
+ cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < cq->num_rq_entries; i++) {
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+
+ bi = &cq->rq.r.rq_bi[i];
+ bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
+ if (!bi->va)
+ goto unwind_alloc_rq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = ICE_CTL_Q_DESC(cq->rq, i);
+
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
+ desc->opcode = 0;
+		/* This is in accordance with Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16(bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.generic.addr_high =
+ CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
+ desc->params.generic.addr_low =
+ CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
+ }
+ return ICE_SUCCESS;
+
+unwind_alloc_rq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
+ ice_free(hw, cq->rq.dma_head);
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
+ sizeof(cq->sq.desc_buf));
+ if (!cq->sq.dma_head)
+ return ICE_ERR_NO_MEMORY;
+ cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < cq->num_sq_entries; i++) {
+ struct ice_dma_mem *bi;
+
+ bi = &cq->sq.r.sq_bi[i];
+ bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
+ if (!bi->va)
+ goto unwind_alloc_sq_bufs;
+ }
+ return ICE_SUCCESS;
+
+unwind_alloc_sq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
+ ice_free(hw, cq->sq.dma_head);
+
+ return ICE_ERR_NO_MEMORY;
+}
+
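+/**
+ * ice_cfg_cq_regs - setup registers for a control queue ring
+ * @hw: pointer to the hardware structure
+ * @ring: pointer to the specific control queue ring
+ * @num_entries: number of descriptors in the ring
+ *
+ * Program the head, tail, length, and base address registers for a control
+ * queue ring, then read back the base address low register to verify that
+ * the configuration was applied.
+ */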
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
+{
+ /* Clear Head and Tail */
+ wr32(hw, ring->head, 0);
+ wr32(hw, ring->tail, 0);
+
+ /* set starting point */
+ wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+ wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
+ wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_cfg_sq_regs - configure Control ATQ registers
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * Configure base address and length registers for the transmit queue
+ */
+static enum ice_status
+ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
+}
+
+/**
+ * ice_cfg_rq_regs - configure Control ARQ register
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * Configure base address and length registers for the receive (event queue)
+ */
+static enum ice_status
+ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status status;
+
+ status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+ if (status)
+ return status;
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_init_sq - main initialization routine for Control ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * This is the main initialization routine for the Control Send Queue.
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure:
+ * - cq->num_sq_entries
+ * - cq->sq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (cq->sq.count > 0) {
+ /* queue already initialized */
+ ret_code = ICE_ERR_NOT_READY;
+ goto init_ctrlq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if (!cq->num_sq_entries || !cq->sq_buf_size) {
+ ret_code = ICE_ERR_CFG;
+ goto init_ctrlq_exit;
+ }
+
+ cq->sq.next_to_use = 0;
+ cq->sq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = ice_alloc_sq_bufs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* initialize base registers */
+ ret_code = ice_cfg_sq_regs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* success! */
+ cq->sq.count = cq->num_sq_entries;
+ goto init_ctrlq_exit;
+
+init_ctrlq_free_rings:
+ ice_free_cq_ring(hw, &cq->sq);
+
+init_ctrlq_exit:
+ return ret_code;
+}
+
+/**
+ * ice_init_rq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure:
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (cq->rq.count > 0) {
+ /* queue already initialized */
+ ret_code = ICE_ERR_NOT_READY;
+ goto init_ctrlq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if (!cq->num_rq_entries || !cq->rq_buf_size) {
+ ret_code = ICE_ERR_CFG;
+ goto init_ctrlq_exit;
+ }
+
+ cq->rq.next_to_use = 0;
+ cq->rq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = ice_alloc_rq_bufs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* initialize base registers */
+ ret_code = ice_cfg_rq_regs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* success! */
+ cq->rq.count = cq->num_rq_entries;
+ goto init_ctrlq_exit;
+
+init_ctrlq_free_rings:
+ ice_free_cq_ring(hw, &cq->rq);
+
+init_ctrlq_exit:
+ return ret_code;
+}
+
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) \
+ ice_free_dma_mem((hw), \
+ &(qi)->ring.r.ring##_bi[i]); \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ ice_free(hw, (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ ice_free(hw, (qi)->ring.dma_head); \
+} while (0)
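+
+/* For example, ICE_FREE_CQ_BUFS(hw, cq, sq) walks the num_sq_entries buffer
+ * info entries in sq.r.sq_bi, freeing any with a mapped DMA address, then
+ * frees the command buffer list (if present) and the DMA head allocation.
+ */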
+
+/**
+ * ice_shutdown_sq - shutdown the Control ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for the Control Transmit Queue
+ */
+static enum ice_status
+ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code = ICE_SUCCESS;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_acquire_lock(&cq->sq_lock);
+
+ if (!cq->sq.count) {
+ ret_code = ICE_ERR_NOT_READY;
+ goto shutdown_sq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, cq->sq.head, 0);
+ wr32(hw, cq->sq.tail, 0);
+ wr32(hw, cq->sq.len, 0);
+ wr32(hw, cq->sq.bal, 0);
+ wr32(hw, cq->sq.bah, 0);
+
+ cq->sq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers and the ring itself */
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
+ ice_free_cq_ring(hw, &cq->sq);
+
+shutdown_sq_out:
+ ice_release_lock(&cq->sq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_aq_ver_check - Check the reported AQ API version.
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the driver should load on a given AQ API version.
+ *
+ * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
+ */
+static bool ice_aq_ver_check(struct ice_hw *hw)
+{
+ if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ /* Major API version is newer than expected, don't load */
+ ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+ return false;
+ } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
+ if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ } else {
+ /* Major API version is older than expected, log a warning */
+ ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ }
+ return true;
+}
+
+/**
+ * ice_shutdown_rq - shutdown Control ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for the Control Receive Queue
+ */
+static enum ice_status
+ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code = ICE_SUCCESS;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_acquire_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ret_code = ICE_ERR_NOT_READY;
+ goto shutdown_rq_out;
+ }
+
+ /* Stop Control Queue processing */
+ wr32(hw, cq->rq.head, 0);
+ wr32(hw, cq->rq.tail, 0);
+ wr32(hw, cq->rq.len, 0);
+ wr32(hw, cq->rq.bal, 0);
+ wr32(hw, cq->rq.bah, 0);
+
+ /* set rq.count to 0 to indicate uninitialized queue */
+ cq->rq.count = 0;
+
+ /* free ring buffers and the ring itself */
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
+ ice_free_cq_ring(hw, &cq->rq);
+
+shutdown_rq_out:
+ ice_release_lock(&cq->rq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_idle_aq - stop ARQ/ATQ processing momentarily
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ wr32(hw, cq->sq.len, 0);
+ wr32(hw, cq->rq.len, 0);
+
+ ice_msec_delay(2, false);
+}
+
+/**
+ * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
+ * @hw: pointer to the hardware structure
+ */
+static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->adminq;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_aq_get_fw_ver(hw, NULL);
+ if (status)
+ goto init_ctrlq_free_rq;
+
+ if (!ice_aq_ver_check(hw)) {
+ status = ICE_ERR_FW_API_VER;
+ goto init_ctrlq_free_rq;
+ }
+
+ return ICE_SUCCESS;
+
+init_ctrlq_free_rq:
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
+ return status;
+}
+
+/**
+ * ice_init_ctrlq - main initialization routine for any control Queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks
+ */
+static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+ enum ice_status ret_code;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ ice_adminq_init_regs(hw);
+ cq = &hw->adminq;
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ ice_mailbox_init_regs(hw);
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ cq->qtype = q_type;
+
+ /* verify input for valid configuration */
+ if (!cq->num_rq_entries || !cq->num_sq_entries ||
+ !cq->rq_buf_size || !cq->sq_buf_size) {
+ return ICE_ERR_CFG;
+ }
+
+ /* setup SQ command write back timeout */
+ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
+
+ /* allocate the ATQ */
+ ret_code = ice_init_sq(hw, cq);
+ if (ret_code)
+ return ret_code;
+
+ /* allocate the ARQ */
+ ret_code = ice_init_rq(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_sq;
+
+ /* success! */
+ return ICE_SUCCESS;
+
+init_ctrlq_free_sq:
+ ice_shutdown_sq(hw, cq);
+ return ret_code;
+}
+
+/**
+ * ice_init_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
+ */
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Init FW admin queue */
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (status)
+ return status;
+
+ status = ice_init_check_adminq(hw);
+ if (status)
+ return status;
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_init_lock(&cq->sq_lock);
+ ice_init_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
+
+/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ if (ice_check_sq_alive(hw, cq))
+ ice_aq_q_shutdown(hw, true);
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return;
+ }
+
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ /* Shutdown FW admin queue */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_destroy_lock(&cq->sq_lock);
+ ice_destroy_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+ /* shut down all the control queues first */
+ ice_shutdown_all_ctrlq(hw);
+
+ ice_destroy_ctrlq_locks(&hw->adminq);
+ ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
+/**
+ * ice_clean_sq - cleans Admin send queue (ATQ)
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * returns the number of free desc
+ */
+static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ struct ice_ctl_q_ring *sq = &cq->sq;
+ u16 ntc = sq->next_to_clean;
+ struct ice_sq_cd *details;
+ struct ice_aq_desc *desc;
+
+ desc = ICE_CTL_Q_DESC(*sq, ntc);
+ details = ICE_CTL_Q_DETAILS(*sq, ntc);
+
+ while (rd32(hw, cq->sq.head) != ntc) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
+ ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
+ ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
+ ntc++;
+ if (ntc == sq->count)
+ ntc = 0;
+ desc = ICE_CTL_Q_DESC(*sq, ntc);
+ details = ICE_CTL_Q_DETAILS(*sq, ntc);
+ }
+
+ sq->next_to_clean = ntc;
+
+ return ICE_CTL_Q_DESC_UNUSED(sq);
+}
+
+/**
+ * ice_debug_cq
+ * @hw: pointer to the hardware structure
+ * @desc: pointer to control queue descriptor
+ * @buf: pointer to command buffer
+ * @buf_len: max length of buf
+ *
+ * Dumps debug log about control command with descriptor contents.
+ */
+static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+{
+ struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ u16 datalen, flags;
+
+ if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
+ return;
+
+ if (!desc)
+ return;
+
+ datalen = LE16_TO_CPU(cq_desc->datalen);
+ flags = LE16_TO_CPU(cq_desc->flags);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(cq_desc->opcode), flags, datalen,
+ LE16_TO_CPU(cq_desc->retval));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(cq_desc->cookie_high),
+ LE32_TO_CPU(cq_desc->cookie_low));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(cq_desc->params.generic.param0),
+ LE32_TO_CPU(cq_desc->params.generic.param1));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(cq_desc->params.generic.addr_high),
+ LE32_TO_CPU(cq_desc->params.generic.addr_low));
+	/* Dump buffer iff 1) one exists and 2) it is either a response indicated
+ * by the DD and/or CMP flag set or a command with the RD flag set.
+ */
+ if (buf && cq_desc->datalen != 0 &&
+ (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
+ flags & ICE_AQ_FLAG_RD)) {
+ ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
+ MIN_T(u16, buf_len, datalen));
+ }
+}
+
+/**
+ * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ */
+bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+}
+
+/**
+ * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buf: buffer to use for indirect commands (or NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * This is the main send command routine for the ATQ. It runs the queue,
+ * cleans the queue, etc.
+ */
+static enum ice_status
+ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_dma_mem *dma_buf = NULL;
+ struct ice_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_sq_cd *details;
+ u32 total_delay = 0;
+ u16 retval = 0;
+ u32 val = 0;
+
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
+
+ cq->sq_last_status = ICE_AQ_RC_OK;
+
+ if (!cq->sq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send queue not initialized.\n");
+ status = ICE_ERR_AQ_EMPTY;
+ goto sq_send_command_error;
+ }
+
+ if ((buf && !buf_size) || (!buf && buf_size)) {
+ status = ICE_ERR_PARAM;
+ goto sq_send_command_error;
+ }
+
+ if (buf) {
+ if (buf_size > cq->sq_buf_size) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Invalid buffer size for Control Send queue: %d.\n",
+ buf_size);
+ status = ICE_ERR_INVAL_SIZE;
+ goto sq_send_command_error;
+ }
+
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
+ if (buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
+ }
+
+ val = rd32(hw, cq->sq.head);
+ if (val >= cq->num_sq_entries) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "head overrun at %d in the Control Send Queue ring\n",
+ val);
+ status = ICE_ERR_AQ_EMPTY;
+ goto sq_send_command_error;
+ }
+
+ details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
+ if (cd)
+ *details = *cd;
+ else
+ ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
+
+ /* Call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW/MBX; the function returns the
+ * number of desc available. The clean function called here could be
+ * called in a separate thread in case of asynchronous completions.
+ */
+ if (ice_clean_sq(hw, cq) == 0) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Error: Control Send Queue is full.\n");
+ status = ICE_ERR_AQ_FULL;
+ goto sq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
+ ICE_NONDMA_TO_DMA);
+
+ /* if buf is not NULL assume indirect command */
+ if (buf) {
+ dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
+ /* copy the user buf into the respective DMA buf */
+ ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buf_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.generic.addr_high =
+ CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
+ desc_on_ring->params.generic.addr_low =
+ CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
+ }
+
+ /* Debug desc and buffer */
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "ATQ: Control Send queue desc and buffer:\n");
+
+ ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
+
+ (cq->sq.next_to_use)++;
+ if (cq->sq.next_to_use == cq->sq.count)
+ cq->sq.next_to_use = 0;
+ wr32(hw, cq->sq.tail, cq->sq.next_to_use);
+
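+	/* Poll until firmware reports the descriptors as processed, bounded
+	 * by sq_cmd_timeout iterations of ICE_CTL_Q_SQ_CMD_USEC delays.
+	 */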
+ do {
+ if (ice_sq_done(hw, cq))
+ break;
+
+ ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
+ total_delay++;
+ } while (total_delay < cq->sq_cmd_timeout);
+
+ /* if ready, copy the desc back to temp */
+ if (ice_sq_done(hw, cq)) {
+ ice_memcpy(desc, desc_on_ring, sizeof(*desc),
+ ICE_DMA_TO_NONDMA);
+ if (buf) {
+ /* get returned length to copy */
+ u16 copy_size = LE16_TO_CPU(desc->datalen);
+
+ if (copy_size > buf_size) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Return len %d > than buf len %d\n",
+ copy_size, buf_size);
+ status = ICE_ERR_AQ_ERROR;
+ } else {
+ ice_memcpy(buf, dma_buf->va, copy_size,
+ ICE_DMA_TO_NONDMA);
+ }
+ }
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ LE16_TO_CPU(desc->opcode),
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if (!status && retval != ICE_AQ_RC_OK)
+ status = ICE_ERR_AQ_ERROR;
+ cq->sq_last_status = (enum ice_aq_err)retval;
+ }
+
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ATQ: desc and buffer writeback:\n");
+
+ ice_debug_cq(hw, (void *)desc, buf, buf_size);
+
+ /* save writeback AQ if requested */
+ if (details->wb_desc)
+ ice_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if (!cmd_completed) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
+
+sq_send_command_error:
+ return status;
+}
+
+/**
+ * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buf: buffer to use for indirect commands (or NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * This is the main send command routine for the ATQ. It runs the queue,
+ * cleans the queue, etc.
+ */
+enum ice_status
+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
+
+ ice_acquire_lock(&cq->sq_lock);
+ status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
+ ice_release_lock(&cq->sq_lock);
+
+ return status;
+}
+
+/**
+ * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ */
+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
+}
+
+/**
+ * ice_clean_rq_elem
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'.
+ */
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending)
+{
+ u16 ntc = cq->rq.next_to_clean;
+ enum ice_status ret_code = ICE_SUCCESS;
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ ice_acquire_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive queue not initialized.\n");
+ ret_code = ICE_ERR_AQ_EMPTY;
+ goto clean_rq_elem_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = ICE_ERR_AQ_NO_WORK;
+ goto clean_rq_elem_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = ICE_CTL_Q_DESC(cq->rq, ntc);
+ desc_idx = ntc;
+
+ cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & ICE_AQ_FLAG_ERR) {
+ ret_code = ICE_ERR_AQ_ERROR;
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ LE16_TO_CPU(desc->opcode),
+ cq->rq_last_status);
+ }
+ ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = MIN_T(u16, datalen, e->buf_len);
+ if (e->msg_buf && e->msg_len)
+ ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
+ e->msg_len, ICE_DMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
+
+ ice_debug_cq(hw, (void *)desc, e->msg_buf,
+ cq->rq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message size
+ */
+ bi = &cq->rq.r.rq_bi[ntc];
+ ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16(bi->size);
+ desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
+ desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, cq->rq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == cq->num_rq_entries)
+ ntc = 0;
+ cq->rq.next_to_clean = ntc;
+ cq->rq.next_to_use = ntu;
+
+clean_rq_elem_out:
+ /* Set pending if needed, unlock and return */
+ if (pending) {
+ /* re-read HW head to calculate actual pending messages */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
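+		/* if the head has wrapped behind ntc, add the ring size so
+		 * that the subtraction below still yields the number of
+		 * descriptors left to clean
+		 */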
+ *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+ }
+clean_rq_elem_err:
+ ice_release_lock(&cq->rq_lock);
+
+ return ret_code;
+}
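+
+/*
+ * A hypothetical receive-side polling loop, sketched for clarity; the
+ * event buffer setup and the surrounding variables are assumptions and
+ * not part of this file:
+ *
+ *	struct ice_rq_event_info event;
+ *	u16 pending = 0;
+ *
+ *	event.buf_len = cq->rq_buf_size;
+ *	event.msg_buf = msg_buf;
+ *	do {
+ *		status = ice_clean_rq_elem(hw, cq, &event, &pending);
+ *	} while (status == ICE_SUCCESS && pending);
+ */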
diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h
new file mode 100644
index 000000000000..05e100d405d0
--- /dev/null
+++ b/sys/dev/ice/ice_controlq.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_CONTROLQ_H_
+#define _ICE_CONTROLQ_H_
+
+#include "ice_adminq_cmd.h"
+
+/* Maximum buffer lengths for all control queue types */
+#define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
+
+#define ICE_CTL_Q_DESC(R, i) \
+ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+
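+/* Number of descriptors free for new commands in ring R: the ring size is
+ * added back in when next_to_clean has not passed next_to_use, and one slot
+ * is always left unused so a full ring can be told apart from an empty one.
+ */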
+#define ICE_CTL_Q_DESC_UNUSED(R) \
+ (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* Defines that help manage the driver vs FW API checks.
+ * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
+ */
+#define EXP_FW_API_VER_BRANCH 0x00
+#define EXP_FW_API_VER_MAJOR 0x01
+#define EXP_FW_API_VER_MINOR 0x05
+
+/* Different control queue types: These are mainly for SW consumption. */
+enum ice_ctl_q {
+ ICE_CTL_Q_UNKNOWN = 0,
+ ICE_CTL_Q_ADMIN,
+ ICE_CTL_Q_MAILBOX,
+};
+
+/* Control Queue timeout settings - max delay 250ms */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+
+struct ice_ctl_q_ring {
+ void *dma_head; /* Virtual address to DMA head */
+ struct ice_dma_mem desc_buf; /* descriptor ring memory */
+ void *cmd_buf; /* command buffer memory */
+
+ union {
+ struct ice_dma_mem *sq_bi;
+ struct ice_dma_mem *rq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+ u32 len_mask;
+ u32 len_ena_mask;
+ u32 head_mask;
+};
+
+/* sq transaction details */
+struct ice_sq_cd {
+ struct ice_aq_desc *wb_desc;
+};
+
+#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
+
+/* rq event information */
+struct ice_rq_event_info {
+ struct ice_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Control Queue information */
+struct ice_ctl_q_info {
+ enum ice_ctl_q qtype;
+ enum ice_aq_err rq_last_status; /* last status on receive queue */
+ struct ice_ctl_q_ring rq; /* receive queue */
+ struct ice_ctl_q_ring sq; /* send queue */
+ u32 sq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_rq_entries; /* receive queue depth */
+ u16 num_sq_entries; /* send queue depth */
+ u16 rq_buf_size; /* receive queue buffer size */
+ u16 sq_buf_size; /* send queue buffer size */
+ enum ice_aq_err sq_last_status; /* last status on send queue */
+ struct ice_lock sq_lock; /* Send queue lock */
+ struct ice_lock rq_lock; /* Receive queue lock */
+};
+
+#endif /* _ICE_CONTROLQ_H_ */
diff --git a/sys/dev/ice/ice_dcb.c b/sys/dev/ice/ice_dcb.c
new file mode 100644
index 000000000000..a6992de2db80
--- /dev/null
+++ b/sys/dev/ice/ice_dcb.c
@@ -0,0 +1,1697 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_dcb.h"
+
+/**
+ * ice_aq_get_lldp_mib
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet). (0x0A00)
+ */
+enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_get_mib *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_get_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
+
+ cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
+ cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M;
+
+ desc.datalen = CPU_TO_LE16(buf_size);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ if (local_len)
+ *local_len = LE16_TO_CPU(cmd->local_len);
+ if (remote_len)
+ *remote_len = LE16_TO_CPU(cmd->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_update: Enable or Disable event posting
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes (0x0A01)
+ */
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_mib_change *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_event;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
+
+ if (!ena_update)
+ cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_add_delete_lldp_tlv
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge
+ * @add_lldp_tlv: add (true) or delete (false) TLV
+ * @buf: buffer with TLV to add or delete
+ * @buf_size: length of the buffer
+ * @tlv_len: length of the TLV to be added/deleted
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cd: pointer to command details structure or NULL
+ *
+ * (Add tlv)
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type;
+ * it is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return, firmware writes the complete LLDP MIB with the newly
+ * added TLV to the response buffer. (0x0A02)
+ *
+ * (Delete tlv)
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer. (0x0A04)
+ */
+enum ice_status
+ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
+ void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_add_delete_tlv *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (tlv_len == 0)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.lldp_add_delete_tlv;
+
+ if (add_lldp_tlv)
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_add_tlv);
+ else
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_lldp_delete_tlv);
+
+ desc.flags |= CPU_TO_LE16((u16)(ICE_AQ_FLAG_RD));
+
+ cmd->type = ((bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && mib_len)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+
+ return status;
+}
+
+/**
+ * ice_aq_update_lldp_tlv
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge
+ * @buf: buffer with TLV to update
+ * @buf_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV. (0x0A03)
+ */
+enum ice_status
+ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
+ u16 buf_size, u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_update_tlv *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_update_tlv;
+
+ if (offset == 0 || old_len == 0 || new_len == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_update_tlv);
+
+ desc.flags |= CPU_TO_LE16((u16)(ICE_AQ_FLAG_RD));
+
+ cmd->type = ((bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && mib_len)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+
+ return status;
+}
+
+/**
+ * ice_aq_stop_lldp
+ * @hw: pointer to the HW struct
+ * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
+ * False if LLDP Agent needs to be Stopped
+ * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across
+ * reboots
+ * @cd: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent (0x0A05)
+ */
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_stop;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
+
+ if (shutdown_lldp_agent)
+ cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_start_lldp
+ * @hw: pointer to the HW struct
+ * @persist: True if Start of LLDP Agent needs to be persistent across reboots
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports. (0x0A06)
+ */
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_start *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_start;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
+
+ cmd->command = ICE_AQ_LLDP_AGENT_START;
+
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = CPU_TO_LE16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_get_dcbx_status
+ * @hw: pointer to the HW struct
+ *
+ * Get the DCBX status from the Firmware
+ */
+u8 ice_get_dcbx_status(struct ice_hw *hw)
+{
+ u32 reg;
+
+ reg = rd32(hw, PRTDCB_GENS);
+ return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
+ PRTDCB_GENS_DCBX_STATUS_S);
+}
+
+/**
+ * ice_parse_ieee_ets_common_tlv
+ * @buf: Data buffer to be parsed for ETS CFG/REC data
+ * @ets_cfg: Container to store parsed data
+ *
+ * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV
+ */
+static void
+ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ ets_cfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
+ ICE_IEEE_ETS_PRIO_1_S);
+ ets_cfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
+ ICE_IEEE_ETS_PRIO_0_S);
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16|
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ ets_cfg->tcbwtable[i] = buf[offset];
+ ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++];
+ }
+}
+
+/**
+ * ice_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
+ ICE_IEEE_ETS_WILLING_S);
+ etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
+ etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
+ ICE_IEEE_ETS_MAXTC_S);
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec);
+}
+
+/**
+ * ice_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
+ ICE_IEEE_PFC_WILLING_S);
+ dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
+ dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
+ ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.pfcena = buf[1];
+}
+
+/**
+ * ice_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ */
+static void
+ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 offset = 0;
+ u16 typelen;
+ int i = 0;
+ u16 len;
+ u8 *buf;
+
+ typelen = NTOHS(tlv->typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ buf = tlv->tlvinfo;
+
+	/* Remove sizeof(ouisubtype) and the reserved byte from len;
+	 * the remaining len divided by 3 is the number of APP TLV entries.
+	 */
+ len -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
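+	/* For example, the three octets 0x61 0x89 0x06 decode to priority 3,
+	 * selector 1 (Ethertype in IEEE 802.1Qaz) and protocol ID 0x8906,
+	 * the FCoE Ethertype.
+	 */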
+ while (offset < len) {
+ dcbcfg->app[i].priority = ((buf[offset] &
+ ICE_IEEE_APP_PRIO_M) >>
+ ICE_IEEE_APP_PRIO_S);
+ dcbcfg->app[i].selector = ((buf[offset] &
+ ICE_IEEE_APP_SEL_M) >>
+ ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * ice_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ switch (subtype) {
+ case ICE_IEEE_SUBTYPE_ETS_CFG:
+ ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_ETS_REC:
+ ice_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_PFC_CFG:
+ ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_APP_PRI:
+ ice_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ */
+static void
+ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ etscfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ etscfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT)
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ else
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * ice_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ */
+static void
+ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcena = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * ice_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ */
+static void
+ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, typelen, offset = 0;
+ struct ice_cee_app_prio *app;
+ u8 i;
+
+ typelen = NTOHS(tlv->hdr.typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+
+ dcbcfg->numapps = len / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > ICE_DCBX_MAX_APPS)
+ dcbcfg->numapps = ICE_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < ICE_MAX_USER_PRIORITY; up++)
+ if (app->prio_map & BIT(up))
+ break;
+
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M);
+ switch (selector) {
+ case ICE_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE;
+ break;
+ case ICE_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].prot_id = NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * ice_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u16 len, tlvlen, typelen;
+ u32 ouisubtype;
+
+ ouisubtype = NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ /* Return if not CEE DCBX */
+ if (subtype != ICE_CEE_DCBX_TYPE)
+ return;
+
+ typelen = NTOHS(tlv->typelen);
+ tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
+ sizeof(struct ice_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) {
+ u16 sublen;
+
+ typelen = NTOHS(sub_tlv->hdr.typelen);
+ sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
+ ICE_LLDP_TLV_TYPE_S);
+ switch (subtype) {
+ case ICE_CEE_SUBTYPE_PG_CFG:
+ ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_PFC_CFG:
+ ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_APP_PRI:
+ ice_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct ice_cee_feat_tlv *)
+ ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * ice_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently the IEEE 802.1Qaz and CEE DCBX organization TLVs are parsed;
+ * all other organization specific TLVs are ignored
+ */
+static void
+ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = NTOHL(tlv->ouisubtype);
+ oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ switch (oui) {
+ case ICE_IEEE_8021QAZ_OUI:
+ ice_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case ICE_CEE_DCBX_OUI:
+ ice_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_lldp_to_dcb_cfg
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ */
+enum ice_status
+ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_lldp_org_tlv *tlv;
+ enum ice_status ret = ICE_SUCCESS;
+ u16 offset = 0;
+ u16 typelen;
+ u16 type;
+ u16 len;
+
+ if (!lldpmib || !dcbcfg)
+ return ICE_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HEADER_LEN;
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelen = NTOHS(tlv->typelen);
+ type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ offset += sizeof(typelen) + len;
+
+ /* END TLV or beyond LLDPDU size */
+ if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE)
+ break;
+
+ switch (type) {
+ case ICE_TLV_TYPE_ORG:
+ ice_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @mib_type: MIB type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the firmware
+ */
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ enum ice_status ret;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
+ ICE_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (ret == ICE_SUCCESS)
+ /* Parse LLDP MIB to get DCB configuration */
+ ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
+
+ ice_free(hw, lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the HW struct
+ * @tcmap: TC map for request/release any ignore PFC condition
+ * @request: request (true) or release (false) ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cd: pointer to command details structure or NULL
+ *
+ * This sends out request/release to ignore PFC condition for a TC.
+ * It will return the TCs for which PFC is currently ignored. (0x0301)
+ */
+enum ice_status
+ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pfc_ignore *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.pfc_ignore;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_pfc_ignore);
+
+ if (request)
+ cmd->cmd_flags = ICE_AQC_PFC_IGNORE_SET;
+
+ cmd->tc_bitmap = tcmap;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status && tcmap_ret)
+ *tcmap_ret = cmd->tc_bitmap;
+
+ return status;
+}
+
+/**
+ * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW
+ * @hw: pointer to the HW struct
+ * @start_dcbx_agent: True if DCBX Agent needs to be started
+ * False if DCBX Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBX agent status
+ * True if DCBX Agent is active
+ * False if DCBX Agent is stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded DCBX agent. Even when this wrapper function
+ * returns ICE_SUCCESS, the caller needs to check whether the state reported
+ * back in dcbx_agent_status matches the requested one, and react accordingly.
+ * (0x0A09)
+ */
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop_start_specific_agent *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+ u16 opcode;
+
+ cmd = &desc.params.lldp_agent_ctrl;
+
+ opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+ if (start_dcbx_agent)
+ cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ *dcbx_agent_status = false;
+
+ if (status == ICE_SUCCESS &&
+ cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
+ *dcbx_agent_status = true;
+
+ return status;
+}
+
+/**
+ * ice_aq_get_cee_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware (0x0A07)
+ */
+enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd);
+}
+
+/**
+ * ice_aq_query_pfc_mode - Query PFC mode
+ * @hw: pointer to the HW struct
+ * @pfcmode_ret: Return PFC mode
+ * @cd: pointer to command details structure or NULL
+ *
+ * This returns an indication of whether DSCP-based PFC or VLAN-based PFC
+ * is enabled. (0x0302)
+ */
+enum ice_status
+ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_pfc_mode);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status)
+ *pfcmode_ret = cmd->pfc_mode;
+
+ return status;
+}
+
+/**
+ * ice_aq_set_pfc_mode - Set PFC mode
+ * @hw: pointer to the HW struct
+ * @pfcmode_set: set-value of PFC mode
+ * @pfcmode_ret: return value of PFC mode, written by FW
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ call configures the PFC mode to DSCP-based PFC or to
+ * VLAN-based PFC. (0x0303)
+ */
+enum ice_status
+ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (pfcmode_set > ICE_AQC_PFC_DSCP_BASED_PFC)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
+
+ cmd->pfc_mode = pfcmode_set;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status)
+ *pfcmode_ret = cmd->pfc_mode;
+
+ return status;
+}
+
+/**
+ * ice_aq_set_dcb_parameters - Set DCB parameters
+ * @hw: pointer to the HW struct
+ * @dcb_enable: True if DCB configuration needs to be applied
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ command tells the FW whether or not to apply the default DCB
+ * configuration when the link comes up. (0x0306)
+ */
+enum ice_status
+ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_dcb_params *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_dcb_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_dcb_params);
+
+ cmd->valid_flags = ICE_AQC_LINK_UP_DCB_CFG_VALID;
+ if (dcb_enable)
+ cmd->cmd_flags = ICE_AQC_LINK_UP_DCB_CFG;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_cee_to_dcb_cfg
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ */
+static void
+ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ u16 ice_app_prot_id_type;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ dcbcfg->etscfg.prio_table[i * 2] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ dcbcfg->etscfg.prio_table[i * 2 + 1] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ }
+
+ ice_for_each_traffic_class(i) {
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+
+ app_index = 0;
+ for (i = 0; i < 3; i++) {
+ if (i == 0) {
+ /* FCoE APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE;
+ } else if (i == 1) {
+ /* iSCSI APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
+ ice_app_sel_type = ICE_APP_SEL_TCPIP;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+ } else {
+ /* FIP APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FIP;
+ }
+
+ status = (tlv_status & ice_aqc_cee_status_mask) >>
+ ice_aqc_cee_status_shift;
+ err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & ICE_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE/iSCSI/FIP APP if Error is False and
+ * Oper/Sync is True
+ */
+ if (!err && sync && oper) {
+ dcbcfg->app[app_index].priority =
+ (app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift;
+ dcbcfg->app[app_index].selector = ice_app_sel_type;
+ dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
+ app_index++;
+ }
+ }
+
+ dcbcfg->numapps = app_index;
+}
+
+/**
+ * ice_get_ieee_or_cee_dcb_cfg
+ * @pi: port information structure
+ * @dcbx_mode: mode of DCBX (IEEE or CEE)
+ *
+ * Get IEEE or CEE mode DCB configuration from the Firmware
+ */
+STATIC enum ice_status
+ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = NULL;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ if (dcbx_mode == ICE_DCBX_MODE_IEEE)
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ else if (dcbx_mode == ICE_DCBX_MODE_CEE)
+ dcbx_cfg = &pi->desired_dcbx_cfg;
+
+ /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
+ * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
+ */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ dcbx_cfg = &pi->remote_dcbx_cfg;
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ ret = ICE_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * ice_get_dcb_cfg
+ * @pi: port information structure
+ *
+ * Get DCB configuration from the Firmware
+ */
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct ice_dcbx_cfg *dcbx_cfg;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
+ if (ret == ICE_SUCCESS) {
+ /* CEE mode */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbx_cfg->tlv_status = LE32_TO_CPU(cee_cfg.tlv_status);
+ ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+		/* CEE mode not enabled; try querying IEEE data */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_init_dcb
+ * @hw: pointer to the HW struct
+ * @enable_mib_change: enable MIB change event
+ *
+ * Update DCB configuration from the Firmware
+ */
+enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret = ICE_SUCCESS;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ pi->is_sw_lldp = true;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ /* Get current DCBX configuration */
+ ret = ice_get_dcb_cfg(pi);
+ if (ret)
+ return ret;
+ pi->is_sw_lldp = false;
+ } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ return ICE_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ if (enable_mib_change) {
+ ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
+ if (ret)
+ pi->is_sw_lldp = true;
+ }
+
+ return ret;
+}
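+
+/*
+ * A hypothetical initialization sequence, sketched for clarity (the caller
+ * shown here is an assumption, not part of this file):
+ *
+ *	status = ice_init_dcb(hw, true);
+ *	if (status == ICE_SUCCESS && !hw->port_info->is_sw_lldp) {
+ *		... the firmware LLDP/DCBX agent is active and its current
+ *		    configuration has been read into hw->port_info ...
+ *	}
+ */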
+
+/**
+ * ice_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_mib: enable/disable MIB change event
+ *
+ * Configure (disable/enable) MIB
+ */
+enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
+ return ICE_ERR_NOT_READY;
+
+ ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
+ if (!ret)
+ pi->is_sw_lldp = !ena_mib;
+
+ return ret;
+}
+
+/**
+ * ice_add_ieee_ets_common_tlv
+ * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
+ * @ets_cfg: Container for ice_dcb_ets_cfg data
+ *
+ * Populate the TLV buffer with ice_dcb_ets_cfg data
+ */
+static void
+ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 priority0, priority1;
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ priority0 = ets_cfg->prio_table[i * 2] & 0xF;
+ priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ buf[offset] = ets_cfg->tcbwtable[i];
+ buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u8 maxtcwilling = 0;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = HTONS(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S);
+ maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+ buf[0] = maxtcwilling;
+
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etsrec;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = HTONS(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+
+ /* First Octet is reserved */
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etsrec);
+}
+
+/**
+ * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = HTONS(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(ICE_IEEE_PFC_WILLING_S);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(ICE_IEEE_PFC_MBC_S);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
+}
+
+/**
+ * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store which holds the APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ */
+static void
+ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 typelen, len, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+ /* len includes size of ouisubtype + 1 reserved + 3*numapps */
+ len = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF));
+ tlv->typelen = HTONS(typelen);
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: Fill TLV data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: Type of IEEE TLV
+ *
+ * Add tlv information
+ */
+static void
+ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the buffer that receives the LLDP MIB
+ * @miblen: filled in with the length of the resulting LLDP MIB
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to MIB format
+ */
+void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, offset = 0, tlvid = ICE_TLV_ID_START;
+ struct ice_lldp_org_tlv *tlv;
+ u16 typelen;
+
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelen = NTOHS(tlv->typelen);
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ if (len)
+ offset += len + 2;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU ||
+ offset > ICE_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (len)
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+ *miblen = offset;
+}
+
+/**
+ * ice_set_dcb_cfg - Set the local LLDP MIB to FW
+ * @pi: port information structure
+ *
+ * Set DCB configuration to the Firmware
+ */
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+{
+ u8 mib_type, *lldpmib = NULL;
+ struct ice_dcbx_cfg *dcbcfg;
+ enum ice_status ret;
+ struct ice_hw *hw;
+ u16 miblen;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* update the HW local config */
+ dcbcfg = &pi->local_dcbx_cfg;
+ /* Allocate the LLDPDU */
+ lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
+
+ ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
+ NULL);
+
+ ice_free(hw, lldpmib);
+
+ return ret;
+}
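+
+/*
+ * Sketch of a hypothetical caller: adjust the local DCBX configuration and
+ * push it to firmware (the PFC value chosen here is only an example):
+ *
+ *	pi->local_dcbx_cfg.pfc.pfcena = 0x01;
+ *	status = ice_set_dcb_cfg(pi);
+ */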
+
+/**
+ * ice_aq_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * query current port ETS configuration
+ */
+enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_query_port_ets *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ cmd = &desc.params.port_ets;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
+ cmd->port_teid = pi->root->info.node_teid;
+
+ status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
+ return status;
+}
+
+/**
+ * ice_update_port_tc_tree_cfg - update TC tree configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ *
+ * update the SW DB with the new TC changes
+ */
+enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_aqc_get_elem elem;
+ enum ice_status status = ICE_SUCCESS;
+ u32 teid1, teid2;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ /* suspend the missing TC nodes */
+ for (i = 0; i < pi->root->num_children; i++) {
+ teid1 = LE32_TO_CPU(pi->root->children[i]->info.node_teid);
+ ice_for_each_traffic_class(j) {
+ teid2 = LE32_TO_CPU(buf->tc_node_teid[j]);
+ if (teid1 == teid2)
+ break;
+ }
+ if (j < ICE_MAX_TRAFFIC_CLASS)
+ continue;
+ /* TC is missing */
+ pi->root->children[i]->in_use = false;
+ }
+ /* add the new TC nodes */
+ ice_for_each_traffic_class(j) {
+ teid2 = LE32_TO_CPU(buf->tc_node_teid[j]);
+ if (teid2 == ICE_INVAL_TEID)
+ continue;
+ /* Is it already present in the tree ? */
+ for (i = 0; i < pi->root->num_children; i++) {
+ tc_node = pi->root->children[i];
+ if (!tc_node)
+ continue;
+ teid1 = LE32_TO_CPU(tc_node->info.node_teid);
+ if (teid1 == teid2) {
+ tc_node->tc_num = j;
+ tc_node->in_use = true;
+ break;
+ }
+ }
+ if (i < pi->root->num_children)
+ continue;
+ /* new TC */
+ status = ice_sched_query_elem(pi->hw, teid2, &elem);
+ if (!status)
+ status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ if (status)
+ break;
+ /* update the TC number */
+ node = ice_sched_find_node_by_teid(pi->root, teid2);
+ if (node)
+ node->tc_num = j;
+ }
+ return status;
+}
+
+/**
+ * ice_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * query current port ETS configuration and update the
+ * SW DB with the TC changes
+ */
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
+ if (!status)
+ status = ice_update_port_tc_tree_cfg(pi, buf);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
diff --git a/sys/dev/ice/ice_dcb.h b/sys/dev/ice/ice_dcb.h
new file mode 100644
index 000000000000..9c1522a35c8d
--- /dev/null
+++ b/sys/dev/ice/ice_dcb.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_DCB_H_
+#define _ICE_DCB_H_
+
+#include "ice_type.h"
+#include "ice_common.h"
+
+#define ICE_DCBX_OFFLOAD_DIS 0
+#define ICE_DCBX_OFFLOAD_ENABLED 1
+
+#define ICE_DCBX_STATUS_NOT_STARTED 0
+#define ICE_DCBX_STATUS_IN_PROGRESS 1
+#define ICE_DCBX_STATUS_DONE 2
+#define ICE_DCBX_STATUS_MULTIPLE_PEERS 3
+#define ICE_DCBX_STATUS_DIS 7
+
+#define ICE_TLV_TYPE_END 0
+#define ICE_TLV_TYPE_ORG 127
+
+#define ICE_IEEE_8021QAZ_OUI 0x0080C2
+#define ICE_IEEE_SUBTYPE_ETS_CFG 9
+#define ICE_IEEE_SUBTYPE_ETS_REC 10
+#define ICE_IEEE_SUBTYPE_PFC_CFG 11
+#define ICE_IEEE_SUBTYPE_APP_PRI 12
+
+#define ICE_CEE_DCBX_OUI 0x001B21
+#define ICE_CEE_DCBX_TYPE 2
+
+#define ICE_CEE_SUBTYPE_CTRL 1
+#define ICE_CEE_SUBTYPE_PG_CFG 2
+#define ICE_CEE_SUBTYPE_PFC_CFG 3
+#define ICE_CEE_SUBTYPE_APP_PRI 4
+
+#define ICE_CEE_MAX_FEAT_TYPE 3
+#define ICE_LLDP_ADMINSTATUS_DIS 0
+#define ICE_LLDP_ADMINSTATUS_ENA_RX 1
+#define ICE_LLDP_ADMINSTATUS_ENA_TX 2
+#define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define ICE_LLDP_TLV_LEN_S 0
+#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
+#define ICE_LLDP_TLV_TYPE_S 9
+#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
+#define ICE_LLDP_TLV_SUBTYPE_S 0
+#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
+#define ICE_LLDP_TLV_OUI_S 8
+#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
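
As an illustration of how the shift/mask pairs above are intended to be used (editorial sketch, not part of this header; the helper names are hypothetical, and the caller is assumed to have already converted the __be16 typelen field to host order):

	static inline u16 ice_lldp_tlv_type(u16 typelen)
	{
		return (typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S;
	}

	static inline u16 ice_lldp_tlv_len(u16 typelen)
	{
		return (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	}
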
+
+/* Defines for IEEE ETS TLV */
+#define ICE_IEEE_ETS_MAXTC_S 0
+#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
+#define ICE_IEEE_ETS_CBS_S 6
+#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
+#define ICE_IEEE_ETS_WILLING_S 7
+#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
+#define ICE_IEEE_ETS_PRIO_0_S 0
+#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
+#define ICE_IEEE_ETS_PRIO_1_S 4
+#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
+#define ICE_CEE_PGID_PRIO_0_S 0
+#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
+#define ICE_CEE_PGID_PRIO_1_S 4
+#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
+#define ICE_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define ICE_IEEE_TSA_STRICT 0
+#define ICE_IEEE_TSA_CBS 1
+#define ICE_IEEE_TSA_ETS 2
+#define ICE_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define ICE_IEEE_PFC_CAP_S 0
+#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
+#define ICE_IEEE_PFC_MBC_S 6
+#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
+#define ICE_IEEE_PFC_WILLING_S 7
+#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
+
+/* Defines for IEEE APP TLV */
+#define ICE_IEEE_APP_SEL_S 0
+#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
+#define ICE_IEEE_APP_PRIO_S 5
+#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
+
+/* TLV definitions for preparing MIB */
+#define ICE_TLV_ID_CHASSIS_ID 0
+#define ICE_TLV_ID_PORT_ID 1
+#define ICE_TLV_ID_TIME_TO_LIVE 2
+#define ICE_IEEE_TLV_ID_ETS_CFG 3
+#define ICE_IEEE_TLV_ID_ETS_REC 4
+#define ICE_IEEE_TLV_ID_PFC_CFG 5
+#define ICE_IEEE_TLV_ID_APP_PRI 6
+#define ICE_TLV_ID_END_OF_LLDPPDU 7
+#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+
+#define ICE_IEEE_ETS_TLV_LEN 25
+#define ICE_IEEE_PFC_TLV_LEN 6
+#define ICE_IEEE_APP_TLV_LEN 11
+
+#pragma pack(1)
+/* IEEE 802.1AB LLDP TLV structure */
+struct ice_lldp_generic_tlv {
+ __be16 typelen;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct ice_lldp_org_tlv {
+ __be16 typelen;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+struct ice_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct ice_cee_ctrl_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct ice_cee_feat_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define ICE_CEE_FEAT_TLV_ENA_M 0x80
+#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
+#define ICE_CEE_FEAT_TLV_ERR_M 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+#pragma pack(1)
+struct ice_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define ICE_CEE_APP_SELECTOR_M 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
+
+/* TODO: The structures below define LLDP/DCBX-related variables and
+ * statistics, but we still need to determine how to retrieve the
+ * required information from the firmware in order to use them
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct ice_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct ice_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
+ void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
+ u16 buf_size, u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
+ struct ice_sq_cd *cd);
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+u8 ice_get_dcbx_status(struct ice_hw *hw);
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg);
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cmd_details);
+enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf);
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd);
+enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd);
+#endif /* _ICE_DCB_H_ */
diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h
new file mode 100644
index 000000000000..a110133823df
--- /dev/null
+++ b/sys/dev/ice/ice_devids.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_DEVIDS_H_
+#define _ICE_DEVIDS_H_
+
+/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E810-C for backplane */
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
+/* Intel(R) Ethernet Controller E810-C for QSFP */
+#define ICE_DEV_ID_E810C_QSFP 0x1592
+/* Intel(R) Ethernet Controller E810-C for SFP */
+#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E822-C for backplane */
+#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection E822-C for QSFP */
+#define ICE_DEV_ID_E822C_QSFP 0x1891
+/* Intel(R) Ethernet Connection E822-C for SFP */
+#define ICE_DEV_ID_E822C_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
+
+#endif /* _ICE_DEVIDS_H_ */
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
new file mode 100644
index 000000000000..4eb312ae02d9
--- /dev/null
+++ b/sys/dev/ice/ice_drv_info.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_drv_info.h
+ * @brief device IDs and driver version
+ *
+ * Contains the device ID tables and the driver version string.
+ *
+ * This file contains static or constant definitions intended to be included
+ * exactly once in the main driver interface file. It implicitly depends on
+ * the main driver header file.
+ *
+ * These definitions could be placed directly in the interface file, but are
+ * kept separate for organizational purposes.
+ */
+
+/**
+ * @var ice_driver_version
+ * @brief driver version string
+ *
+ * Driver version information, used for display as part of an informational
+ * sysctl, and as part of the driver information sent to the firmware at load.
+ *
+ * @var ice_major_version
+ * @brief driver major version number
+ *
+ * @var ice_minor_version
+ * @brief driver minor version number
+ *
+ * @var ice_patch_version
+ * @brief driver patch version number
+ *
+ * @var ice_rc_version
+ * @brief driver release candidate version number
+ */
+const char ice_driver_version[] = "0.26.0-k";
+const uint8_t ice_major_version = 0;
+const uint8_t ice_minor_version = 26;
+const uint8_t ice_patch_version = 0;
+const uint8_t ice_rc_version = 0;
+
+#define PVIDV(vendor, devid, name) \
+ PVID(vendor, devid, name " - 0.26.0-k")
+#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.0-k")
+
+/**
+ * @var ice_vendor_info_array
+ * @brief array of PCI devices supported by this driver
+ *
+ * Array of PCI devices which are supported by this driver. Used to determine
+ * whether this driver should attach to a given device. This information is
+ * also exported as part of the module information for other tools to analyze.
+ *
+ * @remark Each type of device ID needs to be listed from most-specific entry
+ * to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before
+ * the PVIDV() for it.
+ */
+static pci_vendor_info_t ice_vendor_info_array[] = {
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE,
+ "Intel(R) Ethernet Controller E810-C for backplane"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0005, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0006, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0007, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0008, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ "Intel(R) Ethernet Controller E810-C for QSFP"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0005, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0006, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0007, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0008, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0009, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ "Intel(R) Ethernet Controller E810-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE,
+ "Intel(R) Ethernet Connection E822-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP,
+ "Intel(R) Ethernet Connection E822-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP,
+ "Intel(R) Ethernet Connection E822-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T,
+ "Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII,
+ "Intel(R) Ethernet Connection E822-C 1GbE"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE,
+ "Intel(R) Ethernet Connection E822-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP,
+ "Intel(R) Ethernet Connection E822-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T,
+ "Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII,
+ "Intel(R) Ethernet Connection E822-L 1GbE"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE,
+ "Intel(R) Ethernet Connection E823-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP,
+ "Intel(R) Ethernet Connection E823-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP,
+ "Intel(R) Ethernet Connection E823-L for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T,
+ "Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE,
+ "Intel(R) Ethernet Connection E823-L 1GbE"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE,
+ "Intel(R) Ethernet Controller E810-XXV for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP,
+ "Intel(R) Ethernet Controller E810-XXV for QSFP"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0005, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0006, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ "Intel(R) Ethernet Controller E810-XXV for SFP"),
+ PVID_END
+};
+
diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h
new file mode 100644
index 000000000000..f5ea542d8626
--- /dev/null
+++ b/sys/dev/ice/ice_features.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_features.h
+ * @brief device feature controls
+ *
+ * Contains a list of various device features which could be enabled or
+ * disabled.
+ */
+
+#ifndef _ICE_FEATURES_H_
+#define _ICE_FEATURES_H_
+
+/**
+ * @enum feat_list
+ * @brief driver feature enumeration
+ *
+ * Enumeration of possible device driver features. Each value represents
+ * a different feature which can be enabled or disabled.
+ *
+ * The driver stores a bitmap of the features that the device and OS are
+ * capable of, as well as another bitmap indicating which features are
+ * currently enabled for that device.
+ */
+enum feat_list {
+ ICE_FEATURE_SRIOV,
+ ICE_FEATURE_RSS,
+ ICE_FEATURE_NETMAP,
+ ICE_FEATURE_FDIR,
+ ICE_FEATURE_MSI,
+ ICE_FEATURE_MSIX,
+ ICE_FEATURE_RDMA,
+ ICE_FEATURE_SAFE_MODE,
+ ICE_FEATURE_LENIENT_LINK_MODE,
+ ICE_FEATURE_DEFAULT_OVERRIDE,
+ /* Must be last entry */
+ ICE_FEATURE_COUNT
+};
+
+/**
+ * ice_disable_unsupported_features - Disable features not enabled by OS
+ * @bitmap: the feature bitmap
+ *
+ * Check for OS support of various driver features. Clear the feature bit for
+ * any feature which is not enabled by the OS. This should be called early
+ * during driver attach after setting up the feature bitmap.
+ *
+ * @remark the bitmap parameter is marked as unused in order to avoid an
+ * unused parameter warning in case none of the features need to be disabled.
+ */
+static inline void
+ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
+{
+ ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
+#ifndef DEV_NETMAP
+ ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
+#endif
+}
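
To make the capable/enabled bitmap split described above concrete, here is a minimal editorial sketch (not part of this patch). It uses ice_set_bit()/ice_is_bit_set() from ice_bitops.h and assumes hypothetical feat_cap/feat_en bitmaps in the driver softc; the actual field names may differ:

	/* During attach: record what the device and OS are capable of. */
	ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
	ice_set_bit(ICE_FEATURE_NETMAP, sc->feat_cap);
	ice_disable_unsupported_features(sc->feat_cap);

	/* Enable a subset of the capable features... */
	ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);

	/* ...and test the enabled bitmap wherever the feature matters. */
	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) {
		/* configure RSS */
	}
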
+
+#endif
diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c
new file mode 100644
index 000000000000..7ad780e2beee
--- /dev/null
+++ b/sys/dev/ice/ice_flex_pipe.c
@@ -0,0 +1,5630 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_flex_pipe.h"
+#include "ice_protocol_type.h"
+#include "ice_flow.h"
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+ /* SWITCH */
+ {
+ ICE_SID_XLT0_SW,
+ ICE_SID_XLT_KEY_BUILDER_SW,
+ ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW,
+ ICE_SID_CDID_KEY_BUILDER_SW,
+ ICE_SID_CDID_REDIR_SW
+ },
+
+ /* ACL */
+ {
+ ICE_SID_XLT0_ACL,
+ ICE_SID_XLT_KEY_BUILDER_ACL,
+ ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL,
+ ICE_SID_CDID_KEY_BUILDER_ACL,
+ ICE_SID_CDID_REDIR_ACL
+ },
+
+ /* FD */
+ {
+ ICE_SID_XLT0_FD,
+ ICE_SID_XLT_KEY_BUILDER_FD,
+ ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD,
+ ICE_SID_CDID_KEY_BUILDER_FD,
+ ICE_SID_CDID_REDIR_FD
+ },
+
+ /* RSS */
+ {
+ ICE_SID_XLT0_RSS,
+ ICE_SID_XLT_KEY_BUILDER_RSS,
+ ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS,
+ ICE_SID_CDID_KEY_BUILDER_RSS,
+ ICE_SID_CDID_REDIR_RSS
+ },
+
+ /* PE */
+ {
+ ICE_SID_XLT0_PE,
+ ICE_SID_XLT_KEY_BUILDER_PE,
+ ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE,
+ ICE_SID_CDID_KEY_BUILDER_PE,
+ ICE_SID_CDID_REDIR_PE
+ }
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+ return ice_sect_lkup[blk][sect];
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = LE16_TO_CPU(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = LE16_TO_CPU(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms;
+
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ LE32_TO_CPU(ice_seg->device_table_count));
+
+ return (_FORCE_ struct ice_buf_table *)
+ (nvms->vers + LE32_TO_CPU(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL, which continues the enumeration. When the function
+ * returns a NULL pointer, the end of the buffers has been reached or an
+ * unexpected value has been detected (for example, an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL, which continues the enumeration.
+ * When the function returns a NULL pointer, the end of the matching sections
+ * has been reached.
+ */
+static void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ CPU_TO_LE32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect = ((u8 *)state->buf) +
+ LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL, which continues the enumeration.
+ * When the function returns a NULL pointer, the end of the entries has been
+ * reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = (struct ice_boost_tcam_section *)section;
+ if (index >= LE16_TO_CPU(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = (struct ice_boost_tcam_entry *)
+ ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
+ *entry = tcam;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = (struct ice_label_section *)section;
+ if (index >= LE16_TO_CPU(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
+ NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = LE16_TO_CPU(label->value);
+ return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
+
+/* Key creation */
+
+#define ICE_DC_KEY 0x1 /* don't care */
+#define ICE_DC_KEYINV 0x1
+#define ICE_NM_KEY 0x0 /* never match */
+#define ICE_NM_KEYINV 0x0
+#define ICE_0_KEY 0x1 /* match 0 */
+#define ICE_0_KEYINV 0x0
+#define ICE_1_KEY 0x0 /* match 1 */
+#define ICE_1_KEYINV 0x1
+
+/**
+ * ice_gen_key_word - generate 16-bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to an array where the resulting key portion will be stored
+ * @key_inv: pointer to an array where the resulting key invert portion will be
+ *           stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care
+ * mask, and an 8-bit never match mask. The 16 bits of output are divided into
+ * 8 bits of key and 8 bits of key invert.
+ *
+ * '0' = b01, always match a 0 bit
+ * '1' = b10, always match a 1 bit
+ * '?' = b11, don't care bit (always matches)
+ * '~' = b00, never match bit
+ *
+ * Input:
+ * val: b0 1 0 1 0 1
+ * dont_care: b0 0 1 1 0 0
+ * never_mtch: b0 0 0 0 1 1
+ * ------------------------------
+ * Result: key: b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+ u8 *key_inv)
+{
+ u8 in_key = *key, in_key_inv = *key_inv;
+ u8 i;
+
+ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+ return ICE_ERR_CFG;
+
+ *key = 0;
+ *key_inv = 0;
+
+ /* encode the 8 bits into 8-bit key and 8-bit key invert */
+ for (i = 0; i < 8; i++) {
+ *key >>= 1;
+ *key_inv >>= 1;
+
+ if (!(valid & 0x1)) { /* change only valid bits */
+ *key |= (in_key & 0x1) << 7;
+ *key_inv |= (in_key_inv & 0x1) << 7;
+ } else if (dont_care & 0x1) { /* don't care bit */
+ *key |= ICE_DC_KEY << 7;
+ *key_inv |= ICE_DC_KEYINV << 7;
+ } else if (nvr_mtch & 0x1) { /* never match bit */
+ *key |= ICE_NM_KEY << 7;
+ *key_inv |= ICE_NM_KEYINV << 7;
+ } else if (val & 0x01) { /* exact 1 match */
+ *key |= ICE_1_KEY << 7;
+ *key_inv |= ICE_1_KEYINV << 7;
+ } else { /* exact 0 match */
+ *key |= ICE_0_KEY << 7;
+ *key_inv |= ICE_0_KEYINV << 7;
+ }
+
+ dont_care >>= 1;
+ nvr_mtch >>= 1;
+ valid >>= 1;
+ val >>= 1;
+ in_key >>= 1;
+ in_key_inv >>= 1;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an array.
+ * Returns true if the number of bits set is <= max, and false otherwise.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+ u16 count = 0;
+ u16 i;
+
+ /* check each byte */
+ for (i = 0; i < size; i++) {
+ /* if 0, go to next byte */
+ if (!mask[i])
+ continue;
+
+ /* We know there is at least one set bit in this byte because of
+ * the above check; if we already have found 'max' number of
+ * bits set, then we can return failure now.
+ */
+ if (count == max)
+ return false;
+
+ /* count the bits in this byte, checking threshold */
+ count += ice_hweight8(mask[i]);
+ if (count > max)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_set_key - generate a variable sized key with multiples of 16-bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ * upd == NULL --> upd mask is all 1's (update all bits)
+ * dc == NULL --> dc mask is all 0's (no don't care bits)
+ * nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len)
+{
+ u16 half_size;
+ u16 i;
+
+ /* size must be a multiple of 2 bytes. */
+ if (size % 2)
+ return ICE_ERR_CFG;
+ half_size = size / 2;
+
+ if (off + len > half_size)
+ return ICE_ERR_CFG;
+
+ /* Make sure at most one bit is set in the never match mask. Having more
+ * than one never match mask bit set will cause HW to consume excessive
+	 * power; this is a power management efficiency check.
+ */
+#define ICE_NVR_MTCH_BITS_MAX 1
+ if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+ return ICE_ERR_CFG;
+
+ for (i = 0; i < len; i++)
+ if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+ dc ? dc[i] : 0, nm ? nm[i] : 0,
+ key + off + i, key + half_size + off + i))
+ return ICE_ERR_CFG;
+
+ return ICE_SUCCESS;
+}
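
A worked usage sketch for ice_set_key() (editorial illustration, not part of this patch): the first half of the destination buffer receives key bytes and the second half receives key-invert bytes, so a 10-byte buffer yields 5 bytes of each. The call below requires the upper nibble of the first value byte to match 0x4 while treating the lower nibble as don't care.

	u8 key[10] = { 0 };	/* 5 bytes of key + 5 bytes of key invert */
	u8 val[1] = { 0x45 };
	u8 dc[1] = { 0x0F };	/* low nibble is don't care */

	/* upd and nm left NULL: update every bit, no never-match bits */
	if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, sizeof(val)))
		return ICE_ERR_CFG;
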
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (status == ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+static struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
+
+ if (LE32_TO_CPU(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ u32 offset, info, i;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+
+ status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_status
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ struct ice_buf_hdr *bh;
+ u32 offset, info, i;
+
+ if (!bufs || !count)
+ return ICE_ERR_PARAM;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_SUCCESS;
+
+ /* reset pkg_dwnld_status in case this function is called in the
+ * reset/rebuild flow
+ */
+ hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
+ else
+ hw->pkg_dwnld_status = hw->adminq.sq_last_status;
+ return status;
+ }
+
+ for (i = 0; i < count; i++) {
+ bool last = ((i + 1) == count);
+
+ if (!last) {
+ /* check next buffer for metadata flag */
+ bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+ /* A set metadata flag in the next buffer will signal
+ * that the current buffer will be the last buffer
+ * downloaded
+ */
+ if (LE16_TO_CPU(bh->section_count))
+ if (LE32_TO_CPU(bh->section_entry[0].type) &
+ ICE_METADATA_BUF)
+ last = true;
+ }
+
+ bh = (struct ice_buf_hdr *)(bufs + i);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ hw->pkg_dwnld_status = hw->adminq.sq_last_status;
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ ice_release_global_cfg_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_status
+ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ LE32_TO_CPU(ice_seg->hdr.seg_type),
+ LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_global_metadata_seg *meta_seg;
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ if (!pkg_hdr)
+ return ICE_ERR_PARAM;
+
+ meta_seg = (struct ice_global_metadata_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (meta_seg) {
+ hw->pkg_ver = meta_seg->pkg_ver;
+ ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
+ sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
+ meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
+ meta_seg->pkg_name);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find metadata segment in driver package\n");
+ return ICE_ERR_CFG;
+ }
+
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+ ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
+ sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice segment in driver package\n");
+ return ICE_ERR_CFG;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1);
+ pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg_info)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
+ if (status)
+ goto init_pkg_free_alloc;
+
+ for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
+ ice_memcpy(hw->active_pkg_name,
+ pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name),
+ ICE_NONDMA_TO_NONDMA);
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+ i, pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ ice_free(hw, pkg_info);
+
+ return status;
+}
+
+/**
+ * ice_find_label_value
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @name: name of the label to search for
+ * @type: the section type that will contain the label
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Finds a label's value given the label name and the section type to search.
+ * The ice_seg parameter must not be NULL since the first call to
+ * ice_enum_labels requires a pointer to an actual ice_seg structure.
+ */
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+ u16 *value)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ label_name = ice_enum_labels(ice_seg, type, &state, &val);
+ if (label_name && !strcmp(label_name, name)) {
+ *value = val;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (label_name);
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < sizeof(*pkg))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_ERR_CFG;
+
+ /* pkg must have at least one segment */
+ seg_count = LE32_TO_CPU(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_ERR_CFG;
+
+ /* make sure segment array fits in package length */
+ if (len < ice_struct_size(pkg, seg_offset, seg_count - 1))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + LE32_TO_CPU(seg->seg_size))
+ return ICE_ERR_BUF_TOO_SHORT;
+ }
+
+ return ICE_SUCCESS;
+}
+
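+/*
+ * Package layout implied by the checks in ice_verify_pkg() (an informal
+ * sketch; all offsets are relative to the start of the package buffer):
+ *
+ *	struct ice_pkg_hdr	pkg_format_ver, seg_count and the
+ *				seg_offset[seg_count] array
+ *	segment i		an ice_generic_seg_hdr plus body, located at
+ *				seg_offset[i] and seg_size bytes long
+ *
+ * Every segment header and body must fall entirely within 'len'; otherwise
+ * ICE_ERR_BUF_TOO_SHORT is returned.
+ */
+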
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on
+ * whether the segment was allocated or only the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ ice_free(hw, hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
+ pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version for compatibility with the driver
+ * and the NVM.
+ */
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Check package version compatibility */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return status;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+ if (status)
+ goto fw_ddp_compat_free_alloc;
+
+ for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ status = ICE_ERR_FW_DDP_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ ice_free(hw, pkg);
+ return status;
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg().
+ */
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ struct ice_pkg_hdr *pkg;
+ enum ice_status status;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ status = ice_verify_pkg(pkg, len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ status);
+ return status;
+ }
+
+ /* initialize package info */
+ status = ice_init_pkg_info(hw, pkg);
+ if (status)
+ return status;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ status = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (status)
+ return status;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ status = ice_download_pkg(hw, seg);
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "package previously loaded - no work.\n");
+ status = ICE_SUCCESS;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!status) {
+ status = ice_get_pkg_info(hw);
+ if (!status)
+ status = ice_chk_pkg_version(&hw->active_pkg_ver);
+ }
+
+ if (!status) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+ enum ice_status status;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
+
+ status = ice_init_pkg(hw, buf_copy, len);
+ if (status) {
+ /* Free the copy, since we failed to initialize the package */
+ ice_free(hw, buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return status;
+}
+
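+/*
+ * Typical use of the two entry points above, assuming a caller that has the
+ * DDP package mapped in memory (the pkg_data/pkg_len names are hypothetical,
+ * not identifiers from this driver):
+ *
+ *	status = ice_copy_and_init_pkg(hw, pkg_data, pkg_len);
+ *		(buffer may be freed after the call: the driver keeps a copy)
+ *
+ *	status = ice_init_pkg(hw, pkg_data, pkg_len);
+ *		(buffer is writable and stays resident: no copy is made)
+ *
+ * On success, hw->seg points into the (possibly copied) buffer, and a copy
+ * made by ice_copy_and_init_pkg() is tracked in hw->pkg_copy until
+ * ice_free_seg() releases it.
+ */
+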
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates the offset field. "offset" is an index into the field vector
+ * table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section =
+ (struct ice_sw_fv_section *)section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= LE16_TO_CPU(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table, we need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = LE16_TO_CPU(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+ u16 i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ ice_bitmap_t *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (req_profs == ICE_PROF_ALL) {
+ u16 i;
+
+ for (i = 0; i < ICE_MAX_NUM_PROFILES; i++)
+ ice_set_bit(i, bm);
+ return;
+ }
+
+ ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
+
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv);
+
+ if (req_profs & prof_type)
+ ice_set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: field vector to search for with a given protocol ID
+ * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ids_cnt || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!ice_is_bit_set(bm, (u16)offset))
+ continue;
+
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
+
+ /* This code assumes that if a switch field vector line
+ * has a matching protocol, then this line will contain
+ * the entries necessary to represent every field in
+ * that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == ids_cnt) {
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ LIST_ADD(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (LIST_EMPTY(fv_list))
+ return ICE_ERR_CFG;
+ return ICE_SUCCESS;
+
+err:
+ LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ list_entry) {
+ LIST_DEL(&fvl->list_entry);
+ ice_free(hw, fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
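+/*
+ * Note for callers of ice_get_sw_fv_list(): the list entries are allocated by
+ * the function but owned by the caller. A minimal cleanup sketch, mirroring
+ * the error path above (fv_list is the caller's list head):
+ *
+ *	struct ice_sw_fv_list_entry *fvl, *tmp;
+ *
+ *	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ *				 list_entry) {
+ *		LIST_DEL(&fvl->list_entry);
+ *		ice_free(hw, fvl);
+ *	}
+ */
+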
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices; these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ ice_set_bit(i,
+ hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ ice_free(hw, bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times, as long as all calls are made before the first
+ * call to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has
+ * been called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result in
+ * some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+static enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = LE16_TO_CPU(buf->data_end) +
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+static void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = LE16_TO_CPU(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ICE_ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = LE16_TO_CPU(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
+ buf->section_entry[sect_count].size = CPU_TO_LE16(size);
+ buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+
+ data_end += size;
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ buf->section_count = CPU_TO_LE16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_unreserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to unreserve
+ *
+ * Unreserves one or more section table entries in a package buffer, releasing
+ * space that can be used for section data. This routine can be called
+ * multiple times, as long as all calls are made before the first call to
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result
+ * in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't decrease table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (count > bld->reserved_section_table_entries)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries -= count;
+
+ data_end = LE16_TO_CPU(buf->data_end) -
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_get_free_space
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of free bytes remaining in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return LE16_TO_CPU(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
+
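+/*
+ * The buffer helpers above are normally used together to build and apply an
+ * update package. A condensed sketch of the sequence, as used by routines
+ * such as ice_create_tunnel() below:
+ *
+ *	bld = ice_pkg_buf_alloc(hw);
+ *	ice_pkg_buf_reserve_section(bld, n);	  (reserve n section entries)
+ *	sect = ice_pkg_buf_alloc_section(bld, type, size);
+ *	...fill sect, in Little Endian form...
+ *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *	ice_pkg_buf_free(hw, bld);
+ */
+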
+/**
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+ bool res;
+
+ ice_acquire_lock(&hw->tnl_lock);
+ res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_tunnel_get_type
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @type: returns tunnel type
+ *
+ * For a given port number, this function will return the type of tunnel.
+ */
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
+{
+ bool res = false;
+ u16 i;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ *type = hw->tnl.tbl[i].type;
+ res = true;
+ break;
+ }
+
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+ hw->tnl.tbl[i].type == type) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port)
+{
+ bool res = false;
+ u16 i;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
+ }
+
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port of tunnel to create
+ *
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = ICE_SUCCESS;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer */
+ ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
+
+ /* over-write the never-match dest port key bits with the encoded port
+ * bits
+ */
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to Tx section entry */
+ ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+ ice_release_lock(&hw->tnl_lock);
+
+ return status;
+}
+
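+/*
+ * Example (illustrative only): opening the IANA-assigned VXLAN port, assuming
+ * TNL_VXLAN is one of the tunnel types provided for this table:
+ *
+ *	status = ice_create_tunnel(hw, TNL_VXLAN, 4789);
+ *
+ * Creating the same port again only increments the entry's reference count;
+ * ice_destroy_tunnel(hw, 4789, false) decrements it and only tears the
+ * tunnel down on the last reference.
+ */
+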
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag indicating that all tunnels should be destroyed
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 index;
+ u16 size;
+ u16 i;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = ICE_SUCCESS;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* size of section - there is at least one entry */
+ size = ice_struct_size(sect_rx, tcam, count - 1);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ ice_memcpy(sect_rx->tcam + i,
+ hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(sect_tx->tcam + i,
+ hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_destroy_tunnel_end:
+ ice_release_lock(&hw->tnl_lock);
+
+ return status;
+}
+
+/**
+ * ice_replay_tunnels
+ * @hw: pointer to the HW structure
+ *
+ * Replays all tunnels
+ */
+enum ice_status ice_replay_tunnels(struct ice_hw *hw)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u16 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) {
+ enum ice_tunnel_type type = hw->tnl.tbl[i].type;
+ u16 refs = hw->tnl.tbl[i].ref;
+ u16 port = hw->tnl.tbl[i].port;
+
+ if (!hw->tnl.tbl[i].in_use)
+ continue;
+
+ /* Replay tunnels one at a time by destroying them, then
+ * recreating them
+ */
+ hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
+ status = ice_destroy_tunnel(hw, port, false);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "ERR: 0x%x - destroy tunnel port 0x%x\n",
+ status, port);
+ break;
+ }
+
+ status = ice_create_tunnel(hw, type, port);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "ERR: 0x%x - create tunnel port 0x%x\n",
+ status, port);
+ break;
+ }
+
+ /* reset to original ref count */
+ hw->tnl.tbl[i].ref = refs;
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return ICE_SUCCESS;
+}
+
+/* PTG Management */
+
+/**
+ * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will update the XLT1 hardware table to reflect the new
+ * packet type group configuration.
+ */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+{
+ struct ice_xlt1_section *sect;
+ struct ice_buf_build *bld;
+ enum ice_status status;
+ u16 index;
+
+ bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
+ ICE_XLT1_SIZE(ICE_XLT1_CNT),
+ (void **)&sect);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
+ sect->offset = CPU_TO_LE16(0);
+ for (index = 0; index < ICE_XLT1_CNT; index++)
+ sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to search for
+ * @ptg: pointer to variable that receives the PTG
+ *
+ * This function will search the PTGs for a particular ptype, returning the
+ * PTG ID that contains it through the PTG parameter, with the value of
+ * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
+ */
+static enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
+{
+ if (ptype >= ICE_XLT1_CNT || !ptg)
+ return ICE_ERR_PARAM;
+
+ *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_alloc_val - Allocates a new packet type group ID by value
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptg: the PTG to allocate
+ *
+ * This function allocates a given packet type group ID specified by the PTG
+ * parameter.
+ */
+static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+ hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
+}
+
+/**
+ * ice_ptg_free - Frees a packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptg: the PTG ID to free
+ *
+ * This function frees a packet type group, and returns all the current ptypes
+ * within it to the default PTG.
+ */
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+ struct ice_ptg_ptype *p, *temp;
+
+ hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
+ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ while (p) {
+ p->ptg = ICE_DEFAULT_PTG;
+ temp = p->next_ptype;
+ p->next_ptype = NULL;
+ p = temp;
+ }
+
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
+}
+
+/**
+ * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to remove
+ * @ptg: the PTG to remove the ptype from
+ *
+ * This function will remove the ptype from the specific PTG, and move it to
+ * the default PTG (ICE_DEFAULT_PTG).
+ */
+static enum ice_status
+ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+ struct ice_ptg_ptype **ch;
+ struct ice_ptg_ptype *p;
+
+ if (ptype > ICE_XLT1_CNT - 1)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Should not happen if .in_use is set, bad config */
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
+ return ICE_ERR_CFG;
+
+ /* find the ptype within this PTG, and bypass the link over it */
+ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ while (p) {
+ if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
+ *ch = p->next_ptype;
+ break;
+ }
+
+ ch = &p->next_ptype;
+ p = p->next_ptype;
+ }
+
+ hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
+ hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to add or move
+ * @ptg: the PTG to add or move the ptype to
+ *
+ * This function will either add or move a ptype to a particular PTG depending
+ * on whether the ptype is already part of another group. Note that using
+ * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * default PTG.
+ */
+static enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+ enum ice_status status;
+ u8 original_ptg;
+
+ if (ptype > ICE_XLT1_CNT - 1)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
+ if (status)
+ return status;
+
+ /* Is ptype already in the correct PTG? */
+ if (original_ptg == ptg)
+ return ICE_SUCCESS;
+
+ /* Remove from original PTG and move back to the default PTG */
+ if (original_ptg != ICE_DEFAULT_PTG)
+ ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
+
+ /* Moving to default PTG? Then we're done with this request */
+ if (ptg == ICE_DEFAULT_PTG)
+ return ICE_SUCCESS;
+
+ /* Add ptype to PTG at beginning of list */
+ hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
+ &hw->blk[blk].xlt1.ptypes[ptype];
+
+ hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
+ hw->blk[blk].xlt1.t[ptype] = ptg;
+
+ return ICE_SUCCESS;
+}
+
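+/*
+ * After a successful ice_ptg_add_mv_ptype() the ptype is tracked in three
+ * places (a summary of the assignments above, not additional state):
+ *
+ *	xlt1.ptypes[ptype].ptg		the PTG the ptype now belongs to
+ *	xlt1.ptg_tbl[ptg].first_ptype	head of the PTG's ptype list, with the
+ *					new ptype linked at the front
+ *	xlt1.t[ptype]			software copy of the XLT1 entry for
+ *					this ptype
+ */
+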
+/* Block / table size info */
+struct ice_blk_size_details {
+ u16 xlt1; /* # XLT1 entries */
+ u16 xlt2; /* # XLT2 entries */
+ u16 prof_tcam; /* # profile ID TCAM entries */
+ u16 prof_id; /* # profile IDs */
+ u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
+ u16 prof_redir; /* # profile redirection entries */
+ u16 es; /* # extraction sequence entries */
+ u16 fvw; /* # field vector words */
+ u8 overwrite; /* overwrite existing entries allowed */
+ u8 reverse; /* reverse FV order */
+};
+
+static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
+ /**
+ * Table Definitions
+ * XLT1 - Number of entries in XLT1 table
+ * XLT2 - Number of entries in XLT2 table
+ * TCAM - Number of entries in the Profile ID TCAM table
+ * CDID - Control Domain ID of the hardware block
+ * PRED - Number of entries in the Profile Redirection Table
+ * FV - Number of entries in the Field Vector
+ * FVW - Width (in WORDs) of the Field Vector
+ * OVR - Overwrite existing table entries
+ * REV - Reverse FV
+ */
+ /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
+ /* Overwrite , Reverse FV */
+ /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
+ false, false },
+ /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
+ false, false },
+ /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
+ false, true },
+ /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
+ true, true },
+ /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
+ false, false },
+};
+
+enum ice_sid_all {
+ ICE_SID_XLT1_OFF = 0,
+ ICE_SID_XLT2_OFF,
+ ICE_SID_PR_OFF,
+ ICE_SID_PR_REDIR_OFF,
+ ICE_SID_ES_OFF,
+ ICE_SID_OFF_COUNT,
+};
+
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * Count, cookies, and order must all match for the two lists to be
+ * considered equivalent.
+ */
+static bool
+ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
+{
+ struct ice_vsig_prof *tmp1;
+ struct ice_vsig_prof *tmp2;
+ u16 chk_count = 0;
+ u16 count = 0;
+
+ /* compare counts */
+ LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+ count++;
+ }
+ LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+ chk_count++;
+ }
+ if (!count || count != chk_count)
+ return false;
+
+ tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
+ tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
+
+ /* profile cookies must match, and in the exact same order, to take
+ * priority into account
+ */
+ while (count--) {
+ if (tmp2->profile_cookie != tmp1->profile_cookie)
+ return false;
+
+ tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
+ tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
+ }
+
+ return true;
+}
+
+/* VSIG Management */
+
+/**
+ * ice_vsig_update_xlt2_sect - update one section of XLT2 table
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: HW VSI number to program
+ * @vsig: VSIG for the VSI
+ *
+ * This function will update the XLT2 hardware table with the input VSI
+ * group configuration.
+ */
+static enum ice_status
+ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ u16 vsig)
+{
+ struct ice_xlt2_section *sect;
+ struct ice_buf_build *bld;
+ enum ice_status status;
+
+ bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
+ sizeof(struct ice_xlt2_section),
+ (void **)&sect);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ sect->count = CPU_TO_LE16(1);
+ sect->offset = CPU_TO_LE16(vsi);
+ sect->value[0] = CPU_TO_LE16(vsig);
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will update the XLT2 hardware table with the input VSI
+ * group configuration of the VSIs that are in use.
+ */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 vsi;
+
+ for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
+ /* update only vsis that have been changed */
+ if (hw->blk[blk].xlt2.vsis[vsi].changed) {
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+ status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
+ if (status)
+ return status;
+
+ hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI of interest
+ * @vsig: pointer to receive the VSI group
+ *
+ * This function will look up the VSI entry in the XLT2 list and return
+ * the VSI group it is associated with.
+ */
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
+{
+ if (!vsig || vsi >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+
+ /* As long as there's a default or valid VSIG associated with the input
+ * VSI, the function returns success. Any handling of the VSIG will be
+ * done by the following add, update or remove functions.
+ */
+ *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_alloc_val - allocate a new VSIG by value
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: the VSIG to allocate
+ *
+ * This function will allocate a given VSIG specified by the VSIG parameter.
+ */
+static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
+ }
+
+ return ICE_VSIG_VALUE(idx, hw->pf_id);
+}
+
+/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will iterate through the VSIG list and mark the first
+ * unused entry for the new VSIG entry as used and return that value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ return ice_vsig_alloc_val(hw, blk, i);
+
+ return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find VSI group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e. all VSIs under
+ * a group have the same characteristic set. To check whether there exists a
+ * VSIG which has the same characteristics as the input characteristics, this
+ * function will iterate through the XLT2 list and return the VSIG that has a
+ * matching configuration. In order to make sure that priorities are accounted
+ * for, the list must match exactly, including the order in which the
+ * characteristics are listed.
+ */
+static enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+ struct LIST_HEAD_TYPE *chs, u16 *vsig)
+{
+ struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+ u16 i;
+
+ for (i = 0; i < xlt2->count; i++) {
+ if (xlt2->vsig_tbl[i].in_use &&
+ ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+ *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+ return ICE_SUCCESS;
+ }
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_vsig_free - free VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to remove
+ *
+ * The function will remove all VSIs associated with the input VSIG and move
+ * them to the DEFAULT_VSIG, then mark the VSIG as available.
+ */
+static enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ struct ice_vsig_prof *dtmp, *del;
+ struct ice_vsig_vsi *vsi_cur;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+ if (idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the
+ * list and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+ /* remove all vsis associated with this VSIG XLT2 entry */
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ /* NULL terminate head of VSI list */
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+ }
+
+ /* free characteristic list */
+ LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ LIST_DEL(&del->list);
+ ice_free(hw, del);
+ }
+
+ /* if VSIG characteristic list was cleared for reset
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_remove_vsi - remove VSI from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to remove
+ * @vsig: VSI group to remove from
+ *
+ * The function will remove the input VSI from its VSI group and move it
+ * to the DEFAULT_VSIG.
+ */
+static enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* entry already in default VSIG, don't have to remove */
+ if (idx == ICE_DEFAULT_VSIG)
+ return ICE_SUCCESS;
+
+ vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ if (!(*vsi_head))
+ return ICE_ERR_CFG;
+
+ vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+ vsi_cur = (*vsi_head);
+
+ /* iterate the VSI list, skip over the entry to be removed */
+ while (vsi_cur) {
+ if (vsi_tgt == vsi_cur) {
+ (*vsi_head) = vsi_cur->next_vsi;
+ break;
+ }
+ vsi_head = &vsi_cur->next_vsi;
+ vsi_cur = vsi_cur->next_vsi;
+ }
+
+ /* verify if VSI was removed from group list */
+ if (!vsi_cur)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to move
+ * @vsig: destination VSI group
+ *
+ * This function will move or add the input VSI to the target VSIG.
+ * The function will find the original VSIG the VSI belongs to and
+ * move the entry to the DEFAULT_VSIG, update the original VSIG and
+ * then move the entry to the new VSIG.
+ */
+static enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi *tmp;
+ enum ice_status status;
+ u16 orig_vsig, idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ /* if the VSIG is not in use and it is not the default VSIG, then this
+ * VSIG doesn't exist.
+ */
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
+ vsig != ICE_DEFAULT_VSIG)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (status)
+ return status;
+
+ /* no update required if vsigs match */
+ if (orig_vsig == vsig)
+ return ICE_SUCCESS;
+
+ if (orig_vsig != ICE_DEFAULT_VSIG) {
+ /* remove entry from orig_vsig and add to default VSIG */
+ status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
+ if (status)
+ return status;
+ }
+
+ if (idx == ICE_DEFAULT_VSIG)
+ return ICE_SUCCESS;
+
+ /* Create VSI entry and add VSIG and prop_mask values */
+ hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
+ hw->blk[blk].xlt2.vsis[vsi].changed = 1;
+
+ /* Add new entry to the head of the VSIG list */
+ tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
+ &hw->blk[blk].xlt2.vsis[vsi];
+ hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
+ hw->blk[blk].xlt2.t[vsi] = vsig;
+
+ return ICE_SUCCESS;
+}
+
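+/*
+ * ice_vsig_add_mv_vsi() only updates the software XLT2 state; the VSI is
+ * marked as changed, and the new association reaches hardware when
+ * ice_vsig_update_xlt2() walks the changed VSIs. A condensed sketch of one
+ * possible sequence:
+ *
+ *	vsig = ice_vsig_alloc(hw, blk);		  (or reuse an existing VSIG)
+ *	status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+ *	if (!status)
+ *		status = ice_vsig_update_xlt2(hw, blk);
+ */
+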
+/**
+ * ice_find_prof_id - find profile ID for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile ID
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u8 *prof_id)
+{
+ struct ice_es *es = &hw->blk[blk].es;
+ u16 off;
+ u8 i;
+
+ for (i = 0; i < (u8)es->count; i++) {
+ off = i * es->fvw;
+
+ if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ continue;
+
+ *prof_id = i;
+ return ICE_SUCCESS;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile ID resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_SW:
+ *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_ACL:
+ *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_PE:
+ *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_SW:
+ *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_ACL:
+ *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_PE:
+ *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the TCAM for
+ * @tcam_idx: pointer to variable to receive the TCAM entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the TCAM entry
+ * @tcam_idx: the TCAM entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the profile ID for
+ * @prof_id: pointer to variable to receive the profile ID
+ *
+ * This function allocates a new profile ID, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+ enum ice_status status;
+ u16 res_type;
+ u16 get_prof;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
+ if (!status)
+ *prof_id = (u8)get_prof;
+
+ return status;
+}
+
+/**
+ * ice_free_prof_id - free profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID to free
+ *
+ * This function frees a profile ID, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ u16 tmp_prof_id = (u16)prof_id;
+ u16 res_type;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block in which the profile ID resides
+ * @prof_id: the profile ID for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ hw->blk[blk].es.ref_count[prof_id]++;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile ID to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+ struct ice_fv_word *fv)
+{
+ u16 off;
+
+ off = prof_id * hw->blk[blk].es.fvw;
+ if (!fv) {
+ ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
+ sizeof(*fv), ICE_NONDMA_MEM);
+ hw->blk[blk].es.written[prof_id] = false;
+ } else {
+ ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
+ sizeof(*fv), ICE_NONDMA_TO_NONDMA);
+ }
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+ if (!--hw->blk[blk].es.ref_count[prof_id]) {
+ ice_write_es(hw, blk, prof_id, NULL);
+ return ice_free_prof_id(hw, blk, prof_id);
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* Block / table section IDs */
+static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
+ /* SWITCH */
+ { ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW
+ },
+
+ /* ACL */
+ { ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL
+ },
+
+ /* FD */
+ { ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD
+ },
+
+ /* RSS */
+ { ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS
+ },
+
+ /* PE */
+ { ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE
+ }
+};
+
+/**
+ * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 pt;
+
+ for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+ u8 ptg;
+
+ ptg = hw->blk[blk].xlt1.t[pt];
+ if (ptg != ICE_DEFAULT_PTG) {
+ ice_ptg_alloc_val(hw, blk, ptg);
+ ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+ }
+ }
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 vsi;
+
+ for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+ u16 vsig;
+
+ vsig = hw->blk[blk].xlt2.t[vsi];
+ if (vsig) {
+ ice_vsig_alloc_val(hw, blk, vsig);
+ ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+ /* no changes at this time, since this has been
+ * initialized from the original package
+ */
+ hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+ }
+ }
+}
+
+/**
+ * ice_init_sw_db - init software database from HW tables
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_sw_db(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+ ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+ }
+}
+
+/**
+ * ice_fill_tbl - Reads content of a single table type into database
+ * @hw: pointer to the hardware structure
+ * @block_id: Block ID of the table to copy
+ * @sid: Section ID of the table to copy
+ *
+ * Will attempt to read the entire content of a given table of a single block
+ * into the driver database. We assume that the buffer will always
+ * be as large or larger than the data contained in the package. If
+ * this condition is not met, there is most likely an error in the package
+ * contents.
+ */
+static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
+{
+ u32 dst_len, sect_len, offset = 0;
+ struct ice_prof_redir_section *pr;
+ struct ice_prof_id_section *pid;
+ struct ice_xlt1_section *xlt1;
+ struct ice_xlt2_section *xlt2;
+ struct ice_sw_fv_section *es;
+ struct ice_pkg_enum state;
+ u8 *src, *dst;
+ void *sect;
+
+	/* if the HW segment pointer is null then the first iteration of
+	 * ice_pkg_enum_section() will fail. In this case the HW tables will
+	 * not be filled and the function will simply return.
+	 */
+ if (!hw->seg) {
+ ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
+ return;
+ }
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ sect = ice_pkg_enum_section(hw->seg, &state, sid);
+
+ while (sect) {
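+		/* select the source section layout and the matching destination
+		 * HW table based on the section ID
+		 */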
+ switch (sid) {
+ case ICE_SID_XLT1_SW:
+ case ICE_SID_XLT1_FD:
+ case ICE_SID_XLT1_RSS:
+ case ICE_SID_XLT1_ACL:
+ case ICE_SID_XLT1_PE:
+ xlt1 = (struct ice_xlt1_section *)sect;
+ src = xlt1->value;
+ sect_len = LE16_TO_CPU(xlt1->count) *
+ sizeof(*hw->blk[block_id].xlt1.t);
+ dst = hw->blk[block_id].xlt1.t;
+ dst_len = hw->blk[block_id].xlt1.count *
+ sizeof(*hw->blk[block_id].xlt1.t);
+ break;
+ case ICE_SID_XLT2_SW:
+ case ICE_SID_XLT2_FD:
+ case ICE_SID_XLT2_RSS:
+ case ICE_SID_XLT2_ACL:
+ case ICE_SID_XLT2_PE:
+ xlt2 = (struct ice_xlt2_section *)sect;
+ src = (_FORCE_ u8 *)xlt2->value;
+ sect_len = LE16_TO_CPU(xlt2->count) *
+ sizeof(*hw->blk[block_id].xlt2.t);
+ dst = (u8 *)hw->blk[block_id].xlt2.t;
+ dst_len = hw->blk[block_id].xlt2.count *
+ sizeof(*hw->blk[block_id].xlt2.t);
+ break;
+ case ICE_SID_PROFID_TCAM_SW:
+ case ICE_SID_PROFID_TCAM_FD:
+ case ICE_SID_PROFID_TCAM_RSS:
+ case ICE_SID_PROFID_TCAM_ACL:
+ case ICE_SID_PROFID_TCAM_PE:
+ pid = (struct ice_prof_id_section *)sect;
+ src = (u8 *)pid->entry;
+ sect_len = LE16_TO_CPU(pid->count) *
+ sizeof(*hw->blk[block_id].prof.t);
+ dst = (u8 *)hw->blk[block_id].prof.t;
+ dst_len = hw->blk[block_id].prof.count *
+ sizeof(*hw->blk[block_id].prof.t);
+ break;
+ case ICE_SID_PROFID_REDIR_SW:
+ case ICE_SID_PROFID_REDIR_FD:
+ case ICE_SID_PROFID_REDIR_RSS:
+ case ICE_SID_PROFID_REDIR_ACL:
+ case ICE_SID_PROFID_REDIR_PE:
+ pr = (struct ice_prof_redir_section *)sect;
+ src = pr->redir_value;
+ sect_len = LE16_TO_CPU(pr->count) *
+ sizeof(*hw->blk[block_id].prof_redir.t);
+ dst = hw->blk[block_id].prof_redir.t;
+ dst_len = hw->blk[block_id].prof_redir.count *
+ sizeof(*hw->blk[block_id].prof_redir.t);
+ break;
+ case ICE_SID_FLD_VEC_SW:
+ case ICE_SID_FLD_VEC_FD:
+ case ICE_SID_FLD_VEC_RSS:
+ case ICE_SID_FLD_VEC_ACL:
+ case ICE_SID_FLD_VEC_PE:
+ es = (struct ice_sw_fv_section *)sect;
+ src = (u8 *)es->fv;
+ sect_len = (u32)(LE16_TO_CPU(es->count) *
+ hw->blk[block_id].es.fvw) *
+ sizeof(*hw->blk[block_id].es.t);
+ dst = (u8 *)hw->blk[block_id].es.t;
+ dst_len = (u32)(hw->blk[block_id].es.count *
+ hw->blk[block_id].es.fvw) *
+ sizeof(*hw->blk[block_id].es.t);
+ break;
+ default:
+ return;
+ }
+
+ /* if the section offset exceeds destination length, terminate
+ * table fill.
+ */
+ if (offset > dst_len)
+ return;
+
+		/* if the sum of the section size and offset exceeds the
+		 * destination size, then we are out of bounds of the HW table
+		 * size for that PF. Change the section length to fill only the
+		 * remaining table space of that PF.
+		 */
+ if ((offset + sect_len) > dst_len)
+ sect_len = dst_len - offset;
+
+ ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
+ offset += sect_len;
+ sect = ice_pkg_enum_section(NULL, &state, sid);
+ }
+}
+
+/**
+ * ice_fill_blk_tbls - Read package content for tables
+ * @hw: pointer to the hardware structure
+ *
+ * Reads the current package contents and populates the driver
+ * database with the data iteratively for all advanced feature
+ * blocks. Assumes that the HW tables have already been allocated.
+ */
+void ice_fill_blk_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ enum ice_block blk_id = (enum ice_block)i;
+
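+		/* copy each of the block's five package sections (XLT1, XLT2,
+		 * profile TCAM, profile redirection, and ES) into its HW table
+		 */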
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
+ }
+
+ ice_init_sw_db(hw);
+}
+
+/**
+ * ice_free_prof_map - free profile map
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_es *es = &hw->blk[blk_idx].es;
+ struct ice_prof_map *del, *tmp;
+
+ ice_acquire_lock(&es->prof_map_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
+ ice_prof_map, list) {
+ LIST_DEL(&del->list);
+ ice_free(hw, del);
+ }
+ INIT_LIST_HEAD(&es->prof_map);
+ ice_release_lock(&es->prof_map_lock);
+}
+
+/**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_flow_prof *p, *tmp;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
+ LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
+ ice_flow_prof, l_entry) {
+ struct ice_flow_entry *e, *t;
+
+ LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
+ ice_flow_entry, l_entry)
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
+
+ LIST_DEL(&p->l_entry);
+ if (p->acts)
+ ice_free(hw, p->acts);
+ ice_free(hw, p);
+ }
+ ice_release_lock(&hw->fl_profs_locks[blk_idx]);
+
+	/* if the driver is in reset and the tables are being cleared,
+	 * re-initialize the flow profile list heads
+	 */
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl)
+ return;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ ice_vsig_free(hw, blk, i);
+}
+
+/**
+ * ice_free_hw_tbls - free hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+void ice_free_hw_tbls(struct ice_hw *hw)
+{
+ struct ice_rss_cfg *r, *rt;
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ if (hw->blk[i].is_list_init) {
+ struct ice_es *es = &hw->blk[i].es;
+
+ ice_free_prof_map(hw, i);
+ ice_destroy_lock(&es->prof_map_lock);
+
+ ice_free_flow_profs(hw, i);
+ ice_destroy_lock(&hw->fl_profs_locks[i]);
+
+ hw->blk[i].is_list_init = false;
+ }
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+ ice_free(hw, hw->blk[i].xlt1.ptypes);
+ ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
+ ice_free(hw, hw->blk[i].xlt1.t);
+ ice_free(hw, hw->blk[i].xlt2.t);
+ ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
+ ice_free(hw, hw->blk[i].xlt2.vsis);
+ ice_free(hw, hw->blk[i].prof.t);
+ ice_free(hw, hw->blk[i].prof_redir.t);
+ ice_free(hw, hw->blk[i].es.t);
+ ice_free(hw, hw->blk[i].es.ref_count);
+ ice_free(hw, hw->blk[i].es.written);
+ }
+
+ LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
+ ice_rss_cfg, l_entry) {
+ LIST_DEL(&r->l_entry);
+ ice_free(hw, r);
+ }
+ ice_destroy_lock(&hw->rss_locks);
+ ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
+}
+
+/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ ice_init_lock(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_clear_hw_tbls - clear HW tables and flow profiles
+ * @hw: pointer to the hardware structure
+ */
+void ice_clear_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+
+ if (hw->blk[i].is_list_init) {
+ ice_free_prof_map(hw, i);
+ ice_free_flow_profs(hw, i);
+ }
+
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+
+ ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt1->ptg_tbl, 0,
+ ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt2->vsig_tbl, 0,
+ xlt2->count * sizeof(*xlt2->vsig_tbl),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
+ ICE_NONDMA_MEM);
+ ice_memset(prof_redir->t, 0,
+ prof_redir->count * sizeof(*prof_redir->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(es->t, 0, es->count * sizeof(*es->t),
+ ICE_NONDMA_MEM);
+ ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
+ ICE_NONDMA_MEM);
+ ice_memset(es->written, 0, es->count * sizeof(*es->written),
+ ICE_NONDMA_MEM);
+ }
+}
+
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ ice_init_lock(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+ u16 j;
+
+ if (hw->blk[i].is_list_init)
+ continue;
+
+ ice_init_flow_profs(hw, i);
+ ice_init_lock(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
+ hw->blk[i].is_list_init = true;
+
+ hw->blk[i].overwrite = blk_sizes[i].overwrite;
+ es->reverse = blk_sizes[i].reverse;
+
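+		/* XLT1 maps packet types (ptypes) to packet type groups (PTGs) */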
+ xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+ xlt1->count = blk_sizes[i].xlt1;
+
+ xlt1->ptypes = (struct ice_ptg_ptype *)
+ ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
+
+ if (!xlt1->ptypes)
+ goto err;
+
+ xlt1->ptg_tbl = (struct ice_ptg_entry *)
+ ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
+
+ if (!xlt1->ptg_tbl)
+ goto err;
+
+ xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
+ if (!xlt1->t)
+ goto err;
+
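+		/* XLT2 maps VSIs to VSI groups (VSIGs) */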
+ xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+ xlt2->count = blk_sizes[i].xlt2;
+
+ xlt2->vsis = (struct ice_vsig_vsi *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
+
+ if (!xlt2->vsis)
+ goto err;
+
+ xlt2->vsig_tbl = (struct ice_vsig_entry *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
+ if (!xlt2->vsig_tbl)
+ goto err;
+
+ for (j = 0; j < xlt2->count; j++)
+ INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
+ xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
+ if (!xlt2->t)
+ goto err;
+
+ prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+ prof->count = blk_sizes[i].prof_tcam;
+ prof->max_prof_id = blk_sizes[i].prof_id;
+ prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+ prof->t = (struct ice_prof_tcam_entry *)
+ ice_calloc(hw, prof->count, sizeof(*prof->t));
+
+ if (!prof->t)
+ goto err;
+
+ prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+ prof_redir->count = blk_sizes[i].prof_redir;
+ prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
+ sizeof(*prof_redir->t));
+
+ if (!prof_redir->t)
+ goto err;
+
+ es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+ es->count = blk_sizes[i].es;
+ es->fvw = blk_sizes[i].fvw;
+ es->t = (struct ice_fv_word *)
+ ice_calloc(hw, (u32)(es->count * es->fvw),
+ sizeof(*es->t));
+ if (!es->t)
+ goto err;
+
+ es->ref_count = (u16 *)
+ ice_calloc(hw, es->count, sizeof(*es->ref_count));
+
+ es->written = (u8 *)
+ ice_calloc(hw, es->count, sizeof(*es->written));
+ if (!es->ref_count)
+ goto err;
+ }
+ return ICE_SUCCESS;
+
+err:
+ ice_free_hw_tbls(hw);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_prof_gen_key - generate profile ID key
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile ID key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+ u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 key[ICE_TCAM_KEY_SZ])
+{
+ struct ice_prof_id_key inkey;
+
+ inkey.xlt1 = ptg;
+ inkey.xlt2_cdid = CPU_TO_LE16(vsig);
+ inkey.flags = CPU_TO_LE16(flags);
+
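+	/* encode the CDID in the upper bits of the XLT2/CDID field; the number
+	 * of CDID bits used depends on the block's profile configuration
+	 */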
+ switch (hw->blk[blk].prof.cdid_bits) {
+ case 0:
+ break;
+ case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+ inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
+ inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
+ break;
+ case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+ inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
+ inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
+ break;
+ case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 16
+ inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
+ inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+ break;
+ }
+
+ return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+ nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
+
+/**
+ * ice_tcam_write_entry - write TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the profile ID
+ * @idx: the entry index to write to
+ * @prof_id: profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+ u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+ struct ice_prof_tcam_entry;
+ enum ice_status status;
+
+ status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+ dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+ if (!status) {
+ hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
+ hw->blk[blk].prof.t[idx].prof_id = prof_id;
+ }
+
+ return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *ptr;
+
+ *refs = 0;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
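+	/* walk the linked list of VSIs in this VSIG and count them */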
+ ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ while (ptr) {
+ (*refs)++;
+ ptr = ptr->next_vsi;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *ent;
+
+ LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ if (ent->profile_cookie == hdl)
+ return true;
+ }
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "Characteristic list for VSI group %d not found.\n",
+ vsig);
+ return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile ID extraction sequence changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+ u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+ u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+ struct ice_pkg_es *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_VEC_TBL);
+ p = (struct ice_pkg_es *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ vec_size -
+ sizeof(p->es[0]));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->offset = CPU_TO_LE16(tmp->prof_id);
+
+ ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile ID TCAM changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
+ struct ice_prof_id_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_PROF_TCAM);
+ p = (struct ice_prof_id_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
+ p->entry[0].prof_id = tmp->prof_id;
+
+ ice_memcpy(p->entry[0].key,
+ &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+ sizeof(hw->blk[blk].prof.t->key),
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build XLT1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+ struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+ struct ice_xlt1_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_XLT1);
+ p = (struct ice_xlt1_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->offset = CPU_TO_LE16(tmp->ptype);
+ p->value[0] = tmp->ptg;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build XLT2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+ struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ struct ice_xlt2_section *p;
+ u32 id;
+
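+		/* only changes that affect VSIG membership produce an XLT2 section */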
+ switch (tmp->type) {
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ id = ice_sect_id(blk, ICE_XLT2);
+ p = (struct ice_xlt2_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->offset = CPU_TO_LE16(tmp->vsi);
+ p->value[0] = CPU_TO_LE16(tmp->vsig);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+ struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_buf_build *b;
+ struct ice_chs_chg *tmp;
+ enum ice_status status;
+ u16 pkg_sects;
+ u16 xlt1 = 0;
+ u16 xlt2 = 0;
+ u16 tcam = 0;
+ u16 es = 0;
+ u16 sects;
+
+ /* count number of sections we need */
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ switch (tmp->type) {
+ case ICE_PTG_ES_ADD:
+ if (tmp->add_ptg)
+ xlt1++;
+ if (tmp->add_prof)
+ es++;
+ break;
+ case ICE_TCAM_ADD:
+ tcam++;
+ break;
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ xlt2++;
+ break;
+ default:
+ break;
+ }
+ }
+ sects = xlt1 + xlt2 + tcam + es;
+
+ if (!sects)
+ return ICE_SUCCESS;
+
+ /* Build update package buffer */
+ b = ice_pkg_buf_alloc(hw);
+ if (!b)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_pkg_buf_reserve_section(b, sects);
+ if (status)
+ goto error_tmp;
+
+ /* Preserve order of table update: ES, TCAM, PTG, VSIG */
+ if (es) {
+ status = ice_prof_bld_es(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (tcam) {
+ status = ice_prof_bld_tcam(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt1) {
+ status = ice_prof_bld_xlt1(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt2) {
+ status = ice_prof_bld_xlt2(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+	/* After building the package buffer, check that the section count in
+	 * the buffer is non-zero and matches the number of sections detected
+	 * for the package update.
+	 */
+ pkg_sects = ice_pkg_buf_get_active_sections(b);
+ if (!pkg_sects || pkg_sects != sects) {
+ status = ICE_ERR_INVAL_SIZE;
+ goto error_tmp;
+ }
+
+ /* update package */
+ status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+ if (status == ICE_ERR_AQ_ERROR)
+ ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
+
+error_tmp:
+ ice_pkg_buf_free(hw, b);
+ return status;
+}
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTGs with a
+ * particular extraction sequence. While the hardware profile is allocated,
+ * it will not be written until the first call to ice_add_flow that specifies
+ * the ID value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es)
+{
+ u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+ ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+ struct ice_prof_map *prof;
+ enum ice_status status;
+ u8 byte = 0;
+ u8 prof_id;
+
+ ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
+ /* search for existing profile */
+ status = ice_find_prof_id(hw, blk, es, &prof_id);
+ if (status) {
+ /* allocate profile ID */
+ status = ice_alloc_prof_id(hw, blk, &prof_id);
+ if (status)
+ goto err_ice_add_prof;
+
+ /* and write new es */
+ ice_write_es(hw, blk, prof_id, es);
+ }
+
+ ice_prof_inc_ref(hw, blk, prof_id);
+
+ /* add profile info */
+
+ prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
+ if (!prof)
+ goto err_ice_add_prof;
+
+ prof->profile_cookie = id;
+ prof->prof_id = prof_id;
+ prof->ptg_cnt = 0;
+ prof->context = 0;
+
+ /* build list of ptgs */
+ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+ u8 bit;
+
+ if (!ptypes[byte]) {
+ bytes--;
+ byte++;
+ continue;
+ }
+ /* Examine 8 bits per byte */
+ for (bit = 0; bit < 8; bit++) {
+ if (ptypes[byte] & BIT(bit)) {
+ u16 ptype;
+ u8 ptg;
+ u8 m;
+
+ ptype = byte * BITS_PER_BYTE + bit;
+
+ /* The package should place all ptypes in a
+ * non-zero PTG, so the following call should
+ * never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
+
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
+
+ ice_set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
+
+				/* if no higher bits are set in this byte, exit the bit loop */
+ m = ~(u8)((1 << (bit + 1)) - 1);
+ if (!(ptypes[byte] & m))
+ break;
+ }
+ }
+
+ bytes--;
+ byte++;
+ }
+
+ LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
+ status = ICE_SUCCESS;
+
+err_ice_add_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
+/**
+ * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added. This
+ * version assumes that the caller has already acquired the prof map lock.
+ */
+static struct ice_prof_map *
+ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry = NULL;
+ struct ice_prof_map *map;
+
+ LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
+ list) {
+ if (map->profile_cookie == id) {
+ entry = map;
+ break;
+ }
+ }
+
+ return entry;
+}
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry;
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ entry = ice_search_prof_id_low(hw, blk, id);
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+
+ return entry;
+}
+
+/**
+ * ice_set_prof_context - Set context for a given profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: context
+ */
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
+{
+ struct ice_prof_map *entry;
+
+ entry = ice_search_prof_id(hw, blk, id);
+ if (entry)
+ entry->context = cntxt;
+
+ return entry;
+}
+
+/**
+ * ice_get_prof_context - Get context for a given profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: pointer to variable to receive the context
+ */
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
+{
+ struct ice_prof_map *entry;
+
+ entry = ice_search_prof_id(hw, blk, id);
+ if (entry)
+ *cntxt = entry->context;
+
+ return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG to count profiles in
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+ struct ice_vsig_prof *p;
+
+ LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a TCAM index
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+ /* Masks to invoke a never match entry */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+ dc_msk, nm_msk);
+ if (status)
+ return status;
+
+ /* release the TCAM entry */
+ status = ice_free_tcam_ent(hw, blk, idx);
+
+ return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof: pointer to profile structure to remove
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_vsig_prof *prof)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < prof->tcam_count; i++) {
+ if (prof->tcam[i].in_use) {
+ prof->tcam[i].in_use = false;
+ status = ice_rel_tcam_idx(hw, blk,
+ prof->tcam[i].tcam_idx);
+ if (status)
+ return ICE_ERR_HW_TABLE;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *vsi_cur;
+ struct ice_vsig_prof *d, *t;
+ enum ice_status status;
+
+ /* remove TCAM entries */
+ LIST_FOR_EACH_ENTRY_SAFE(d, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ status = ice_rem_prof_id(hw, blk, d);
+ if (status)
+ return status;
+
+ LIST_DEL(&d->list);
+ ice_free(hw, d);
+ }
+
+	/* Move all VSIs associated with this VSIG to the default VSIG */
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the list
+ * and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+ struct ice_chs_chg *p;
+
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->type = ICE_VSIG_REM;
+ p->orig_vsig = vsig;
+ p->vsig = ICE_DEFAULT_VSIG;
+ p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+ LIST_ADD(&p->list_entry, chg);
+
+ vsi_cur = tmp;
+ } while (vsi_cur);
+ }
+
+ return ice_vsig_free(hw, blk, vsig);
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *p, *t;
+ enum ice_status status;
+
+ LIST_FOR_EACH_ENTRY_SAFE(p, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ if (p->profile_cookie == hdl) {
+ if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+ /* this is the last profile, remove the VSIG */
+ return ice_rem_vsig(hw, blk, vsig, chg);
+
+ status = ice_rem_prof_id(hw, blk, p);
+ if (!status) {
+ LIST_DEL(&p->list);
+ ice_free(hw, p);
+ }
+ return status;
+ }
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_chs_chg *del, *tmp;
+ struct LIST_HEAD_TYPE chg;
+ enum ice_status status;
+ u16 i;
+
+ INIT_LIST_HEAD(&chg);
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++) {
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+ if (ice_has_prof_vsig(hw, blk, i, id)) {
+ status = ice_rem_prof_id_vsig(hw, blk, i, id,
+ &chg);
+ if (status)
+ goto err_ice_rem_flow_all;
+ }
+ }
+ }
+
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+ LIST_DEL(&del->list_entry);
+ ice_free(hw, del);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the ID parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *pmap;
+ enum ice_status status;
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
+ pmap = ice_search_prof_id_low(hw, blk, id);
+ if (!pmap) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_rem_prof;
+ }
+
+ /* remove all flows with this profile */
+ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+ if (status)
+ goto err_ice_rem_prof;
+
+ /* dereference profile, and possibly remove */
+ ice_prof_dec_ref(hw, blk, pmap->prof_id);
+
+ LIST_DEL(&pmap->list);
+ ice_free(hw, pmap);
+
+err_ice_rem_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
+/**
+ * ice_get_prof - get profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_prof_map *map;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ if (!hw->blk[blk].es.written[map->prof_id]) {
+ /* add ES to change list */
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ goto err_ice_get_prof;
+
+ p->type = ICE_PTG_ES_ADD;
+ p->ptype = 0;
+ p->ptg = map->ptg[i];
+ p->add_ptg = 0;
+
+ p->add_prof = 1;
+ p->prof_id = map->prof_id;
+
+ hw->blk[blk].es.written[map->prof_id] = true;
+
+ LIST_ADD(&p->list_entry, chg);
+ }
+ }
+
+ return ICE_SUCCESS;
+
+err_ice_get_prof:
+ /* let caller clean up the change list */
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct LIST_HEAD_TYPE *lst)
+{
+ struct ice_vsig_prof *ent1, *ent2;
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ struct ice_vsig_prof *p;
+
+ /* copy to the input list */
+ p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
+ ICE_NONDMA_TO_NONDMA);
+ if (!p)
+ goto err_ice_get_profs_vsig;
+
+ LIST_ADD_TAIL(&p->list, lst);
+ }
+
+ return ICE_SUCCESS;
+
+err_ice_get_profs_vsig:
+ LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
+ LIST_DEL(&ent1->list);
+ ice_free(hw, ent1);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+ struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *p;
+ u16 i;
+
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->profile_cookie = map->profile_cookie;
+ p->prof_id = map->prof_id;
+ p->tcam_count = map->ptg_cnt;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ p->tcam[i].prof_id = map->prof_id;
+ p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+ p->tcam[i].ptg = map->ptg[i];
+ }
+
+ LIST_ADD(&p->list, lst);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 orig_vsig;
+
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
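+	/* record the VSI's current VSIG before moving it, so the change list
+	 * entry can note where it came from
+	 */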
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (!status)
+ status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
+ if (status) {
+ ice_free(hw, p);
+ return status;
+ }
+
+ p->type = ICE_VSI_MOVE;
+ p->vsi = vsi;
+ p->orig_vsig = orig_vsig;
+ p->vsig = vsig;
+
+ LIST_ADD(&p->list_entry, chg);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_chs_chg *pos, *tmp;
+
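+	/* drop any pending TCAM-add change that references this index */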
+ LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+ LIST_DEL(&tmp->list_entry);
+ ice_free(hw, tmp);
+ }
+ }
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable TCAM change
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the VSIG of the TCAM entry
+ * @tcam: pointer to the TCAM info structure of the TCAM entry to enable or disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable TCAM entry to the change list
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+ u16 vsig, struct ice_tcam_inf *tcam,
+ struct LIST_HEAD_TYPE *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ /* if disabling, free the TCAM */
+ if (!enable) {
+ status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+		/* if we have already created a change for this TCAM entry, then
+		 * we need to remove that entry, in order to prevent writing to
+		 * a TCAM entry we will no longer own.
+		 */
+ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
+ tcam->tcam_idx = 0;
+ tcam->in_use = 0;
+ return status;
+ }
+
+ /* for re-enabling, reallocate a TCAM */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ if (status)
+ return status;
+
+ /* add TCAM to change list */
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+ tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+ nm_msk);
+ if (status)
+ goto err_ice_prof_tcam_ena_dis;
+
+ tcam->in_use = 1;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = tcam->prof_id;
+ p->ptg = tcam->ptg;
+ p->vsig = 0;
+ p->tcam_idx = tcam->tcam_idx;
+
+ /* log change */
+ LIST_ADD(&p->list_entry, chg);
+
+ return ICE_SUCCESS;
+
+err_ice_prof_tcam_ena_dis:
+ ice_free(hw, p);
+ return status;
+}
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_vsig_prof *t;
+ u16 idx;
+
+ ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ /* Priority is based on the order in which the profiles are added. The
+ * newest added profile has highest priority and the oldest added
+ * profile has the lowest priority. Since the profile property list for
+ * a VSIG is sorted from newest to oldest, this code traverses the list
+ * in order and enables the first of each PTG that it finds (that is not
+ * already enabled); it also disables any duplicate PTGs that it finds
+ * in the older profiles (that are currently enabled).
+ */
+
+ LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ u16 i;
+
+ for (i = 0; i < t->tcam_count; i++) {
+ bool used;
+
+ /* Scan the priorities from newest to oldest.
+ * Make sure that the newest profiles take priority.
+ */
+ used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg);
+
+ if (used && t->tcam[i].in_use) {
+				/* need to mark this PTG as never match, as it
+				 * was already in use and is therefore a
+				 * duplicate (and lower priority)
+				 */
+ status = ice_prof_tcam_ena_dis(hw, blk, false,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ } else if (!used && !t->tcam[i].in_use) {
+				/* need to enable this PTG, as it is not in use
+				 * and not enabled (highest priority)
+				 */
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, true,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ }
+
+ /* keep track of used ptgs */
+ ice_set_bit(t->tcam[i].ptg, ptgs_used);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ bool rev, struct LIST_HEAD_TYPE *chg)
+{
+ /* Masks that ignore flags */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *t;
+ struct ice_chs_chg *p;
+ u16 vsig_idx, i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Error, if this VSIG already has this profile */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+ return ICE_ERR_ALREADY_EXISTS;
+
+ /* new VSIG profile structure */
+ t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+ if (!t)
+ return ICE_ERR_NO_MEMORY;
+
+ t->profile_cookie = map->profile_cookie;
+ t->prof_id = map->prof_id;
+ t->tcam_count = map->ptg_cnt;
+
+ /* create TCAM entries */
+ for (i = 0; i < map->ptg_cnt; i++) {
+ enum ice_status status;
+ u16 tcam_idx;
+
+ /* add TCAM to change list */
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ goto err_ice_add_prof_id_vsig;
+
+ /* allocate the TCAM entry index */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ if (status) {
+ ice_free(hw, p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ t->tcam[i].ptg = map->ptg[i];
+ t->tcam[i].prof_id = map->prof_id;
+ t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].in_use = true;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = t->tcam[i].prof_id;
+ p->ptg = t->tcam[i].ptg;
+ p->vsig = vsig;
+ p->tcam_idx = t->tcam[i].tcam_idx;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+ t->tcam[i].prof_id,
+ t->tcam[i].ptg, vsig, 0, 0,
+ vl_msk, dc_msk, nm_msk);
+ if (status) {
+ ice_free(hw, p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ /* log change */
+ LIST_ADD(&p->list_entry, chg);
+ }
+
+ /* add profile to VSIG */
+ vsig_idx = vsig & ICE_VSIG_IDX_M;
+ if (rev)
+ LIST_ADD_TAIL(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+ else
+ LIST_ADD(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+
+ return ICE_SUCCESS;
+
+err_ice_add_prof_id_vsig:
+ /* let caller clean up the change list */
+ ice_free(hw, t);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 new_vsig;
+
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ new_vsig = ice_vsig_alloc(hw, blk);
+ if (!new_vsig) {
+ status = ICE_ERR_HW_TABLE;
+ goto err_ice_create_prof_id_vsig;
+ }
+
+ status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ p->type = ICE_VSIG_ADD;
+ p->vsi = vsi;
+ p->orig_vsig = ICE_DEFAULT_VSIG;
+ p->vsig = new_vsig;
+
+ LIST_ADD(&p->list_entry, chg);
+
+ return ICE_SUCCESS;
+
+err_ice_create_prof_id_vsig:
+ /* let caller clean up the change list */
+ ice_free(hw, p);
+ return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profiles that will be added to the VSIG
+ * @new_vsig: return of new VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = ice_vsig_alloc(hw, blk);
+ if (!vsig)
+ return ICE_ERR_HW_TABLE;
+
+ status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+ if (status)
+ return status;
+
+ LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
+ /* Reverse the order here since we are copying the list */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+ true, chg);
+ if (status)
+ return status;
+ }
+
+ *new_vsig = vsig;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+ struct ice_vsig_prof *t;
+ struct LIST_HEAD_TYPE lst;
+ enum ice_status status;
+
+ INIT_LIST_HEAD(&lst);
+
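+	/* build a temporary single-profile list and search for a VSIG whose
+	 * characteristics match it exactly
+	 */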
+ t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+ if (!t)
+ return false;
+
+ t->profile_cookie = hdl;
+ LIST_ADD(&t->list, &lst);
+
+ status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+ LIST_DEL(&t->list);
+ ice_free(hw, t);
+
+ return status == ICE_SUCCESS;
+}
+
+/**
+ * ice_add_vsi_flow - add VSI flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: input VSI
+ * @vsig: target VSIG to include the input VSI
+ *
+ * Calling this function will add the VSI to a given VSIG and
+ * update the HW tables accordingly. This call can be used to
+ * add multiple VSIs to a VSIG if we know beforehand that those
+ * VSIs have the same characteristics as the VSIG. This saves the
+ * time otherwise spent creating a new VSIG and TCAM entries, only
+ * to roll them back once a matching VSIG is found.
+ */
+enum ice_status
+ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_chs_chg *tmp, *del;
+ struct LIST_HEAD_TYPE chg;
+ enum ice_status status;
+
+ /* if target VSIG is default the move is invalid */
+ if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
+ return ICE_ERR_PARAM;
+
+ INIT_LIST_HEAD(&chg);
+
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ /* update hardware if success */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+ LIST_DEL(&del->list_entry);
+ ice_free(hw, del);
+ }
+
+ return status;
+}
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to enable with the profile specified by ID
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the ID parameter for the specified VSI. Once
+ * successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct LIST_HEAD_TYPE union_lst;
+ struct ice_chs_chg *tmp, *del;
+ struct LIST_HEAD_TYPE chg;
+ enum ice_status status;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&union_lst);
+ INIT_LIST_HEAD(&chg);
+
+ /* Get profile */
+ status = ice_get_prof(hw, blk, hdl, &chg);
+ if (status)
+ return status;
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool only_vsi;
+ u16 or_vsig;
+ u16 ref;
+
+ /* found in VSIG */
+ or_vsig = vsig;
+
+ /* make sure that there is no overlap/conflict between the new
+ * characteristics and the existing ones; we don't support that
+ * scenario
+ */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto err_ice_add_prof_id_flow;
+ }
+
+ /* last VSI in the VSIG? */
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ /* create a union of the current profiles and the one being
+ * added
+ */
+ status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+		/* search for an existing VSIG with an exact characteristic match */
+ status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+ if (!status) {
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* VSI has been moved out of or_vsig. If the or_vsig had
+ * only that VSI it is now empty and can be removed.
+ */
+ if (only_vsi) {
+ status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else if (only_vsi) {
+ /* If the original VSIG only contains one VSI, then it
+ * will be the requesting VSI. In this case the VSI is
+ * not sharing entries and we can simply add the new
+ * profile to the VSIG.
+ */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* No match, so we need a new VSIG */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &union_lst, &vsig,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust