aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/acpica/acpi_wakeup.c4
-rw-r--r--sys/amd64/include/param.h11
-rw-r--r--sys/arm/allwinner/aw_gpio.c4
-rw-r--r--sys/arm/broadcom/bcm2835/bcm2835_gpio.c4
-rw-r--r--sys/arm/freescale/imx/imx_gpio.c4
-rw-r--r--sys/arm/mv/mvebu_gpio.c4
-rw-r--r--sys/arm/nvidia/tegra_gpio.c4
-rw-r--r--sys/arm/ti/ti_gpio.c4
-rw-r--r--sys/arm64/include/armreg.h5
-rw-r--r--sys/arm64/include/hypervisor.h96
-rw-r--r--sys/arm64/rockchip/rk_gpio.c4
-rw-r--r--sys/arm64/vmm/arm64.h7
-rw-r--r--sys/arm64/vmm/vmm.c34
-rw-r--r--sys/arm64/vmm/vmm_hyp.c19
-rw-r--r--sys/arm64/vmm/vmm_reset.c7
-rw-r--r--sys/cam/ata/ata_da.c9
-rw-r--r--sys/cam/nvme/nvme_da.c5
-rw-r--r--sys/compat/linuxkpi/common/include/linux/pci.h6
-rw-r--r--sys/compat/linuxkpi/common/src/linux_acpi.c33
-rw-r--r--sys/compat/linuxkpi/common/src/linux_pci.c62
-rw-r--r--sys/conf/files28
-rw-r--r--sys/conf/kern.post.mk15
-rw-r--r--sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md2
-rw-r--r--sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md5
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh2
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh11
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh25
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh2
-rw-r--r--sys/contrib/openzfs/META2
-rw-r--r--sys/contrib/openzfs/cmd/zdb/zdb.c20
-rw-r--r--sys/contrib/openzfs/cmd/zfs/zfs_main.c38
-rw-r--r--sys/contrib/openzfs/cmd/zinject/zinject.c81
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_iter.c118
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_main.c55
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_util.h3
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_vdev.c26
-rw-r--r--sys/contrib/openzfs/contrib/intel_qat/readme.md2
-rw-r--r--sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py2
-rw-r--r--sys/contrib/openzfs/etc/init.d/README.md2
-rw-r--r--sys/contrib/openzfs/include/libzfs.h2
-rw-r--r--sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h18
-rw-r--r--sys/contrib/openzfs/include/sys/fs/zfs.h2
-rw-r--r--sys/contrib/openzfs/include/sys/range_tree.h5
-rw-r--r--sys/contrib/openzfs/include/sys/spa.h3
-rw-r--r--sys/contrib/openzfs/include/sys/spa_impl.h1
-rw-r--r--sys/contrib/openzfs/include/sys/zfs_ioctl.h1
-rw-r--r--sys/contrib/openzfs/include/sys/zio.h1
-rw-r--r--sys/contrib/openzfs/lib/libuutil/libuutil.abi105
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs.abi172
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs_config.c17
-rw-r--r--sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi105
-rw-r--r--sys/contrib/openzfs/man/man8/zinject.814
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-upgrade.84
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c23
-rw-r--r--sys/contrib/openzfs/module/zcommon/zfs_prop.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/arc.c1
-rw-r--r--sys/contrib/openzfs/module/zfs/dnode.c65
-rw-r--r--sys/contrib/openzfs/module/zfs/mmp.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/range_tree.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_config.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_misc.c23
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev.c15
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_label.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/zio.c35
-rw-r--r--sys/contrib/openzfs/module/zfs/zio_inject.c38
-rw-r--r--sys/contrib/openzfs/module/zstd/zfs_zstd.c58
-rwxr-xr-xsys/contrib/openzfs/scripts/zfs-tests.sh9
-rw-r--r--sys/contrib/openzfs/tests/runfiles/common.run20
-rw-r--r--sys/contrib/openzfs/tests/runfiles/sanity.run2
-rwxr-xr-xsys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in61
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am11
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh75
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib42
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_create.ksh (renamed from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh)101
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh204
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh126
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh30
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh32
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib235
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh90
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh80
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib6
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh14
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh53
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh40
-rw-r--r--sys/dev/acpica/acpi.c6
-rw-r--r--sys/dev/acpica/acpi_apei.c2
-rw-r--r--sys/dev/acpica/acpi_powerres.c274
-rw-r--r--sys/dev/acpica/acpi_timer.c11
-rw-r--r--sys/dev/acpica/acpivar.h1
-rw-r--r--sys/dev/ahci/ahci_pci.c46
-rw-r--r--sys/dev/ath/if_ath_tx.c14
-rw-r--r--sys/dev/bnxt/bnxt_en/if_bnxt.c1
-rw-r--r--sys/dev/cxgbe/adapter.h91
-rw-r--r--sys/dev/cxgbe/common/common.h177
-rw-r--r--sys/dev/cxgbe/common/t4_hw.c1967
-rw-r--r--sys/dev/cxgbe/common/t4_hw.h135
-rw-r--r--sys/dev/cxgbe/common/t4_msg.h3011
-rw-r--r--sys/dev/cxgbe/common/t4_regs.h27273
-rw-r--r--sys/dev/cxgbe/common/t4_regs_values.h24
-rw-r--r--sys/dev/cxgbe/common/t4_tcb.h182
-rw-r--r--sys/dev/cxgbe/crypto/t4_crypto.c54
-rw-r--r--sys/dev/cxgbe/crypto/t4_crypto.h1
-rw-r--r--sys/dev/cxgbe/crypto/t4_keyctx.c30
-rw-r--r--sys/dev/cxgbe/crypto/t6_kern_tls.c2
-rw-r--r--sys/dev/cxgbe/crypto/t7_kern_tls.c2196
-rw-r--r--sys/dev/cxgbe/cudbg/cudbg_flash_utils.c90
-rw-r--r--sys/dev/cxgbe/cudbg/cudbg_lib.c11
-rw-r--r--sys/dev/cxgbe/cudbg/cudbg_lib_common.h7
-rw-r--r--sys/dev/cxgbe/cxgbei/icl_cxgbei.c98
-rw-r--r--sys/dev/cxgbe/firmware/t4fw_interface.h1320
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg.txt644
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt530
-rw-r--r--sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt644
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/device.c20
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h5
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/mem.c103
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/resource.c38
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/t4.h1
-rw-r--r--sys/dev/cxgbe/offload.h12
-rw-r--r--sys/dev/cxgbe/t4_filter.c476
-rw-r--r--sys/dev/cxgbe/t4_ioctl.h17
-rw-r--r--sys/dev/cxgbe/t4_iov.c67
-rw-r--r--sys/dev/cxgbe/t4_l2t.c14
-rw-r--r--sys/dev/cxgbe/t4_l2t.h2
-rw-r--r--sys/dev/cxgbe/t4_main.c1575
-rw-r--r--sys/dev/cxgbe/t4_mp_ring.c81
-rw-r--r--sys/dev/cxgbe/t4_mp_ring.h1
-rw-r--r--sys/dev/cxgbe/t4_netmap.c23
-rw-r--r--sys/dev/cxgbe/t4_sched.c6
-rw-r--r--sys/dev/cxgbe/t4_sge.c209
-rw-r--r--sys/dev/cxgbe/t4_tpt.c193
-rw-r--r--sys/dev/cxgbe/t4_tracer.c5
-rw-r--r--sys/dev/cxgbe/t4_vf.c65
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c43
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c20
-rw-r--r--sys/dev/cxgbe/tom/t4_ddp.c20
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c7
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.c288
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.h1
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c141
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h10
-rw-r--r--sys/dev/cxgbe/tom/t4_tom_l2t.c2
-rw-r--r--sys/dev/gpio/gpioc.c31
-rw-r--r--sys/dev/gpio/pl061.c3
-rw-r--r--sys/dev/iwx/if_iwx.c187
-rw-r--r--sys/dev/iwx/if_iwxreg.h4
-rw-r--r--sys/dev/nvme/nvme_private.h6
-rw-r--r--sys/dev/pci/pci.c10
-rw-r--r--sys/dev/sound/pci/hda/hdaa.c53
-rw-r--r--sys/dev/virtio/network/if_vtnet.c2
-rw-r--r--sys/dev/vmware/vmxnet3/if_vmx.c7
-rw-r--r--sys/dev/vt/vt_core.c8
-rw-r--r--sys/dev/watchdog/watchdog.c2
-rw-r--r--sys/dev/xen/control/control.c7
-rw-r--r--sys/fs/nullfs/null.h10
-rw-r--r--sys/fs/nullfs/null_subr.c94
-rw-r--r--sys/fs/nullfs/null_vnops.c173
-rw-r--r--sys/geom/part/g_part.c14
-rw-r--r--sys/i386/acpica/acpi_wakeup.c4
-rw-r--r--sys/isa/isa_common.c2
-rw-r--r--sys/kern/kern_exit.c46
-rw-r--r--sys/kern/kern_lock.c6
-rw-r--r--sys/kern/kern_mutex.c29
-rw-r--r--sys/kern/kern_sx.c11
-rw-r--r--sys/kern/link_elf.c6
-rw-r--r--sys/kern/link_elf_obj.c8
-rw-r--r--sys/kern/sys_generic.c36
-rw-r--r--sys/kern/sys_pipe.c5
-rw-r--r--sys/kern/vfs_cache.c52
-rw-r--r--sys/kern/vfs_vnops.c22
-rw-r--r--sys/modules/cxgbe/Makefile4
-rw-r--r--sys/modules/cxgbe/if_cxgbe/Makefile2
-rw-r--r--sys/modules/cxgbe/t7_firmware/Makefile23
-rw-r--r--sys/modules/zfs/zfs_config.h6
-rw-r--r--sys/modules/zfs/zfs_gitrev.h2
-rw-r--r--sys/net/iflib.c117
-rw-r--r--sys/net/iflib.h2
-rw-r--r--sys/net/pfvar.h7
-rw-r--r--sys/net80211/ieee80211.c28
-rw-r--r--sys/net80211/ieee80211_crypto.c87
-rw-r--r--sys/net80211/ieee80211_var.h2
-rw-r--r--sys/netgraph/netflow/netflow.c6
-rw-r--r--sys/netinet/ip_carp.c27
-rw-r--r--sys/netinet/sctp_lock_bsd.h6
-rw-r--r--sys/netinet/tcp_syncache.c82
-rw-r--r--sys/netinet/tcp_syncache.h2
-rw-r--r--sys/netinet6/in6.c17
-rw-r--r--sys/netinet6/in6_ifattach.c6
-rw-r--r--sys/netinet6/in6_proto.c4
-rw-r--r--sys/netinet6/in6_src.c54
-rw-r--r--sys/netinet6/in6_var.h2
-rw-r--r--sys/netinet6/ip6_var.h4
-rw-r--r--sys/netinet6/nd6.h4
-rw-r--r--sys/netinet6/nd6_nbr.c129
-rw-r--r--sys/netinet6/nd6_rtr.c9
-rw-r--r--sys/netlink/netlink_snl.h6
-rw-r--r--sys/netpfil/pf/pf.c582
-rw-r--r--sys/netpfil/pf/pf_lb.c4
-rw-r--r--sys/security/mac_do/mac_do.c6
-rw-r--r--sys/sys/eventhandler.h3
-rw-r--r--sys/sys/mutex.h28
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/vnode.h1
-rw-r--r--sys/tools/gdb/README.txt21
-rw-r--r--sys/tools/gdb/acttrace.py48
-rw-r--r--sys/tools/gdb/freebsd.py75
-rw-r--r--sys/tools/gdb/pcpu.py77
-rw-r--r--sys/tools/gdb/selftest.py31
-rw-r--r--sys/tools/gdb/selftest.sh23
-rw-r--r--sys/tools/gdb/vnet.py100
-rw-r--r--sys/tools/kernel-gdb.py15
-rw-r--r--sys/ufs/ufs/ufs_vnops.c8
-rw-r--r--sys/vm/uma_core.c22
-rw-r--r--sys/x86/include/mca.h25
-rw-r--r--sys/x86/x86/mca.c355
217 files changed, 42895 insertions, 5010 deletions
diff --git a/sys/amd64/acpica/acpi_wakeup.c b/sys/amd64/acpica/acpi_wakeup.c
index 99565fbb69ca..8cada2f4f911 100644
--- a/sys/amd64/acpica/acpi_wakeup.c
+++ b/sys/amd64/acpica/acpi_wakeup.c
@@ -74,7 +74,7 @@ extern int acpi_susp_bounce;
extern struct susppcb **susppcbs;
static cpuset_t suspcpus;
-static void acpi_stop_beep(void *);
+static void acpi_stop_beep(void *, enum power_stype);
static int acpi_wakeup_ap(struct acpi_softc *, int);
static void acpi_wakeup_cpus(struct acpi_softc *);
@@ -88,7 +88,7 @@ static void acpi_wakeup_cpus(struct acpi_softc *);
} while (0)
static void
-acpi_stop_beep(void *arg)
+acpi_stop_beep(void *arg, enum power_stype stype)
{
if (acpi_resume_beep != 0)
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index 5a9c3162e14c..0654bb9de790 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -150,6 +150,15 @@
(((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_high) || \
((va) >= kva_layout.km_low && (va) < kva_layout.km_high))
-#define SC_TABLESIZE 1024 /* Must be power of 2. */
+/*
+ * Must be power of 2.
+ *
+ * Perhaps should be autosized on boot based on found ncpus.
+ */
+#if MAXCPU > 256
+#define SC_TABLESIZE 2048
+#else
+#define SC_TABLESIZE 1024
+#endif
#endif /* !_AMD64_INCLUDE_PARAM_H_ */
diff --git a/sys/arm/allwinner/aw_gpio.c b/sys/arm/allwinner/aw_gpio.c
index f1b6f0bc9193..c90d61f7b45e 100644
--- a/sys/arm/allwinner/aw_gpio.c
+++ b/sys/arm/allwinner/aw_gpio.c
@@ -1531,6 +1531,10 @@ static device_method_t aw_gpio_methods[] = {
DEVMETHOD(device_attach, aw_gpio_attach),
DEVMETHOD(device_detach, aw_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, aw_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, aw_gpio_pic_enable_intr),
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_gpio.c b/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
index 93ee5d7c8bd3..ff5c4043dd86 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
@@ -1321,6 +1321,10 @@ static device_method_t bcm_gpio_methods[] = {
DEVMETHOD(device_attach, bcm_gpio_attach),
DEVMETHOD(device_detach, bcm_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, bcm_gpio_get_bus),
DEVMETHOD(gpio_pin_max, bcm_gpio_pin_max),
diff --git a/sys/arm/freescale/imx/imx_gpio.c b/sys/arm/freescale/imx/imx_gpio.c
index 3b19ef1b5e67..60b8d79ab27e 100644
--- a/sys/arm/freescale/imx/imx_gpio.c
+++ b/sys/arm/freescale/imx/imx_gpio.c
@@ -918,6 +918,10 @@ static device_method_t imx51_gpio_methods[] = {
DEVMETHOD(device_detach, imx51_gpio_detach),
#ifdef INTRNG
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, gpio_pic_enable_intr),
diff --git a/sys/arm/mv/mvebu_gpio.c b/sys/arm/mv/mvebu_gpio.c
index 4cc9b7030a65..c27d5a204052 100644
--- a/sys/arm/mv/mvebu_gpio.c
+++ b/sys/arm/mv/mvebu_gpio.c
@@ -839,6 +839,10 @@ static device_method_t mvebu_gpio_methods[] = {
DEVMETHOD(device_attach, mvebu_gpio_attach),
DEVMETHOD(device_detach, mvebu_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, mvebu_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, mvebu_gpio_pic_enable_intr),
diff --git a/sys/arm/nvidia/tegra_gpio.c b/sys/arm/nvidia/tegra_gpio.c
index aa34537352be..ce24fccd3a40 100644
--- a/sys/arm/nvidia/tegra_gpio.c
+++ b/sys/arm/nvidia/tegra_gpio.c
@@ -853,6 +853,10 @@ static device_method_t tegra_gpio_methods[] = {
DEVMETHOD(device_attach, tegra_gpio_attach),
DEVMETHOD(device_detach, tegra_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, tegra_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, tegra_gpio_pic_enable_intr),
diff --git a/sys/arm/ti/ti_gpio.c b/sys/arm/ti/ti_gpio.c
index 01b9597a4418..b7e9909b8548 100644
--- a/sys/arm/ti/ti_gpio.c
+++ b/sys/arm/ti/ti_gpio.c
@@ -1048,6 +1048,10 @@ static device_method_t ti_gpio_methods[] = {
DEVMETHOD(device_attach, ti_gpio_attach),
DEVMETHOD(device_detach, ti_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, ti_gpio_get_bus),
DEVMETHOD(gpio_pin_max, ti_gpio_pin_max),
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index da051e8f7c8a..393d6d89da0c 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -2180,6 +2180,7 @@
#define OSLAR_EL1_CRn 1
#define OSLAR_EL1_CRm 0
#define OSLAR_EL1_op2 4
+#define OSLAR_OSLK (0x1ul << 0)
/* OSLSR_EL1 */
#define OSLSR_EL1_op0 2
@@ -2187,6 +2188,10 @@
#define OSLSR_EL1_CRn 1
#define OSLSR_EL1_CRm 1
#define OSLSR_EL1_op2 4
+#define OSLSR_OSLM_1 (0x1ul << 3)
+#define OSLSR_nTT (0x1ul << 2)
+#define OSLSR_OSLK (0x1ul << 1)
+#define OSLSR_OSLM_0 (0x1ul << 0)
/* PAR_EL1 - Physical Address Register */
#define PAR_F_SHIFT 0
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index 04e15b55b218..8feabd2b981b 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -247,6 +247,54 @@
#define ICC_SRE_EL2_SRE (1UL << 0)
#define ICC_SRE_EL2_EN (1UL << 3)
+/* MDCR_EL2 - Hyp Debug Control Register */
+#define MDCR_EL2_HPMN_MASK 0x1f
+#define MDCR_EL2_HPMN_SHIFT 0
+#define MDCR_EL2_TPMCR_SHIFT 5
+#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
+#define MDCR_EL2_TPM_SHIFT 6
+#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
+#define MDCR_EL2_HPME_SHIFT 7
+#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
+#define MDCR_EL2_TDE_SHIFT 8
+#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
+#define MDCR_EL2_TDA_SHIFT 9
+#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
+#define MDCR_EL2_TDOSA_SHIFT 10
+#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
+#define MDCR_EL2_TDRA_SHIFT 11
+#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
+#define MDCR_EL2_E2PB_SHIFT 12
+#define MDCR_EL2_E2PB_MASK (0x3UL << MDCR_EL2_E2PB_SHIFT)
+#define MDCR_EL2_TPMS_SHIFT 14
+#define MDCR_EL2_TPMS (0x1UL << MDCR_EL2_TPMS_SHIFT)
+#define MDCR_EL2_EnSPM_SHIFT 15
+#define MDCR_EL2_EnSPM (0x1UL << MDCR_EL2_EnSPM_SHIFT)
+#define MDCR_EL2_HPMD_SHIFT 17
+#define MDCR_EL2_HPMD (0x1UL << MDCR_EL2_HPMD_SHIFT)
+#define MDCR_EL2_TTRF_SHIFT 19
+#define MDCR_EL2_TTRF (0x1UL << MDCR_EL2_TTRF_SHIFT)
+#define MDCR_EL2_HCCD_SHIFT 23
+#define MDCR_EL2_HCCD (0x1UL << MDCR_EL2_HCCD_SHIFT)
+#define MDCR_EL2_E2TB_SHIFT 24
+#define MDCR_EL2_E2TB_MASK (0x3UL << MDCR_EL2_E2TB_SHIFT)
+#define MDCR_EL2_HLP_SHIFT 26
+#define MDCR_EL2_HLP (0x1UL << MDCR_EL2_HLP_SHIFT)
+#define MDCR_EL2_TDCC_SHIFT 27
+#define MDCR_EL2_TDCC (0x1UL << MDCR_EL2_TDCC_SHIFT)
+#define MDCR_EL2_MTPME_SHIFT 28
+#define MDCR_EL2_MTPME (0x1UL << MDCR_EL2_MTPME_SHIFT)
+#define MDCR_EL2_HPMFZO_SHIFT 29
+#define MDCR_EL2_HPMFZO (0x1UL << MDCR_EL2_HPMFZO_SHIFT)
+#define MDCR_EL2_PMSSE_SHIFT 30
+#define MDCR_EL2_PMSSE_MASK (0x3UL << MDCR_EL2_PMSSE_SHIFT)
+#define MDCR_EL2_HPMFZS_SHIFT 36
+#define MDCR_EL2_HPMFZS (0x1UL << MDCR_EL2_HPMFZS_SHIFT)
+#define MDCR_EL2_PMEE_SHIFT 40
+#define MDCR_EL2_PMEE_MASK (0x3UL << MDCR_EL2_PMEE_SHIFT)
+#define MDCR_EL2_EBWE_SHIFT 43
+#define MDCR_EL2_EBWE (0x1UL << MDCR_EL2_EBWE_SHIFT)
+
/* SCTLR_EL2 - System Control Register */
#define SCTLR_EL2_RES1 0x30c50830
#define SCTLR_EL2_M_SHIFT 0
@@ -356,52 +404,4 @@
/* Assumed to be 0 by locore.S */
#define VTTBR_HOST 0x0000000000000000
-/* MDCR_EL2 - Hyp Debug Control Register */
-#define MDCR_EL2_HPMN_MASK 0x1f
-#define MDCR_EL2_HPMN_SHIFT 0
-#define MDCR_EL2_TPMCR_SHIFT 5
-#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
-#define MDCR_EL2_TPM_SHIFT 6
-#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
-#define MDCR_EL2_HPME_SHIFT 7
-#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
-#define MDCR_EL2_TDE_SHIFT 8
-#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
-#define MDCR_EL2_TDA_SHIFT 9
-#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
-#define MDCR_EL2_TDOSA_SHIFT 10
-#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
-#define MDCR_EL2_TDRA_SHIFT 11
-#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
-#define MDCR_E2PB_SHIFT 12
-#define MDCR_E2PB_MASK (0x3UL << MDCR_E2PB_SHIFT)
-#define MDCR_TPMS_SHIFT 14
-#define MDCR_TPMS (0x1UL << MDCR_TPMS_SHIFT)
-#define MDCR_EnSPM_SHIFT 15
-#define MDCR_EnSPM (0x1UL << MDCR_EnSPM_SHIFT)
-#define MDCR_HPMD_SHIFT 17
-#define MDCR_HPMD (0x1UL << MDCR_HPMD_SHIFT)
-#define MDCR_TTRF_SHIFT 19
-#define MDCR_TTRF (0x1UL << MDCR_TTRF_SHIFT)
-#define MDCR_HCCD_SHIFT 23
-#define MDCR_HCCD (0x1UL << MDCR_HCCD_SHIFT)
-#define MDCR_E2TB_SHIFT 24
-#define MDCR_E2TB_MASK (0x3UL << MDCR_E2TB_SHIFT)
-#define MDCR_HLP_SHIFT 26
-#define MDCR_HLP (0x1UL << MDCR_HLP_SHIFT)
-#define MDCR_TDCC_SHIFT 27
-#define MDCR_TDCC (0x1UL << MDCR_TDCC_SHIFT)
-#define MDCR_MTPME_SHIFT 28
-#define MDCR_MTPME (0x1UL << MDCR_MTPME_SHIFT)
-#define MDCR_HPMFZO_SHIFT 29
-#define MDCR_HPMFZO (0x1UL << MDCR_HPMFZO_SHIFT)
-#define MDCR_PMSSE_SHIFT 30
-#define MDCR_PMSSE_MASK (0x3UL << MDCR_PMSSE_SHIFT)
-#define MDCR_HPMFZS_SHIFT 36
-#define MDCR_HPMFZS (0x1UL << MDCR_HPMFZS_SHIFT)
-#define MDCR_PMEE_SHIFT 40
-#define MDCR_PMEE_MASK (0x3UL << MDCR_PMEE_SHIFT)
-#define MDCR_EBWE_SHIFT 43
-#define MDCR_EBWE (0x1UL << MDCR_EBWE_SHIFT)
-
#endif /* !_MACHINE_HYPERVISOR_H_ */
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
index 145d9769f35f..8da37d516802 100644
--- a/sys/arm64/rockchip/rk_gpio.c
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -851,6 +851,10 @@ static device_method_t rk_gpio_methods[] = {
DEVMETHOD(device_attach, rk_gpio_attach),
DEVMETHOD(device_detach, rk_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, rk_gpio_get_bus),
DEVMETHOD(gpio_pin_max, rk_gpio_pin_max),
diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
index 82c4481b8692..f9b74aef7188 100644
--- a/sys/arm64/vmm/arm64.h
+++ b/sys/arm64/vmm/arm64.h
@@ -78,14 +78,16 @@ struct hypctx {
uint64_t pmcr_el0; /* Performance Monitors Control Register */
uint64_t pmccntr_el0;
uint64_t pmccfiltr_el0;
+ uint64_t pmuserenr_el0;
+ uint64_t pmselr_el0;
+ uint64_t pmxevcntr_el0;
uint64_t pmcntenset_el0;
uint64_t pmintenset_el1;
uint64_t pmovsset_el0;
- uint64_t pmselr_el0;
- uint64_t pmuserenr_el0;
uint64_t pmevcntr_el0[31];
uint64_t pmevtyper_el0[31];
+ uint64_t dbgclaimset_el1;
uint64_t dbgbcr_el1[16]; /* Debug Breakpoint Control Registers */
uint64_t dbgbvr_el1[16]; /* Debug Breakpoint Value Registers */
uint64_t dbgwcr_el1[16]; /* Debug Watchpoint Control Registers */
@@ -117,6 +119,7 @@ struct hypctx {
struct vgic_v3_regs vgic_v3_regs;
struct vgic_v3_cpu *vgic_cpu;
bool has_exception;
+ bool dbg_oslock;
};
struct hyp {
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index 1dcefa1489e9..a551a2807183 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -651,6 +651,33 @@ vmm_reg_wi(struct vcpu *vcpu, uint64_t wval, void *arg)
return (0);
}
+static int
+vmm_write_oslar_el1(struct vcpu *vcpu, uint64_t wval, void *arg)
+{
+ struct hypctx *hypctx;
+
+ hypctx = vcpu_get_cookie(vcpu);
+ /* All other fields are RES0 & we don't do anything with this */
+ /* TODO: Disable access to other debug state when locked */
+ hypctx->dbg_oslock = (wval & OSLAR_OSLK) == OSLAR_OSLK;
+ return (0);
+}
+
+static int
+vmm_read_oslsr_el1(struct vcpu *vcpu, uint64_t *rval, void *arg)
+{
+ struct hypctx *hypctx;
+ uint64_t val;
+
+ hypctx = vcpu_get_cookie(vcpu);
+ val = OSLSR_OSLM_1;
+ if (hypctx->dbg_oslock)
+ val |= OSLSR_OSLK;
+ *rval = val;
+
+ return (0);
+}
+
static const struct vmm_special_reg vmm_special_regs[] = {
#define SPECIAL_REG(_reg, _read, _write) \
{ \
@@ -707,6 +734,13 @@ static const struct vmm_special_reg vmm_special_regs[] = {
SPECIAL_REG(CNTP_TVAL_EL0, vtimer_phys_tval_read,
vtimer_phys_tval_write),
SPECIAL_REG(CNTPCT_EL0, vtimer_phys_cnt_read, vtimer_phys_cnt_write),
+
+ /* Debug registers */
+ SPECIAL_REG(DBGPRCR_EL1, vmm_reg_raz, vmm_reg_wi),
+ SPECIAL_REG(OSDLR_EL1, vmm_reg_raz, vmm_reg_wi),
+ /* TODO: Exceptions on invalid access */
+ SPECIAL_REG(OSLAR_EL1, vmm_reg_raz, vmm_write_oslar_el1),
+ SPECIAL_REG(OSLSR_EL1, vmm_read_oslsr_el1, vmm_reg_wi),
#undef SPECIAL_REG
};
diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index 345535318f6e..b8c6d2ab7a9a 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -121,6 +121,8 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
}
}
+ hypctx->dbgclaimset_el1 = READ_SPECIALREG(dbgclaimset_el1);
+
dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define STORE_DBG_BRP(x) \
@@ -180,10 +182,13 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
+ hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
+ hypctx->pmselr_el0 = READ_SPECIALREG(pmselr_el0);
+ hypctx->pmxevcntr_el0 = READ_SPECIALREG(pmxevcntr_el0);
hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
- hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
+
switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define STORE_PMU(x) \
case (x + 1): \
@@ -337,12 +342,15 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
+ WRITE_SPECIALREG(pmuserenr_el0, hypctx->pmuserenr_el0);
+ WRITE_SPECIALREG(pmselr_el0, hypctx->pmselr_el0);
+ WRITE_SPECIALREG(pmxevcntr_el0, hypctx->pmxevcntr_el0);
/* Clear all events/interrupts then enable them */
- WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
+ WRITE_SPECIALREG(pmcntenclr_el0, ~0ul);
WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
- WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
+ WRITE_SPECIALREG(pmintenclr_el1, ~0ul);
WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
- WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
+ WRITE_SPECIALREG(pmovsclr_el0, ~0ul);
WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);
switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
@@ -388,6 +396,9 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
#undef LOAD_PMU
}
+ WRITE_SPECIALREG(dbgclaimclr_el1, ~0ul);
+ WRITE_SPECIALREG(dbgclaimclr_el1, hypctx->dbgclaimset_el1);
+
dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define LOAD_DBG_BRP(x) \
diff --git a/sys/arm64/vmm/vmm_reset.c b/sys/arm64/vmm/vmm_reset.c
index 79d022cf33e8..1240c3ed16ec 100644
--- a/sys/arm64/vmm/vmm_reset.c
+++ b/sys/arm64/vmm/vmm_reset.c
@@ -100,10 +100,12 @@ reset_vm_el01_regs(void *vcpu)
el2ctx->pmcr_el0 |= PMCR_LC;
set_arch_unknown(el2ctx->pmccntr_el0);
set_arch_unknown(el2ctx->pmccfiltr_el0);
+ set_arch_unknown(el2ctx->pmuserenr_el0);
+ set_arch_unknown(el2ctx->pmselr_el0);
+ set_arch_unknown(el2ctx->pmxevcntr_el0);
set_arch_unknown(el2ctx->pmcntenset_el0);
set_arch_unknown(el2ctx->pmintenset_el1);
set_arch_unknown(el2ctx->pmovsset_el0);
- set_arch_unknown(el2ctx->pmuserenr_el0);
memset(el2ctx->pmevcntr_el0, 0, sizeof(el2ctx->pmevcntr_el0));
memset(el2ctx->pmevtyper_el0, 0, sizeof(el2ctx->pmevtyper_el0));
}
@@ -143,7 +145,8 @@ reset_vm_el2_regs(void *vcpu)
/* Set the Extended Hypervisor Configuration Register */
el2ctx->hcrx_el2 = 0;
/* TODO: Trap all extensions we don't support */
- el2ctx->mdcr_el2 = 0;
+ el2ctx->mdcr_el2 = MDCR_EL2_TDOSA | MDCR_EL2_TDRA | MDCR_EL2_TPMS |
+ MDCR_EL2_TTRF;
/* PMCR_EL0.N is read from MDCR_EL2.HPMN */
el2ctx->mdcr_el2 |= (el2ctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT;
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
index 1facab47473c..0d844a6fbf9e 100644
--- a/sys/cam/ata/ata_da.c
+++ b/sys/cam/ata/ata_da.c
@@ -44,6 +44,7 @@
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/cons.h>
+#include <sys/power.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
@@ -878,8 +879,8 @@ static int adaerror(union ccb *ccb, uint32_t cam_flags,
uint32_t sense_flags);
static callout_func_t adasendorderedtag;
static void adashutdown(void *arg, int howto);
-static void adasuspend(void *arg);
-static void adaresume(void *arg);
+static void adasuspend(void *arg, enum power_stype stype);
+static void adaresume(void *arg, enum power_stype stype);
#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
@@ -3747,7 +3748,7 @@ adashutdown(void *arg, int howto)
}
static void
-adasuspend(void *arg)
+adasuspend(void *arg, enum power_stype stype)
{
adaflush();
@@ -3760,7 +3761,7 @@ adasuspend(void *arg)
}
static void
-adaresume(void *arg)
+adaresume(void *arg, enum power_stype stype)
{
struct cam_periph *periph;
struct ada_softc *softc;
diff --git a/sys/cam/nvme/nvme_da.c b/sys/cam/nvme/nvme_da.c
index 1c0d5e8381d8..9c4707da482c 100644
--- a/sys/cam/nvme/nvme_da.c
+++ b/sys/cam/nvme/nvme_da.c
@@ -43,6 +43,7 @@
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
+#include <sys/power.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
@@ -159,7 +160,7 @@ static void ndadone(struct cam_periph *periph,
static int ndaerror(union ccb *ccb, uint32_t cam_flags,
uint32_t sense_flags);
static void ndashutdown(void *arg, int howto);
-static void ndasuspend(void *arg);
+static void ndasuspend(void *arg, enum power_stype stype);
#ifndef NDA_DEFAULT_SEND_ORDERED
#define NDA_DEFAULT_SEND_ORDERED 1
@@ -1365,7 +1366,7 @@ ndashutdown(void *arg, int howto)
}
static void
-ndasuspend(void *arg)
+ndasuspend(void *arg, enum power_stype stype)
{
ndaflush();
diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h
index d891d0df3546..ffc2be600c22 100644
--- a/sys/compat/linuxkpi/common/include/linux/pci.h
+++ b/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -1332,6 +1332,12 @@ struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain,
#define pci_get_domain_bus_and_slot(domain, bus, devfn) \
lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn)
+struct pci_dev *lkpi_pci_get_slot(struct pci_bus *, unsigned int);
+#ifndef WANT_NATIVE_PCI_GET_SLOT
+#define pci_get_slot(_pbus, _devfn) \
+ lkpi_pci_get_slot(_pbus, _devfn)
+#endif
+
static inline int
pci_domain_nr(struct pci_bus *pbus)
{
diff --git a/sys/compat/linuxkpi/common/src/linux_acpi.c b/sys/compat/linuxkpi/common/src/linux_acpi.c
index 43783bb8727b..c7d62c745c7e 100644
--- a/sys/compat/linuxkpi/common/src/linux_acpi.c
+++ b/sys/compat/linuxkpi/common/src/linux_acpi.c
@@ -33,6 +33,7 @@
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
+#include <sys/power.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
@@ -118,20 +119,32 @@ acpi_evaluate_dsm(ACPI_HANDLE ObjHandle, const guid_t *guid,
}
static void
-linux_handle_power_suspend_event(void *arg __unused)
+linux_handle_power_suspend_event(void *arg __unused, enum power_stype stype)
{
- /*
- * Only support S3 for now.
- * acpi_sleep_event isn't always called so we use power_suspend_early
- * instead which means we don't know what state we're switching to.
- * TODO: Make acpi_sleep_event consistent
- */
- linux_acpi_target_sleep_state = ACPI_STATE_S3;
- pm_suspend_target_state = PM_SUSPEND_MEM;
+ switch (stype) {
+ case POWER_STYPE_SUSPEND_TO_IDLE:
+ /*
+ * XXX: obiwac Not 100% sure this is correct, but
+ * acpi_target_sleep_state does seem to be set to
+ * ACPI_STATE_S3 during s2idle on Linux.
+ */
+ linux_acpi_target_sleep_state = ACPI_STATE_S3;
+ pm_suspend_target_state = PM_SUSPEND_TO_IDLE;
+ break;
+ case POWER_STYPE_SUSPEND_TO_MEM:
+ linux_acpi_target_sleep_state = ACPI_STATE_S3;
+ pm_suspend_target_state = PM_SUSPEND_MEM;
+ break;
+ default:
+ printf("%s: sleep type %d not yet supported\n",
+ __func__, stype);
+ break;
+ }
}
static void
-linux_handle_power_resume_event(void *arg __unused)
+linux_handle_power_resume_event(void *arg __unused,
+ enum power_stype stype __unused)
{
linux_acpi_target_sleep_state = ACPI_STATE_S0;
pm_suspend_target_state = PM_SUSPEND_ON;
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
index 43fd6ad28ac4..8507a59a8df3 100644
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -67,6 +67,7 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
+#define WANT_NATIVE_PCI_GET_SLOT
#include <linux/pci.h>
#include <linux/compat.h>
@@ -342,6 +343,7 @@ lkpi_pci_dev_release(struct device *dev)
static int
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{
+ struct pci_devinfo *dinfo;
int error;
error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,
@@ -362,15 +364,24 @@ lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
pci_get_function(dev));
+
pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
- /*
- * This should be the upstream bridge; pci_upstream_bridge()
- * handles that case on demand as otherwise we'll shadow the
- * entire PCI hierarchy.
- */
- pdev->bus->self = pdev;
pdev->bus->number = pci_get_bus(dev);
pdev->bus->domain = pci_get_domain(dev);
+
+ /* Check if we have reached the root to satisfy pci_is_root_bus() */
+ dinfo = device_get_ivars(dev);
+ if (dinfo->cfg.pcie.pcie_location != 0 &&
+ dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {
+ pdev->bus->self = NULL;
+ } else {
+ /*
+ * This should be the upstream bridge; pci_upstream_bridge()
+ * handles that case on demand as otherwise we'll shadow the
+ * entire PCI hierarchy.
+ */
+ pdev->bus->self = pdev;
+ }
pdev->dev.bsddev = dev;
pdev->dev.parent = &linux_root_device;
pdev->dev.release = lkpi_pci_dev_release;
@@ -395,7 +406,7 @@ lkpinew_pci_dev_release(struct device *dev)
pdev = to_pci_dev(dev);
if (pdev->root != NULL)
pci_dev_put(pdev->root);
- if (pdev->bus->self != pdev)
+ if (pdev->bus->self != pdev && pdev->bus->self != NULL)
pci_dev_put(pdev->bus->self);
free(pdev->bus, M_DEVBUF);
if (pdev->msi_desc != NULL) {
@@ -475,6 +486,20 @@ lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
return (pdev);
}
+struct pci_dev *
+lkpi_pci_get_slot(struct pci_bus *pbus, unsigned int devfn)
+{
+ device_t dev;
+ struct pci_dev *pdev;
+
+ dev = pci_find_bsf(pbus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ if (dev == NULL)
+ return (NULL);
+
+ pdev = lkpinew_pci_dev(dev);
+ return (pdev);
+}
+
static int
linux_pci_probe(device_t dev)
{
@@ -551,6 +576,7 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
{
struct resource_list_entry *rle;
device_t parent;
+ struct pci_dev *pbus, *ppbus;
uintptr_t rid;
int error;
bool isdrm;
@@ -594,6 +620,27 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
list_add(&pdev->links, &pci_devices);
spin_unlock(&pci_lock);
+ /*
+ * Create the hierarchy now as we cannot on demand later.
+ * Take special care of DRM as there is a non-PCI device in the chain.
+ */
+ pbus = pdev;
+ if (isdrm) {
+ pbus = lkpinew_pci_dev(parent);
+ if (pbus == NULL) {
+ error = ENXIO;
+ goto out_dma_init;
+ }
+ }
+ pcie_find_root_port(pbus);
+ if (isdrm)
+ pdev->root = pbus->root;
+ ppbus = pci_upstream_bridge(pbus);
+ while (ppbus != NULL && ppbus != pbus) {
+ pbus = ppbus;
+ ppbus = pci_upstream_bridge(pbus);
+ }
+
if (pdrv != NULL) {
error = pdrv->probe(pdev, id);
if (error)
@@ -601,6 +648,7 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
}
return (0);
+/* XXX the cleanup does not match the allocation up there. */
out_probe:
free(pdev->bus, M_DEVBUF);
spin_lock_destroy(&pdev->pcie_cap_lock);
diff --git a/sys/conf/files b/sys/conf/files
index db05a1424f00..d9730e6bf55b 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1393,6 +1393,8 @@ dev/cxgbe/t4_smt.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_l2t.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/t4_tpt.c optional cxgbe pci \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_tracer.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_vf.c optional cxgbev pci \
@@ -1403,6 +1405,8 @@ dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t6_kern_tls.c optional cxgbe pci kern_tls \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/crypto/t7_kern_tls.c optional cxgbe pci kern_tls \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t4_keyctx.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_common.c optional cxgbe \
@@ -1519,6 +1523,30 @@ t6fw.fw optional cxgbe \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw.fw"
+t7fw_cfg.c optional cxgbe \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk t7fw_cfg.fw:t7fw_cfg t7fw_cfg_uwire.fw:t7fw_cfg_uwire -mt7fw_cfg -c${.TARGET}" \
+ no-ctfconvert no-implicit-rule before-depend local \
+ clean "t7fw_cfg.c"
+t7fw_cfg.fwo optional cxgbe \
+ dependency "t7fw_cfg.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "t7fw_cfg.fwo"
+t7fw_cfg.fw optional cxgbe \
+ dependency "$S/dev/cxgbe/firmware/t7fw_cfg.txt" \
+ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
+ no-obj no-implicit-rule \
+ clean "t7fw_cfg.fw"
+t7fw_cfg_uwire.fwo optional cxgbe \
+ dependency "t7fw_cfg_uwire.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "t7fw_cfg_uwire.fwo"
+t7fw_cfg_uwire.fw optional cxgbe \
+ dependency "$S/dev/cxgbe/firmware/t7fw_cfg_uwire.txt" \
+ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
+ no-obj no-implicit-rule \
+ clean "t7fw_cfg_uwire.fw"
dev/cxgbe/crypto/t4_crypto.c optional ccr \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cyapa/cyapa.c optional cyapa iicbus
diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk
index bb3c7af82a4d..7cdfd17778db 100644
--- a/sys/conf/kern.post.mk
+++ b/sys/conf/kern.post.mk
@@ -398,6 +398,14 @@ CFLAGS+= -fdebug-prefix-map=./${_link}=${PREFIX_SYSDIR}/${_link}/include
.endif
.endfor
+# Install GDB plugins that are useful for kernel debugging. See the
+# README in sys/tools/gdb for more information.
+GDB_FILES= acttrace.py \
+ freebsd.py \
+ pcpu.py \
+ selftest.py \
+ vnet.py
+
${_ILINKS}:
@case ${.TARGET} in \
machine) \
@@ -447,6 +455,13 @@ kernel-install: .PHONY
.if defined(DEBUG) && !defined(INSTALL_NODEBUG) && ${MK_KERNEL_SYMBOLS} != "no"
mkdir -p ${DESTDIR}${KERN_DEBUGDIR}${KODIR}
${INSTALL} -p -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_KO}.debug ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/
+ ${INSTALL} -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} \
+ $S/tools/kernel-gdb.py ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/${KERNEL_KO}-gdb.py
+ mkdir -p ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/gdb
+.for file in ${GDB_FILES}
+ ${INSTALL} -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} \
+ $S/tools/gdb/${file} ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/gdb/${file}
+.endfor
.endif
.if defined(KERNEL_EXTRA_INSTALL)
${INSTALL} -p -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_EXTRA_INSTALL} ${DESTDIR}${KODIR}/
diff --git a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
index 9b50a4a3d96e..f3d4316f6f67 100644
--- a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
@@ -14,7 +14,7 @@ Please check our issue tracker before opening a new feature request.
Filling out the following template will help other contributors better understand your proposed feature.
-->
-### Describe the feature would like to see added to OpenZFS
+### Describe the feature you would like to see added to OpenZFS
<!--
Provide a clear and concise description of the feature.
diff --git a/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md b/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md
index 79809179cf13..47edc8174603 100644
--- a/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md
+++ b/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,11 +2,6 @@
<!--- Provide a general summary of your changes in the Title above -->
-<!---
-Documentation on ZFS Buildbot options can be found at
-https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html
--->
-
### Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
index 8439942c5a41..1c608348ffcd 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
@@ -121,7 +121,7 @@ case "$OS" in
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
;;
freebsd15-0c)
- FreeBSD="15.0-ALPHA2"
+ FreeBSD="15.0-ALPHA3"
OSNAME="FreeBSD $FreeBSD"
OSv="freebsd14.0"
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
index ee058b488088..f67bb2f68e94 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
@@ -20,7 +20,7 @@ function archlinux() {
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
- samba sysstat rng-tools rsync wget xxhash
+ samba strace sysstat rng-tools rsync wget xxhash
echo "##[endgroup]"
}
@@ -43,7 +43,8 @@ function debian() {
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
python3-cffi python3-dev python3-distlib python3-packaging libtirpc-dev \
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
- rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev
+ rsync samba strace sysstat uuid-dev watchdog wget xfslibs-dev xxhash \
+ zlib1g-dev
echo "##[endgroup]"
}
@@ -87,8 +88,8 @@ function rhel() {
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
parted perf python3 python3-cffi python3-devel python3-packaging \
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
- rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \
- zlib-devel
+ rpm-build rsync samba strace sysstat systemd watchdog wget xfsprogs-devel \
+ xxhash zlib-devel
echo "##[endgroup]"
}
@@ -104,7 +105,7 @@ function install_fedora_experimental_kernel {
our_version="$1"
sudo dnf -y copr enable @kernel-vanilla/stable
sudo dnf -y copr enable @kernel-vanilla/mainline
- all="$(sudo dnf list --showduplicates kernel-*)"
+ all="$(sudo dnf list --showduplicates kernel-* python3-perf* perf* bpftool*)"
echo "Available versions:"
echo "$all"
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
index 0adcad2a99bc..4869c1003e48 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
@@ -108,19 +108,30 @@ echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt
-# check if the machines are okay
-echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
-for ((i=1; i<=VMs; i++)); do
- .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
-done
-echo "All $VMs VMs are up now."
-
# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
for ((i=1; i<=VMs; i++)); do
mkdir -p $RESPATH/vm$i
read "pty" <<< $(sudo virsh ttyconsole vm$i)
+
+ # Create the file so we can tail it, even if there's no output.
+ touch $RESPATH/vm$i/console.txt
+
sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
+
+ # Write all VM boot lines to the console to aid in debugging failed boots.
+ # The boot lines from all the VMs will be munged together, so prepend each
+ # line with the vm hostname (like 'vm1:').
+ (while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &
+
done
echo "Console logging for ${VMs}x $OS started."
+
+
+# check if the machines are okay
+echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
+for ((i=1; i<=VMs; i++)); do
+ .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
+done
+echo "All $VMs VMs are up now."
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
index 5ab822f4f076..ca6ac77f146d 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
@@ -111,7 +111,7 @@ fi
sudo dmesg -c > dmesg-prerun.txt
mount > mount.txt
df -h > df-prerun.txt
-$TDIR/zfs-tests.sh -vK -s 3GB -T $TAGS
+$TDIR/zfs-tests.sh -vKO -s 3GB -T $TAGS
RV=$?
df -h > df-postrun.txt
echo $RV > tests-exitcode.txt
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 5704b5c6de8a..bdb7aee48041 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -6,5 +6,5 @@ Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 6.16
+Linux-Maximum: 6.17
Linux-Minimum: 4.18
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index d655fa715e15..70a4ed46f263 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -3301,6 +3301,7 @@ zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
uint64_t keyformat, salt, iters;
int i;
unsigned char c;
+ FILE *f;
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), sizeof (uint64_t),
@@ -3333,6 +3334,25 @@ zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
break;
+ case ZFS_KEYFORMAT_RAW:
+ if ((f = fopen(key_material, "r")) == NULL)
+ return (B_FALSE);
+
+ if (fread(key_out, 1, WRAPPING_KEY_LEN, f) !=
+ WRAPPING_KEY_LEN) {
+ (void) fclose(f);
+ return (B_FALSE);
+ }
+
+ /* Check the key length */
+ if (fgetc(f) != EOF) {
+ (void) fclose(f);
+ return (B_FALSE);
+ }
+
+ (void) fclose(f);
+ break;
+
default:
fatal("no support for key format %u\n",
(unsigned int) keyformat);
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index 484986bde719..ccdd5ffef8e6 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -914,7 +914,11 @@ zfs_do_clone(int argc, char **argv)
log_history = B_FALSE;
}
- ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
+ /*
+ * Dataset cloned successfully, mount/share failures are
+ * non-fatal.
+ */
+ (void) zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
@@ -930,19 +934,15 @@ usage:
}
/*
- * Return a default volblocksize for the pool which always uses more than
- * half of the data sectors. This primarily applies to dRAID which always
- * writes full stripe widths.
+ * Calculate the minimum allocation size based on the top-level vdevs.
*/
static uint64_t
-default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
+calculate_volblocksize(nvlist_t *config)
{
- uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
+ uint64_t asize = SPA_MINBLOCKSIZE;
nvlist_t *tree, **vdevs;
uint_t nvdevs;
- nvlist_t *config = zpool_get_config(zhp, NULL);
-
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
@@ -973,6 +973,24 @@ default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
}
}
+ return (asize);
+}
+
+/*
+ * Return a default volblocksize for the pool which always uses more than
+ * half of the data sectors. This primarily applies to dRAID which always
+ * writes full stripe widths.
+ */
+static uint64_t
+default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
+{
+ uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
+
+ nvlist_t *config = zpool_get_config(zhp, NULL);
+
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_MAX_ALLOC, &asize) != 0)
+ asize = calculate_volblocksize(config);
+
/*
* Calculate the target volblocksize such that more than half
* of the asize is used. The following table is for 4k sectors.
@@ -1319,7 +1337,9 @@ zfs_do_create(int argc, char **argv)
goto error;
}
- ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
+ /* Dataset created successfully, mount/share failures are non-fatal */
+ ret = 0;
+ (void) zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
diff --git a/sys/contrib/openzfs/cmd/zinject/zinject.c b/sys/contrib/openzfs/cmd/zinject/zinject.c
index 113797c878b9..c2f646f2567d 100644
--- a/sys/contrib/openzfs/cmd/zinject/zinject.c
+++ b/sys/contrib/openzfs/cmd/zinject/zinject.c
@@ -107,6 +107,8 @@
* zinject
* zinject <-a | -u pool>
* zinject -c <id|all>
+ * zinject -E <delay> [-a] [-m] [-f freq] [-l level] [-r range]
+ * [-T iotype] [-t type object | -b bookmark pool]
* zinject [-q] <-t type> [-f freq] [-u] [-a] [-m] [-e errno] [-l level]
* [-r range] <object>
* zinject [-f freq] [-a] [-m] [-u] -b objset:object:level:start:end pool
@@ -132,14 +134,18 @@
* The '-f' flag controls the frequency of errors injected, expressed as a
* real number percentage between 0.0001 and 100. The default is 100.
*
- * The this form is responsible for actually injecting the handler into the
+ * The <object> form is responsible for actually injecting the handler into the
* framework. It takes the arguments described above, translates them to the
* internal tuple using libzpool, and then issues an ioctl() to register the
* handler.
*
- * The final form can target a specific bookmark, regardless of whether a
+ * The '-b' option can target a specific bookmark, regardless of whether a
* human-readable interface has been designed. It allows developers to specify
* a particular block by number.
+ *
+ * The '-E' option injects pipeline ready stage delays for the given object or
+ * bookmark. The delay is specified in milliseconds, and it supports I/O type
+ * and range filters.
*/
#include <errno.h>
@@ -346,6 +352,13 @@ usage(void)
"\t\tsuch that the operation takes a minimum of supplied seconds\n"
"\t\tto complete.\n"
"\n"
+ "\tzinject -E <delay> [-a] [-m] [-f freq] [-l level] [-r range]\n"
+ "\t\t[-T iotype] [-t type object | -b bookmark pool]\n"
+ "\n"
+ "\t\tInject pipeline ready stage delays for the given object path\n"
+ "\t\t(data or dnode) or raw bookmark. The delay is specified in\n"
+ "\t\tmilliseconds.\n"
+ "\n"
"\tzinject -I [-s <seconds> | -g <txgs>] pool\n"
"\t\tCause the pool to stop writing blocks yet not\n"
"\t\treport errors for a duration. Simulates buggy hardware\n"
@@ -724,12 +737,15 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
if (quiet) {
(void) printf("%llu\n", (u_longlong_t)zc.zc_guid);
} else {
+ boolean_t show_object = B_FALSE;
+ boolean_t show_iotype = B_FALSE;
(void) printf("Added handler %llu with the following "
"properties:\n", (u_longlong_t)zc.zc_guid);
(void) printf(" pool: %s\n", pool);
if (record->zi_guid) {
(void) printf(" vdev: %llx\n",
(u_longlong_t)record->zi_guid);
+ show_iotype = B_TRUE;
} else if (record->zi_func[0] != '\0') {
(void) printf(" panic function: %s\n",
record->zi_func);
@@ -742,7 +758,18 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
} else if (record->zi_timer > 0) {
(void) printf(" timer: %lld ms\n",
(u_longlong_t)NSEC2MSEC(record->zi_timer));
+ if (record->zi_cmd == ZINJECT_DELAY_READY) {
+ show_object = B_TRUE;
+ show_iotype = B_TRUE;
+ }
} else {
+ show_object = B_TRUE;
+ }
+ if (show_iotype) {
+ (void) printf("iotype: %s\n",
+ iotype_to_str(record->zi_iotype));
+ }
+ if (show_object) {
(void) printf("objset: %llu\n",
(u_longlong_t)record->zi_objset);
(void) printf("object: %llu\n",
@@ -910,6 +937,7 @@ main(int argc, char **argv)
int ret;
int flags = 0;
uint32_t dvas = 0;
+ hrtime_t ready_delay = -1;
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
@@ -940,7 +968,7 @@ main(int argc, char **argv)
}
while ((c = getopt(argc, argv,
- ":aA:b:C:d:D:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:P:")) != -1) {
+ ":aA:b:C:d:D:E:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:P:")) != -1) {
switch (c) {
case 'a':
flags |= ZINJECT_FLUSH_ARC;
@@ -1113,6 +1141,18 @@ main(int argc, char **argv)
case 'u':
flags |= ZINJECT_UNLOAD_SPA;
break;
+ case 'E':
+ ready_delay = MSEC2NSEC(strtol(optarg, &end, 10));
+ if (ready_delay <= 0 || *end != '\0') {
+ (void) fprintf(stderr, "invalid delay '%s': "
+ "must be a positive duration\n", optarg);
+ usage();
+ libzfs_fini(g_zfs);
+ return (1);
+ }
+ record.zi_cmd = ZINJECT_DELAY_READY;
+ record.zi_timer = ready_delay;
+ break;
case 'L':
if ((label = name_to_type(optarg)) == TYPE_INVAL &&
!LABEL_TYPE(type)) {
@@ -1150,7 +1190,7 @@ main(int argc, char **argv)
*/
if (raw != NULL || range != NULL || type != TYPE_INVAL ||
level != 0 || record.zi_cmd != ZINJECT_UNINITIALIZED ||
- record.zi_freq > 0 || dvas != 0) {
+ record.zi_freq > 0 || dvas != 0 || ready_delay >= 0) {
(void) fprintf(stderr, "cancel (-c) incompatible with "
"any other options\n");
usage();
@@ -1186,7 +1226,7 @@ main(int argc, char **argv)
*/
if (raw != NULL || range != NULL || type != TYPE_INVAL ||
level != 0 || record.zi_cmd != ZINJECT_UNINITIALIZED ||
- dvas != 0) {
+ dvas != 0 || ready_delay >= 0) {
(void) fprintf(stderr, "device (-d) incompatible with "
"data error injection\n");
usage();
@@ -1276,13 +1316,23 @@ main(int argc, char **argv)
return (1);
}
- record.zi_cmd = ZINJECT_DATA_FAULT;
+ if (record.zi_cmd == ZINJECT_UNINITIALIZED) {
+ record.zi_cmd = ZINJECT_DATA_FAULT;
+ if (!error)
+ error = EIO;
+ } else if (error != 0) {
+ (void) fprintf(stderr, "error type -e incompatible "
+ "with delay injection\n");
+ libzfs_fini(g_zfs);
+ return (1);
+ } else {
+ record.zi_iotype = io_type;
+ }
+
if (translate_raw(raw, &record) != 0) {
libzfs_fini(g_zfs);
return (1);
}
- if (!error)
- error = EIO;
} else if (record.zi_cmd == ZINJECT_PANIC) {
if (raw != NULL || range != NULL || type != TYPE_INVAL ||
level != 0 || device != NULL || record.zi_freq > 0 ||
@@ -1410,6 +1460,13 @@ main(int argc, char **argv)
record.zi_dvas = dvas;
}
+ if (record.zi_cmd != ZINJECT_UNINITIALIZED && error != 0) {
+ (void) fprintf(stderr, "error type -e incompatible "
+ "with delay injection\n");
+ libzfs_fini(g_zfs);
+ return (1);
+ }
+
if (error == EACCES) {
if (type != TYPE_DATA) {
(void) fprintf(stderr, "decryption errors "
@@ -1425,8 +1482,12 @@ main(int argc, char **argv)
* not found.
*/
error = ECKSUM;
- } else {
+ } else if (record.zi_cmd == ZINJECT_UNINITIALIZED) {
record.zi_cmd = ZINJECT_DATA_FAULT;
+ if (!error)
+ error = EIO;
+ } else {
+ record.zi_iotype = io_type;
}
if (translate_record(type, argv[0], range, level, &record, pool,
@@ -1434,8 +1495,6 @@ main(int argc, char **argv)
libzfs_fini(g_zfs);
return (1);
}
- if (!error)
- error = EIO;
}
/*
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
index 2eec9a95e24c..fef602736705 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
@@ -26,6 +26,7 @@
/*
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <libintl.h>
@@ -52,7 +53,7 @@
typedef struct zpool_node {
zpool_handle_t *zn_handle;
uu_avl_node_t zn_avlnode;
- int zn_mark;
+ hrtime_t zn_last_refresh;
} zpool_node_t;
struct zpool_list {
@@ -62,6 +63,7 @@ struct zpool_list {
uu_avl_pool_t *zl_pool;
zprop_list_t **zl_proplist;
zfs_type_t zl_type;
+ hrtime_t zl_last_refresh;
};
static int
@@ -81,26 +83,30 @@ zpool_compare(const void *larg, const void *rarg, void *unused)
* of known pools.
*/
static int
-add_pool(zpool_handle_t *zhp, void *data)
+add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
{
- zpool_list_t *zlp = data;
- zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
+ zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
uu_avl_index_t idx;
- node->zn_handle = zhp;
- uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
- if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
+ new->zn_handle = zhp;
+ uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);
+
+ node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
+ if (node == NULL) {
if (zlp->zl_proplist &&
zpool_expand_proplist(zhp, zlp->zl_proplist,
zlp->zl_type, zlp->zl_literal) != 0) {
zpool_close(zhp);
- free(node);
+ free(new);
return (-1);
}
- uu_avl_insert(zlp->zl_avl, node, idx);
+ new->zn_last_refresh = zlp->zl_last_refresh;
+ uu_avl_insert(zlp->zl_avl, new, idx);
} else {
+ zpool_refresh_stats_from_handle(node->zn_handle, zhp);
+ node->zn_last_refresh = zlp->zl_last_refresh;
zpool_close(zhp);
- free(node);
+ free(new);
return (-1);
}
@@ -108,6 +114,18 @@ add_pool(zpool_handle_t *zhp, void *data)
}
/*
+ * add_pool(), but always returns 0. This allows zpool_iter() to continue
+ * even if a pool exists in the tree, or we fail to get the properties for
+ * a new one.
+ */
+static int
+add_pool_cb(zpool_handle_t *zhp, void *data)
+{
+ (void) add_pool(zhp, data);
+ return (0);
+}
+
+/*
* Create a list of pools based on the given arguments. If we're given no
* arguments, then iterate over all pools in the system and add them to the AVL
* tree. Otherwise, add only those pool explicitly specified on the command
@@ -135,9 +153,10 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
zlp->zl_type = type;
zlp->zl_literal = literal;
+ zlp->zl_last_refresh = gethrtime();
if (argc == 0) {
- (void) zpool_iter(g_zfs, add_pool, zlp);
+ (void) zpool_iter(g_zfs, add_pool_cb, zlp);
zlp->zl_findall = B_TRUE;
} else {
int i;
@@ -159,15 +178,61 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
}
/*
- * Search for any new pools, adding them to the list. We only add pools when no
- * options were given on the command line. Otherwise, we keep the list fixed as
- * those that were explicitly specified.
+ * Refresh the state of all pools on the list. Additionally, if no options were
+ * given on the command line, add any new pools and remove any that are no
+ * longer available.
*/
-void
-pool_list_update(zpool_list_t *zlp)
+int
+pool_list_refresh(zpool_list_t *zlp)
{
- if (zlp->zl_findall)
- (void) zpool_iter(g_zfs, add_pool, zlp);
+ zlp->zl_last_refresh = gethrtime();
+
+ if (!zlp->zl_findall) {
+ /*
+ * This list is a fixed list of pools, so we must not add
+ * or remove any. Just walk over them and refresh their
+ * state.
+ */
+ int navail = 0;
+ for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
+ node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
+ boolean_t missing;
+ zpool_refresh_stats(node->zn_handle, &missing);
+ navail += !missing;
+ node->zn_last_refresh = zlp->zl_last_refresh;
+ }
+ return (navail);
+ }
+
+ /* Search for any new pools and add them to the list. */
+ (void) zpool_iter(g_zfs, add_pool_cb, zlp);
+
+ /* Walk the list of existing pools, and update or remove them. */
+ zpool_node_t *node, *next;
+ for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
+ next = uu_avl_next(zlp->zl_avl, node);
+
+ /*
+ * Skip any that were refreshed and are online; they were added
+ * by zpool_iter() and are already up to date.
+ */
+ if (node->zn_last_refresh == zlp->zl_last_refresh &&
+ zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL)
+ continue;
+
+ /* Refresh and remove if necessary. */
+ boolean_t missing;
+ zpool_refresh_stats(node->zn_handle, &missing);
+ if (missing) {
+ uu_avl_remove(zlp->zl_avl, node);
+ zpool_close(node->zn_handle);
+ free(node);
+ } else {
+ node->zn_last_refresh = zlp->zl_last_refresh;
+ }
+ }
+
+ return (uu_avl_numnodes(zlp->zl_avl));
}
/*
@@ -191,23 +256,6 @@ pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
}
/*
- * Remove the given pool from the list. When running iostat, we want to remove
- * those pools that no longer exist.
- */
-void
-pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
-{
- zpool_node_t search, *node;
-
- search.zn_handle = zhp;
- if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
- uu_avl_remove(zlp->zl_avl, node);
- zpool_close(node->zn_handle);
- free(node);
- }
-}
-
-/*
* Free all the handles associated with this list.
*/
void
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 2c46ad0df895..1feec55c0e8b 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -33,7 +33,7 @@
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
- * Copyright (c) 2021, 2023, Klara Inc.
+ * Copyright (c) 2021, 2023, 2025, Klara, Inc.
* Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
*/
@@ -5761,24 +5761,6 @@ children:
return (ret);
}
-static int
-refresh_iostat(zpool_handle_t *zhp, void *data)
-{
- iostat_cbdata_t *cb = data;
- boolean_t missing;
-
- /*
- * If the pool has disappeared, remove it from the list and continue.
- */
- if (zpool_refresh_stats(zhp, &missing) != 0)
- return (-1);
-
- if (missing)
- pool_list_remove(cb->cb_list, zhp);
-
- return (0);
-}
-
/*
* Callback to print out the iostats for the given pool.
*/
@@ -6359,15 +6341,14 @@ get_namewidth_iostat(zpool_handle_t *zhp, void *data)
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
- * on pool_list_update() to detect the addition of new pools. Configuration
- * changes are all handled within libzfs.
+ * on pool_list_refresh() to detect the addition and removal of pools.
+ * Configuration changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
- int npools;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
@@ -6618,10 +6599,24 @@ zpool_do_iostat(int argc, char **argv)
return (1);
}
+ int last_npools = 0;
for (;;) {
- if ((npools = pool_list_count(list)) == 0)
+ /*
+ * Refresh all pools in list, adding or removing pools as
+ * necessary.
+ */
+ int npools = pool_list_refresh(list);
+ if (npools == 0) {
(void) fprintf(stderr, gettext("no pools available\n"));
- else {
+ } else {
+ /*
+ * If the list of pools has changed since last time
+ * around, reset the iteration count to force the
+ * header to be redisplayed.
+ */
+ if (last_npools != npools)
+ cb.cb_iteration = 0;
+
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
@@ -6630,15 +6625,6 @@ zpool_do_iostat(int argc, char **argv)
cb.cb_iteration == 0);
/*
- * Refresh all statistics. This is done as an
- * explicit step before calculating the maximum name
- * width, so that any * configuration changes are
- * properly accounted for.
- */
- (void) pool_list_iter(list, B_FALSE, refresh_iostat,
- &cb);
-
- /*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
@@ -6691,6 +6677,7 @@ zpool_do_iostat(int argc, char **argv)
if (skip) {
(void) fflush(stdout);
(void) fsleep(interval);
+ last_npools = npools;
continue;
}
@@ -6728,6 +6715,8 @@ zpool_do_iostat(int argc, char **argv)
(void) fflush(stdout);
(void) fsleep(interval);
+
+ last_npools = npools;
}
pool_list_free(list);
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_util.h b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
index 5ab7cb9750f1..3af23c52bd45 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_util.h
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
@@ -76,11 +76,10 @@ typedef struct zpool_list zpool_list_t;
zpool_list_t *pool_list_get(int, char **, zprop_list_t **, zfs_type_t,
boolean_t, int *);
-void pool_list_update(zpool_list_t *);
+int pool_list_refresh(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
int pool_list_count(zpool_list_t *);
-void pool_list_remove(zpool_list_t *, zpool_handle_t *);
extern libzfs_handle_t *g_zfs;
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
index 684b46a2d673..088c0108e911 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
@@ -609,22 +609,28 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
ZPOOL_CONFIG_PATH, &path) == 0);
/*
+	 * Skip active spares; they should never cause
+	 * the pool to be evaluated as inconsistent.
+ */
+ if (is_spare(NULL, path))
+ continue;
+
+ /*
* If we have a raidz/mirror that combines disks
- * with files, report it as an error.
+ * with files, only report it as an error when
+ * fatal is set to ensure all the replication
+ * checks aren't skipped in check_replication().
*/
- if (!dontreport && type != NULL &&
+ if (fatal && !dontreport && type != NULL &&
strcmp(type, childtype) != 0) {
if (ret != NULL)
free(ret);
ret = NULL;
- if (fatal)
- vdev_error(gettext(
- "mismatched replication "
- "level: %s contains both "
- "files and devices\n"),
- rep.zprl_type);
- else
- return (NULL);
+ vdev_error(gettext(
+ "mismatched replication "
+ "level: %s contains both "
+ "files and devices\n"),
+ rep.zprl_type);
dontreport = B_TRUE;
}
diff --git a/sys/contrib/openzfs/contrib/intel_qat/readme.md b/sys/contrib/openzfs/contrib/intel_qat/readme.md
index 7e45d395bb80..04c299b6404c 100644
--- a/sys/contrib/openzfs/contrib/intel_qat/readme.md
+++ b/sys/contrib/openzfs/contrib/intel_qat/readme.md
@@ -8,7 +8,7 @@ This contrib contains community compatibility patches to get Intel QAT working o
These patches are based on the following Intel QAT version:
[1.7.l.4.10.0-00014](https://01.org/sites/default/files/downloads/qat1.7.l.4.10.0-00014.tar.gz)
-When using QAT with above kernels versions, the following patches needs to be applied using:
+When using QAT with the above kernel versions, the following patches need to be applied using:
patch -p1 < _$PATCH_
_Where $PATCH refers to the path of the patch in question_
diff --git a/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
index 971aa1d0d493..bad1af2d1671 100644
--- a/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
+++ b/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
@@ -4223,7 +4223,7 @@ class _TempPool(object):
self.getRoot().reset()
return
- # On the Buildbot builders this may fail with "pool is busy"
+ # On the CI builders this may fail with "pool is busy"
# Retry 5 times before raising an error
retry = 0
while True:
diff --git a/sys/contrib/openzfs/etc/init.d/README.md b/sys/contrib/openzfs/etc/init.d/README.md
index da780fdc1222..3852dd9a6b2e 100644
--- a/sys/contrib/openzfs/etc/init.d/README.md
+++ b/sys/contrib/openzfs/etc/init.d/README.md
@@ -1,5 +1,5 @@
DESCRIPTION
- These script were written with the primary intention of being portable and
+ These scripts were written with the primary intention of being portable and
usable on as many systems as possible.
This is, in practice, usually not possible. But the intention is there.
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index 3fcdc176a621..14930fb90622 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -479,6 +479,8 @@ _LIBZFS_H zpool_status_t zpool_import_status(nvlist_t *, const char **,
_LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
+_LIBZFS_H void zpool_refresh_stats_from_handle(zpool_handle_t *,
+ zpool_handle_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
_LIBZFS_H void zpool_add_propname(zpool_handle_t *, const char *);
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
index 076dab8ba6dc..214f3ea0e787 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
@@ -542,24 +542,6 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
-/*
- * All the io_*() helper functions below can operate on a bio, or a rq, but
- * not both. The older submit_bio() codepath will pass a bio, and the
- * newer blk-mq codepath will pass a rq.
- */
-static inline int
-io_data_dir(struct bio *bio, struct request *rq)
-{
- if (rq != NULL) {
- if (op_is_write(req_op(rq))) {
- return (WRITE);
- } else {
- return (READ);
- }
- }
- return (bio_data_dir(bio));
-}
-
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
diff --git a/sys/contrib/openzfs/include/sys/fs/zfs.h b/sys/contrib/openzfs/include/sys/fs/zfs.h
index 49ab9d3db795..662fd81c5ee1 100644
--- a/sys/contrib/openzfs/include/sys/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fs/zfs.h
@@ -748,6 +748,8 @@ typedef struct zpool_load_policy {
#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT "ashift"
#define ZPOOL_CONFIG_ASIZE "asize"
+#define ZPOOL_CONFIG_MIN_ALLOC "min_alloc"
+#define ZPOOL_CONFIG_MAX_ALLOC "max_alloc"
#define ZPOOL_CONFIG_DTL "DTL"
#define ZPOOL_CONFIG_SCAN_STATS "scan_stats" /* not stored on disk */
#define ZPOOL_CONFIG_REMOVAL_STATS "removal_stats" /* not stored on disk */
diff --git a/sys/contrib/openzfs/include/sys/range_tree.h b/sys/contrib/openzfs/include/sys/range_tree.h
index 0f6884682459..0f6def36f9f6 100644
--- a/sys/contrib/openzfs/include/sys/range_tree.h
+++ b/sys/contrib/openzfs/include/sys/range_tree.h
@@ -238,8 +238,7 @@ zfs_rs_set_end_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
}
static inline void
-zfs_zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt,
- uint64_t fill)
+zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t fill)
{
ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
@@ -277,7 +276,7 @@ static inline void
zfs_rs_set_fill(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t fill)
{
ASSERT(IS_P2ALIGNED(fill, 1ULL << rt->rt_shift));
- zfs_zfs_rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
+ zfs_rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
}
typedef void zfs_range_tree_func_t(void *arg, uint64_t start, uint64_t size);
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index 66db16b33c51..f172f2af6f07 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -1030,7 +1030,7 @@ extern void spa_import_progress_set_notes_nolog(spa_t *spa,
extern int spa_config_tryenter(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
-extern void spa_config_enter_mmp(spa_t *spa, int locks, const void *tag,
+extern void spa_config_enter_priority(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
@@ -1084,6 +1084,7 @@ extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
+extern void spa_get_min_alloc_range(spa_t *spa, uint64_t *min, uint64_t *max);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_checkpoint_space(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
diff --git a/sys/contrib/openzfs/include/sys/spa_impl.h b/sys/contrib/openzfs/include/sys/spa_impl.h
index 07a959db3447..62b062984d36 100644
--- a/sys/contrib/openzfs/include/sys/spa_impl.h
+++ b/sys/contrib/openzfs/include/sys/spa_impl.h
@@ -265,6 +265,7 @@ struct spa {
uint64_t spa_min_ashift; /* of vdevs in normal class */
uint64_t spa_max_ashift; /* of vdevs in normal class */
uint64_t spa_min_alloc; /* of vdevs in normal class */
+ uint64_t spa_max_alloc; /* of vdevs in normal class */
uint64_t spa_gcd_alloc; /* of vdevs in normal class */
uint64_t spa_config_guid; /* config pool guid */
uint64_t spa_load_guid; /* spa_load initialized guid */
diff --git a/sys/contrib/openzfs/include/sys/zfs_ioctl.h b/sys/contrib/openzfs/include/sys/zfs_ioctl.h
index 8174242abdac..cfe11f43bb8e 100644
--- a/sys/contrib/openzfs/include/sys/zfs_ioctl.h
+++ b/sys/contrib/openzfs/include/sys/zfs_ioctl.h
@@ -455,6 +455,7 @@ typedef enum zinject_type {
ZINJECT_DECRYPT_FAULT,
ZINJECT_DELAY_IMPORT,
ZINJECT_DELAY_EXPORT,
+ ZINJECT_DELAY_READY,
} zinject_type_t;
typedef enum zinject_iotype {
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index a8acb83b4c2f..acb0a03a36b2 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -718,6 +718,7 @@ extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
extern void zio_handle_import_delay(spa_t *spa, hrtime_t elapsed);
extern void zio_handle_export_delay(spa_t *spa, hrtime_t elapsed);
+extern hrtime_t zio_handle_ready_delay(zio_t *zio);
/*
* Checksum ereport functions
diff --git a/sys/contrib/openzfs/lib/libuutil/libuutil.abi b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
index 6c736c61e4a5..2a740afa07ca 100644
--- a/sys/contrib/openzfs/lib/libuutil/libuutil.abi
+++ b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
@@ -616,6 +616,7 @@
<array-type-def dimensions='1' type-id='de572c22' size-in-bits='1472' id='6d3c2f42'>
<subrange length='23' type-id='7359adad' id='fdd0f594'/>
</array-type-def>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<array-type-def dimensions='1' type-id='3a47d82b' size-in-bits='256' id='a133ec23'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
@@ -1020,13 +1021,6 @@
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
- <array-type-def dimensions='1' type-id='d315442e' size-in-bits='16' id='811205dc'>
- <subrange length='1' type-id='7359adad' id='52f813b4'/>
- </array-type-def>
- <array-type-def dimensions='1' type-id='d3130597' size-in-bits='768' id='f63f23b9'>
- <subrange length='12' type-id='7359adad' id='84827bdc'/>
- </array-type-def>
- <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
@@ -1061,93 +1055,6 @@
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__u16' type-id='8efea9e5' id='d315442e'/>
- <typedef-decl name='__s32' type-id='95e97e5e' id='3158a266'/>
- <typedef-decl name='__u32' type-id='f0981eeb' id='3f1a6b60'/>
- <typedef-decl name='__s64' type-id='1eb56b1e' id='49659421'/>
- <typedef-decl name='__u64' type-id='3a47d82b' id='d3130597'/>
- <class-decl name='statx_timestamp' size-in-bits='128' is-struct='yes' visibility='default' id='94101016'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='49659421' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__reserved' type-id='3158a266' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='statx' size-in-bits='2048' is-struct='yes' visibility='default' id='720b04c5'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='stx_mask' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='stx_blksize' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='stx_attributes' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='stx_nlink' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='stx_uid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='stx_gid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='stx_mode' type-id='d315442e' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='__spare0' type-id='811205dc' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='stx_ino' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='stx_size' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='stx_blocks' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='stx_attributes_mask' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='stx_atime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='stx_btime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='stx_ctime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='stx_mtime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='stx_rdev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1056'>
- <var-decl name='stx_rdev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='stx_dev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1120'>
- <var-decl name='stx_dev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='stx_mnt_id' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__spare2' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__spare3' type-id='f63f23b9' visibility='default'/>
- </data-member>
- </class-decl>
<class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='56fe4a37'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_fsname' type-id='26a90f95' visibility='default'/>
@@ -1237,8 +1144,6 @@
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<qualified-type-def type-id='62f7a03d' restrict='yes' id='f1cadedf'/>
- <pointer-type-def type-id='720b04c5' size-in-bits='64' id='936b8e35'/>
- <qualified-type-def type-id='936b8e35' restrict='yes' id='31d265b7'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
@@ -1254,14 +1159,6 @@
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
- <function-decl name='statx' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='9d26089a'/>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='f0981eeb'/>
- <parameter type-id='31d265b7'/>
- <return type-id='95e97e5e'/>
- </function-decl>
<function-decl name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='95e97e5e'/>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index 184ea4a55b43..f988d27a286a 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -571,6 +571,7 @@
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_read_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_refresh_stats_from_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@@ -641,7 +642,7 @@
<elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='2632' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
- <elf-symbol name='zfs_deleg_perm_tab' size='528' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zfs_deleg_perm_tab' size='544' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@@ -1458,103 +1459,8 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/getmntany.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='d315442e' size-in-bits='16' id='811205dc'>
- <subrange length='1' type-id='7359adad' id='52f813b4'/>
- </array-type-def>
- <array-type-def dimensions='1' type-id='d3130597' size-in-bits='768' id='f63f23b9'>
- <subrange length='12' type-id='7359adad' id='84827bdc'/>
- </array-type-def>
- <typedef-decl name='__u16' type-id='8efea9e5' id='d315442e'/>
- <typedef-decl name='__s32' type-id='95e97e5e' id='3158a266'/>
- <typedef-decl name='__u32' type-id='f0981eeb' id='3f1a6b60'/>
- <typedef-decl name='__s64' type-id='1eb56b1e' id='49659421'/>
- <typedef-decl name='__u64' type-id='3a47d82b' id='d3130597'/>
- <class-decl name='statx_timestamp' size-in-bits='128' is-struct='yes' visibility='default' id='94101016'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='49659421' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__reserved' type-id='3158a266' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='statx' size-in-bits='2048' is-struct='yes' visibility='default' id='720b04c5'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='stx_mask' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='stx_blksize' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='stx_attributes' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='stx_nlink' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='stx_uid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='stx_gid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='stx_mode' type-id='d315442e' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='__spare0' type-id='811205dc' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='stx_ino' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='stx_size' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='stx_blocks' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='stx_attributes_mask' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='stx_atime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='stx_btime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='stx_ctime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='stx_mtime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='stx_rdev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1056'>
- <var-decl name='stx_rdev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='stx_dev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1120'>
- <var-decl name='stx_dev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='stx_mnt_id' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__spare2' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__spare3' type-id='f63f23b9' visibility='default'/>
- </data-member>
- </class-decl>
<pointer-type-def type-id='56fe4a37' size-in-bits='64' id='b6b61d2f'/>
<qualified-type-def type-id='b6b61d2f' restrict='yes' id='3cad23cd'/>
- <pointer-type-def type-id='720b04c5' size-in-bits='64' id='936b8e35'/>
- <qualified-type-def type-id='936b8e35' restrict='yes' id='31d265b7'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
@@ -1566,14 +1472,6 @@
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='statx' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='9d26089a'/>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='f0981eeb'/>
- <parameter type-id='31d265b7'/>
- <return type-id='95e97e5e'/>
- </function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/timestamp.c' language='LANG_C99'>
<typedef-decl name='nl_item' type-id='95e97e5e' id='03b79a94'/>
@@ -3194,6 +3092,10 @@
<parameter type-id='dace003f'/>
<return type-id='80f4b756'/>
</function-decl>
+ <function-decl name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='22cce67b'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
<function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='5ce45b60'/>
@@ -3238,6 +3140,11 @@
<parameter type-id='37e3bd22' name='missing'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='zpool_refresh_stats_from_handle' mangled-name='zpool_refresh_stats_from_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats_from_handle'>
+ <parameter type-id='4c81de99' name='dzhp'/>
+ <parameter type-id='4c81de99' name='szhp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
<parameter type-id='80f4b756' name='poolname'/>
<return type-id='c19b74c3'/>
@@ -9398,10 +9305,6 @@
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='22cce67b'/>
- <return type-id='5ce45b60'/>
- </function-decl>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
<return type-id='b59d7dce'/>
</function-decl>
@@ -9774,8 +9677,8 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_deleg.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='4224' id='55e705e7'>
- <subrange length='33' type-id='7359adad' id='6a5934df'/>
+ <array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='4352' id='55f84f08'>
+ <subrange length='34' type-id='7359adad' id='6a6a7e00'/>
</array-type-def>
<array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='infinite' id='7c00e69d'>
<subrange length='infinite' id='031f2035'/>
@@ -9805,30 +9708,31 @@
<enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
<enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
<enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
- <enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
- <enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
- <enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
- <enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
- <enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
- <enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
- <enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
- <enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
- <enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
- <enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
- <enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
- <enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
- <enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
- <enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
- <enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
- <enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
+ <enumerator name='ZFS_DELEG_NOTE_SEND_RAW' value='8'/>
+ <enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='9'/>
+ <enumerator name='ZFS_DELEG_NOTE_ALLOW' value='10'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERPROP' value='11'/>
+ <enumerator name='ZFS_DELEG_NOTE_MOUNT' value='12'/>
+ <enumerator name='ZFS_DELEG_NOTE_SHARE' value='13'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='14'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='15'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERUSED' value='16'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='17'/>
+ <enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='18'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='19'/>
+ <enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='20'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='21'/>
+ <enumerator name='ZFS_DELEG_NOTE_HOLD' value='22'/>
+ <enumerator name='ZFS_DELEG_NOTE_RELEASE' value='23'/>
+ <enumerator name='ZFS_DELEG_NOTE_DIFF' value='24'/>
+ <enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='25'/>
+ <enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='26'/>
+ <enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='27'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='28'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='29'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='30'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='31'/>
+ <enumerator name='ZFS_DELEG_NOTE_NONE' value='32'/>
</enum-decl>
<typedef-decl name='zfs_deleg_note_t' type-id='729d4547' id='4613c173'/>
<class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='5aa05c1f'>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_config.c b/sys/contrib/openzfs/lib/libzfs/libzfs_config.c
index 0d2102191389..9d704e4303ff 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_config.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_config.c
@@ -308,6 +308,23 @@ zpool_refresh_stats(zpool_handle_t *zhp, boolean_t *missing)
}
/*
+ * Copies the pool config and state from szhp to dzhp. szhp and dzhp must
+ * represent the same pool. Used by pool_list_refresh() to avoid another
+ * round-trip into the kernel to get stats already collected earlier in the
+ * function.
+ */
+void
+zpool_refresh_stats_from_handle(zpool_handle_t *dzhp, zpool_handle_t *szhp)
+{
+ VERIFY0(strcmp(dzhp->zpool_name, szhp->zpool_name));
+ nvlist_free(dzhp->zpool_old_config);
+ dzhp->zpool_old_config = dzhp->zpool_config;
+ dzhp->zpool_config = fnvlist_dup(szhp->zpool_config);
+ dzhp->zpool_config_size = szhp->zpool_config_size;
+ dzhp->zpool_state = szhp->zpool_state;
+}
+
+/*
* The following environment variables are undocumented
* and should be used for testing purposes only:
*
diff --git a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
index 7464b3adb254..263cad045f7a 100644
--- a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
+++ b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
@@ -617,6 +617,7 @@
<array-type-def dimensions='1' type-id='de572c22' size-in-bits='1472' id='6d3c2f42'>
<subrange length='23' type-id='7359adad' id='fdd0f594'/>
</array-type-def>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<array-type-def dimensions='1' type-id='3a47d82b' size-in-bits='256' id='a133ec23'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
@@ -988,13 +989,6 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/getmntany.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='d315442e' size-in-bits='16' id='811205dc'>
- <subrange length='1' type-id='7359adad' id='52f813b4'/>
- </array-type-def>
- <array-type-def dimensions='1' type-id='d3130597' size-in-bits='768' id='f63f23b9'>
- <subrange length='12' type-id='7359adad' id='84827bdc'/>
- </array-type-def>
- <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
@@ -1029,93 +1023,6 @@
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__u16' type-id='8efea9e5' id='d315442e'/>
- <typedef-decl name='__s32' type-id='95e97e5e' id='3158a266'/>
- <typedef-decl name='__u32' type-id='f0981eeb' id='3f1a6b60'/>
- <typedef-decl name='__s64' type-id='1eb56b1e' id='49659421'/>
- <typedef-decl name='__u64' type-id='3a47d82b' id='d3130597'/>
- <class-decl name='statx_timestamp' size-in-bits='128' is-struct='yes' visibility='default' id='94101016'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='49659421' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__reserved' type-id='3158a266' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='statx' size-in-bits='2048' is-struct='yes' visibility='default' id='720b04c5'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='stx_mask' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='stx_blksize' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='stx_attributes' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='stx_nlink' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='stx_uid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='stx_gid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='stx_mode' type-id='d315442e' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='__spare0' type-id='811205dc' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='stx_ino' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='stx_size' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='stx_blocks' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='stx_attributes_mask' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='stx_atime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='stx_btime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='stx_ctime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='stx_mtime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='stx_rdev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1056'>
- <var-decl name='stx_rdev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='stx_dev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1120'>
- <var-decl name='stx_dev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='stx_mnt_id' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__spare2' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__spare3' type-id='f63f23b9' visibility='default'/>
- </data-member>
- </class-decl>
<class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='56fe4a37'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_fsname' type-id='26a90f95' visibility='default'/>
@@ -1191,8 +1098,6 @@
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<qualified-type-def type-id='62f7a03d' restrict='yes' id='f1cadedf'/>
- <pointer-type-def type-id='720b04c5' size-in-bits='64' id='936b8e35'/>
- <qualified-type-def type-id='936b8e35' restrict='yes' id='31d265b7'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
@@ -1208,14 +1113,6 @@
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
- <function-decl name='statx' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='9d26089a'/>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='f0981eeb'/>
- <parameter type-id='31d265b7'/>
- <return type-id='95e97e5e'/>
- </function-decl>
<function-decl name='stat64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='f1cadedf'/>
diff --git a/sys/contrib/openzfs/man/man8/zinject.8 b/sys/contrib/openzfs/man/man8/zinject.8
index 1d9e43aed5ec..704f6a7accd8 100644
--- a/sys/contrib/openzfs/man/man8/zinject.8
+++ b/sys/contrib/openzfs/man/man8/zinject.8
@@ -138,6 +138,20 @@ This injector is automatically cleared after the import is finished.
.
.It Xo
.Nm zinject
+.Fl E Ar delay
+.Op Fl a
+.Op Fl m
+.Op Fl f Ar freq
+.Op Fl l Ar level
+.Op Fl r Ar range
+.Op Fl T Ar iotype
+.Op Fl t Ar type Ns | Ns Fl b Ar bookmark
+.Xc
+Inject pipeline ready stage delays for the given object or bookmark.
+The delay is specified in milliseconds.
+.
+.It Xo
+.Nm zinject
.Fl I
.Op Fl s Ar seconds Ns | Ns Fl g Ar txgs
.Ar pool
diff --git a/sys/contrib/openzfs/man/man8/zpool-upgrade.8 b/sys/contrib/openzfs/man/man8/zpool-upgrade.8
index cf69060da5ce..adae47f82eb1 100644
--- a/sys/contrib/openzfs/man/man8/zpool-upgrade.8
+++ b/sys/contrib/openzfs/man/man8/zpool-upgrade.8
@@ -65,10 +65,10 @@ property).
.Cm upgrade
.Fl v
.Xc
-Displays legacy ZFS versions supported by the this version of ZFS.
+Displays legacy ZFS versions supported by this version of ZFS.
See
.Xr zpool-features 7
-for a description of feature flags features supported by this version of ZFS.
+for a description of features supported by this version of ZFS.
.It Xo
.Nm zpool
.Cm upgrade
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index bac166fcd89e..967a018640e1 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -484,7 +484,28 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = io_offset(bio, rq);
uint64_t size = io_size(bio, rq);
- int rw = io_data_dir(bio, rq);
+ int rw;
+
+ if (rq != NULL) {
+ /*
+ * Flush & trim requests go down the zvol_write codepath. Or
+ * more specifically:
+ *
+ * If request is a write, or if it's op_is_sync() and not a
+ * read, or if it's a flush, or if it's a discard, then send the
+ * request down the write path.
+ */
+ if (op_is_write(rq->cmd_flags) ||
+ (op_is_sync(rq->cmd_flags) && req_op(rq) != REQ_OP_READ) ||
+ req_op(rq) == REQ_OP_FLUSH ||
+ op_is_discard(rq->cmd_flags)) {
+ rw = WRITE;
+ } else {
+ rw = READ;
+ }
+ } else {
+ rw = bio_data_dir(bio);
+ }
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
zvol_end_io(bio, rq, SET_ERROR(ENXIO));
diff --git a/sys/contrib/openzfs/module/zcommon/zfs_prop.c b/sys/contrib/openzfs/module/zcommon/zfs_prop.c
index 864e3898b365..9190ae0362ea 100644
--- a/sys/contrib/openzfs/module/zcommon/zfs_prop.c
+++ b/sys/contrib/openzfs/module/zcommon/zfs_prop.c
@@ -364,8 +364,8 @@ zfs_prop_init(void)
static const zprop_index_t xattr_table[] = {
{ "off", ZFS_XATTR_OFF },
- { "on", ZFS_XATTR_SA },
{ "sa", ZFS_XATTR_SA },
+ { "on", ZFS_XATTR_SA },
{ "dir", ZFS_XATTR_DIR },
{ NULL }
};
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index bd6dc8edd8ca..591e2dade59e 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1392,6 +1392,7 @@ arc_get_complevel(arc_buf_t *buf)
return (buf->b_hdr->b_complevel);
}
+__maybe_unused
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 6c150d31c669..e88d394b5229 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -2656,6 +2656,32 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
}
/*
+ * Adjust *offset to the next (or previous) block byte offset at lvl.
+ * Returns FALSE if *offset would overflow or underflow.
+ */
+static boolean_t
+dnode_next_block(dnode_t *dn, int flags, uint64_t *offset, int lvl)
+{
+ int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
+ int span = lvl * epbs + dn->dn_datablkshift;
+ uint64_t blkid, maxblkid;
+
+ if (span >= 8 * sizeof (uint64_t))
+ return (B_FALSE);
+
+ blkid = *offset >> span;
+ maxblkid = 1ULL << (8 * sizeof (*offset) - span);
+ if (!(flags & DNODE_FIND_BACKWARDS) && blkid + 1 < maxblkid)
+ *offset = (blkid + 1) << span;
+ else if ((flags & DNODE_FIND_BACKWARDS) && blkid > 0)
+ *offset = (blkid << span) - 1;
+ else
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
@@ -2682,7 +2708,7 @@ int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
- uint64_t initial_offset = *offset;
+ uint64_t matched = *offset;
int lvl, maxlvl;
int error = 0;
@@ -2706,16 +2732,36 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
maxlvl = dn->dn_phys->dn_nlevels;
- for (lvl = minlvl; lvl <= maxlvl; lvl++) {
+ for (lvl = minlvl; lvl <= maxlvl; ) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
- if (error != ESRCH)
+ if (error == 0 && lvl > minlvl) {
+ --lvl;
+ matched = *offset;
+ } else if (error == ESRCH && lvl < maxlvl &&
+ dnode_next_block(dn, flags, &matched, lvl)) {
+ /*
+ * Continue search at next/prev offset in lvl+1 block.
+ *
+ * Usually we only search upwards at the start of the
+ * search as higher level blocks point at a matching
+ * minlvl block in most cases, but we backtrack if not.
+ *
+ * This can happen for txg > 0 searches if the block
+ * contains only BPs/dnodes freed at that txg. It also
+ * happens if we are still syncing out the tree, and
+ * some BP's at higher levels are not updated yet.
+ *
+ * We must adjust offset to avoid coming back to the
+ * same offset and getting stuck looping forever. This
+ * also deals with the case where offset is already at
+ * the beginning or end of the object.
+ */
+ ++lvl;
+ *offset = matched;
+ } else {
break;
- }
-
- while (error == 0 && --lvl >= minlvl) {
- error = dnode_next_offset_level(dn,
- flags, offset, lvl, blkfill, txg);
+ }
}
/*
@@ -2727,9 +2773,6 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
error = 0;
}
- if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
- initial_offset < *offset : initial_offset > *offset))
- error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
diff --git a/sys/contrib/openzfs/module/zfs/mmp.c b/sys/contrib/openzfs/module/zfs/mmp.c
index 7db72b9b04b0..fd46127b6068 100644
--- a/sys/contrib/openzfs/module/zfs/mmp.c
+++ b/sys/contrib/openzfs/module/zfs/mmp.c
@@ -446,7 +446,7 @@ mmp_write_uberblock(spa_t *spa)
uint64_t offset;
hrtime_t lock_acquire_time = gethrtime();
- spa_config_enter_mmp(spa, SCL_STATE, mmp_tag, RW_READER);
+ spa_config_enter_priority(spa, SCL_STATE, mmp_tag, RW_READER);
lock_acquire_time = gethrtime() - lock_acquire_time;
if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index ea2d2c7227c8..d73195f1a21f 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -585,7 +585,7 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
* the size, since we do not support removing partial segments
* of range trees with gaps.
*/
- zfs_zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
+ zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
zfs_rs_get_start_raw(rs, rt));
zfs_range_tree_stat_incr(rt, &rs_tmp);
diff --git a/sys/contrib/openzfs/module/zfs/spa_config.c b/sys/contrib/openzfs/module/zfs/spa_config.c
index cf28955b0c50..f615591e826b 100644
--- a/sys/contrib/openzfs/module/zfs/spa_config.c
+++ b/sys/contrib/openzfs/module/zfs/spa_config.c
@@ -372,6 +372,8 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
+ fnvlist_add_uint64(config, ZPOOL_CONFIG_MIN_ALLOC, spa->spa_min_alloc);
+ fnvlist_add_uint64(config, ZPOOL_CONFIG_MAX_ALLOC, spa->spa_max_alloc);
if (spa->spa_comment != NULL)
fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
spa->spa_comment);
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 6f7c060f97f8..0bead6d49666 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -510,7 +510,7 @@ spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
- int mmp_flag)
+ int priority_flag)
{
(void) tag;
int wlocks_held = 0;
@@ -526,7 +526,7 @@ spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer ||
- (!mmp_flag && scl->scl_write_wanted)) {
+ (!priority_flag && scl->scl_write_wanted)) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
@@ -551,7 +551,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
}
/*
- * The spa_config_enter_mmp() allows the mmp thread to cut in front of
+ * The spa_config_enter_priority() allows the mmp thread to cut in front of
* outstanding write lock requests. This is needed since the mmp updates are
* time sensitive and failure to service them promptly will result in a
* suspended pool. This pool suspension has been seen in practice when there is
@@ -560,7 +560,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
*/
void
-spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
+spa_config_enter_priority(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 1);
}
@@ -806,6 +806,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
+ spa->spa_max_alloc = 0;
spa->spa_gcd_alloc = INT_MAX;
/* Reset cached value */
@@ -1865,6 +1866,19 @@ spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
}
/*
+ * Return the range of minimum allocation sizes for the normal allocation
+ * class. This can be used by external consumers of the DMU to estimate
+ * potential wasted capacity when setting the recordsize for an object.
+ * This is mainly for dRAID pools which always pad to a full stripe width.
+ */
+void
+spa_get_min_alloc_range(spa_t *spa, uint64_t *min_alloc, uint64_t *max_alloc)
+{
+ *min_alloc = spa->spa_min_alloc;
+ *max_alloc = spa->spa_max_alloc;
+}
+
+/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
@@ -3085,6 +3099,7 @@ EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
+EXPORT_SYMBOL(spa_get_min_alloc_range); /* for Lustre */
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index fc6d445f9785..654e034de9e1 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1497,12 +1497,14 @@ vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
{
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
- if (spa->spa_gcd_alloc == INT_MAX) {
+
+ if (min_alloc > spa->spa_max_alloc)
+ spa->spa_max_alloc = min_alloc;
+
+ if (spa->spa_gcd_alloc == INT_MAX)
spa->spa_gcd_alloc = min_alloc;
- } else {
- spa->spa_gcd_alloc = vdev_gcd(min_alloc,
- spa->spa_gcd_alloc);
- }
+ else
+ spa->spa_gcd_alloc = vdev_gcd(min_alloc, spa->spa_gcd_alloc);
}
void
@@ -1560,8 +1562,7 @@ vdev_metaslab_group_create(vdev_t *vd)
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
- uint64_t min_alloc = vdev_get_min_alloc(vd);
- vdev_spa_set_alloc(spa, min_alloc);
+ vdev_spa_set_alloc(spa, vdev_get_min_alloc(vd));
}
}
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_label.c b/sys/contrib/openzfs/module/zfs/vdev_label.c
index c44f654b0261..0d4fdaa77ba0 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_label.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_label.c
@@ -511,6 +511,8 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_MIN_ALLOC,
+ vdev_get_min_alloc(vd));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
if (vd->vdev_noalloc) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 4cf8912d4269..aeea58bedfe4 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -4574,8 +4574,29 @@ zio_vdev_io_start(zio_t *zio)
ASSERT0(zio->io_child_error[ZIO_CHILD_VDEV]);
if (vd == NULL) {
- if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
- spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
+ if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) {
+ /*
+ * A deadlock workaround. The ddt_prune_unique_entries()
+ * -> prune_candidates_sync() code path takes the
+ * SCL_ZIO reader lock and may request it again here.
+ * If there is another thread who wants the SCL_ZIO
+ * writer lock, then scl_write_wanted will be set.
+ * Thus, the spa_config_enter_priority() is used to
+ * ignore pending writer requests.
+ *
+ * The locking should be revised to remove the need
+ * for this workaround. If that's not workable then
+ * it should only be applied to the zios involved in
+ * the pruning process. This impacts the read/write
+ * I/O balance while pruning.
+ */
+ if (spa->spa_active_ddt_prune)
+ spa_config_enter_priority(spa, SCL_ZIO, zio,
+ RW_READER);
+ else
+ spa_config_enter(spa, SCL_ZIO, zio,
+ RW_READER);
+ }
/*
* The mirror_ops handle multiple DVAs in a single BP.
@@ -5305,6 +5326,16 @@ zio_ready(zio_t *zio)
return (NULL);
}
+ if (zio_injection_enabled) {
+ hrtime_t target = zio_handle_ready_delay(zio);
+ if (target != 0 && zio->io_target_timestamp == 0) {
+ zio->io_stage >>= 1;
+ zio->io_target_timestamp = target;
+ zio_delay_interrupt(zio);
+ return (NULL);
+ }
+ }
+
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(BP_GET_BIRTH(bp) == zio->io_txg ||
diff --git a/sys/contrib/openzfs/module/zfs/zio_inject.c b/sys/contrib/openzfs/module/zfs/zio_inject.c
index 981a1be4847c..287577018ed1 100644
--- a/sys/contrib/openzfs/module/zfs/zio_inject.c
+++ b/sys/contrib/openzfs/module/zfs/zio_inject.c
@@ -827,6 +827,44 @@ zio_handle_export_delay(spa_t *spa, hrtime_t elapsed)
zio_handle_pool_delay(spa, elapsed, ZINJECT_DELAY_EXPORT);
}
+/*
+ * For testing, inject a delay before ready state.
+ */
+hrtime_t
+zio_handle_ready_delay(zio_t *zio)
+{
+ inject_handler_t *handler;
+ hrtime_t now = gethrtime();
+ hrtime_t target = 0;
+
+ /*
+ * Ignore I/O not associated with any logical data.
+ */
+ if (zio->io_logical == NULL)
+ return (0);
+
+ rw_enter(&inject_lock, RW_READER);
+
+ for (handler = list_head(&inject_handlers); handler != NULL;
+ handler = list_next(&inject_handlers, handler)) {
+ if (zio->io_spa != handler->zi_spa ||
+ handler->zi_record.zi_cmd != ZINJECT_DELAY_READY)
+ continue;
+
+ /* If this handler matches, inject the delay */
+ if (zio_match_iotype(zio, handler->zi_record.zi_iotype) &&
+ zio_match_handler(&zio->io_logical->io_bookmark,
+ zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
+ zio_match_dva(zio), &handler->zi_record, zio->io_error)) {
+ target = now + (hrtime_t)handler->zi_record.zi_timer;
+ break;
+ }
+ }
+
+ rw_exit(&inject_lock);
+ return (target);
+}
+
static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
diff --git a/sys/contrib/openzfs/module/zstd/zfs_zstd.c b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
index 3db196953f74..c403c001086a 100644
--- a/sys/contrib/openzfs/module/zstd/zfs_zstd.c
+++ b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
@@ -441,64 +441,6 @@ zstd_enum_to_level(enum zio_zstd_levels level, int16_t *zstd_level)
}
#ifndef IN_LIBSA
-static size_t
-zfs_zstd_compress_wrap(void *s_start, void *d_start, size_t s_len, size_t d_len,
- int level)
-{
- int16_t zstd_level;
- if (zstd_enum_to_level(level, &zstd_level)) {
- ZSTDSTAT_BUMP(zstd_stat_com_inval);
- return (s_len);
- }
- /*
- * A zstd early abort heuristic.
- *
- * - Zeroth, if this is <= zstd-3, or < zstd_abort_size (currently
- * 128k), don't try any of this, just go.
- * (because experimentally that was a reasonable cutoff for a perf win
- * with tiny ratio change)
- * - First, we try LZ4 compression, and if it doesn't early abort, we
- * jump directly to whatever compression level we intended to try.
- * - Second, we try zstd-1 - if that errors out (usually, but not
- * exclusively, if it would overflow), we give up early.
- *
- * If it works, instead we go on and compress anyway.
- *
- * Why two passes? LZ4 alone gets you a lot of the way, but on highly
- * compressible data, it was losing up to 8.5% of the compressed
- * savings versus no early abort, and all the zstd-fast levels are
- * worse indications on their own than LZ4, and don't improve the LZ4
- * pass noticably if stacked like this.
- */
- size_t actual_abort_size = zstd_abort_size;
- if (zstd_earlyabort_pass > 0 && zstd_level >= zstd_cutoff_level &&
- s_len >= actual_abort_size) {
- int pass_len = 1;
- pass_len = zfs_lz4_compress(s_start, d_start, s_len, d_len, 0);
- if (pass_len < d_len) {
- ZSTDSTAT_BUMP(zstd_stat_lz4pass_allowed);
- goto keep_trying;
- }
- ZSTDSTAT_BUMP(zstd_stat_lz4pass_rejected);
-
- pass_len = zfs_zstd_compress(s_start, d_start, s_len, d_len,
- ZIO_ZSTD_LEVEL_1);
- if (pass_len == s_len || pass_len <= 0 || pass_len > d_len) {
- ZSTDSTAT_BUMP(zstd_stat_zstdpass_rejected);
- return (s_len);
- }
- ZSTDSTAT_BUMP(zstd_stat_zstdpass_allowed);
- } else {
- ZSTDSTAT_BUMP(zstd_stat_passignored);
- if (s_len < actual_abort_size) {
- ZSTDSTAT_BUMP(zstd_stat_passignored_size);
- }
- }
-keep_trying:
- return (zfs_zstd_compress(s_start, d_start, s_len, d_len, level));
-
-}
-
/* Compress block using zstd */
static size_t
zfs_zstd_compress_impl(void *s_start, void *d_start, size_t s_len, size_t d_len,
diff --git a/sys/contrib/openzfs/scripts/zfs-tests.sh b/sys/contrib/openzfs/scripts/zfs-tests.sh
index 04f3b6f32cb8..5a0a1a609448 100755
--- a/sys/contrib/openzfs/scripts/zfs-tests.sh
+++ b/sys/contrib/openzfs/scripts/zfs-tests.sh
@@ -38,6 +38,7 @@ DEBUG=""
CLEANUP="yes"
CLEANUPALL="no"
KMSG=""
+TIMEOUT_DEBUG=""
LOOPBACK="yes"
STACK_TRACER="no"
FILESIZE="4G"
@@ -364,6 +365,7 @@ OPTIONS:
-k Disable cleanup after test failure
-K Log test names to /dev/kmsg
-f Use files only, disables block device tests
+ -O Dump debugging info to /dev/kmsg on test timeout
-S Enable stack tracer (negative performance impact)
-c Only create and populate constrained path
-R Automatically rerun failing tests
@@ -402,7 +404,7 @@ $0 -x
EOF
}
-while getopts 'hvqxkKfScRmn:d:Ds:r:?t:T:u:I:' OPTION; do
+while getopts 'hvqxkKfScRmOn:d:Ds:r:?t:T:u:I:' OPTION; do
case $OPTION in
h)
usage
@@ -445,6 +447,9 @@ while getopts 'hvqxkKfScRmn:d:Ds:r:?t:T:u:I:' OPTION; do
export NFS=1
. "$nfsfile"
;;
+ O)
+ TIMEOUT_DEBUG="yes"
+ ;;
d)
FILEDIR="$OPTARG"
;;
@@ -773,6 +778,7 @@ msg "${TEST_RUNNER}" \
"${DEBUG:+-D}" \
"${KMEMLEAK:+-m}" \
"${KMSG:+-K}" \
+ "${TIMEOUT_DEBUG:+-O}" \
"-c \"${RUNFILES}\"" \
"-T \"${TAGS}\"" \
"-i \"${STF_SUITE}\"" \
@@ -783,6 +789,7 @@ msg "${TEST_RUNNER}" \
${DEBUG:+-D} \
${KMEMLEAK:+-m} \
${KMSG:+-K} \
+ ${TIMEOUT_DEBUG:+-O} \
-c "${RUNFILES}" \
-T "${TAGS}" \
-i "${STF_SUITE}" \
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index 2b002830c82f..9f531411fbe1 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -168,10 +168,10 @@ tags = ['functional', 'cli_root', 'zinject']
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
- 'zdb_display_block', 'zdb_encrypted', 'zdb_label_checksum',
- 'zdb_object_range_neg', 'zdb_object_range_pos', 'zdb_objset_id',
- 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2', 'zdb_backup',
- 'zdb_tunables']
+ 'zdb_display_block', 'zdb_encrypted', 'zdb_encrypted_raw',
+ 'zdb_label_checksum', 'zdb_object_range_neg', 'zdb_object_range_pos',
+ 'zdb_objset_id', 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2',
+ 'zdb_backup', 'zdb_tunables']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
@@ -395,8 +395,9 @@ tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
- 'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
- 'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
+ 'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_warn_create',
+ 'zpool_add_warn_degraded', 'zpool_add_warn_removal', 'add-o_ashift',
+ 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
@@ -490,6 +491,10 @@ tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
+[tests/functional/cli_root/zpool_iostat]
+tests = ['zpool_iostat_interval_all', 'zpool_iostat_interval_some']
+tags = ['functional', 'cli_root', 'zpool_iostat']
+
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
@@ -1085,7 +1090,8 @@ tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
- 'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_compat']
+ 'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_014_pos',
+ 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
diff --git a/sys/contrib/openzfs/tests/runfiles/sanity.run b/sys/contrib/openzfs/tests/runfiles/sanity.run
index b56ffc3a4a2d..249b415029c4 100644
--- a/sys/contrib/openzfs/tests/runfiles/sanity.run
+++ b/sys/contrib/openzfs/tests/runfiles/sanity.run
@@ -622,7 +622,7 @@ tags = ['functional', 'vdev_zaps']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
- 'xattr_011_pos', 'xattr_013_pos', 'xattr_compat']
+ 'xattr_011_pos', 'xattr_013_pos', 'xattr_014_pos', 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in b/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in
index 2158208be6e5..d2c1185e4a94 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in
@@ -34,6 +34,7 @@ from select import select
from subprocess import PIPE
from subprocess import Popen
from subprocess import check_output
+from subprocess import run
from threading import Timer
from time import time, CLOCK_MONOTONIC
from os.path import exists
@@ -187,6 +188,63 @@ User: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user)
def kill_cmd(self, proc, options, kmemleak, keyboard_interrupt=False):
+
+ """
+ We're about to kill a command due to a timeout.
+ If we're running with the -O option, then dump debug info about the
+ process with the highest CPU usage to /dev/kmsg (Linux only). This can
+ help debug the timeout.
+
+ Debug info includes:
+ - 30 lines from 'top'
+ - /proc/<PID>/stack output of process with highest CPU usage
+ - Last lines strace-ing process with highest CPU usage
+ """
+ if exists("/dev/kmsg"):
+ c = """
+TOP_OUT="$(COLUMNS=160 top -b -n 1 | head -n 30)"
+read -r PID CMD <<< $(echo "$TOP_OUT" | /usr/bin/awk \
+"/COMMAND/{
+ print_next=1
+ next
+}
+{
+ if (print_next == 1) {
+ print \\$1\\" \\"\\$12
+ exit
+ }
+}")
+echo "##### ZTS timeout debug #####"
+echo "----- top -----"
+echo "$TOP_OUT"
+echo "----- /proc/$PID/stack ($CMD)) -----"
+cat /proc/$PID/stack
+echo "----- strace ($CMD) -----"
+TMPFILE="$(mktemp --suffix=ZTS)"
+/usr/bin/strace -k --stack-traces -p $PID &> "$TMPFILE" &
+sleep 0.1
+killall strace
+tail -n 30 $TMPFILE
+rm "$TMPFILE"
+echo "##### /proc/sysrq-trigger stack #####"
+"""
+ c = "sudo bash -c '" + c + "'"
+ data = run(c, capture_output=True, shell=True, text=True)
+ out = data.stdout
+ try:
+ kp = Popen([SUDO, "sh", "-c",
+ "echo '" + out + "' > /dev/kmsg"])
+ kp.wait()
+
+ """
+ Trigger kernel stack traces
+ """
+ kp = Popen([SUDO, "sh", "-c",
+ "echo l > /proc/sysrq-trigger"])
+ kp.wait()
+ except Exception:
+ pass
+
"""
Kill a running command due to timeout, or ^C from the keyboard. If
sudo is required, this user was verified previously.
@@ -1129,6 +1187,9 @@ def parse_args():
parser.add_option('-o', action='callback', callback=options_cb,
default=BASEDIR, dest='outputdir', type='string',
metavar='outputdir', help='Specify an output directory.')
+ parser.add_option('-O', action='store_true', default=False,
+ dest='timeout_debug',
+ help='Dump debugging info to /dev/kmsg on test timeout')
parser.add_option('-i', action='callback', callback=options_cb,
default=TESTDIR, dest='testdir', type='string',
metavar='testdir', help='Specify a test directory.')
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index 1517f90e99a5..678c01b58f94 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -197,6 +197,7 @@ nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
+ functional/cli_root/zpool_iostat/zpool_iostat.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
@@ -640,6 +641,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_encrypted.ksh \
+ functional/cli_root/zdb/zdb_encrypted_raw.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
@@ -1027,7 +1029,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
- functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
+ functional/cli_root/zpool_add/zpool_add_warn_create.ksh \
+ functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh \
+ functional/cli_root/zpool_add/zpool_add_warn_removal.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
@@ -1178,6 +1182,10 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool_import/zpool_import_parallel_admin.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_neg.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_pos.ksh \
+ functional/cli_root/zpool_iostat/setup.ksh \
+ functional/cli_root/zpool_iostat/cleanup.ksh \
+ functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh \
+ functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
@@ -2226,6 +2234,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
+ functional/xattr/xattr_014_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zap_shrink/cleanup.ksh \
functional/zap_shrink/zap_shrink_001_pos.ksh \
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh
new file mode 100755
index 000000000000..85d267d5402f
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh
@@ -0,0 +1,75 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+
+#
+# DESCRIPTION:
+# 'zdb -K ...' should enable reading from a raw-encrypted dataset
+#
+# STRATEGY:
+# 1. Create an encrypted dataset
+# 2. Write some data to a file
+# 3. Run zdb -dddd on the file, confirm it can't be read
+# 4. Run zdb -K ... -dddd on the file, confirm it can be read
+#
+
+verify_runnable "both"
+
+dataset="$TESTPOOL/$TESTFS2"
+file="$TESTDIR2/somefile"
+keyfile="$TEST_BASE_DIR/keyfile"
+
+function cleanup
+{
+ datasetexists "$dataset" && destroy_dataset "$dataset" -f
+ rm -f "$keyfile"
+ default_cleanup_noexit
+}
+
+log_onexit cleanup
+
+log_must default_setup_noexit $DISKS
+
+log_assert "'zdb -K' should enable reading from a raw-encrypted dataset"
+
+# The key must be 32 bytes long.
+echo -n "$RAWKEY" > "$keyfile"
+
+log_must zfs create -o mountpoint="$TESTDIR2" \
+ -o encryption=on -o keyformat=raw -o keylocation="file://$keyfile" \
+ "$dataset"
+
+echo 'my great encrypted text' > "$file"
+
+typeset -i obj=$(ls -i "$file" | cut -d' ' -f1)
+typeset -i size=$(wc -c < "$file")
+
+log_note "test file $file is objid $obj, size $size"
+
+sync_pool "$TESTPOOL" true
+
+log_must eval "zdb -dddd $dataset $obj | grep -q 'object encrypted'"
+
+log_must eval "zdb -K $keyfile -dddd $dataset $obj | grep -q 'size\s$size$'"
+
+log_pass "'zdb -K' enables reading from a raw-encrypted dataset"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib
index 091d65bb4f33..74780bb02141 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib
@@ -27,6 +27,7 @@
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
@@ -89,3 +90,44 @@ function save_dump_dev
fi
echo $dumpdev
}
+
+function zpool_create_add_setup
+{
+ typeset -i i=0
+
+ while ((i < 10)); do
+ log_must truncate -s $MINVDEVSIZE $TEST_BASE_DIR/vdev$i
+
+ eval vdev$i=$TEST_BASE_DIR/vdev$i
+ ((i += 1))
+ done
+
+ if is_linux; then
+ vdev_lo="$(losetup -f "$vdev4" --show)"
+ elif is_freebsd; then
+ vdev_lo=/dev/"$(mdconfig -a -t vnode -f "$vdev4")"
+ else
+ vdev_lo="$(lofiadm -a "$vdev4")"
+ fi
+}
+
+function zpool_create_add_cleanup
+{
+ datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1
+
+ if [[ -e $vdev_lo ]]; then
+ if is_linux; then
+ log_must losetup -d "$vdev_lo"
+ elif is_freebsd; then
+ log_must mdconfig -d -u "$vdev_lo"
+ else
+ log_must lofiadm -d "$vdev_lo"
+ fi
+ fi
+
+ typeset -i i=0
+ while ((i < 10)); do
+ rm -f $TEST_BASE_DIR/vdev$i
+ ((i += 1))
+ done
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_create.ksh
index df085a2ec746..661e55998d8d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_create.ksh
@@ -23,67 +23,51 @@
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
-# Use is subject to license terms.
-#
-
-#
-# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
#
# DESCRIPTION:
-# Verify zpool add succeed when adding vdevs with matching redundancy.
+# Verify zpool add succeeds when adding vdevs with matching redundancy
+# and warns with differing redundancy for a healthy pool.
#
# STRATEGY:
# 1. Create several files == $MINVDEVSIZE.
# 2. Verify 'zpool add' succeeds with matching redundancy.
# 3. Verify 'zpool add' warns with differing redundancy.
-# 4. Verify 'zpool add' warns with differing redundancy after removal.
#
verify_runnable "global"
-function cleanup
-{
- datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1
-
- typeset -i i=0
- while ((i < 10)); do
- rm -f $TEST_BASE_DIR/vdev$i
- ((i += 1))
- done
-}
-
+log_assert "Verify 'zpool add' warns for differing redundancy."
+log_onexit zpool_create_add_cleanup
-log_assert "Verify 'zpool add' succeed with keywords combination."
-log_onexit cleanup
+zpool_create_add_setup
-# 1. Create several files == $MINVDEVSIZE.
typeset -i i=0
-while ((i < 10)); do
- log_must truncate -s $MINVDEVSIZE $TEST_BASE_DIR/vdev$i
-
- eval vdev$i=$TEST_BASE_DIR/vdev$i
- ((i += 1))
-done
+typeset -i j=0
set -A redundancy0_create_args \
"$vdev0"
set -A redundancy1_create_args \
"mirror $vdev0 $vdev1" \
- "raidz1 $vdev0 $vdev1"
+ "raidz1 $vdev0 $vdev1" \
+ "draid1:1s $vdev0 $vdev1 $vdev9"
set -A redundancy2_create_args \
"mirror $vdev0 $vdev1 $vdev2" \
- "raidz2 $vdev0 $vdev1 $vdev2"
+ "raidz2 $vdev0 $vdev1 $vdev2" \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev9"
set -A redundancy3_create_args \
"mirror $vdev0 $vdev1 $vdev2 $vdev3" \
- "raidz3 $vdev0 $vdev1 $vdev2 $vdev3"
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3" \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
set -A redundancy0_add_args \
"$vdev5" \
@@ -93,21 +77,19 @@ set -A redundancy1_add_args \
"mirror $vdev5 $vdev6" \
"raidz1 $vdev5 $vdev6" \
"raidz1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
- "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8"
+ "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8" \
+ "draid1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 draid1 $vdev7 $vdev8"
set -A redundancy2_add_args \
"mirror $vdev5 $vdev6 $vdev7" \
- "raidz2 $vdev5 $vdev6 $vdev7"
+ "raidz2 $vdev5 $vdev6 $vdev7" \
+ "draid2 $vdev5 $vdev6 $vdev7"
set -A redundancy3_add_args \
"mirror $vdev5 $vdev6 $vdev7 $vdev8" \
- "raidz3 $vdev5 $vdev6 $vdev7 $vdev8"
-
-set -A log_args "log" "$vdev4"
-set -A cache_args "cache" "$vdev4"
-set -A spare_args "spare" "$vdev4"
-
-typeset -i j=0
+ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" \
+ "draid3 $vdev5 $vdev6 $vdev7 $vdev8"
function zpool_create_add
{
@@ -148,30 +130,6 @@ function zpool_create_forced_add
done
}
-function zpool_create_rm_add
-{
- typeset -n create_args=$1
- typeset -n add_args=$2
- typeset -n rm_args=$3
-
- i=0
- while ((i < ${#create_args[@]})); do
- j=0
- while ((j < ${#add_args[@]})); do
- log_must zpool create $TESTPOOL1 ${create_args[$i]}
- log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
- log_must zpool add $TESTPOOL1 ${add_args[$j]}
- log_must zpool remove $TESTPOOL1 ${rm_args[1]}
- log_mustnot zpool add $TESTPOOL1 ${rm_args[1]}
- log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
- log_must zpool destroy -f $TESTPOOL1
-
- ((j += 1))
- done
- ((i += 1))
- done
-}
-
# 2. Verify 'zpool add' succeeds with matching redundancy.
zpool_create_add redundancy0_create_args redundancy0_add_args
zpool_create_add redundancy1_create_args redundancy1_add_args
@@ -195,17 +153,4 @@ zpool_create_forced_add redundancy3_create_args redundancy0_add_args
zpool_create_forced_add redundancy3_create_args redundancy1_add_args
zpool_create_forced_add redundancy3_create_args redundancy2_add_args
-# 4. Verify 'zpool add' warns with differing redundancy after removal.
-zpool_create_rm_add redundancy1_create_args redundancy1_add_args log_args
-zpool_create_rm_add redundancy2_create_args redundancy2_add_args log_args
-zpool_create_rm_add redundancy3_create_args redundancy3_add_args log_args
-
-zpool_create_rm_add redundancy1_create_args redundancy1_add_args cache_args
-zpool_create_rm_add redundancy2_create_args redundancy2_add_args cache_args
-zpool_create_rm_add redundancy3_create_args redundancy3_add_args cache_args
-
-zpool_create_rm_add redundancy1_create_args redundancy1_add_args spare_args
-zpool_create_rm_add redundancy2_create_args redundancy2_add_args spare_args
-zpool_create_rm_add redundancy3_create_args redundancy3_add_args spare_args
-
-log_pass "'zpool add' succeed with keywords combination."
+log_pass "Verify 'zpool add' warns for differing redundancy."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh
new file mode 100755
index 000000000000..313eb3666f27
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh
@@ -0,0 +1,204 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
+
+#
+# DESCRIPTION:
+# Verify zpool add succeeds when adding vdevs with matching redundancy
+# and warns with differing redundancy for a degraded pool.
+#
+# STRATEGY:
+# 1. Create several files == $MINVDEVSIZE.
+# 2. Verify 'zpool add' succeeds with matching redundancy
+# 3. Verify 'zpool add' warns with differing redundancy when
+# a. Degraded pool with replaced mismatch vdev (file vs disk)
+# b. Degraded pool dRAID distributed spare active
+# c. Degraded pool hot spare active
+#
+
+verify_runnable "global"
+
+log_assert "Verify 'zpool add' warns for differing redundancy."
+log_onexit zpool_create_add_cleanup
+
+zpool_create_add_setup
+
+set -A redundancy1_create_args \
+ "mirror $vdev0 $vdev1" \
+ "raidz1 $vdev0 $vdev1" \
+ "draid1:1s $vdev0 $vdev1 $vdev9"
+
+set -A redundancy2_create_args \
+ "mirror $vdev0 $vdev1 $vdev2" \
+ "raidz2 $vdev0 $vdev1 $vdev2" \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev9"
+
+set -A redundancy3_create_args \
+ "mirror $vdev0 $vdev1 $vdev2 $vdev3" \
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3" \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
+
+set -A redundancy1_add_args \
+ "mirror $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8" \
+ "draid1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 draid1 $vdev7 $vdev8"
+
+set -A redundancy2_add_args \
+ "mirror $vdev5 $vdev6 $vdev7" \
+ "raidz2 $vdev5 $vdev6 $vdev7" \
+ "draid2 $vdev5 $vdev6 $vdev7"
+
+set -A redundancy3_add_args \
+ "mirror $vdev5 $vdev6 $vdev7 $vdev8" \
+ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" \
+ "draid3 $vdev5 $vdev6 $vdev7 $vdev8"
+
+set -A redundancy1_create_draid_args \
+ "draid1:1s $vdev0 $vdev1 $vdev2"
+
+set -A redundancy2_create_draid_args \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev3"
+
+set -A redundancy3_create_draid_args \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
+
+set -A redundancy1_create_spare_args \
+ "mirror $vdev0 $vdev1 spare $vdev_lo" \
+ "raidz1 $vdev0 $vdev1 spare $vdev_lo" \
+ "draid1 $vdev0 $vdev1 spare $vdev_lo"
+
+set -A redundancy2_create_spare_args \
+ "mirror $vdev0 $vdev1 $vdev2 spare $vdev_lo" \
+ "raidz2 $vdev0 $vdev1 $vdev2 spare $vdev_lo" \
+ "draid2 $vdev0 $vdev1 $vdev2 spare $vdev_lo"
+
+set -A redundancy3_create_spare_args \
+ "mirror $vdev0 $vdev1 $vdev2 $vdev3 spare $vdev_lo" \
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3 spare $vdev_lo" \
+ "draid3 $vdev0 $vdev1 $vdev2 $vdev3 spare $vdev_lo"
+
+set -A replace_args "$vdev1" "$vdev_lo"
+set -A draid1_args "$vdev1" "draid1-0-0"
+set -A draid2_args "$vdev1" "draid2-0-0"
+set -A draid3_args "$vdev1" "draid3-0-0"
+
+typeset -i i=0
+typeset -i j=0
+
+function zpool_create_degraded_add
+{
+ typeset -n create_args=$1
+ typeset -n add_args=$2
+ typeset -n rm_args=$3
+
+ i=0
+ while ((i < ${#create_args[@]})); do
+ j=0
+ while ((j < ${#add_args[@]})); do
+ log_must zpool create $TESTPOOL1 ${create_args[$i]}
+ log_must zpool offline -f $TESTPOOL1 ${rm_args[0]}
+ log_must zpool replace -w $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_must zpool add $TESTPOOL1 ${add_args[$j]}
+ log_must zpool destroy -f $TESTPOOL1
+ log_must zpool labelclear -f ${rm_args[0]}
+
+ ((j += 1))
+ done
+ ((i += 1))
+ done
+}
+
+function zpool_create_forced_degraded_add
+{
+ typeset -n create_args=$1
+ typeset -n add_args=$2
+ typeset -n rm_args=$3
+
+ i=0
+ while ((i < ${#create_args[@]})); do
+ j=0
+ while ((j < ${#add_args[@]})); do
+ log_must zpool create $TESTPOOL1 ${create_args[$i]}
+ log_must zpool offline -f $TESTPOOL1 ${rm_args[0]}
+ log_must zpool replace -w $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_mustnot zpool add $TESTPOOL1 ${add_args[$j]}
+ log_must zpool add --allow-replication-mismatch $TESTPOOL1 ${add_args[$j]}
+ log_must zpool destroy -f $TESTPOOL1
+ log_must zpool labelclear -f ${rm_args[0]}
+
+ ((j += 1))
+ done
+ ((i += 1))
+ done
+}
+
+# 2. Verify 'zpool add' succeeds with matching redundancy and a degraded pool.
+zpool_create_degraded_add redundancy1_create_args redundancy1_add_args replace_args
+zpool_create_degraded_add redundancy2_create_args redundancy2_add_args replace_args
+zpool_create_degraded_add redundancy3_create_args redundancy3_add_args replace_args
+
+# 3. Verify 'zpool add' warns with differing redundancy and a degraded pool.
+#
+# a. Degraded pool with replaced mismatch vdev (file vs disk)
+zpool_create_forced_degraded_add redundancy1_create_args redundancy2_add_args replace_args
+zpool_create_forced_degraded_add redundancy1_create_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy2_create_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy2_create_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy3_create_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy3_create_args redundancy2_add_args replace_args
+
+# b. Degraded pool dRAID distributed spare active
+
+zpool_create_forced_degraded_add redundancy1_create_draid_args redundancy2_add_args draid1_args
+zpool_create_forced_degraded_add redundancy1_create_draid_args redundancy3_add_args draid1_args
+
+zpool_create_forced_degraded_add redundancy2_create_draid_args redundancy1_add_args draid2_args
+zpool_create_forced_degraded_add redundancy2_create_draid_args redundancy3_add_args draid2_args
+
+zpool_create_forced_degraded_add redundancy3_create_draid_args redundancy1_add_args draid3_args
+zpool_create_forced_degraded_add redundancy3_create_draid_args redundancy2_add_args draid3_args
+
+# c. Degraded pool hot spare active
+zpool_create_forced_degraded_add redundancy1_create_spare_args redundancy2_add_args replace_args
+zpool_create_forced_degraded_add redundancy1_create_spare_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy2_create_spare_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy2_create_spare_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy3_create_spare_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy3_create_spare_args redundancy2_add_args replace_args
+
+log_pass "Verify 'zpool add' warns for differing redundancy."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh
new file mode 100755
index 000000000000..782858e301ac
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh
@@ -0,0 +1,126 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
+
+#
+# DESCRIPTION:
+# Verify zpool add succeeds when adding vdevs with matching redundancy
+# and warns with differing redundancy after removal.
+#
+# STRATEGY:
+# 1. Create several files == $MINVDEVSIZE.
+# 2. Verify 'zpool add' warns with differing redundancy after removal.
+#
+
+verify_runnable "global"
+
+log_assert "Verify 'zpool add' warns for differing redundancy."
+log_onexit zpool_create_add_cleanup
+
+zpool_create_add_setup
+
+typeset -i i=0
+typeset -i j=0
+
+set -A redundancy1_create_args \
+ "mirror $vdev0 $vdev1" \
+ "raidz1 $vdev0 $vdev1" \
+ "draid1:1s $vdev0 $vdev1 $vdev9"
+
+set -A redundancy2_create_args \
+ "mirror $vdev0 $vdev1 $vdev2" \
+ "raidz2 $vdev0 $vdev1 $vdev2" \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev9"
+
+set -A redundancy3_create_args \
+ "mirror $vdev0 $vdev1 $vdev2 $vdev3" \
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3" \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
+
+set -A redundancy1_add_args \
+ "mirror $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8" \
+ "draid1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 draid1 $vdev7 $vdev8"
+
+set -A redundancy2_add_args \
+ "mirror $vdev5 $vdev6 $vdev7" \
+ "raidz2 $vdev5 $vdev6 $vdev7" \
+ "draid2 $vdev5 $vdev6 $vdev7"
+
+set -A redundancy3_add_args \
+ "mirror $vdev5 $vdev6 $vdev7 $vdev8" \
+ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" \
+ "draid3 $vdev5 $vdev6 $vdev7 $vdev8"
+
+set -A log_args "log" "$vdev_lo"
+set -A cache_args "cache" "$vdev_lo"
+set -A spare_args "spare" "$vdev_lo"
+
+
+function zpool_create_rm_add
+{
+ typeset -n create_args=$1
+ typeset -n add_args=$2
+ typeset -n rm_args=$3
+
+ i=0
+ while ((i < ${#create_args[@]})); do
+ j=0
+ while ((j < ${#add_args[@]})); do
+ log_must zpool create $TESTPOOL1 ${create_args[$i]}
+ log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_must zpool add $TESTPOOL1 ${add_args[$j]}
+ log_must zpool remove $TESTPOOL1 ${rm_args[1]}
+ log_mustnot zpool add $TESTPOOL1 ${rm_args[1]}
+ log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_must zpool destroy -f $TESTPOOL1
+
+ ((j += 1))
+ done
+ ((i += 1))
+ done
+}
+
+# 2. Verify 'zpool add' warns with differing redundancy after removal.
+zpool_create_rm_add redundancy1_create_args redundancy1_add_args log_args
+zpool_create_rm_add redundancy2_create_args redundancy2_add_args log_args
+zpool_create_rm_add redundancy3_create_args redundancy3_add_args log_args
+
+zpool_create_rm_add redundancy1_create_args redundancy1_add_args cache_args
+zpool_create_rm_add redundancy2_create_args redundancy2_add_args cache_args
+zpool_create_rm_add redundancy3_create_args redundancy3_add_args cache_args
+
+zpool_create_rm_add redundancy1_create_args redundancy1_add_args spare_args
+zpool_create_rm_add redundancy2_create_args redundancy2_add_args spare_args
+zpool_create_rm_add redundancy3_create_args redundancy3_add_args spare_args
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh
new file mode 100755
index 000000000000..099b5426031d
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh
@@ -0,0 +1,30 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+#
+. $STF_SUITE/include/libtest.shlib
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh
new file mode 100755
index 000000000000..3529a0ccc015
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh
@@ -0,0 +1,32 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+#
+. $STF_SUITE/include/libtest.shlib
+
+verify_runnable "global"
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
new file mode 100644
index 000000000000..ea4b0bd2756d
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
@@ -0,0 +1,235 @@
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+# Since we want to make sure that iostat responds correctly as pools appear and
+# disappear, we run it in the background and capture its output to a file.
+# Once we're done, we parse the output and ensure it matches what we'd expect
+# from the operations we performed.
+#
+# Because iostat is producing output every interval, it may produce the "same"
+# output for each step of the change; in fact, we want that to make sure we
+# don't miss anything. So, we describe what we expect as a series of "chunks".
+# Each chunk is a particular kind of output, which may repeat. Current known
+# chunk types are:
+#
+# NOPOOL: the text "no pools available"
+# HEADER: three lines, starting with "capacity", "pool" and "----" respectively.
+# (the rough shape of the normal iostat header).
+# POOL1: a line starting with "pool1" (stats line for a pool of that name)
+# POOL2: a line starting with "pool2"
+# POOLBOTH: three lines, starting with "pool1", "pool2" (either order) and
+# "-----" respectively. (the pool stat output for multiple pools)
+#
+# (the parser may produce other chunks in a failed parse to assist with
+# debugging, but they should never be part of the "wanted" output. See the
+# parser commentary below).
+#
+# To help recognise the start of a new interval output, we run iostat with the
+# -T u option, which will output a numeric timestamp before each header or
+# second-or-later pool stat after the header.
+#
+# To keep the test run shorter, we use a subsecond interval, but to make sure
+# nothing is missed, we sleep for three intervals after each change.
+
+typeset _iostat_out=$(mktemp)
+typeset _iostat_pid=""
+
+function cleanup_iostat {
+ if [[ -n $_iostat_pid ]] ; then
+ kill -KILL $_iostat_pid || true
+ fi
+ rm -f $_iostat_out
+}
+
+function start_iostat {
+ zpool iostat -T u $@ 0.1 > $_iostat_out 2>&1 &
+ _iostat_pid=$!
+}
+
+function stop_iostat {
+ kill -TERM $_iostat_pid
+ wait $_iostat_pid
+ _iostat_pid=""
+}
+
+function delay_iostat {
+ sleep 0.3
+}
+
+typeset -a _iostat_expect
+function expect_iostat {
+ typeset chunk=$1
+ _iostat_expect+=($chunk)
+}
+
+# Parse the output. The `state` var is used to track state across
+# multiple lines. The `last` var and the `_got_iostat` function are used
+# to record the completed chunks, and to collapse repetitions.
+typeset -a _iostat_got
+typeset _iostat_last=""
+typeset _iostat_state=""
+
+function _got_iostat {
+ typeset chunk=$1
+ if [[ -n $chunk && $_iostat_last != $chunk ]] ; then
+ _iostat_last=$chunk
+ _iostat_got+=($chunk)
+ fi
+ _iostat_state=""
+}
+
+function verify_iostat {
+
+ cat $_iostat_out | while read line ; do
+
+ # The "no pools available" text has no timestamp or other
+ # header, and should never appear in the middle of multiline
+ # chunk, so we can close any in-flight state.
+ if [[ $line = "no pools available" ]] ; then
+ _got_iostat $_iostat_state
+ _got_iostat "NOPOOL"
+ continue
+ fi
+
+ # A run of digits alone on the line is a timestamp (the `-T u`
+ # switch to `iostat`). It closes any in-flight state as a
+ # complete chunk, and indicates the start of a new chunk.
+ if [[ -z ${line/#+([0-9])/} ]] ; then
+ _got_iostat $_iostat_state
+ _iostat_state="TIMESTAMP"
+ continue
+ fi
+
+ # For this test, the first word of each line should be unique,
+ # so we extract it and use it for simplicity.
+ typeset first=${line%% *}
+
+ # Header is emitted whenever the pool list changes. It has
+ # three lines:
+ #
+ # capacity operations bandwidth
+ # pool alloc free read write read write
+ # ---------- ----- ----- ----- ----- ----- -----
+ #
+ # Each line moves the state; when we get to a run of dashes, we
+ # commit. Note that we check for one-or-more dashes, because
+ # the width can vary depending on the length of pool name.
+ #
+ if [[ $_iostat_state = "TIMESTAMP" &&
+ $first = "capacity" ]] ; then
+ _iostat_state="INHEADER1"
+ continue
+ fi
+ if [[ $_iostat_state = "INHEADER1" &&
+ $first = "pool" ]] ; then
+ _iostat_state="INHEADER2"
+ continue
+ fi
+ if [[ $_iostat_state = "INHEADER2" &&
+ -z ${first/#+(-)/} ]] ; then
+ # Headers never repeat, so if the last committed chunk
+ # was a header, we commit this one as EXTRAHEADER so we
+ # can see it in the error output.
+ if [[ $_iostat_last = "HEADER" ]] ; then
+ _got_iostat "EXTRAHEADER"
+ elif [[ $_iostat_last != "EXTRAHEADER" ]] ; then
+ _got_iostat "HEADER"
+ fi
+ _iostat_state="HEADER"
+ continue
+ fi
+
+ # A pool stat line looks like:
+ #
+ # pool1 147K 240M 0 0 0 0
+ #
+ # If there are multiple pools, iostat follows them with a
+ # separator of dashed lines:
+ #
+ # pool1 147K 240M 0 0 0 0
+ # pool2 147K 240M 0 0 0 0
+ # ---------- ----- ----- ----- ----- ----- -----
+ #
+ # Stats rows always start after a timestamp or a header. If the
+ # header was emitted, we won't see a timestamp here (it goes
+ # before the header).
+ #
+ # Because our test exercises both pools on their own and
+ # together, we allow pools in either order. In practice they
+ # are sorted, but that's a side-effect of the implementation
+ # (see zpool_compare()), so we're not going to rely on it here.
+ if [[ $first = "pool1" ]] || [[ $first = "pool2" ]] ; then
+
+ # First line, track which one we saw. If it's a
+ # standalone line, it will be committed by the next
+ # NOPOOL or TIMESTAMP above (or the `_got_iostat` after
+ # the loop if this is the last line).
+ if [[ $_iostat_state == "TIMESTAMP" ||
+ $_iostat_state == "HEADER" ]] ; then
+ if [[ $first = "pool1" ]] ; then
+ _iostat_state="POOL1"
+ elif [[ $first = "pool2" ]] ; then
+ _iostat_state="POOL2"
+ fi
+ continue
+ fi
+
+ # If this is the second pool, we're in a multi-pool
+ # block, and need to look for the separator to close it
+ # out.
+ if [[ $_iostat_state = "POOL1" && $first = "pool2" ]] ||
+ [[ $_iostat_state = "POOL2" && $first = "pool1" ]] ;
+ then
+ _iostat_state="INPOOLBOTH"
+ continue
+ fi
+ fi
+
+ # Separator after the stats block.
+ if [[ $_iostat_state = "INPOOLBOTH" &&
+ -z ${first/#+(-)/} ]] ; then
+ _got_iostat "POOLBOTH"
+ continue
+ fi
+
+ # Anything else will fall through to here. We commit any
+	# in-flight state, then "UNKNOWN", all to help with debugging.
+ if [[ $_iostat_state != "UNKNOWN" ]] ; then
+ _got_iostat $_iostat_state
+ _got_iostat "UNKNOWN"
+ fi
+ done
+
+ # Close out any remaining state.
+ _got_iostat $_iostat_state
+
+ # Compare what we wanted with what we got, and pass/fail the test!
+ if [[ "${_iostat_expect[*]}" != "${_iostat_got[*]}" ]] ; then
+ log_note "expected: ${_iostat_expect[*]}"
+ log_note " got: ${_iostat_got[*]}"
+ log_fail "zpool iostat did not produce expected output"
+ fi
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh
new file mode 100755
index 000000000000..8e040058ec3e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh
@@ -0,0 +1,90 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+# `zpool iostat <N>` should keep running and update the pools it displays as
+# pools are created/destroyed/imported/exported.
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
+
+typeset vdev1=$(mktemp)
+typeset vdev2=$(mktemp)
+
+function cleanup {
+ cleanup_iostat
+
+ poolexists pool1 && destroy_pool pool1
+ poolexists pool2 && destroy_pool pool2
+ rm -f $vdev1 $vdev2
+}
+
+log_must mkfile $MINVDEVSIZE $vdev1 $vdev2
+
+expect_iostat "NOPOOL"
+
+start_iostat
+
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL1"
+log_must zpool create pool1 $vdev1
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOLBOTH"
+log_must zpool create pool2 $vdev2
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool export -a
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL2"
+log_must zpool import -d $vdev2 pool2
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOLBOTH"
+log_must zpool import -d $vdev1 pool1
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL2"
+log_must zpool destroy pool1
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool destroy pool2
+delay_iostat
+
+stop_iostat
+
+verify_iostat
+
+log_pass "zpool iostat in interval mode follows pool updates"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh
new file mode 100755
index 000000000000..ab1f258aa1cd
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh
@@ -0,0 +1,80 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+# `zpool iostat <pools> <N>` should keep running and only show the listed pools.
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
+
+typeset vdev1=$(mktemp)
+typeset vdev2=$(mktemp)
+
+function cleanup {
+ cleanup_iostat
+
+ poolexists pool1 && destroy_pool pool1
+ poolexists pool2 && destroy_pool pool2
+ rm -f $vdev1 $vdev2
+}
+
+log_must mkfile $MINVDEVSIZE $vdev1 $vdev2
+
+log_must zpool create pool1 $vdev1
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL1"
+start_iostat pool1
+delay_iostat
+
+log_must zpool create pool2 $vdev2
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool export -a
+delay_iostat
+
+log_must zpool import -d $vdev2 pool2
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL1"
+log_must zpool import -d $vdev1 pool1
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool destroy pool1
+delay_iostat
+
+log_must zpool destroy pool2
+delay_iostat
+
+stop_iostat
+
+verify_iostat
+
+log_pass "zpool iostat in interval mode with pools follows listed pool updates"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
index 0a402e71ee68..345239b88680 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
@@ -1234,10 +1234,10 @@ function verify_fs_aedsx
typeset oldval
set -A modes "on" "off"
oldval=$(get_prop $perm $fs)
- if [[ $oldval == "on" ]]; then
- n=1
- elif [[ $oldval == "off" ]]; then
+ if [[ $oldval == "off" ]]; then
n=0
+ else
+ n=1
fi
log_note "$user zfs set $perm=${modes[$n]} $fs"
user_run $user zfs set $perm=${modes[$n]} $fs
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh
index 26153aafbc02..0e79e9b8b70c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh
@@ -39,6 +39,6 @@
verify_runnable "global"
# create a pool without any features
-log_must mkfile 128m $TMPDEV
+log_must truncate -s $MINVDEVSIZE $TMPDEV
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh
index d6bd69b7e134..e81d07794689 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh
@@ -35,17 +35,19 @@
verify_runnable "global"
-TESTFILE="$TESTDIR/file.bin"
-
log_assert "User accounting upgrade should not be executed on readonly pool"
log_onexit cleanup_upgrade
# 1. Create a pool with the feature@userobj_accounting disabled to simulate
# a legacy pool from a previous ZFS version.
-log_must zpool create -d -m $TESTDIR $TESTPOOL $TMPDEV
+log_must zpool create -d $TESTPOOL $TMPDEV
+log_must zfs create $TESTPOOL/$TESTFS
+
+MNTPNT=$(get_prop mountpoint $TESTPOOL/$TESTFS)
+TESTFILE="$MNTPNT/file.bin"
# 2. Create a file on the "legecy" dataset
-log_must touch $TESTDIR/file.bin
+log_must touch $TESTFILE
# 3. Enable feature@userobj_accounting on the pool and verify it is only
# "enabled" and not "active": upgrading starts when the filesystem is mounted
@@ -54,12 +56,12 @@ log_must test "enabled" == "$(get_pool_prop 'feature@userobj_accounting' $TESTPO
# 4. Export the pool and re-import is readonly, without mounting any filesystem
log_must zpool export $TESTPOOL
-log_must zpool import -o readonly=on -N -d "$(dirname $TMPDEV)" $TESTPOOL
+log_must zpool import -o readonly=on -N -d $TEST_BASE_DIR $TESTPOOL
# 5. Try to mount the root dataset manually without the "ro" option, then verify
# filesystem status and the pool feature status (not "active") to ensure the
# pool "readonly" status is enforced.
-log_must mount -t zfs -o zfsutil $TESTPOOL $TESTDIR
+log_must zfs mount -R $TESTPOOL
log_must stat "$TESTFILE"
log_mustnot touch "$TESTFILE"
log_must test "enabled" == "$(get_pool_prop 'feature@userobj_accounting' $TESTPOOL)"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh
new file mode 100755
index 000000000000..d4c9a0a41816
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh
@@ -0,0 +1,53 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025 by Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/xattr/xattr_common.kshlib
+
+#
+# DESCRIPTION:
+# The default xattr should be shown as 'sa', not 'on', for clarity.
+#
+# STRATEGY:
+# 1. Create a filesystem.
+# 2. Verify that the xattr is shown as 'sa'.
+# 3. Manually set the value to 'dir', 'sa', 'on', and 'off'.
+# 4. Verify that it is shown as 'dir', 'sa', 'sa', and 'off'.
+#
+
+log_assert "The default and specific xattr values are displayed correctly."
+
+set -A args "dir" "sa" "on" "off"
+set -A display "dir" "sa" "sa" "off"
+
+log_must eval "[[ 'sa' == '$(zfs get -Hpo value xattr $TESTPOOL)' ]]"
+
+for i in `seq 0 3`; do
+ log_must zfs set xattr="${args[$i]}" $TESTPOOL
+ log_must eval "[[ '${display[$i]}' == '$(zfs get -Hpo value xattr $TESTPOOL)' ]]"
+done
+log_pass "The default and specific xattr values are displayed correctly."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
index 571a698eb63a..502ebada22dc 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
@@ -50,17 +50,53 @@ fi
typeset datafile1="$(mktemp -t zvol_misc_fua1.XXXXXX)"
typeset datafile2="$(mktemp -t zvol_misc_fua2.XXXXXX)"
+typeset datafile3="$(mktemp -t zvol_misc_fua3_log.XXXXXX)"
typeset zvolpath=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
+typeset DISK1=${DISKS%% *}
function cleanup
{
- rm "$datafile1" "$datafile2"
+ log_must zpool remove $TESTPOOL $datafile3
+	rm "$datafile1" "$datafile2" "$datafile3"
+}
+
+# Prints the total number of sync writes for a vdev
+# $1: vdev
+function get_sync
+{
+ zpool iostat -p -H -v -r $TESTPOOL $1 | \
+ awk '/[0-9]+$/{s+=$4+$5} END{print s}'
}
function do_test {
# Wait for udev to create symlinks to our zvol
block_device_wait $zvolpath
+ # Write using sync (creates FLUSH calls after writes, but not FUA)
+ old_vdev_writes=$(get_sync $DISK1)
+ old_log_writes=$(get_sync $datafile3)
+
+ log_must fio --name=write_iops --size=5M \
+ --ioengine=libaio --verify=0 --bs=4K \
+ --iodepth=1 --rw=randwrite --group_reporting=1 \
+ --filename=$zvolpath --sync=1
+
+ vdev_writes=$(( $(get_sync $DISK1) - $old_vdev_writes))
+ log_writes=$(( $(get_sync $datafile3) - $old_log_writes))
+
+ # When we're doing sync writes, we should see many more writes go to
+ # the log vs the first vdev. Experiments show anywhere from a 160-320x
+ # ratio of writes to the log vs the first vdev (due to some straggler
+ # writes to the first vdev).
+ #
+ # Check that we have a large ratio (100x) of sync writes going to the
+ # log device
+ ratio=$(($log_writes / $vdev_writes))
+ log_note "Got $log_writes log writes, $vdev_writes vdev writes."
+ if [ $ratio -lt 100 ] ; then
+ log_fail "Expected > 100x more log writes than vdev writes. "
+ fi
+
# Create a data file
log_must dd if=/dev/urandom of="$datafile1" bs=1M count=5
@@ -81,6 +117,8 @@ log_assert "Verify that a ZFS volume can do Force Unit Access (FUA)"
log_onexit cleanup
log_must zfs set compression=off $TESTPOOL/$TESTVOL
+log_must truncate -s 100M $datafile3
+log_must zpool add $TESTPOOL log $datafile3
log_note "Testing without blk-mq"
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 7f9ca6e39df8..3f0a7b40245d 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -3468,10 +3468,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
return_ACPI_STATUS (AE_OK);
}
- EVENTHANDLER_INVOKE(power_suspend_early);
+ EVENTHANDLER_INVOKE(power_suspend_early, stype);
stop_all_proc();
suspend_all_fs();
- EVENTHANDLER_INVOKE(power_suspend);
+ EVENTHANDLER_INVOKE(power_suspend, stype);
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
@@ -3632,7 +3632,7 @@ backout:
resume_all_fs();
resume_all_proc();
- EVENTHANDLER_INVOKE(power_resume);
+ EVENTHANDLER_INVOKE(power_resume, stype);
/* Allow another sleep request after a while. */
callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
diff --git a/sys/dev/acpica/acpi_apei.c b/sys/dev/acpica/acpi_apei.c
index 9cfd46c97430..624c81ad1b4f 100644
--- a/sys/dev/acpica/acpi_apei.c
+++ b/sys/dev/acpica/acpi_apei.c
@@ -754,7 +754,7 @@ apei_detach(device_t dev)
apei_nmi = NULL;
apei_nmi_nges = NULL;
if (sc->nges.swi_ih != NULL) {
- swi_remove(&sc->nges.swi_ih);
+ swi_remove(sc->nges.swi_ih);
sc->nges.swi_ih = NULL;
}
if (acpi_get_handle(dev) != NULL) {
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 0a8b67a5fa84..29d1690f1bdd 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -76,13 +76,6 @@ struct acpi_powerconsumer {
/* Device which is powered */
ACPI_HANDLE ac_consumer;
int ac_state;
-
- struct {
- bool prx_has;
- size_t prx_count;
- ACPI_HANDLE *prx_deps;
- } ac_prx[ACPI_D_STATE_COUNT];
-
TAILQ_ENTRY(acpi_powerconsumer) ac_link;
TAILQ_HEAD(,acpi_powerreference) ac_references;
};
@@ -103,7 +96,9 @@ static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer)
ACPI_SERIAL_DECL(powerres, "ACPI power resources");
static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer);
+#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer);
+#endif /* notyet */
static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res);
@@ -117,8 +112,6 @@ static struct acpi_powerresource
*acpi_pwr_find_resource(ACPI_HANDLE res);
static struct acpi_powerconsumer
*acpi_pwr_find_consumer(ACPI_HANDLE consumer);
-static ACPI_STATUS acpi_pwr_infer_state(struct acpi_powerconsumer *pc);
-static ACPI_STATUS acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state);
/*
* Register a power resource.
@@ -229,84 +222,6 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
#endif /* notyet */
/*
- * Evaluate the _PRx (power resources each D-state depends on). This also
- * populates the acpi_powerresources queue with the power resources discovered
- * during this step.
- *
- * ACPI 7.3.8 - 7.3.11 guarantee that _PRx will return the same data each
- * time they are evaluated.
- *
- * If this function fails, acpi_pwr_deregister_consumer() must be called on the
- * power consumer to free already allocated memory.
- */
-static ACPI_STATUS
-acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc)
-{
- ACPI_INTEGER status;
- ACPI_STRING reslist_name;
- ACPI_HANDLE reslist_handle;
- ACPI_STRING reslist_names[] = {"_PR0", "_PR1", "_PR2", "_PR3"};
- ACPI_BUFFER reslist;
- ACPI_OBJECT *reslist_object;
- ACPI_OBJECT *dep;
- ACPI_HANDLE *res;
-
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- ACPI_SERIAL_ASSERT(powerres);
-
- MPASS(consumer != NULL);
-
- for (int state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
- pc->ac_prx[state].prx_has = false;
- pc->ac_prx[state].prx_count = 0;
- pc->ac_prx[state].prx_deps = NULL;
-
- reslist_name = reslist_names[state - ACPI_STATE_D0];
- if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
- continue;
-
- reslist.Pointer = NULL;
- reslist.Length = ACPI_ALLOCATE_BUFFER;
- status = AcpiEvaluateObjectTyped(reslist_handle, NULL, NULL, &reslist,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status) || reslist.Pointer == NULL)
- /*
- * ACPI_ALLOCATE_BUFFER entails everything will be freed on error
- * by AcpiEvaluateObjectTyped.
- */
- continue;
-
- reslist_object = (ACPI_OBJECT *)reslist.Pointer;
- pc->ac_prx[state].prx_has = true;
- pc->ac_prx[state].prx_count = reslist_object->Package.Count;
-
- if (reslist_object->Package.Count == 0) {
- AcpiOsFree(reslist_object);
- continue;
- }
-
- pc->ac_prx[state].prx_deps = mallocarray(pc->ac_prx[state].prx_count,
- sizeof(*pc->ac_prx[state].prx_deps), M_ACPIPWR, M_NOWAIT);
- if (pc->ac_prx[state].prx_deps == NULL) {
- AcpiOsFree(reslist_object);
- return_ACPI_STATUS (AE_NO_MEMORY);
- }
-
- for (size_t i = 0; i < reslist_object->Package.Count; i++) {
- dep = &reslist_object->Package.Elements[i];
- res = dep->Reference.Handle;
- pc->ac_prx[state].prx_deps[i] = res;
-
- /* It's fine to attempt to register the same resource twice. */
- acpi_pwr_register_resource(res);
- }
- AcpiOsFree(reslist_object);
- }
-
- return_ACPI_STATUS (AE_OK);
-}
-
-/*
* Register a power consumer.
*
* It's OK to call this if we already know about the consumer.
@@ -314,7 +229,6 @@ acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc
static ACPI_STATUS
acpi_pwr_register_consumer(ACPI_HANDLE consumer)
{
- ACPI_INTEGER status;
struct acpi_powerconsumer *pc;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -325,30 +239,14 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
/* Allocate a new power consumer */
- if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL)
+ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL)
return_ACPI_STATUS (AE_NO_MEMORY);
TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link);
TAILQ_INIT(&pc->ac_references);
pc->ac_consumer = consumer;
- /*
- * Get all its power resource dependencies, if it has _PRx. We do this now
- * as an opportunity to populate the acpi_powerresources queue.
- *
- * If this fails, immediately deregister it.
- */
- status = acpi_pwr_get_power_resources(consumer, pc);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "failed to get power resources for %s\n",
- acpi_name(consumer)));
- acpi_pwr_deregister_consumer(consumer);
- return_ACPI_STATUS (status);
- }
-
- /* Find its initial state. */
- if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &pc->ac_state)))
- pc->ac_state = ACPI_STATE_UNKNOWN;
+ /* XXX we should try to find its current state */
+ pc->ac_state = ACPI_STATE_UNKNOWN;
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n",
acpi_name(consumer)));
@@ -356,6 +254,7 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
+#ifdef notyet
/*
* Deregister a power consumer.
*
@@ -380,9 +279,6 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
/* Pull the consumer off the list and free it */
TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link);
- for (size_t i = 0; i < sizeof(pc->ac_prx) / sizeof(*pc->ac_prx); i++)
- if (pc->ac_prx[i].prx_deps != NULL)
- free(pc->ac_prx[i].prx_deps, M_ACPIPWR);
free(pc, M_ACPIPWR);
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n",
@@ -390,139 +286,10 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
+#endif /* notyet */
/*
- * The _PSC control method isn't required if it's possible to infer the D-state
- * from the _PRx control methods. (See 7.3.6.)
- * We can infer that a given D-state has been achieved when all the dependencies
- * are in the ON state.
- */
-static ACPI_STATUS
-acpi_pwr_infer_state(struct acpi_powerconsumer *pc)
-{
- ACPI_HANDLE *res;
- uint32_t on;
- bool all_on = false;
-
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- ACPI_SERIAL_ASSERT(powerres);
-
- /* It is important we go from the hottest to the coldest state. */
- for (
- pc->ac_state = ACPI_STATE_D0;
- pc->ac_state <= ACPI_STATE_D3_HOT && !all_on;
- pc->ac_state++
- ) {
- MPASS(pc->ac_state <= sizeof(pc->ac_prx) / sizeof(*pc->ac_prx));
-
- if (!pc->ac_prx[pc->ac_state].prx_has)
- continue;
-
- all_on = true;
-
- for (size_t i = 0; i < pc->ac_prx[pc->ac_state].prx_count; i++) {
- res = pc->ac_prx[pc->ac_state].prx_deps[i];
- /* If failure, better to assume D-state is hotter than colder. */
- if (ACPI_FAILURE(acpi_GetInteger(res, "_STA", &on)))
- continue;
- if (on == 0) {
- all_on = false;
- break;
- }
- }
- }
-
- MPASS(pc->ac_state != ACPI_STATE_D0);
-
- /*
- * If none of the power resources required for the shallower D-states are
- * on, then we can assume it is unpowered (i.e. D3cold). A device is not
- * required to support D3cold however; in that case, _PR3 is not explicitly
- * provided. Those devices should default to D3hot instead.
- *
- * See comments of first row of table 7.1 in ACPI spec.
- */
- if (!all_on)
- pc->ac_state = pc->ac_prx[ACPI_STATE_D3_HOT].prx_has ?
- ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
- else
- pc->ac_state--;
-
- return_ACPI_STATUS (AE_OK);
-}
-
-static ACPI_STATUS
-acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state)
-{
- struct acpi_powerconsumer *pc;
- ACPI_HANDLE method_handle;
- ACPI_STATUS status;
- ACPI_BUFFER result;
- ACPI_OBJECT *object = NULL;
-
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- ACPI_SERIAL_ASSERT(powerres);
-
- if (consumer == NULL)
- return_ACPI_STATUS (AE_NOT_FOUND);
-
- if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
- if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
- goto out;
- if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
- panic("acpi added power consumer but can't find it");
- }
-
- status = AcpiGetHandle(consumer, "_PSC", &method_handle);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no _PSC object - %s\n",
- AcpiFormatException(status)));
- status = acpi_pwr_infer_state(pc);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't infer D-state - %s\n",
- AcpiFormatException(status)));
- pc->ac_state = ACPI_STATE_UNKNOWN;
- }
- goto out;
- }
-
- result.Pointer = NULL;
- result.Length = ACPI_ALLOCATE_BUFFER;
- status = AcpiEvaluateObjectTyped(method_handle, NULL, NULL, &result, ACPI_TYPE_INTEGER);
- if (ACPI_FAILURE(status) || result.Pointer == NULL) {
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to get state with _PSC - %s\n",
- AcpiFormatException(status)));
- pc->ac_state = ACPI_STATE_UNKNOWN;
- goto out;
- }
-
- object = (ACPI_OBJECT *)result.Pointer;
- pc->ac_state = ACPI_STATE_D0 + object->Integer.Value;
- status = AE_OK;
-
-out:
- if (object != NULL)
- AcpiOsFree(object);
- *state = pc->ac_state;
- return_ACPI_STATUS (status);
-}
-
-/*
- * Get a power consumer's D-state.
- */
-ACPI_STATUS
-acpi_pwr_get_state(ACPI_HANDLE consumer, int *state)
-{
- ACPI_STATUS res;
-
- ACPI_SERIAL_BEGIN(powerres);
- res = acpi_pwr_get_state_locked(consumer, state);
- ACPI_SERIAL_END(powerres);
- return (res);
-}
-
-/*
- * Set a power consumer to a particular D-state.
+ * Set a power consumer to a particular power state.
*/
ACPI_STATUS
acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
@@ -533,7 +300,6 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
ACPI_OBJECT *reslist_object;
ACPI_STATUS status;
char *method_name, *reslist_name = NULL;
- int new_state;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -735,28 +501,8 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
}
}
- /*
- * Make sure the transition succeeded. If getting new state failed,
- * just assume the new state is what we wanted. This was the behaviour
- * before we were checking D-states.
- */
- if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &new_state))) {
- printf("%s: failed to get new D-state\n", __func__);
- pc->ac_state = state;
- } else {
- if (new_state != state)
- printf("%s: new power state %s is not the one requested %s\n",
- __func__, acpi_d_state_to_str(new_state),
- acpi_d_state_to_str(state));
- pc->ac_state = new_state;
- }
-
- /*
- * We consider the transition successful even if the state we got doesn't
- * reflect what we set it to. This is because we weren't previously
- * checking the new state at all, so there might exist buggy platforms on
- * which suspend would otherwise succeed if we failed here.
- */
+ /* Transition was successful */
+ pc->ac_state = state;
status = AE_OK;
out:
diff --git a/sys/dev/acpica/acpi_timer.c b/sys/dev/acpica/acpi_timer.c
index 3d51a4211b80..b20912e2f5fb 100644
--- a/sys/dev/acpica/acpi_timer.c
+++ b/sys/dev/acpica/acpi_timer.c
@@ -34,6 +34,7 @@
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>
+#include <sys/power.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -69,8 +70,10 @@ bool acpi_timer_disabled = false;
static void acpi_timer_identify(driver_t *driver, device_t parent);
static int acpi_timer_probe(device_t dev);
static int acpi_timer_attach(device_t dev);
-static void acpi_timer_resume_handler(struct timecounter *);
-static void acpi_timer_suspend_handler(struct timecounter *);
+static void acpi_timer_resume_handler(struct timecounter *,
+ enum power_stype);
+static void acpi_timer_suspend_handler(struct timecounter *,
+ enum power_stype);
static u_int acpi_timer_get_timecount(struct timecounter *tc);
static u_int acpi_timer_get_timecount_safe(struct timecounter *tc);
static int acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS);
@@ -235,7 +238,7 @@ acpi_timer_attach(device_t dev)
}
static void
-acpi_timer_resume_handler(struct timecounter *newtc)
+acpi_timer_resume_handler(struct timecounter *newtc, enum power_stype stype)
{
struct timecounter *tc;
@@ -251,7 +254,7 @@ acpi_timer_resume_handler(struct timecounter *newtc)
}
static void
-acpi_timer_suspend_handler(struct timecounter *newtc)
+acpi_timer_suspend_handler(struct timecounter *newtc, enum power_stype stype)
{
struct timecounter *tc;
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 4c789dd3e9f2..71d8e46ab310 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -490,7 +490,6 @@ EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t);
/* Device power control. */
ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
-ACPI_STATUS acpi_pwr_get_state(ACPI_HANDLE consumer, int *state);
ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
acpi_pwr_for_sleep_t acpi_device_pwr_for_sleep;
int acpi_set_powerstate(device_t child, int state);
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index 82f56fc0d19e..2b4cb37275a6 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -467,28 +467,6 @@ ahci_ata_probe(device_t dev)
}
static int
-ahci_pci_read_msix_bars(device_t dev, uint8_t *table_bar, uint8_t *pba_bar)
-{
- int cap_offset = 0, ret;
- uint32_t val;
-
- if ((table_bar == NULL) || (pba_bar == NULL))
- return (EINVAL);
-
- ret = pci_find_cap(dev, PCIY_MSIX, &cap_offset);
- if (ret != 0)
- return (EINVAL);
-
- val = pci_read_config(dev, cap_offset + PCIR_MSIX_TABLE, 4);
- *table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
-
- val = pci_read_config(dev, cap_offset + PCIR_MSIX_PBA, 4);
- *pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
-
- return (0);
-}
-
-static int
ahci_pci_attach(device_t dev)
{
struct ahci_controller *ctlr = device_get_softc(dev);
@@ -496,7 +474,6 @@ ahci_pci_attach(device_t dev)
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
int msi_count, msix_count;
- uint8_t table_bar = 0, pba_bar = 0;
uint32_t caps, pi;
msi_count = pci_msi_count(dev);
@@ -584,20 +561,11 @@ ahci_pci_attach(device_t dev)
if (ctlr->quirks & AHCI_Q_NOMSIX)
msix_count = 0;
- /* Read MSI-x BAR IDs if supported */
- if (msix_count > 0) {
- error = ahci_pci_read_msix_bars(dev, &table_bar, &pba_bar);
- if (error == 0) {
- ctlr->r_msix_tab_rid = table_bar;
- ctlr->r_msix_pba_rid = pba_bar;
- } else {
- /* Failed to read BARs, disable MSI-x */
- msix_count = 0;
- }
- }
-
/* Allocate resources for MSI-x table and PBA */
if (msix_count > 0) {
+ ctlr->r_msix_tab_rid = pci_msix_table_bar(dev);
+ ctlr->r_msix_pba_rid = pci_msix_pba_bar(dev);
+
/*
* Allocate new MSI-x table only if not
* allocated before.
@@ -608,8 +576,8 @@ ahci_pci_attach(device_t dev)
ctlr->r_msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&ctlr->r_msix_tab_rid, RF_ACTIVE);
if (ctlr->r_msix_table == NULL) {
- ahci_free_mem(dev);
- return (ENXIO);
+ msix_count = 0;
+ goto no_msix;
}
}
@@ -624,12 +592,12 @@ ahci_pci_attach(device_t dev)
ctlr->r_msix_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&ctlr->r_msix_pba_rid, RF_ACTIVE);
if (ctlr->r_msix_pba == NULL) {
- ahci_free_mem(dev);
- return (ENXIO);
+ msix_count = 0;
}
}
}
+no_msix:
pci_enable_busmaster(dev);
/* Reset controller */
if ((error = ahci_pci_ctlr_reset(dev)) != 0) {
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index deadd63c3d18..9ac591c14943 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -971,6 +971,12 @@ ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_handoff_hw(sc, txq, bf);
}
+/*
+ * Setup a frame for encryption.
+ *
+ * If this fails, then an non-zero error is returned. The mbuf
+ * must be freed by the caller.
+ */
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
@@ -1547,6 +1553,10 @@ ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
*
* Note that this may cause the mbuf to be reallocated, so
* m0 may not be valid.
+ *
+ * If there's a problem then the mbuf is freed and an error
+ * is returned. The ath_buf then needs to be freed by the
+ * caller.
*/
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
@@ -2073,9 +2083,8 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* This also sets up the DMA map; crypto; frame parameters, etc */
r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
-
if (r != 0)
- goto done;
+ return (r);
/* At this point m0 could have changed! */
m0 = bf->bf_m;
@@ -2132,7 +2141,6 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, txq, bf);
#endif
-done:
return 0;
}
diff --git a/sys/dev/bnxt/bnxt_en/if_bnxt.c b/sys/dev/bnxt/bnxt_en/if_bnxt.c
index feac3ce54a29..471e26a4b252 100644
--- a/sys/dev/bnxt/bnxt_en/if_bnxt.c
+++ b/sys/dev/bnxt/bnxt_en/if_bnxt.c
@@ -48,6 +48,7 @@
#include <net/ethernet.h>
#include <net/iflib.h>
+#define WANT_NATIVE_PCI_GET_SLOT
#include <linux/pci.h>
#include <linux/kmod.h>
#include <linux/module.h>
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index d3820245837a..55f09fefb7e3 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -319,7 +318,7 @@ struct port_info {
char lockname[16];
unsigned long flags;
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
int8_t mdio_addr;
uint8_t port_type;
uint8_t mod_type;
@@ -413,6 +412,24 @@ enum {
NUM_CPL_COOKIES = 8 /* Limited by M_COOKIE. Do not increase. */
};
+/*
+ * Crypto replies use the low bit in the 64-bit cookie of CPL_FW6_PLD as a
+ * CPL cookie to identify the sender/receiver.
+ */
+enum {
+ CPL_FW6_COOKIE_CCR = 0,
+ CPL_FW6_COOKIE_KTLS,
+
+ NUM_CPL_FW6_COOKIES = 2 /* Low bits of cookie value. */
+};
+
+_Static_assert(powerof2(NUM_CPL_FW6_COOKIES),
+ "NUM_CPL_FW6_COOKIES must be a power of 2");
+
+#define CPL_FW6_COOKIE_MASK (NUM_CPL_FW6_COOKIES - 1)
+
+#define CPL_FW6_PLD_COOKIE(cpl) (be64toh((cpl)->data[1]) & ~CPL_FW6_COOKIE_MASK)
+
struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
@@ -477,6 +494,7 @@ struct sge_eq {
uint8_t doorbells;
uint8_t port_id; /* port_id of the port associated with the eq */
uint8_t tx_chan; /* tx channel used by the eq */
+ uint8_t hw_port; /* hw port used by the eq */
struct mtx eq_lock;
struct tx_desc *desc; /* KVA of descriptor ring */
@@ -640,12 +658,26 @@ struct sge_txq {
uint64_t kern_tls_full;
uint64_t kern_tls_octets;
uint64_t kern_tls_waste;
- uint64_t kern_tls_options;
uint64_t kern_tls_header;
- uint64_t kern_tls_fin;
uint64_t kern_tls_fin_short;
uint64_t kern_tls_cbc;
uint64_t kern_tls_gcm;
+ union {
+ struct {
+ /* T6 only. */
+ uint64_t kern_tls_options;
+ uint64_t kern_tls_fin;
+ };
+ struct {
+ /* T7 only. */
+ uint64_t kern_tls_ghash_received;
+ uint64_t kern_tls_ghash_requested;
+ uint64_t kern_tls_lso;
+ uint64_t kern_tls_partial_ghash;
+ uint64_t kern_tls_splitmode;
+ uint64_t kern_tls_trailer;
+ };
+ };
/* stats for not-that-common events */
@@ -769,6 +801,16 @@ struct sge_ofld_txq {
counter_u64_t tx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);
+static inline int
+ofld_txq_group(int val, int mask)
+{
+ const uint32_t ngroup = 1 << bitcount32(mask);
+ const int mshift = ffs(mask) - 1;
+ const uint32_t gmask = ngroup - 1;
+
+ return (val >> mshift & gmask);
+}
+
#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
/* Items used by the driver rx ithread are in this cacheline. */
@@ -836,6 +878,7 @@ struct sge_nm_txq {
} __aligned(CACHE_LINE_SIZE);
struct sge {
+ int nctrlq; /* total # of control queues */
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx queues */
int nofldrxq; /* total # of TOE rx queues */
@@ -937,7 +980,8 @@ struct adapter {
struct taskqueue *tq[MAX_NPORTS]; /* General purpose taskqueues */
struct port_info *port[MAX_NPORTS];
- uint8_t chan_map[MAX_NCHAN]; /* channel -> port */
+ uint8_t chan_map[MAX_NCHAN]; /* tx_chan -> port_id */
+ uint8_t port_map[MAX_NPORTS]; /* hw_port -> port_id */
CXGBE_LIST_HEAD(, clip_entry) *clip_table;
TAILQ_HEAD(, clip_entry) clip_pending; /* these need hw update. */
@@ -959,9 +1003,12 @@ struct adapter {
vmem_t *key_map;
struct tls_tunables tlst;
+ vmem_t *pbl_arena;
+ vmem_t *stag_arena;
+
uint8_t doorbells;
int offload_map; /* port_id's with IFCAP_TOE enabled */
- int bt_map; /* tx_chan's with BASE-T */
+ int bt_map; /* hw_port's that are BASE-T */
int active_ulds; /* ULDs activated on this adapter */
int flags;
int debug_flags;
@@ -988,6 +1035,7 @@ struct adapter {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -1409,6 +1457,14 @@ void t6_ktls_modunload(void);
int t6_ktls_try(if_t, struct socket *, struct ktls_session *);
int t6_ktls_parse_pkt(struct mbuf *);
int t6_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
+
+/* t7_kern_tls.c */
+int t7_tls_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
+ struct m_snd_tag **);
+void t7_ktls_modload(void);
+void t7_ktls_modunload(void);
+int t7_ktls_parse_pkt(struct mbuf *);
+int t7_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
#endif
/* t4_keyctx.c */
@@ -1536,6 +1592,27 @@ int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbu
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);
+/* t4_tpt.c */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_WRITE_MEM_DMA_LEN \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_sgl), 16)
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_WRITE_MEM_INLINE_LEN(len) \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + \
+ roundup((len), T4_ULPTX_MIN_IO), 16)
+
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+uint32_t t4_stag_alloc(struct adapter *, int);
+void t4_stag_free(struct adapter *, uint32_t, int);
+void t4_init_tpt(struct adapter *);
+void t4_free_tpt(struct adapter *);
+void t4_write_mem_dma_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, vm_paddr_t, uint64_t);
+void t4_write_mem_inline_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, void *, uint64_t);
+
static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 6e80ce40648b..6b36832a7464 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +31,15 @@
#include "t4_hw.h"
+#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC0 | F_EDC0 | \
+ F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+ F_CPL_SWITCH | F_SGE | F_ULP_TX | F_SF)
+
+#define GLBL_T7_INTR_MASK (F_CIM | F_MPS | F_PL | F_T7_PCIE | F_T7_MC0 | \
+ F_T7_EDC0 | F_T7_EDC1 | F_T7_LE | F_T7_TP | \
+ F_T7_MA | F_T7_PM_TX | F_T7_PM_RX | F_T7_ULP_RX | \
+ F_T7_CPL_SWITCH | F_T7_SGE | F_T7_ULP_TX | F_SF)
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -77,6 +85,18 @@ enum {
FEC_MODULE = 1 << 6, /* FEC suggested by the cable/transceiver. */
};
+enum {
+ ULP_T10DIF_ISCSI = 1 << 0,
+ ULP_T10DIF_FCOE = 1 << 1
+};
+
+enum {
+ ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_INLINE_TLS = 1 << 1,
+ ULP_CRYPTO_INLINE_IPSEC = 1 << 2,
+ ULP_CRYPTO_OFLD_OVER_IPSEC_INLINE = 1 << 4
+};
+
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
struct port_stats {
@@ -230,6 +250,15 @@ struct tp_cpl_stats {
struct tp_rdma_stats {
u32 rqe_dfr_pkt;
u32 rqe_dfr_mod;
+ u32 pkts_in[MAX_NCHAN];
+ u64 bytes_in[MAX_NCHAN];
+ /*
+ * When reading rdma stats, the address difference b/w RDMA_IN and
+ * RDMA_OUT is 4*u32, to read both at once, added padding
+ */
+ u32 padding[4];
+ u32 pkts_out[MAX_NCHAN];
+ u64 bytes_out[MAX_NCHAN];
};
struct sge_params {
@@ -259,7 +288,10 @@ struct tp_params {
uint32_t max_rx_pdu;
uint32_t max_tx_pdu;
bool rx_pkt_encap;
+ uint8_t lb_mode;
+ uint8_t lb_nchan;
+ int8_t ipsecidx_shift;
int8_t fcoe_shift;
int8_t port_shift;
int8_t vnic_shift;
@@ -270,6 +302,9 @@ struct tp_params {
int8_t macmatch_shift;
int8_t matchtype_shift;
int8_t frag_shift;
+ int8_t roce_shift;
+ int8_t synonly_shift;
+ int8_t tcpflags_shift;
};
/* Use same modulation queue as the tx channel. */
@@ -285,6 +320,22 @@ struct vpd_params {
u8 md[MD_LEN + 1];
};
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int mps;
@@ -308,8 +359,11 @@ struct chip_params {
u8 pm_stats_cnt;
u8 cng_ch_bits_log; /* congestion channel map bits width */
u8 nsched_cls;
+ u8 cim_num_ibq;
u8 cim_num_obq;
- u8 filter_opt_len;
+ u8 filter_opt_len; /* number of bits for optional fields */
+ u8 filter_num_opt; /* number of optional fields */
+ u8 sge_ctxt_size;
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
@@ -360,6 +414,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp; /* PF-only */
struct vpd_params vpd;
+ struct pf_resources pfres; /* PF-only */
struct pci_params pci;
struct devlog_params devlog; /* PF-only */
struct rss_params rss; /* VF-only */
@@ -399,12 +454,13 @@ struct adapter_params {
unsigned int ofldq_wr_cred;
unsigned int eo_wr_cred;
- unsigned int max_ordird_qp;
- unsigned int max_ird_adapter;
+ unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
+ unsigned int max_ird_adapter; /* Max read depth per adapter */
/* These values are for all ports (8b/port, upto 4 ports) */
uint32_t mps_bg_map; /* MPS rx buffer group map */
uint32_t tp_ch_map; /* TPCHMAP from firmware */
+ uint32_t tx_tp_ch_map; /* TX_TPCHMAP from firmware */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
@@ -412,11 +468,15 @@ struct adapter_params {
bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */
unsigned int max_pkts_per_eth_tx_pkts_wr;
uint8_t nsched_cls; /* # of usable sched classes per port */
+
+ uint8_t ncores;
+ uint32_t tid_qid_sel_mask; /* TID based QID selection mask */
};
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
+#define CHELSIO_T7 0x7
/*
* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -509,10 +569,11 @@ static inline int is_hashfilter(const struct adapter *adap)
static inline int is_ktls(const struct adapter *adap)
{
- return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW;
+ return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW ||
+ adap->params.chipid == CHELSIO_T7;
}
-static inline int chip_id(struct adapter *adap)
+static inline int chip_id(const struct adapter *adap)
{
return adap->params.chipid;
}
@@ -537,6 +598,11 @@ static inline int is_t6(struct adapter *adap)
return adap->params.chipid == CHELSIO_T6;
}
+static inline int is_t7(struct adapter *adap)
+{
+ return adap->params.chipid == CHELSIO_T7;
+}
+
static inline int is_fpga(struct adapter *adap)
{
return adap->params.fpga;
@@ -641,7 +707,7 @@ int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int si
int t4_load_boot(struct adapter *adap, u8 *boot_data,
unsigned int boot_addr, unsigned int size);
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end);
-int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr);
@@ -655,9 +721,10 @@ int t4_init_hw(struct adapter *adapter, u32 fw_params);
const struct chip_params *t4_get_chip_params(int chipid);
int t4_prep_adapter(struct adapter *adapter, u32 *buf);
int t4_shutdown_adapter(struct adapter *adapter);
-int t4_init_devlog_params(struct adapter *adapter, int fw_attach);
+int t4_init_devlog_ncores_params(struct adapter *adapter, int fw_attach);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_width(const struct adapter *adap, int filter_field);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id);
void t4_fatal_err(struct adapter *adapter, bool fw_error);
@@ -665,6 +732,7 @@ int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -691,19 +759,60 @@ void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok);
int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp);
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[]);
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres);
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr);
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+
+static inline void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size,
+ u16 *thres)
+{
+ t4_read_cimq_cfg_core(adap, 0, base, size, thres);
+}
+
+static inline int t4_read_cim_ibq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_ibq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_read_cim_obq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_obq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_cim_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_read_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_write(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_write_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_read_la(struct adapter *adap, u32 *la_buf, u32 *wrptr)
+{
+ return t4_cim_read_la_core(adap, 0, la_buf, wrptr);
+}
+
int t4_get_flash_params(struct adapter *adapter);
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach);
@@ -919,6 +1028,8 @@ int t4_configure_ringbb(struct adapter *adap);
int t4_configure_add_smac(struct adapter *adap);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp);
static inline int t4vf_query_params(struct adapter *adapter,
unsigned int nparams, const u32 *params,
@@ -969,8 +1080,8 @@ port_top_speed(const struct port_info *pi)
sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
static inline void *
-mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
- uint64_t mask, uint64_t val)
+mk_set_tcb_field_ulp_with_rpl(struct adapter *sc, void *cur, int tid,
+ uint16_t word, uint64_t mask, uint64_t val, const int qid)
{
struct ulp_txpkt *ulpmc;
struct ulptx_idata *ulpsc;
@@ -989,8 +1100,21 @@ mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htobe16(F_NO_REPLY);
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+
+ if (qid == -1) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+ } else {
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ req->word_cookie = htobe16(V_WORD(word) |
+ V_COOKIE(CPL_COOKIE_TOM));
+ }
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1006,4 +1130,11 @@ mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
return (ulpsc + 1);
}
+
+static inline void *
+mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
+ uint64_t mask, uint64_t val)
+{
+ return (mk_set_tcb_field_ulp_with_rpl(sc, cur, tid, word, mask, val, -1));
+}
#endif /* __CHELSIO_COMMON_H */
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 07940a44f66e..eb7ea9acc108 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -246,6 +245,8 @@ struct port_tx_state {
u32
t4_port_reg(struct adapter *adap, u8 port, u32 reg)
{
+ if (chip_id(adap) > CHELSIO_T6)
+ return T7_PORT_REG(port, reg);
if (chip_id(adap) > CHELSIO_T4)
return T5_PORT_REG(port, reg);
return PORT_REG(port, reg);
@@ -268,8 +269,10 @@ read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
int i;
- for_each_port(sc, i)
- read_tx_state_one(sc, i, &tx_state[i]);
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] != 0xff)
+ read_tx_state_one(sc, i, &tx_state[i]);
+ }
}
static void
@@ -279,7 +282,9 @@ check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
uint64_t tx_frames, rx_pause;
int i;
- for_each_port(sc, i) {
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] == 0xff)
+ continue;
rx_pause = tx_state[i].rx_pause;
tx_frames = tx_state[i].tx_frames;
read_tx_state_one(sc, i, &tx_state[i]); /* update */
@@ -351,7 +356,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
return -EINVAL;
if (adap->flags & IS_VF) {
- if (is_t6(adap))
+ if (chip_id(adap) >= CHELSIO_T6)
data_reg = FW_T6VF_MBDATA_BASE_ADDR;
else
data_reg = FW_T4VF_MBDATA_BASE_ADDR;
@@ -508,9 +513,8 @@ failed:
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
- sleep_ok, FW_CMD_MAX_TIMEOUT);
-
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
+ sleep_ok, FW_CMD_MAX_TIMEOUT);
}
static int t4_edc_err_read(struct adapter *adap, int idx)
@@ -799,6 +803,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
case CHELSIO_T5:
case CHELSIO_T6:
+ case CHELSIO_T7:
if (adapter->flags & IS_VF)
return FW_T4VF_REGMAP_SIZE;
return T5_REGMAP_SIZE;
@@ -2639,6 +2644,638 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
+ static const unsigned int t7_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x115c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11d0,
+ 0x11fc, 0x1278,
+ 0x1280, 0x1368,
+ 0x1700, 0x172c,
+ 0x173c, 0x1760,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3044,
+ 0x3060, 0x3064,
+ 0x30a4, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35e0, 0x35ec,
+ 0x3600, 0x37fc,
+ 0x3804, 0x3818,
+ 0x3880, 0x388c,
+ 0x3900, 0x3904,
+ 0x3910, 0x3978,
+ 0x3980, 0x399c,
+ 0x4700, 0x4720,
+ 0x4728, 0x475c,
+ 0x480c, 0x4814,
+ 0x4890, 0x489c,
+ 0x48a4, 0x48ac,
+ 0x48b8, 0x48c4,
+ 0x4900, 0x4924,
+ 0x4ffc, 0x4ffc,
+ 0x5500, 0x5624,
+ 0x56c4, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f04,
+ 0x5f0c, 0x5f10,
+ 0x5f20, 0x5f88,
+ 0x5f90, 0x5fd8,
+ 0x6000, 0x6020,
+ 0x6028, 0x6030,
+ 0x6044, 0x609c,
+ 0x60a8, 0x60ac,
+ 0x60b8, 0x60ec,
+ 0x6100, 0x6104,
+ 0x6118, 0x611c,
+ 0x6150, 0x6150,
+ 0x6180, 0x61b8,
+ 0x7700, 0x77a8,
+ 0x77b0, 0x7888,
+ 0x78cc, 0x7970,
+ 0x7b00, 0x7b00,
+ 0x7b08, 0x7b0c,
+ 0x7b24, 0x7b84,
+ 0x7b8c, 0x7c2c,
+ 0x7c34, 0x7c40,
+ 0x7c48, 0x7c68,
+ 0x7c70, 0x7c7c,
+ 0x7d00, 0x7ddc,
+ 0x7de4, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e74,
+ 0x7e80, 0x7ee0,
+ 0x7ee8, 0x7f0c,
+ 0x7f20, 0x7f5c,
+ 0x8dc0, 0x8de8,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e30,
+ 0x8e7c, 0x8ee8,
+ 0x8f88, 0x8f88,
+ 0x8f90, 0x8fb0,
+ 0x8fb8, 0x9058,
+ 0x9074, 0x90f8,
+ 0x9100, 0x912c,
+ 0x9138, 0x9188,
+ 0x9400, 0x9414,
+ 0x9430, 0x9440,
+ 0x9454, 0x9454,
+ 0x945c, 0x947c,
+ 0x9498, 0x94b8,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9804,
+ 0x9854, 0x9854,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xa000, 0xa06c,
+ 0xa080, 0xa0ec,
+ 0xa100, 0xa16c,
+ 0xa180, 0xa1ec,
+ 0xa200, 0xa26c,
+ 0xa280, 0xa2ec,
+ 0xa300, 0xa36c,
+ 0xa380, 0xa458,
+ 0xa460, 0xa4f8,
+ 0xd000, 0xd03c,
+ 0xd100, 0xd134,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xe00c,
+ 0xf000, 0xf008,
+ 0xf010, 0xf06c,
+ 0x11000, 0x11014,
+ 0x11048, 0x11120,
+ 0x11130, 0x11144,
+ 0x11174, 0x11178,
+ 0x11190, 0x111a0,
+ 0x111e4, 0x112f0,
+ 0x11300, 0x1133c,
+ 0x11408, 0x1146c,
+ 0x12000, 0x12004,
+ 0x12060, 0x122c4,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191a0,
+ 0x191ac, 0x191c8,
+ 0x191d0, 0x191e4,
+ 0x19250, 0x19250,
+ 0x19258, 0x19268,
+ 0x19278, 0x19278,
+ 0x19280, 0x192b0,
+ 0x192bc, 0x192f0,
+ 0x19300, 0x19308,
+ 0x19310, 0x19318,
+ 0x19320, 0x19328,
+ 0x19330, 0x19330,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x1947c,
+ 0x19488, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cdc,
+ 0x19ce4, 0x19cf8,
+ 0x19d00, 0x19d30,
+ 0x19d50, 0x19d80,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19de0,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fbc, 0x19fbc,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a108,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
+ 0x1a1fc, 0x1a29c,
+ 0x1a2a8, 0x1a2b8,
+ 0x1a2c0, 0x1a388,
+ 0x1a398, 0x1a3ac,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e4,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e4,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae4,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee4,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e4,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e4,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae4,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee4,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30038,
+ 0x30100, 0x3017c,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301e0,
+ 0x30200, 0x30344,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3065c,
+ 0x30800, 0x30848,
+ 0x30850, 0x308a8,
+ 0x308b8, 0x308c0,
+ 0x308cc, 0x308dc,
+ 0x30900, 0x30904,
+ 0x3090c, 0x30914,
+ 0x3091c, 0x30928,
+ 0x30930, 0x3093c,
+ 0x30944, 0x30948,
+ 0x30954, 0x30974,
+ 0x3097c, 0x30980,
+ 0x30a00, 0x30a20,
+ 0x30a38, 0x30a3c,
+ 0x30a50, 0x30a50,
+ 0x30a80, 0x30a80,
+ 0x30a88, 0x30aa8,
+ 0x30ab0, 0x30ab4,
+ 0x30ac8, 0x30ad4,
+ 0x30b28, 0x30b84,
+ 0x30b98, 0x30bb8,
+ 0x30c98, 0x30d14,
+ 0x31000, 0x31020,
+ 0x31038, 0x3103c,
+ 0x31050, 0x31050,
+ 0x31080, 0x31080,
+ 0x31088, 0x310a8,
+ 0x310b0, 0x310b4,
+ 0x310c8, 0x310d4,
+ 0x31128, 0x31184,
+ 0x31198, 0x311b8,
+ 0x32000, 0x32038,
+ 0x32100, 0x3217c,
+ 0x32190, 0x321a0,
+ 0x321a8, 0x321b8,
+ 0x321c4, 0x321c8,
+ 0x321d0, 0x321e0,
+ 0x32200, 0x32344,
+ 0x32400, 0x324b4,
+ 0x324c0, 0x3252c,
+ 0x32540, 0x3265c,
+ 0x32800, 0x32848,
+ 0x32850, 0x328a8,
+ 0x328b8, 0x328c0,
+ 0x328cc, 0x328dc,
+ 0x32900, 0x32904,
+ 0x3290c, 0x32914,
+ 0x3291c, 0x32928,
+ 0x32930, 0x3293c,
+ 0x32944, 0x32948,
+ 0x32954, 0x32974,
+ 0x3297c, 0x32980,
+ 0x32a00, 0x32a20,
+ 0x32a38, 0x32a3c,
+ 0x32a50, 0x32a50,
+ 0x32a80, 0x32a80,
+ 0x32a88, 0x32aa8,
+ 0x32ab0, 0x32ab4,
+ 0x32ac8, 0x32ad4,
+ 0x32b28, 0x32b84,
+ 0x32b98, 0x32bb8,
+ 0x32c98, 0x32d14,
+ 0x33000, 0x33020,
+ 0x33038, 0x3303c,
+ 0x33050, 0x33050,
+ 0x33080, 0x33080,
+ 0x33088, 0x330a8,
+ 0x330b0, 0x330b4,
+ 0x330c8, 0x330d4,
+ 0x33128, 0x33184,
+ 0x33198, 0x331b8,
+ 0x34000, 0x34038,
+ 0x34100, 0x3417c,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341e0,
+ 0x34200, 0x34344,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3465c,
+ 0x34800, 0x34848,
+ 0x34850, 0x348a8,
+ 0x348b8, 0x348c0,
+ 0x348cc, 0x348dc,
+ 0x34900, 0x34904,
+ 0x3490c, 0x34914,
+ 0x3491c, 0x34928,
+ 0x34930, 0x3493c,
+ 0x34944, 0x34948,
+ 0x34954, 0x34974,
+ 0x3497c, 0x34980,
+ 0x34a00, 0x34a20,
+ 0x34a38, 0x34a3c,
+ 0x34a50, 0x34a50,
+ 0x34a80, 0x34a80,
+ 0x34a88, 0x34aa8,
+ 0x34ab0, 0x34ab4,
+ 0x34ac8, 0x34ad4,
+ 0x34b28, 0x34b84,
+ 0x34b98, 0x34bb8,
+ 0x34c98, 0x34d14,
+ 0x35000, 0x35020,
+ 0x35038, 0x3503c,
+ 0x35050, 0x35050,
+ 0x35080, 0x35080,
+ 0x35088, 0x350a8,
+ 0x350b0, 0x350b4,
+ 0x350c8, 0x350d4,
+ 0x35128, 0x35184,
+ 0x35198, 0x351b8,
+ 0x36000, 0x36038,
+ 0x36100, 0x3617c,
+ 0x36190, 0x361a0,
+ 0x361a8, 0x361b8,
+ 0x361c4, 0x361c8,
+ 0x361d0, 0x361e0,
+ 0x36200, 0x36344,
+ 0x36400, 0x364b4,
+ 0x364c0, 0x3652c,
+ 0x36540, 0x3665c,
+ 0x36800, 0x36848,
+ 0x36850, 0x368a8,
+ 0x368b8, 0x368c0,
+ 0x368cc, 0x368dc,
+ 0x36900, 0x36904,
+ 0x3690c, 0x36914,
+ 0x3691c, 0x36928,
+ 0x36930, 0x3693c,
+ 0x36944, 0x36948,
+ 0x36954, 0x36974,
+ 0x3697c, 0x36980,
+ 0x36a00, 0x36a20,
+ 0x36a38, 0x36a3c,
+ 0x36a50, 0x36a50,
+ 0x36a80, 0x36a80,
+ 0x36a88, 0x36aa8,
+ 0x36ab0, 0x36ab4,
+ 0x36ac8, 0x36ad4,
+ 0x36b28, 0x36b84,
+ 0x36b98, 0x36bb8,
+ 0x36c98, 0x36d14,
+ 0x37000, 0x37020,
+ 0x37038, 0x3703c,
+ 0x37050, 0x37050,
+ 0x37080, 0x37080,
+ 0x37088, 0x370a8,
+ 0x370b0, 0x370b4,
+ 0x370c8, 0x370d4,
+ 0x37128, 0x37184,
+ 0x37198, 0x371b8,
+ 0x38000, 0x380b0,
+ 0x380b8, 0x38130,
+ 0x38140, 0x38140,
+ 0x38150, 0x38154,
+ 0x38160, 0x381c4,
+ 0x381f0, 0x38204,
+ 0x3820c, 0x38214,
+ 0x3821c, 0x3822c,
+ 0x38244, 0x38244,
+ 0x38254, 0x38274,
+ 0x3827c, 0x38280,
+ 0x38300, 0x38304,
+ 0x3830c, 0x38314,
+ 0x3831c, 0x3832c,
+ 0x38344, 0x38344,
+ 0x38354, 0x38374,
+ 0x3837c, 0x38380,
+ 0x38400, 0x38424,
+ 0x38438, 0x3843c,
+ 0x38480, 0x38480,
+ 0x384a8, 0x384a8,
+ 0x384b0, 0x384b4,
+ 0x384c8, 0x38514,
+ 0x38600, 0x3860c,
+ 0x3861c, 0x38624,
+ 0x38900, 0x38924,
+ 0x38938, 0x3893c,
+ 0x38980, 0x38980,
+ 0x389a8, 0x389a8,
+ 0x389b0, 0x389b4,
+ 0x389c8, 0x38a14,
+ 0x38b00, 0x38b0c,
+ 0x38b1c, 0x38b24,
+ 0x38e00, 0x38e00,
+ 0x38e18, 0x38e20,
+ 0x38e38, 0x38e40,
+ 0x38e58, 0x38e60,
+ 0x38e78, 0x38e80,
+ 0x38e98, 0x38ea0,
+ 0x38eb8, 0x38ec0,
+ 0x38ed8, 0x38ee0,
+ 0x38ef8, 0x38f08,
+ 0x38f10, 0x38f2c,
+ 0x38f80, 0x38ffc,
+ 0x39080, 0x39080,
+ 0x39088, 0x39090,
+ 0x39100, 0x39108,
+ 0x39120, 0x39128,
+ 0x39140, 0x39148,
+ 0x39160, 0x39168,
+ 0x39180, 0x39188,
+ 0x391a0, 0x391a8,
+ 0x391c0, 0x391c8,
+ 0x391e0, 0x391e8,
+ 0x39200, 0x39200,
+ 0x39208, 0x39240,
+ 0x39300, 0x39300,
+ 0x39308, 0x39340,
+ 0x39400, 0x39400,
+ 0x39408, 0x39440,
+ 0x39500, 0x39500,
+ 0x39508, 0x39540,
+ 0x39600, 0x39600,
+ 0x39608, 0x39640,
+ 0x39700, 0x39700,
+ 0x39708, 0x39740,
+ 0x39800, 0x39800,
+ 0x39808, 0x39840,
+ 0x39900, 0x39900,
+ 0x39908, 0x39940,
+ 0x39a00, 0x39a04,
+ 0x39a10, 0x39a14,
+ 0x39a1c, 0x39aa8,
+ 0x39b00, 0x39ecc,
+ 0x3a000, 0x3a004,
+ 0x3a050, 0x3a084,
+ 0x3a090, 0x3a09c,
+ 0x3e000, 0x3e020,
+ 0x3e03c, 0x3e05c,
+ 0x3e100, 0x3e120,
+ 0x3e13c, 0x3e15c,
+ 0x3e200, 0x3e220,
+ 0x3e23c, 0x3e25c,
+ 0x3e300, 0x3e320,
+ 0x3e33c, 0x3e35c,
+ 0x3f000, 0x3f034,
+ 0x3f100, 0x3f130,
+ 0x3f200, 0x3f218,
+ 0x44000, 0x44014,
+ 0x44020, 0x44028,
+ 0x44030, 0x44030,
+ 0x44100, 0x44114,
+ 0x44120, 0x44128,
+ 0x44130, 0x44130,
+ 0x44200, 0x44214,
+ 0x44220, 0x44228,
+ 0x44230, 0x44230,
+ 0x44300, 0x44314,
+ 0x44320, 0x44328,
+ 0x44330, 0x44330,
+ 0x44400, 0x44414,
+ 0x44420, 0x44428,
+ 0x44430, 0x44430,
+ 0x44500, 0x44514,
+ 0x44520, 0x44528,
+ 0x44530, 0x44530,
+ 0x44714, 0x44718,
+ 0x44730, 0x44730,
+ 0x447c0, 0x447c0,
+ 0x447f0, 0x447f0,
+ 0x447f8, 0x447fc,
+ 0x45000, 0x45014,
+ 0x45020, 0x45028,
+ 0x45030, 0x45030,
+ 0x45100, 0x45114,
+ 0x45120, 0x45128,
+ 0x45130, 0x45130,
+ 0x45200, 0x45214,
+ 0x45220, 0x45228,
+ 0x45230, 0x45230,
+ 0x45300, 0x45314,
+ 0x45320, 0x45328,
+ 0x45330, 0x45330,
+ 0x45400, 0x45414,
+ 0x45420, 0x45428,
+ 0x45430, 0x45430,
+ 0x45500, 0x45514,
+ 0x45520, 0x45528,
+ 0x45530, 0x45530,
+ 0x45714, 0x45718,
+ 0x45730, 0x45730,
+ 0x457c0, 0x457c0,
+ 0x457f0, 0x457f0,
+ 0x457f8, 0x457fc,
+ 0x46000, 0x46010,
+ 0x46020, 0x46034,
+ 0x46040, 0x46050,
+ 0x46060, 0x46088,
+ 0x47000, 0x4709c,
+ 0x470c0, 0x470d4,
+ 0x47100, 0x471a8,
+ 0x471b0, 0x471e8,
+ 0x47200, 0x47210,
+ 0x4721c, 0x47230,
+ 0x47238, 0x47238,
+ 0x47240, 0x472ac,
+ 0x472d0, 0x472f4,
+ 0x47300, 0x47310,
+ 0x47318, 0x47348,
+ 0x47350, 0x47354,
+ 0x47380, 0x47388,
+ 0x47390, 0x47394,
+ 0x47400, 0x47448,
+ 0x47450, 0x47458,
+ 0x47500, 0x4751c,
+ 0x47530, 0x4754c,
+ 0x47560, 0x4757c,
+ 0x47590, 0x475ac,
+ 0x47600, 0x47630,
+ 0x47640, 0x47644,
+ 0x47660, 0x4769c,
+ 0x47700, 0x47710,
+ 0x47740, 0x47750,
+ 0x4775c, 0x4779c,
+ 0x477b0, 0x477bc,
+ 0x477c4, 0x477c8,
+ 0x477d4, 0x477fc,
+ 0x48000, 0x48004,
+ 0x48018, 0x4801c,
+ 0x49304, 0x493f0,
+ 0x49400, 0x49410,
+ 0x49460, 0x494f4,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50404,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c04,
+ 0x51000, 0x51020,
+ 0x51028, 0x510c4,
+ 0x51104, 0x51108,
+ 0x51200, 0x51274,
+ 0x51300, 0x51324,
+ 0x51400, 0x51548,
+ 0x51550, 0x51554,
+ 0x5155c, 0x51584,
+ 0x5158c, 0x515c8,
+ 0x515f0, 0x515f4,
+ 0x58000, 0x58004,
+ 0x58018, 0x5801c,
+ 0x59304, 0x593f0,
+ 0x59400, 0x59410,
+ 0x59460, 0x594f4,
+ };
+
u32 *buf_end = (u32 *)(buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -2679,6 +3316,16 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
}
break;
+ case CHELSIO_T7:
+ if (adap->flags & IS_VF) {
+ reg_ranges = t6vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
+ } else {
+ reg_ranges = t7_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t7_reg_ranges);
+ }
+ break;
+
default:
CH_ERR(adap,
"Unsupported chip version %d\n", chip_version);
@@ -3086,6 +3733,56 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
return 0;
}
+/* Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters */
+static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
+ [FLASH_LOC_EXP_ROM] = { 0, 6 },
+ [FLASH_LOC_IBFT] = { 6, 1 },
+ [FLASH_LOC_BOOTCFG] = { 7, 1 },
+ [FLASH_LOC_FW] = { 8, 16 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
+ [FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 30, 1 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_CUDBG] = { 32, 32 },
+ [FLASH_LOC_BOOT_AREA] = { 0, 8 }, /* Spans complete Boot Area */
+ [FLASH_LOC_END] = { 64, 0 },
+};
+
+/* Flash Layout {start sector, # of sectors} for T7 adapters */
+static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
+ [FLASH_LOC_VPD] = { 0, 1 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
+ [FLASH_LOC_FW] = { 2, 29 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_EXP_ROM] = { 32, 15 },
+ [FLASH_LOC_IBFT] = { 47, 1 },
+ [FLASH_LOC_BOOTCFG] = { 48, 1 },
+ [FLASH_LOC_DPU_BOOT] = { 49, 13 },
+ [FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 63, 1 },
+ [FLASH_LOC_VPD_BACKUP] = { 64, 1 },
+ [FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
+ [FLASH_LOC_FW_BACKUP] = { 66, 29 },
+ [FLASH_LOC_CFG_BACK] = { 95, 1 },
+ [FLASH_LOC_CUDBG] = { 96, 48 },
+ [FLASH_LOC_CHIP_DUMP] = { 144, 48 },
+ [FLASH_LOC_DPU_AREA] = { 192, 64 },
+ [FLASH_LOC_BOOT_AREA] = { 32, 17 }, /* Spans complete UEFI/PXE Boot Area */
+ [FLASH_LOC_END] = { 256, 0 },
+};
+
+int
+t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp)
+{
+ const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
+ &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];
+
+ if (lenp != NULL)
+ *lenp = FLASH_MAX_SIZE(l->nsecs);
+ return (FLASH_START(l->start_sec));
+}
+
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -3116,13 +3813,16 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
int lock, u32 *valp)
{
int ret;
+ uint32_t op;
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
return -EBUSY;
- t4_write_reg(adapter, A_SF_OP,
- V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
+ if (chip_id(adapter) >= CHELSIO_T7)
+ op |= F_QUADREADDISABLE;
+ t4_write_reg(adapter, A_SF_OP, op);
ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, A_SF_DATA);
@@ -3294,9 +3994,10 @@ unlock:
*/
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3308,8 +4009,10 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
{
- return t4_read_flash(adapter, FLASH_FW_START,
- sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t),
+ (uint32_t *)hdr, 1);
}
/**
@@ -3321,9 +4024,11 @@ int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
*/
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP,
+ NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3335,9 +4040,10 @@ int t4_get_bs_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, tp_microcode_ver),
- 1, vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0);
}
/**
@@ -3359,10 +4065,10 @@ int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
sizeof(u32))];
int ret;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);
- ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
- ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
- 0);
+ ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
if (ret)
return ret;
@@ -3520,16 +4226,20 @@ int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
* File is stored, or an error if the device FLASH is too small to contain
* a Firmware Configuration File.
*/
-int t4_flash_cfg_addr(struct adapter *adapter)
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ if (adapter->params.sf_size < cfg_start + len)
return -ENOSPC;
-
- return FLASH_CFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (cfg_start);
}
/*
@@ -3547,7 +4257,8 @@ static int t4_fw_matches_chip(struct adapter *adap,
*/
if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
(is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
- (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
+ (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
+ (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
return 1;
CH_ERR(adap,
@@ -3572,20 +4283,15 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
u8 first_page[SF_PAGE_SIZE];
const u32 *p = (const u32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_start_sec;
unsigned int fw_start;
unsigned int fw_size;
+ enum t4_flash_loc loc;
- if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
- fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
- fw_start = FLASH_FWBOOTSTRAP_START;
- fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
- } else {
- fw_start_sec = FLASH_FW_START_SEC;
- fw_start = FLASH_FW_START;
- fw_size = FLASH_FW_MAX_SIZE;
- }
+ loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
+ FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
+ fw_start = t4_flash_loc_start(adap, loc, &fw_size);
+ fw_start_sec = fw_start / SF_SEC_SIZE;
if (!size) {
CH_ERR(adap, "FW image has no data\n");
@@ -3618,7 +4324,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
}
- i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
@@ -3672,7 +4378,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.param[0].mnem =
cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
- c.param[0].val = (__force __be32)op;
+ c.param[0].val = cpu_to_be32(op);
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
@@ -3922,15 +4628,12 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* speed and let the firmware pick one.
*/
fec |= FW_PORT_CAP32_FORCE_FEC;
- if (speed & FW_PORT_CAP32_SPEED_100G) {
+ if (speed & FW_PORT_CAP32_SPEED_25G) {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_NO_FEC;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
} else {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
}
} else {
@@ -3948,12 +4651,9 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* the potential top speed. Request the best
* FEC at that speed instead.
*/
- if (speed & FW_PORT_CAP32_SPEED_100G) {
- if (fec == FW_PORT_CAP32_FEC_BASER_RS)
- fec = FW_PORT_CAP32_FEC_RS;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
- if (fec == FW_PORT_CAP32_FEC_RS)
- fec = FW_PORT_CAP32_FEC_BASER_RS;
+ if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
+ fec == FW_PORT_CAP32_FEC_BASER_RS) {
+ fec = FW_PORT_CAP32_FEC_RS;
}
}
} else {
@@ -4925,6 +5625,15 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = mps_trc_intr_details,
.actions = NULL,
};
+ static const struct intr_info t7_mps_trc_intr_info = {
+ .name = "T7_MPS_TRC_INT_CAUSE",
+ .cause_reg = A_T7_MPS_TRC_INT_CAUSE,
+ .enable_reg = A_T7_MPS_TRC_INT_ENABLE,
+ .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
+ .flags = 0,
+ .details = mps_trc_intr_details,
+ .actions = NULL,
+ };
static const struct intr_details mps_stat_sram_intr_details[] = {
{ 0xffffffff, "MPS statistics SRAM parity error" },
{ 0 }
@@ -4998,7 +5707,10 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
fatal = false;
fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
+ else
+ fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
@@ -5225,7 +5937,7 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
@@ -5234,10 +5946,29 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
+ } else {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
+ ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+ ii.flags = 0;
+ ii.details = mac_intr_details;
+ ii.actions = NULL;
}
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- if (chip_id(adap) >= CHELSIO_T5) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (chip_id(adap) >= CHELSIO_T5) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
@@ -5249,7 +5980,17 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
- if (chip_id(adap) >= CHELSIO_T6) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (is_t6(adap)) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
@@ -5346,13 +6087,42 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, "CIM" },
{ 0 }
};
- static const struct intr_info pl_perr_cause = {
+ static const struct intr_details t7_pl_intr_details[] = {
+ { F_T7_MC1, "MC1" },
+ { F_T7_ULP_TX, "ULP TX" },
+ { F_T7_SGE, "SGE" },
+ { F_T7_CPL_SWITCH, "CPL Switch" },
+ { F_T7_ULP_RX, "ULP RX" },
+ { F_T7_PM_RX, "PM RX" },
+ { F_T7_PM_TX, "PM TX" },
+ { F_T7_MA, "MA" },
+ { F_T7_TP, "TP" },
+ { F_T7_LE, "LE" },
+ { F_T7_EDC1, "EDC1" },
+ { F_T7_EDC0, "EDC0" },
+ { F_T7_MC0, "MC0" },
+ { F_T7_PCIE, "PCIE" },
+ { F_MAC3, "MAC3" },
+ { F_MAC2, "MAC2" },
+ { F_MAC1, "MAC1" },
+ { F_MAC0, "MAC0" },
+ { F_SMB, "SMB" },
+ { F_PL, "PL" },
+ { F_NCSI, "NC-SI" },
+ { F_MPS, "MPS" },
+ { F_DBG, "DBG" },
+ { F_I2CM, "I2CM" },
+ { F_MI, "MI" },
+ { F_CIM, "CIM" },
+ { 0 }
+ };
+ struct intr_info pl_perr_cause = {
.name = "PL_PERR_CAUSE",
.cause_reg = A_PL_PERR_CAUSE,
.enable_reg = A_PL_PERR_ENABLE,
.fatal = 0xffffffff,
- .flags = 0,
- .details = pl_intr_details,
+ .flags = NONFATAL_IF_DISABLED,
+ .details = NULL,
.actions = NULL,
};
static const struct intr_action pl_intr_action[] = {
@@ -5381,17 +6151,53 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, -1, cim_intr_handler },
{ 0 }
};
- static const struct intr_info pl_intr_info = {
+ static const struct intr_action t7_pl_intr_action[] = {
+ { F_T7_ULP_TX, -1, ulptx_intr_handler },
+ { F_T7_SGE, -1, sge_intr_handler },
+ { F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
+ { F_T7_ULP_RX, -1, ulprx_intr_handler },
+ { F_T7_PM_RX, -1, pmrx_intr_handler},
+ { F_T7_PM_TX, -1, pmtx_intr_handler},
+ { F_T7_MA, -1, ma_intr_handler },
+ { F_T7_TP, -1, tp_intr_handler },
+ { F_T7_LE, -1, le_intr_handler },
+ { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
+ { F_T7_EDC0, MEM_EDC0, mem_intr_handler },
+ { F_T7_MC1, MEM_MC1, mem_intr_handler },
+ { F_T7_MC0, MEM_MC0, mem_intr_handler },
+ { F_T7_PCIE, -1, pcie_intr_handler },
+ { F_MAC3, 3, mac_intr_handler},
+ { F_MAC2, 2, mac_intr_handler},
+ { F_MAC1, 1, mac_intr_handler},
+ { F_MAC0, 0, mac_intr_handler},
+ { F_SMB, -1, smb_intr_handler},
+ { F_PL, -1, plpl_intr_handler },
+ { F_NCSI, -1, ncsi_intr_handler},
+ { F_MPS, -1, mps_intr_handler },
+ { F_CIM, -1, cim_intr_handler },
+ { 0 }
+ };
+ struct intr_info pl_intr_info = {
.name = "PL_INT_CAUSE",
.cause_reg = A_PL_INT_CAUSE,
.enable_reg = A_PL_INT_ENABLE,
.fatal = 0,
.flags = 0,
- .details = pl_intr_details,
- .actions = pl_intr_action,
+ .details = NULL,
+ .actions = NULL,
};
u32 perr;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ pl_perr_cause.details = t7_pl_intr_details;
+ pl_intr_info.details = t7_pl_intr_details;
+ pl_intr_info.actions = t7_pl_intr_action;
+ } else {
+ pl_perr_cause.details = pl_intr_details;
+ pl_intr_info.details = pl_intr_details;
+ pl_intr_info.actions = pl_intr_action;
+ }
+
perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
if (verbose || perr != 0) {
t4_show_intr_info(adap, &pl_perr_cause, perr);
@@ -5421,19 +6227,20 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
*/
void t4_intr_enable(struct adapter *adap)
{
- u32 val = 0;
+ u32 mask, val;
if (chip_id(adap) <= CHELSIO_T5)
- val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
+ F_DBFIFO_LP_INT;
else
val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
- F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
- F_EGRESS_SIZE_ERR;
- t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
+ mask = val;
+ t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
@@ -6184,6 +6991,11 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
{
t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
sleep_ok);
+
+ if (chip_id(adap) >= CHELSIO_T7)
+ /* read RDMA stats IN and OUT for all ports at once */
+ t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0,
+ sleep_ok);
}
/**
@@ -6564,16 +7376,24 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
int idx, int enable)
{
- int i, ofst = idx * 4;
+ int i, ofst;
+ u32 match_ctl_a, match_ctl_b;
u32 data_reg, mask_reg, cfg;
u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
if (idx < 0 || idx >= NTRACE)
return -EINVAL;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ } else {
+ match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ }
+
if (tp == NULL || !enable) {
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
- enable ? en : 0);
+ t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0);
return 0;
}
@@ -6610,22 +7430,20 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
return -EINVAL;
/* stop the tracer we'll be changing */
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
+ t4_set_reg_field(adap, match_ctl_a, en, 0);
- idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
- data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
- mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
+ ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
+ data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
+ mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
t4_write_reg(adap, data_reg, tp->data[i]);
t4_write_reg(adap, mask_reg, ~tp->mask[i]);
}
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
- V_TFCAPTUREMAX(tp->snap_len) |
+ t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) |
V_TFMINPKTSIZE(tp->min_len));
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
- V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
- (is_t4(adap) ?
+ t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) |
+ V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ?
V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
@@ -6645,11 +7463,16 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
int *enabled)
{
u32 ctla, ctlb;
- int i, ofst = idx * 4;
+ int i, ofst;
u32 data_reg, mask_reg;
- ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
- ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ } else {
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ }
if (is_t4(adap)) {
*enabled = !!(ctla & F_TFEN);
@@ -6676,6 +7499,37 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
}
/**
+ * t4_set_trace_rss_control - configure the trace rss control register
+ * @adap: the adapter
+ * @chan: the channel number for RSS control
+ * @qid: queue number
+ *
+ * Configures the MPS tracing RSS control parameter for specified
+ * @chan channel and @qid queue number.
+ */
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
+{
+ u32 mps_trc_rss_control;
+
+ switch (chip_id(adap)) {
+ case CHELSIO_T4:
+ mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T7:
+ default:
+ mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ }
+
+ t4_write_reg(adap, mps_trc_rss_control,
+ V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
+}
+
+/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
@@ -6696,6 +7550,8 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
else {
t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
A_PM_TX_DBG_DATA, data, 2,
+ chip_id(adap) >= CHELSIO_T7 ?
+ A_T7_PM_TX_DBG_STAT_MSB :
A_PM_TX_DBG_STAT_MSB);
cycles[i] = (((u64)data[0] << 32) | data[1]);
}
@@ -6730,6 +7586,25 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
+ * t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
+ * @adap: the adapter
+ * @stats: where to store the statistics
+ *
+ * Returns performance statistics of PMRX cache.
+ */
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
+{
+ u8 i, j;
+
+ for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
+ t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
+ stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
+ t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
+ &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
+ }
+}
+
+/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
@@ -6762,11 +7637,24 @@ static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
const u32 n = adap->params.nports;
const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
- if (n == 1)
- return idx == 0 ? all_chan : 0;
- if (n == 2 && chip_id(adap) <= CHELSIO_T5)
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ switch (adap->params.tp.lb_mode) {
+ case 0:
+ if (n == 1)
+ return (all_chan);
+ if (n == 2 && chip_id(adap) <= CHELSIO_T5)
+ return (3 << (2 * idx));
+ return (1 << idx);
+ case 1:
+ MPASS(n == 1);
+ return (all_chan);
+ case 2:
+ MPASS(n <= 2);
+ return (3 << (2 * idx));
+ default:
+ CH_ERR(adap, "Unsupported LB mode %d\n",
+ adap->params.tp.lb_mode);
+ return (0);
+ }
}
/*
@@ -6784,6 +7672,8 @@ static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
*/
static unsigned int t4_get_tx_c_chan(struct adapter *adap, int idx)
{
+ if (adap->params.tx_tp_ch_map != UINT32_MAX)
+ return (adap->params.tx_tp_ch_map >> (8 * idx)) & 0xff;
return idx;
}
@@ -6856,79 +7746,89 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- struct port_info *pi = adap->port[idx];
- u32 bgmap = pi->mps_bg_map;
- u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
+ struct port_info *pi;
+ int port_id, tx_chan;
+ u32 bgmap, stat_ctl;
+
+ port_id = adap->port_map[idx];
+ MPASS(port_id >= 0 && port_id <= adap->params.nports);
+ pi = adap->port[port_id];
#define GET_STAT(name) \
t4_read_reg64(adap, \
- t4_port_reg(adap, pi->tx_chan, A_MPS_PORT_STAT_##name##_L));
-#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L));
+ memset(p, 0, sizeof(*p));
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ p->tx_pause += GET_STAT(TX_PORT_PAUSE);
+ p->tx_octets += GET_STAT(TX_PORT_BYTES);
+ p->tx_frames += GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames += GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 += GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop += GET_STAT(TX_PORT_DROP);
+ p->tx_ppp0 += GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 += GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 += GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 += GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 += GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 += GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 += GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 += GET_STAT(TX_PORT_PPP7);
+
+ p->rx_pause += GET_STAT(RX_PORT_PAUSE);
+ p->rx_octets += GET_STAT(RX_PORT_BYTES);
+ p->rx_frames += GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt += GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 += GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_ppp0 += GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 += GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 += GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 += GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 += GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 += GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 += GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 += GET_STAT(RX_PORT_PPP7);
+ if (!is_t6(adap)) {
+ MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
+ p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR);
+ }
+ }
+#undef GET_STAT
- p->tx_pause = GET_STAT(TX_PORT_PAUSE);
- p->tx_octets = GET_STAT(TX_PORT_BYTES);
- p->tx_frames = GET_STAT(TX_PORT_FRAMES);
- p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
- p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
- p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
- p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
- p->tx_frames_64 = GET_STAT(TX_PORT_64B);
- p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
- p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
- p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
- p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
- p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
- p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
- p->tx_drop = GET_STAT(TX_PORT_DROP);
- p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
- p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
- p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
- p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
- p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
- p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
- p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
- p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (is_t6(adap) && pi->fcs_reg != -1)
+ p->rx_fcs_err = t4_read_reg64(adap,
+ t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base;
if (chip_id(adap) >= CHELSIO_T5) {
+ stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
if (stat_ctl & F_COUNTPAUSESTATTX) {
p->tx_frames -= p->tx_pause;
p->tx_octets -= p->tx_pause * 64;
}
if (stat_ctl & F_COUNTPAUSEMCTX)
p->tx_mcast_frames -= p->tx_pause;
- }
-
- p->rx_pause = GET_STAT(RX_PORT_PAUSE);
- p->rx_octets = GET_STAT(RX_PORT_BYTES);
- p->rx_frames = GET_STAT(RX_PORT_FRAMES);
- p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
- p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
- p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
- p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
- p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
- p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
- p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
- p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
- p->rx_frames_64 = GET_STAT(RX_PORT_64B);
- p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
- p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
- p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
- p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
- p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
- p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
- p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
- p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
- p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
- p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
- p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
- p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
- p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
- p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
-
- if (pi->fcs_reg != -1)
- p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) - pi->fcs_base;
-
- if (chip_id(adap) >= CHELSIO_T5) {
if (stat_ctl & F_COUNTPAUSESTATRX) {
p->rx_frames -= p->rx_pause;
p->rx_octets -= p->rx_pause * 64;
@@ -6937,6 +7837,8 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_mcast_frames -= p->rx_pause;
}
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ bgmap = pi->mps_bg_map;
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -6945,8 +7847,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
#undef GET_STAT_COM
}
@@ -7016,10 +7916,14 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
}
if (addr) {
@@ -7056,8 +7960,10 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
if (is_t4(adap))
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- else
+ else if (chip_id(adap) < CHELSIO_T7)
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ else
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
if (!enable) {
t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
@@ -7348,6 +8254,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
break;
case CHELSIO_T6:
+ case CHELSIO_T7:
sge_idma_decode = (const char * const *)t6_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
break;
@@ -8964,7 +9871,7 @@ static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(pi->tx_chan));
+ V_FW_PORT_CMD_PORTID(pi->hw_port));
action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
FW_PORT_ACTION_GET_PORT_INFO;
cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
@@ -8996,16 +9903,12 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
(action == FW_PORT_ACTION_GET_PORT_INFO ||
action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- int i;
- int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
- struct port_info *pi = NULL;
-
- for_each_port(adap, i) {
- pi = adap2pinfo(adap, i);
- if (pi->tx_chan == chan)
- break;
- }
+ int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ int port_id = adap->port_map[hw_port];
+ struct port_info *pi;
+ MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
PORT_LOCK(pi);
handle_port_info(pi, p, action, &mod_changed, &link_changed);
PORT_UNLOCK(pi);
@@ -9159,14 +10062,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
/* If we didn't recognize the FLASH part, that's no real issue: the
- * Hardware/Software contract says that Hardware will _*ALWAYS*_
- * use a FLASH part which is at least 4MB in size and has 64KB
- * sectors. The unrecognized FLASH part is likely to be much larger
- * than 4MB, but that's all we really need.
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
+ * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
+ * size, depending on the board.
*/
if (size == 0) {
- CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
- size = 1 << 22;
+ size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
+ CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
+ flashid, size);
+ size <<= 20;
}
/*
@@ -9212,11 +10116,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 15,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ,
.filter_opt_len = FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9227,11 +10134,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO | F_DBTYPE,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9242,15 +10152,36 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = T6_PM_NSTATS,
.cng_ch_bits_log = 3,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 256,
.vfcount = 256,
.sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = T6_RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE_T6,
},
+ {
+ /* T7 */
+ .nchan = NCHAN,
+ .pm_stats_cnt = T6_PM_NSTATS,
+ .cng_ch_bits_log = 2,
+ .nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ_T7,
+ .cim_num_obq = CIM_NUM_OBQ_T7,
+ .filter_opt_len = T7_FILTER_OPT_LEN,
+ .filter_num_opt = S_T7_FT_LAST + 1,
+ .mps_rplc_size = 256,
+ .vfcount = 256,
+ .sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE_T7,
+ .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+ .rss_nentries = T7_RSS_NENTRIES,
+ .cim_la_size = CIMLA_SIZE_T6,
+ },
};
chipid -= CHELSIO_T4;
@@ -9466,14 +10397,11 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
}
/**
- * t4_init_devlog_params - initialize adapter->params.devlog
+ * t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
* @adap: the adapter
* @fw_attach: whether we can talk to the firmware
- *
- * Initialize various fields of the adapter's Firmware Device Log
- * Parameters structure.
*/
-int t4_init_devlog_params(struct adapter *adap, int fw_attach)
+int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
struct devlog_params *dparams = &adap->params.devlog;
u32 pf_dparams;
@@ -9487,12 +10415,15 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
*/
pf_dparams =
t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
- if (pf_dparams) {
- unsigned int nentries, nentries128;
+ if (pf_dparams && pf_dparams != UINT32_MAX) {
+ unsigned int nentries, nentries128, ncore_shift;
+
+ ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
+ G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
+ adap->params.ncores = 1 << ncore_shift;
dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
-
nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
nentries = (nentries128 + 1) * 128;
dparams->size = nentries * sizeof(struct fw_devlog_e);
@@ -9503,6 +10434,7 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
/*
* For any failing returns ...
*/
+ adap->params.ncores = 1;
memset(dparams, 0, sizeof *dparams);
/*
@@ -9624,21 +10556,28 @@ int t4_init_sge_params(struct adapter *adapter)
/* Convert the LE's hardware hash mask to a shorter filter mask. */
static inline uint16_t
-hashmask_to_filtermask(uint64_t hashmask, uint16_t filter_mode)
+hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
- int i;
+ int first, last, i;
uint16_t filter_mask;
- uint64_t mask; /* field mask */
+ uint64_t mask; /* field mask */
+
+
+ if (chip_id(adap) >= CHELSIO_T7) {
+ first = S_T7_FT_FIRST;
+ last = S_T7_FT_LAST;
+ } else {
+ first = S_FT_FIRST;
+ last = S_FT_LAST;
+ }
- filter_mask = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (filter_mask = 0, i = first; i <= last; i++) {
if ((filter_mode & (1 << i)) == 0)
continue;
- mask = (1 << width[i]) - 1;
+ mask = (1 << t4_filter_field_width(adap, i)) - 1;
if ((hashmask & mask) == mask)
filter_mask |= 1 << i;
- hashmask >>= width[i];
+ hashmask >>= t4_filter_field_width(adap, i);
}
return (filter_mask);
@@ -9681,7 +10620,15 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
hash_mask |= (u64)v << 32;
}
- tpp->filter_mask = hashmask_to_filtermask(hash_mask,
+ if (chip_id(adap) >= CHELSIO_T7) {
+ /*
+ * This param came before T7 so T7+ firmwares should
+ * always support this query.
+ */
+ CH_WARN(adap, "query for filter mode/mask failed: %d\n",
+ rc);
+ }
+ tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask,
tpp->filter_mode);
t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
@@ -9696,16 +10643,37 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
- tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
- tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
- tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
- tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
- tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
- tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
- tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX);
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION);
+ tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE);
+ tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY);
+ tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS);
+ } else {
+ tpp->ipsecidx_shift = -1;
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ tpp->roce_shift = -1;
+ tpp->synonly_shift = -1;
+ tpp->tcpflags_shift = -1;
+ }
}
/**
@@ -9725,11 +10693,21 @@ int t4_init_tp_params(struct adapter *adap)
read_filter_mode_and_ingress_config(adap);
+ tpp->rx_pkt_encap = false;
+ tpp->lb_mode = 0;
+ tpp->lb_nchan = 1;
if (chip_id(adap) > CHELSIO_T5) {
v = t4_read_reg(adap, A_TP_OUT_CONFIG);
tpp->rx_pkt_encap = v & F_CRXPKTENC;
- } else
- tpp->rx_pkt_encap = false;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
+ tpp->lb_mode = G_T7_LB_MODE(v);
+ if (tpp->lb_mode == 1)
+ tpp->lb_nchan = 4;
+ else if (tpp->lb_mode == 2)
+ tpp->lb_nchan = 2;
+ }
+ }
rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
@@ -9750,6 +10728,53 @@ int t4_init_tp_params(struct adapter *adap)
}
/**
+ * t4_filter_field_width - returns the width of a filter field
+ * @adap: the adapter
+ * @filter_field: the filter field whose width is being requested
+ *
+ * Return the width of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_width(const struct adapter *adap, int filter_field)
+{
+ const int nopt = adap->chip_params->filter_num_opt;
+ static const uint8_t width_t7[] = {
+ W_FT_IPSECIDX,
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION,
+ W_FT_ROCE,
+ W_FT_SYNONLY,
+ W_FT_TCPFLAGS
+ };
+ static const uint8_t width_t4[] = {
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION
+ };
+ const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;
+
+ if (filter_field < 0 || filter_field >= nopt)
+ return (0);
+ return (width[filter_field]);
+}
+
+/**
* t4_filter_field_shift - calculate filter field shift
* @adap: the adapter
* @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
@@ -9767,6 +10792,56 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
if ((filter_mode & filter_sel) == 0)
return -1;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_IPSECIDX:
+ field_shift += W_FT_IPSECIDX;
+ break;
+ case F_T7_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_T7_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_T7_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_T7_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_T7_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_T7_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_T7_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_T7_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_T7_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_T7_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ case F_ROCE:
+ field_shift += W_FT_ROCE;
+ break;
+ case F_SYNONLY:
+ field_shift += W_FT_SYNONLY;
+ break;
+ case F_TCPFLAGS:
+ field_shift += W_FT_TCPFLAGS;
+ break;
+ }
+ }
+ return field_shift;
+ }
+
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
case F_FCOE:
@@ -9818,11 +10893,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
} while ((adap->params.portvec & (1 << j)) == 0);
}
+ p->hw_port = j;
p->tx_chan = t4_get_tx_c_chan(adap, j);
p->rx_chan = t4_get_rx_c_chan(adap, j);
p->mps_bg_map = t4_get_mps_bg_map(adap, j);
p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
- p->lport = j;
if (!(adap->flags & IS_VF) ||
adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
@@ -9851,232 +10926,321 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
return 0;
}
+static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_IBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+ if (thres)
+ *thres = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
+}
+
+static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_OBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+}
+
/**
- * t4_read_cimq_cfg - read CIM queue configuration
+ * t4_read_cimq_cfg_core - read CIM queue configuration on specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @base: holds the queue base addresses in bytes
* @size: holds the queue sizes in bytes
* @thres: holds the queue full thresholds in bytes
*
* Returns the current configuration of the CIM queues, starting with
- * the IBQs, then the OBQs.
+ * the IBQs, then the OBQs, on a specific @coreid.
*/
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres)
{
- unsigned int i, v;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int i;
- for (i = 0; i < CIM_NUM_IBQ; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
- }
- for (i = 0; i < cim_num_obq; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- }
+ for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
+ t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);
+
+ for (i = 0; i < cim_num_obq; i++, base++, size++)
+ t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
+}
+
+static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ int ret, attempts;
+ unsigned int v;
+
+ /* It might take 3-10ms before the IBQ debug read access is allowed.
+ * Wait for 1 Sec with a delay of 1 usec.
+ */
+ attempts = 1000000;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
+ else
+ v = V_IBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, v | F_IBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+ attempts, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ return 0;
}
/**
- * t4_read_cim_ibq - read the contents of a CIM inbound queue
+ * t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
+ * specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ * to the capacity of @data on a specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err, attempts;
- unsigned int addr;
- const unsigned int nwords = CIM_IBQ_SIZE * 4;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ u16 i, addr, nwords;
+ int ret;
- if (qid > 5 || (n & 3))
+ if (qid > (cim_num_ibq - 1) || (n & 3))
return -EINVAL;
- addr = qid * nwords;
+ t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
+ addr >>= sizeof(u16);
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- /* It might take 3-10ms before the IBQ debug read access is allowed.
- * Wait for 1 Sec with a delay of 1 usec.
- */
- attempts = 1000000;
-
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
- F_IBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
- attempts, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
return i;
}
+static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ unsigned int v;
+ int ret;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
+ else
+ v = V_OBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, v | F_OBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ return 0;
+}
+
/**
- * t4_read_cim_obq - read the contents of a CIM outbound queue
+ * t4_read_cim_obq_core - read the contents of a CIM outbound queue on
+ * specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ * to the capacity of @data on specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err;
- unsigned int addr, v, nwords;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ u16 i, addr, nwords;
+ int ret;
if ((qid > (cim_num_obq - 1)) || (n & 3))
return -EINVAL;
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(qid));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-
- addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
- nwords = G_CIMQSIZE(v) * 64; /* same */
+ t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
+ addr >>= sizeof(u16);
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
- F_OBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
- 2, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
- return i;
+ return i;
}
-enum {
- CIM_QCTL_BASE = 0,
- CIM_CTL_BASE = 0x2000,
- CIM_PBT_ADDR_BASE = 0x2800,
- CIM_PBT_LRF_BASE = 0x3000,
- CIM_PBT_DATA_BASE = 0x3800
-};
-
/**
- * t4_cim_read - read a block from CIM internal address space
+ * t4_cim_read_core - read a block from CIM internal address space
+ * of a control register group on specific core.
* @adap: the adapter
+ * @group: the control register group to select for read
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to read
* @valp: where to store the result
*
- * Reads a block of 4-byte words from the CIM intenal address space.
+ * Reads a block of 4-byte words from the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp)
{
+ unsigned int hostbusy, v = 0;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
if (!ret)
*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
}
+
return ret;
}
/**
- * t4_cim_write - write a block into CIM internal address space
+ * t4_cim_write_core - write a block into CIM internal address space
+ * of a control register group on specific core.
* @adap: the adapter
+ * @group: the control register group to select for write
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to write
* @valp: set of values to write
*
- * Writes a block of 4-byte words into the CIM intenal address space.
+ * Writes a block of 4-byte words into the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp)
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp)
{
+ unsigned int hostbusy, v;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
+ V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ v = F_HOSTWRITE;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
}
- return ret;
-}
-static int t4_cim_write1(struct adapter *adap, unsigned int addr,
- unsigned int val)
-{
- return t4_cim_write(adap, addr, 1, &val);
-}
-
-/**
- * t4_cim_ctl_read - read a block from CIM control region
- * @adap: the adapter
- * @addr: the start address within the CIM control region
- * @n: number of words to read
- * @valp: where to store the result
- *
- * Reads a block of 4-byte words from the CIM control region.
- */
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
-{
- return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
+ return ret;
}
/**
- * t4_cim_read_la - read CIM LA capture buffer
+ * t4_cim_read_la_core - read CIM LA capture buffer on specific core
* @adap: the adapter
+ * @coreid: uP coreid
* @la_buf: where to store the LA data
* @wrptr: the HW write pointer within the capture buffer
*
- * Reads the contents of the CIM LA buffer with the most recent entry at
- * the end of the returned data and with the entry at @wrptr first.
- * We try to leave the LA in the running state we find it in.
+ * Reads the contents of the CIM LA buffer on a specific @coreid
+ * with the most recent entry at the end of the returned data
+ * and with the entry at @wrptr first. We try to leave the LA
+ * in the running state we find it in.
*/
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr)
{
- int i, ret;
unsigned int cfg, val, idx;
+ int i, ret;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
if (ret)
return ret;
if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+ val = 0;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
return ret;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
if (ret)
goto restart;
@@ -10085,25 +11249,28 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
*wrptr = idx;
for (i = 0; i < adap->params.cim_la_size; i++) {
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+ val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
if (val & F_UPDBGLARDEN) {
ret = -ETIMEDOUT;
break;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
+ &la_buf[i]);
if (ret)
break;
/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
* identify the 32-bit portion of the full 312-bit data
*/
- if (is_t6(adap) && (idx & 0xf) >= 9)
+ if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
idx = (idx & 0xff0) + 0x10;
else
idx++;
@@ -10112,11 +11279,15 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
}
restart:
if (cfg & F_UPDBGLAEN) {
- int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- cfg & ~F_UPDBGLARDEN);
+ int r;
+
+ val = cfg & ~F_UPDBGLARDEN;
+ r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (!ret)
ret = r;
}
+
return ret;
}
@@ -10403,25 +11574,20 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbp
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_cfg_addr(adap);
+ cfg_addr = t4_flash_cfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- CH_ERR(adap, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
/*
@@ -10432,15 +11598,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 1);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10644,25 +11807,25 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
pcir_data_t *pcir_header;
int ret, addr;
uint16_t device_id;
- unsigned int i;
- unsigned int boot_sector = (boot_addr * 1024 );
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int i, start, len;
+ unsigned int boot_sector = boot_addr * 1024;
/*
- * Make sure the boot image does not encroach on the firmware region
+ * Make sure the boot image does not exceed its available space.
*/
- if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
- CH_ERR(adap, "boot image encroaching on firmware region\n");
+ len = 0;
+ start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
+ if (boot_sector + size > start + len) {
+ CH_ERR(adap, "boot data is larger than available BOOT area\n");
return -EFBIG;
}
/*
* The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
* and Boot configuration data sections. These 3 boot sections span
- * sectors 0 to 7 in flash and live right before the FW image location.
+ * the entire FLASH_LOC_BOOT_AREA.
*/
- i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
- sf_sec_size);
+ i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
(boot_sector >> 16) + i - 1);
@@ -10765,40 +11928,39 @@ out:
* is stored, or an error if the device FLASH is too small to contain
* a OptionROM Configuration.
*/
-static int t4_flash_bootcfg_addr(struct adapter *adapter)
+static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+ if (adapter->params.sf_size < start + len)
return -ENOSPC;
-
- return FLASH_BOOTCFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (start);
}
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_bootcfg_addr(adap);
+ cfg_addr = t4_flash_bootcfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_BOOTCFG_MAX_SIZE) {
- CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
- FLASH_BOOTCFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
@@ -10810,15 +11972,12 @@ int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 0);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10844,19 +12003,20 @@ out:
*/
int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
int i, nbits, rc;
uint32_t param, val;
uint16_t fmode, fmask;
const int maxbits = adap->chip_params->filter_opt_len;
+ const int nopt = adap->chip_params->filter_num_opt;
+ int width;
if (mode != -1 || mask != -1) {
if (mode != -1) {
fmode = mode;
nbits = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (i = 0; i < nopt; i++) {
if (fmode & (1 << i))
- nbits += width[i];
+ nbits += t4_filter_field_width(adap, i);
}
if (nbits > maxbits) {
CH_ERR(adap, "optional fields in the filter "
@@ -10867,17 +12027,20 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
}
/*
- * Hardware wants the bits to be maxed out. Keep
+ * Hardware < T7 wants the bits to be maxed out. Keep
* setting them until there's no room for more.
*/
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
- if (fmode & (1 << i))
- continue;
- if (nbits + width[i] <= maxbits) {
- fmode |= 1 << i;
- nbits += width[i];
- if (nbits == maxbits)
- break;
+ if (chip_id(adap) < CHELSIO_T7) {
+ for (i = 0; i < nopt; i++) {
+ if (fmode & (1 << i))
+ continue;
+ width = t4_filter_field_width(adap, i);
+ if (nbits + width <= maxbits) {
+ fmode |= 1 << i;
+ nbits += width;
+ if (nbits == maxbits)
+ break;
+ }
}
}
@@ -10936,21 +12099,26 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
*/
void t4_clr_port_stats(struct adapter *adap, int idx)
{
- unsigned int i;
- u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
- u32 port_base_addr;
+ struct port_info *pi;
+ int i, port_id, tx_chan;
+ u32 bgmap, port_base_addr;
- if (is_t4(adap))
- port_base_addr = PORT_BASE(idx);
- else
- port_base_addr = T5_PORT_BASE(idx);
-
- for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
- for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
+ port_id = adap->port_map[idx];
+ MPASS(port_id >= 0 && port_id <= adap->params.nports);
+ pi = adap->port[port_id];
+
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ port_base_addr = t4_port_reg(adap, tx_chan, 0);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ }
+ bgmap = pi->mps_bg_map;
for (i = 0; i < 4; i++)
if (bgmap & (1 << i)) {
t4_write_reg(adap,
@@ -11078,6 +12246,8 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ if (chip_id(adap) > CHELSIO_T6)
+ data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
}
return ret;
}
@@ -11099,9 +12269,12 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type cty
t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
- if (!ret)
+ if (!ret) {
for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
*data++ = t4_read_reg(adap, i);
+ if (chip_id(adap) > CHELSIO_T6)
+ *data++ = t4_read_reg(adap, i);
+ }
return ret;
}
diff --git a/sys/dev/cxgbe/common/t4_hw.h b/sys/dev/cxgbe/common/t4_hw.h
index 79ec690cd5e6..09bd9ac9e637 100644
--- a/sys/dev/cxgbe/common/t4_hw.h
+++ b/sys/dev/cxgbe/common/t4_hw.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,30 +41,36 @@ enum {
EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
T6_RSS_NENTRIES = 4096,
+ T7_RSS_NENTRIES = 16384,
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
NTX_SCHED = 8, /* # of HW Tx scheduling queues */
PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7,
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MAX_PM_NSTATS = 7,
+ T7_PM_RX_CACHE_NSTATS = 27, /* # of PM Rx Cache stats in T7 */
MBOX_LEN = 64, /* mailbox size in bytes */
NTRACE = 4, /* # of tracing filters */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width of optional components */
T5_FILTER_OPT_LEN = 40,
+ T7_FILTER_OPT_LEN = 63,
NWOL_PAT = 8, /* # of WoL patterns */
WOL_PAT_LEN = 128, /* length of WoL patterns */
UDBS_SEG_SIZE = 128, /* Segment size of BAR2 doorbells */
UDBS_SEG_SHIFT = 7, /* log2(UDBS_SEG_SIZE) */
UDBS_DB_OFFSET = 8, /* offset of the 4B doorbell in a segment */
UDBS_WR_OFFSET = 64, /* offset of the work request in a segment */
+ MAX_UP_CORES = 8, /* Max # of uP cores that can be enabled */
};
enum {
CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_IBQ_T7 = 16, /* # of CIM IBQs for T7 */
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
+ CIM_NUM_OBQ_T7 = 16, /* # of CIM OBQs for T7 adapter */
CIMLA_SIZE = 256 * 8, /* 256 rows * ceil(235/32) 32-bit words */
CIMLA_SIZE_T6 = 256 * 10, /* 256 rows * ceil(311/32) 32-bit words */
CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
@@ -91,6 +96,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_CTXT_SIZE = 24, /* size of SGE context */
+ SGE_CTXT_SIZE_T7 = 28, /* size of SGE context for T7 */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
@@ -161,6 +167,18 @@ struct rsp_ctrl {
#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+#define S_ARM_QTYPE 11
+#define M_ARM_QTYPE 1
+#define V_ARM_QTYPE(x) ((x) << S_ARM_QTYPE)
+
+#define S_ARM_PIDX 0
+#define M_ARM_PIDX 0x7ffU
+#define V_ARM_PIDX(x) ((x) << S_ARM_PIDX)
+
+#define S_ARM_CIDXINC 0
+#define M_ARM_CIDXINC 0x7ffU
+#define V_ARM_CIDXINC(x) ((x) << S_ARM_CIDXINC)
+
/* # of pages a pagepod can hold without needing another pagepod */
#define PPOD_PAGES 4U
@@ -206,95 +224,116 @@ struct pagepod {
*/
#define FLASH_START(start) ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+#define FLASH_MIN_SIZE FLASH_START(32)
-enum {
+enum t4_flash_loc {
/*
* Various Expansion-ROM boot images, etc.
*/
- FLASH_EXP_ROM_START_SEC = 0,
- FLASH_EXP_ROM_NSECS = 6,
- FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
- FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+ FLASH_LOC_EXP_ROM = 0,
/*
* iSCSI Boot Firmware Table (iBFT) and other driver-related
* parameters ...
*/
- FLASH_IBFT_START_SEC = 6,
- FLASH_IBFT_NSECS = 1,
- FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
- FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+ FLASH_LOC_IBFT,
/*
* Boot configuration data.
*/
- FLASH_BOOTCFG_START_SEC = 7,
- FLASH_BOOTCFG_NSECS = 1,
- FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
- FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+ FLASH_LOC_BOOTCFG,
/*
* Location of firmware image in FLASH.
*/
- FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 16,
- FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
- FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ FLASH_LOC_FW,
/*
* Location of bootstrap firmware image in FLASH.
*/
- FLASH_FWBOOTSTRAP_START_SEC = 27,
- FLASH_FWBOOTSTRAP_NSECS = 1,
- FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
- FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+ FLASH_LOC_FWBOOTSTRAP,
/*
* iSCSI persistent/crash information.
*/
- FLASH_ISCSI_CRASH_START_SEC = 29,
- FLASH_ISCSI_CRASH_NSECS = 1,
- FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
- FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+ FLASH_LOC_ISCSI_CRASH,
/*
* FCoE persistent/crash information.
*/
- FLASH_FCOE_CRASH_START_SEC = 30,
- FLASH_FCOE_CRASH_NSECS = 1,
- FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
- FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+ FLASH_LOC_FCOE_CRASH,
/*
* Location of Firmware Configuration File in FLASH.
*/
- FLASH_CFG_START_SEC = 31,
- FLASH_CFG_NSECS = 1,
- FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
- FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+ FLASH_LOC_CFG,
+
+ /*
+ * CUDBG chip dump.
+ */
+ FLASH_LOC_CUDBG,
+
+ /*
+ * FW chip dump.
+ */
+ FLASH_LOC_CHIP_DUMP,
+
+ /*
+ * DPU boot information store.
+ */
+ FLASH_LOC_DPU_BOOT,
+
+ /*
+	 * DPU persistent information store.
+ */
+ FLASH_LOC_DPU_AREA,
/*
- * We don't support FLASH devices which can't support the full
- * standard set of sections which we need for normal operations.
+ * VPD location.
*/
- FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+ FLASH_LOC_VPD,
/*
- * Sectors 32-63 for CUDBG.
+ * Backup init/vpd.
*/
- FLASH_CUDBG_START_SEC = 32,
- FLASH_CUDBG_NSECS = 32,
- FLASH_CUDBG_START = FLASH_START(FLASH_CUDBG_START_SEC),
- FLASH_CUDBG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CUDBG_NSECS),
+ FLASH_LOC_VPD_BACKUP,
/*
- * Size of defined FLASH regions.
+ * Backup firmware image.
*/
- FLASH_END_SEC = 64,
+ FLASH_LOC_FW_BACKUP,
+
+ /*
+ * Backup bootstrap firmware image.
+ */
+ FLASH_LOC_FWBOOTSTRAP_BACKUP,
+
+ /*
+ * Backup Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_LOC_CFG_BACK,
+
+ /*
+ * Helper to retrieve info that spans the entire Boot related area.
+ */
+ FLASH_LOC_BOOT_AREA,
+
+ /*
+ * Helper to determine minimum standard set of sections needed for
+ * normal operations.
+ */
+ FLASH_LOC_MIN_SIZE,
+
+ /*
+ * End of FLASH regions.
+ */
+ FLASH_LOC_END
};
-#undef FLASH_START
-#undef FLASH_MAX_SIZE
+struct t4_flash_loc_entry {
+ u16 start_sec;
+ u16 nsecs;
+};
#define S_SGE_TIMESTAMP 0
#define M_SGE_TIMESTAMP 0xfffffffffffffffULL
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index d356d0d99f36..0d12ccf2e910 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +29,7 @@
#ifndef T4_MSG_H
#define T4_MSG_H
-enum {
+enum cpl_opcodes {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
@@ -68,13 +67,16 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_RTE_DELETE_RPL = 0x27,
CPL_RTE_WRITE_RPL = 0x28,
+ CPL_ROCE_FW_NOTIFY = 0x28,
CPL_RX_URG_PKT = 0x29,
CPL_TAG_WRITE_RPL = 0x2A,
+ CPL_RDMA_ASYNC_EVENT = 0x2A,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_RX_URG_NOTIFY = 0x2C,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_TX_DATA_ACK = 0x2F,
+ CPL_RDMA_INV_REQ = 0x2F,
CPL_RX_PHYS_ADDR = 0x30,
CPL_PCMD_READ_RPL = 0x31,
@@ -107,19 +109,30 @@ enum {
CPL_RX_DATA_DIF = 0x4B,
CPL_ERR_NOTIFY = 0x4D,
CPL_RX_TLS_CMP = 0x4E,
+ CPL_T6_TX_DATA_ACK = 0x4F,
CPL_RDMA_READ_REQ = 0x60,
CPL_RX_ISCSI_DIF = 0x60,
+ CPL_RDMA_CQE_EXT = 0x61,
+ CPL_RDMA_CQE_FW_EXT = 0x62,
+ CPL_RDMA_CQE_ERR_EXT = 0x63,
+ CPL_TX_DATA_ACK_XT = 0x64,
+ CPL_ROCE_CQE = 0x68,
+ CPL_ROCE_CQE_FW = 0x69,
+ CPL_ROCE_CQE_ERR = 0x6A,
+
+ CPL_SACK_REQ = 0x70,
CPL_SET_LE_REQ = 0x80,
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
CPL_TX_TLS_PDU = 0x88,
CPL_TX_TLS_SFO = 0x89,
-
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
+ CPL_RCB_UPD = 0x8C,
+ CPL_SGE_FLR_FLUSH = 0xA0,
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -138,15 +151,27 @@ enum {
CPL_TLS_DATA = 0xB1,
CPL_ISCSI_DATA = 0xB2,
CPL_FCOE_DATA = 0xB3,
+ CPL_NVMT_DATA = 0xB4,
+ CPL_NVMT_CMP = 0xB5,
+ CPL_NVMT_CMP_IMM = 0xB6,
+ CPL_NVMT_CMP_SRQ = 0xB7,
+ CPL_ROCE_ACK_NAK_REQ = 0xBC,
+ CPL_ROCE_ACK_NAK = 0xBD,
CPL_FW4_MSG = 0xC0,
CPL_FW4_PLD = 0xC1,
+ CPL_RDMA_CQE_SRQ = 0xC2,
+ CPL_ACCELERATOR_ACK = 0xC4,
CPL_FW4_ACK = 0xC3,
+ CPL_RX_PKT_IPSEC = 0xC6,
CPL_SRQ_TABLE_RPL = 0xCC,
+ CPL_TX_DATA_REQ = 0xCF,
+
CPL_RX_PHYS_DSGL = 0xD0,
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_ACCELERATOR_HDR = 0xE8,
CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -233,6 +258,8 @@ enum {
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
ULP_MODE_TLS = 8,
+ ULP_MODE_RDMA_V2 = 10,
+ ULP_MODE_NVMET = 11,
};
enum {
@@ -325,9 +352,14 @@ union opcode_tid {
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command */
-#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+#define GET_TID(cmd) (G_TID(be32toh(OPCODE_TID(cmd))))
#define GET_OPCODE(cmd) ((cmd)->ot.opcode)
+
+/*
+ * Note that this driver splits the 14b opaque atid into an 11b atid and a 3b
+ * cookie that is used to demux replies for shared CPLs.
+ */
/* partitioning of TID fields that also carry a queue id */
#define S_TID_TID 0
#define M_TID_TID 0x7ff
@@ -717,7 +749,7 @@ struct cpl_pass_establish {
struct cpl_pass_accept_req {
RSS_HDR
union opcode_tid ot;
- __be16 rsvd;
+ __be16 ipsecen_outiphdrlen;
__be16 len;
__be32 hdr_len;
__be16 vlan;
@@ -775,6 +807,155 @@ struct cpl_pass_accept_req {
#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+struct cpl_t7_pass_accept_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 ipsecen_to_outiphdrlen;
+ __be16 length;
+ __be32 ethhdrlen_to_rxchannel;
+ __be16 vlantag;
+ __be16 interface_to_mac_ix;
+ __be32 tos_ptid;
+ __be16 tcpmss;
+ __u8 tcpwsc;
+ __u8 tcptmstp_to_tcpunkn;
+};
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define F_CPL_PASS_T7_ACCEPT_REQ_IPSECEN \
+ V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 10
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 0x3
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 14
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 8
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 0x3f
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+#define G_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 9
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define F_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH \
+ V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0x1ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX) & M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TOS 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_TOS 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_TOS(x) ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TOS)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TOS(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TOS) & M_CPL_T7_PASS_ACCEPT_REQ_TOS)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_PTID 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_PTID 0xffffff
+#define V_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_PTID)
+#define G_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_PTID) & M_CPL_T7_PASS_ACCEPT_REQ_PTID)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 7
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 6
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPSACK \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPECN 5
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPECN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPECN) & M_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPECN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 4
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(1U)
+
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -810,6 +991,7 @@ struct cpl_act_open_req {
#define M_FILTER_TUPLE 0xFFFFFFFFFF
#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -843,6 +1025,26 @@ struct cpl_t6_act_open_req {
#define V_AOPEN_FCOEMASK(x) ((x) << S_AOPEN_FCOEMASK)
#define F_AOPEN_FCOEMASK V_AOPEN_FCOEMASK(1U)
+struct cpl_t7_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_T7_FILTER_TUPLE 1
+#define M_T7_FILTER_TUPLE 0x7FFFFFFFFFFFFFFFULL
+#define V_T7_FILTER_TUPLE(x) ((x) << S_T7_FILTER_TUPLE)
+#define G_T7_FILTER_TUPLE(x) (((x) >> S_T7_FILTER_TUPLE) & M_T7_FILTER_TUPLE)
+
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -889,6 +1091,23 @@ struct cpl_t6_act_open_req6 {
__be32 opt3;
};
+struct cpl_t7_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
struct cpl_act_open_rpl {
RSS_HDR
union opcode_tid ot;
@@ -921,8 +1140,7 @@ struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
- __u8 rsvd;
- __u8 cookie;
+ __be16 cookie;
};
/* cpl_get_tcb.reply_ctrl fields */
@@ -931,10 +1149,20 @@ struct cpl_get_tcb {
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
+#define S_T7_QUEUENO 0
+#define M_T7_QUEUENO 0xFFF
+#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
+#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
+
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+#define S_T7_REPLY_CHAN 12
+#define M_T7_REPLY_CHAN 0x7
+#define V_T7_REPLY_CHAN(x) ((x) << S_T7_REPLY_CHAN)
+#define G_T7_REPLY_CHAN(x) (((x) >> S_T7_REPLY_CHAN) & M_T7_REPLY_CHAN)
+
#define S_NO_REPLY 15
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
#define F_NO_REPLY V_NO_REPLY(1U)
@@ -1018,6 +1246,40 @@ struct cpl_close_listsvr_req {
#define V_LISTSVR_IPV6(x) ((x) << S_LISTSVR_IPV6)
#define F_LISTSVR_IPV6 V_LISTSVR_IPV6(1U)
+struct cpl_t7_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 noreply_to_queue;
+ __be16 r2;
+};
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 15
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 14
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6) & M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0xfff
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+
struct cpl_close_listsvr_rpl {
RSS_HDR
union opcode_tid ot;
@@ -1250,6 +1512,71 @@ struct cpl_tx_data_ack {
__be32 snd_una;
};
+struct cpl_tx_data_ack_xt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 extinfoh[2];
+ __be32 extinfol[2];
+};
+
+struct cpl_tx_data_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+};
+
+#define S_CPL_TX_DATA_REQ_TID 0
+#define M_CPL_TX_DATA_REQ_TID 0xffffff
+#define V_CPL_TX_DATA_REQ_TID(x) ((x) << S_CPL_TX_DATA_REQ_TID)
+#define G_CPL_TX_DATA_REQ_TID(x) \
+ (((x) >> S_CPL_TX_DATA_REQ_TID) & M_CPL_TX_DATA_REQ_TID)
+
+struct cpl_sack_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 block1[2];
+ __be32 block2[2];
+ __be32 block3[2];
+};
+
+struct cpl_sge_flr_flush {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 cookievalue_cookiesel;
+};
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIEVALUE 4
+#define M_CPL_SGE_FLR_FLUSH_COOKIEVALUE 0x3ff
+#define V_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+#define G_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIEVALUE) & \
+ M_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIESEL 0
+#define M_CPL_SGE_FLR_FLUSH_COOKIESEL 0xf
+#define V_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIESEL)
+#define G_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIESEL) & M_CPL_SGE_FLR_FLUSH_COOKIESEL)
+
struct cpl_wr_ack { /* XXX */
RSS_HDR
union opcode_tid ot;
@@ -1271,8 +1598,6 @@ struct cpl_tx_pkt {
struct cpl_tx_pkt_core c;
};
-#define cpl_tx_pkt_xt cpl_tx_pkt
-
/* cpl_tx_pkt_core.ctrl0 fields */
#define S_TXPKT_VF 0
#define M_TXPKT_VF 0xFF
@@ -1404,6 +1729,261 @@ struct cpl_tx_pkt {
#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
+struct cpl_tx_pkt_xt {
+ WR_HDR;
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be32 ctrl1;
+ __be32 ctrl2;
+};
+
+/* cpl_tx_pkt_xt.ctrl0 fields */
+#define S_CPL_TX_PKT_XT_OPCODE 24
+#define M_CPL_TX_PKT_XT_OPCODE 0xff
+#define V_CPL_TX_PKT_XT_OPCODE(x) ((x) << S_CPL_TX_PKT_XT_OPCODE)
+#define G_CPL_TX_PKT_XT_OPCODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OPCODE) & M_CPL_TX_PKT_XT_OPCODE)
+
+#define S_CPL_TX_PKT_XT_TIMESTAMP 23
+#define M_CPL_TX_PKT_XT_TIMESTAMP 0x1
+#define V_CPL_TX_PKT_XT_TIMESTAMP(x) ((x) << S_CPL_TX_PKT_XT_TIMESTAMP)
+#define G_CPL_TX_PKT_XT_TIMESTAMP(x) \
+ (((x) >> S_CPL_TX_PKT_XT_TIMESTAMP) & M_CPL_TX_PKT_XT_TIMESTAMP)
+#define F_CPL_TX_PKT_XT_TIMESTAMP V_CPL_TX_PKT_XT_TIMESTAMP(1U)
+
+#define S_CPL_TX_PKT_XT_STATDISABLE 22
+#define M_CPL_TX_PKT_XT_STATDISABLE 0x1
+#define V_CPL_TX_PKT_XT_STATDISABLE(x) ((x) << S_CPL_TX_PKT_XT_STATDISABLE)
+#define G_CPL_TX_PKT_XT_STATDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATDISABLE) & M_CPL_TX_PKT_XT_STATDISABLE)
+#define F_CPL_TX_PKT_XT_STATDISABLE V_CPL_TX_PKT_XT_STATDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_FCSDIS 21
+#define M_CPL_TX_PKT_XT_FCSDIS 0x1
+#define V_CPL_TX_PKT_XT_FCSDIS(x) ((x) << S_CPL_TX_PKT_XT_FCSDIS)
+#define G_CPL_TX_PKT_XT_FCSDIS(x) \
+ (((x) >> S_CPL_TX_PKT_XT_FCSDIS) & M_CPL_TX_PKT_XT_FCSDIS)
+#define F_CPL_TX_PKT_XT_FCSDIS V_CPL_TX_PKT_XT_FCSDIS(1U)
+
+#define S_CPL_TX_PKT_XT_STATSPECIAL 20
+#define M_CPL_TX_PKT_XT_STATSPECIAL 0x1
+#define V_CPL_TX_PKT_XT_STATSPECIAL(x) ((x) << S_CPL_TX_PKT_XT_STATSPECIAL)
+#define G_CPL_TX_PKT_XT_STATSPECIAL(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATSPECIAL) & M_CPL_TX_PKT_XT_STATSPECIAL)
+#define F_CPL_TX_PKT_XT_STATSPECIAL V_CPL_TX_PKT_XT_STATSPECIAL(1U)
+
+#define S_CPL_TX_PKT_XT_INTERFACE 16
+#define M_CPL_TX_PKT_XT_INTERFACE 0xf
+#define V_CPL_TX_PKT_XT_INTERFACE(x) ((x) << S_CPL_TX_PKT_XT_INTERFACE)
+#define G_CPL_TX_PKT_XT_INTERFACE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_INTERFACE) & M_CPL_TX_PKT_XT_INTERFACE)
+
+#define S_CPL_TX_PKT_XT_OVLAN 15
+#define M_CPL_TX_PKT_XT_OVLAN 0x1
+#define V_CPL_TX_PKT_XT_OVLAN(x) ((x) << S_CPL_TX_PKT_XT_OVLAN)
+#define G_CPL_TX_PKT_XT_OVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLAN) & M_CPL_TX_PKT_XT_OVLAN)
+#define F_CPL_TX_PKT_XT_OVLAN V_CPL_TX_PKT_XT_OVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_OVLANIDX 12
+#define M_CPL_TX_PKT_XT_OVLANIDX 0x7
+#define V_CPL_TX_PKT_XT_OVLANIDX(x) ((x) << S_CPL_TX_PKT_XT_OVLANIDX)
+#define G_CPL_TX_PKT_XT_OVLANIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLANIDX) & M_CPL_TX_PKT_XT_OVLANIDX)
+
+#define S_CPL_TX_PKT_XT_VFVALID 11
+#define M_CPL_TX_PKT_XT_VFVALID 0x1
+#define V_CPL_TX_PKT_XT_VFVALID(x) ((x) << S_CPL_TX_PKT_XT_VFVALID)
+#define G_CPL_TX_PKT_XT_VFVALID(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VFVALID) & M_CPL_TX_PKT_XT_VFVALID)
+#define F_CPL_TX_PKT_XT_VFVALID V_CPL_TX_PKT_XT_VFVALID(1U)
+
+#define S_CPL_TX_PKT_XT_PF 8
+#define M_CPL_TX_PKT_XT_PF 0x7
+#define V_CPL_TX_PKT_XT_PF(x) ((x) << S_CPL_TX_PKT_XT_PF)
+#define G_CPL_TX_PKT_XT_PF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_PF) & M_CPL_TX_PKT_XT_PF)
+
+#define S_CPL_TX_PKT_XT_VF 0
+#define M_CPL_TX_PKT_XT_VF 0xff
+#define V_CPL_TX_PKT_XT_VF(x) ((x) << S_CPL_TX_PKT_XT_VF)
+#define G_CPL_TX_PKT_XT_VF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VF) & M_CPL_TX_PKT_XT_VF)
+
+/* cpl_tx_pkt_xt.ctrl1 fields */
+#define S_CPL_TX_PKT_XT_L4CHKDISABLE 31
+#define M_CPL_TX_PKT_XT_L4CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L4CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L4CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L4CHKDISABLE) & M_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L4CHKDISABLE V_CPL_TX_PKT_XT_L4CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_L3CHKDISABLE 30
+#define M_CPL_TX_PKT_XT_L3CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L3CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L3CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L3CHKDISABLE) & M_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L3CHKDISABLE V_CPL_TX_PKT_XT_L3CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_OUTL4CHKEN 29
+#define M_CPL_TX_PKT_XT_OUTL4CHKEN 0x1
+#define V_CPL_TX_PKT_XT_OUTL4CHKEN(x) ((x) << S_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define G_CPL_TX_PKT_XT_OUTL4CHKEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OUTL4CHKEN) & M_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define F_CPL_TX_PKT_XT_OUTL4CHKEN V_CPL_TX_PKT_XT_OUTL4CHKEN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLAN 28
+#define M_CPL_TX_PKT_XT_IVLAN 0x1
+#define V_CPL_TX_PKT_XT_IVLAN(x) ((x) << S_CPL_TX_PKT_XT_IVLAN)
+#define G_CPL_TX_PKT_XT_IVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLAN) & M_CPL_TX_PKT_XT_IVLAN)
+#define F_CPL_TX_PKT_XT_IVLAN V_CPL_TX_PKT_XT_IVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLANTAG 12
+#define M_CPL_TX_PKT_XT_IVLANTAG 0xffff
+#define V_CPL_TX_PKT_XT_IVLANTAG(x) ((x) << S_CPL_TX_PKT_XT_IVLANTAG)
+#define G_CPL_TX_PKT_XT_IVLANTAG(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLANTAG) & M_CPL_TX_PKT_XT_IVLANTAG)
+
+#define S_CPL_TX_PKT_XT_CHKTYPE 8
+#define M_CPL_TX_PKT_XT_CHKTYPE 0xf
+#define V_CPL_TX_PKT_XT_CHKTYPE(x) ((x) << S_CPL_TX_PKT_XT_CHKTYPE)
+#define G_CPL_TX_PKT_XT_CHKTYPE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKTYPE) & M_CPL_TX_PKT_XT_CHKTYPE)
+
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0xff
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+
+#define S_CPL_TX_PKT_XT_ETHHDRLEN 0
+#define M_CPL_TX_PKT_XT_ETHHDRLEN 0xff
+#define V_CPL_TX_PKT_XT_ETHHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_ETHHDRLEN)
+#define G_CPL_TX_PKT_XT_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ETHHDRLEN) & M_CPL_TX_PKT_XT_ETHHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKINSMODE 6
+#define M_CPL_TX_PKT_XT_ROCECHKINSMODE 0x3
+#define V_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKINSMODE)
+#define G_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKINSMODE) & M_CPL_TX_PKT_XT_ROCECHKINSMODE)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0x3f
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+
+/* cpl_tx_pkt_xt.ctrl2 fields */
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 30
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 0x3
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+
+#define S_CPL_TX_PKT_XT_CHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_CHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTARTOFFSET) & M_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPHDRLEN 20
+#define M_CPL_TX_PKT_XT_IPHDRLEN 0xfff
+#define V_CPL_TX_PKT_XT_IPHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_IPHDRLEN)
+#define G_CPL_TX_PKT_XT_IPHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPHDRLEN) & M_CPL_TX_PKT_XT_IPHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET) & \
+ M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_CHKSTOPOFFSET 12
+#define M_CPL_TX_PKT_XT_CHKSTOPOFFSET 0xff
+#define V_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTOPOFFSET) & M_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPSECIDX 0
+#define M_CPL_TX_PKT_XT_IPSECIDX 0xfff
+#define V_CPL_TX_PKT_XT_IPSECIDX(x) ((x) << S_CPL_TX_PKT_XT_IPSECIDX)
+#define G_CPL_TX_PKT_XT_IPSECIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPSECIDX) & M_CPL_TX_PKT_XT_IPSECIDX)
+
+#define S_CPL_TX_TNL_LSO_BTH_OPCODE 24
+#define M_CPL_TX_TNL_LSO_BTH_OPCODE 0xff
+#define V_CPL_TX_TNL_LSO_BTH_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_BTH_OPCODE)
+#define G_CPL_TX_TNL_LSO_BTH_OPCODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_BTH_OPCODE) & \
+ M_CPL_TX_TNL_LSO_BTH_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0
+#define M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0xffffff
+#define V_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+#define G_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN) & \
+ M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+
+#define S_CPL_TX_TNL_LSO_MSS_TVER 8
+#define M_CPL_TX_TNL_LSO_MSS_TVER 0xf
+#define V_CPL_TX_TNL_LSO_MSS_TVER(x) ((x) << S_CPL_TX_TNL_LSO_MSS_TVER)
+#define G_CPL_TX_TNL_LSO_MSS_TVER(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_TVER) & M_CPL_TX_TNL_LSO_MSS_TVER)
+
+#define S_CPL_TX_TNL_LSO_MSS_M 7
+#define M_CPL_TX_TNL_LSO_MSS_M 0x1
+#define V_CPL_TX_TNL_LSO_MSS_M(x) ((x) << S_CPL_TX_TNL_LSO_MSS_M)
+#define G_CPL_TX_TNL_LSO_MSS_M(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_M) & M_CPL_TX_TNL_LSO_MSS_M)
+
+#define S_CPL_TX_TNL_LSO_MSS_PMTU 4
+#define M_CPL_TX_TNL_LSO_MSS_PMTU 0x7
+#define V_CPL_TX_TNL_LSO_MSS_PMTU(x) ((x) << S_CPL_TX_TNL_LSO_MSS_PMTU)
+#define G_CPL_TX_TNL_LSO_MSS_PMTU(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_PMTU) & M_CPL_TX_TNL_LSO_MSS_PMTU)
+
+#define S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 3
+#define M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 0x1
+#define V_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ ((x) << S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+#define G_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR) & M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+
+#define S_CPL_TX_TNL_LSO_MSS_ACKREQ 1
+#define M_CPL_TX_TNL_LSO_MSS_ACKREQ 0x3
+#define V_CPL_TX_TNL_LSO_MSS_ACKREQ(x) ((x) << S_CPL_TX_TNL_LSO_MSS_ACKREQ)
+#define G_CPL_TX_TNL_LSO_MSS_ACKREQ(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_ACKREQ) & M_CPL_TX_TNL_LSO_MSS_ACKREQ)
+
+#define S_CPL_TX_TNL_LSO_MSS_SE 0
+#define M_CPL_TX_TNL_LSO_MSS_SE 0x1
+#define V_CPL_TX_TNL_LSO_MSS_SE(x) ((x) << S_CPL_TX_TNL_LSO_MSS_SE)
+#define G_CPL_TX_TNL_LSO_MSS_SE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_SE) & M_CPL_TX_TNL_LSO_MSS_SE)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
__be16 ipid_ofst;
@@ -1600,6 +2180,100 @@ struct cpl_tx_data_iso {
(((x) >> S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) & \
M_CPL_TX_DATA_ISO_SEGLEN_OFFSET)
+struct cpl_t7_tx_data_iso {
+ __be32 op_to_scsi;
+ __u8 nvme_tcp_pkd;
+ __u8 ahs;
+ __be16 mpdu;
+ __be32 burst;
+ __be32 size;
+ __be32 num_pi_bytes_seglen_offset;
+ __be32 datasn_offset;
+ __be32 buffer_offset;
+ __be32 reserved3;
+};
+
+#define S_CPL_T7_TX_DATA_ISO_OPCODE 24
+#define M_CPL_T7_TX_DATA_ISO_OPCODE 0xff
+#define V_CPL_T7_TX_DATA_ISO_OPCODE(x) ((x) << S_CPL_T7_TX_DATA_ISO_OPCODE)
+#define G_CPL_T7_TX_DATA_ISO_OPCODE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_OPCODE) & M_CPL_T7_TX_DATA_ISO_OPCODE)
+
+#define S_CPL_T7_TX_DATA_ISO_FIRST 23
+#define M_CPL_T7_TX_DATA_ISO_FIRST 0x1
+#define V_CPL_T7_TX_DATA_ISO_FIRST(x) ((x) << S_CPL_T7_TX_DATA_ISO_FIRST)
+#define G_CPL_T7_TX_DATA_ISO_FIRST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_FIRST) & M_CPL_T7_TX_DATA_ISO_FIRST)
+#define F_CPL_T7_TX_DATA_ISO_FIRST V_CPL_T7_TX_DATA_ISO_FIRST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_LAST 22
+#define M_CPL_T7_TX_DATA_ISO_LAST 0x1
+#define V_CPL_T7_TX_DATA_ISO_LAST(x) ((x) << S_CPL_T7_TX_DATA_ISO_LAST)
+#define G_CPL_T7_TX_DATA_ISO_LAST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_LAST) & M_CPL_T7_TX_DATA_ISO_LAST)
+#define F_CPL_T7_TX_DATA_ISO_LAST V_CPL_T7_TX_DATA_ISO_LAST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_CPLHDRLEN 21
+#define M_CPL_T7_TX_DATA_ISO_CPLHDRLEN 0x1
+#define V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define G_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_CPLHDRLEN) & M_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define F_CPL_T7_TX_DATA_ISO_CPLHDRLEN V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_HDRCRC 20
+#define M_CPL_T7_TX_DATA_ISO_HDRCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_HDRCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define G_CPL_T7_TX_DATA_ISO_HDRCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_HDRCRC) & M_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define F_CPL_T7_TX_DATA_ISO_HDRCRC V_CPL_T7_TX_DATA_ISO_HDRCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_PLDCRC 19
+#define M_CPL_T7_TX_DATA_ISO_PLDCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_PLDCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define G_CPL_T7_TX_DATA_ISO_PLDCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_PLDCRC) & M_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define F_CPL_T7_TX_DATA_ISO_PLDCRC V_CPL_T7_TX_DATA_ISO_PLDCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_IMMEDIATE 18
+#define M_CPL_T7_TX_DATA_ISO_IMMEDIATE 0x1
+#define V_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define G_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_IMMEDIATE) & M_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define F_CPL_T7_TX_DATA_ISO_IMMEDIATE \
+ V_CPL_T7_TX_DATA_ISO_IMMEDIATE(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_SCSI 16
+#define M_CPL_T7_TX_DATA_ISO_SCSI 0x3
+#define V_CPL_T7_TX_DATA_ISO_SCSI(x) ((x) << S_CPL_T7_TX_DATA_ISO_SCSI)
+#define G_CPL_T7_TX_DATA_ISO_SCSI(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_SCSI) & M_CPL_T7_TX_DATA_ISO_SCSI)
+
+#define S_CPL_T7_TX_DATA_ISO_NVME_TCP 0
+#define M_CPL_T7_TX_DATA_ISO_NVME_TCP 0x1
+#define V_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define G_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NVME_TCP) & M_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define F_CPL_T7_TX_DATA_ISO_NVME_TCP \
+ V_CPL_T7_TX_DATA_ISO_NVME_TCP(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_NUMPIBYTES 24
+#define M_CPL_T7_TX_DATA_ISO_NUMPIBYTES 0xff
+#define V_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+#define G_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NUMPIBYTES) & M_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+
+#define S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0
+#define M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0xffffff
+#define V_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+#define G_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET) & \
+ M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+
struct cpl_iscsi_hdr {
RSS_HDR
union opcode_tid ot;
@@ -2324,6 +2998,18 @@ struct cpl_l2t_write_req {
#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
#define F_L2T_W_NOREPLY V_L2T_W_NOREPLY(1U)
+
+/* cpl_l2t_write_req.vlan fields */
+#define S_L2T_VLANTAG 0
+#define M_L2T_VLANTAG 0xFFF
+#define V_L2T_VLANTAG(x) ((x) << S_L2T_VLANTAG)
+#define G_L2T_VLANTAG(x) (((x) >> S_L2T_VLANTAG) & M_L2T_VLANTAG)
+
+#define S_L2T_VLANPRIO 13
+#define M_L2T_VLANPRIO 0x7
+#define V_L2T_VLANPRIO(x) ((x) << S_L2T_VLANPRIO)
+#define G_L2T_VLANPRIO(x) (((x) >> S_L2T_VLANPRIO) & M_L2T_VLANPRIO)
+
#define CPL_L2T_VLAN_NONE 0xfff
struct cpl_l2t_write_rpl {
@@ -2400,6 +3086,175 @@ struct cpl_srq_table_rpl {
#define V_SRQT_IDX(x) ((x) << S_SRQT_IDX)
#define G_SRQT_IDX(x) (((x) >> S_SRQT_IDX) & M_SRQT_IDX)
+struct cpl_t7_srq_table_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_index;
+ __be16 srqlimit_pkd;
+ __be16 cqid;
+ __be16 xdid;
+ __be16 pdid;
+ __be32 quelen_quebase;
+ __be32 curmsn_maxmsn;
+};
+
+#define S_CPL_T7_SRQ_TABLE_REQ_NOREPLY 31
+#define M_CPL_T7_SRQ_TABLE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define G_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_NOREPLY) & M_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define F_CPL_T7_SRQ_TABLE_REQ_NOREPLY \
+ V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_WRITE 30
+#define M_CPL_T7_SRQ_TABLE_REQ_WRITE 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_WRITE(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define G_CPL_T7_SRQ_TABLE_REQ_WRITE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_WRITE) & M_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define F_CPL_T7_SRQ_TABLE_REQ_WRITE V_CPL_T7_SRQ_TABLE_REQ_WRITE(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INCR 28
+#define M_CPL_T7_SRQ_TABLE_REQ_INCR 0x3
+#define V_CPL_T7_SRQ_TABLE_REQ_INCR(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INCR)
+#define G_CPL_T7_SRQ_TABLE_REQ_INCR(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INCR) & M_CPL_T7_SRQ_TABLE_REQ_INCR)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_OVER 24
+#define M_CPL_T7_SRQ_TABLE_REQ_OVER 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_OVER(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_OVER)
+#define G_CPL_T7_SRQ_TABLE_REQ_OVER(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_OVER) & M_CPL_T7_SRQ_TABLE_REQ_OVER)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 23
+#define M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define G_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD) & M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define F_CPL_T7_SRQ_TABLE_REQ_LIMITUPD V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_REQ_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_REQ_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INDEX)
+#define G_CPL_T7_SRQ_TABLE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INDEX) & M_CPL_T7_SRQ_TABLE_REQ_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_REQ_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUELEN) & M_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUEBASE) & M_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_REQ_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_CURMSN) & M_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_MAXMSN) & M_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+
+struct cpl_t7_srq_table_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 status_index;
+ __be16 srqlimit_pkd;
+ __be16 cqid;
+ __be16 xdid;
+ __be16 pdid;
+ __be32 quelen_quebase;
+ __be32 curmsn_maxmsn;
+};
+
+#define S_CPL_T7_SRQ_TABLE_RPL_STATUS 24
+#define M_CPL_T7_SRQ_TABLE_RPL_STATUS 0xff
+#define V_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_STATUS)
+#define G_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_STATUS) & M_CPL_T7_SRQ_TABLE_RPL_STATUS)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_RPL_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_RPL_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_RPL_INDEX)
+#define G_CPL_T7_SRQ_TABLE_RPL_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_INDEX) & M_CPL_T7_SRQ_TABLE_RPL_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_RPL_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUELEN) & M_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUEBASE) & M_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_RPL_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_CURMSN) & M_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_MAXMSN) & M_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+
+struct cpl_rdma_async_event {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 EventInfo;
+};
+
+#define S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 16
+#define M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 0xf
+#define V_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ ((x) << S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+#define G_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE) & \
+ M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+
+#define S_CPL_RDMA_ASYNC_EVENT_INDEX 0
+#define M_CPL_RDMA_ASYNC_EVENT_INDEX 0xffff
+#define V_CPL_RDMA_ASYNC_EVENT_INDEX(x) ((x) << S_CPL_RDMA_ASYNC_EVENT_INDEX)
+#define G_CPL_RDMA_ASYNC_EVENT_INDEX(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_INDEX) & M_CPL_RDMA_ASYNC_EVENT_INDEX)
+
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2479,6 +3334,118 @@ struct cpl_smt_read_rpl {
#define V_SMTW_VF_VLD(x) ((x) << S_SMTW_VF_VLD)
#define F_SMTW_VF_VLD V_SMTW_VF_VLD(1U)
+struct cpl_t7_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_mtu;
+ union smt_write_req {
+ struct smt_write_req_pfvf {
+ __be64 tagvalue;
+ __be32 pfvf_smac_hi;
+ __be32 smac_lo;
+ __be64 tagext;
+ } pfvf;
+ struct smt_write_req_ipv4 {
+ __be32 srcipv4;
+ __be32 destipv4;
+ } ipv4;
+ struct smt_write_req_ipv6 {
+ __be64 ipv6ms;
+ __be64 ipv6ls;
+ } ipv6;
+ } u;
+};
+
+#define S_CPL_T7_SMT_WRITE_REQ_NOREPLY 31
+#define M_CPL_T7_SMT_WRITE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define G_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_NOREPLY) & M_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define F_CPL_T7_SMT_WRITE_REQ_NOREPLY \
+ V_CPL_T7_SMT_WRITE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGINSERT 30
+#define M_CPL_T7_SMT_WRITE_REQ_TAGINSERT 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGINSERT) & \
+ M_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define F_CPL_T7_SMT_WRITE_REQ_TAGINSERT \
+ V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGTYPE 28
+#define M_CPL_T7_SMT_WRITE_REQ_TAGTYPE 0x3
+#define V_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGTYPE) & M_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+
+#define S_CPL_T7_SMT_WRITE_REQ_INDEX 20
+#define M_CPL_T7_SMT_WRITE_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_WRITE_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_INDEX)
+#define G_CPL_T7_SMT_WRITE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_INDEX) & M_CPL_T7_SMT_WRITE_REQ_INDEX)
+
+#define S_CPL_T7_SMT_WRITE_REQ_OVLAN 16
+#define M_CPL_T7_SMT_WRITE_REQ_OVLAN 0xf
+#define V_CPL_T7_SMT_WRITE_REQ_OVLAN(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_OVLAN)
+#define G_CPL_T7_SMT_WRITE_REQ_OVLAN(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_OVLAN) & M_CPL_T7_SMT_WRITE_REQ_OVLAN)
+
+#define S_CPL_T7_SMT_WRITE_REQ_IPSEC 14
+#define M_CPL_T7_SMT_WRITE_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define G_CPL_T7_SMT_WRITE_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_IPSEC) & M_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define F_CPL_T7_SMT_WRITE_REQ_IPSEC V_CPL_T7_SMT_WRITE_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_MTU 0
+#define M_CPL_T7_SMT_WRITE_REQ_MTU 0x3fff
+#define V_CPL_T7_SMT_WRITE_REQ_MTU(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_MTU)
+#define G_CPL_T7_SMT_WRITE_REQ_MTU(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_MTU) & M_CPL_T7_SMT_WRITE_REQ_MTU)
+
+#define S_CPL_T7_SMT_WRITE_REQ_PFVF 16
+#define M_CPL_T7_SMT_WRITE_REQ_PFVF 0xfff
+#define V_CPL_T7_SMT_WRITE_REQ_PFVF(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_PFVF)
+#define G_CPL_T7_SMT_WRITE_REQ_PFVF(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_PFVF) & M_CPL_T7_SMT_WRITE_REQ_PFVF)
+
+#define S_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0
+#define M_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0xffff
+#define V_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+#define G_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_SMAC_HI) & M_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+
+struct cpl_t7_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 index_to_ipsecidx;
+};
+
+#define S_CPL_T7_SMT_READ_REQ_INDEX 20
+#define M_CPL_T7_SMT_READ_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_READ_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_READ_REQ_INDEX)
+#define G_CPL_T7_SMT_READ_REQ_INDEX(x) \
+	(((x) >> S_CPL_T7_SMT_READ_REQ_INDEX) & M_CPL_T7_SMT_READ_REQ_INDEX)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSEC 14
+#define M_CPL_T7_SMT_READ_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_READ_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_READ_REQ_IPSEC)
+#define G_CPL_T7_SMT_READ_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSEC) & M_CPL_T7_SMT_READ_REQ_IPSEC)
+#define F_CPL_T7_SMT_READ_REQ_IPSEC V_CPL_T7_SMT_READ_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSECIDX 0
+#define M_CPL_T7_SMT_READ_REQ_IPSECIDX 0x1fff
+#define V_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ ((x) << S_CPL_T7_SMT_READ_REQ_IPSECIDX)
+#define G_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSECIDX) & M_CPL_T7_SMT_READ_REQ_IPSECIDX)
+
struct cpl_tag_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2611,6 +3578,352 @@ struct cpl_pkt_notify {
#define V_NTFY_T5_ETHHDR_LEN(x) ((x) << S_NTFY_T5_ETHHDR_LEN)
#define G_NTFY_T5_ETHHDR_LEN(x) (((x) >> S_NTFY_T5_ETHHDR_LEN) & M_NTFY_T5_ETHHDR_LEN)
+/*
+ * CPL_T7_PKT_NOTIFY: T7 packet-notification message; header lengths,
+ * interface and MAC index are packed into ethhdrlen_to_macindex
+ * (accessor macros below).
+ */
+struct cpl_t7_pkt_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r1;
+ __be16 length;
+ __be32 ethhdrlen_to_macindex;
+ __be32 lineinfo;
+};
+
+/* Bit-field accessors for cpl_t7_pkt_notify.ethhdrlen_to_macindex. */
+#define S_CPL_T7_PKT_NOTIFY_ETHHDRLEN 24
+#define M_CPL_T7_PKT_NOTIFY_ETHHDRLEN 0xff
+#define V_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_ETHHDRLEN) & M_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_IPHDRLEN 18
+#define M_CPL_T7_PKT_NOTIFY_IPHDRLEN 0x3f
+#define V_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) ((x) << S_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_IPHDRLEN) & M_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_TCPHDRLEN 14
+#define M_CPL_T7_PKT_NOTIFY_TCPHDRLEN 0xf
+#define V_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_TCPHDRLEN) & M_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_INTERFACE 10
+#define M_CPL_T7_PKT_NOTIFY_INTERFACE 0xf
+#define V_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_INTERFACE)
+#define G_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_INTERFACE) & M_CPL_T7_PKT_NOTIFY_INTERFACE)
+
+#define S_CPL_T7_PKT_NOTIFY_MACINDEX 0
+#define M_CPL_T7_PKT_NOTIFY_MACINDEX 0x1ff
+#define V_CPL_T7_PKT_NOTIFY_MACINDEX(x) ((x) << S_CPL_T7_PKT_NOTIFY_MACINDEX)
+#define G_CPL_T7_PKT_NOTIFY_MACINDEX(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_MACINDEX) & M_CPL_T7_PKT_NOTIFY_MACINDEX)
+
+/*
+ * CPL_RDMA_CQE: RDMA completion-queue entry message.  TID/FLITCNT are
+ * packed into tid_flitcnt; QPID, generation bit, status, CQE type and
+ * WR type into qpid_to_wr_type (accessor macros below).
+ */
+struct cpl_rdma_cqe {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_RSSCTRL 16
+#define M_CPL_RDMA_CQE_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_RSSCTRL)
+#define G_CPL_RDMA_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_RSSCTRL) & M_CPL_RDMA_CQE_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_CQID 0
+#define M_CPL_RDMA_CQE_CQID 0xffff
+#define V_CPL_RDMA_CQE_CQID(x) ((x) << S_CPL_RDMA_CQE_CQID)
+#define G_CPL_RDMA_CQE_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQID) & M_CPL_RDMA_CQE_CQID)
+
+/* Fields of cpl_rdma_cqe.tid_flitcnt. */
+#define S_CPL_RDMA_CQE_TID 8
+#define M_CPL_RDMA_CQE_TID 0xfffff
+#define V_CPL_RDMA_CQE_TID(x) ((x) << S_CPL_RDMA_CQE_TID)
+#define G_CPL_RDMA_CQE_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_TID) & M_CPL_RDMA_CQE_TID)
+
+#define S_CPL_RDMA_CQE_FLITCNT 0
+#define M_CPL_RDMA_CQE_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_FLITCNT)
+#define G_CPL_RDMA_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FLITCNT) & M_CPL_RDMA_CQE_FLITCNT)
+
+/* Fields of cpl_rdma_cqe.qpid_to_wr_type. */
+#define S_CPL_RDMA_CQE_QPID 12
+#define M_CPL_RDMA_CQE_QPID 0xfffff
+#define V_CPL_RDMA_CQE_QPID(x) ((x) << S_CPL_RDMA_CQE_QPID)
+#define G_CPL_RDMA_CQE_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_QPID) & M_CPL_RDMA_CQE_QPID)
+
+#define S_CPL_RDMA_CQE_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_GENERATION_BIT) & M_CPL_RDMA_CQE_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_GENERATION_BIT V_CPL_RDMA_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_STATUS 5
+#define M_CPL_RDMA_CQE_STATUS 0x1f
+#define V_CPL_RDMA_CQE_STATUS(x) ((x) << S_CPL_RDMA_CQE_STATUS)
+#define G_CPL_RDMA_CQE_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_STATUS) & M_CPL_RDMA_CQE_STATUS)
+
+#define S_CPL_RDMA_CQE_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_CQE_TYPE)
+#define G_CPL_RDMA_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQE_TYPE) & M_CPL_RDMA_CQE_CQE_TYPE)
+#define F_CPL_RDMA_CQE_CQE_TYPE V_CPL_RDMA_CQE_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_WR_TYPE 0
+#define M_CPL_RDMA_CQE_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_WR_TYPE)
+#define G_CPL_RDMA_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_WR_TYPE) & M_CPL_RDMA_CQE_WR_TYPE)
+
+/*
+ * CPL_RDMA_CQE_SRQ: RDMA CQE variant carrying a shared-receive-queue
+ * entry (rqe); field packing mirrors cpl_rdma_cqe.
+ */
+struct cpl_rdma_cqe_srq {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 r3;
+ __be32 rqe;
+};
+
+#define S_CPL_RDMA_CQE_SRQ_OPCODE 24
+#define M_CPL_RDMA_CQE_SRQ_OPCODE 0xff
+#define V_CPL_RDMA_CQE_SRQ_OPCODE(x) ((x) << S_CPL_RDMA_CQE_SRQ_OPCODE)
+#define G_CPL_RDMA_CQE_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_OPCODE) & M_CPL_RDMA_CQE_SRQ_OPCODE)
+
+#define S_CPL_RDMA_CQE_SRQ_RSSCTRL 16
+#define M_CPL_RDMA_CQE_SRQ_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_SRQ_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_SRQ_RSSCTRL)
+#define G_CPL_RDMA_CQE_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_RSSCTRL) & M_CPL_RDMA_CQE_SRQ_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_SRQ_CQID 0
+#define M_CPL_RDMA_CQE_SRQ_CQID 0xffff
+#define V_CPL_RDMA_CQE_SRQ_CQID(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQID)
+#define G_CPL_RDMA_CQE_SRQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQID) & M_CPL_RDMA_CQE_SRQ_CQID)
+
+#define S_CPL_RDMA_CQE_SRQ_TID 8
+#define M_CPL_RDMA_CQE_SRQ_TID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_TID(x) ((x) << S_CPL_RDMA_CQE_SRQ_TID)
+#define G_CPL_RDMA_CQE_SRQ_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_TID) & M_CPL_RDMA_CQE_SRQ_TID)
+
+#define S_CPL_RDMA_CQE_SRQ_FLITCNT 0
+#define M_CPL_RDMA_CQE_SRQ_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_SRQ_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_SRQ_FLITCNT)
+#define G_CPL_RDMA_CQE_SRQ_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_FLITCNT) & M_CPL_RDMA_CQE_SRQ_FLITCNT)
+
+#define S_CPL_RDMA_CQE_SRQ_QPID 12
+#define M_CPL_RDMA_CQE_SRQ_QPID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_QPID(x) ((x) << S_CPL_RDMA_CQE_SRQ_QPID)
+#define G_CPL_RDMA_CQE_SRQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_QPID) & M_CPL_RDMA_CQE_SRQ_QPID)
+
+#define S_CPL_RDMA_CQE_SRQ_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_SRQ_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_SRQ_GENERATION_BIT \
+ V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_STATUS 5
+#define M_CPL_RDMA_CQE_SRQ_STATUS 0x1f
+#define V_CPL_RDMA_CQE_SRQ_STATUS(x) ((x) << S_CPL_RDMA_CQE_SRQ_STATUS)
+#define G_CPL_RDMA_CQE_SRQ_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_STATUS) & M_CPL_RDMA_CQE_SRQ_STATUS)
+
+#define S_CPL_RDMA_CQE_SRQ_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_SRQ_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQE_TYPE) & M_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define F_CPL_RDMA_CQE_SRQ_CQE_TYPE V_CPL_RDMA_CQE_SRQ_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_WR_TYPE 0
+#define M_CPL_RDMA_CQE_SRQ_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_SRQ_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_WR_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_WR_TYPE) & M_CPL_RDMA_CQE_SRQ_WR_TYPE)
+
+/*
+ * CPL_RDMA_CQE_READ_RSP: RDMA CQE for a read response; field packing
+ * mirrors cpl_rdma_cqe.
+ */
+struct cpl_rdma_cqe_read_rsp {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_READ_RSP_RSSCTRL 16
+#define M_CPL_RDMA_CQE_READ_RSP_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+#define G_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_RSSCTRL) & \
+ M_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQID 0
+#define M_CPL_RDMA_CQE_READ_RSP_CQID 0xffff
+#define V_CPL_RDMA_CQE_READ_RSP_CQID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_CQID)
+#define G_CPL_RDMA_CQE_READ_RSP_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQID) & M_CPL_RDMA_CQE_READ_RSP_CQID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_TID 8
+#define M_CPL_RDMA_CQE_READ_RSP_TID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_TID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_TID)
+#define G_CPL_RDMA_CQE_READ_RSP_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_TID) & M_CPL_RDMA_CQE_READ_RSP_TID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_FLITCNT 0
+#define M_CPL_RDMA_CQE_READ_RSP_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+#define G_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_FLITCNT) & \
+ M_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+
+#define S_CPL_RDMA_CQE_READ_RSP_QPID 12
+#define M_CPL_RDMA_CQE_READ_RSP_QPID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_QPID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_QPID)
+#define G_CPL_RDMA_CQE_READ_RSP_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_QPID) & M_CPL_RDMA_CQE_READ_RSP_QPID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT \
+ V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_STATUS 5
+#define M_CPL_RDMA_CQE_READ_RSP_STATUS 0x1f
+#define V_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_STATUS)
+#define G_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_STATUS) & M_CPL_RDMA_CQE_READ_RSP_STATUS)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define F_CPL_RDMA_CQE_READ_RSP_CQE_TYPE V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0
+#define M_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_WR_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+
+/*
+ * CPL_RDMA_CQE_ERR: RDMA CQE reporting an error completion; field
+ * packing mirrors cpl_rdma_cqe (5-bit STATUS carries the error code).
+ */
+struct cpl_rdma_cqe_err {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_ERR_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_ERR_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_RSSCTRL) & M_CPL_RDMA_CQE_ERR_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_CQID 0
+#define M_CPL_RDMA_CQE_ERR_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_CQID)
+#define G_CPL_RDMA_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQID) & M_CPL_RDMA_CQE_ERR_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_TID 8
+#define M_CPL_RDMA_CQE_ERR_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_TID)
+#define G_CPL_RDMA_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_TID) & M_CPL_RDMA_CQE_ERR_TID)
+
+#define S_CPL_RDMA_CQE_ERR_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_ERR_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_FLITCNT) & M_CPL_RDMA_CQE_ERR_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_QPID 12
+#define M_CPL_RDMA_CQE_ERR_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_QPID)
+#define G_CPL_RDMA_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_QPID) & M_CPL_RDMA_CQE_ERR_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_STATUS(x) ((x) << S_CPL_RDMA_CQE_ERR_STATUS)
+#define G_CPL_RDMA_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_STATUS) & M_CPL_RDMA_CQE_ERR_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQE_TYPE) & M_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_CQE_TYPE V_CPL_RDMA_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_WR_TYPE) & M_CPL_RDMA_CQE_ERR_WR_TYPE)
+
+/*
+ * CPL_RDMA_READ_REQ: RDMA read request; the 12-bit SRQ index is packed
+ * into srq_pkd.
+ */
+struct cpl_rdma_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 srq_pkd;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_READ_REQ_SRQ 0
+#define M_CPL_RDMA_READ_REQ_SRQ 0xfff
+#define V_CPL_RDMA_READ_REQ_SRQ(x) ((x) << S_CPL_RDMA_READ_REQ_SRQ)
+#define G_CPL_RDMA_READ_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_READ_REQ_SRQ) & M_CPL_RDMA_READ_REQ_SRQ)
+
struct cpl_rdma_terminate {
RSS_HDR
union opcode_tid ot;
@@ -2618,6 +3931,404 @@ struct cpl_rdma_terminate {
__be16 len;
};
+/*
+ * CPL_RDMA_ATOMIC_REQ / CPL_RDMA_ATOMIC_RPL: RDMA atomic operation
+ * request and reply; a 4-bit OPCODE and 12-bit SRQ index are packed
+ * into opcode_srq.
+ */
+struct cpl_rdma_atomic_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 opcode_srq;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_REQ_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_REQ_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_REQ_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_OPCODE)
+#define G_CPL_RDMA_ATOMIC_REQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_OPCODE) & M_CPL_RDMA_ATOMIC_REQ_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_REQ_SRQ 0
+#define M_CPL_RDMA_ATOMIC_REQ_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_REQ_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_SRQ)
+#define G_CPL_RDMA_ATOMIC_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_SRQ) & M_CPL_RDMA_ATOMIC_REQ_SRQ)
+
+struct cpl_rdma_atomic_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 opcode_srq;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_RPL_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_RPL_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_RPL_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_OPCODE)
+#define G_CPL_RDMA_ATOMIC_RPL_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_OPCODE) & M_CPL_RDMA_ATOMIC_RPL_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_RPL_SRQ 0
+#define M_CPL_RDMA_ATOMIC_RPL_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_RPL_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_SRQ)
+#define G_CPL_RDMA_ATOMIC_RPL_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_SRQ) & M_CPL_RDMA_ATOMIC_RPL_SRQ)
+
+/* CPL_RDMA_IMM_DATA: RDMA immediate-data notification. */
+struct cpl_rdma_imm_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r;
+ __be16 Length;
+};
+
+/* CPL_RDMA_IMM_DATA_SE: as above, with solicited-event semantics
+ * implied by the message opcode (layout is identical). */
+struct cpl_rdma_imm_data_se {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r;
+ __be16 Length;
+};
+
+/*
+ * CPL_RDMA_INV_REQ: STag invalidate request; the PDID is split across
+ * two words (high 8 bits in cqid_pdid_hi, low 12 bits in pdid_lo_qpid).
+ */
+struct cpl_rdma_inv_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 stag;
+ __be32 cqid_pdid_hi;
+ __be32 pdid_lo_qpid;
+};
+
+#define S_CPL_RDMA_INV_REQ_CQID 8
+#define M_CPL_RDMA_INV_REQ_CQID 0xfffff
+#define V_CPL_RDMA_INV_REQ_CQID(x) ((x) << S_CPL_RDMA_INV_REQ_CQID)
+#define G_CPL_RDMA_INV_REQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_CQID) & M_CPL_RDMA_INV_REQ_CQID)
+
+#define S_CPL_RDMA_INV_REQ_PDID_HI 0
+#define M_CPL_RDMA_INV_REQ_PDID_HI 0xff
+#define V_CPL_RDMA_INV_REQ_PDID_HI(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_HI)
+#define G_CPL_RDMA_INV_REQ_PDID_HI(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_HI) & M_CPL_RDMA_INV_REQ_PDID_HI)
+
+#define S_CPL_RDMA_INV_REQ_PDID_LO 20
+#define M_CPL_RDMA_INV_REQ_PDID_LO 0xfff
+#define V_CPL_RDMA_INV_REQ_PDID_LO(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_LO)
+#define G_CPL_RDMA_INV_REQ_PDID_LO(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_LO) & M_CPL_RDMA_INV_REQ_PDID_LO)
+
+#define S_CPL_RDMA_INV_REQ_QPID 0
+#define M_CPL_RDMA_INV_REQ_QPID 0xfffff
+#define V_CPL_RDMA_INV_REQ_QPID(x) ((x) << S_CPL_RDMA_INV_REQ_QPID)
+#define G_CPL_RDMA_INV_REQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_QPID) & M_CPL_RDMA_INV_REQ_QPID)
+
+/*
+ * CPL_RDMA_CQE_EXT: extended-mode RDMA CQE carrying extra info words
+ * (extinfoms/extinfols) plus SE, extended WR type and SRQ fields packed
+ * into se_to_srq.
+ */
+struct cpl_rdma_cqe_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_EXT_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_RSSCTRL) & M_CPL_RDMA_CQE_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_EXT_CQID 0
+#define M_CPL_RDMA_CQE_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_EXT_CQID)
+#define G_CPL_RDMA_CQE_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQID) & M_CPL_RDMA_CQE_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_EXT_TID 8
+#define M_CPL_RDMA_CQE_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_EXT_TID)
+#define G_CPL_RDMA_CQE_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_TID) & M_CPL_RDMA_CQE_EXT_TID)
+
+#define S_CPL_RDMA_CQE_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_EXT_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_FLITCNT) & M_CPL_RDMA_CQE_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_EXT_QPID 12
+#define M_CPL_RDMA_CQE_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_EXT_QPID)
+#define G_CPL_RDMA_CQE_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_QPID) & M_CPL_RDMA_CQE_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_EXT_STATUS)
+#define G_CPL_RDMA_CQE_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_STATUS) & M_CPL_RDMA_CQE_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_EXT_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_EXT_CQE_TYPE V_CPL_RDMA_CQE_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE) & M_CPL_RDMA_CQE_EXT_WR_TYPE)
+
+/* Fields of cpl_rdma_cqe_ext.se_to_srq. */
+#define S_CPL_RDMA_CQE_EXT_SE 31
+#define M_CPL_RDMA_CQE_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_EXT_SE)
+#define G_CPL_RDMA_CQE_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SE) & M_CPL_RDMA_CQE_EXT_SE)
+#define F_CPL_RDMA_CQE_EXT_SE V_CPL_RDMA_CQE_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT) & M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_EXT_SRQ)
+#define G_CPL_RDMA_CQE_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SRQ) & M_CPL_RDMA_CQE_EXT_SRQ)
+
+/*
+ * CPL_RDMA_CQE_FW_EXT: firmware-generated extended RDMA CQE; layout
+ * and field packing are identical to cpl_rdma_cqe_ext.
+ */
+struct cpl_rdma_cqe_fw_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_FW_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_FW_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_RSSCTRL) & M_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQID 0
+#define M_CPL_RDMA_CQE_FW_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_FW_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_CQID)
+#define G_CPL_RDMA_CQE_FW_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQID) & M_CPL_RDMA_CQE_FW_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_TID 8
+#define M_CPL_RDMA_CQE_FW_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_TID)
+#define G_CPL_RDMA_CQE_FW_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_TID) & M_CPL_RDMA_CQE_FW_EXT_TID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_FW_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_FLITCNT) & M_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_QPID 12
+#define M_CPL_RDMA_CQE_FW_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_QPID)
+#define G_CPL_RDMA_CQE_FW_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_QPID) & M_CPL_RDMA_CQE_FW_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_FW_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_FW_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_STATUS)
+#define G_CPL_RDMA_CQE_FW_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_STATUS) & M_CPL_RDMA_CQE_FW_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_FW_EXT_CQE_TYPE V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE) & M_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SE 31
+#define M_CPL_RDMA_CQE_FW_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SE)
+#define G_CPL_RDMA_CQE_FW_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SE) & M_CPL_RDMA_CQE_FW_EXT_SE)
+#define F_CPL_RDMA_CQE_FW_EXT_SE V_CPL_RDMA_CQE_FW_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_FW_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_FW_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SRQ)
+#define G_CPL_RDMA_CQE_FW_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SRQ) & M_CPL_RDMA_CQE_FW_EXT_SRQ)
+
+/*
+ * CPL_RDMA_CQE_ERR_EXT: extended RDMA error CQE; layout and field
+ * packing are identical to cpl_rdma_cqe_ext.
+ */
+struct cpl_rdma_cqe_err_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL) & M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQID 0
+#define M_CPL_RDMA_CQE_ERR_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQID)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQID) & M_CPL_RDMA_CQE_ERR_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_TID 8
+#define M_CPL_RDMA_CQE_ERR_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_TID)
+#define G_CPL_RDMA_CQE_ERR_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_TID) & M_CPL_RDMA_CQE_ERR_EXT_TID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_FLITCNT) & M_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_QPID 12
+#define M_CPL_RDMA_CQE_ERR_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_QPID)
+#define G_CPL_RDMA_CQE_ERR_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_QPID) & M_CPL_RDMA_CQE_ERR_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_STATUS)
+#define G_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_STATUS) & M_CPL_RDMA_CQE_ERR_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE) & M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SE 31
+#define M_CPL_RDMA_CQE_ERR_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SE)
+#define G_CPL_RDMA_CQE_ERR_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SE) & M_CPL_RDMA_CQE_ERR_EXT_SE)
+#define F_CPL_RDMA_CQE_ERR_EXT_SE V_CPL_RDMA_CQE_ERR_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_ERR_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_ERR_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SRQ)
+#define G_CPL_RDMA_CQE_ERR_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SRQ) & M_CPL_RDMA_CQE_ERR_EXT_SRQ)
+
struct cpl_set_le_req {
WR_HDR;
union opcode_tid ot;
@@ -2630,6 +4341,13 @@ struct cpl_set_le_req {
};
/* cpl_set_le_req.reply_ctrl additional fields */
+#define S_LE_REQ_RXCHANNEL 14
+#define M_LE_REQ_RXCHANNEL 0x1
+#define V_LE_REQ_RXCHANNEL(x) ((x) << S_LE_REQ_RXCHANNEL)
+#define G_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_LE_REQ_RXCHANNEL) & M_LE_REQ_RXCHANNEL)
+#define F_LE_REQ_RXCHANNEL V_LE_REQ_RXCHANNEL(1U)
+
#define S_LE_REQ_IP6 13
#define V_LE_REQ_IP6(x) ((x) << S_LE_REQ_IP6)
#define F_LE_REQ_IP6 V_LE_REQ_IP6(1U)
@@ -2659,6 +4377,80 @@ struct cpl_set_le_req {
#define V_LE_REQCMD(x) ((x) << S_LE_REQCMD)
#define G_LE_REQCMD(x) (((x) >> S_LE_REQCMD) & M_LE_REQCMD)
+/*
+ * CPL_T7_SET_LE_REQ: T7 lookup-engine (LE) write request; control bits
+ * (noreply, rx channel, queue, command, size, more, offset, type,
+ * channel) are packed into noreply_to_channel, followed by the
+ * mask/value pairs to program.
+ */
+struct cpl_t7_set_le_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_channel;
+ __be32 mask1[2];
+ __be32 mask0[2];
+ __be32 value1[2];
+ __be32 value0[2];
+};
+
+/* INDEX lives in the opcode_tid word. */
+#define S_CPL_T7_SET_LE_REQ_INDEX 0
+#define M_CPL_T7_SET_LE_REQ_INDEX 0xffffff
+#define V_CPL_T7_SET_LE_REQ_INDEX(x) ((x) << S_CPL_T7_SET_LE_REQ_INDEX)
+#define G_CPL_T7_SET_LE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_INDEX) & M_CPL_T7_SET_LE_REQ_INDEX)
+
+#define S_CPL_T7_SET_LE_REQ_NOREPLY 31
+#define M_CPL_T7_SET_LE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SET_LE_REQ_NOREPLY(x) ((x) << S_CPL_T7_SET_LE_REQ_NOREPLY)
+#define G_CPL_T7_SET_LE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_NOREPLY) & M_CPL_T7_SET_LE_REQ_NOREPLY)
+#define F_CPL_T7_SET_LE_REQ_NOREPLY V_CPL_T7_SET_LE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SET_LE_REQ_RXCHANNEL 28
+#define M_CPL_T7_SET_LE_REQ_RXCHANNEL 0x7
+#define V_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_SET_LE_REQ_RXCHANNEL)
+#define G_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_RXCHANNEL) & M_CPL_T7_SET_LE_REQ_RXCHANNEL)
+
+#define S_CPL_T7_SET_LE_REQ_QUEUE 16
+#define M_CPL_T7_SET_LE_REQ_QUEUE 0xfff
+#define V_CPL_T7_SET_LE_REQ_QUEUE(x) ((x) << S_CPL_T7_SET_LE_REQ_QUEUE)
+#define G_CPL_T7_SET_LE_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_QUEUE) & M_CPL_T7_SET_LE_REQ_QUEUE)
+
+#define S_CPL_T7_SET_LE_REQ_REQCMD 12
+#define M_CPL_T7_SET_LE_REQ_REQCMD 0xf
+#define V_CPL_T7_SET_LE_REQ_REQCMD(x) ((x) << S_CPL_T7_SET_LE_REQ_REQCMD)
+#define G_CPL_T7_SET_LE_REQ_REQCMD(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQCMD) & M_CPL_T7_SET_LE_REQ_REQCMD)
+
+#define S_CPL_T7_SET_LE_REQ_REQSIZE 9
+#define M_CPL_T7_SET_LE_REQ_REQSIZE 0x7
+#define V_CPL_T7_SET_LE_REQ_REQSIZE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQSIZE)
+#define G_CPL_T7_SET_LE_REQ_REQSIZE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQSIZE) & M_CPL_T7_SET_LE_REQ_REQSIZE)
+
+#define S_CPL_T7_SET_LE_REQ_MORE 8
+#define M_CPL_T7_SET_LE_REQ_MORE 0x1
+#define V_CPL_T7_SET_LE_REQ_MORE(x) ((x) << S_CPL_T7_SET_LE_REQ_MORE)
+#define G_CPL_T7_SET_LE_REQ_MORE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_MORE) & M_CPL_T7_SET_LE_REQ_MORE)
+#define F_CPL_T7_SET_LE_REQ_MORE V_CPL_T7_SET_LE_REQ_MORE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_OFFSET 5
+#define M_CPL_T7_SET_LE_REQ_OFFSET 0x7
+#define V_CPL_T7_SET_LE_REQ_OFFSET(x) ((x) << S_CPL_T7_SET_LE_REQ_OFFSET)
+#define G_CPL_T7_SET_LE_REQ_OFFSET(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_OFFSET) & M_CPL_T7_SET_LE_REQ_OFFSET)
+
+#define S_CPL_T7_SET_LE_REQ_REQTYPE 4
+#define M_CPL_T7_SET_LE_REQ_REQTYPE 0x1
+#define V_CPL_T7_SET_LE_REQ_REQTYPE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQTYPE)
+#define G_CPL_T7_SET_LE_REQ_REQTYPE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQTYPE) & M_CPL_T7_SET_LE_REQ_REQTYPE)
+#define F_CPL_T7_SET_LE_REQ_REQTYPE V_CPL_T7_SET_LE_REQ_REQTYPE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_CHANNEL 0
+#define M_CPL_T7_SET_LE_REQ_CHANNEL 0x3
+#define V_CPL_T7_SET_LE_REQ_CHANNEL(x) ((x) << S_CPL_T7_SET_LE_REQ_CHANNEL)
+#define G_CPL_T7_SET_LE_REQ_CHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_CHANNEL) & M_CPL_T7_SET_LE_REQ_CHANNEL)
+
struct cpl_set_le_rpl {
RSS_HDR
union opcode_tid ot;
@@ -2710,6 +4502,7 @@ enum {
FW_TYPE_WRERR_RPL = 5,
FW_TYPE_PI_ERR = 6,
FW_TYPE_TLS_KEY = 7,
+ FW_TYPE_IPSEC_SA = 8,
};
struct cpl_fw2_pld {
@@ -2811,6 +4604,8 @@ enum {
FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
FW6_TYPE_WRERR_RPL = FW_TYPE_WRERR_RPL,
FW6_TYPE_PI_ERR = FW_TYPE_PI_ERR,
+ FW6_TYPE_TLS_KEY = FW_TYPE_TLS_KEY,
+ FW6_TYPE_IPSEC_SA = FW_TYPE_IPSEC_SA,
NUM_FW6_TYPES
};
@@ -2932,6 +4727,10 @@ struct ulp_mem_io {
#define M_ULP_MEMIO_DATA_LEN 0x1F
#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+#define S_T7_ULP_MEMIO_DATA_LEN 0
+#define M_T7_ULP_MEMIO_DATA_LEN 0x7FF
+#define V_T7_ULP_MEMIO_DATA_LEN(x) ((x) << S_T7_ULP_MEMIO_DATA_LEN)
+
/* ULP_TXPKT field values */
enum {
ULP_TXPKT_DEST_TP = 0,
@@ -2960,11 +4759,25 @@ struct ulp_txpkt {
(((x) >> S_ULP_TXPKT_CHANNELID) & M_ULP_TXPKT_CHANNELID)
#define F_ULP_TXPKT_CHANNELID V_ULP_TXPKT_CHANNELID(1U)
+#define S_T7_ULP_TXPKT_CHANNELID 22
+#define M_T7_ULP_TXPKT_CHANNELID 0x3
+#define V_T7_ULP_TXPKT_CHANNELID(x) ((x) << S_T7_ULP_TXPKT_CHANNELID)
+#define G_T7_ULP_TXPKT_CHANNELID(x) \
+ (((x) >> S_T7_ULP_TXPKT_CHANNELID) & M_T7_ULP_TXPKT_CHANNELID)
+#define F_T7_ULP_TXPKT_CHANNELID V_T7_ULP_TXPKT_CHANNELID(1U)
+
/* ulp_txpkt.cmd_dest fields */
#define S_ULP_TXPKT_DEST 16
#define M_ULP_TXPKT_DEST 0x3
#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+#define S_ULP_TXPKT_CMDMORE 15
+#define M_ULP_TXPKT_CMDMORE 0x1
+#define V_ULP_TXPKT_CMDMORE(x) ((x) << S_ULP_TXPKT_CMDMORE)
+#define G_ULP_TXPKT_CMDMORE(x) \
+ (((x) >> S_ULP_TXPKT_CMDMORE) & M_ULP_TXPKT_CMDMORE)
+#define F_ULP_TXPKT_CMDMORE V_ULP_TXPKT_CMDMORE(1U)
+
#define S_ULP_TXPKT_FID 4
#define M_ULP_TXPKT_FID 0x7ff
#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
@@ -2978,13 +4791,15 @@ enum cpl_tx_tnl_lso_type {
TX_TNL_TYPE_NVGRE,
TX_TNL_TYPE_VXLAN,
TX_TNL_TYPE_GENEVE,
+ TX_TNL_TYPE_IPSEC,
};
struct cpl_tx_tnl_lso {
__be32 op_to_IpIdSplitOut;
__be16 IpIdOffsetOut;
__be16 UdpLenSetOut_to_TnlHdrLen;
- __be64 r1;
+ __be32 ipsecen_to_rocev2;
+ __be32 roce_eth;
__be32 Flow_to_TcpHdrLen;
__be16 IpIdOffset;
__be16 IpIdSplit_to_Mss;
@@ -3098,6 +4913,68 @@ struct cpl_tx_tnl_lso {
#define G_CPL_TX_TNL_LSO_TNLHDRLEN(x) \
(((x) >> S_CPL_TX_TNL_LSO_TNLHDRLEN) & M_CPL_TX_TNL_LSO_TNLHDRLEN)
+#define S_CPL_TX_TNL_LSO_IPSECEN 31
+#define M_CPL_TX_TNL_LSO_IPSECEN 0x1
+#define V_CPL_TX_TNL_LSO_IPSECEN(x) ((x) << S_CPL_TX_TNL_LSO_IPSECEN)
+#define G_CPL_TX_TNL_LSO_IPSECEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECEN) & M_CPL_TX_TNL_LSO_IPSECEN)
+#define F_CPL_TX_TNL_LSO_IPSECEN V_CPL_TX_TNL_LSO_IPSECEN(1U)
+
+#define S_CPL_TX_TNL_LSO_ENCAPDIS 30
+#define M_CPL_TX_TNL_LSO_ENCAPDIS 0x1
+#define V_CPL_TX_TNL_LSO_ENCAPDIS(x) ((x) << S_CPL_TX_TNL_LSO_ENCAPDIS)
+#define G_CPL_TX_TNL_LSO_ENCAPDIS(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ENCAPDIS) & M_CPL_TX_TNL_LSO_ENCAPDIS)
+#define F_CPL_TX_TNL_LSO_ENCAPDIS V_CPL_TX_TNL_LSO_ENCAPDIS(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECMODE 29
+#define M_CPL_TX_TNL_LSO_IPSECMODE 0x1
+#define V_CPL_TX_TNL_LSO_IPSECMODE(x) ((x) << S_CPL_TX_TNL_LSO_IPSECMODE)
+#define G_CPL_TX_TNL_LSO_IPSECMODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECMODE) & M_CPL_TX_TNL_LSO_IPSECMODE)
+#define F_CPL_TX_TNL_LSO_IPSECMODE V_CPL_TX_TNL_LSO_IPSECMODE(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPV6 28
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPV6 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPV6) & M_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPV6 V_CPL_TX_TNL_LSO_IPSECTNLIPV6(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 20
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 0xff
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 19
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT \
+ V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(1U)
+
+#define S_CPL_TX_TNL_LSO_ROCEV2 18
+#define M_CPL_TX_TNL_LSO_ROCEV2 0x1
+#define V_CPL_TX_TNL_LSO_ROCEV2(x) ((x) << S_CPL_TX_TNL_LSO_ROCEV2)
+#define G_CPL_TX_TNL_LSO_ROCEV2(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ROCEV2) & M_CPL_TX_TNL_LSO_ROCEV2)
+#define F_CPL_TX_TNL_LSO_ROCEV2 V_CPL_TX_TNL_LSO_ROCEV2(1U)
+
+#define S_CPL_TX_TNL_LSO_UDPCHKUPDOUT 17
+#define M_CPL_TX_TNL_LSO_UDPCHKUPDOUT 0x1
+#define V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define G_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_UDPCHKUPDOUT) & M_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define F_CPL_TX_TNL_LSO_UDPCHKUPDOUT V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(1U)
+
#define S_CPL_TX_TNL_LSO_FLOW 21
#define M_CPL_TX_TNL_LSO_FLOW 0x1
#define V_CPL_TX_TNL_LSO_FLOW(x) ((x) << S_CPL_TX_TNL_LSO_FLOW)
@@ -3180,6 +5057,12 @@ struct cpl_rx_mps_pkt {
#define G_CPL_RX_MPS_PKT_TYPE(x) \
(((x) >> S_CPL_RX_MPS_PKT_TYPE) & M_CPL_RX_MPS_PKT_TYPE)
+#define S_CPL_RX_MPS_PKT_LENGTH 0
+#define M_CPL_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_RX_MPS_PKT_LENGTH)
+#define G_CPL_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_RX_MPS_PKT_LENGTH) & M_CPL_RX_MPS_PKT_LENGTH)
+
/*
* Values for CPL_RX_MPS_PKT_TYPE, a bit-wise orthogonal field.
*/
@@ -3188,6 +5071,88 @@ struct cpl_rx_mps_pkt {
#define X_CPL_RX_MPS_PKT_TYPE_QFC (1 << 2)
#define X_CPL_RX_MPS_PKT_TYPE_PTP (1 << 3)
+struct cpl_t7_rx_mps_pkt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 length_pkd;
+};
+
+#define S_CPL_T7_RX_MPS_PKT_TYPE 20
+#define M_CPL_T7_RX_MPS_PKT_TYPE 0xf
+#define V_CPL_T7_RX_MPS_PKT_TYPE(x) ((x) << S_CPL_T7_RX_MPS_PKT_TYPE)
+#define G_CPL_T7_RX_MPS_PKT_TYPE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TYPE) & M_CPL_T7_RX_MPS_PKT_TYPE)
+
+#define S_CPL_T7_RX_MPS_PKT_INTERFACE 16
+#define M_CPL_T7_RX_MPS_PKT_INTERFACE 0xf
+#define V_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_INTERFACE)
+#define G_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_INTERFACE) & M_CPL_T7_RX_MPS_PKT_INTERFACE)
+
+#define S_CPL_T7_RX_MPS_PKT_TRUNCATED 7
+#define M_CPL_T7_RX_MPS_PKT_TRUNCATED 0x1
+#define V_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define G_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TRUNCATED) & M_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define F_CPL_T7_RX_MPS_PKT_TRUNCATED V_CPL_T7_RX_MPS_PKT_TRUNCATED(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_PKTERR 6
+#define M_CPL_T7_RX_MPS_PKT_PKTERR 0x1
+#define V_CPL_T7_RX_MPS_PKT_PKTERR(x) ((x) << S_CPL_T7_RX_MPS_PKT_PKTERR)
+#define G_CPL_T7_RX_MPS_PKT_PKTERR(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_PKTERR) & M_CPL_T7_RX_MPS_PKT_PKTERR)
+#define F_CPL_T7_RX_MPS_PKT_PKTERR V_CPL_T7_RX_MPS_PKT_PKTERR(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_LENGTH 0
+#define M_CPL_T7_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_T7_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_T7_RX_MPS_PKT_LENGTH)
+#define G_CPL_T7_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_LENGTH) & M_CPL_T7_RX_MPS_PKT_LENGTH)
+
+struct cpl_tx_tls_pdu {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 pldlen_pkd;
+ __be32 customtype_customprotover;
+ __be32 r2_lo;
+ __be32 scmd0[2];
+ __be32 scmd1[2];
+};
+
+#define S_CPL_TX_TLS_PDU_DATATYPE 20
+#define M_CPL_TX_TLS_PDU_DATATYPE 0xf
+#define V_CPL_TX_TLS_PDU_DATATYPE(x) ((x) << S_CPL_TX_TLS_PDU_DATATYPE)
+#define G_CPL_TX_TLS_PDU_DATATYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_DATATYPE) & M_CPL_TX_TLS_PDU_DATATYPE)
+
+#define S_CPL_TX_TLS_PDU_CPLLEN 16
+#define M_CPL_TX_TLS_PDU_CPLLEN 0xf
+#define V_CPL_TX_TLS_PDU_CPLLEN(x) ((x) << S_CPL_TX_TLS_PDU_CPLLEN)
+#define G_CPL_TX_TLS_PDU_CPLLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CPLLEN) & M_CPL_TX_TLS_PDU_CPLLEN)
+
+#define S_CPL_TX_TLS_PDU_PLDLEN 0
+#define M_CPL_TX_TLS_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_PDU_PLDLEN(x) ((x) << S_CPL_TX_TLS_PDU_PLDLEN)
+#define G_CPL_TX_TLS_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_PLDLEN) & M_CPL_TX_TLS_PDU_PLDLEN)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMTYPE 24
+#define M_CPL_TX_TLS_PDU_CUSTOMTYPE 0xff
+#define V_CPL_TX_TLS_PDU_CUSTOMTYPE(x) ((x) << S_CPL_TX_TLS_PDU_CUSTOMTYPE)
+#define G_CPL_TX_TLS_PDU_CUSTOMTYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMTYPE) & M_CPL_TX_TLS_PDU_CUSTOMTYPE)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMPROTOVER 8
+#define M_CPL_TX_TLS_PDU_CUSTOMPROTOVER 0xffff
+#define V_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ ((x) << S_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+#define G_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMPROTOVER) & \
+ M_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+
struct cpl_tx_tls_sfo {
__be32 op_to_seg_len;
__be32 pld_len;
@@ -3223,6 +5188,12 @@ struct cpl_tx_tls_sfo {
#define G_CPL_TX_TLS_SFO_SEG_LEN(x) \
(((x) >> S_CPL_TX_TLS_SFO_SEG_LEN) & M_CPL_TX_TLS_SFO_SEG_LEN)
+#define S_CPL_TX_TLS_SFO_PLDLEN 0
+#define M_CPL_TX_TLS_SFO_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_SFO_PLDLEN(x) ((x) << S_CPL_TX_TLS_SFO_PLDLEN)
+#define G_CPL_TX_TLS_SFO_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_SFO_PLDLEN) & M_CPL_TX_TLS_SFO_PLDLEN)
+
#define S_CPL_TX_TLS_SFO_TYPE 24
#define M_CPL_TX_TLS_SFO_TYPE 0xff
#define V_CPL_TX_TLS_SFO_TYPE(x) ((x) << S_CPL_TX_TLS_SFO_TYPE)
@@ -3454,6 +5425,119 @@ struct cpl_rx_tls_cmp {
#define G_SCMD_HDR_LEN(x) \
(((x) >> S_SCMD_HDR_LEN) & M_SCMD_HDR_LEN)
+struct cpl_rx_pkt_ipsec {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 vlan;
+ __be16 length;
+ __be32 rxchannel_to_ethhdrlen;
+ __be32 iphdrlen_to_rxerror;
+ __be64 timestamp;
+};
+
+#define S_CPL_RX_PKT_IPSEC_OPCODE 24
+#define M_CPL_RX_PKT_IPSEC_OPCODE 0xff
+#define V_CPL_RX_PKT_IPSEC_OPCODE(x) ((x) << S_CPL_RX_PKT_IPSEC_OPCODE)
+#define G_CPL_RX_PKT_IPSEC_OPCODE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OPCODE) & M_CPL_RX_PKT_IPSEC_OPCODE)
+
+#define S_CPL_RX_PKT_IPSEC_IPFRAG 23
+#define M_CPL_RX_PKT_IPSEC_IPFRAG 0x1
+#define V_CPL_RX_PKT_IPSEC_IPFRAG(x) ((x) << S_CPL_RX_PKT_IPSEC_IPFRAG)
+#define G_CPL_RX_PKT_IPSEC_IPFRAG(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPFRAG) & M_CPL_RX_PKT_IPSEC_IPFRAG)
+#define F_CPL_RX_PKT_IPSEC_IPFRAG V_CPL_RX_PKT_IPSEC_IPFRAG(1U)
+
+#define S_CPL_RX_PKT_IPSEC_VLAN_EX 22
+#define M_CPL_RX_PKT_IPSEC_VLAN_EX 0x1
+#define V_CPL_RX_PKT_IPSEC_VLAN_EX(x) ((x) << S_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define G_CPL_RX_PKT_IPSEC_VLAN_EX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_VLAN_EX) & M_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define F_CPL_RX_PKT_IPSEC_VLAN_EX V_CPL_RX_PKT_IPSEC_VLAN_EX(1U)
+
+#define S_CPL_RX_PKT_IPSEC_IPMI 21
+#define M_CPL_RX_PKT_IPSEC_IPMI 0x1
+#define V_CPL_RX_PKT_IPSEC_IPMI(x) ((x) << S_CPL_RX_PKT_IPSEC_IPMI)
+#define G_CPL_RX_PKT_IPSEC_IPMI(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPMI) & M_CPL_RX_PKT_IPSEC_IPMI)
+#define F_CPL_RX_PKT_IPSEC_IPMI V_CPL_RX_PKT_IPSEC_IPMI(1U)
+
+#define S_CPL_RX_PKT_IPSEC_INTERFACE 16
+#define M_CPL_RX_PKT_IPSEC_INTERFACE 0xf
+#define V_CPL_RX_PKT_IPSEC_INTERFACE(x) ((x) << S_CPL_RX_PKT_IPSEC_INTERFACE)
+#define G_CPL_RX_PKT_IPSEC_INTERFACE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_INTERFACE) & M_CPL_RX_PKT_IPSEC_INTERFACE)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECEXTERR 12
+#define M_CPL_RX_PKT_IPSEC_IPSECEXTERR 0xf
+#define V_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+#define G_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECEXTERR) & M_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECTYPE 10
+#define M_CPL_RX_PKT_IPSEC_IPSECTYPE 0x3
+#define V_CPL_RX_PKT_IPSEC_IPSECTYPE(x) ((x) << S_CPL_RX_PKT_IPSEC_IPSECTYPE)
+#define G_CPL_RX_PKT_IPSEC_IPSECTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECTYPE) & M_CPL_RX_PKT_IPSEC_IPSECTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN) & M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXCHANNEL 28
+#define M_CPL_RX_PKT_IPSEC_RXCHANNEL 0xf
+#define V_CPL_RX_PKT_IPSEC_RXCHANNEL(x) ((x) << S_CPL_RX_PKT_IPSEC_RXCHANNEL)
+#define G_CPL_RX_PKT_IPSEC_RXCHANNEL(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXCHANNEL) & M_CPL_RX_PKT_IPSEC_RXCHANNEL)
+
+#define S_CPL_RX_PKT_IPSEC_FLAGS 20
+#define M_CPL_RX_PKT_IPSEC_FLAGS 0xff
+#define V_CPL_RX_PKT_IPSEC_FLAGS(x) ((x) << S_CPL_RX_PKT_IPSEC_FLAGS)
+#define G_CPL_RX_PKT_IPSEC_FLAGS(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_FLAGS) & M_CPL_RX_PKT_IPSEC_FLAGS)
+
+#define S_CPL_RX_PKT_IPSEC_MACMATCHTYPE 17
+#define M_CPL_RX_PKT_IPSEC_MACMATCHTYPE 0x7
+#define V_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+#define G_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACMATCHTYPE) & \
+ M_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_MACINDEX 8
+#define M_CPL_RX_PKT_IPSEC_MACINDEX 0x1ff
+#define V_CPL_RX_PKT_IPSEC_MACINDEX(x) ((x) << S_CPL_RX_PKT_IPSEC_MACINDEX)
+#define G_CPL_RX_PKT_IPSEC_MACINDEX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACINDEX) & M_CPL_RX_PKT_IPSEC_MACINDEX)
+
+#define S_CPL_RX_PKT_IPSEC_ETHHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_ETHHDRLEN 0xff
+#define V_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_ETHHDRLEN) & M_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_IPHDRLEN 22
+#define M_CPL_RX_PKT_IPSEC_IPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_IPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_IPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_IPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPHDRLEN) & M_CPL_RX_PKT_IPSEC_IPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_TCPHDRLEN 16
+#define M_CPL_RX_PKT_IPSEC_TCPHDRLEN 0x3f
+#define V_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_TCPHDRLEN) & M_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXERROR 0
+#define M_CPL_RX_PKT_IPSEC_RXERROR 0xffff
+#define V_CPL_RX_PKT_IPSEC_RXERROR(x) ((x) << S_CPL_RX_PKT_IPSEC_RXERROR)
+#define G_CPL_RX_PKT_IPSEC_RXERROR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXERROR) & M_CPL_RX_PKT_IPSEC_RXERROR)
+
struct cpl_tx_sec_pdu {
__be32 op_ivinsrtofst;
__be32 pldlen;
@@ -3478,6 +5562,13 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_RXCHID) & M_CPL_TX_SEC_PDU_RXCHID)
#define F_CPL_TX_SEC_PDU_RXCHID V_CPL_TX_SEC_PDU_RXCHID(1U)
+#define S_T7_CPL_TX_SEC_PDU_RXCHID 22
+#define M_T7_CPL_TX_SEC_PDU_RXCHID 0x3
+#define V_T7_CPL_TX_SEC_PDU_RXCHID(x) ((x) << S_T7_CPL_TX_SEC_PDU_RXCHID)
+#define G_T7_CPL_TX_SEC_PDU_RXCHID(x) \
+(((x) >> S_T7_CPL_TX_SEC_PDU_RXCHID) & M_T7_CPL_TX_SEC_PDU_RXCHID)
+#define F_T7_CPL_TX_SEC_PDU_RXCHID V_T7_CPL_TX_SEC_PDU_RXCHID(1U)
+
/* Ack Follows */
#define S_CPL_TX_SEC_PDU_ACKFOLLOWS 21
#define M_CPL_TX_SEC_PDU_ACKFOLLOWS 0x1
@@ -3501,6 +5592,13 @@ struct cpl_tx_sec_pdu {
#define G_CPL_TX_SEC_PDU_CPLLEN(x) \
(((x) >> S_CPL_TX_SEC_PDU_CPLLEN) & M_CPL_TX_SEC_PDU_CPLLEN)
+#define S_CPL_TX_SEC_PDU_ACKNEXT 15
+#define M_CPL_TX_SEC_PDU_ACKNEXT 0x1
+#define V_CPL_TX_SEC_PDU_ACKNEXT(x) ((x) << S_CPL_TX_SEC_PDU_ACKNEXT)
+#define G_CPL_TX_SEC_PDU_ACKNEXT(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_ACKNEXT) & M_CPL_TX_SEC_PDU_ACKNEXT)
+#define F_CPL_TX_SEC_PDU_ACKNEXT V_CPL_TX_SEC_PDU_ACKNEXT(1U)
+
/* PlaceHolder */
#define S_CPL_TX_SEC_PDU_PLACEHOLDER 10
#define M_CPL_TX_SEC_PDU_PLACEHOLDER 0x1
@@ -3517,6 +5615,12 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_IVINSRTOFST) & \
M_CPL_TX_SEC_PDU_IVINSRTOFST)
+#define S_CPL_TX_SEC_PDU_PLDLEN 0
+#define M_CPL_TX_SEC_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_SEC_PDU_PLDLEN(x) ((x) << S_CPL_TX_SEC_PDU_PLDLEN)
+#define G_CPL_TX_SEC_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_PLDLEN) & M_CPL_TX_SEC_PDU_PLDLEN)
+
/* AadStartOffset: Offset in bytes for AAD start from
* the first byte following
* the pkt headers (0-255
@@ -3666,6 +5770,62 @@ struct cpl_rx_phys_dsgl {
(((x) >> S_CPL_RX_PHYS_DSGL_NOOFSGENTR) & \
M_CPL_RX_PHYS_DSGL_NOOFSGENTR)
+struct cpl_t7_rx_phys_dsgl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 PhysAddrFields_lo_to_NumSGE;
+ __be32 RSSCopy[2];
+};
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0xffffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 16
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 0xffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 11
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR) & M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define F_CPL_T7_RX_PHYS_DSGL_NUMSGEERR V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 10
+#define M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE) & \
+ M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE \
+ V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_SPLITMODE 9
+#define M_CPL_T7_RX_PHYS_DSGL_SPLITMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_SPLITMODE) & M_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_SPLITMODE \
+ V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGE 0
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGE 0x1ff
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGE) & M_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+
/* CPL_TX_TLS_ACK */
struct cpl_tx_tls_ack {
__be32 op_to_Rsvd2;
@@ -3679,12 +5839,11 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_OPCODE(x) \
(((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
-#define S_CPL_TX_TLS_ACK_RSVD1 23
-#define M_CPL_TX_TLS_ACK_RSVD1 0x1
-#define V_CPL_TX_TLS_ACK_RSVD1(x) ((x) << S_CPL_TX_TLS_ACK_RSVD1)
-#define G_CPL_TX_TLS_ACK_RSVD1(x) \
- (((x) >> S_CPL_TX_TLS_ACK_RSVD1) & M_CPL_TX_TLS_ACK_RSVD1)
-#define F_CPL_TX_TLS_ACK_RSVD1 V_CPL_TX_TLS_ACK_RSVD1(1U)
+#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
+#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
+#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
+#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
+ (((x) >> S_T7_CPL_TX_TLS_ACK_RXCHID) & M_T7_CPL_TX_TLS_ACK_RXCHID)
#define S_CPL_TX_TLS_ACK_RXCHID 22
#define M_CPL_TX_TLS_ACK_RXCHID 0x1
@@ -3740,4 +5899,822 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_RSVD2(x) \
(((x) >> S_CPL_TX_TLS_ACK_RSVD2) & M_CPL_TX_TLS_ACK_RSVD2)
+#define S_CPL_TX_TLS_ACK_PLDLEN 0
+#define M_CPL_TX_TLS_ACK_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_ACK_PLDLEN(x) ((x) << S_CPL_TX_TLS_ACK_PLDLEN)
+#define G_CPL_TX_TLS_ACK_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_ACK_PLDLEN) & M_CPL_TX_TLS_ACK_PLDLEN)
+
+struct cpl_rcb_upd {
+ __be32 op_to_tid;
+ __be32 opcode_psn;
+ __u8 nodata_to_cnprepclr;
+ __u8 r0;
+ __be16 wrptr;
+ __be32 length;
+};
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_TID 0
+#define M_CPL_RCB_UPD_TID 0xffffff
+#define V_CPL_RCB_UPD_TID(x) ((x) << S_CPL_RCB_UPD_TID)
+#define G_CPL_RCB_UPD_TID(x) \
+ (((x) >> S_CPL_RCB_UPD_TID) & M_CPL_RCB_UPD_TID)
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_PSN 0
+#define M_CPL_RCB_UPD_PSN 0xffffff
+#define V_CPL_RCB_UPD_PSN(x) ((x) << S_CPL_RCB_UPD_PSN)
+#define G_CPL_RCB_UPD_PSN(x) \
+ (((x) >> S_CPL_RCB_UPD_PSN) & M_CPL_RCB_UPD_PSN)
+
+#define S_CPL_RCB_UPD_NODATA 7
+#define M_CPL_RCB_UPD_NODATA 0x1
+#define V_CPL_RCB_UPD_NODATA(x) ((x) << S_CPL_RCB_UPD_NODATA)
+#define G_CPL_RCB_UPD_NODATA(x) \
+ (((x) >> S_CPL_RCB_UPD_NODATA) & M_CPL_RCB_UPD_NODATA)
+#define F_CPL_RCB_UPD_NODATA V_CPL_RCB_UPD_NODATA(1U)
+
+#define S_CPL_RCB_UPD_RTTSTAMP 6
+#define M_CPL_RCB_UPD_RTTSTAMP 0x1
+#define V_CPL_RCB_UPD_RTTSTAMP(x) ((x) << S_CPL_RCB_UPD_RTTSTAMP)
+#define G_CPL_RCB_UPD_RTTSTAMP(x) \
+ (((x) >> S_CPL_RCB_UPD_RTTSTAMP) & M_CPL_RCB_UPD_RTTSTAMP)
+#define F_CPL_RCB_UPD_RTTSTAMP V_CPL_RCB_UPD_RTTSTAMP(1U)
+
+#define S_CPL_RCB_UPD_ECNREPCLR 5
+#define M_CPL_RCB_UPD_ECNREPCLR 0x1
+#define V_CPL_RCB_UPD_ECNREPCLR(x) ((x) << S_CPL_RCB_UPD_ECNREPCLR)
+#define G_CPL_RCB_UPD_ECNREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_ECNREPCLR) & M_CPL_RCB_UPD_ECNREPCLR)
+#define F_CPL_RCB_UPD_ECNREPCLR V_CPL_RCB_UPD_ECNREPCLR(1U)
+
+#define S_CPL_RCB_UPD_NAKSEQCLR 4
+#define M_CPL_RCB_UPD_NAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_NAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_NAKSEQCLR)
+#define G_CPL_RCB_UPD_NAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_NAKSEQCLR) & M_CPL_RCB_UPD_NAKSEQCLR)
+#define F_CPL_RCB_UPD_NAKSEQCLR V_CPL_RCB_UPD_NAKSEQCLR(1U)
+
+#define S_CPL_RCB_UPD_QPERRSET 3
+#define M_CPL_RCB_UPD_QPERRSET 0x1
+#define V_CPL_RCB_UPD_QPERRSET(x) ((x) << S_CPL_RCB_UPD_QPERRSET)
+#define G_CPL_RCB_UPD_QPERRSET(x) \
+ (((x) >> S_CPL_RCB_UPD_QPERRSET) & M_CPL_RCB_UPD_QPERRSET)
+#define F_CPL_RCB_UPD_QPERRSET V_CPL_RCB_UPD_QPERRSET(1U)
+
+#define S_CPL_RCB_UPD_RRQUPDEN 2
+#define M_CPL_RCB_UPD_RRQUPDEN 0x1
+#define V_CPL_RCB_UPD_RRQUPDEN(x) ((x) << S_CPL_RCB_UPD_RRQUPDEN)
+#define G_CPL_RCB_UPD_RRQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RRQUPDEN) & M_CPL_RCB_UPD_RRQUPDEN)
+#define F_CPL_RCB_UPD_RRQUPDEN V_CPL_RCB_UPD_RRQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_RQUPDEN 1
+#define M_CPL_RCB_UPD_RQUPDEN 0x1
+#define V_CPL_RCB_UPD_RQUPDEN(x) ((x) << S_CPL_RCB_UPD_RQUPDEN)
+#define G_CPL_RCB_UPD_RQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RQUPDEN) & M_CPL_RCB_UPD_RQUPDEN)
+#define F_CPL_RCB_UPD_RQUPDEN V_CPL_RCB_UPD_RQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_CNPREPCLR 0
+#define M_CPL_RCB_UPD_CNPREPCLR 0x1
+#define V_CPL_RCB_UPD_CNPREPCLR(x) ((x) << S_CPL_RCB_UPD_CNPREPCLR)
+#define G_CPL_RCB_UPD_CNPREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_CNPREPCLR) & M_CPL_RCB_UPD_CNPREPCLR)
+#define F_CPL_RCB_UPD_CNPREPCLR V_CPL_RCB_UPD_CNPREPCLR(1U)
+
+#define S_CPL_RCB_UPD_RSPNAKSEQCLR 7
+#define M_CPL_RCB_UPD_RSPNAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_RSPNAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define G_CPL_RCB_UPD_RSPNAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_RSPNAKSEQCLR) & M_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define F_CPL_RCB_UPD_RSPNAKSEQCLR V_CPL_RCB_UPD_RSPNAKSEQCLR(1U)
+
+struct cpl_roce_fw_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 type_pkd;
+};
+
+#define S_CPL_ROCE_FW_NOTIFY_OPCODE 24
+#define M_CPL_ROCE_FW_NOTIFY_OPCODE 0xff
+#define V_CPL_ROCE_FW_NOTIFY_OPCODE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_OPCODE)
+#define G_CPL_ROCE_FW_NOTIFY_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_OPCODE) & M_CPL_ROCE_FW_NOTIFY_OPCODE)
+
+#define S_CPL_ROCE_FW_NOTIFY_TID 0
+#define M_CPL_ROCE_FW_NOTIFY_TID 0xffffff
+#define V_CPL_ROCE_FW_NOTIFY_TID(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TID)
+#define G_CPL_ROCE_FW_NOTIFY_TID(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TID) & M_CPL_ROCE_FW_NOTIFY_TID)
+
+#define S_CPL_ROCE_FW_NOTIFY_TYPE 28
+#define M_CPL_ROCE_FW_NOTIFY_TYPE 0xf
+#define V_CPL_ROCE_FW_NOTIFY_TYPE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TYPE)
+#define G_CPL_ROCE_FW_NOTIFY_TYPE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TYPE) & M_CPL_ROCE_FW_NOTIFY_TYPE)
+
+struct cpl_roce_ack_nak_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_msn_hi;
+ __be32 msn_lo_pkd;
+};
+
+#define S_CPL_ROCE_ACK_NAK_REQ_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_REQ_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TID 0
+#define M_CPL_ROCE_ACK_NAK_REQ_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TID)
+#define G_CPL_ROCE_ACK_NAK_REQ_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TID) & M_CPL_ROCE_ACK_NAK_REQ_TID)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_REQ_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TYPE)
+#define G_CPL_ROCE_ACK_NAK_REQ_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TYPE) & M_CPL_ROCE_ACK_NAK_REQ_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_REQ_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_STATUS)
+#define G_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_STATUS) & M_CPL_ROCE_ACK_NAK_REQ_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_PSN 8
+#define M_CPL_ROCE_ACK_NAK_REQ_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_PSN)
+#define G_CPL_ROCE_ACK_NAK_REQ_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_PSN) & M_CPL_ROCE_ACK_NAK_REQ_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_HI) & M_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_LO 16
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_LO 0xffff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_LO) & M_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+
+struct cpl_roce_ack_nak {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_rtt_hi;
+ __be32 rtt_lo_to_rttbad;
+};
+
+#define S_CPL_ROCE_ACK_NAK_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_OPCODE) & M_CPL_ROCE_ACK_NAK_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_TID 0
+#define M_CPL_ROCE_ACK_NAK_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_TID)
+#define G_CPL_ROCE_ACK_NAK_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TID) & M_CPL_ROCE_ACK_NAK_TID)
+
+#define S_CPL_ROCE_ACK_NAK_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_TYPE)
+#define G_CPL_ROCE_ACK_NAK_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TYPE) & M_CPL_ROCE_ACK_NAK_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_STATUS(x) ((x) << S_CPL_ROCE_ACK_NAK_STATUS)
+#define G_CPL_ROCE_ACK_NAK_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_STATUS) & M_CPL_ROCE_ACK_NAK_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_WIRE_OPCODE) & M_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_PSN 8
+#define M_CPL_ROCE_ACK_NAK_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_PSN)
+#define G_CPL_ROCE_ACK_NAK_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_PSN) & M_CPL_ROCE_ACK_NAK_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_HI 0
+#define M_CPL_ROCE_ACK_NAK_RTT_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_HI(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_HI)
+#define G_CPL_ROCE_ACK_NAK_RTT_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_HI) & M_CPL_ROCE_ACK_NAK_RTT_HI)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_LO 24
+#define M_CPL_ROCE_ACK_NAK_RTT_LO 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_LO(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_LO)
+#define G_CPL_ROCE_ACK_NAK_RTT_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_LO) & M_CPL_ROCE_ACK_NAK_RTT_LO)
+
+#define S_CPL_ROCE_ACK_NAK_RTTVALID 23
+#define M_CPL_ROCE_ACK_NAK_RTTVALID 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTVALID(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTVALID)
+#define G_CPL_ROCE_ACK_NAK_RTTVALID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTVALID) & M_CPL_ROCE_ACK_NAK_RTTVALID)
+#define F_CPL_ROCE_ACK_NAK_RTTVALID V_CPL_ROCE_ACK_NAK_RTTVALID(1U)
+
+#define S_CPL_ROCE_ACK_NAK_RTTBAD 22
+#define M_CPL_ROCE_ACK_NAK_RTTBAD 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTBAD(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTBAD)
+#define G_CPL_ROCE_ACK_NAK_RTTBAD(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTBAD) & M_CPL_ROCE_ACK_NAK_RTTBAD)
+#define F_CPL_ROCE_ACK_NAK_RTTBAD V_CPL_ROCE_ACK_NAK_RTTBAD(1U)
+
+struct cpl_roce_cqe {
+ __be16 op_rssctrl;
+ __be16 cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_OPCODE 8
+#define M_CPL_ROCE_CQE_OPCODE 0xff
+#define V_CPL_ROCE_CQE_OPCODE(x) ((x) << S_CPL_ROCE_CQE_OPCODE)
+#define G_CPL_ROCE_CQE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_OPCODE) & M_CPL_ROCE_CQE_OPCODE)
+
+#define S_CPL_ROCE_CQE_RSSCTRL 0
+#define M_CPL_ROCE_CQE_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_RSSCTRL)
+#define G_CPL_ROCE_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_RSSCTRL) & M_CPL_ROCE_CQE_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_TID 8
+#define M_CPL_ROCE_CQE_TID 0xfffff
+#define V_CPL_ROCE_CQE_TID(x) ((x) << S_CPL_ROCE_CQE_TID)
+#define G_CPL_ROCE_CQE_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_TID) & M_CPL_ROCE_CQE_TID)
+
+#define S_CPL_ROCE_CQE_FLITCNT 0
+#define M_CPL_ROCE_CQE_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FLITCNT)
+#define G_CPL_ROCE_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FLITCNT) & M_CPL_ROCE_CQE_FLITCNT)
+
+#define S_CPL_ROCE_CQE_QPID 12
+#define M_CPL_ROCE_CQE_QPID 0xfffff
+#define V_CPL_ROCE_CQE_QPID(x) ((x) << S_CPL_ROCE_CQE_QPID)
+#define G_CPL_ROCE_CQE_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_QPID) & M_CPL_ROCE_CQE_QPID)
+
+#define S_CPL_ROCE_CQE_EXTMODE 11
+#define M_CPL_ROCE_CQE_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
+#define G_CPL_ROCE_CQE_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
+#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_GENERATION_BIT) & M_CPL_ROCE_CQE_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_GENERATION_BIT V_CPL_ROCE_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_STATUS 5
+#define M_CPL_ROCE_CQE_STATUS 0x1f
+#define V_CPL_ROCE_CQE_STATUS(x) ((x) << S_CPL_ROCE_CQE_STATUS)
+#define G_CPL_ROCE_CQE_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_STATUS) & M_CPL_ROCE_CQE_STATUS)
+
+#define S_CPL_ROCE_CQE_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_CQE_TYPE)
+#define G_CPL_ROCE_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_CQE_TYPE) & M_CPL_ROCE_CQE_CQE_TYPE)
+#define F_CPL_ROCE_CQE_CQE_TYPE V_CPL_ROCE_CQE_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE 0
+#define M_CPL_ROCE_CQE_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE)
+#define G_CPL_ROCE_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE) & M_CPL_ROCE_CQE_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_SE 31
+#define M_CPL_ROCE_CQE_SE 0x1
+#define V_CPL_ROCE_CQE_SE(x) ((x) << S_CPL_ROCE_CQE_SE)
+#define G_CPL_ROCE_CQE_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_SE) & M_CPL_ROCE_CQE_SE)
+#define F_CPL_ROCE_CQE_SE V_CPL_ROCE_CQE_SE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_WR_TYPE_EXT(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE_EXT) & M_CPL_ROCE_CQE_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_SRQ 0
+#define M_CPL_ROCE_CQE_SRQ 0xfff
+#define V_CPL_ROCE_CQE_SRQ(x) ((x) << S_CPL_ROCE_CQE_SRQ)
+#define G_CPL_ROCE_CQE_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_SRQ) & M_CPL_ROCE_CQE_SRQ)
+
+struct cpl_roce_cqe_fw {
+ __be32 op_to_cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_FW_OPCODE 24
+#define M_CPL_ROCE_CQE_FW_OPCODE 0xff
+#define V_CPL_ROCE_CQE_FW_OPCODE(x) ((x) << S_CPL_ROCE_CQE_FW_OPCODE)
+#define G_CPL_ROCE_CQE_FW_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_OPCODE) & M_CPL_ROCE_CQE_FW_OPCODE)
+
+#define S_CPL_ROCE_CQE_FW_RSSCTRL 16
+#define M_CPL_ROCE_CQE_FW_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_FW_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_FW_RSSCTRL)
+#define G_CPL_ROCE_CQE_FW_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_RSSCTRL) & M_CPL_ROCE_CQE_FW_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_FW_CQID 0
+#define M_CPL_ROCE_CQE_FW_CQID 0xffff
+#define V_CPL_ROCE_CQE_FW_CQID(x) ((x) << S_CPL_ROCE_CQE_FW_CQID)
+#define G_CPL_ROCE_CQE_FW_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQID) & M_CPL_ROCE_CQE_FW_CQID)
+
+#define S_CPL_ROCE_CQE_FW_TID 8
+#define M_CPL_ROCE_CQE_FW_TID 0xfffff
+#define V_CPL_ROCE_CQE_FW_TID(x) ((x) << S_CPL_ROCE_CQE_FW_TID)
+#define G_CPL_ROCE_CQE_FW_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_TID) & M_CPL_ROCE_CQE_FW_TID)
+
+#define S_CPL_ROCE_CQE_FW_FLITCNT 0
+#define M_CPL_ROCE_CQE_FW_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FW_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FW_FLITCNT)
+#define G_CPL_ROCE_CQE_FW_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_FLITCNT) & M_CPL_ROCE_CQE_FW_FLITCNT)
+
+#define S_CPL_ROCE_CQE_FW_QPID 12
+#define M_CPL_ROCE_CQE_FW_QPID 0xfffff
+#define V_CPL_ROCE_CQE_FW_QPID(x) ((x) << S_CPL_ROCE_CQE_FW_QPID)
+#define G_CPL_ROCE_CQE_FW_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_QPID) & M_CPL_ROCE_CQE_FW_QPID)
+
+#define S_CPL_ROCE_CQE_FW_EXTMODE 11
+#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
+#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
+#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_FW_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_FW_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_FW_GENERATION_BIT V_CPL_ROCE_CQE_FW_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_FW_STATUS 5
+#define M_CPL_ROCE_CQE_FW_STATUS 0x1f
+#define V_CPL_ROCE_CQE_FW_STATUS(x) ((x) << S_CPL_ROCE_CQE_FW_STATUS)
+#define G_CPL_ROCE_CQE_FW_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_STATUS) & M_CPL_ROCE_CQE_FW_STATUS)
+
+#define S_CPL_ROCE_CQE_FW_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_FW_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_FW_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define G_CPL_ROCE_CQE_FW_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQE_TYPE) & M_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define F_CPL_ROCE_CQE_FW_CQE_TYPE V_CPL_ROCE_CQE_FW_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE 0
+#define M_CPL_ROCE_CQE_FW_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_FW_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE) & M_CPL_ROCE_CQE_FW_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_FW_SE 31
+#define M_CPL_ROCE_CQE_FW_SE 0x1
+#define V_CPL_ROCE_CQE_FW_SE(x) ((x) << S_CPL_ROCE_CQE_FW_SE)
+#define G_CPL_ROCE_CQE_FW_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SE) & M_CPL_ROCE_CQE_FW_SE)
+#define F_CPL_ROCE_CQE_FW_SE V_CPL_ROCE_CQE_FW_SE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_FW_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE_EXT) & M_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_FW_SRQ 0
+#define M_CPL_ROCE_CQE_FW_SRQ 0xfff
+#define V_CPL_ROCE_CQE_FW_SRQ(x) ((x) << S_CPL_ROCE_CQE_FW_SRQ)
+#define G_CPL_ROCE_CQE_FW_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SRQ) & M_CPL_ROCE_CQE_FW_SRQ)
+
+struct cpl_roce_cqe_err {
+ __be32 op_to_CQID;
+ __be32 Tid_FlitCnt;
+ __be32 QPID_to_WR_type;
+ __be32 Length;
+ __be32 TAG;
+ __be32 MSN;
+ __be32 SE_to_SRQ;
+ __be32 RQE;
+ __be32 ExtInfoMS[2];
+ __be32 ExtInfoLS[2];
+};
+
+#define S_CPL_ROCE_CQE_ERR_OPCODE 24
+#define M_CPL_ROCE_CQE_ERR_OPCODE 0xff
+#define V_CPL_ROCE_CQE_ERR_OPCODE(x) ((x) << S_CPL_ROCE_CQE_ERR_OPCODE)
+#define G_CPL_ROCE_CQE_ERR_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_OPCODE) & M_CPL_ROCE_CQE_ERR_OPCODE)
+
+#define S_CPL_ROCE_CQE_ERR_RSSCTRL 16
+#define M_CPL_ROCE_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_ERR_RSSCTRL)
+#define G_CPL_ROCE_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_RSSCTRL) & M_CPL_ROCE_CQE_ERR_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_ERR_CQID 0
+#define M_CPL_ROCE_CQE_ERR_CQID 0xffff
+#define V_CPL_ROCE_CQE_ERR_CQID(x) ((x) << S_CPL_ROCE_CQE_ERR_CQID)
+#define G_CPL_ROCE_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQID) & M_CPL_ROCE_CQE_ERR_CQID)
+
+#define S_CPL_ROCE_CQE_ERR_TID 8
+#define M_CPL_ROCE_CQE_ERR_TID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_TID(x) ((x) << S_CPL_ROCE_CQE_ERR_TID)
+#define G_CPL_ROCE_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_TID) & M_CPL_ROCE_CQE_ERR_TID)
+
+#define S_CPL_ROCE_CQE_ERR_FLITCNT 0
+#define M_CPL_ROCE_CQE_ERR_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_ERR_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_ERR_FLITCNT)
+#define G_CPL_ROCE_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_FLITCNT) & M_CPL_ROCE_CQE_ERR_FLITCNT)
+
+#define S_CPL_ROCE_CQE_ERR_QPID 12
+#define M_CPL_ROCE_CQE_ERR_QPID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_QPID(x) ((x) << S_CPL_ROCE_CQE_ERR_QPID)
+#define G_CPL_ROCE_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_QPID) & M_CPL_ROCE_CQE_ERR_QPID)
+
+#define S_CPL_ROCE_CQE_ERR_EXTMODE 11
+#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
+#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
+#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_ERR_GENERATION_BIT \
+ V_CPL_ROCE_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_ERR_STATUS 5
+#define M_CPL_ROCE_CQE_ERR_STATUS 0x1f
+#define V_CPL_ROCE_CQE_ERR_STATUS(x) ((x) << S_CPL_ROCE_CQE_ERR_STATUS)
+#define G_CPL_ROCE_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_STATUS) & M_CPL_ROCE_CQE_ERR_STATUS)
+
+#define S_CPL_ROCE_CQE_ERR_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define G_CPL_ROCE_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQE_TYPE) & M_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define F_CPL_ROCE_CQE_ERR_CQE_TYPE V_CPL_ROCE_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE 0
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE) & M_CPL_ROCE_CQE_ERR_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_ERR_SE 31
+#define M_CPL_ROCE_CQE_ERR_SE 0x1
+#define V_CPL_ROCE_CQE_ERR_SE(x) ((x) << S_CPL_ROCE_CQE_ERR_SE)
+#define G_CPL_ROCE_CQE_ERR_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SE) & M_CPL_ROCE_CQE_ERR_SE)
+#define F_CPL_ROCE_CQE_ERR_SE V_CPL_ROCE_CQE_ERR_SE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT) & M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_ERR_SRQ 0
+#define M_CPL_ROCE_CQE_ERR_SRQ 0xfff
+#define V_CPL_ROCE_CQE_ERR_SRQ(x) ((x) << S_CPL_ROCE_CQE_ERR_SRQ)
+#define G_CPL_ROCE_CQE_ERR_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SRQ) & M_CPL_ROCE_CQE_ERR_SRQ)
+
+struct cpl_accelerator_hdr {
+ __be16 op_accelerator_id;
+ __be16 rxchid_payload_to_inner_cpl_length_ack;
+ __be32 inner_cpl_length_payload_status_loc;
+};
+
+#define S_CPL_ACCELERATOR_HDR_OPCODE 8
+#define M_CPL_ACCELERATOR_HDR_OPCODE 0xff
+#define V_CPL_ACCELERATOR_HDR_OPCODE(x) ((x) << S_CPL_ACCELERATOR_HDR_OPCODE)
+#define G_CPL_ACCELERATOR_HDR_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_OPCODE) & M_CPL_ACCELERATOR_HDR_OPCODE)
+
+#define S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 14
+#define M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 12
+#define M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_ACK 10
+#define M_CPL_ACCELERATOR_HDR_RXCHID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_ACK 8
+#define M_CPL_ACCELERATOR_HDR_DESTID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_ACK)
+#define G_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 24
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_STATUS_LOC 22
+#define M_CPL_ACCELERATOR_HDR_STATUS_LOC 0x3
+#define V_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_STATUS_LOC)
+#define G_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_STATUS_LOC) & \
+ M_CPL_ACCELERATOR_HDR_STATUS_LOC)
+
+struct cpl_accelerator_ack {
+ RSS_HDR
+ __be16 op_accelerator_id;
+ __be16 r0;
+ __be32 status;
+ __be64 r1;
+ __be64 r2;
+};
+
+#define S_CPL_ACCELERATOR_ACK_OPCODE 8
+#define M_CPL_ACCELERATOR_ACK_OPCODE 0xff
+#define V_CPL_ACCELERATOR_ACK_OPCODE(x) ((x) << S_CPL_ACCELERATOR_ACK_OPCODE)
+#define G_CPL_ACCELERATOR_ACK_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_OPCODE) & M_CPL_ACCELERATOR_ACK_OPCODE)
+
+#define S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+
+struct cpl_nvmt_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r0;
+ __be16 length;
+ __be32 seq;
+ __be32 status_pkd;
+};
+
+#define S_CPL_NVMT_DATA_OPCODE 24
+#define M_CPL_NVMT_DATA_OPCODE 0xff
+#define V_CPL_NVMT_DATA_OPCODE(x) ((x) << S_CPL_NVMT_DATA_OPCODE)
+#define G_CPL_NVMT_DATA_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_DATA_OPCODE) & M_CPL_NVMT_DATA_OPCODE)
+
+#define S_CPL_NVMT_DATA_TID 0
+#define M_CPL_NVMT_DATA_TID 0xffffff
+#define V_CPL_NVMT_DATA_TID(x) ((x) << S_CPL_NVMT_DATA_TID)
+#define G_CPL_NVMT_DATA_TID(x) \
+ (((x) >> S_CPL_NVMT_DATA_TID) & M_CPL_NVMT_DATA_TID)
+
+#define S_CPL_NVMT_DATA_STATUS 0
+#define M_CPL_NVMT_DATA_STATUS 0xff
+#define V_CPL_NVMT_DATA_STATUS(x) ((x) << S_CPL_NVMT_DATA_STATUS)
+#define G_CPL_NVMT_DATA_STATUS(x) \
+ (((x) >> S_CPL_NVMT_DATA_STATUS) & M_CPL_NVMT_DATA_STATUS)
+
+struct cpl_nvmt_cmp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 crch;
+ __be16 length;
+ __be32 seq;
+ __u8 t10status;
+ __u8 status;
+ __be16 crcl;
+};
+
+#define S_CPL_NVMT_CMP_OPCODE 24
+#define M_CPL_NVMT_CMP_OPCODE 0xff
+#define V_CPL_NVMT_CMP_OPCODE(x) ((x) << S_CPL_NVMT_CMP_OPCODE)
+#define G_CPL_NVMT_CMP_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_OPCODE) & M_CPL_NVMT_CMP_OPCODE)
+
+#define S_CPL_NVMT_CMP_TID 0
+#define M_CPL_NVMT_CMP_TID 0xffffff
+#define V_CPL_NVMT_CMP_TID(x) ((x) << S_CPL_NVMT_CMP_TID)
+#define G_CPL_NVMT_CMP_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_TID) & M_CPL_NVMT_CMP_TID)
+
+struct cpl_nvmt_cmp_imm {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 r1;
+};
+
+#define S_CPL_NVMT_CMP_IMM_OPCODE 24
+#define M_CPL_NVMT_CMP_IMM_OPCODE 0xff
+#define V_CPL_NVMT_CMP_IMM_OPCODE(x) ((x) << S_CPL_NVMT_CMP_IMM_OPCODE)
+#define G_CPL_NVMT_CMP_IMM_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPCODE) & M_CPL_NVMT_CMP_IMM_OPCODE)
+
+#define S_CPL_NVMT_CMP_IMM_RSSCTRL 16
+#define M_CPL_NVMT_CMP_IMM_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_IMM_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_IMM_RSSCTRL)
+#define G_CPL_NVMT_CMP_IMM_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_RSSCTRL) & M_CPL_NVMT_CMP_IMM_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_IMM_CQID 0
+#define M_CPL_NVMT_CMP_IMM_CQID 0xffff
+#define V_CPL_NVMT_CMP_IMM_CQID(x) ((x) << S_CPL_NVMT_CMP_IMM_CQID)
+#define G_CPL_NVMT_CMP_IMM_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_CQID) & M_CPL_NVMT_CMP_IMM_CQID)
+
+#define S_CPL_NVMT_CMP_IMM_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_IMM_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_IMM_GENERATION_BIT \
+ V_CPL_NVMT_CMP_IMM_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_IMM_TID 8
+#define M_CPL_NVMT_CMP_IMM_TID 0xfffff
+#define V_CPL_NVMT_CMP_IMM_TID(x) ((x) << S_CPL_NVMT_CMP_IMM_TID)
+#define G_CPL_NVMT_CMP_IMM_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_TID) & M_CPL_NVMT_CMP_IMM_TID)
+
+#define S_CPL_NVMT_CMP_IMM_OPRQINC 0
+#define M_CPL_NVMT_CMP_IMM_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_IMM_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_IMM_OPRQINC)
+#define G_CPL_NVMT_CMP_IMM_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPRQINC) & M_CPL_NVMT_CMP_IMM_OPRQINC)
+
+struct cpl_nvmt_cmp_srq {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 rqe;
+};
+
+#define S_CPL_NVMT_CMP_SRQ_OPCODE 24
+#define M_CPL_NVMT_CMP_SRQ_OPCODE 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPCODE(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPCODE)
+#define G_CPL_NVMT_CMP_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPCODE) & M_CPL_NVMT_CMP_SRQ_OPCODE)
+
+#define S_CPL_NVMT_CMP_SRQ_RSSCTRL 16
+#define M_CPL_NVMT_CMP_SRQ_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_SRQ_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_SRQ_RSSCTRL)
+#define G_CPL_NVMT_CMP_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_RSSCTRL) & M_CPL_NVMT_CMP_SRQ_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_SRQ_CQID 0
+#define M_CPL_NVMT_CMP_SRQ_CQID 0xffff
+#define V_CPL_NVMT_CMP_SRQ_CQID(x) ((x) << S_CPL_NVMT_CMP_SRQ_CQID)
+#define G_CPL_NVMT_CMP_SRQ_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_CQID) & M_CPL_NVMT_CMP_SRQ_CQID)
+
+#define S_CPL_NVMT_CMP_SRQ_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_SRQ_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_SRQ_GENERATION_BIT \
+ V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_SRQ_TID 8
+#define M_CPL_NVMT_CMP_SRQ_TID 0xfffff
+#define V_CPL_NVMT_CMP_SRQ_TID(x) ((x) << S_CPL_NVMT_CMP_SRQ_TID)
+#define G_CPL_NVMT_CMP_SRQ_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_TID) & M_CPL_NVMT_CMP_SRQ_TID)
+
+#define S_CPL_NVMT_CMP_SRQ_OPRQINC 0
+#define M_CPL_NVMT_CMP_SRQ_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPRQINC)
+#define G_CPL_NVMT_CMP_SRQ_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPRQINC) & M_CPL_NVMT_CMP_SRQ_OPRQINC)
+
#endif /* T4_MSG_H */
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index e3b2a29b2ea9..8f500ec0fbdd 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2013, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2013, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Wed Jan 27 10:57:51 IST 2016 */
-/* Directory name: t4_reg.txt, Changeset: */
-/* Directory name: t5_reg.txt, Changeset: 6936:7f6342b03d61 */
-/* Directory name: t6_reg.txt, Changeset: 4191:ce3ccd95c109 */
+/* Generation Date : Thu Sep 11 05:25:56 PM IST 2025 */
+/* Directory name: t4_reg.txt, Date: Not specified */
+/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
+/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
+/* Directory name: t7_reg.txt, Changeset: 5945:1487219ecb20 */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -285,9 +285,6 @@
#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
-#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
-#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
-
#define PCIE_PF_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define NUM_PCIE_PF_INT_INSTANCES 8
@@ -459,9 +456,6 @@
#define LE_DB_DBGI_REQ_MASK_T6(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
#define NUM_LE_DB_DBGI_REQ_MASK_T6_INSTANCES 11
-#define LE_DB_DBGI_RSP_DATA_T6(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
-#define NUM_LE_DB_DBGI_RSP_DATA_T6_INSTANCES 11
-
#define LE_DB_ACTIVE_MASK_IPV6_T6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
#define NUM_LE_DB_ACTIVE_MASK_IPV6_T6_INSTANCES 8
@@ -501,12 +495,175 @@
#define CIM_CTL_MAILBOX_VFN_CTL_T6(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T6_INSTANCES 256
+#define T7_MYPORT_BASE 0x2e000
+#define T7_MYPORT_REG(reg_addr) (T7_MYPORT_BASE + (reg_addr))
+
+#define T7_PORT0_BASE 0x30000
+#define T7_PORT0_REG(reg_addr) (T7_PORT0_BASE + (reg_addr))
+
+#define T7_PORT1_BASE 0x32000
+#define T7_PORT1_REG(reg_addr) (T7_PORT1_BASE + (reg_addr))
+
+#define T7_PORT2_BASE 0x34000
+#define T7_PORT2_REG(reg_addr) (T7_PORT2_BASE + (reg_addr))
+
+#define T7_PORT3_BASE 0x36000
+#define T7_PORT3_REG(reg_addr) (T7_PORT3_BASE + (reg_addr))
+
+#define T7_PORT_STRIDE 0x2000
+#define T7_PORT_BASE(idx) (T7_PORT0_BASE + (idx) * T7_PORT_STRIDE)
+#define T7_PORT_REG(idx, reg) (T7_PORT_BASE(idx) + (reg))
+
+#define PCIE_MEM_ACCESS_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_MEM_ACCESS_T7_INSTANCES 16
+
+#define PCIE_T7_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T7_CMD_INSTANCES 1
+
+#define PCIE_T5_ARM_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_ARM_INSTANCES 1
+
+#define PCIE_JBOF_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_JBOF_INSTANCES 16
+
+#define PCIE_EMUADRRMAP_REG(reg_addr, idx) ((reg_addr) + (idx) * 32)
+#define NUM_PCIE_EMUADRRMAP_INSTANCES 3
+
+#define CIM_GFT_MASK(idx) (A_CIM_GFT_MASK + (idx) * 4)
+#define NUM_CIM_GFT_MASK_INSTANCES 4
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_A(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_A + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_A_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_B(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_B + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_B_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_RUNT_CTL(idx) (A_T7_MPS_TRC_FILTER_RUNT_CTL + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_RUNT_CTL_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_DROP(idx) (A_T7_MPS_TRC_FILTER_DROP + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_DROP_INSTANCES 8
+
+#define MPS_TRC_FILTER4_MATCH(idx) (A_MPS_TRC_FILTER4_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER4_DONT_CARE(idx) (A_MPS_TRC_FILTER4_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER5_MATCH(idx) (A_MPS_TRC_FILTER5_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER5_DONT_CARE(idx) (A_MPS_TRC_FILTER5_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER6_MATCH(idx) (A_MPS_TRC_FILTER6_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER6_DONT_CARE(idx) (A_MPS_TRC_FILTER6_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER7_MATCH(idx) (A_MPS_TRC_FILTER7_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER7_DONT_CARE(idx) (A_MPS_TRC_FILTER7_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_DONT_CARE_INSTANCES 28
+
+#define LE_DB_DBGI_REQ_DATA_T7(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_T7_INSTANCES 13
+
+#define LE_DB_DBGI_REQ_MASK_T7(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_MASK_T7_INSTANCES 13
+
+#define LE_DB_ACTIVE_MASK_IPV6_T7(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV6_T7_INSTANCES 8
+
+#define LE_HASH_MASK_GEN_IPV4T7(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4T7_INSTANCES 8
+
+#define T7_LE_HASH_MASK_GEN_IPV6T5(idx) (A_T7_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4)
+#define NUM_T7_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 8
+
+#define LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7_INSTANCES 8
+
+#define TLS_TX_CH_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_INSTANCES 6
+
+#define TLS_TX_CH_IND_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_IND_INSTANCES 6
+
+#define ARM_CPU_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_ARM_CPU_INSTANCES 4
+
+#define ARM_CCIM_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIM_INSTANCES 4
+
+#define ARM_CCIS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIS_INSTANCES 5
+
+#define ARM_CCI_EVNTBUS(idx) (A_ARM_CCI_EVNTBUS + (idx) * 4)
+#define NUM_ARM_CCI_EVNTBUS_INSTANCES 5
+
+#define ARM_ARM_CFG1(idx) (A_ARM_ARM_CFG1 + (idx) * 4)
+#define NUM_ARM_ARM_CFG1_INSTANCES 2
+
+#define ARM_ARM_CFG2(idx) (A_ARM_ARM_CFG2 + (idx) * 4)
+#define NUM_ARM_ARM_CFG2_INSTANCES 2
+
+#define ARM_MSG_REG(reg_addr, idx) ((reg_addr) + (idx) * 48)
+#define NUM_ARM_MSG_INSTANCES 4
+
+#define ARM_MSG_PCIE_MESSAGE2AXI_CFG4(idx) (A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 + (idx) * 4)
+#define NUM_ARM_MSG_PCIE_MESSAGE2AXI_CFG4_INSTANCES 2
+
+#define MC_CE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_UE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_P_BIST_USER_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_P_BIST_USER_INSTANCES 36
+
+#define HMA_H_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_HMA_H_BIST_STATUS_INSTANCES 18
+
+#define GCACHE_P_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_GCACHE_P_BIST_STATUS_INSTANCES 18
+
+#define CIM_CTL_MAILBOX_VF_STATUS_T7(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VF_STATUS_T7_INSTANCES 8
+
+#define CIM_CTL_MAILBOX_VFN_CTL_T7(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T7_INSTANCES 256
+
+#define CIM_CTL_TID_MAP_EN(idx) (A_CIM_CTL_TID_MAP_EN + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_EN_INSTANCES 8
+
+#define CIM_CTL_TID_MAP_CORE(idx) (A_CIM_CTL_TID_MAP_CORE + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_CORE_INSTANCES 8
+
+#define CIM_CTL_CRYPTO_KEY_DATA(idx) (A_CIM_CTL_CRYPTO_KEY_DATA + (idx) * 4)
+#define NUM_CIM_CTL_CRYPTO_KEY_DATA_INSTANCES 17
+
+#define CIM_CTL_FLOWID_OP_VALID(idx) (A_CIM_CTL_FLOWID_OP_VALID + (idx) * 4)
+#define NUM_CIM_CTL_FLOWID_OP_VALID_INSTANCES 8
+
+#define CIM_CTL_SLV_REG(reg_addr, idx) ((reg_addr) + (idx) * 1024)
+#define NUM_CIM_CTL_SLV_INSTANCES 7
+
#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_T7_STRIDE (MC_T71_BASE_ADDR - MC_T70_BASE_ADDR)
+#define MC_T7_REG(reg, idx) (reg + MC_T7_STRIDE * idx)
+
/* registers for module SGE */
#define SGE_BASE_ADDR 0x1000
@@ -637,6 +794,24 @@
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+#define S_NUMOFFID 19
+#define M_NUMOFFID 0x7U
+#define V_NUMOFFID(x) ((x) << S_NUMOFFID)
+#define G_NUMOFFID(x) (((x) >> S_NUMOFFID) & M_NUMOFFID)
+
+#define S_INGHINTENABLE2 16
+#define V_INGHINTENABLE2(x) ((x) << S_INGHINTENABLE2)
+#define F_INGHINTENABLE2 V_INGHINTENABLE2(1U)
+
+#define S_INGHINTENABLE3 3
+#define V_INGHINTENABLE3(x) ((x) << S_INGHINTENABLE3)
+#define F_INGHINTENABLE3 V_INGHINTENABLE3(1U)
+
+#define S_TF_MODE 1
+#define M_TF_MODE 0x3U
+#define V_TF_MODE(x) ((x) << S_TF_MODE)
+#define G_TF_MODE(x) (((x) >> S_TF_MODE) & M_TF_MODE)
+
#define A_SGE_HOST_PAGE_SIZE 0x100c
#define S_HOSTPAGESIZEPF7 28
@@ -792,6 +967,16 @@
#define V_WR_ERROR_OPCODE(x) ((x) << S_WR_ERROR_OPCODE)
#define G_WR_ERROR_OPCODE(x) (((x) >> S_WR_ERROR_OPCODE) & M_WR_ERROR_OPCODE)
+#define S_WR_SENDPATH_ERROR_OPCODE 16
+#define M_WR_SENDPATH_ERROR_OPCODE 0xffU
+#define V_WR_SENDPATH_ERROR_OPCODE(x) ((x) << S_WR_SENDPATH_ERROR_OPCODE)
+#define G_WR_SENDPATH_ERROR_OPCODE(x) (((x) >> S_WR_SENDPATH_ERROR_OPCODE) & M_WR_SENDPATH_ERROR_OPCODE)
+
+#define S_WR_SENDPATH_OPCODE 8
+#define M_WR_SENDPATH_OPCODE 0xffU
+#define V_WR_SENDPATH_OPCODE(x) ((x) << S_WR_SENDPATH_OPCODE)
+#define G_WR_SENDPATH_OPCODE(x) (((x) >> S_WR_SENDPATH_OPCODE) & M_WR_SENDPATH_OPCODE)
+
#define A_SGE_PERR_INJECT 0x1020
#define S_MEMSEL 1
@@ -941,6 +1126,22 @@
#define V_PERR_PC_REQ(x) ((x) << S_PERR_PC_REQ)
#define F_PERR_PC_REQ V_PERR_PC_REQ(1U)
+#define S_PERR_HEADERSPLIT_FIFO3 28
+#define V_PERR_HEADERSPLIT_FIFO3(x) ((x) << S_PERR_HEADERSPLIT_FIFO3)
+#define F_PERR_HEADERSPLIT_FIFO3 V_PERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO2 27
+#define V_PERR_HEADERSPLIT_FIFO2(x) ((x) << S_PERR_HEADERSPLIT_FIFO2)
+#define F_PERR_HEADERSPLIT_FIFO2 V_PERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_PERR_PAYLOAD_FIFO3 26
+#define V_PERR_PAYLOAD_FIFO3(x) ((x) << S_PERR_PAYLOAD_FIFO3)
+#define F_PERR_PAYLOAD_FIFO3 V_PERR_PAYLOAD_FIFO3(1U)
+
+#define S_PERR_PAYLOAD_FIFO2 25
+#define V_PERR_PAYLOAD_FIFO2(x) ((x) << S_PERR_PAYLOAD_FIFO2)
+#define F_PERR_PAYLOAD_FIFO2 V_PERR_PAYLOAD_FIFO2(1U)
+
#define A_SGE_INT_ENABLE1 0x1028
#define A_SGE_PERR_ENABLE1 0x102c
#define A_SGE_INT_CAUSE2 0x1030
@@ -1105,6 +1306,22 @@
#define V_PERR_DB_FIFO(x) ((x) << S_PERR_DB_FIFO)
#define F_PERR_DB_FIFO V_PERR_DB_FIFO(1U)
+#define S_TF_FIFO_PERR 24
+#define V_TF_FIFO_PERR(x) ((x) << S_TF_FIFO_PERR)
+#define F_TF_FIFO_PERR V_TF_FIFO_PERR(1U)
+
+#define S_PERR_ISW_IDMA3_FIFO 15
+#define V_PERR_ISW_IDMA3_FIFO(x) ((x) << S_PERR_ISW_IDMA3_FIFO)
+#define F_PERR_ISW_IDMA3_FIFO V_PERR_ISW_IDMA3_FIFO(1U)
+
+#define S_PERR_ISW_IDMA2_FIFO 13
+#define V_PERR_ISW_IDMA2_FIFO(x) ((x) << S_PERR_ISW_IDMA2_FIFO)
+#define F_PERR_ISW_IDMA2_FIFO V_PERR_ISW_IDMA2_FIFO(1U)
+
+#define S_SGE_IPP_FIFO_PERR 5
+#define V_SGE_IPP_FIFO_PERR(x) ((x) << S_SGE_IPP_FIFO_PERR)
+#define F_SGE_IPP_FIFO_PERR V_SGE_IPP_FIFO_PERR(1U)
+
#define A_SGE_INT_ENABLE2 0x1034
#define A_SGE_PERR_ENABLE2 0x1038
#define A_SGE_INT_CAUSE3 0x103c
@@ -1259,110 +1476,20 @@
#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
#define A_SGE_FL_BUFFER_SIZE1 0x1048
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE2 0x104c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE3 0x1050
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE4 0x1054
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE5 0x1058
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE6 0x105c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE7 0x1060
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE8 0x1064
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE9 0x1068
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE10 0x106c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE11 0x1070
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE12 0x1074
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE13 0x1078
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE14 0x107c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE15 0x1080
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_DBQ_CTXT_BADDR 0x1084
#define S_BASEADDR 3
@@ -1426,6 +1553,10 @@
#define V_NULLPTREN(x) ((x) << S_NULLPTREN)
#define F_NULLPTREN V_NULLPTREN(1U)
+#define S_HDRSTARTFLQ4K 1
+#define V_HDRSTARTFLQ4K(x) ((x) << S_HDRSTARTFLQ4K)
+#define F_HDRSTARTFLQ4K V_HDRSTARTFLQ4K(1U)
+
#define A_SGE_CONM_CTRL 0x1094
#define S_EGRTHRESHOLD 8
@@ -2243,6 +2374,34 @@
#define V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(x) ((x) << S_PERR_IDMA_SWITCH_OUTPUT_FIFO0)
#define F_PERR_IDMA_SWITCH_OUTPUT_FIFO0 V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(1U)
+#define S_PERR_POINTER_HDR_FIFO3 10
+#define V_PERR_POINTER_HDR_FIFO3(x) ((x) << S_PERR_POINTER_HDR_FIFO3)
+#define F_PERR_POINTER_HDR_FIFO3 V_PERR_POINTER_HDR_FIFO3(1U)
+
+#define S_PERR_POINTER_HDR_FIFO2 9
+#define V_PERR_POINTER_HDR_FIFO2(x) ((x) << S_PERR_POINTER_HDR_FIFO2)
+#define F_PERR_POINTER_HDR_FIFO2 V_PERR_POINTER_HDR_FIFO2(1U)
+
+#define S_PERR_POINTER_DATA_FIFO3 8
+#define V_PERR_POINTER_DATA_FIFO3(x) ((x) << S_PERR_POINTER_DATA_FIFO3)
+#define F_PERR_POINTER_DATA_FIFO3 V_PERR_POINTER_DATA_FIFO3(1U)
+
+#define S_PERR_POINTER_DATA_FIFO2 7
+#define V_PERR_POINTER_DATA_FIFO2(x) ((x) << S_PERR_POINTER_DATA_FIFO2)
+#define F_PERR_POINTER_DATA_FIFO2 V_PERR_POINTER_DATA_FIFO2(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO3 3
+#define V_PERR_IDMA2IMSG_FIFO3(x) ((x) << S_PERR_IDMA2IMSG_FIFO3)
+#define F_PERR_IDMA2IMSG_FIFO3 V_PERR_IDMA2IMSG_FIFO3(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO2 2
+#define V_PERR_IDMA2IMSG_FIFO2(x) ((x) << S_PERR_IDMA2IMSG_FIFO2)
+#define F_PERR_IDMA2IMSG_FIFO2 V_PERR_IDMA2IMSG_FIFO2(1U)
+
+#define S_PERR_HINT_DELAY_FIFO 0
+#define V_PERR_HINT_DELAY_FIFO(x) ((x) << S_PERR_HINT_DELAY_FIFO)
+#define F_PERR_HINT_DELAY_FIFO V_PERR_HINT_DELAY_FIFO(1U)
+
#define A_SGE_INT_ENABLE5 0x1110
#define A_SGE_PERR_ENABLE5 0x1114
#define A_SGE_DBFIFO_STATUS2 0x1118
@@ -2359,6 +2518,46 @@
#define V_TX_COALESCE_PRI(x) ((x) << S_TX_COALESCE_PRI)
#define F_TX_COALESCE_PRI V_TX_COALESCE_PRI(1U)
+#define S_HINT_SGE_SEL 31
+#define V_HINT_SGE_SEL(x) ((x) << S_HINT_SGE_SEL)
+#define F_HINT_SGE_SEL V_HINT_SGE_SEL(1U)
+
+#define S_HINT_SEL 30
+#define V_HINT_SEL(x) ((x) << S_HINT_SEL)
+#define F_HINT_SEL V_HINT_SEL(1U)
+
+#define S_HINT_DISABLE 29
+#define V_HINT_DISABLE(x) ((x) << S_HINT_DISABLE)
+#define F_HINT_DISABLE V_HINT_DISABLE(1U)
+
+#define S_RXCPLMODE_ISCSI 28
+#define V_RXCPLMODE_ISCSI(x) ((x) << S_RXCPLMODE_ISCSI)
+#define F_RXCPLMODE_ISCSI V_RXCPLMODE_ISCSI(1U)
+
+#define S_RXCPLMODE_NVMT 27
+#define V_RXCPLMODE_NVMT(x) ((x) << S_RXCPLMODE_NVMT)
+#define F_RXCPLMODE_NVMT V_RXCPLMODE_NVMT(1U)
+
+#define S_WRE_REPLAY_INORDER 26
+#define V_WRE_REPLAY_INORDER(x) ((x) << S_WRE_REPLAY_INORDER)
+#define F_WRE_REPLAY_INORDER V_WRE_REPLAY_INORDER(1U)
+
+#define S_ETH2XEN 25
+#define V_ETH2XEN(x) ((x) << S_ETH2XEN)
+#define F_ETH2XEN V_ETH2XEN(1U)
+
+#define S_ARMDBENDDIS 24
+#define V_ARMDBENDDIS(x) ((x) << S_ARMDBENDDIS)
+#define F_ARMDBENDDIS V_ARMDBENDDIS(1U)
+
+#define S_PACKPADT7 23
+#define V_PACKPADT7(x) ((x) << S_PACKPADT7)
+#define F_PACKPADT7 V_PACKPADT7(1U)
+
+#define S_WRE_UPFLCREDIT 22
+#define V_WRE_UPFLCREDIT(x) ((x) << S_WRE_UPFLCREDIT)
+#define F_WRE_UPFLCREDIT V_WRE_UPFLCREDIT(1U)
+
#define A_SGE_DEEP_SLEEP 0x1128
#define S_IDMA1_SLEEP_STATUS 11
@@ -2493,6 +2692,42 @@
#define V_FATAL_DEQ(x) ((x) << S_FATAL_DEQ)
#define F_FATAL_DEQ V_FATAL_DEQ(1U)
+#define S_FATAL_DEQ0_DRDY 29
+#define M_FATAL_DEQ0_DRDY 0x7U
+#define V_FATAL_DEQ0_DRDY(x) ((x) << S_FATAL_DEQ0_DRDY)
+#define G_FATAL_DEQ0_DRDY(x) (((x) >> S_FATAL_DEQ0_DRDY) & M_FATAL_DEQ0_DRDY)
+
+#define S_FATAL_OUT0_DRDY 26
+#define M_FATAL_OUT0_DRDY 0x7U
+#define V_FATAL_OUT0_DRDY(x) ((x) << S_FATAL_OUT0_DRDY)
+#define G_FATAL_OUT0_DRDY(x) (((x) >> S_FATAL_OUT0_DRDY) & M_FATAL_OUT0_DRDY)
+
+#define S_IMSG_DBG3_STUCK 25
+#define V_IMSG_DBG3_STUCK(x) ((x) << S_IMSG_DBG3_STUCK)
+#define F_IMSG_DBG3_STUCK V_IMSG_DBG3_STUCK(1U)
+
+#define S_IMSG_DBG2_STUCK 24
+#define V_IMSG_DBG2_STUCK(x) ((x) << S_IMSG_DBG2_STUCK)
+#define F_IMSG_DBG2_STUCK V_IMSG_DBG2_STUCK(1U)
+
+#define S_IMSG_DBG1_STUCK 23
+#define V_IMSG_DBG1_STUCK(x) ((x) << S_IMSG_DBG1_STUCK)
+#define F_IMSG_DBG1_STUCK V_IMSG_DBG1_STUCK(1U)
+
+#define S_IMSG_DBG0_STUCK 22
+#define V_IMSG_DBG0_STUCK(x) ((x) << S_IMSG_DBG0_STUCK)
+#define F_IMSG_DBG0_STUCK V_IMSG_DBG0_STUCK(1U)
+
+#define S_FATAL_DEQ1_DRDY 3
+#define M_FATAL_DEQ1_DRDY 0x3U
+#define V_FATAL_DEQ1_DRDY(x) ((x) << S_FATAL_DEQ1_DRDY)
+#define G_FATAL_DEQ1_DRDY(x) (((x) >> S_FATAL_DEQ1_DRDY) & M_FATAL_DEQ1_DRDY)
+
+#define S_FATAL_OUT1_DRDY 1
+#define M_FATAL_OUT1_DRDY 0x3U
+#define V_FATAL_OUT1_DRDY(x) ((x) << S_FATAL_OUT1_DRDY)
+#define G_FATAL_OUT1_DRDY(x) (((x) >> S_FATAL_OUT1_DRDY) & M_FATAL_OUT1_DRDY)
+
#define A_SGE_DOORBELL_THROTTLE_THRESHOLD 0x112c
#define S_THROTTLE_THRESHOLD_FL 16
@@ -2612,6 +2847,55 @@
#define V_DBPTBUFRSV0(x) ((x) << S_DBPTBUFRSV0)
#define G_DBPTBUFRSV0(x) (((x) >> S_DBPTBUFRSV0) & M_DBPTBUFRSV0)
+#define A_SGE_TBUF_CONTROL0 0x114c
+#define A_SGE_TBUF_CONTROL1 0x1150
+
+#define S_DBPTBUFRSV3 9
+#define M_DBPTBUFRSV3 0x1ffU
+#define V_DBPTBUFRSV3(x) ((x) << S_DBPTBUFRSV3)
+#define G_DBPTBUFRSV3(x) (((x) >> S_DBPTBUFRSV3) & M_DBPTBUFRSV3)
+
+#define S_DBPTBUFRSV2 0
+#define M_DBPTBUFRSV2 0x1ffU
+#define V_DBPTBUFRSV2(x) ((x) << S_DBPTBUFRSV2)
+#define G_DBPTBUFRSV2(x) (((x) >> S_DBPTBUFRSV2) & M_DBPTBUFRSV2)
+
+#define A_SGE_TBUF_CONTROL2 0x1154
+
+#define S_DBPTBUFRSV5 9
+#define M_DBPTBUFRSV5 0x1ffU
+#define V_DBPTBUFRSV5(x) ((x) << S_DBPTBUFRSV5)
+#define G_DBPTBUFRSV5(x) (((x) >> S_DBPTBUFRSV5) & M_DBPTBUFRSV5)
+
+#define S_DBPTBUFRSV4 0
+#define M_DBPTBUFRSV4 0x1ffU
+#define V_DBPTBUFRSV4(x) ((x) << S_DBPTBUFRSV4)
+#define G_DBPTBUFRSV4(x) (((x) >> S_DBPTBUFRSV4) & M_DBPTBUFRSV4)
+
+#define A_SGE_TBUF_CONTROL3 0x1158
+
+#define S_DBPTBUFRSV7 9
+#define M_DBPTBUFRSV7 0x1ffU
+#define V_DBPTBUFRSV7(x) ((x) << S_DBPTBUFRSV7)
+#define G_DBPTBUFRSV7(x) (((x) >> S_DBPTBUFRSV7) & M_DBPTBUFRSV7)
+
+#define S_DBPTBUFRSV6 0
+#define M_DBPTBUFRSV6 0x1ffU
+#define V_DBPTBUFRSV6(x) ((x) << S_DBPTBUFRSV6)
+#define G_DBPTBUFRSV6(x) (((x) >> S_DBPTBUFRSV6) & M_DBPTBUFRSV6)
+
+#define A_SGE_TBUF_CONTROL4 0x115c
+
+#define S_DBPTBUFRSV9 9
+#define M_DBPTBUFRSV9 0x1ffU
+#define V_DBPTBUFRSV9(x) ((x) << S_DBPTBUFRSV9)
+#define G_DBPTBUFRSV9(x) (((x) >> S_DBPTBUFRSV9) & M_DBPTBUFRSV9)
+
+#define S_DBPTBUFRSV8 0
+#define M_DBPTBUFRSV8 0x1ffU
+#define V_DBPTBUFRSV8(x) ((x) << S_DBPTBUFRSV8)
+#define G_DBPTBUFRSV8(x) (((x) >> S_DBPTBUFRSV8) & M_DBPTBUFRSV8)
+
#define A_SGE_PC0_REQ_BIST_CMD 0x1180
#define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
#define A_SGE_PC1_REQ_BIST_CMD 0x1190
@@ -2620,6 +2904,113 @@
#define A_SGE_PC0_RSP_BIST_ERROR_CNT 0x11a4
#define A_SGE_PC1_RSP_BIST_CMD 0x11b0
#define A_SGE_PC1_RSP_BIST_ERROR_CNT 0x11b4
+#define A_SGE_DBQ_TIMER_THRESH0 0x11b8
+
+#define S_TXTIMETH3 24
+#define M_TXTIMETH3 0x3fU
+#define V_TXTIMETH3(x) ((x) << S_TXTIMETH3)
+#define G_TXTIMETH3(x) (((x) >> S_TXTIMETH3) & M_TXTIMETH3)
+
+#define S_TXTIMETH2 16
+#define M_TXTIMETH2 0x3fU
+#define V_TXTIMETH2(x) ((x) << S_TXTIMETH2)
+#define G_TXTIMETH2(x) (((x) >> S_TXTIMETH2) & M_TXTIMETH2)
+
+#define S_TXTIMETH1 8
+#define M_TXTIMETH1 0x3fU
+#define V_TXTIMETH1(x) ((x) << S_TXTIMETH1)
+#define G_TXTIMETH1(x) (((x) >> S_TXTIMETH1) & M_TXTIMETH1)
+
+#define S_TXTIMETH0 0
+#define M_TXTIMETH0 0x3fU
+#define V_TXTIMETH0(x) ((x) << S_TXTIMETH0)
+#define G_TXTIMETH0(x) (((x) >> S_TXTIMETH0) & M_TXTIMETH0)
+
+#define A_SGE_DBQ_TIMER_THRESH1 0x11bc
+
+#define S_TXTIMETH7 24
+#define M_TXTIMETH7 0x3fU
+#define V_TXTIMETH7(x) ((x) << S_TXTIMETH7)
+#define G_TXTIMETH7(x) (((x) >> S_TXTIMETH7) & M_TXTIMETH7)
+
+#define S_TXTIMETH6 16
+#define M_TXTIMETH6 0x3fU
+#define V_TXTIMETH6(x) ((x) << S_TXTIMETH6)
+#define G_TXTIMETH6(x) (((x) >> S_TXTIMETH6) & M_TXTIMETH6)
+
+#define S_TXTIMETH5 8
+#define M_TXTIMETH5 0x3fU
+#define V_TXTIMETH5(x) ((x) << S_TXTIMETH5)
+#define G_TXTIMETH5(x) (((x) >> S_TXTIMETH5) & M_TXTIMETH5)
+
+#define S_TXTIMETH4 0
+#define M_TXTIMETH4 0x3fU
+#define V_TXTIMETH4(x) ((x) << S_TXTIMETH4)
+#define G_TXTIMETH4(x) (((x) >> S_TXTIMETH4) & M_TXTIMETH4)
+
+#define A_SGE_DBQ_TIMER_CONFIG 0x11c0
+
+#define S_DBQ_TIMER_OP 0
+#define M_DBQ_TIMER_OP 0xffU
+#define V_DBQ_TIMER_OP(x) ((x) << S_DBQ_TIMER_OP)
+#define G_DBQ_TIMER_OP(x) (((x) >> S_DBQ_TIMER_OP) & M_DBQ_TIMER_OP)
+
+#define A_SGE_DBQ_TIMER_DBG 0x11c4
+
+#define S_DBQ_TIMER_CMD 31
+#define V_DBQ_TIMER_CMD(x) ((x) << S_DBQ_TIMER_CMD)
+#define F_DBQ_TIMER_CMD V_DBQ_TIMER_CMD(1U)
+
+#define S_DBQ_TIMER_INDEX 24
+#define M_DBQ_TIMER_INDEX 0x3fU
+#define V_DBQ_TIMER_INDEX(x) ((x) << S_DBQ_TIMER_INDEX)
+#define G_DBQ_TIMER_INDEX(x) (((x) >> S_DBQ_TIMER_INDEX) & M_DBQ_TIMER_INDEX)
+
+#define S_DBQ_TIMER_QCNT 0
+#define M_DBQ_TIMER_QCNT 0x1ffffU
+#define V_DBQ_TIMER_QCNT(x) ((x) << S_DBQ_TIMER_QCNT)
+#define G_DBQ_TIMER_QCNT(x) (((x) >> S_DBQ_TIMER_QCNT) & M_DBQ_TIMER_QCNT)
+
+#define A_SGE_INT_CAUSE8 0x11c8
+
+#define S_TRACE_RXPERR 8
+#define V_TRACE_RXPERR(x) ((x) << S_TRACE_RXPERR)
+#define F_TRACE_RXPERR V_TRACE_RXPERR(1U)
+
+#define S_U3_RXPERR 7
+#define V_U3_RXPERR(x) ((x) << S_U3_RXPERR)
+#define F_U3_RXPERR V_U3_RXPERR(1U)
+
+#define S_U2_RXPERR 6
+#define V_U2_RXPERR(x) ((x) << S_U2_RXPERR)
+#define F_U2_RXPERR V_U2_RXPERR(1U)
+
+#define S_U1_RXPERR 5
+#define V_U1_RXPERR(x) ((x) << S_U1_RXPERR)
+#define F_U1_RXPERR V_U1_RXPERR(1U)
+
+#define S_U0_RXPERR 4
+#define V_U0_RXPERR(x) ((x) << S_U0_RXPERR)
+#define F_U0_RXPERR V_U0_RXPERR(1U)
+
+#define S_T3_RXPERR 3
+#define V_T3_RXPERR(x) ((x) << S_T3_RXPERR)
+#define F_T3_RXPERR V_T3_RXPERR(1U)
+
+#define S_T2_RXPERR 2
+#define V_T2_RXPERR(x) ((x) << S_T2_RXPERR)
+#define F_T2_RXPERR V_T2_RXPERR(1U)
+
+#define S_T1_RXPERR 1
+#define V_T1_RXPERR(x) ((x) << S_T1_RXPERR)
+#define F_T1_RXPERR V_T1_RXPERR(1U)
+
+#define S_T0_RXPERR 0
+#define V_T0_RXPERR(x) ((x) << S_T0_RXPERR)
+#define F_T0_RXPERR V_T0_RXPERR(1U)
+
+#define A_SGE_INT_ENABLE8 0x11cc
+#define A_SGE_PERR_ENABLE8 0x11d0
#define A_SGE_CTXT_CMD 0x11fc
#define S_BUSY 31
@@ -2648,6 +3039,17 @@
#define A_SGE_CTXT_DATA4 0x1210
#define A_SGE_CTXT_DATA5 0x1214
#define A_SGE_CTXT_DATA6 0x1218
+
+#define S_DATA_UNUSED 7
+#define M_DATA_UNUSED 0x1ffffffU
+#define V_DATA_UNUSED(x) ((x) << S_DATA_UNUSED)
+#define G_DATA_UNUSED(x) (((x) >> S_DATA_UNUSED) & M_DATA_UNUSED)
+
+#define S_DATA6 0
+#define M_DATA6 0x7fU
+#define V_DATA6(x) ((x) << S_DATA6)
+#define G_DATA6(x) (((x) >> S_DATA6) & M_DATA6)
+
#define A_SGE_CTXT_DATA7 0x121c
#define A_SGE_CTXT_MASK0 0x1220
#define A_SGE_CTXT_MASK1 0x1224
@@ -2656,6 +3058,17 @@
#define A_SGE_CTXT_MASK4 0x1230
#define A_SGE_CTXT_MASK5 0x1234
#define A_SGE_CTXT_MASK6 0x1238
+
+#define S_MASK_UNUSED 7
+#define M_MASK_UNUSED 0x1ffffffU
+#define V_MASK_UNUSED(x) ((x) << S_MASK_UNUSED)
+#define G_MASK_UNUSED(x) (((x) >> S_MASK_UNUSED) & M_MASK_UNUSED)
+
+#define S_MASK 0
+#define M_MASK 0x7fU
+#define V_MASK(x) ((x) << S_MASK)
+#define G_MASK(x) (((x) >> S_MASK) & M_MASK)
+
#define A_SGE_CTXT_MASK7 0x123c
#define A_SGE_QBASE_MAP0 0x1240
@@ -2674,6 +3087,10 @@
#define V_INGRESS0_SIZE(x) ((x) << S_INGRESS0_SIZE)
#define G_INGRESS0_SIZE(x) (((x) >> S_INGRESS0_SIZE) & M_INGRESS0_SIZE)
+#define S_DESTINATION 31
+#define V_DESTINATION(x) ((x) << S_DESTINATION)
+#define F_DESTINATION V_DESTINATION(1U)
+
#define A_SGE_QBASE_MAP1 0x1244
#define S_EGRESS0_BASE 0
@@ -2719,6 +3136,10 @@
#define V_FLMTHRESH(x) ((x) << S_FLMTHRESH)
#define G_FLMTHRESH(x) (((x) >> S_FLMTHRESH) & M_FLMTHRESH)
+#define S_CONENMIDDLE 7
+#define V_CONENMIDDLE(x) ((x) << S_CONENMIDDLE)
+#define F_CONENMIDDLE V_CONENMIDDLE(1U)
+
#define A_SGE_DEBUG_CONM 0x1258
#define S_MPS_CH_CNG 16
@@ -2745,6 +3166,16 @@
#define V_LAST_QID(x) ((x) << S_LAST_QID)
#define G_LAST_QID(x) (((x) >> S_LAST_QID) & M_LAST_QID)
+#define S_CH_CNG 16
+#define M_CH_CNG 0xffffU
+#define V_CH_CNG(x) ((x) << S_CH_CNG)
+#define G_CH_CNG(x) (((x) >> S_CH_CNG) & M_CH_CNG)
+
+#define S_CH_SEL 14
+#define M_CH_SEL 0x3U
+#define V_CH_SEL(x) ((x) << S_CH_SEL)
+#define G_CH_SEL(x) (((x) >> S_CH_SEL) & M_CH_SEL)
+
#define A_SGE_DBG_QUEUE_STAT0_CTRL 0x125c
#define S_IMSG_GTS_SEL 18
@@ -2766,6 +3197,7 @@
#define A_SGE_DBG_BAR2_PKT_CNT 0x126c
#define A_SGE_DBG_DB_PKT_CNT 0x1270
#define A_SGE_DBG_GTS_PKT_CNT 0x1274
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_16 0x1278
#define A_SGE_DEBUG_DATA_HIGH_INDEX_0 0x1280
#define S_CIM_WM 24
@@ -3965,6 +4397,352 @@
#define V_VFWCOFFSET(x) ((x) << S_VFWCOFFSET)
#define G_VFWCOFFSET(x) (((x) >> S_VFWCOFFSET) & M_VFWCOFFSET)
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_17 0x1340
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_18 0x1344
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_19 0x1348
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_20 0x134c
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_21 0x1350
+#define A_SGE_DEBUG_DATA_LOW_INDEX_16 0x1354
+#define A_SGE_DEBUG_DATA_LOW_INDEX_17 0x1358
+#define A_SGE_DEBUG_DATA_LOW_INDEX_18 0x135c
+#define A_SGE_INT_CAUSE7 0x1360
+
+#define S_HINT_FIFO_FULL 25
+#define V_HINT_FIFO_FULL(x) ((x) << S_HINT_FIFO_FULL)
+#define F_HINT_FIFO_FULL V_HINT_FIFO_FULL(1U)
+
+#define S_CERR_HINT_DELAY_FIFO 24
+#define V_CERR_HINT_DELAY_FIFO(x) ((x) << S_CERR_HINT_DELAY_FIFO)
+#define F_CERR_HINT_DELAY_FIFO V_CERR_HINT_DELAY_FIFO(1U)
+
+#define S_COAL_TIMER_FIFO_PERR 23
+#define V_COAL_TIMER_FIFO_PERR(x) ((x) << S_COAL_TIMER_FIFO_PERR)
+#define F_COAL_TIMER_FIFO_PERR V_COAL_TIMER_FIFO_PERR(1U)
+
+#define S_CMP_FIFO_PERR 22
+#define V_CMP_FIFO_PERR(x) ((x) << S_CMP_FIFO_PERR)
+#define F_CMP_FIFO_PERR V_CMP_FIFO_PERR(1U)
+
+#define S_SGE_IPP_FIFO_CERR 21
+#define V_SGE_IPP_FIFO_CERR(x) ((x) << S_SGE_IPP_FIFO_CERR)
+#define F_SGE_IPP_FIFO_CERR V_SGE_IPP_FIFO_CERR(1U)
+
+#define S_CERR_ING_CTXT_CACHE 20
+#define V_CERR_ING_CTXT_CACHE(x) ((x) << S_CERR_ING_CTXT_CACHE)
+#define F_CERR_ING_CTXT_CACHE V_CERR_ING_CTXT_CACHE(1U)
+
+#define S_IMSG_CNTX_PERR 19
+#define V_IMSG_CNTX_PERR(x) ((x) << S_IMSG_CNTX_PERR)
+#define F_IMSG_CNTX_PERR V_IMSG_CNTX_PERR(1U)
+
+#define S_PD_FIFO_PERR 18
+#define V_PD_FIFO_PERR(x) ((x) << S_PD_FIFO_PERR)
+#define F_PD_FIFO_PERR V_PD_FIFO_PERR(1U)
+
+#define S_IMSG_512_FIFO_PERR 17
+#define V_IMSG_512_FIFO_PERR(x) ((x) << S_IMSG_512_FIFO_PERR)
+#define F_IMSG_512_FIFO_PERR V_IMSG_512_FIFO_PERR(1U)
+
+#define S_CPLSW_FIFO_PERR 16
+#define V_CPLSW_FIFO_PERR(x) ((x) << S_CPLSW_FIFO_PERR)
+#define F_CPLSW_FIFO_PERR V_CPLSW_FIFO_PERR(1U)
+
+#define S_IMSG_FIFO_PERR 15
+#define V_IMSG_FIFO_PERR(x) ((x) << S_IMSG_FIFO_PERR)
+#define F_IMSG_FIFO_PERR V_IMSG_FIFO_PERR(1U)
+
+#define S_CERR_ITP_EVR 14
+#define V_CERR_ITP_EVR(x) ((x) << S_CERR_ITP_EVR)
+#define F_CERR_ITP_EVR V_CERR_ITP_EVR(1U)
+
+#define S_CERR_CONM_SRAM 13
+#define V_CERR_CONM_SRAM(x) ((x) << S_CERR_CONM_SRAM)
+#define F_CERR_CONM_SRAM V_CERR_CONM_SRAM(1U)
+
+#define S_CERR_EGR_CTXT_CACHE 12
+#define V_CERR_EGR_CTXT_CACHE(x) ((x) << S_CERR_EGR_CTXT_CACHE)
+#define F_CERR_EGR_CTXT_CACHE V_CERR_EGR_CTXT_CACHE(1U)
+
+#define S_CERR_FLM_CNTXMEM 11
+#define V_CERR_FLM_CNTXMEM(x) ((x) << S_CERR_FLM_CNTXMEM)
+#define F_CERR_FLM_CNTXMEM V_CERR_FLM_CNTXMEM(1U)
+
+#define S_CERR_FUNC_QBASE 10
+#define V_CERR_FUNC_QBASE(x) ((x) << S_CERR_FUNC_QBASE)
+#define F_CERR_FUNC_QBASE V_CERR_FUNC_QBASE(1U)
+
+#define S_IMSG_CNTX_CERR 9
+#define V_IMSG_CNTX_CERR(x) ((x) << S_IMSG_CNTX_CERR)
+#define F_IMSG_CNTX_CERR V_IMSG_CNTX_CERR(1U)
+
+#define S_PD_FIFO_CERR 8
+#define V_PD_FIFO_CERR(x) ((x) << S_PD_FIFO_CERR)
+#define F_PD_FIFO_CERR V_PD_FIFO_CERR(1U)
+
+#define S_IMSG_512_FIFO_CERR 7
+#define V_IMSG_512_FIFO_CERR(x) ((x) << S_IMSG_512_FIFO_CERR)
+#define F_IMSG_512_FIFO_CERR V_IMSG_512_FIFO_CERR(1U)
+
+#define S_CPLSW_FIFO_CERR 6
+#define V_CPLSW_FIFO_CERR(x) ((x) << S_CPLSW_FIFO_CERR)
+#define F_CPLSW_FIFO_CERR V_CPLSW_FIFO_CERR(1U)
+
+#define S_IMSG_FIFO_CERR 5
+#define V_IMSG_FIFO_CERR(x) ((x) << S_IMSG_FIFO_CERR)
+#define F_IMSG_FIFO_CERR V_IMSG_FIFO_CERR(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO3 4
+#define V_CERR_HEADERSPLIT_FIFO3(x) ((x) << S_CERR_HEADERSPLIT_FIFO3)
+#define F_CERR_HEADERSPLIT_FIFO3 V_CERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO2 3
+#define V_CERR_HEADERSPLIT_FIFO2(x) ((x) << S_CERR_HEADERSPLIT_FIFO2)
+#define F_CERR_HEADERSPLIT_FIFO2 V_CERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO1 2
+#define V_CERR_HEADERSPLIT_FIFO1(x) ((x) << S_CERR_HEADERSPLIT_FIFO1)
+#define F_CERR_HEADERSPLIT_FIFO1 V_CERR_HEADERSPLIT_FIFO1(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO0 1
+#define V_CERR_HEADERSPLIT_FIFO0(x) ((x) << S_CERR_HEADERSPLIT_FIFO0)
+#define F_CERR_HEADERSPLIT_FIFO0 V_CERR_HEADERSPLIT_FIFO0(1U)
+
+#define S_CERR_FLM_L1CACHE 0
+#define V_CERR_FLM_L1CACHE(x) ((x) << S_CERR_FLM_L1CACHE)
+#define F_CERR_FLM_L1CACHE V_CERR_FLM_L1CACHE(1U)
+
+#define A_SGE_INT_ENABLE7 0x1364
+#define A_SGE_PERR_ENABLE7 0x1368
+#define A_SGE_ING_COMP_COAL_CFG 0x1700
+
+#define S_USE_PTP_TIMER 27
+#define V_USE_PTP_TIMER(x) ((x) << S_USE_PTP_TIMER)
+#define F_USE_PTP_TIMER V_USE_PTP_TIMER(1U)
+
+#define S_IMSG_SET_OFLOW_ALL_ENTRIES_43060 26
+#define V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(x) ((x) << S_IMSG_SET_OFLOW_ALL_ENTRIES_43060)
+#define F_IMSG_SET_OFLOW_ALL_ENTRIES_43060 V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(1U)
+
+#define S_IMSG_STUCK_INDIRECT_QUEUE_42907 25
+#define V_IMSG_STUCK_INDIRECT_QUEUE_42907(x) ((x) << S_IMSG_STUCK_INDIRECT_QUEUE_42907)
+#define F_IMSG_STUCK_INDIRECT_QUEUE_42907 V_IMSG_STUCK_INDIRECT_QUEUE_42907(1U)
+
+#define S_COMP_COAL_PIDX_INCR 24
+#define V_COMP_COAL_PIDX_INCR(x) ((x) << S_COMP_COAL_PIDX_INCR)
+#define F_COMP_COAL_PIDX_INCR V_COMP_COAL_PIDX_INCR(1U)
+
+#define S_COMP_COAL_TIMER_CNT 16
+#define M_COMP_COAL_TIMER_CNT 0xffU
+#define V_COMP_COAL_TIMER_CNT(x) ((x) << S_COMP_COAL_TIMER_CNT)
+#define G_COMP_COAL_TIMER_CNT(x) (((x) >> S_COMP_COAL_TIMER_CNT) & M_COMP_COAL_TIMER_CNT)
+
+#define S_COMP_COAL_CNTR_TH 8
+#define M_COMP_COAL_CNTR_TH 0xffU
+#define V_COMP_COAL_CNTR_TH(x) ((x) << S_COMP_COAL_CNTR_TH)
+#define G_COMP_COAL_CNTR_TH(x) (((x) >> S_COMP_COAL_CNTR_TH) & M_COMP_COAL_CNTR_TH)
+
+#define S_COMP_COAL_OPCODE 0
+#define M_COMP_COAL_OPCODE 0xffU
+#define V_COMP_COAL_OPCODE(x) ((x) << S_COMP_COAL_OPCODE)
+#define G_COMP_COAL_OPCODE(x) (((x) >> S_COMP_COAL_OPCODE) & M_COMP_COAL_OPCODE)
+
+#define A_SGE_ING_IMSG_DBG 0x1704
+
+#define S_STUCK_CTR_TH 1
+#define M_STUCK_CTR_TH 0xffU
+#define V_STUCK_CTR_TH(x) ((x) << S_STUCK_CTR_TH)
+#define G_STUCK_CTR_TH(x) (((x) >> S_STUCK_CTR_TH) & M_STUCK_CTR_TH)
+
+#define S_STUCK_INT_EN 0
+#define V_STUCK_INT_EN(x) ((x) << S_STUCK_INT_EN)
+#define F_STUCK_INT_EN V_STUCK_INT_EN(1U)
+
+#define A_SGE_ING_IMSG_RSP0_DBG 0x1708
+
+#define S_IDMA1_QID 16
+#define M_IDMA1_QID 0xffffU
+#define V_IDMA1_QID(x) ((x) << S_IDMA1_QID)
+#define G_IDMA1_QID(x) (((x) >> S_IDMA1_QID) & M_IDMA1_QID)
+
+#define S_IDMA0_QID 0
+#define M_IDMA0_QID 0xffffU
+#define V_IDMA0_QID(x) ((x) << S_IDMA0_QID)
+#define G_IDMA0_QID(x) (((x) >> S_IDMA0_QID) & M_IDMA0_QID)
+
+#define A_SGE_ING_IMSG_RSP1_DBG 0x170c
+
+#define S_IDMA3_QID 16
+#define M_IDMA3_QID 0xffffU
+#define V_IDMA3_QID(x) ((x) << S_IDMA3_QID)
+#define G_IDMA3_QID(x) (((x) >> S_IDMA3_QID) & M_IDMA3_QID)
+
+#define S_IDMA2_QID 0
+#define M_IDMA2_QID 0xffffU
+#define V_IDMA2_QID(x) ((x) << S_IDMA2_QID)
+#define G_IDMA2_QID(x) (((x) >> S_IDMA2_QID) & M_IDMA2_QID)
+
+#define A_SGE_LB_MODE 0x1710
+
+#define S_LB_MODE 0
+#define M_LB_MODE 0x3U
+#define V_LB_MODE(x) ((x) << S_LB_MODE)
+#define G_LB_MODE(x) (((x) >> S_LB_MODE) & M_LB_MODE)
+
+#define A_SGE_IMSG_QUESCENT 0x1714
+
+#define S_IMSG_QUESCENT 0
+#define V_IMSG_QUESCENT(x) ((x) << S_IMSG_QUESCENT)
+#define F_IMSG_QUESCENT V_IMSG_QUESCENT(1U)
+
+#define A_SGE_LA_CTRL 0x1718
+
+#define S_LA_GLOBAL_EN 8
+#define V_LA_GLOBAL_EN(x) ((x) << S_LA_GLOBAL_EN)
+#define F_LA_GLOBAL_EN V_LA_GLOBAL_EN(1U)
+
+#define S_PTP_TIMESTAMP_SEL 7
+#define V_PTP_TIMESTAMP_SEL(x) ((x) << S_PTP_TIMESTAMP_SEL)
+#define F_PTP_TIMESTAMP_SEL V_PTP_TIMESTAMP_SEL(1U)
+
+#define S_CIM2SGE_ID_CHK_VLD 6
+#define V_CIM2SGE_ID_CHK_VLD(x) ((x) << S_CIM2SGE_ID_CHK_VLD)
+#define F_CIM2SGE_ID_CHK_VLD V_CIM2SGE_ID_CHK_VLD(1U)
+
+#define S_CPLSW_ID_CHK_VLD 5
+#define V_CPLSW_ID_CHK_VLD(x) ((x) << S_CPLSW_ID_CHK_VLD)
+#define F_CPLSW_ID_CHK_VLD V_CPLSW_ID_CHK_VLD(1U)
+
+#define S_FLM_ID_CHK_VLD 4
+#define V_FLM_ID_CHK_VLD(x) ((x) << S_FLM_ID_CHK_VLD)
+#define F_FLM_ID_CHK_VLD V_FLM_ID_CHK_VLD(1U)
+
+#define S_IQ_DBP_ID_CHK_VLD 3
+#define V_IQ_DBP_ID_CHK_VLD(x) ((x) << S_IQ_DBP_ID_CHK_VLD)
+#define F_IQ_DBP_ID_CHK_VLD V_IQ_DBP_ID_CHK_VLD(1U)
+
+#define S_UP_OBQ_ID_CHK_VLD 2
+#define V_UP_OBQ_ID_CHK_VLD(x) ((x) << S_UP_OBQ_ID_CHK_VLD)
+#define F_UP_OBQ_ID_CHK_VLD V_UP_OBQ_ID_CHK_VLD(1U)
+
+#define S_CIM_ID_CHK_VLD 1
+#define V_CIM_ID_CHK_VLD(x) ((x) << S_CIM_ID_CHK_VLD)
+#define F_CIM_ID_CHK_VLD V_CIM_ID_CHK_VLD(1U)
+
+#define S_DBP_ID_CHK_VLD 0
+#define V_DBP_ID_CHK_VLD(x) ((x) << S_DBP_ID_CHK_VLD)
+#define F_DBP_ID_CHK_VLD V_DBP_ID_CHK_VLD(1U)
+
+#define A_SGE_LA_CTRL_EQID_LOW 0x171c
+
+#define S_EQ_ID_CHK_LOW 0
+#define M_EQ_ID_CHK_LOW 0x1ffffU
+#define V_EQ_ID_CHK_LOW(x) ((x) << S_EQ_ID_CHK_LOW)
+#define G_EQ_ID_CHK_LOW(x) (((x) >> S_EQ_ID_CHK_LOW) & M_EQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_EQID_HIGH 0x1720
+
+#define S_EQ_ID_CHK_HIGH 0
+#define M_EQ_ID_CHK_HIGH 0x1ffffU
+#define V_EQ_ID_CHK_HIGH(x) ((x) << S_EQ_ID_CHK_HIGH)
+#define G_EQ_ID_CHK_HIGH(x) (((x) >> S_EQ_ID_CHK_HIGH) & M_EQ_ID_CHK_HIGH)
+
+#define A_SGE_LA_CTRL_IQID 0x1724
+
+#define S_IQ_ID_CHK_HIGH 16
+#define M_IQ_ID_CHK_HIGH 0xffffU
+#define V_IQ_ID_CHK_HIGH(x) ((x) << S_IQ_ID_CHK_HIGH)
+#define G_IQ_ID_CHK_HIGH(x) (((x) >> S_IQ_ID_CHK_HIGH) & M_IQ_ID_CHK_HIGH)
+
+#define S_IQ_ID_CHK_LOW 0
+#define M_IQ_ID_CHK_LOW 0xffffU
+#define V_IQ_ID_CHK_LOW(x) ((x) << S_IQ_ID_CHK_LOW)
+#define G_IQ_ID_CHK_LOW(x) (((x) >> S_IQ_ID_CHK_LOW) & M_IQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_LOW 0x1728
+
+#define S_TID_CHK_LOW 0
+#define M_TID_CHK_LOW 0xffffffU
+#define V_TID_CHK_LOW(x) ((x) << S_TID_CHK_LOW)
+#define G_TID_CHK_LOW(x) (((x) >> S_TID_CHK_LOW) & M_TID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_HIGH 0x172c
+
+#define S_TID_CHK_HIGH 0
+#define M_TID_CHK_HIGH 0xffffffU
+#define V_TID_CHK_HIGH(x) ((x) << S_TID_CHK_HIGH)
+#define G_TID_CHK_HIGH(x) (((x) >> S_TID_CHK_HIGH) & M_TID_CHK_HIGH)
+
+#define A_SGE_CFG_TP_ERR 0x173c
+
+#define S_TP_ERR_STATUS_CH3 30
+#define M_TP_ERR_STATUS_CH3 0x3U
+#define V_TP_ERR_STATUS_CH3(x) ((x) << S_TP_ERR_STATUS_CH3)
+#define G_TP_ERR_STATUS_CH3(x) (((x) >> S_TP_ERR_STATUS_CH3) & M_TP_ERR_STATUS_CH3)
+
+#define S_TP_ERR_STATUS_CH2 28
+#define M_TP_ERR_STATUS_CH2 0x3U
+#define V_TP_ERR_STATUS_CH2(x) ((x) << S_TP_ERR_STATUS_CH2)
+#define G_TP_ERR_STATUS_CH2(x) (((x) >> S_TP_ERR_STATUS_CH2) & M_TP_ERR_STATUS_CH2)
+
+#define S_TP_ERR_STATUS_CH1 26
+#define M_TP_ERR_STATUS_CH1 0x3U
+#define V_TP_ERR_STATUS_CH1(x) ((x) << S_TP_ERR_STATUS_CH1)
+#define G_TP_ERR_STATUS_CH1(x) (((x) >> S_TP_ERR_STATUS_CH1) & M_TP_ERR_STATUS_CH1)
+
+#define S_TP_ERR_STATUS_CH0 24
+#define M_TP_ERR_STATUS_CH0 0x3U
+#define V_TP_ERR_STATUS_CH0(x) ((x) << S_TP_ERR_STATUS_CH0)
+#define G_TP_ERR_STATUS_CH0(x) (((x) >> S_TP_ERR_STATUS_CH0) & M_TP_ERR_STATUS_CH0)
+
+#define S_CPL0_SIZE 16
+#define M_CPL0_SIZE 0xffU
+#define V_CPL0_SIZE(x) ((x) << S_CPL0_SIZE)
+#define G_CPL0_SIZE(x) (((x) >> S_CPL0_SIZE) & M_CPL0_SIZE)
+
+#define S_CPL1_SIZE 8
+#define M_CPL1_SIZE 0xffU
+#define V_CPL1_SIZE(x) ((x) << S_CPL1_SIZE)
+#define G_CPL1_SIZE(x) (((x) >> S_CPL1_SIZE) & M_CPL1_SIZE)
+
+#define S_SIZE_LATCH_CLR 3
+#define V_SIZE_LATCH_CLR(x) ((x) << S_SIZE_LATCH_CLR)
+#define F_SIZE_LATCH_CLR V_SIZE_LATCH_CLR(1U)
+
+#define S_EXT_LATCH_CLR 2
+#define V_EXT_LATCH_CLR(x) ((x) << S_EXT_LATCH_CLR)
+#define F_EXT_LATCH_CLR V_EXT_LATCH_CLR(1U)
+
+#define S_EXT_CHANGE_42875 1
+#define V_EXT_CHANGE_42875(x) ((x) << S_EXT_CHANGE_42875)
+#define F_EXT_CHANGE_42875 V_EXT_CHANGE_42875(1U)
+
+#define S_SIZE_CHANGE_42913 0
+#define V_SIZE_CHANGE_42913(x) ((x) << S_SIZE_CHANGE_42913)
+#define F_SIZE_CHANGE_42913 V_SIZE_CHANGE_42913(1U)
+
+#define A_SGE_CHNL0_CTX_ERROR_COUNT_PER_TID 0x1740
+#define A_SGE_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1744
+#define A_SGE_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1748
+#define A_SGE_CHNL3_CTX_ERROR_COUNT_PER_TID 0x174c
+#define A_SGE_CTX_ACC_CH0 0x1750
+
+#define S_RDMA_INV_HANDLING 24
+#define M_RDMA_INV_HANDLING 0x3U
+#define V_RDMA_INV_HANDLING(x) ((x) << S_RDMA_INV_HANDLING)
+#define G_RDMA_INV_HANDLING(x) (((x) >> S_RDMA_INV_HANDLING) & M_RDMA_INV_HANDLING)
+
+#define S_T7_TERMINATE_STATUS_EN 23
+#define V_T7_TERMINATE_STATUS_EN(x) ((x) << S_T7_TERMINATE_STATUS_EN)
+#define F_T7_TERMINATE_STATUS_EN V_T7_TERMINATE_STATUS_EN(1U)
+
+#define S_T7_DISABLE 22
+#define V_T7_DISABLE(x) ((x) << S_T7_DISABLE)
+#define F_T7_DISABLE V_T7_DISABLE(1U)
+
+#define A_SGE_CTX_ACC_CH1 0x1754
+#define A_SGE_CTX_ACC_CH2 0x1758
+#define A_SGE_CTX_ACC_CH3 0x175c
+#define A_SGE_CTX_BASE 0x1760
#define A_SGE_LA_RDPTR_0 0x1800
#define A_SGE_LA_RDDATA_0 0x1804
#define A_SGE_LA_WRPTR_0 0x1808
@@ -4296,6 +5074,11 @@
#define A_PCIE_INT_CAUSE 0x3004
#define A_PCIE_PERR_ENABLE 0x3008
+
+#define S_TGTTAGQCLIENT1PERR 29
+#define V_TGTTAGQCLIENT1PERR(x) ((x) << S_TGTTAGQCLIENT1PERR)
+#define F_TGTTAGQCLIENT1PERR V_TGTTAGQCLIENT1PERR(1U)
+
#define A_PCIE_PERR_INJECT 0x300c
#define S_IDE 0
@@ -4582,10 +5365,6 @@
#define V_LINKREQRSTPCIECRSTMODE(x) ((x) << S_LINKREQRSTPCIECRSTMODE)
#define F_LINKREQRSTPCIECRSTMODE V_LINKREQRSTPCIECRSTMODE(1U)
-#define S_T6_PIOSTOPEN 31
-#define V_T6_PIOSTOPEN(x) ((x) << S_T6_PIOSTOPEN)
-#define F_T6_PIOSTOPEN V_T6_PIOSTOPEN(1U)
-
#define A_PCIE_DMA_CTRL 0x3018
#define S_LITTLEENDIAN 7
@@ -4618,6 +5397,14 @@
#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
#define G_T6_TOTMAXTAG(x) (((x) >> S_T6_TOTMAXTAG) & M_T6_TOTMAXTAG)
+#define S_REG_VDM_ONLY 17
+#define V_REG_VDM_ONLY(x) ((x) << S_REG_VDM_ONLY)
+#define F_REG_VDM_ONLY V_REG_VDM_ONLY(1U)
+
+#define S_MULT_REQID_SUP 16
+#define V_MULT_REQID_SUP(x) ((x) << S_MULT_REQID_SUP)
+#define F_MULT_REQID_SUP V_MULT_REQID_SUP(1U)
+
#define A_PCIE_DMA_CFG 0x301c
#define S_MAXPYLDSIZE 28
@@ -4668,6 +5455,10 @@
#define V_DMADCASTFIRSTONLY(x) ((x) << S_DMADCASTFIRSTONLY)
#define F_DMADCASTFIRSTONLY V_DMADCASTFIRSTONLY(1U)
+#define S_ARMDCASTFIRSTONLY 7
+#define V_ARMDCASTFIRSTONLY(x) ((x) << S_ARMDCASTFIRSTONLY)
+#define F_ARMDCASTFIRSTONLY V_ARMDCASTFIRSTONLY(1U)
+
#define A_PCIE_DMA_STAT 0x3020
#define S_STATEREQ 28
@@ -4748,7 +5539,157 @@
#define G_PERSTTIMER(x) (((x) >> S_PERSTTIMER) & M_PERSTTIMER)
#define A_PCIE_CFG7 0x302c
+#define A_PCIE_INT_ENABLE_EXT 0x3030
+
+#define S_TCAMRSPERR 31
+#define V_TCAMRSPERR(x) ((x) << S_TCAMRSPERR)
+#define F_TCAMRSPERR V_TCAMRSPERR(1U)
+
+#define S_IPFORMQPERR 30
+#define V_IPFORMQPERR(x) ((x) << S_IPFORMQPERR)
+#define F_IPFORMQPERR V_IPFORMQPERR(1U)
+
+#define S_IPFORMQCERR 29
+#define V_IPFORMQCERR(x) ((x) << S_IPFORMQCERR)
+#define F_IPFORMQCERR V_IPFORMQCERR(1U)
+
+#define S_TRGT1GRPCERR 28
+#define V_TRGT1GRPCERR(x) ((x) << S_TRGT1GRPCERR)
+#define F_TRGT1GRPCERR V_TRGT1GRPCERR(1U)
+
+#define S_IPSOTCERR 27
+#define V_IPSOTCERR(x) ((x) << S_IPSOTCERR)
+#define F_IPSOTCERR V_IPSOTCERR(1U)
+
+#define S_IPRETRYCERR 26
+#define V_IPRETRYCERR(x) ((x) << S_IPRETRYCERR)
+#define F_IPRETRYCERR V_IPRETRYCERR(1U)
+
+#define S_IPRXDATAGRPCERR 25
+#define V_IPRXDATAGRPCERR(x) ((x) << S_IPRXDATAGRPCERR)
+#define F_IPRXDATAGRPCERR V_IPRXDATAGRPCERR(1U)
+
+#define S_IPRXHDRGRPCERR 24
+#define V_IPRXHDRGRPCERR(x) ((x) << S_IPRXHDRGRPCERR)
+#define F_IPRXHDRGRPCERR V_IPRXHDRGRPCERR(1U)
+
+#define S_A0ARBRSPORDFIFOPERR 19
+#define V_A0ARBRSPORDFIFOPERR(x) ((x) << S_A0ARBRSPORDFIFOPERR)
+#define F_A0ARBRSPORDFIFOPERR V_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_HRSPCERR 18
+#define V_HRSPCERR(x) ((x) << S_HRSPCERR)
+#define F_HRSPCERR V_HRSPCERR(1U)
+
+#define S_HREQRDCERR 17
+#define V_HREQRDCERR(x) ((x) << S_HREQRDCERR)
+#define F_HREQRDCERR V_HREQRDCERR(1U)
+
+#define S_HREQWRCERR 16
+#define V_HREQWRCERR(x) ((x) << S_HREQWRCERR)
+#define F_HREQWRCERR V_HREQWRCERR(1U)
+
+#define S_DRSPCERR 15
+#define V_DRSPCERR(x) ((x) << S_DRSPCERR)
+#define F_DRSPCERR V_DRSPCERR(1U)
+
+#define S_DREQRDCERR 14
+#define V_DREQRDCERR(x) ((x) << S_DREQRDCERR)
+#define F_DREQRDCERR V_DREQRDCERR(1U)
+
+#define S_DREQWRCERR 13
+#define V_DREQWRCERR(x) ((x) << S_DREQWRCERR)
+#define F_DREQWRCERR V_DREQWRCERR(1U)
+
+#define S_CRSPCERR 12
+#define V_CRSPCERR(x) ((x) << S_CRSPCERR)
+#define F_CRSPCERR V_CRSPCERR(1U)
+
+#define S_ARSPPERR 11
+#define V_ARSPPERR(x) ((x) << S_ARSPPERR)
+#define F_ARSPPERR V_ARSPPERR(1U)
+
+#define S_AREQRDPERR 10
+#define V_AREQRDPERR(x) ((x) << S_AREQRDPERR)
+#define F_AREQRDPERR V_AREQRDPERR(1U)
+
+#define S_AREQWRPERR 9
+#define V_AREQWRPERR(x) ((x) << S_AREQWRPERR)
+#define F_AREQWRPERR V_AREQWRPERR(1U)
+
+#define S_PIOREQGRPCERR 8
+#define V_PIOREQGRPCERR(x) ((x) << S_PIOREQGRPCERR)
+#define F_PIOREQGRPCERR V_PIOREQGRPCERR(1U)
+
+#define S_ARSPCERR 7
+#define V_ARSPCERR(x) ((x) << S_ARSPCERR)
+#define F_ARSPCERR V_ARSPCERR(1U)
+
+#define S_AREQRDCERR 6
+#define V_AREQRDCERR(x) ((x) << S_AREQRDCERR)
+#define F_AREQRDCERR V_AREQRDCERR(1U)
+
+#define S_AREQWRCERR 5
+#define V_AREQWRCERR(x) ((x) << S_AREQWRCERR)
+#define F_AREQWRCERR V_AREQWRCERR(1U)
+
+#define S_MARSPPERR 4
+#define V_MARSPPERR(x) ((x) << S_MARSPPERR)
+#define F_MARSPPERR V_MARSPPERR(1U)
+
+#define S_INICMAWDATAORDPERR 3
+#define V_INICMAWDATAORDPERR(x) ((x) << S_INICMAWDATAORDPERR)
+#define F_INICMAWDATAORDPERR V_INICMAWDATAORDPERR(1U)
+
+#define S_EMUPERR 2
+#define V_EMUPERR(x) ((x) << S_EMUPERR)
+#define F_EMUPERR V_EMUPERR(1U)
+
+#define S_ERRSPPERR 1
+#define V_ERRSPPERR(x) ((x) << S_ERRSPPERR)
+#define F_ERRSPPERR V_ERRSPPERR(1U)
+
+#define S_MSTGRPCERR 0
+#define V_MSTGRPCERR(x) ((x) << S_MSTGRPCERR)
+#define F_MSTGRPCERR V_MSTGRPCERR(1U)
+
+#define A_PCIE_INT_ENABLE_X8 0x3034
+
+#define S_X8TGTGRPPERR 23
+#define V_X8TGTGRPPERR(x) ((x) << S_X8TGTGRPPERR)
+#define F_X8TGTGRPPERR V_X8TGTGRPPERR(1U)
+
+#define S_X8IPSOTPERR 22
+#define V_X8IPSOTPERR(x) ((x) << S_X8IPSOTPERR)
+#define F_X8IPSOTPERR V_X8IPSOTPERR(1U)
+
+#define S_X8IPRETRYPERR 21
+#define V_X8IPRETRYPERR(x) ((x) << S_X8IPRETRYPERR)
+#define F_X8IPRETRYPERR V_X8IPRETRYPERR(1U)
+
+#define S_X8IPRXDATAGRPPERR 20
+#define V_X8IPRXDATAGRPPERR(x) ((x) << S_X8IPRXDATAGRPPERR)
+#define F_X8IPRXDATAGRPPERR V_X8IPRXDATAGRPPERR(1U)
+
+#define S_X8IPRXHDRGRPPERR 19
+#define V_X8IPRXHDRGRPPERR(x) ((x) << S_X8IPRXHDRGRPPERR)
+#define F_X8IPRXHDRGRPPERR V_X8IPRXHDRGRPPERR(1U)
+
+#define S_X8IPCORECERR 3
+#define V_X8IPCORECERR(x) ((x) << S_X8IPCORECERR)
+#define F_X8IPCORECERR V_X8IPCORECERR(1U)
+
+#define S_X8MSTGRPPERR 2
+#define V_X8MSTGRPPERR(x) ((x) << S_X8MSTGRPPERR)
+#define F_X8MSTGRPPERR V_X8MSTGRPPERR(1U)
+
+#define S_X8MSTGRPCERR 1
+#define V_X8MSTGRPCERR(x) ((x) << S_X8MSTGRPCERR)
+#define F_X8MSTGRPCERR V_X8MSTGRPCERR(1U)
+
+#define A_PCIE_INT_CAUSE_EXT 0x3038
#define A_PCIE_CMD_CTRL 0x303c
+#define A_PCIE_INT_CAUSE_X8 0x303c
#define A_PCIE_CMD_CFG 0x3040
#define S_MAXRSPCNT 16
@@ -4761,6 +5702,40 @@
#define V_MAXREQCNT(x) ((x) << S_MAXREQCNT)
#define G_MAXREQCNT(x) (((x) >> S_MAXREQCNT) & M_MAXREQCNT)
+#define A_PCIE_PERR_ENABLE_EXT 0x3040
+
+#define S_T7_ARSPPERR 18
+#define V_T7_ARSPPERR(x) ((x) << S_T7_ARSPPERR)
+#define F_T7_ARSPPERR V_T7_ARSPPERR(1U)
+
+#define S_T7_AREQRDPERR 17
+#define V_T7_AREQRDPERR(x) ((x) << S_T7_AREQRDPERR)
+#define F_T7_AREQRDPERR V_T7_AREQRDPERR(1U)
+
+#define S_T7_AREQWRPERR 16
+#define V_T7_AREQWRPERR(x) ((x) << S_T7_AREQWRPERR)
+#define F_T7_AREQWRPERR V_T7_AREQWRPERR(1U)
+
+#define S_T7_A0ARBRSPORDFIFOPERR 15
+#define V_T7_A0ARBRSPORDFIFOPERR(x) ((x) << S_T7_A0ARBRSPORDFIFOPERR)
+#define F_T7_A0ARBRSPORDFIFOPERR V_T7_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_T7_MARSPPERR 14
+#define V_T7_MARSPPERR(x) ((x) << S_T7_MARSPPERR)
+#define F_T7_MARSPPERR V_T7_MARSPPERR(1U)
+
+#define S_T7_INICMAWDATAORDPERR 13
+#define V_T7_INICMAWDATAORDPERR(x) ((x) << S_T7_INICMAWDATAORDPERR)
+#define F_T7_INICMAWDATAORDPERR V_T7_INICMAWDATAORDPERR(1U)
+
+#define S_T7_EMUPERR 12
+#define V_T7_EMUPERR(x) ((x) << S_T7_EMUPERR)
+#define F_T7_EMUPERR V_T7_EMUPERR(1U)
+
+#define S_T7_ERRSPPERR 11
+#define V_T7_ERRSPPERR(x) ((x) << S_T7_ERRSPPERR)
+#define F_T7_ERRSPPERR V_T7_ERRSPPERR(1U)
+
#define A_PCIE_CMD_STAT 0x3044
#define S_RSPCNT 16
@@ -4773,6 +5748,32 @@
#define V_REQCNT(x) ((x) << S_REQCNT)
#define G_REQCNT(x) (((x) >> S_REQCNT) & M_REQCNT)
+#define A_PCIE_PERR_ENABLE_X8 0x3044
+
+#define S_T7_X8TGTGRPPERR 28
+#define V_T7_X8TGTGRPPERR(x) ((x) << S_T7_X8TGTGRPPERR)
+#define F_T7_X8TGTGRPPERR V_T7_X8TGTGRPPERR(1U)
+
+#define S_T7_X8IPSOTPERR 27
+#define V_T7_X8IPSOTPERR(x) ((x) << S_T7_X8IPSOTPERR)
+#define F_T7_X8IPSOTPERR V_T7_X8IPSOTPERR(1U)
+
+#define S_T7_X8IPRETRYPERR 26
+#define V_T7_X8IPRETRYPERR(x) ((x) << S_T7_X8IPRETRYPERR)
+#define F_T7_X8IPRETRYPERR V_T7_X8IPRETRYPERR(1U)
+
+#define S_T7_X8IPRXDATAGRPPERR 25
+#define V_T7_X8IPRXDATAGRPPERR(x) ((x) << S_T7_X8IPRXDATAGRPPERR)
+#define F_T7_X8IPRXDATAGRPPERR V_T7_X8IPRXDATAGRPPERR(1U)
+
+#define S_T7_X8IPRXHDRGRPPERR 24
+#define V_T7_X8IPRXHDRGRPPERR(x) ((x) << S_T7_X8IPRXHDRGRPPERR)
+#define F_T7_X8IPRXHDRGRPPERR V_T7_X8IPRXHDRGRPPERR(1U)
+
+#define S_T7_X8MSTGRPPERR 0
+#define V_T7_X8MSTGRPPERR(x) ((x) << S_T7_X8MSTGRPPERR)
+#define F_T7_X8MSTGRPPERR V_T7_X8MSTGRPPERR(1U)
+
#define A_PCIE_HMA_CTRL 0x3050
#define S_IPLTSSM 12
@@ -4889,9 +5890,9 @@
#define V_T6_ENABLE(x) ((x) << S_T6_ENABLE)
#define F_T6_ENABLE V_T6_ENABLE(1U)
-#define S_T6_AI 30
-#define V_T6_AI(x) ((x) << S_T6_AI)
-#define F_T6_AI V_T6_AI(1U)
+#define S_T6_1_AI 30
+#define V_T6_1_AI(x) ((x) << S_T6_1_AI)
+#define F_T6_1_AI V_T6_1_AI(1U)
#define S_T6_CS2 29
#define V_T6_CS2(x) ((x) << S_T6_CS2)
@@ -4936,6 +5937,7 @@
#define V_MEMOFST(x) ((x) << S_MEMOFST)
#define G_MEMOFST(x) (((x) >> S_MEMOFST) & M_MEMOFST)
+#define A_T7_PCIE_MAILBOX_BASE_WIN 0x30a4
#define A_PCIE_MAILBOX_BASE_WIN 0x30a8
#define S_MBOXPCIEOFST 6
@@ -4953,7 +5955,21 @@
#define V_MBOXWIN(x) ((x) << S_MBOXWIN)
#define G_MBOXWIN(x) (((x) >> S_MBOXWIN) & M_MBOXWIN)
+#define A_PCIE_MAILBOX_OFFSET0 0x30a8
+
+#define S_MEMOFST0 3
+#define M_MEMOFST0 0x1fffffffU
+#define V_MEMOFST0(x) ((x) << S_MEMOFST0)
+#define G_MEMOFST0(x) (((x) >> S_MEMOFST0) & M_MEMOFST0)
+
#define A_PCIE_MAILBOX_OFFSET 0x30ac
+#define A_PCIE_MAILBOX_OFFSET1 0x30ac
+
+#define S_MEMOFST1 0
+#define M_MEMOFST1 0xfU
+#define V_MEMOFST1(x) ((x) << S_MEMOFST1)
+#define G_MEMOFST1(x) (((x) >> S_MEMOFST1) & M_MEMOFST1)
+
#define A_PCIE_MA_CTRL 0x30b0
#define S_MA_TAGFREE 29
@@ -5098,6 +6114,11 @@
#define V_STATIC_SPARE3(x) ((x) << S_STATIC_SPARE3)
#define G_STATIC_SPARE3(x) (((x) >> S_STATIC_SPARE3) & M_STATIC_SPARE3)
+#define S_T7_STATIC_SPARE3 0
+#define M_T7_STATIC_SPARE3 0x7fffU
+#define V_T7_STATIC_SPARE3(x) ((x) << S_T7_STATIC_SPARE3)
+#define G_T7_STATIC_SPARE3(x) (((x) >> S_T7_STATIC_SPARE3) & M_T7_STATIC_SPARE3)
+
#define A_PCIE_DBG_INDIR_REQ 0x30ec
#define S_DBGENABLE 31
@@ -5173,6 +6194,17 @@
#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
#define A_PCIE_PF_INT_CFG 0x3140
+
+#define S_T7_VECNUM 12
+#define M_T7_VECNUM 0x7ffU
+#define V_T7_VECNUM(x) ((x) << S_T7_VECNUM)
+#define G_T7_VECNUM(x) (((x) >> S_T7_VECNUM) & M_T7_VECNUM)
+
+#define S_T7_VECBASE 0
+#define M_T7_VECBASE 0xfffU
+#define V_T7_VECBASE(x) ((x) << S_T7_VECBASE)
+#define G_T7_VECBASE(x) (((x) >> S_T7_VECBASE) & M_T7_VECBASE)
+
#define A_PCIE_PF_INT_CFG2 0x3144
#define A_PCIE_VF_INT_CFG 0x3180
#define A_PCIE_VF_INT_CFG2 0x3184
@@ -5198,6 +6230,20 @@
#define A_PCIE_VF_MSIX_EN_1 0x35c4
#define A_PCIE_VF_MSIX_EN_2 0x35c8
#define A_PCIE_VF_MSIX_EN_3 0x35cc
+#define A_PCIE_FID_PASID 0x35e0
+#define A_PCIE_FID_VFID_CTL 0x35e4
+
+#define S_T7_WRITE 0
+#define V_T7_WRITE(x) ((x) << S_T7_WRITE)
+#define F_T7_WRITE V_T7_WRITE(1U)
+
+#define A_T7_PCIE_FID_VFID_SEL 0x35e8
+
+#define S_T7_ADDR 2
+#define M_T7_ADDR 0x1fffU
+#define V_T7_ADDR(x) ((x) << S_T7_ADDR)
+#define G_T7_ADDR(x) (((x) >> S_T7_ADDR) & M_T7_ADDR)
+
#define A_PCIE_FID_VFID_SEL 0x35ec
#define S_FID_VFID_SEL_SELECT 0
@@ -5205,6 +6251,17 @@
#define V_FID_VFID_SEL_SELECT(x) ((x) << S_FID_VFID_SEL_SELECT)
#define G_FID_VFID_SEL_SELECT(x) (((x) >> S_FID_VFID_SEL_SELECT) & M_FID_VFID_SEL_SELECT)
+#define A_T7_PCIE_FID_VFID 0x35ec
+
+#define S_FID_VFID_NVMEGROUPEN 29
+#define V_FID_VFID_NVMEGROUPEN(x) ((x) << S_FID_VFID_NVMEGROUPEN)
+#define F_FID_VFID_NVMEGROUPEN V_FID_VFID_NVMEGROUPEN(1U)
+
+#define S_FID_VFID_GROUPSEL 25
+#define M_FID_VFID_GROUPSEL 0xfU
+#define V_FID_VFID_GROUPSEL(x) ((x) << S_FID_VFID_GROUPSEL)
+#define G_FID_VFID_GROUPSEL(x) (((x) >> S_FID_VFID_GROUPSEL) & M_FID_VFID_GROUPSEL)
+
#define A_PCIE_FID_VFID 0x3600
#define S_FID_VFID_SELECT 30
@@ -5264,6 +6321,227 @@
#define V_T6_FID_VFID_RVF(x) ((x) << S_T6_FID_VFID_RVF)
#define G_T6_FID_VFID_RVF(x) (((x) >> S_T6_FID_VFID_RVF) & M_T6_FID_VFID_RVF)
+#define A_PCIE_JBOF_NVME_HIGH_DW_START_ADDR 0x3600
+#define A_PCIE_JBOF_NVME_LOW_DW_START_ADDR 0x3604
+#define A_PCIE_JBOF_NVME_LENGTH 0x3608
+
+#define S_NVMEDISABLE 31
+#define V_NVMEDISABLE(x) ((x) << S_NVMEDISABLE)
+#define F_NVMEDISABLE V_NVMEDISABLE(1U)
+
+#define S_NVMELENGTH 0
+#define M_NVMELENGTH 0x3fffffffU
+#define V_NVMELENGTH(x) ((x) << S_NVMELENGTH)
+#define G_NVMELENGTH(x) (((x) >> S_NVMELENGTH) & M_NVMELENGTH)
+
+#define A_PCIE_JBOF_NVME_GROUP 0x360c
+
+#define S_NVMEGROUPSEL 0
+#define M_NVMEGROUPSEL 0xfU
+#define V_NVMEGROUPSEL(x) ((x) << S_NVMEGROUPSEL)
+#define G_NVMEGROUPSEL(x) (((x) >> S_NVMEGROUPSEL) & M_NVMEGROUPSEL)
+
+#define A_T7_PCIE_MEM_ACCESS_BASE_WIN 0x3700
+#define A_PCIE_MEM_ACCESS_BASE_WIN1 0x3704
+
+#define S_PCIEOFST1 0
+#define M_PCIEOFST1 0xffU
+#define V_PCIEOFST1(x) ((x) << S_PCIEOFST1)
+#define G_PCIEOFST1(x) (((x) >> S_PCIEOFST1) & M_PCIEOFST1)
+
+#define A_PCIE_MEM_ACCESS_OFFSET0 0x3708
+#define A_PCIE_MEM_ACCESS_OFFSET1 0x370c
+#define A_PCIE_PTM_EP_EXT_STROBE 0x3804
+
+#define S_PTM_AUTO_UPDATE 1
+#define V_PTM_AUTO_UPDATE(x) ((x) << S_PTM_AUTO_UPDATE)
+#define F_PTM_AUTO_UPDATE V_PTM_AUTO_UPDATE(1U)
+
+#define S_PTM_EXT_STROBE 0
+#define V_PTM_EXT_STROBE(x) ((x) << S_PTM_EXT_STROBE)
+#define F_PTM_EXT_STROBE V_PTM_EXT_STROBE(1U)
+
+#define A_PCIE_PTM_EP_EXT_TIME0 0x3808
+#define A_PCIE_PTM_EP_EXT_TIME1 0x380c
+#define A_PCIE_PTM_MAN_UPD_PULSE 0x3810
+
+#define S_PTM_MAN_UPD_PULSE 0
+#define V_PTM_MAN_UPD_PULSE(x) ((x) << S_PTM_MAN_UPD_PULSE)
+#define F_PTM_MAN_UPD_PULSE V_PTM_MAN_UPD_PULSE(1U)
+
+#define A_PCIE_SWAP_DATA_B2L_X16 0x3814
+#define A_PCIE_PCIE_RC_RST 0x3818
+
+#define S_PERST 0
+#define V_PERST(x) ((x) << S_PERST)
+#define F_PERST V_PERST(1U)
+
+#define A_PCIE_PCIE_LN_CLKSEL 0x3880
+
+#define S_DS8_SEL 30
+#define M_DS8_SEL 0x3U
+#define V_DS8_SEL(x) ((x) << S_DS8_SEL)
+#define G_DS8_SEL(x) (((x) >> S_DS8_SEL) & M_DS8_SEL)
+
+#define S_DS7_SEL 28
+#define M_DS7_SEL 0x3U
+#define V_DS7_SEL(x) ((x) << S_DS7_SEL)
+#define G_DS7_SEL(x) (((x) >> S_DS7_SEL) & M_DS7_SEL)
+
+#define S_DS6_SEL 26
+#define M_DS6_SEL 0x3U
+#define V_DS6_SEL(x) ((x) << S_DS6_SEL)
+#define G_DS6_SEL(x) (((x) >> S_DS6_SEL) & M_DS6_SEL)
+
+#define S_DS5_SEL 24
+#define M_DS5_SEL 0x3U
+#define V_DS5_SEL(x) ((x) << S_DS5_SEL)
+#define G_DS5_SEL(x) (((x) >> S_DS5_SEL) & M_DS5_SEL)
+
+#define S_DS4_SEL 22
+#define M_DS4_SEL 0x3U
+#define V_DS4_SEL(x) ((x) << S_DS4_SEL)
+#define G_DS4_SEL(x) (((x) >> S_DS4_SEL) & M_DS4_SEL)
+
+#define S_DS3_SEL 20
+#define M_DS3_SEL 0x3U
+#define V_DS3_SEL(x) ((x) << S_DS3_SEL)
+#define G_DS3_SEL(x) (((x) >> S_DS3_SEL) & M_DS3_SEL)
+
+#define S_DS2_SEL 18
+#define M_DS2_SEL 0x3U
+#define V_DS2_SEL(x) ((x) << S_DS2_SEL)
+#define G_DS2_SEL(x) (((x) >> S_DS2_SEL) & M_DS2_SEL)
+
+#define S_DS1_SEL 16
+#define M_DS1_SEL 0x3U
+#define V_DS1_SEL(x) ((x) << S_DS1_SEL)
+#define G_DS1_SEL(x) (((x) >> S_DS1_SEL) & M_DS1_SEL)
+
+#define S_LN14_SEL 14
+#define M_LN14_SEL 0x3U
+#define V_LN14_SEL(x) ((x) << S_LN14_SEL)
+#define G_LN14_SEL(x) (((x) >> S_LN14_SEL) & M_LN14_SEL)
+
+#define S_LN12_SEL 12
+#define M_LN12_SEL 0x3U
+#define V_LN12_SEL(x) ((x) << S_LN12_SEL)
+#define G_LN12_SEL(x) (((x) >> S_LN12_SEL) & M_LN12_SEL)
+
+#define S_LN10_SEL 10
+#define M_LN10_SEL 0x3U
+#define V_LN10_SEL(x) ((x) << S_LN10_SEL)
+#define G_LN10_SEL(x) (((x) >> S_LN10_SEL) & M_LN10_SEL)
+
+#define S_LN8_SEL 8
+#define M_LN8_SEL 0x3U
+#define V_LN8_SEL(x) ((x) << S_LN8_SEL)
+#define G_LN8_SEL(x) (((x) >> S_LN8_SEL) & M_LN8_SEL)
+
+#define S_LN6_SEL 6
+#define M_LN6_SEL 0x3U
+#define V_LN6_SEL(x) ((x) << S_LN6_SEL)
+#define G_LN6_SEL(x) (((x) >> S_LN6_SEL) & M_LN6_SEL)
+
+#define S_LN4_SEL 4
+#define M_LN4_SEL 0x3U
+#define V_LN4_SEL(x) ((x) << S_LN4_SEL)
+#define G_LN4_SEL(x) (((x) >> S_LN4_SEL) & M_LN4_SEL)
+
+#define S_LN2_SEL 2
+#define M_LN2_SEL 0x3U
+#define V_LN2_SEL(x) ((x) << S_LN2_SEL)
+#define G_LN2_SEL(x) (((x) >> S_LN2_SEL) & M_LN2_SEL)
+
+#define S_LN0_SEL 0
+#define M_LN0_SEL 0x3U
+#define V_LN0_SEL(x) ((x) << S_LN0_SEL)
+#define G_LN0_SEL(x) (((x) >> S_LN0_SEL) & M_LN0_SEL)
+
+#define A_PCIE_PCIE_MSIX_EN 0x3884
+
+#define S_MSIX_ENABLE 0
+#define M_MSIX_ENABLE 0xffU
+#define V_MSIX_ENABLE(x) ((x) << S_MSIX_ENABLE)
+#define G_MSIX_ENABLE(x) (((x) >> S_MSIX_ENABLE) & M_MSIX_ENABLE)
+
+#define A_PCIE_LFSR_WRCTRL 0x3888
+
+#define S_WR_LFSR_CMP_DATA 16
+#define M_WR_LFSR_CMP_DATA 0xffffU
+#define V_WR_LFSR_CMP_DATA(x) ((x) << S_WR_LFSR_CMP_DATA)
+#define G_WR_LFSR_CMP_DATA(x) (((x) >> S_WR_LFSR_CMP_DATA) & M_WR_LFSR_CMP_DATA)
+
+#define S_WR_LFSR_RSVD 2
+#define M_WR_LFSR_RSVD 0x3fffU
+#define V_WR_LFSR_RSVD(x) ((x) << S_WR_LFSR_RSVD)
+#define G_WR_LFSR_RSVD(x) (((x) >> S_WR_LFSR_RSVD) & M_WR_LFSR_RSVD)
+
+#define S_WR_LFSR_EN 1
+#define V_WR_LFSR_EN(x) ((x) << S_WR_LFSR_EN)
+#define F_WR_LFSR_EN V_WR_LFSR_EN(1U)
+
+#define S_WR_LFSR_START 0
+#define V_WR_LFSR_START(x) ((x) << S_WR_LFSR_START)
+#define F_WR_LFSR_START V_WR_LFSR_START(1U)
+
+#define A_PCIE_LFSR_RDCTRL 0x388c
+
+#define S_CMD_LFSR_CMP_DATA 24
+#define M_CMD_LFSR_CMP_DATA 0xffU
+#define V_CMD_LFSR_CMP_DATA(x) ((x) << S_CMD_LFSR_CMP_DATA)
+#define G_CMD_LFSR_CMP_DATA(x) (((x) >> S_CMD_LFSR_CMP_DATA) & M_CMD_LFSR_CMP_DATA)
+
+#define S_RD_LFSR_CMD_DATA 16
+#define M_RD_LFSR_CMD_DATA 0xffU
+#define V_RD_LFSR_CMD_DATA(x) ((x) << S_RD_LFSR_CMD_DATA)
+#define G_RD_LFSR_CMD_DATA(x) (((x) >> S_RD_LFSR_CMD_DATA) & M_RD_LFSR_CMD_DATA)
+
+#define S_RD_LFSR_RSVD 10
+#define M_RD_LFSR_RSVD 0x3fU
+#define V_RD_LFSR_RSVD(x) ((x) << S_RD_LFSR_RSVD)
+#define G_RD_LFSR_RSVD(x) (((x) >> S_RD_LFSR_RSVD) & M_RD_LFSR_RSVD)
+
+#define S_RD3_LFSR_EN 9
+#define V_RD3_LFSR_EN(x) ((x) << S_RD3_LFSR_EN)
+#define F_RD3_LFSR_EN V_RD3_LFSR_EN(1U)
+
+#define S_RD3_LFSR_START 8
+#define V_RD3_LFSR_START(x) ((x) << S_RD3_LFSR_START)
+#define F_RD3_LFSR_START V_RD3_LFSR_START(1U)
+
+#define S_RD2_LFSR_EN 7
+#define V_RD2_LFSR_EN(x) ((x) << S_RD2_LFSR_EN)
+#define F_RD2_LFSR_EN V_RD2_LFSR_EN(1U)
+
+#define S_RD2_LFSR_START 6
+#define V_RD2_LFSR_START(x) ((x) << S_RD2_LFSR_START)
+#define F_RD2_LFSR_START V_RD2_LFSR_START(1U)
+
+#define S_RD1_LFSR_EN 5
+#define V_RD1_LFSR_EN(x) ((x) << S_RD1_LFSR_EN)
+#define F_RD1_LFSR_EN V_RD1_LFSR_EN(1U)
+
+#define S_RD1_LFSR_START 4
+#define V_RD1_LFSR_START(x) ((x) << S_RD1_LFSR_START)
+#define F_RD1_LFSR_START V_RD1_LFSR_START(1U)
+
+#define S_RD0_LFSR_EN 3
+#define V_RD0_LFSR_EN(x) ((x) << S_RD0_LFSR_EN)
+#define F_RD0_LFSR_EN V_RD0_LFSR_EN(1U)
+
+#define S_RD0_LFSR_START 2
+#define V_RD0_LFSR_START(x) ((x) << S_RD0_LFSR_START)
+#define F_RD0_LFSR_START V_RD0_LFSR_START(1U)
+
+#define S_CMD_LFSR_EN 1
+#define V_CMD_LFSR_EN(x) ((x) << S_CMD_LFSR_EN)
+#define F_CMD_LFSR_EN V_CMD_LFSR_EN(1U)
+
+#define S_CMD_LFSR_START 0
+#define V_CMD_LFSR_START(x) ((x) << S_CMD_LFSR_START)
+#define F_CMD_LFSR_START V_CMD_LFSR_START(1U)
+
#define A_PCIE_FID 0x3900
#define S_PAD 11
@@ -5280,6 +6558,309 @@
#define V_FUNC(x) ((x) << S_FUNC)
#define G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
+#define A_PCIE_EMU_ADDR 0x3900
+
+#define S_EMU_ADDR 0
+#define M_EMU_ADDR 0x1ffU
+#define V_EMU_ADDR(x) ((x) << S_EMU_ADDR)
+#define G_EMU_ADDR(x) (((x) >> S_EMU_ADDR) & M_EMU_ADDR)
+
+#define A_PCIE_EMU_CFG 0x3904
+
+#define S_EMUENABLE 16
+#define V_EMUENABLE(x) ((x) << S_EMUENABLE)
+#define F_EMUENABLE V_EMUENABLE(1U)
+
+#define S_EMUTYPE 14
+#define M_EMUTYPE 0x3U
+#define V_EMUTYPE(x) ((x) << S_EMUTYPE)
+#define G_EMUTYPE(x) (((x) >> S_EMUTYPE) & M_EMUTYPE)
+
+#define S_BAR0TARGET 12
+#define M_BAR0TARGET 0x3U
+#define V_BAR0TARGET(x) ((x) << S_BAR0TARGET)
+#define G_BAR0TARGET(x) (((x) >> S_BAR0TARGET) & M_BAR0TARGET)
+
+#define S_BAR2TARGET 10
+#define M_BAR2TARGET 0x3U
+#define V_BAR2TARGET(x) ((x) << S_BAR2TARGET)
+#define G_BAR2TARGET(x) (((x) >> S_BAR2TARGET) & M_BAR2TARGET)
+
+#define S_BAR4TARGET 8
+#define M_BAR4TARGET 0x3U
+#define V_BAR4TARGET(x) ((x) << S_BAR4TARGET)
+#define G_BAR4TARGET(x) (((x) >> S_BAR4TARGET) & M_BAR4TARGET)
+
+#define S_RELEATIVEEMUID 0
+#define M_RELEATIVEEMUID 0xffU
+#define V_RELEATIVEEMUID(x) ((x) << S_RELEATIVEEMUID)
+#define G_RELEATIVEEMUID(x) (((x) >> S_RELEATIVEEMUID) & M_RELEATIVEEMUID)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET0_BAR0 0x3910
+
+#define S_T7_MEMOFST0 0
+#define M_T7_MEMOFST0 0xfffffffU
+#define V_T7_MEMOFST0(x) ((x) << S_T7_MEMOFST0)
+#define G_T7_MEMOFST0(x) (((x) >> S_T7_MEMOFST0) & M_T7_MEMOFST0)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG0_BAR0 0x3914
+
+#define S_SIZE0 0
+#define M_SIZE0 0x1fU
+#define V_SIZE0(x) ((x) << S_SIZE0)
+#define G_SIZE0(x) (((x) >> S_SIZE0) & M_SIZE0)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET1_BAR0 0x3918
+
+#define S_T7_MEMOFST1 0
+#define M_T7_MEMOFST1 0xfffffffU
+#define V_T7_MEMOFST1(x) ((x) << S_T7_MEMOFST1)
+#define G_T7_MEMOFST1(x) (((x) >> S_T7_MEMOFST1) & M_T7_MEMOFST1)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG1_BAR0 0x391c
+
+#define S_SIZE1 0
+#define M_SIZE1 0x1fU
+#define V_SIZE1(x) ((x) << S_SIZE1)
+#define G_SIZE1(x) (((x) >> S_SIZE1) & M_SIZE1)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET2_BAR0 0x3920
+
+#define S_MEMOFST2 0
+#define M_MEMOFST2 0xfffffffU
+#define V_MEMOFST2(x) ((x) << S_MEMOFST2)
+#define G_MEMOFST2(x) (((x) >> S_MEMOFST2) & M_MEMOFST2)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG2_BAR0 0x3924
+
+#define S_SIZE2 0
+#define M_SIZE2 0x1fU
+#define V_SIZE2(x) ((x) << S_SIZE2)
+#define G_SIZE2(x) (((x) >> S_SIZE2) & M_SIZE2)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET3_BAR0 0x3928
+
+#define S_MEMOFST3 0
+#define M_MEMOFST3 0xfffffffU
+#define V_MEMOFST3(x) ((x) << S_MEMOFST3)
+#define G_MEMOFST3(x) (((x) >> S_MEMOFST3) & M_MEMOFST3)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG3_BAR0 0x392c
+
+#define S_SIZE3 0
+#define M_SIZE3 0x1fU
+#define V_SIZE3(x) ((x) << S_SIZE3)
+#define G_SIZE3(x) (((x) >> S_SIZE3) & M_SIZE3)
+
+#define A_PCIE_TCAM_DATA 0x3970
+#define A_PCIE_TCAM_CTL 0x3974
+
+#define S_TCAMADDR 8
+#define M_TCAMADDR 0x3ffU
+#define V_TCAMADDR(x) ((x) << S_TCAMADDR)
+#define G_TCAMADDR(x) (((x) >> S_TCAMADDR) & M_TCAMADDR)
+
+#define S_CAMEN 0
+#define V_CAMEN(x) ((x) << S_CAMEN)
+#define F_CAMEN V_CAMEN(1U)
+
+#define A_PCIE_TCAM_DBG 0x3978
+
+#define S_CBPASS 24
+#define V_CBPASS(x) ((x) << S_CBPASS)
+#define F_CBPASS V_CBPASS(1U)
+
+#define S_CBBUSY 20
+#define V_CBBUSY(x) ((x) << S_CBBUSY)
+#define F_CBBUSY V_CBBUSY(1U)
+
+#define S_CBSTART 17
+#define V_CBSTART(x) ((x) << S_CBSTART)
+#define F_CBSTART V_CBSTART(1U)
+
+#define S_RSTCB 16
+#define V_RSTCB(x) ((x) << S_RSTCB)
+#define F_RSTCB V_RSTCB(1U)
+
+#define S_TCAM_DBG_DATA 0
+#define M_TCAM_DBG_DATA 0xffffU
+#define V_TCAM_DBG_DATA(x) ((x) << S_TCAM_DBG_DATA)
+#define G_TCAM_DBG_DATA(x) (((x) >> S_TCAM_DBG_DATA) & M_TCAM_DBG_DATA)
+
+#define A_PCIE_TEST_CTRL0 0x3980
+#define A_PCIE_TEST_CTRL1 0x3984
+#define A_PCIE_TEST_CTRL2 0x3988
+#define A_PCIE_TEST_CTRL3 0x398c
+#define A_PCIE_TEST_STS0 0x3990
+#define A_PCIE_TEST_STS1 0x3994
+#define A_PCIE_TEST_STS2 0x3998
+#define A_PCIE_TEST_STS3 0x399c
+#define A_PCIE_X8_CORE_ACK_LATENCY_TIMER_REPLAY_TIMER 0x4700
+#define A_PCIE_X8_CORE_VENDOR_SPECIFIC_DLLP 0x4704
+#define A_PCIE_X8_CORE_PORT_FORCE_LINK 0x4708
+#define A_PCIE_X8_CORE_ACK_FREQUENCY_L0L1_ASPM_CONTROL 0x470c
+#define A_PCIE_X8_CORE_PORT_LINK_CONTROL 0x4710
+#define A_PCIE_X8_CORE_LANE_SKEW 0x4714
+#define A_PCIE_X8_CORE_SYMBOL_NUMBER 0x4718
+#define A_PCIE_X8_CORE_SYMBOL_TIMER_FILTER_MASK1 0x471c
+#define A_PCIE_X8_CORE_FILTER_MASK2 0x4720
+#define A_PCIE_X8_CORE_DEBUG_0 0x4728
+#define A_PCIE_X8_CORE_DEBUG_1 0x472c
+#define A_PCIE_X8_CORE_TRANSMIT_POSTED_FC_CREDIT_STATUS 0x4730
+#define A_PCIE_X8_CORE_TRANSMIT_NONPOSTED_FC_CREDIT_STATUS 0x4734
+#define A_PCIE_X8_CORE_TRANSMIT_COMPLETION_FC_CREDIT_STATUS 0x4738
+#define A_PCIE_X8_CORE_QUEUE_STATUS 0x473c
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_1 0x4740
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_2 0x4744
+#define A_PCIE_X8_CORE_VC0_POSTED_RECEIVE_QUEUE_CONTROL 0x4748
+#define A_PCIE_X8_CORE_VC0_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x474c
+#define A_PCIE_X8_CORE_VC0_COMPLETION_RECEIVE_QUEUE_CONTROL 0x4750
+#define A_PCIE_X8_CORE_VC1_POSTED_RECEIVE_QUEUE_CONTROL 0x4754
+#define A_PCIE_X8_CORE_VC1_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x4758
+#define A_PCIE_X8_CORE_VC1_COMPLETION_RECEIVE_QUEUE_CONTROL 0x475c
+#define A_PCIE_X8_CORE_LINK_WIDTH_SPEED_CHANGE 0x480c
+#define A_PCIE_X8_CORE_PHY_STATUS 0x4810
+#define A_PCIE_X8_CORE_PHY_CONTROL 0x4814
+#define A_PCIE_X8_CORE_GEN3_CONTROL 0x4890
+#define A_PCIE_X8_CORE_GEN3_EQ_FS_LF 0x4894
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_COEFF 0x4898
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_INDEX 0x489c
+#define A_PCIE_X8_CORE_GEN3_EQ_STATUS 0x48a4
+#define A_PCIE_X8_CORE_GEN3_EQ_CONTROL 0x48a8
+#define A_PCIE_X8_CORE_GEN3_EQ_DIRCHANGE_FEEDBACK 0x48ac
+#define A_PCIE_X8_CORE_PIPE_CONTROL 0x48b8
+#define A_PCIE_X8_CORE_DBI_RO_WE 0x48bc
+#define A_PCIE_X8_CFG_SPACE_REQ 0x48c0
+#define A_PCIE_X8_CFG_SPACE_DATA 0x48c4
+#define A_PCIE_X8_CFG_MPS_MRS 0x4900
+
+#define S_MRS 3
+#define M_MRS 0x7U
+#define V_MRS(x) ((x) << S_MRS)
+#define G_MRS(x) (((x) >> S_MRS) & M_MRS)
+
+#define S_T7_MPS 0
+#define M_T7_MPS 0x7U
+#define V_T7_MPS(x) ((x) << S_T7_MPS)
+#define G_T7_MPS(x) (((x) >> S_T7_MPS) & M_T7_MPS)
+
+#define A_PCIE_X8_CFG_ATTRIBUTES 0x4904
+
+#define S_T7_DCAEN 2
+#define V_T7_DCAEN(x) ((x) << S_T7_DCAEN)
+#define F_T7_DCAEN V_T7_DCAEN(1U)
+
+#define S_DCASTFITTRAONLEN 1
+#define V_DCASTFITTRAONLEN(x) ((x) << S_DCASTFITTRAONLEN)
+#define F_DCASTFITTRAONLEN V_DCASTFITTRAONLEN(1U)
+
+#define S_REQCTLDYNSTCLKEN 0
+#define V_REQCTLDYNSTCLKEN(x) ((x) << S_REQCTLDYNSTCLKEN)
+#define F_REQCTLDYNSTCLKEN V_REQCTLDYNSTCLKEN(1U)
+
+#define A_PCIE_X8_CFG_LTSSM 0x4908
+
+#define S_APP_LTSSM_ENABLE 0
+#define V_APP_LTSSM_ENABLE(x) ((x) << S_APP_LTSSM_ENABLE)
+#define F_APP_LTSSM_ENABLE V_APP_LTSSM_ENABLE(1U)
+
+#define A_PCIE_ARM_REQUESTER_ID_X8 0x490c
+
+#define S_A1_RSVD1 24
+#define M_A1_RSVD1 0xffU
+#define V_A1_RSVD1(x) ((x) << S_A1_RSVD1)
+#define G_A1_RSVD1(x) (((x) >> S_A1_RSVD1) & M_A1_RSVD1)
+
+#define S_A1_PRIMBUSNUMBER 16
+#define M_A1_PRIMBUSNUMBER 0xffU
+#define V_A1_PRIMBUSNUMBER(x) ((x) << S_A1_PRIMBUSNUMBER)
+#define G_A1_PRIMBUSNUMBER(x) (((x) >> S_A1_PRIMBUSNUMBER) & M_A1_PRIMBUSNUMBER)
+
+#define S_A1_REQUESTERID 0
+#define M_A1_REQUESTERID 0xffffU
+#define V_A1_REQUESTERID(x) ((x) << S_A1_REQUESTERID)
+#define G_A1_REQUESTERID(x) (((x) >> S_A1_REQUESTERID) & M_A1_REQUESTERID)
+
+#define A_PCIE_SWAP_DATA_B2L_X8 0x4910
+
+#define S_CFGRD_SWAP_EN 1
+#define V_CFGRD_SWAP_EN(x) ((x) << S_CFGRD_SWAP_EN)
+#define F_CFGRD_SWAP_EN V_CFGRD_SWAP_EN(1U)
+
+#define S_CFGWR_SWAP_EN 0
+#define V_CFGWR_SWAP_EN(x) ((x) << S_CFGWR_SWAP_EN)
+#define F_CFGWR_SWAP_EN V_CFGWR_SWAP_EN(1U)
+
+#define A_PCIE_PDEBUG_DATA0_X8 0x4914
+#define A_PCIE_PDEBUG_DATA1_X8 0x4918
+#define A_PCIE_PDEBUG_DATA2_X8 0x491c
+#define A_PCIE_PDEBUG_CTRL_X8 0x4920
+#define A_PCIE_PDEBUG_DATA_X8 0x4924
+#define A_PCIE_SPARE_REGISTER_SPACES_X8 0x4ffc
+#define A_PCIE_PIPE_LANE0_REG0 0x5500
+#define A_PCIE_PIPE_LANE0_REG1 0x5504
+#define A_PCIE_PIPE_LANE0_REG2 0x5508
+#define A_PCIE_PIPE_LANE0_REG3 0x550c
+#define A_PCIE_PIPE_LANE1_REG0 0x5510
+#define A_PCIE_PIPE_LANE1_REG1 0x5514
+#define A_PCIE_PIPE_LANE1_REG2 0x5518
+#define A_PCIE_PIPE_LANE1_REG3 0x551c
+#define A_PCIE_PIPE_LANE2_REG0 0x5520
+#define A_PCIE_PIPE_LANE2_REG1 0x5524
+#define A_PCIE_PIPE_LANE2_REG2 0x5528
+#define A_PCIE_PIPE_LANE2_REG3 0x552c
+#define A_PCIE_PIPE_LANE3_REG0 0x5530
+#define A_PCIE_PIPE_LANE3_REG1 0x5534
+#define A_PCIE_PIPE_LANE3_REG2 0x5538
+#define A_PCIE_PIPE_LANE3_REG3 0x553c
+#define A_PCIE_PIPE_LANE4_REG0 0x5540
+#define A_PCIE_PIPE_LANE4_REG1 0x5544
+#define A_PCIE_PIPE_LANE4_REG2 0x5548
+#define A_PCIE_PIPE_LANE4_REG3 0x554c
+#define A_PCIE_PIPE_LANE5_REG0 0x5550
+#define A_PCIE_PIPE_LANE5_REG1 0x5554
+#define A_PCIE_PIPE_LANE5_REG2 0x5558
+#define A_PCIE_PIPE_LANE5_REG3 0x555c
+#define A_PCIE_PIPE_LANE6_REG0 0x5560
+#define A_PCIE_PIPE_LANE6_REG1 0x5564
+#define A_PCIE_PIPE_LANE6_REG2 0x5568
+#define A_PCIE_PIPE_LANE6_REG3 0x556c
+#define A_PCIE_PIPE_LANE7_REG0 0x5570
+#define A_PCIE_PIPE_LANE7_REG1 0x5574
+#define A_PCIE_PIPE_LANE7_REG2 0x5578
+#define A_PCIE_PIPE_LANE7_REG3 0x557c
+#define A_PCIE_PIPE_LANE8_REG0 0x5580
+#define A_PCIE_PIPE_LANE8_REG1 0x5584
+#define A_PCIE_PIPE_LANE8_REG2 0x5588
+#define A_PCIE_PIPE_LANE8_REG3 0x558c
+#define A_PCIE_PIPE_LANE9_REG0 0x5590
+#define A_PCIE_PIPE_LANE9_REG1 0x5594
+#define A_PCIE_PIPE_LANE9_REG2 0x5598
+#define A_PCIE_PIPE_LANE9_REG3 0x559c
+#define A_PCIE_PIPE_LANE10_REG0 0x55a0
+#define A_PCIE_PIPE_LANE10_REG1 0x55a4
+#define A_PCIE_PIPE_LANE10_REG2 0x55a8
+#define A_PCIE_PIPE_LANE10_REG3 0x55ac
+#define A_PCIE_PIPE_LANE11_REG0 0x55b0
+#define A_PCIE_PIPE_LANE11_REG1 0x55b4
+#define A_PCIE_PIPE_LANE11_REG2 0x55b8
+#define A_PCIE_PIPE_LANE11_REG3 0x55bc
+#define A_PCIE_PIPE_LANE12_REG0 0x55c0
+#define A_PCIE_PIPE_LANE12_REG1 0x55c4
+#define A_PCIE_PIPE_LANE12_REG2 0x55c8
+#define A_PCIE_PIPE_LANE12_REG3 0x55cc
+#define A_PCIE_PIPE_LANE13_REG0 0x55d0
+#define A_PCIE_PIPE_LANE13_REG1 0x55d4
+#define A_PCIE_PIPE_LANE13_REG2 0x55d8
+#define A_PCIE_PIPE_LANE13_REG3 0x55dc
+#define A_PCIE_PIPE_LANE14_REG0 0x55e0
+#define A_PCIE_PIPE_LANE14_REG1 0x55e4
+#define A_PCIE_PIPE_LANE14_REG2 0x55e8
+#define A_PCIE_PIPE_LANE14_REG3 0x55ec
+#define A_PCIE_PIPE_LANE15_REG0 0x55f0
+#define A_PCIE_PIPE_LANE15_REG1 0x55f4
+#define A_PCIE_PIPE_LANE15_REG2 0x55f8
+#define A_PCIE_PIPE_LANE15_REG3 0x55fc
#define A_PCIE_COOKIE_STAT 0x5600
#define S_COOKIEB 16
@@ -5346,6 +6927,30 @@
#define V_T6_RCVDPIOREQCOOKIE(x) ((x) << S_T6_RCVDPIOREQCOOKIE)
#define G_T6_RCVDPIOREQCOOKIE(x) (((x) >> S_T6_RCVDPIOREQCOOKIE) & M_T6_RCVDPIOREQCOOKIE)
+#define A_T7_PCIE_VC0_CDTS0 0x56c4
+
+#define S_T7_CPLD0 16
+#define M_T7_CPLD0 0xffffU
+#define V_T7_CPLD0(x) ((x) << S_T7_CPLD0)
+#define G_T7_CPLD0(x) (((x) >> S_T7_CPLD0) & M_T7_CPLD0)
+
+#define S_T7_CPLH0 0
+#define M_T7_CPLH0 0xfffU
+#define V_T7_CPLH0(x) ((x) << S_T7_CPLH0)
+#define G_T7_CPLH0(x) (((x) >> S_T7_CPLH0) & M_T7_CPLH0)
+
+#define A_T7_PCIE_VC0_CDTS1 0x56c8
+
+#define S_T7_PD0 16
+#define M_T7_PD0 0xffffU
+#define V_T7_PD0(x) ((x) << S_T7_PD0)
+#define G_T7_PD0(x) (((x) >> S_T7_PD0) & M_T7_PD0)
+
+#define S_T7_PH0 0
+#define M_T7_PH0 0xfffU
+#define V_T7_PH0(x) ((x) << S_T7_PH0)
+#define G_T7_PH0(x) (((x) >> S_T7_PH0) & M_T7_PH0)
+
#define A_PCIE_VC0_CDTS0 0x56cc
#define S_CPLD0 20
@@ -5363,6 +6968,18 @@
#define V_PD0(x) ((x) << S_PD0)
#define G_PD0(x) (((x) >> S_PD0) & M_PD0)
+#define A_PCIE_VC0_CDTS2 0x56cc
+
+#define S_T7_NPD0 16
+#define M_T7_NPD0 0xffffU
+#define V_T7_NPD0(x) ((x) << S_T7_NPD0)
+#define G_T7_NPD0(x) (((x) >> S_T7_NPD0) & M_T7_NPD0)
+
+#define S_T7_NPH0 0
+#define M_T7_NPH0 0xfffU
+#define V_T7_NPH0(x) ((x) << S_T7_NPH0)
+#define G_T7_NPH0(x) (((x) >> S_T7_NPH0) & M_T7_NPH0)
+
#define A_PCIE_VC0_CDTS1 0x56d0
#define S_CPLH0 20
@@ -5380,6 +6997,7 @@
#define V_NPD0(x) ((x) << S_NPD0)
#define G_NPD0(x) (((x) >> S_NPD0) & M_NPD0)
+#define A_T7_PCIE_VC1_CDTS0 0x56d0
#define A_PCIE_VC1_CDTS0 0x56d4
#define S_CPLD1 20
@@ -5397,6 +7015,7 @@
#define V_PD1(x) ((x) << S_PD1)
#define G_PD1(x) (((x) >> S_PD1) & M_PD1)
+#define A_T7_PCIE_VC1_CDTS1 0x56d4
#define A_PCIE_VC1_CDTS1 0x56d8
#define S_CPLH1 20
@@ -5414,6 +7033,7 @@
#define V_NPD1(x) ((x) << S_NPD1)
#define G_NPD1(x) (((x) >> S_NPD1) & M_NPD1)
+#define A_PCIE_VC1_CDTS2 0x56d8
#define A_PCIE_FLR_PF_STATUS 0x56dc
#define A_PCIE_FLR_VF0_STATUS 0x56e0
#define A_PCIE_FLR_VF1_STATUS 0x56e4
@@ -5916,6 +7536,11 @@
#define V_DISABLE_SCRAMBLER(x) ((x) << S_DISABLE_SCRAMBLER)
#define F_DISABLE_SCRAMBLER V_DISABLE_SCRAMBLER(1U)
+#define S_RATE_SHADOW_SEL 24
+#define M_RATE_SHADOW_SEL 0x3U
+#define V_RATE_SHADOW_SEL(x) ((x) << S_RATE_SHADOW_SEL)
+#define G_RATE_SHADOW_SEL(x) (((x) >> S_RATE_SHADOW_SEL) & M_RATE_SHADOW_SEL)
+
#define A_PCIE_CORE_GEN3_EQ_FS_LF 0x5894
#define S_FULL_SWING 6
@@ -6347,6 +7972,35 @@
#define V_RDSOPCNT(x) ((x) << S_RDSOPCNT)
#define G_RDSOPCNT(x) (((x) >> S_RDSOPCNT) & M_RDSOPCNT)
+#define S_DMA_COOKIECNT 24
+#define M_DMA_COOKIECNT 0xfU
+#define V_DMA_COOKIECNT(x) ((x) << S_DMA_COOKIECNT)
+#define G_DMA_COOKIECNT(x) (((x) >> S_DMA_COOKIECNT) & M_DMA_COOKIECNT)
+
+#define S_DMA_RDSEQNUMUPDCNT 20
+#define M_DMA_RDSEQNUMUPDCNT 0xfU
+#define V_DMA_RDSEQNUMUPDCNT(x) ((x) << S_DMA_RDSEQNUMUPDCNT)
+#define G_DMA_RDSEQNUMUPDCNT(x) (((x) >> S_DMA_RDSEQNUMUPDCNT) & M_DMA_RDSEQNUMUPDCNT)
+
+#define S_DMA_SIREQCNT 16
+#define M_DMA_SIREQCNT 0xfU
+#define V_DMA_SIREQCNT(x) ((x) << S_DMA_SIREQCNT)
+#define G_DMA_SIREQCNT(x) (((x) >> S_DMA_SIREQCNT) & M_DMA_SIREQCNT)
+
+#define S_DMA_WREOPMATCHSOP 12
+#define V_DMA_WREOPMATCHSOP(x) ((x) << S_DMA_WREOPMATCHSOP)
+#define F_DMA_WREOPMATCHSOP V_DMA_WREOPMATCHSOP(1U)
+
+#define S_DMA_WRSOPCNT 8
+#define M_DMA_WRSOPCNT 0xfU
+#define V_DMA_WRSOPCNT(x) ((x) << S_DMA_WRSOPCNT)
+#define G_DMA_WRSOPCNT(x) (((x) >> S_DMA_WRSOPCNT) & M_DMA_WRSOPCNT)
+
+#define S_DMA_RDSOPCNT 0
+#define M_DMA_RDSOPCNT 0xffU
+#define V_DMA_RDSOPCNT(x) ((x) << S_DMA_RDSOPCNT)
+#define G_DMA_RDSOPCNT(x) (((x) >> S_DMA_RDSOPCNT) & M_DMA_RDSOPCNT)
+
#define A_PCIE_T5_DMA_STAT3 0x594c
#define S_ATMREQSOPCNT 24
@@ -6372,6 +8026,29 @@
#define V_RSPSOPCNT(x) ((x) << S_RSPSOPCNT)
#define G_RSPSOPCNT(x) (((x) >> S_RSPSOPCNT) & M_RSPSOPCNT)
+#define S_DMA_ATMREQSOPCNT 24
+#define M_DMA_ATMREQSOPCNT 0xffU
+#define V_DMA_ATMREQSOPCNT(x) ((x) << S_DMA_ATMREQSOPCNT)
+#define G_DMA_ATMREQSOPCNT(x) (((x) >> S_DMA_ATMREQSOPCNT) & M_DMA_ATMREQSOPCNT)
+
+#define S_DMA_ATMEOPMATCHSOP 17
+#define V_DMA_ATMEOPMATCHSOP(x) ((x) << S_DMA_ATMEOPMATCHSOP)
+#define F_DMA_ATMEOPMATCHSOP V_DMA_ATMEOPMATCHSOP(1U)
+
+#define S_DMA_RSPEOPMATCHSOP 16
+#define V_DMA_RSPEOPMATCHSOP(x) ((x) << S_DMA_RSPEOPMATCHSOP)
+#define F_DMA_RSPEOPMATCHSOP V_DMA_RSPEOPMATCHSOP(1U)
+
+#define S_DMA_RSPERRCNT 8
+#define M_DMA_RSPERRCNT 0xffU
+#define V_DMA_RSPERRCNT(x) ((x) << S_DMA_RSPERRCNT)
+#define G_DMA_RSPERRCNT(x) (((x) >> S_DMA_RSPERRCNT) & M_DMA_RSPERRCNT)
+
+#define S_DMA_RSPSOPCNT 0
+#define M_DMA_RSPSOPCNT 0xffU
+#define V_DMA_RSPSOPCNT(x) ((x) << S_DMA_RSPSOPCNT)
+#define G_DMA_RSPSOPCNT(x) (((x) >> S_DMA_RSPSOPCNT) & M_DMA_RSPSOPCNT)
+
#define A_PCIE_CORE_OUTBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5960
#define S_OP0H 24
@@ -6507,11 +8184,6 @@
#define V_T6_USECMDPOOL(x) ((x) << S_T6_USECMDPOOL)
#define F_T6_USECMDPOOL V_T6_USECMDPOOL(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
-
#define A_PCIE_T5_CMD_STAT 0x5984
#define S_T5_STAT_RSPCNT 20
@@ -6558,6 +8230,21 @@
#define A_PCIE_T5_CMD_STAT2 0x5988
#define A_PCIE_T5_CMD_STAT3 0x598c
+
+#define S_CMD_RSPEOPMATCHSOP 16
+#define V_CMD_RSPEOPMATCHSOP(x) ((x) << S_CMD_RSPEOPMATCHSOP)
+#define F_CMD_RSPEOPMATCHSOP V_CMD_RSPEOPMATCHSOP(1U)
+
+#define S_CMD_RSPERRCNT 8
+#define M_CMD_RSPERRCNT 0xffU
+#define V_CMD_RSPERRCNT(x) ((x) << S_CMD_RSPERRCNT)
+#define G_CMD_RSPERRCNT(x) (((x) >> S_CMD_RSPERRCNT) & M_CMD_RSPERRCNT)
+
+#define S_CMD_RSPSOPCNT 0
+#define M_CMD_RSPSOPCNT 0xffU
+#define V_CMD_RSPSOPCNT(x) ((x) << S_CMD_RSPSOPCNT)
+#define G_CMD_RSPSOPCNT(x) (((x) >> S_CMD_RSPSOPCNT) & M_CMD_RSPSOPCNT)
+
#define A_PCIE_CORE_PCI_EXPRESS_TAGS_ALLOCATION 0x5990
#define S_OC0T 24
@@ -6868,14 +8555,14 @@
#define V_T6_T5_HMA_MAXRSPCNT(x) ((x) << S_T6_T5_HMA_MAXRSPCNT)
#define G_T6_T5_HMA_MAXRSPCNT(x) (((x) >> S_T6_T5_HMA_MAXRSPCNT) & M_T6_T5_HMA_MAXRSPCNT)
-#define S_T6_SEQCHKDIS 8
-#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS)
-#define F_T6_SEQCHKDIS V_T6_SEQCHKDIS(1U)
+#define S_T5_HMA_SEQCHKDIS 8
+#define V_T5_HMA_SEQCHKDIS(x) ((x) << S_T5_HMA_SEQCHKDIS)
+#define F_T5_HMA_SEQCHKDIS V_T5_HMA_SEQCHKDIS(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+#define S_T5_MINTAG 0
+#define M_T5_MINTAG 0xffU
+#define V_T5_MINTAG(x) ((x) << S_T5_MINTAG)
+#define G_T5_MINTAG(x) (((x) >> S_T5_MINTAG) & M_T5_MINTAG)
#define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4
@@ -6992,6 +8679,31 @@
#define F_CRSI V_CRSI(1U)
#define A_PCIE_T5_HMA_STAT2 0x59b8
+
+#define S_HMA_COOKIECNT 24
+#define M_HMA_COOKIECNT 0xfU
+#define V_HMA_COOKIECNT(x) ((x) << S_HMA_COOKIECNT)
+#define G_HMA_COOKIECNT(x) (((x) >> S_HMA_COOKIECNT) & M_HMA_COOKIECNT)
+
+#define S_HMA_RDSEQNUMUPDCNT 20
+#define M_HMA_RDSEQNUMUPDCNT 0xfU
+#define V_HMA_RDSEQNUMUPDCNT(x) ((x) << S_HMA_RDSEQNUMUPDCNT)
+#define G_HMA_RDSEQNUMUPDCNT(x) (((x) >> S_HMA_RDSEQNUMUPDCNT) & M_HMA_RDSEQNUMUPDCNT)
+
+#define S_HMA_WREOPMATCHSOP 12
+#define V_HMA_WREOPMATCHSOP(x) ((x) << S_HMA_WREOPMATCHSOP)
+#define F_HMA_WREOPMATCHSOP V_HMA_WREOPMATCHSOP(1U)
+
+#define S_HMA_WRSOPCNT 8
+#define M_HMA_WRSOPCNT 0xfU
+#define V_HMA_WRSOPCNT(x) ((x) << S_HMA_WRSOPCNT)
+#define G_HMA_WRSOPCNT(x) (((x) >> S_HMA_WRSOPCNT) & M_HMA_WRSOPCNT)
+
+#define S_HMA_RDSOPCNT 0
+#define M_HMA_RDSOPCNT 0xffU
+#define V_HMA_RDSOPCNT(x) ((x) << S_HMA_RDSOPCNT)
+#define G_HMA_RDSOPCNT(x) (((x) >> S_HMA_RDSOPCNT) & M_HMA_RDSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_STATUS 0x59bc
#define S_PTOM 31
@@ -7035,6 +8747,21 @@
#define F_PMC7 V_PMC7(1U)
#define A_PCIE_T5_HMA_STAT3 0x59bc
+
+#define S_HMA_RSPEOPMATCHSOP 16
+#define V_HMA_RSPEOPMATCHSOP(x) ((x) << S_HMA_RSPEOPMATCHSOP)
+#define F_HMA_RSPEOPMATCHSOP V_HMA_RSPEOPMATCHSOP(1U)
+
+#define S_HMA_RSPERRCNT 8
+#define M_HMA_RSPERRCNT 0xffU
+#define V_HMA_RSPERRCNT(x) ((x) << S_HMA_RSPERRCNT)
+#define G_HMA_RSPERRCNT(x) (((x) >> S_HMA_RSPERRCNT) & M_HMA_RSPERRCNT)
+
+#define S_HMA_RSPSOPCNT 0
+#define M_HMA_RSPSOPCNT 0xffU
+#define V_HMA_RSPSOPCNT(x) ((x) << S_HMA_RSPSOPCNT)
+#define G_HMA_RSPSOPCNT(x) (((x) >> S_HMA_RSPSOPCNT) & M_HMA_RSPSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_ERROR_SEVERITY 0x59c0
#define S_PTOS 31
@@ -7187,6 +8914,14 @@
#define V_STI_SLEEPREQ(x) ((x) << S_STI_SLEEPREQ)
#define F_STI_SLEEPREQ V_STI_SLEEPREQ(1U)
+#define S_ARM_STATIC_CGEN 28
+#define V_ARM_STATIC_CGEN(x) ((x) << S_ARM_STATIC_CGEN)
+#define F_ARM_STATIC_CGEN V_ARM_STATIC_CGEN(1U)
+
+#define S_ARM_DYNAMIC_CGEN 27
+#define V_ARM_DYNAMIC_CGEN(x) ((x) << S_ARM_DYNAMIC_CGEN)
+#define F_ARM_DYNAMIC_CGEN V_ARM_DYNAMIC_CGEN(1U)
+
#define A_PCIE_CORE_ENDPOINT_INTERRUPT_ENABLE 0x59c4
#define S_PTOI 31
@@ -7521,6 +9256,14 @@
#define V_PIOCPL_VDMTXDATAPERR(x) ((x) << S_PIOCPL_VDMTXDATAPERR)
#define F_PIOCPL_VDMTXDATAPERR V_PIOCPL_VDMTXDATAPERR(1U)
+#define S_TGT1_MEM_PERR 28
+#define V_TGT1_MEM_PERR(x) ((x) << S_TGT1_MEM_PERR)
+#define F_TGT1_MEM_PERR V_TGT1_MEM_PERR(1U)
+
+#define S_TGT2_MEM_PERR 27
+#define V_TGT2_MEM_PERR(x) ((x) << S_TGT2_MEM_PERR)
+#define F_TGT2_MEM_PERR V_TGT2_MEM_PERR(1U)
+
#define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4
#define A_PCIE_RSP_ERR_INT_LOG_EN 0x59d4
@@ -7622,6 +9365,16 @@
#define V_T6_REQVFID(x) ((x) << S_T6_REQVFID)
#define G_T6_REQVFID(x) (((x) >> S_T6_REQVFID) & M_T6_REQVFID)
+#define S_LOGADDR10B 9
+#define M_LOGADDR10B 0x3ffU
+#define V_LOGADDR10B(x) ((x) << S_LOGADDR10B)
+#define G_LOGADDR10B(x) (((x) >> S_LOGADDR10B) & M_LOGADDR10B)
+
+#define S_LOGREQVFID 0
+#define M_LOGREQVFID 0x1ffU
+#define V_LOGREQVFID(x) ((x) << S_LOGREQVFID)
+#define G_LOGREQVFID(x) (((x) >> S_LOGREQVFID) & M_LOGREQVFID)
+
#define A_PCIE_CHANGESET 0x59fc
#define A_PCIE_REVISION 0x5a00
#define A_PCIE_PDEBUG_INDEX 0x5a04
@@ -7646,6 +9399,16 @@
#define V_T6_PDEBUGSELL(x) ((x) << S_T6_PDEBUGSELL)
#define G_T6_PDEBUGSELL(x) (((x) >> S_T6_PDEBUGSELL) & M_T6_PDEBUGSELL)
+#define S_T7_1_PDEBUGSELH 16
+#define M_T7_1_PDEBUGSELH 0xffU
+#define V_T7_1_PDEBUGSELH(x) ((x) << S_T7_1_PDEBUGSELH)
+#define G_T7_1_PDEBUGSELH(x) (((x) >> S_T7_1_PDEBUGSELH) & M_T7_1_PDEBUGSELH)
+
+#define S_T7_1_PDEBUGSELL 0
+#define M_T7_1_PDEBUGSELL 0xffU
+#define V_T7_1_PDEBUGSELL(x) ((x) << S_T7_1_PDEBUGSELL)
+#define G_T7_1_PDEBUGSELL(x) (((x) >> S_T7_1_PDEBUGSELL) & M_T7_1_PDEBUGSELL)
+
#define A_PCIE_PDEBUG_DATA_HIGH 0x5a08
#define A_PCIE_PDEBUG_DATA_LOW 0x5a0c
#define A_PCIE_CDEBUG_INDEX 0x5a10
@@ -8468,6 +10231,21 @@
#define A_PCIE_PHY_INDIR_DATA 0x5bf4
#define A_PCIE_STATIC_SPARE1 0x5bf8
#define A_PCIE_STATIC_SPARE2 0x5bfc
+
+#define S_X8_SW_EN 30
+#define V_X8_SW_EN(x) ((x) << S_X8_SW_EN)
+#define F_X8_SW_EN V_X8_SW_EN(1U)
+
+#define S_SWITCHCFG 28
+#define M_SWITCHCFG 0x3U
+#define V_SWITCHCFG(x) ((x) << S_SWITCHCFG)
+#define G_SWITCHCFG(x) (((x) >> S_SWITCHCFG) & M_SWITCHCFG)
+
+#define S_STATIC_SPARE2 0
+#define M_STATIC_SPARE2 0xfffffffU
+#define V_STATIC_SPARE2(x) ((x) << S_STATIC_SPARE2)
+#define G_STATIC_SPARE2(x) (((x) >> S_STATIC_SPARE2) & M_STATIC_SPARE2)
+
#define A_PCIE_KDOORBELL_GTS_PF_BASE_LEN 0x5c10
#define S_KDB_PF_LEN 24
@@ -8872,9 +10650,13 @@
#define A_PCIE_FLR_VF6_STATUS 0x5e78
#define A_PCIE_FLR_VF7_STATUS 0x5e7c
#define A_T6_PCIE_BUS_MST_STAT_4 0x5e80
+#define A_T7_PCIE_BUS_MST_STAT_4 0x5e80
#define A_T6_PCIE_BUS_MST_STAT_5 0x5e84
+#define A_T7_PCIE_BUS_MST_STAT_5 0x5e84
#define A_T6_PCIE_BUS_MST_STAT_6 0x5e88
+#define A_T7_PCIE_BUS_MST_STAT_6 0x5e88
#define A_T6_PCIE_BUS_MST_STAT_7 0x5e8c
+#define A_T7_PCIE_BUS_MST_STAT_7 0x5e8c
#define A_PCIE_BUS_MST_STAT_8 0x5e90
#define S_BUSMST_263_256 0
@@ -8895,9 +10677,13 @@
#define G_DATAFREECNT(x) (((x) >> S_DATAFREECNT) & M_DATAFREECNT)
#define A_T6_PCIE_RSP_ERR_STAT_4 0x5ea0
+#define A_T7_PCIE_RSP_ERR_STAT_4 0x5ea0
#define A_T6_PCIE_RSP_ERR_STAT_5 0x5ea4
+#define A_T7_PCIE_RSP_ERR_STAT_5 0x5ea4
#define A_T6_PCIE_RSP_ERR_STAT_6 0x5ea8
+#define A_T7_PCIE_RSP_ERR_STAT_6 0x5ea8
#define A_T6_PCIE_RSP_ERR_STAT_7 0x5eac
+#define A_T7_PCIE_RSP_ERR_STAT_7 0x5eac
#define A_PCIE_RSP_ERR_STAT_8 0x5eb0
#define S_RSPERR_263_256 0
@@ -9025,6 +10811,1028 @@
#define A_PCIE_DEBUG_ADDR_RANGE1 0x5ee0
#define A_PCIE_DEBUG_ADDR_RANGE2 0x5ef0
#define A_PCIE_DEBUG_ADDR_RANGE_CNT 0x5f00
+#define A_PCIE_PHY_PGM_LOAD_CTRL 0x5f04
+
+#define S_HSS_PMLD_ACC_EN 31
+#define V_HSS_PMLD_ACC_EN(x) ((x) << S_HSS_PMLD_ACC_EN)
+#define F_HSS_PMLD_ACC_EN V_HSS_PMLD_ACC_EN(1U)
+
+#define S_HSS_PMRDWR_ADDR 0
+#define M_HSS_PMRDWR_ADDR 0x3ffffU
+#define V_HSS_PMRDWR_ADDR(x) ((x) << S_HSS_PMRDWR_ADDR)
+#define G_HSS_PMRDWR_ADDR(x) (((x) >> S_HSS_PMRDWR_ADDR) & M_HSS_PMRDWR_ADDR)
+
+#define A_PCIE_PHY_PGM_LOAD_DATA 0x5f08
+#define A_PCIE_HSS_CFG 0x5f0c
+
+#define S_HSS_PCS_AGGREGATION_MODE 30
+#define M_HSS_PCS_AGGREGATION_MODE 0x3U
+#define V_HSS_PCS_AGGREGATION_MODE(x) ((x) << S_HSS_PCS_AGGREGATION_MODE)
+#define G_HSS_PCS_AGGREGATION_MODE(x) (((x) >> S_HSS_PCS_AGGREGATION_MODE) & M_HSS_PCS_AGGREGATION_MODE)
+
+#define S_HSS_PCS_FURCATE_MODE 28
+#define M_HSS_PCS_FURCATE_MODE 0x3U
+#define V_HSS_PCS_FURCATE_MODE(x) ((x) << S_HSS_PCS_FURCATE_MODE)
+#define G_HSS_PCS_FURCATE_MODE(x) (((x) >> S_HSS_PCS_FURCATE_MODE) & M_HSS_PCS_FURCATE_MODE)
+
+#define S_HSS_PCS_PCLK_ON_IN_P2 27
+#define V_HSS_PCS_PCLK_ON_IN_P2(x) ((x) << S_HSS_PCS_PCLK_ON_IN_P2)
+#define F_HSS_PCS_PCLK_ON_IN_P2 V_HSS_PCS_PCLK_ON_IN_P2(1U)
+
+#define S_HSS0_PHY_CTRL_REFCLK 17
+#define M_HSS0_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS0_PHY_CTRL_REFCLK(x) ((x) << S_HSS0_PHY_CTRL_REFCLK)
+#define G_HSS0_PHY_CTRL_REFCLK(x) (((x) >> S_HSS0_PHY_CTRL_REFCLK) & M_HSS0_PHY_CTRL_REFCLK)
+
+#define S_HSS1_PHY_CTRL_REFCLK 12
+#define M_HSS1_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS1_PHY_CTRL_REFCLK(x) ((x) << S_HSS1_PHY_CTRL_REFCLK)
+#define G_HSS1_PHY_CTRL_REFCLK(x) (((x) >> S_HSS1_PHY_CTRL_REFCLK) & M_HSS1_PHY_CTRL_REFCLK)
+
+#define S_HSS0_PHY_REXT_MASTER 11
+#define V_HSS0_PHY_REXT_MASTER(x) ((x) << S_HSS0_PHY_REXT_MASTER)
+#define F_HSS0_PHY_REXT_MASTER V_HSS0_PHY_REXT_MASTER(1U)
+
+#define S_HSS1_PHY_REXT_MASTER 10
+#define V_HSS1_PHY_REXT_MASTER(x) ((x) << S_HSS1_PHY_REXT_MASTER)
+#define F_HSS1_PHY_REXT_MASTER V_HSS1_PHY_REXT_MASTER(1U)
+
+#define S_HSS0_PHY_CTRL_VDDA_SEL 9
+#define V_HSS0_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDA_SEL)
+#define F_HSS0_PHY_CTRL_VDDA_SEL V_HSS0_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS0_PHY_CTRL_VDDHA_SEL 8
+#define V_HSS0_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDHA_SEL)
+#define F_HSS0_PHY_CTRL_VDDHA_SEL V_HSS0_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDA_SEL 7
+#define V_HSS1_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDA_SEL)
+#define F_HSS1_PHY_CTRL_VDDA_SEL V_HSS1_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDHA_SEL 6
+#define V_HSS1_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDHA_SEL)
+#define F_HSS1_PHY_CTRL_VDDHA_SEL V_HSS1_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_CPU_MEMPSACK 5
+#define V_HSS1_CPU_MEMPSACK(x) ((x) << S_HSS1_CPU_MEMPSACK)
+#define F_HSS1_CPU_MEMPSACK V_HSS1_CPU_MEMPSACK(1U)
+
+#define S_HSS0_CPU_MEMPSACK 3
+#define V_HSS0_CPU_MEMPSACK(x) ((x) << S_HSS0_CPU_MEMPSACK)
+#define F_HSS0_CPU_MEMPSACK V_HSS0_CPU_MEMPSACK(1U)
+
+#define S_HSS1_CPU_MEMACK 4
+#define V_HSS1_CPU_MEMACK(x) ((x) << S_HSS1_CPU_MEMACK)
+#define F_HSS1_CPU_MEMACK V_HSS1_CPU_MEMACK(1U)
+
+#define S_HSS0_CPU_MEMACK 2
+#define V_HSS0_CPU_MEMACK(x) ((x) << S_HSS0_CPU_MEMACK)
+#define F_HSS0_CPU_MEMACK V_HSS0_CPU_MEMACK(1U)
+
+#define S_HSS_PM_IS_ROM 1
+#define V_HSS_PM_IS_ROM(x) ((x) << S_HSS_PM_IS_ROM)
+#define F_HSS_PM_IS_ROM V_HSS_PM_IS_ROM(1U)
+
+#define A_PCIE_HSS_RST 0x5f10
+
+#define S_HSS_RST_CTRL_BY_FW 31
+#define V_HSS_RST_CTRL_BY_FW(x) ((x) << S_HSS_RST_CTRL_BY_FW)
+#define F_HSS_RST_CTRL_BY_FW V_HSS_RST_CTRL_BY_FW(1U)
+
+#define S_HSS_PIPE0_RESET_N 30
+#define V_HSS_PIPE0_RESET_N(x) ((x) << S_HSS_PIPE0_RESET_N)
+#define F_HSS_PIPE0_RESET_N V_HSS_PIPE0_RESET_N(1U)
+
+#define S_HSS0_POR_N 29
+#define V_HSS0_POR_N(x) ((x) << S_HSS0_POR_N)
+#define F_HSS0_POR_N V_HSS0_POR_N(1U)
+
+#define S_HSS1_POR_N 28
+#define V_HSS1_POR_N(x) ((x) << S_HSS1_POR_N)
+#define F_HSS1_POR_N V_HSS1_POR_N(1U)
+
+#define S_HSS0_CPU_RESET 27
+#define V_HSS0_CPU_RESET(x) ((x) << S_HSS0_CPU_RESET)
+#define F_HSS0_CPU_RESET V_HSS0_CPU_RESET(1U)
+
+#define S_HSS1_CPU_RESET 26
+#define V_HSS1_CPU_RESET(x) ((x) << S_HSS1_CPU_RESET)
+#define F_HSS1_CPU_RESET V_HSS1_CPU_RESET(1U)
+
+#define S_HSS_PCS_POR_N 25
+#define V_HSS_PCS_POR_N(x) ((x) << S_HSS_PCS_POR_N)
+#define F_HSS_PCS_POR_N V_HSS_PCS_POR_N(1U)
+
+#define S_SW_CRST_ 24
+#define V_SW_CRST_(x) ((x) << S_SW_CRST_)
+#define F_SW_CRST_ V_SW_CRST_(1U)
+
+#define S_SW_PCIECRST_ 23
+#define V_SW_PCIECRST_(x) ((x) << S_SW_PCIECRST_)
+#define F_SW_PCIECRST_ V_SW_PCIECRST_(1U)
+
+#define S_SW_PCIEPIPERST_ 22
+#define V_SW_PCIEPIPERST_(x) ((x) << S_SW_PCIEPIPERST_)
+#define F_SW_PCIEPIPERST_ V_SW_PCIEPIPERST_(1U)
+
+#define S_SW_PCIEPHYRST_ 21
+#define V_SW_PCIEPHYRST_(x) ((x) << S_SW_PCIEPHYRST_)
+#define F_SW_PCIEPHYRST_ V_SW_PCIEPHYRST_(1U)
+
+#define S_HSS1_ERR_O 3
+#define V_HSS1_ERR_O(x) ((x) << S_HSS1_ERR_O)
+#define F_HSS1_ERR_O V_HSS1_ERR_O(1U)
+
+#define S_HSS0_ERR_O 2
+#define V_HSS0_ERR_O(x) ((x) << S_HSS0_ERR_O)
+#define F_HSS0_ERR_O V_HSS0_ERR_O(1U)
+
+#define S_HSS1_PLL_LOCK 1
+#define V_HSS1_PLL_LOCK(x) ((x) << S_HSS1_PLL_LOCK)
+#define F_HSS1_PLL_LOCK V_HSS1_PLL_LOCK(1U)
+
+#define S_HSS0_PLL_LOCK 0
+#define V_HSS0_PLL_LOCK(x) ((x) << S_HSS0_PLL_LOCK)
+#define F_HSS0_PLL_LOCK V_HSS0_PLL_LOCK(1U)
+
+#define A_PCIE_T5_ARM_CFG 0x5f20
+
+#define S_T5_ARM_MAXREQCNT 20
+#define M_T5_ARM_MAXREQCNT 0x7fU
+#define V_T5_ARM_MAXREQCNT(x) ((x) << S_T5_ARM_MAXREQCNT)
+#define G_T5_ARM_MAXREQCNT(x) (((x) >> S_T5_ARM_MAXREQCNT) & M_T5_ARM_MAXREQCNT)
+
+#define S_T5_ARM_MAXRDREQSIZE 17
+#define M_T5_ARM_MAXRDREQSIZE 0x7U
+#define V_T5_ARM_MAXRDREQSIZE(x) ((x) << S_T5_ARM_MAXRDREQSIZE)
+#define G_T5_ARM_MAXRDREQSIZE(x) (((x) >> S_T5_ARM_MAXRDREQSIZE) & M_T5_ARM_MAXRDREQSIZE)
+
+#define S_T5_ARM_MAXRSPCNT 9
+#define M_T5_ARM_MAXRSPCNT 0xffU
+#define V_T5_ARM_MAXRSPCNT(x) ((x) << S_T5_ARM_MAXRSPCNT)
+#define G_T5_ARM_MAXRSPCNT(x) (((x) >> S_T5_ARM_MAXRSPCNT) & M_T5_ARM_MAXRSPCNT)
+
+#define A_PCIE_T5_ARM_STAT 0x5f24
+
+#define S_ARM_RESPCNT 20
+#define M_ARM_RESPCNT 0x1ffU
+#define V_ARM_RESPCNT(x) ((x) << S_ARM_RESPCNT)
+#define G_ARM_RESPCNT(x) (((x) >> S_ARM_RESPCNT) & M_ARM_RESPCNT)
+
+#define S_ARM_RDREQCNT 12
+#define M_ARM_RDREQCNT 0x3fU
+#define V_ARM_RDREQCNT(x) ((x) << S_ARM_RDREQCNT)
+#define G_ARM_RDREQCNT(x) (((x) >> S_ARM_RDREQCNT) & M_ARM_RDREQCNT)
+
+#define S_ARM_WRREQCNT 0
+#define M_ARM_WRREQCNT 0x1ffU
+#define V_ARM_WRREQCNT(x) ((x) << S_ARM_WRREQCNT)
+#define G_ARM_WRREQCNT(x) (((x) >> S_ARM_WRREQCNT) & M_ARM_WRREQCNT)
+
+#define A_PCIE_T5_ARM_STAT2 0x5f28
+
+#define S_ARM_COOKIECNT 24
+#define M_ARM_COOKIECNT 0xfU
+#define V_ARM_COOKIECNT(x) ((x) << S_ARM_COOKIECNT)
+#define G_ARM_COOKIECNT(x) (((x) >> S_ARM_COOKIECNT) & M_ARM_COOKIECNT)
+
+#define S_ARM_RDSEQNUMUPDCNT 20
+#define M_ARM_RDSEQNUMUPDCNT 0xfU
+#define V_ARM_RDSEQNUMUPDCNT(x) ((x) << S_ARM_RDSEQNUMUPDCNT)
+#define G_ARM_RDSEQNUMUPDCNT(x) (((x) >> S_ARM_RDSEQNUMUPDCNT) & M_ARM_RDSEQNUMUPDCNT)
+
+#define S_ARM_SIREQCNT 16
+#define M_ARM_SIREQCNT 0xfU
+#define V_ARM_SIREQCNT(x) ((x) << S_ARM_SIREQCNT)
+#define G_ARM_SIREQCNT(x) (((x) >> S_ARM_SIREQCNT) & M_ARM_SIREQCNT)
+
+#define S_ARM_WREOPMATCHSOP 12
+#define V_ARM_WREOPMATCHSOP(x) ((x) << S_ARM_WREOPMATCHSOP)
+#define F_ARM_WREOPMATCHSOP V_ARM_WREOPMATCHSOP(1U)
+
+#define S_ARM_WRSOPCNT 8
+#define M_ARM_WRSOPCNT 0xfU
+#define V_ARM_WRSOPCNT(x) ((x) << S_ARM_WRSOPCNT)
+#define G_ARM_WRSOPCNT(x) (((x) >> S_ARM_WRSOPCNT) & M_ARM_WRSOPCNT)
+
+#define S_ARM_RDSOPCNT 0
+#define M_ARM_RDSOPCNT 0xffU
+#define V_ARM_RDSOPCNT(x) ((x) << S_ARM_RDSOPCNT)
+#define G_ARM_RDSOPCNT(x) (((x) >> S_ARM_RDSOPCNT) & M_ARM_RDSOPCNT)
+
+#define A_PCIE_T5_ARM_STAT3 0x5f2c
+
+#define S_ARM_ATMREQSOPCNT 24
+#define M_ARM_ATMREQSOPCNT 0xffU
+#define V_ARM_ATMREQSOPCNT(x) ((x) << S_ARM_ATMREQSOPCNT)
+#define G_ARM_ATMREQSOPCNT(x) (((x) >> S_ARM_ATMREQSOPCNT) & M_ARM_ATMREQSOPCNT)
+
+#define S_ARM_ATMEOPMATCHSOP 17
+#define V_ARM_ATMEOPMATCHSOP(x) ((x) << S_ARM_ATMEOPMATCHSOP)
+#define F_ARM_ATMEOPMATCHSOP V_ARM_ATMEOPMATCHSOP(1U)
+
+#define S_ARM_RSPEOPMATCHSOP 16
+#define V_ARM_RSPEOPMATCHSOP(x) ((x) << S_ARM_RSPEOPMATCHSOP)
+#define F_ARM_RSPEOPMATCHSOP V_ARM_RSPEOPMATCHSOP(1U)
+
+#define S_ARM_RSPERRCNT 8
+#define M_ARM_RSPERRCNT 0xffU
+#define V_ARM_RSPERRCNT(x) ((x) << S_ARM_RSPERRCNT)
+#define G_ARM_RSPERRCNT(x) (((x) >> S_ARM_RSPERRCNT) & M_ARM_RSPERRCNT)
+
+#define S_ARM_RSPSOPCNT 0
+#define M_ARM_RSPSOPCNT 0xffU
+#define V_ARM_RSPSOPCNT(x) ((x) << S_ARM_RSPSOPCNT)
+#define G_ARM_RSPSOPCNT(x) (((x) >> S_ARM_RSPSOPCNT) & M_ARM_RSPSOPCNT)
+
+#define A_PCIE_ARM_REQUESTER_ID 0x5f30
+
+#define S_A0_RSVD1 24
+#define M_A0_RSVD1 0xffU
+#define V_A0_RSVD1(x) ((x) << S_A0_RSVD1)
+#define G_A0_RSVD1(x) (((x) >> S_A0_RSVD1) & M_A0_RSVD1)
+
+#define S_A0_PRIMBUSNUMBER 16
+#define M_A0_PRIMBUSNUMBER 0xffU
+#define V_A0_PRIMBUSNUMBER(x) ((x) << S_A0_PRIMBUSNUMBER)
+#define G_A0_PRIMBUSNUMBER(x) (((x) >> S_A0_PRIMBUSNUMBER) & M_A0_PRIMBUSNUMBER)
+
+#define S_A0_REQUESTERID 0
+#define M_A0_REQUESTERID 0xffffU
+#define V_A0_REQUESTERID(x) ((x) << S_A0_REQUESTERID)
+#define G_A0_REQUESTERID(x) (((x) >> S_A0_REQUESTERID) & M_A0_REQUESTERID)
+
+#define A_PCIE_SWITCH_CFG_SPACE_REQ0 0x5f34
+
+#define S_REQ0ENABLE 31
+#define V_REQ0ENABLE(x) ((x) << S_REQ0ENABLE)
+#define F_REQ0ENABLE V_REQ0ENABLE(1U)
+
+#define S_RDREQ0TYPE 19
+#define V_RDREQ0TYPE(x) ((x) << S_RDREQ0TYPE)
+#define F_RDREQ0TYPE V_RDREQ0TYPE(1U)
+
+#define S_BYTEENABLE0 15
+#define M_BYTEENABLE0 0xfU
+#define V_BYTEENABLE0(x) ((x) << S_BYTEENABLE0)
+#define G_BYTEENABLE0(x) (((x) >> S_BYTEENABLE0) & M_BYTEENABLE0)
+
+#define S_REGADDR0 0
+#define M_REGADDR0 0x7fffU
+#define V_REGADDR0(x) ((x) << S_REGADDR0)
+#define G_REGADDR0(x) (((x) >> S_REGADDR0) & M_REGADDR0)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA0 0x5f38
+#define A_PCIE_SWITCH_CFG_SPACE_REQ1 0x5f3c
+
+#define S_REQ1ENABLE 31
+#define V_REQ1ENABLE(x) ((x) << S_REQ1ENABLE)
+#define F_REQ1ENABLE V_REQ1ENABLE(1U)
+
+#define S_RDREQ1TYPE 26
+#define M_RDREQ1TYPE 0xfU
+#define V_RDREQ1TYPE(x) ((x) << S_RDREQ1TYPE)
+#define G_RDREQ1TYPE(x) (((x) >> S_RDREQ1TYPE) & M_RDREQ1TYPE)
+
+#define S_BYTEENABLE1 15
+#define M_BYTEENABLE1 0x7ffU
+#define V_BYTEENABLE1(x) ((x) << S_BYTEENABLE1)
+#define G_BYTEENABLE1(x) (((x) >> S_BYTEENABLE1) & M_BYTEENABLE1)
+
+#define S_REGADDR1 0
+#define M_REGADDR1 0x7fffU
+#define V_REGADDR1(x) ((x) << S_REGADDR1)
+#define G_REGADDR1(x) (((x) >> S_REGADDR1) & M_REGADDR1)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA1 0x5f40
+#define A_PCIE_SWITCH_CFG_SPACE_REQ2 0x5f44
+
+#define S_REQ2ENABLE 31
+#define V_REQ2ENABLE(x) ((x) << S_REQ2ENABLE)
+#define F_REQ2ENABLE V_REQ2ENABLE(1U)
+
+#define S_RDREQ2TYPE 26
+#define M_RDREQ2TYPE 0xfU
+#define V_RDREQ2TYPE(x) ((x) << S_RDREQ2TYPE)
+#define G_RDREQ2TYPE(x) (((x) >> S_RDREQ2TYPE) & M_RDREQ2TYPE)
+
+#define S_BYTEENABLE2 15
+#define M_BYTEENABLE2 0x7ffU
+#define V_BYTEENABLE2(x) ((x) << S_BYTEENABLE2)
+#define G_BYTEENABLE2(x) (((x) >> S_BYTEENABLE2) & M_BYTEENABLE2)
+
+#define S_REGADDR2 0
+#define M_REGADDR2 0x7fffU
+#define V_REGADDR2(x) ((x) << S_REGADDR2)
+#define G_REGADDR2(x) (((x) >> S_REGADDR2) & M_REGADDR2)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA2 0x5f48
+#define A_PCIE_SWITCH_CFG_SPACE_REQ3 0x5f4c
+
+#define S_REQ3ENABLE 31
+#define V_REQ3ENABLE(x) ((x) << S_REQ3ENABLE)
+#define F_REQ3ENABLE V_REQ3ENABLE(1U)
+
+#define S_RDREQ3TYPE 26
+#define M_RDREQ3TYPE 0xfU
+#define V_RDREQ3TYPE(x) ((x) << S_RDREQ3TYPE)
+#define G_RDREQ3TYPE(x) (((x) >> S_RDREQ3TYPE) & M_RDREQ3TYPE)
+
+#define S_BYTEENABLE3 15
+#define M_BYTEENABLE3 0x7ffU
+#define V_BYTEENABLE3(x) ((x) << S_BYTEENABLE3)
+#define G_BYTEENABLE3(x) (((x) >> S_BYTEENABLE3) & M_BYTEENABLE3)
+
+#define S_REGADDR3 0
+#define M_REGADDR3 0x7fffU
+#define V_REGADDR3(x) ((x) << S_REGADDR3)
+#define G_REGADDR3(x) (((x) >> S_REGADDR3) & M_REGADDR3)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA3 0x5f50
+#define A_PCIE_SWITCH_CFG_SPACE_REQ4 0x5f54
+
+#define S_REQ4ENABLE 31
+#define V_REQ4ENABLE(x) ((x) << S_REQ4ENABLE)
+#define F_REQ4ENABLE V_REQ4ENABLE(1U)
+
+#define S_RDREQ4TYPE 26
+#define M_RDREQ4TYPE 0xfU
+#define V_RDREQ4TYPE(x) ((x) << S_RDREQ4TYPE)
+#define G_RDREQ4TYPE(x) (((x) >> S_RDREQ4TYPE) & M_RDREQ4TYPE)
+
+#define S_BYTEENABLE4 15
+#define M_BYTEENABLE4 0x7ffU
+#define V_BYTEENABLE4(x) ((x) << S_BYTEENABLE4)
+#define G_BYTEENABLE4(x) (((x) >> S_BYTEENABLE4) & M_BYTEENABLE4)
+
+#define S_REGADDR4 0
+#define M_REGADDR4 0x7fffU
+#define V_REGADDR4(x) ((x) << S_REGADDR4)
+#define G_REGADDR4(x) (((x) >> S_REGADDR4) & M_REGADDR4)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA4 0x5f58
+#define A_PCIE_SWITCH_CFG_SPACE_REQ5 0x5f5c
+
+#define S_REQ5ENABLE 31
+#define V_REQ5ENABLE(x) ((x) << S_REQ5ENABLE)
+#define F_REQ5ENABLE V_REQ5ENABLE(1U)
+
+#define S_RDREQ5TYPE 26
+#define M_RDREQ5TYPE 0xfU
+#define V_RDREQ5TYPE(x) ((x) << S_RDREQ5TYPE)
+#define G_RDREQ5TYPE(x) (((x) >> S_RDREQ5TYPE) & M_RDREQ5TYPE)
+
+#define S_BYTEENABLE5 15
+#define M_BYTEENABLE5 0x7ffU
+#define V_BYTEENABLE5(x) ((x) << S_BYTEENABLE5)
+#define G_BYTEENABLE5(x) (((x) >> S_BYTEENABLE5) & M_BYTEENABLE5)
+
+#define S_REGADDR5 0
+#define M_REGADDR5 0x7fffU
+#define V_REGADDR5(x) ((x) << S_REGADDR5)
+#define G_REGADDR5(x) (((x) >> S_REGADDR5) & M_REGADDR5)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA5 0x5f60
+#define A_PCIE_SWITCH_CFG_SPACE_REQ6 0x5f64
+
+#define S_REQ6ENABLE 31
+#define V_REQ6ENABLE(x) ((x) << S_REQ6ENABLE)
+#define F_REQ6ENABLE V_REQ6ENABLE(1U)
+
+#define S_RDREQ6TYPE 26
+#define M_RDREQ6TYPE 0xfU
+#define V_RDREQ6TYPE(x) ((x) << S_RDREQ6TYPE)
+#define G_RDREQ6TYPE(x) (((x) >> S_RDREQ6TYPE) & M_RDREQ6TYPE)
+
+#define S_BYTEENABLE6 15
+#define M_BYTEENABLE6 0x7ffU
+#define V_BYTEENABLE6(x) ((x) << S_BYTEENABLE6)
+#define G_BYTEENABLE6(x) (((x) >> S_BYTEENABLE6) & M_BYTEENABLE6)
+
+#define S_REGADDR6 0
+#define M_REGADDR6 0x7fffU
+#define V_REGADDR6(x) ((x) << S_REGADDR6)
+#define G_REGADDR6(x) (((x) >> S_REGADDR6) & M_REGADDR6)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA6 0x5f68
+#define A_PCIE_SWITCH_CFG_SPACE_REQ7 0x5f6c
+
+#define S_REQ7ENABLE 31
+#define V_REQ7ENABLE(x) ((x) << S_REQ7ENABLE)
+#define F_REQ7ENABLE V_REQ7ENABLE(1U)
+
+#define S_RDREQ7TYPE 26
+#define M_RDREQ7TYPE 0xfU
+#define V_RDREQ7TYPE(x) ((x) << S_RDREQ7TYPE)
+#define G_RDREQ7TYPE(x) (((x) >> S_RDREQ7TYPE) & M_RDREQ7TYPE)
+
+#define S_BYTEENABLE7 15
+#define M_BYTEENABLE7 0x7ffU
+#define V_BYTEENABLE7(x) ((x) << S_BYTEENABLE7)
+#define G_BYTEENABLE7(x) (((x) >> S_BYTEENABLE7) & M_BYTEENABLE7)
+
+#define S_REGADDR7 0
+#define M_REGADDR7 0x7fffU
+#define V_REGADDR7(x) ((x) << S_REGADDR7)
+#define G_REGADDR7(x) (((x) >> S_REGADDR7) & M_REGADDR7)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA7 0x5f70
+#define A_PCIE_SWITCH_CFG_SPACE_REQ8 0x5f74
+
+#define S_REQ8ENABLE 31
+#define V_REQ8ENABLE(x) ((x) << S_REQ8ENABLE)
+#define F_REQ8ENABLE V_REQ8ENABLE(1U)
+
+#define S_RDREQ8TYPE 26
+#define M_RDREQ8TYPE 0xfU
+#define V_RDREQ8TYPE(x) ((x) << S_RDREQ8TYPE)
+#define G_RDREQ8TYPE(x) (((x) >> S_RDREQ8TYPE) & M_RDREQ8TYPE)
+
+#define S_BYTEENABLE8 15
+#define M_BYTEENABLE8 0x7ffU
+#define V_BYTEENABLE8(x) ((x) << S_BYTEENABLE8)
+#define G_BYTEENABLE8(x) (((x) >> S_BYTEENABLE8) & M_BYTEENABLE8)
+
+#define S_REGADDR8 0
+#define M_REGADDR8 0x7fffU
+#define V_REGADDR8(x) ((x) << S_REGADDR8)
+#define G_REGADDR8(x) (((x) >> S_REGADDR8) & M_REGADDR8)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA8 0x5f78
+#define A_PCIE_SNPS_G5_PHY_CR_REQ 0x5f7c
+
+#define S_REGSEL 31
+#define V_REGSEL(x) ((x) << S_REGSEL)
+#define F_REGSEL V_REGSEL(1U)
+
+#define S_RDENABLE 30
+#define V_RDENABLE(x) ((x) << S_RDENABLE)
+#define F_RDENABLE V_RDENABLE(1U)
+
+#define S_WRENABLE 29
+#define V_WRENABLE(x) ((x) << S_WRENABLE)
+#define F_WRENABLE V_WRENABLE(1U)
+
+#define S_AUTOINCRVAL 21
+#define M_AUTOINCRVAL 0x3U
+#define V_AUTOINCRVAL(x) ((x) << S_AUTOINCRVAL)
+#define G_AUTOINCRVAL(x) (((x) >> S_AUTOINCRVAL) & M_AUTOINCRVAL)
+
+#define S_AUTOINCR 20
+#define V_AUTOINCR(x) ((x) << S_AUTOINCR)
+#define F_AUTOINCR V_AUTOINCR(1U)
+
+#define S_PHYSEL 16
+#define M_PHYSEL 0xfU
+#define V_PHYSEL(x) ((x) << S_PHYSEL)
+#define G_PHYSEL(x) (((x) >> S_PHYSEL) & M_PHYSEL)
+
+#define S_T7_REGADDR 0
+#define M_T7_REGADDR 0xffffU
+#define V_T7_REGADDR(x) ((x) << S_T7_REGADDR)
+#define G_T7_REGADDR(x) (((x) >> S_T7_REGADDR) & M_T7_REGADDR)
+
+#define A_PCIE_SNPS_G5_PHY_CR_DATA 0x5f80
+#define A_PCIE_SNPS_G5_PHY_SRAM_CFG 0x5f84
+
+#define S_PHY3_SRAM_BOOTLOAD_BYPASS 27
+#define V_PHY3_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY3_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY3_SRAM_BOOTLOAD_BYPASS V_PHY3_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY3_SRAM_BYPASS 26
+#define V_PHY3_SRAM_BYPASS(x) ((x) << S_PHY3_SRAM_BYPASS)
+#define F_PHY3_SRAM_BYPASS V_PHY3_SRAM_BYPASS(1U)
+
+#define S_PHY3_SRAM_ECC_EN 25
+#define V_PHY3_SRAM_ECC_EN(x) ((x) << S_PHY3_SRAM_ECC_EN)
+#define F_PHY3_SRAM_ECC_EN V_PHY3_SRAM_ECC_EN(1U)
+
+#define S_PHY3_SRAM_EXT_LD_DONE 24
+#define V_PHY3_SRAM_EXT_LD_DONE(x) ((x) << S_PHY3_SRAM_EXT_LD_DONE)
+#define F_PHY3_SRAM_EXT_LD_DONE V_PHY3_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY2_SRAM_BOOTLOAD_BYPASS 19
+#define V_PHY2_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY2_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY2_SRAM_BOOTLOAD_BYPASS V_PHY2_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY2_SRAM_BYPASS 18
+#define V_PHY2_SRAM_BYPASS(x) ((x) << S_PHY2_SRAM_BYPASS)
+#define F_PHY2_SRAM_BYPASS V_PHY2_SRAM_BYPASS(1U)
+
+#define S_PHY2_SRAM_ECC_EN 17
+#define V_PHY2_SRAM_ECC_EN(x) ((x) << S_PHY2_SRAM_ECC_EN)
+#define F_PHY2_SRAM_ECC_EN V_PHY2_SRAM_ECC_EN(1U)
+
+#define S_PHY2_SRAM_EXT_LD_DONE 16
+#define V_PHY2_SRAM_EXT_LD_DONE(x) ((x) << S_PHY2_SRAM_EXT_LD_DONE)
+#define F_PHY2_SRAM_EXT_LD_DONE V_PHY2_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY1_SRAM_BOOTLOAD_BYPASS 11
+#define V_PHY1_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY1_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY1_SRAM_BOOTLOAD_BYPASS V_PHY1_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY1_SRAM_BYPASS 10
+#define V_PHY1_SRAM_BYPASS(x) ((x) << S_PHY1_SRAM_BYPASS)
+#define F_PHY1_SRAM_BYPASS V_PHY1_SRAM_BYPASS(1U)
+
+#define S_PHY1_SRAM_ECC_EN 9
+#define V_PHY1_SRAM_ECC_EN(x) ((x) << S_PHY1_SRAM_ECC_EN)
+#define F_PHY1_SRAM_ECC_EN V_PHY1_SRAM_ECC_EN(1U)
+
+#define S_PHY1_SRAM_EXT_LD_DONE 8
+#define V_PHY1_SRAM_EXT_LD_DONE(x) ((x) << S_PHY1_SRAM_EXT_LD_DONE)
+#define F_PHY1_SRAM_EXT_LD_DONE V_PHY1_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY_CR_PARA_SEL 4
+#define M_PHY_CR_PARA_SEL 0xfU
+#define V_PHY_CR_PARA_SEL(x) ((x) << S_PHY_CR_PARA_SEL)
+#define G_PHY_CR_PARA_SEL(x) (((x) >> S_PHY_CR_PARA_SEL) & M_PHY_CR_PARA_SEL)
+
+#define S_PHY0_SRAM_BOOTLOAD_BYPASS 3
+#define V_PHY0_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY0_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY0_SRAM_BOOTLOAD_BYPASS V_PHY0_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY0_SRAM_BYPASS 2
+#define V_PHY0_SRAM_BYPASS(x) ((x) << S_PHY0_SRAM_BYPASS)
+#define F_PHY0_SRAM_BYPASS V_PHY0_SRAM_BYPASS(1U)
+
+#define S_PHY0_SRAM_ECC_EN 1
+#define V_PHY0_SRAM_ECC_EN(x) ((x) << S_PHY0_SRAM_ECC_EN)
+#define F_PHY0_SRAM_ECC_EN V_PHY0_SRAM_ECC_EN(1U)
+
+#define S_PHY0_SRAM_EXT_LD_DONE 0
+#define V_PHY0_SRAM_EXT_LD_DONE(x) ((x) << S_PHY0_SRAM_EXT_LD_DONE)
+#define F_PHY0_SRAM_EXT_LD_DONE V_PHY0_SRAM_EXT_LD_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_SRAM_STS 0x5f88
+
+#define S_PHY3_SRAM_INIT_DONE 3
+#define V_PHY3_SRAM_INIT_DONE(x) ((x) << S_PHY3_SRAM_INIT_DONE)
+#define F_PHY3_SRAM_INIT_DONE V_PHY3_SRAM_INIT_DONE(1U)
+
+#define S_PHY2_SRAM_INIT_DONE 2
+#define V_PHY2_SRAM_INIT_DONE(x) ((x) << S_PHY2_SRAM_INIT_DONE)
+#define F_PHY2_SRAM_INIT_DONE V_PHY2_SRAM_INIT_DONE(1U)
+
+#define S_PHY1_SRAM_INIT_DONE 1
+#define V_PHY1_SRAM_INIT_DONE(x) ((x) << S_PHY1_SRAM_INIT_DONE)
+#define F_PHY1_SRAM_INIT_DONE V_PHY1_SRAM_INIT_DONE(1U)
+
+#define S_PHY0_SRAM_INIT_DONE 0
+#define V_PHY0_SRAM_INIT_DONE(x) ((x) << S_PHY0_SRAM_INIT_DONE)
+#define F_PHY0_SRAM_INIT_DONE V_PHY0_SRAM_INIT_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_TO_3 0x5f90
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_DATA 0x5f94
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_1_DATA 0x5f98
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_2_DATA 0x5f9c
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_3_DATA 0x5fa0
+#define A_PCIE_SNPS_G5_PHY_DEFAULTS 0x5fa4
+#define A_PCIE_SNPS_G5_PHY_0_VALUES 0x5fa8
+
+#define S_RX_TERM_OFFSET 28
+#define V_RX_TERM_OFFSET(x) ((x) << S_RX_TERM_OFFSET)
+#define F_RX_TERM_OFFSET V_RX_TERM_OFFSET(1U)
+
+#define S_REFB_RAW_CLK_DIV2_EN 27
+#define V_REFB_RAW_CLK_DIV2_EN(x) ((x) << S_REFB_RAW_CLK_DIV2_EN)
+#define F_REFB_RAW_CLK_DIV2_EN V_REFB_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFB_RANGE 23
+#define M_REFB_RANGE 0xfU
+#define V_REFB_RANGE(x) ((x) << S_REFB_RANGE)
+#define G_REFB_RANGE(x) (((x) >> S_REFB_RANGE) & M_REFB_RANGE)
+
+#define S_REFB_LANE_CLK_EN 22
+#define V_REFB_LANE_CLK_EN(x) ((x) << S_REFB_LANE_CLK_EN)
+#define F_REFB_LANE_CLK_EN V_REFB_LANE_CLK_EN(1U)
+
+#define S_REFB_CLK_DIV2_EN 21
+#define V_REFB_CLK_DIV2_EN(x) ((x) << S_REFB_CLK_DIV2_EN)
+#define F_REFB_CLK_DIV2_EN V_REFB_CLK_DIV2_EN(1U)
+
+#define S_REFA_RAW_CLK_DIV2_EN 20
+#define V_REFA_RAW_CLK_DIV2_EN(x) ((x) << S_REFA_RAW_CLK_DIV2_EN)
+#define F_REFA_RAW_CLK_DIV2_EN V_REFA_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFA_RANGE 16
+#define M_REFA_RANGE 0xfU
+#define V_REFA_RANGE(x) ((x) << S_REFA_RANGE)
+#define G_REFA_RANGE(x) (((x) >> S_REFA_RANGE) & M_REFA_RANGE)
+
+#define S_REFA_LANE_CLK_EN 15
+#define V_REFA_LANE_CLK_EN(x) ((x) << S_REFA_LANE_CLK_EN)
+#define F_REFA_LANE_CLK_EN V_REFA_LANE_CLK_EN(1U)
+
+#define S_REFA_CLK_DIV2_EN 14
+#define V_REFA_CLK_DIV2_EN(x) ((x) << S_REFA_CLK_DIV2_EN)
+#define F_REFA_CLK_DIV2_EN V_REFA_CLK_DIV2_EN(1U)
+
+#define S_NOMINAL_VPH_SEL 10
+#define M_NOMINAL_VPH_SEL 0x3U
+#define V_NOMINAL_VPH_SEL(x) ((x) << S_NOMINAL_VPH_SEL)
+#define G_NOMINAL_VPH_SEL(x) (((x) >> S_NOMINAL_VPH_SEL) & M_NOMINAL_VPH_SEL)
+
+#define S_NOMINAL_VP_SEL 8
+#define M_NOMINAL_VP_SEL 0x3U
+#define V_NOMINAL_VP_SEL(x) ((x) << S_NOMINAL_VP_SEL)
+#define G_NOMINAL_VP_SEL(x) (((x) >> S_NOMINAL_VP_SEL) & M_NOMINAL_VP_SEL)
+
+#define S_MPLLB_WORD_CLK_EN 7
+#define V_MPLLB_WORD_CLK_EN(x) ((x) << S_MPLLB_WORD_CLK_EN)
+#define F_MPLLB_WORD_CLK_EN V_MPLLB_WORD_CLK_EN(1U)
+
+#define S_MPLLB_SSC_EN 6
+#define V_MPLLB_SSC_EN(x) ((x) << S_MPLLB_SSC_EN)
+#define F_MPLLB_SSC_EN V_MPLLB_SSC_EN(1U)
+
+#define S_MPLLB_SHORT_LOCK_EN 5
+#define V_MPLLB_SHORT_LOCK_EN(x) ((x) << S_MPLLB_SHORT_LOCK_EN)
+#define F_MPLLB_SHORT_LOCK_EN V_MPLLB_SHORT_LOCK_EN(1U)
+
+#define S_MPLLB_FORCE_EN 4
+#define V_MPLLB_FORCE_EN(x) ((x) << S_MPLLB_FORCE_EN)
+#define F_MPLLB_FORCE_EN V_MPLLB_FORCE_EN(1U)
+
+#define S_MPLLA_WORD_CLK_EN 3
+#define V_MPLLA_WORD_CLK_EN(x) ((x) << S_MPLLA_WORD_CLK_EN)
+#define F_MPLLA_WORD_CLK_EN V_MPLLA_WORD_CLK_EN(1U)
+
+#define S_MPLLA_SSC_EN 2
+#define V_MPLLA_SSC_EN(x) ((x) << S_MPLLA_SSC_EN)
+#define F_MPLLA_SSC_EN V_MPLLA_SSC_EN(1U)
+
+#define S_MPLLA_SHORT_LOCK_EN 1
+#define V_MPLLA_SHORT_LOCK_EN(x) ((x) << S_MPLLA_SHORT_LOCK_EN)
+#define F_MPLLA_SHORT_LOCK_EN V_MPLLA_SHORT_LOCK_EN(1U)
+
+#define S_MPLLA_FORCE_EN 0
+#define V_MPLLA_FORCE_EN(x) ((x) << S_MPLLA_FORCE_EN)
+#define F_MPLLA_FORCE_EN V_MPLLA_FORCE_EN(1U)
+
+#define A_PCIE_SNPS_G5_PHY_1_VALUES 0x5fac
+
+#define S_REF_ALT1_CLK_M 13
+#define V_REF_ALT1_CLK_M(x) ((x) << S_REF_ALT1_CLK_M)
+#define F_REF_ALT1_CLK_M V_REF_ALT1_CLK_M(1U)
+
+#define S_REF_ALT1_CLK_P 12
+#define V_REF_ALT1_CLK_P(x) ((x) << S_REF_ALT1_CLK_P)
+#define F_REF_ALT1_CLK_P V_REF_ALT1_CLK_P(1U)
+
+#define A_PCIE_SNPS_G5_PHY_2_VALUES 0x5fb0
+#define A_PCIE_SNPS_G5_PHY_3_VALUES 0x5fb4
+#define A_PCIE_SNPS_G5_PHY_0_RX_LANEPLL_BYPASS_MODE 0x5fb8
+
+#define S_T7_LANE3 15
+#define M_T7_LANE3 0x1fU
+#define V_T7_LANE3(x) ((x) << S_T7_LANE3)
+#define G_T7_LANE3(x) (((x) >> S_T7_LANE3) & M_T7_LANE3)
+
+#define S_T7_LANE2 10
+#define M_T7_LANE2 0x1fU
+#define V_T7_LANE2(x) ((x) << S_T7_LANE2)
+#define G_T7_LANE2(x) (((x) >> S_T7_LANE2) & M_T7_LANE2)
+
+#define S_T7_LANE1 5
+#define M_T7_LANE1 0x1fU
+#define V_T7_LANE1(x) ((x) << S_T7_LANE1)
+#define G_T7_LANE1(x) (((x) >> S_T7_LANE1) & M_T7_LANE1)
+
+#define S_T7_LANE0 0
+#define M_T7_LANE0 0x1fU
+#define V_T7_LANE0(x) ((x) << S_T7_LANE0)
+#define G_T7_LANE0(x) (((x) >> S_T7_LANE0) & M_T7_LANE0)
+
+#define A_PCIE_SNPS_G5_PHY_1_RX_LANEPLL_BYPASS_MODE 0x5fbc
+#define A_PCIE_SNPS_G5_PHY_2_RX_LANEPLL_BYPASS_MODE 0x5fc0
+#define A_PCIE_SNPS_G5_PHY_3_RX_LANEPLL_BYPASS_MODE 0x5fc4
+#define A_PCIE_SNPS_G5_PHY_0_1_RX_LANEPLL_SRC_SEL 0x5fc8
+
+#define S_LANE7_LANEPLL_SRC_SEL 28
+#define M_LANE7_LANEPLL_SRC_SEL 0xfU
+#define V_LANE7_LANEPLL_SRC_SEL(x) ((x) << S_LANE7_LANEPLL_SRC_SEL)
+#define G_LANE7_LANEPLL_SRC_SEL(x) (((x) >> S_LANE7_LANEPLL_SRC_SEL) & M_LANE7_LANEPLL_SRC_SEL)
+
+#define S_LANE6_LANEPLL_SRC_SEL 24
+#define M_LANE6_LANEPLL_SRC_SEL 0xfU
+#define V_LANE6_LANEPLL_SRC_SEL(x) ((x) << S_LANE6_LANEPLL_SRC_SEL)
+#define G_LANE6_LANEPLL_SRC_SEL(x) (((x) >> S_LANE6_LANEPLL_SRC_SEL) & M_LANE6_LANEPLL_SRC_SEL)
+
+#define S_LANE5_LANEPLL_SRC_SEL 20
+#define M_LANE5_LANEPLL_SRC_SEL 0xfU
+#define V_LANE5_LANEPLL_SRC_SEL(x) ((x) << S_LANE5_LANEPLL_SRC_SEL)
+#define G_LANE5_LANEPLL_SRC_SEL(x) (((x) >> S_LANE5_LANEPLL_SRC_SEL) & M_LANE5_LANEPLL_SRC_SEL)
+
+#define S_LANE4_LANEPLL_SRC_SEL 16
+#define M_LANE4_LANEPLL_SRC_SEL 0xfU
+#define V_LANE4_LANEPLL_SRC_SEL(x) ((x) << S_LANE4_LANEPLL_SRC_SEL)
+#define G_LANE4_LANEPLL_SRC_SEL(x) (((x) >> S_LANE4_LANEPLL_SRC_SEL) & M_LANE4_LANEPLL_SRC_SEL)
+
+#define S_LANE3_LANEPLL_SRC_SEL 12
+#define M_LANE3_LANEPLL_SRC_SEL 0xfU
+#define V_LANE3_LANEPLL_SRC_SEL(x) ((x) << S_LANE3_LANEPLL_SRC_SEL)
+#define G_LANE3_LANEPLL_SRC_SEL(x) (((x) >> S_LANE3_LANEPLL_SRC_SEL) & M_LANE3_LANEPLL_SRC_SEL)
+
+#define S_LANE2_LANEPLL_SRC_SEL 8
+#define M_LANE2_LANEPLL_SRC_SEL 0xfU
+#define V_LANE2_LANEPLL_SRC_SEL(x) ((x) << S_LANE2_LANEPLL_SRC_SEL)
+#define G_LANE2_LANEPLL_SRC_SEL(x) (((x) >> S_LANE2_LANEPLL_SRC_SEL) & M_LANE2_LANEPLL_SRC_SEL)
+
+#define S_LANE1_LANEPLL_SRC_SEL 4
+#define M_LANE1_LANEPLL_SRC_SEL 0xfU
+#define V_LANE1_LANEPLL_SRC_SEL(x) ((x) << S_LANE1_LANEPLL_SRC_SEL)
+#define G_LANE1_LANEPLL_SRC_SEL(x) (((x) >> S_LANE1_LANEPLL_SRC_SEL) & M_LANE1_LANEPLL_SRC_SEL)
+
+#define S_LANE0_LANEPLL_SRC_SEL 0
+#define M_LANE0_LANEPLL_SRC_SEL 0xfU
+#define V_LANE0_LANEPLL_SRC_SEL(x) ((x) << S_LANE0_LANEPLL_SRC_SEL)
+#define G_LANE0_LANEPLL_SRC_SEL(x) (((x) >> S_LANE0_LANEPLL_SRC_SEL) & M_LANE0_LANEPLL_SRC_SEL)
+
+#define A_PCIE_SNPS_G5_PHY_2_3_RX_LANEPLL_SRC_SEL 0x5fcc
+#define A_PCIE_SNPS_G5_PHY_RX_DECERR 0x5fd0
+
+#define S_LANE15_REC_OVRD_8B10B_DECERR 30
+#define M_LANE15_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE15_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE15_REC_OVRD_8B10B_DECERR)
+#define G_LANE15_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE15_REC_OVRD_8B10B_DECERR) & M_LANE15_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE14_REC_OVRD_8B10B_DECERR 28
+#define M_LANE14_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE14_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE14_REC_OVRD_8B10B_DECERR)
+#define G_LANE14_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE14_REC_OVRD_8B10B_DECERR) & M_LANE14_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE13_REC_OVRD_8B10B_DECERR 26
+#define M_LANE13_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE13_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE13_REC_OVRD_8B10B_DECERR)
+#define G_LANE13_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE13_REC_OVRD_8B10B_DECERR) & M_LANE13_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE12_REC_OVRD_8B10B_DECERR 24
+#define M_LANE12_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE12_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE12_REC_OVRD_8B10B_DECERR)
+#define G_LANE12_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE12_REC_OVRD_8B10B_DECERR) & M_LANE12_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE11_REC_OVRD_8B10B_DECERR 22
+#define M_LANE11_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE11_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE11_REC_OVRD_8B10B_DECERR)
+#define G_LANE11_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE11_REC_OVRD_8B10B_DECERR) & M_LANE11_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE10_REC_OVRD_8B10B_DECERR 20
+#define M_LANE10_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE10_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE10_REC_OVRD_8B10B_DECERR)
+#define G_LANE10_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE10_REC_OVRD_8B10B_DECERR) & M_LANE10_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE9_REC_OVRD_8B10B_DECERR 18
+#define M_LANE9_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE9_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE9_REC_OVRD_8B10B_DECERR)
+#define G_LANE9_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE9_REC_OVRD_8B10B_DECERR) & M_LANE9_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE8_REC_OVRD_8B10B_DECERR 16
+#define M_LANE8_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE8_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE8_REC_OVRD_8B10B_DECERR)
+#define G_LANE8_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE8_REC_OVRD_8B10B_DECERR) & M_LANE8_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE7_REC_OVRD_8B10B_DECERR 14
+#define M_LANE7_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE7_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE7_REC_OVRD_8B10B_DECERR)
+#define G_LANE7_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE7_REC_OVRD_8B10B_DECERR) & M_LANE7_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE6_REC_OVRD_8B10B_DECERR 12
+#define M_LANE6_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE6_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE6_REC_OVRD_8B10B_DECERR)
+#define G_LANE6_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE6_REC_OVRD_8B10B_DECERR) & M_LANE6_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE5_REC_OVRD_8B10B_DECERR 10
+#define M_LANE5_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE5_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE5_REC_OVRD_8B10B_DECERR)
+#define G_LANE5_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE5_REC_OVRD_8B10B_DECERR) & M_LANE5_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE4_REC_OVRD_8B10B_DECERR 8
+#define M_LANE4_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE4_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE4_REC_OVRD_8B10B_DECERR)
+#define G_LANE4_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE4_REC_OVRD_8B10B_DECERR) & M_LANE4_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE3_REC_OVRD_8B10B_DECERR 6
+#define M_LANE3_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE3_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE3_REC_OVRD_8B10B_DECERR)
+#define G_LANE3_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE3_REC_OVRD_8B10B_DECERR) & M_LANE3_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE2_REC_OVRD_8B10B_DECERR 4
+#define M_LANE2_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE2_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE2_REC_OVRD_8B10B_DECERR)
+#define G_LANE2_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE2_REC_OVRD_8B10B_DECERR) & M_LANE2_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE1_REC_OVRD_8B10B_DECERR 2
+#define M_LANE1_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE1_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE1_REC_OVRD_8B10B_DECERR)
+#define G_LANE1_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE1_REC_OVRD_8B10B_DECERR) & M_LANE1_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE0_REC_OVRD_8B10B_DECERR 0
+#define M_LANE0_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE0_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE0_REC_OVRD_8B10B_DECERR)
+#define G_LANE0_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE0_REC_OVRD_8B10B_DECERR) & M_LANE0_REC_OVRD_8B10B_DECERR)
+
+#define A_PCIE_SNPS_G5_PHY_TX2RX_LOOPBK_REC_OVRD_EN 0x5fd4
+
+#define S_LANE15_REC_OVRD_EN 31
+#define V_LANE15_REC_OVRD_EN(x) ((x) << S_LANE15_REC_OVRD_EN)
+#define F_LANE15_REC_OVRD_EN V_LANE15_REC_OVRD_EN(1U)
+
+#define S_LANE14_REC_OVRD_EN 30
+#define V_LANE14_REC_OVRD_EN(x) ((x) << S_LANE14_REC_OVRD_EN)
+#define F_LANE14_REC_OVRD_EN V_LANE14_REC_OVRD_EN(1U)
+
+#define S_LANE13_REC_OVRD_EN 29
+#define V_LANE13_REC_OVRD_EN(x) ((x) << S_LANE13_REC_OVRD_EN)
+#define F_LANE13_REC_OVRD_EN V_LANE13_REC_OVRD_EN(1U)
+
+#define S_LANE11_REC_OVRD_EN 27
+#define V_LANE11_REC_OVRD_EN(x) ((x) << S_LANE11_REC_OVRD_EN)
+#define F_LANE11_REC_OVRD_EN V_LANE11_REC_OVRD_EN(1U)
+
+#define S_LANE12_REC_OVRD_EN 28
+#define V_LANE12_REC_OVRD_EN(x) ((x) << S_LANE12_REC_OVRD_EN)
+#define F_LANE12_REC_OVRD_EN V_LANE12_REC_OVRD_EN(1U)
+
+#define S_LANE10_REC_OVRD_EN 26
+#define V_LANE10_REC_OVRD_EN(x) ((x) << S_LANE10_REC_OVRD_EN)
+#define F_LANE10_REC_OVRD_EN V_LANE10_REC_OVRD_EN(1U)
+
+#define S_LANE9_REC_OVRD_EN 25
+#define V_LANE9_REC_OVRD_EN(x) ((x) << S_LANE9_REC_OVRD_EN)
+#define F_LANE9_REC_OVRD_EN V_LANE9_REC_OVRD_EN(1U)
+
+#define S_LANE8_REC_OVRD_EN 24
+#define V_LANE8_REC_OVRD_EN(x) ((x) << S_LANE8_REC_OVRD_EN)
+#define F_LANE8_REC_OVRD_EN V_LANE8_REC_OVRD_EN(1U)
+
+#define S_LANE7_REC_OVRD_EN 23
+#define V_LANE7_REC_OVRD_EN(x) ((x) << S_LANE7_REC_OVRD_EN)
+#define F_LANE7_REC_OVRD_EN V_LANE7_REC_OVRD_EN(1U)
+
+#define S_LANE6_REC_OVRD_EN 22
+#define V_LANE6_REC_OVRD_EN(x) ((x) << S_LANE6_REC_OVRD_EN)
+#define F_LANE6_REC_OVRD_EN V_LANE6_REC_OVRD_EN(1U)
+
+#define S_LANE5_REC_OVRD_EN 21
+#define V_LANE5_REC_OVRD_EN(x) ((x) << S_LANE5_REC_OVRD_EN)
+#define F_LANE5_REC_OVRD_EN V_LANE5_REC_OVRD_EN(1U)
+
+#define S_LANE4_REC_OVRD_EN 20
+#define V_LANE4_REC_OVRD_EN(x) ((x) << S_LANE4_REC_OVRD_EN)
+#define F_LANE4_REC_OVRD_EN V_LANE4_REC_OVRD_EN(1U)
+
+#define S_LANE3_REC_OVRD_EN 19
+#define V_LANE3_REC_OVRD_EN(x) ((x) << S_LANE3_REC_OVRD_EN)
+#define F_LANE3_REC_OVRD_EN V_LANE3_REC_OVRD_EN(1U)
+
+#define S_LANE2_REC_OVRD_EN 18
+#define V_LANE2_REC_OVRD_EN(x) ((x) << S_LANE2_REC_OVRD_EN)
+#define F_LANE2_REC_OVRD_EN V_LANE2_REC_OVRD_EN(1U)
+
+#define S_LANE1_REC_OVRD_EN 17
+#define V_LANE1_REC_OVRD_EN(x) ((x) << S_LANE1_REC_OVRD_EN)
+#define F_LANE1_REC_OVRD_EN V_LANE1_REC_OVRD_EN(1U)
+
+#define S_LANE0_REC_OVRD_EN 16
+#define V_LANE0_REC_OVRD_EN(x) ((x) << S_LANE0_REC_OVRD_EN)
+#define F_LANE0_REC_OVRD_EN V_LANE0_REC_OVRD_EN(1U)
+
+#define S_LANE15_TX2RX_LOOPBK 15
+#define V_LANE15_TX2RX_LOOPBK(x) ((x) << S_LANE15_TX2RX_LOOPBK)
+#define F_LANE15_TX2RX_LOOPBK V_LANE15_TX2RX_LOOPBK(1U)
+
+#define S_LANE14_TX2RX_LOOPBK 14
+#define V_LANE14_TX2RX_LOOPBK(x) ((x) << S_LANE14_TX2RX_LOOPBK)
+#define F_LANE14_TX2RX_LOOPBK V_LANE14_TX2RX_LOOPBK(1U)
+
+#define S_LANE13_TX2RX_LOOPBK 13
+#define V_LANE13_TX2RX_LOOPBK(x) ((x) << S_LANE13_TX2RX_LOOPBK)
+#define F_LANE13_TX2RX_LOOPBK V_LANE13_TX2RX_LOOPBK(1U)
+
+#define S_LANE12_TX2RX_LOOPBK 12
+#define V_LANE12_TX2RX_LOOPBK(x) ((x) << S_LANE12_TX2RX_LOOPBK)
+#define F_LANE12_TX2RX_LOOPBK V_LANE12_TX2RX_LOOPBK(1U)
+
+#define S_LANE11_TX2RX_LOOPBK 11
+#define V_LANE11_TX2RX_LOOPBK(x) ((x) << S_LANE11_TX2RX_LOOPBK)
+#define F_LANE11_TX2RX_LOOPBK V_LANE11_TX2RX_LOOPBK(1U)
+
+#define S_LANE10_TX2RX_LOOPBK 10
+#define V_LANE10_TX2RX_LOOPBK(x) ((x) << S_LANE10_TX2RX_LOOPBK)
+#define F_LANE10_TX2RX_LOOPBK V_LANE10_TX2RX_LOOPBK(1U)
+
+#define S_LANE9_TX2RX_LOOPBK 9
+#define V_LANE9_TX2RX_LOOPBK(x) ((x) << S_LANE9_TX2RX_LOOPBK)
+#define F_LANE9_TX2RX_LOOPBK V_LANE9_TX2RX_LOOPBK(1U)
+
+#define S_LANE8_TX2RX_LOOPBK 8
+#define V_LANE8_TX2RX_LOOPBK(x) ((x) << S_LANE8_TX2RX_LOOPBK)
+#define F_LANE8_TX2RX_LOOPBK V_LANE8_TX2RX_LOOPBK(1U)
+
+#define S_LANE7_TX2RX_LOOPBK 7
+#define V_LANE7_TX2RX_LOOPBK(x) ((x) << S_LANE7_TX2RX_LOOPBK)
+#define F_LANE7_TX2RX_LOOPBK V_LANE7_TX2RX_LOOPBK(1U)
+
+#define S_LANE6_TX2RX_LOOPBK 6
+#define V_LANE6_TX2RX_LOOPBK(x) ((x) << S_LANE6_TX2RX_LOOPBK)
+#define F_LANE6_TX2RX_LOOPBK V_LANE6_TX2RX_LOOPBK(1U)
+
+#define S_LANE5_TX2RX_LOOPBK 5
+#define V_LANE5_TX2RX_LOOPBK(x) ((x) << S_LANE5_TX2RX_LOOPBK)
+#define F_LANE5_TX2RX_LOOPBK V_LANE5_TX2RX_LOOPBK(1U)
+
+#define S_LANE4_TX2RX_LOOPBK 4
+#define V_LANE4_TX2RX_LOOPBK(x) ((x) << S_LANE4_TX2RX_LOOPBK)
+#define F_LANE4_TX2RX_LOOPBK V_LANE4_TX2RX_LOOPBK(1U)
+
+#define S_LANE3_TX2RX_LOOPBK 3
+#define V_LANE3_TX2RX_LOOPBK(x) ((x) << S_LANE3_TX2RX_LOOPBK)
+#define F_LANE3_TX2RX_LOOPBK V_LANE3_TX2RX_LOOPBK(1U)
+
+#define S_LANE2_TX2RX_LOOPBK 2
+#define V_LANE2_TX2RX_LOOPBK(x) ((x) << S_LANE2_TX2RX_LOOPBK)
+#define F_LANE2_TX2RX_LOOPBK V_LANE2_TX2RX_LOOPBK(1U)
+
+#define S_LANE1_TX2RX_LOOPBK 1
+#define V_LANE1_TX2RX_LOOPBK(x) ((x) << S_LANE1_TX2RX_LOOPBK)
+#define F_LANE1_TX2RX_LOOPBK V_LANE1_TX2RX_LOOPBK(1U)
+
+#define S_LANE0_TX2RX_LOOPBK 0
+#define V_LANE0_TX2RX_LOOPBK(x) ((x) << S_LANE0_TX2RX_LOOPBK)
+#define F_LANE0_TX2RX_LOOPBK V_LANE0_TX2RX_LOOPBK(1U)
+
+#define A_PCIE_PHY_TX_DISABLE_UPCS_PIPE_CONFIG 0x5fd8
+
+#define S_UPCS_PIPE_CONFIG 16
+#define M_UPCS_PIPE_CONFIG 0xffffU
+#define V_UPCS_PIPE_CONFIG(x) ((x) << S_UPCS_PIPE_CONFIG)
+#define G_UPCS_PIPE_CONFIG(x) (((x) >> S_UPCS_PIPE_CONFIG) & M_UPCS_PIPE_CONFIG)
+
+#define S_TX15_DISABLE 15
+#define V_TX15_DISABLE(x) ((x) << S_TX15_DISABLE)
+#define F_TX15_DISABLE V_TX15_DISABLE(1U)
+
+#define S_TX14_DISABLE 14
+#define V_TX14_DISABLE(x) ((x) << S_TX14_DISABLE)
+#define F_TX14_DISABLE V_TX14_DISABLE(1U)
+
+#define S_TX13_DISABLE 13
+#define V_TX13_DISABLE(x) ((x) << S_TX13_DISABLE)
+#define F_TX13_DISABLE V_TX13_DISABLE(1U)
+
+#define S_TX12_DISABLE 12
+#define V_TX12_DISABLE(x) ((x) << S_TX12_DISABLE)
+#define F_TX12_DISABLE V_TX12_DISABLE(1U)
+
+#define S_TX11_DISABLE 11
+#define V_TX11_DISABLE(x) ((x) << S_TX11_DISABLE)
+#define F_TX11_DISABLE V_TX11_DISABLE(1U)
+
+#define S_TX10_DISABLE 10
+#define V_TX10_DISABLE(x) ((x) << S_TX10_DISABLE)
+#define F_TX10_DISABLE V_TX10_DISABLE(1U)
+
+#define S_TX9_DISABLE 9
+#define V_TX9_DISABLE(x) ((x) << S_TX9_DISABLE)
+#define F_TX9_DISABLE V_TX9_DISABLE(1U)
+
+#define S_TX8_DISABLE 8
+#define V_TX8_DISABLE(x) ((x) << S_TX8_DISABLE)
+#define F_TX8_DISABLE V_TX8_DISABLE(1U)
+
+#define S_TX7_DISABLE 7
+#define V_TX7_DISABLE(x) ((x) << S_TX7_DISABLE)
+#define F_TX7_DISABLE V_TX7_DISABLE(1U)
+
+#define S_TX6_DISABLE 6
+#define V_TX6_DISABLE(x) ((x) << S_TX6_DISABLE)
+#define F_TX6_DISABLE V_TX6_DISABLE(1U)
+
+#define S_TX5_DISABLE 5
+#define V_TX5_DISABLE(x) ((x) << S_TX5_DISABLE)
+#define F_TX5_DISABLE V_TX5_DISABLE(1U)
+
+#define S_TX4_DISABLE 4
+#define V_TX4_DISABLE(x) ((x) << S_TX4_DISABLE)
+#define F_TX4_DISABLE V_TX4_DISABLE(1U)
+
+#define S_TX3_DISABLE 3
+#define V_TX3_DISABLE(x) ((x) << S_TX3_DISABLE)
+#define F_TX3_DISABLE V_TX3_DISABLE(1U)
+
+#define S_TX2_DISABLE 2
+#define V_TX2_DISABLE(x) ((x) << S_TX2_DISABLE)
+#define F_TX2_DISABLE V_TX2_DISABLE(1U)
+
+#define S_TX1_DISABLE 1
+#define V_TX1_DISABLE(x) ((x) << S_TX1_DISABLE)
+#define F_TX1_DISABLE V_TX1_DISABLE(1U)
+
+#define S_TX0_DISABLE 0
+#define V_TX0_DISABLE(x) ((x) << S_TX0_DISABLE)
+#define F_TX0_DISABLE V_TX0_DISABLE(1U)
+
#define A_PCIE_PDEBUG_REG_0X0 0x0
#define A_PCIE_PDEBUG_REG_0X1 0x1
#define A_PCIE_PDEBUG_REG_0X2 0x2
@@ -11668,6 +14476,40 @@
#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+#define A_DBG_GPIO_OUT 0x6010
+
+#define S_GPIO23_OUT_VAL 23
+#define V_GPIO23_OUT_VAL(x) ((x) << S_GPIO23_OUT_VAL)
+#define F_GPIO23_OUT_VAL V_GPIO23_OUT_VAL(1U)
+
+#define S_GPIO22_OUT_VAL 22
+#define V_GPIO22_OUT_VAL(x) ((x) << S_GPIO22_OUT_VAL)
+#define F_GPIO22_OUT_VAL V_GPIO22_OUT_VAL(1U)
+
+#define S_GPIO21_OUT_VAL 21
+#define V_GPIO21_OUT_VAL(x) ((x) << S_GPIO21_OUT_VAL)
+#define F_GPIO21_OUT_VAL V_GPIO21_OUT_VAL(1U)
+
+#define S_GPIO20_OUT_VAL 20
+#define V_GPIO20_OUT_VAL(x) ((x) << S_GPIO20_OUT_VAL)
+#define F_GPIO20_OUT_VAL V_GPIO20_OUT_VAL(1U)
+
+#define S_T7_GPIO19_OUT_VAL 19
+#define V_T7_GPIO19_OUT_VAL(x) ((x) << S_T7_GPIO19_OUT_VAL)
+#define F_T7_GPIO19_OUT_VAL V_T7_GPIO19_OUT_VAL(1U)
+
+#define S_T7_GPIO18_OUT_VAL 18
+#define V_T7_GPIO18_OUT_VAL(x) ((x) << S_T7_GPIO18_OUT_VAL)
+#define F_T7_GPIO18_OUT_VAL V_T7_GPIO18_OUT_VAL(1U)
+
+#define S_T7_GPIO17_OUT_VAL 17
+#define V_T7_GPIO17_OUT_VAL(x) ((x) << S_T7_GPIO17_OUT_VAL)
+#define F_T7_GPIO17_OUT_VAL V_T7_GPIO17_OUT_VAL(1U)
+
+#define S_T7_GPIO16_OUT_VAL 16
+#define V_T7_GPIO16_OUT_VAL(x) ((x) << S_T7_GPIO16_OUT_VAL)
+#define F_T7_GPIO16_OUT_VAL V_T7_GPIO16_OUT_VAL(1U)
+
#define A_DBG_GPIO_IN 0x6014
#define S_GPIO15_CHG_DET 31
@@ -11798,6 +14640,38 @@
#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
#define F_GPIO0_IN V_GPIO0_IN(1U)
+#define S_GPIO23_IN 23
+#define V_GPIO23_IN(x) ((x) << S_GPIO23_IN)
+#define F_GPIO23_IN V_GPIO23_IN(1U)
+
+#define S_GPIO22_IN 22
+#define V_GPIO22_IN(x) ((x) << S_GPIO22_IN)
+#define F_GPIO22_IN V_GPIO22_IN(1U)
+
+#define S_GPIO21_IN 21
+#define V_GPIO21_IN(x) ((x) << S_GPIO21_IN)
+#define F_GPIO21_IN V_GPIO21_IN(1U)
+
+#define S_GPIO20_IN 20
+#define V_GPIO20_IN(x) ((x) << S_GPIO20_IN)
+#define F_GPIO20_IN V_GPIO20_IN(1U)
+
+#define S_T7_GPIO19_IN 19
+#define V_T7_GPIO19_IN(x) ((x) << S_T7_GPIO19_IN)
+#define F_T7_GPIO19_IN V_T7_GPIO19_IN(1U)
+
+#define S_T7_GPIO18_IN 18
+#define V_T7_GPIO18_IN(x) ((x) << S_T7_GPIO18_IN)
+#define F_T7_GPIO18_IN V_T7_GPIO18_IN(1U)
+
+#define S_T7_GPIO17_IN 17
+#define V_T7_GPIO17_IN(x) ((x) << S_T7_GPIO17_IN)
+#define F_T7_GPIO17_IN V_T7_GPIO17_IN(1U)
+
+#define S_T7_GPIO16_IN 16
+#define V_T7_GPIO16_IN(x) ((x) << S_T7_GPIO16_IN)
+#define F_T7_GPIO16_IN V_T7_GPIO16_IN(1U)
+
#define A_DBG_INT_ENABLE 0x6018
#define S_IBM_FDL_FAIL_INT_ENBL 25
@@ -11920,6 +14794,58 @@
#define V_GPIO16(x) ((x) << S_GPIO16)
#define F_GPIO16 V_GPIO16(1U)
+#define S_USBFIFOPARERR 12
+#define V_USBFIFOPARERR(x) ((x) << S_USBFIFOPARERR)
+#define F_USBFIFOPARERR V_USBFIFOPARERR(1U)
+
+#define S_T7_IBM_FDL_FAIL_INT_ENBL 11
+#define V_T7_IBM_FDL_FAIL_INT_ENBL(x) ((x) << S_T7_IBM_FDL_FAIL_INT_ENBL)
+#define F_T7_IBM_FDL_FAIL_INT_ENBL V_T7_IBM_FDL_FAIL_INT_ENBL(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_ENBL 10
+#define V_T7_PLL_LOCK_LOST_INT_ENBL(x) ((x) << S_T7_PLL_LOCK_LOST_INT_ENBL)
+#define F_T7_PLL_LOCK_LOST_INT_ENBL V_T7_PLL_LOCK_LOST_INT_ENBL(1U)
+
+#define S_M1_LOCK 9
+#define V_M1_LOCK(x) ((x) << S_M1_LOCK)
+#define F_M1_LOCK V_M1_LOCK(1U)
+
+#define S_T7_PCIE_LOCK 8
+#define V_T7_PCIE_LOCK(x) ((x) << S_T7_PCIE_LOCK)
+#define F_T7_PCIE_LOCK V_T7_PCIE_LOCK(1U)
+
+#define S_T7_U_LOCK 7
+#define V_T7_U_LOCK(x) ((x) << S_T7_U_LOCK)
+#define F_T7_U_LOCK V_T7_U_LOCK(1U)
+
+#define S_MAC_LOCK 6
+#define V_MAC_LOCK(x) ((x) << S_MAC_LOCK)
+#define F_MAC_LOCK V_MAC_LOCK(1U)
+
+#define S_ARM_LOCK 5
+#define V_ARM_LOCK(x) ((x) << S_ARM_LOCK)
+#define F_ARM_LOCK V_ARM_LOCK(1U)
+
+#define S_M0_LOCK 4
+#define V_M0_LOCK(x) ((x) << S_M0_LOCK)
+#define F_M0_LOCK V_M0_LOCK(1U)
+
+#define S_XGPBUS_LOCK 3
+#define V_XGPBUS_LOCK(x) ((x) << S_XGPBUS_LOCK)
+#define F_XGPBUS_LOCK V_XGPBUS_LOCK(1U)
+
+#define S_XGPHY_LOCK 2
+#define V_XGPHY_LOCK(x) ((x) << S_XGPHY_LOCK)
+#define F_XGPHY_LOCK V_XGPHY_LOCK(1U)
+
+#define S_USB_LOCK 1
+#define V_USB_LOCK(x) ((x) << S_USB_LOCK)
+#define F_USB_LOCK V_USB_LOCK(1U)
+
+#define S_T7_C_LOCK 0
+#define V_T7_C_LOCK(x) ((x) << S_T7_C_LOCK)
+#define F_T7_C_LOCK V_T7_C_LOCK(1U)
+
#define A_DBG_INT_CAUSE 0x601c
#define S_IBM_FDL_FAIL_INT_CAUSE 25
@@ -11938,6 +14864,14 @@
#define V_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_PLL_LOCK_LOST_INT_CAUSE)
#define F_PLL_LOCK_LOST_INT_CAUSE V_PLL_LOCK_LOST_INT_CAUSE(1U)
+#define S_T7_IBM_FDL_FAIL_INT_CAUSE 11
+#define V_T7_IBM_FDL_FAIL_INT_CAUSE(x) ((x) << S_T7_IBM_FDL_FAIL_INT_CAUSE)
+#define F_T7_IBM_FDL_FAIL_INT_CAUSE V_T7_IBM_FDL_FAIL_INT_CAUSE(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_CAUSE 10
+#define V_T7_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_T7_PLL_LOCK_LOST_INT_CAUSE)
+#define F_T7_PLL_LOCK_LOST_INT_CAUSE V_T7_PLL_LOCK_LOST_INT_CAUSE(1U)
+
#define A_DBG_DBG0_RST_VALUE 0x6020
#define S_DEBUGDATA 0
@@ -11977,6 +14911,10 @@
#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+#define S_INIC_MODE_EN 0
+#define V_INIC_MODE_EN(x) ((x) << S_INIC_MODE_EN)
+#define F_INIC_MODE_EN V_INIC_MODE_EN(1U)
+
#define A_DBG_PLL_LOCK 0x602c
#define S_PLL_P_LOCK 20
@@ -12003,6 +14941,38 @@
#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+#define S_T7_PLL_M_LOCK 9
+#define V_T7_PLL_M_LOCK(x) ((x) << S_T7_PLL_M_LOCK)
+#define F_T7_PLL_M_LOCK V_T7_PLL_M_LOCK(1U)
+
+#define S_PLL_PCIE_LOCK 8
+#define V_PLL_PCIE_LOCK(x) ((x) << S_PLL_PCIE_LOCK)
+#define F_PLL_PCIE_LOCK V_PLL_PCIE_LOCK(1U)
+
+#define S_T7_PLL_U_LOCK 7
+#define V_T7_PLL_U_LOCK(x) ((x) << S_T7_PLL_U_LOCK)
+#define F_T7_PLL_U_LOCK V_T7_PLL_U_LOCK(1U)
+
+#define S_PLL_MAC_LOCK 6
+#define V_PLL_MAC_LOCK(x) ((x) << S_PLL_MAC_LOCK)
+#define F_PLL_MAC_LOCK V_PLL_MAC_LOCK(1U)
+
+#define S_PLL_ARM_LOCK 5
+#define V_PLL_ARM_LOCK(x) ((x) << S_PLL_ARM_LOCK)
+#define F_PLL_ARM_LOCK V_PLL_ARM_LOCK(1U)
+
+#define S_PLL_XGPBUS_LOCK 3
+#define V_PLL_XGPBUS_LOCK(x) ((x) << S_PLL_XGPBUS_LOCK)
+#define F_PLL_XGPBUS_LOCK V_PLL_XGPBUS_LOCK(1U)
+
+#define S_PLL_XGPHY_LOCK 2
+#define V_PLL_XGPHY_LOCK(x) ((x) << S_PLL_XGPHY_LOCK)
+#define F_PLL_XGPHY_LOCK V_PLL_XGPHY_LOCK(1U)
+
+#define S_PLL_USB_LOCK 1
+#define V_PLL_USB_LOCK(x) ((x) << S_PLL_USB_LOCK)
+#define F_PLL_USB_LOCK V_PLL_USB_LOCK(1U)
+
#define A_DBG_GPIO_ACT_LOW 0x6030
#define S_P_LOCK_ACT_LOW 21
@@ -12109,6 +15079,48 @@
#define V_GPIO16_ACT_LOW(x) ((x) << S_GPIO16_ACT_LOW)
#define F_GPIO16_ACT_LOW V_GPIO16_ACT_LOW(1U)
+#define A_DBG_PLL_LOCK_ACT_LOW 0x6030
+
+#define S_M1_LOCK_ACT_LOW 9
+#define V_M1_LOCK_ACT_LOW(x) ((x) << S_M1_LOCK_ACT_LOW)
+#define F_M1_LOCK_ACT_LOW V_M1_LOCK_ACT_LOW(1U)
+
+#define S_PCIE_LOCK_ACT_LOW 8
+#define V_PCIE_LOCK_ACT_LOW(x) ((x) << S_PCIE_LOCK_ACT_LOW)
+#define F_PCIE_LOCK_ACT_LOW V_PCIE_LOCK_ACT_LOW(1U)
+
+#define S_T7_U_LOCK_ACT_LOW 7
+#define V_T7_U_LOCK_ACT_LOW(x) ((x) << S_T7_U_LOCK_ACT_LOW)
+#define F_T7_U_LOCK_ACT_LOW V_T7_U_LOCK_ACT_LOW(1U)
+
+#define S_MAC_LOCK_ACT_LOW 6
+#define V_MAC_LOCK_ACT_LOW(x) ((x) << S_MAC_LOCK_ACT_LOW)
+#define F_MAC_LOCK_ACT_LOW V_MAC_LOCK_ACT_LOW(1U)
+
+#define S_ARM_LOCK_ACT_LOW 5
+#define V_ARM_LOCK_ACT_LOW(x) ((x) << S_ARM_LOCK_ACT_LOW)
+#define F_ARM_LOCK_ACT_LOW V_ARM_LOCK_ACT_LOW(1U)
+
+#define S_M0_LOCK_ACT_LOW 4
+#define V_M0_LOCK_ACT_LOW(x) ((x) << S_M0_LOCK_ACT_LOW)
+#define F_M0_LOCK_ACT_LOW V_M0_LOCK_ACT_LOW(1U)
+
+#define S_XGPBUS_LOCK_ACT_LOW 3
+#define V_XGPBUS_LOCK_ACT_LOW(x) ((x) << S_XGPBUS_LOCK_ACT_LOW)
+#define F_XGPBUS_LOCK_ACT_LOW V_XGPBUS_LOCK_ACT_LOW(1U)
+
+#define S_XGPHY_LOCK_ACT_LOW 2
+#define V_XGPHY_LOCK_ACT_LOW(x) ((x) << S_XGPHY_LOCK_ACT_LOW)
+#define F_XGPHY_LOCK_ACT_LOW V_XGPHY_LOCK_ACT_LOW(1U)
+
+#define S_USB_LOCK_ACT_LOW 1
+#define V_USB_LOCK_ACT_LOW(x) ((x) << S_USB_LOCK_ACT_LOW)
+#define F_USB_LOCK_ACT_LOW V_USB_LOCK_ACT_LOW(1U)
+
+#define S_T7_C_LOCK_ACT_LOW 0
+#define V_T7_C_LOCK_ACT_LOW(x) ((x) << S_T7_C_LOCK_ACT_LOW)
+#define F_T7_C_LOCK_ACT_LOW V_T7_C_LOCK_ACT_LOW(1U)
+
#define A_DBG_EFUSE_BYTE0_3 0x6034
#define A_DBG_EFUSE_BYTE4_7 0x6038
#define A_DBG_EFUSE_BYTE8_11 0x603c
@@ -12140,6 +15152,32 @@
#define V_STATIC_U_PLL_TUNE(x) ((x) << S_STATIC_U_PLL_TUNE)
#define G_STATIC_U_PLL_TUNE(x) (((x) >> S_STATIC_U_PLL_TUNE) & M_STATIC_U_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF1 0x6044
+
+#define S_STATIC_U_PLL_RANGE 22
+#define M_STATIC_U_PLL_RANGE 0x7U
+#define V_STATIC_U_PLL_RANGE(x) ((x) << S_STATIC_U_PLL_RANGE)
+#define G_STATIC_U_PLL_RANGE(x) (((x) >> S_STATIC_U_PLL_RANGE) & M_STATIC_U_PLL_RANGE)
+
+#define S_STATIC_U_PLL_DIVQ 17
+#define M_STATIC_U_PLL_DIVQ 0x1fU
+#define V_STATIC_U_PLL_DIVQ(x) ((x) << S_STATIC_U_PLL_DIVQ)
+#define G_STATIC_U_PLL_DIVQ(x) (((x) >> S_STATIC_U_PLL_DIVQ) & M_STATIC_U_PLL_DIVQ)
+
+#define S_STATIC_U_PLL_DIVFI 8
+#define M_STATIC_U_PLL_DIVFI 0x1ffU
+#define V_STATIC_U_PLL_DIVFI(x) ((x) << S_STATIC_U_PLL_DIVFI)
+#define G_STATIC_U_PLL_DIVFI(x) (((x) >> S_STATIC_U_PLL_DIVFI) & M_STATIC_U_PLL_DIVFI)
+
+#define S_STATIC_U_PLL_DIVR 2
+#define M_STATIC_U_PLL_DIVR 0x3fU
+#define V_STATIC_U_PLL_DIVR(x) ((x) << S_STATIC_U_PLL_DIVR)
+#define G_STATIC_U_PLL_DIVR(x) (((x) >> S_STATIC_U_PLL_DIVR) & M_STATIC_U_PLL_DIVR)
+
+#define S_T7_1_STATIC_U_PLL_BYPASS 1
+#define V_T7_1_STATIC_U_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_U_PLL_BYPASS)
+#define F_T7_1_STATIC_U_PLL_BYPASS V_T7_1_STATIC_U_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_C_PLL_CONF 0x6048
#define S_STATIC_C_PLL_MULT 23
@@ -12167,6 +15205,26 @@
#define V_STATIC_C_PLL_TUNE(x) ((x) << S_STATIC_C_PLL_TUNE)
#define G_STATIC_C_PLL_TUNE(x) (((x) >> S_STATIC_C_PLL_TUNE) & M_STATIC_C_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF2 0x6048
+
+#define S_STATIC_U_PLL_SSMF 5
+#define M_STATIC_U_PLL_SSMF 0xfU
+#define V_STATIC_U_PLL_SSMF(x) ((x) << S_STATIC_U_PLL_SSMF)
+#define G_STATIC_U_PLL_SSMF(x) (((x) >> S_STATIC_U_PLL_SSMF) & M_STATIC_U_PLL_SSMF)
+
+#define S_STATIC_U_PLL_SSMD 2
+#define M_STATIC_U_PLL_SSMD 0x7U
+#define V_STATIC_U_PLL_SSMD(x) ((x) << S_STATIC_U_PLL_SSMD)
+#define G_STATIC_U_PLL_SSMD(x) (((x) >> S_STATIC_U_PLL_SSMD) & M_STATIC_U_PLL_SSMD)
+
+#define S_STATIC_U_PLL_SSDS 1
+#define V_STATIC_U_PLL_SSDS(x) ((x) << S_STATIC_U_PLL_SSDS)
+#define F_STATIC_U_PLL_SSDS V_STATIC_U_PLL_SSDS(1U)
+
+#define S_STATIC_U_PLL_SSE 0
+#define V_STATIC_U_PLL_SSE(x) ((x) << S_STATIC_U_PLL_SSE)
+#define F_STATIC_U_PLL_SSE V_STATIC_U_PLL_SSE(1U)
+
#define A_DBG_STATIC_M_PLL_CONF 0x604c
#define S_STATIC_M_PLL_MULT 23
@@ -12194,6 +15252,32 @@
#define V_STATIC_M_PLL_TUNE(x) ((x) << S_STATIC_M_PLL_TUNE)
#define G_STATIC_M_PLL_TUNE(x) (((x) >> S_STATIC_M_PLL_TUNE) & M_STATIC_M_PLL_TUNE)
+#define A_T7_DBG_STATIC_C_PLL_CONF1 0x604c
+
+#define S_STATIC_C_PLL_RANGE 22
+#define M_STATIC_C_PLL_RANGE 0x7U
+#define V_STATIC_C_PLL_RANGE(x) ((x) << S_STATIC_C_PLL_RANGE)
+#define G_STATIC_C_PLL_RANGE(x) (((x) >> S_STATIC_C_PLL_RANGE) & M_STATIC_C_PLL_RANGE)
+
+#define S_STATIC_C_PLL_DIVQ 17
+#define M_STATIC_C_PLL_DIVQ 0x1fU
+#define V_STATIC_C_PLL_DIVQ(x) ((x) << S_STATIC_C_PLL_DIVQ)
+#define G_STATIC_C_PLL_DIVQ(x) (((x) >> S_STATIC_C_PLL_DIVQ) & M_STATIC_C_PLL_DIVQ)
+
+#define S_STATIC_C_PLL_DIVFI 8
+#define M_STATIC_C_PLL_DIVFI 0x1ffU
+#define V_STATIC_C_PLL_DIVFI(x) ((x) << S_STATIC_C_PLL_DIVFI)
+#define G_STATIC_C_PLL_DIVFI(x) (((x) >> S_STATIC_C_PLL_DIVFI) & M_STATIC_C_PLL_DIVFI)
+
+#define S_STATIC_C_PLL_DIVR 2
+#define M_STATIC_C_PLL_DIVR 0x3fU
+#define V_STATIC_C_PLL_DIVR(x) ((x) << S_STATIC_C_PLL_DIVR)
+#define G_STATIC_C_PLL_DIVR(x) (((x) >> S_STATIC_C_PLL_DIVR) & M_STATIC_C_PLL_DIVR)
+
+#define S_T7_1_STATIC_C_PLL_BYPASS 1
+#define V_T7_1_STATIC_C_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_C_PLL_BYPASS)
+#define F_T7_1_STATIC_C_PLL_BYPASS V_T7_1_STATIC_C_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_KX_PLL_CONF 0x6050
#define S_STATIC_KX_PLL_C 21
@@ -12226,6 +15310,26 @@
#define V_STATIC_KX_PLL_P(x) ((x) << S_STATIC_KX_PLL_P)
#define G_STATIC_KX_PLL_P(x) (((x) >> S_STATIC_KX_PLL_P) & M_STATIC_KX_PLL_P)
+#define A_T7_DBG_STATIC_C_PLL_CONF2 0x6050
+
+#define S_STATIC_C_PLL_SSMF 5
+#define M_STATIC_C_PLL_SSMF 0xfU
+#define V_STATIC_C_PLL_SSMF(x) ((x) << S_STATIC_C_PLL_SSMF)
+#define G_STATIC_C_PLL_SSMF(x) (((x) >> S_STATIC_C_PLL_SSMF) & M_STATIC_C_PLL_SSMF)
+
+#define S_STATIC_C_PLL_SSMD 2
+#define M_STATIC_C_PLL_SSMD 0x7U
+#define V_STATIC_C_PLL_SSMD(x) ((x) << S_STATIC_C_PLL_SSMD)
+#define G_STATIC_C_PLL_SSMD(x) (((x) >> S_STATIC_C_PLL_SSMD) & M_STATIC_C_PLL_SSMD)
+
+#define S_STATIC_C_PLL_SSDS 1
+#define V_STATIC_C_PLL_SSDS(x) ((x) << S_STATIC_C_PLL_SSDS)
+#define F_STATIC_C_PLL_SSDS V_STATIC_C_PLL_SSDS(1U)
+
+#define S_STATIC_C_PLL_SSE 0
+#define V_STATIC_C_PLL_SSE(x) ((x) << S_STATIC_C_PLL_SSE)
+#define F_STATIC_C_PLL_SSE V_STATIC_C_PLL_SSE(1U)
+
#define A_DBG_STATIC_KR_PLL_CONF 0x6054
#define S_STATIC_KR_PLL_C 21
@@ -12258,6 +15362,38 @@
#define V_STATIC_KR_PLL_P(x) ((x) << S_STATIC_KR_PLL_P)
#define G_STATIC_KR_PLL_P(x) (((x) >> S_STATIC_KR_PLL_P) & M_STATIC_KR_PLL_P)
+#define A_DBG_STATIC_PLL_DFS_CONF 0x6054
+
+#define S_STATIC_U_DFS_ACK 23
+#define V_STATIC_U_DFS_ACK(x) ((x) << S_STATIC_U_DFS_ACK)
+#define F_STATIC_U_DFS_ACK V_STATIC_U_DFS_ACK(1U)
+
+#define S_STATIC_C_DFS_ACK 22
+#define V_STATIC_C_DFS_ACK(x) ((x) << S_STATIC_C_DFS_ACK)
+#define F_STATIC_C_DFS_ACK V_STATIC_C_DFS_ACK(1U)
+
+#define S_STATIC_U_DFS_DIVFI 13
+#define M_STATIC_U_DFS_DIVFI 0x1ffU
+#define V_STATIC_U_DFS_DIVFI(x) ((x) << S_STATIC_U_DFS_DIVFI)
+#define G_STATIC_U_DFS_DIVFI(x) (((x) >> S_STATIC_U_DFS_DIVFI) & M_STATIC_U_DFS_DIVFI)
+
+#define S_STATIC_U_DFS_NEWDIV 12
+#define V_STATIC_U_DFS_NEWDIV(x) ((x) << S_STATIC_U_DFS_NEWDIV)
+#define F_STATIC_U_DFS_NEWDIV V_STATIC_U_DFS_NEWDIV(1U)
+
+#define S_T7_STATIC_U_DFS_ENABLE 11
+#define V_T7_STATIC_U_DFS_ENABLE(x) ((x) << S_T7_STATIC_U_DFS_ENABLE)
+#define F_T7_STATIC_U_DFS_ENABLE V_T7_STATIC_U_DFS_ENABLE(1U)
+
+#define S_STATIC_C_DFS_DIVFI 2
+#define M_STATIC_C_DFS_DIVFI 0x1ffU
+#define V_STATIC_C_DFS_DIVFI(x) ((x) << S_STATIC_C_DFS_DIVFI)
+#define G_STATIC_C_DFS_DIVFI(x) (((x) >> S_STATIC_C_DFS_DIVFI) & M_STATIC_C_DFS_DIVFI)
+
+#define S_STATIC_C_DFS_NEWDIV 1
+#define V_STATIC_C_DFS_NEWDIV(x) ((x) << S_STATIC_C_DFS_NEWDIV)
+#define F_STATIC_C_DFS_NEWDIV V_STATIC_C_DFS_NEWDIV(1U)
+
#define A_DBG_EXTRA_STATIC_BITS_CONF 0x6058
#define S_STATIC_M_PLL_RESET 30
@@ -12343,6 +15479,14 @@
#define V_PSRO_SEL(x) ((x) << S_PSRO_SEL)
#define G_PSRO_SEL(x) (((x) >> S_PSRO_SEL) & M_PSRO_SEL)
+#define S_T7_STATIC_LVDS_CLKOUT_EN 21
+#define V_T7_STATIC_LVDS_CLKOUT_EN(x) ((x) << S_T7_STATIC_LVDS_CLKOUT_EN)
+#define F_T7_STATIC_LVDS_CLKOUT_EN V_T7_STATIC_LVDS_CLKOUT_EN(1U)
+
+#define S_T7_EXPHYCLK_SEL_EN 16
+#define V_T7_EXPHYCLK_SEL_EN(x) ((x) << S_T7_EXPHYCLK_SEL_EN)
+#define F_T7_EXPHYCLK_SEL_EN V_T7_EXPHYCLK_SEL_EN(1U)
+
#define A_DBG_STATIC_OCLK_MUXSEL_CONF 0x605c
#define S_M_OCLK_MUXSEL 12
@@ -12467,16 +15611,6 @@
#define V_T5_RD_ADDR0(x) ((x) << S_T5_RD_ADDR0)
#define G_T5_RD_ADDR0(x) (((x) >> S_T5_RD_ADDR0) & M_T5_RD_ADDR0)
-#define S_T6_RD_ADDR1 11
-#define M_T6_RD_ADDR1 0x1ffU
-#define V_T6_RD_ADDR1(x) ((x) << S_T6_RD_ADDR1)
-#define G_T6_RD_ADDR1(x) (((x) >> S_T6_RD_ADDR1) & M_T6_RD_ADDR1)
-
-#define S_T6_RD_ADDR0 2
-#define M_T6_RD_ADDR0 0x1ffU
-#define V_T6_RD_ADDR0(x) ((x) << S_T6_RD_ADDR0)
-#define G_T6_RD_ADDR0(x) (((x) >> S_T6_RD_ADDR0) & M_T6_RD_ADDR0)
-
#define A_DBG_TRACE_WRADDR 0x6090
#define S_WR_POINTER_ADDR1 16
@@ -12499,16 +15633,6 @@
#define V_T5_WR_POINTER_ADDR0(x) ((x) << S_T5_WR_POINTER_ADDR0)
#define G_T5_WR_POINTER_ADDR0(x) (((x) >> S_T5_WR_POINTER_ADDR0) & M_T5_WR_POINTER_ADDR0)
-#define S_T6_WR_POINTER_ADDR1 16
-#define M_T6_WR_POINTER_ADDR1 0x1ffU
-#define V_T6_WR_POINTER_ADDR1(x) ((x) << S_T6_WR_POINTER_ADDR1)
-#define G_T6_WR_POINTER_ADDR1(x) (((x) >> S_T6_WR_POINTER_ADDR1) & M_T6_WR_POINTER_ADDR1)
-
-#define S_T6_WR_POINTER_ADDR0 0
-#define M_T6_WR_POINTER_ADDR0 0x1ffU
-#define V_T6_WR_POINTER_ADDR0(x) ((x) << S_T6_WR_POINTER_ADDR0)
-#define G_T6_WR_POINTER_ADDR0(x) (((x) >> S_T6_WR_POINTER_ADDR0) & M_T6_WR_POINTER_ADDR0)
-
#define A_DBG_TRACE0_DATA_OUT 0x6094
#define A_DBG_TRACE1_DATA_OUT 0x6098
#define A_DBG_FUSE_SENSE_DONE 0x609c
@@ -12575,7 +15699,52 @@
#define V_T6_TVSENSE_RST(x) ((x) << S_T6_TVSENSE_RST)
#define F_T6_TVSENSE_RST V_T6_TVSENSE_RST(1U)
+#define A_DBG_PVT_EN1 0x60a8
+
+#define S_PVT_TRIMO 18
+#define M_PVT_TRIMO 0x3fU
+#define V_PVT_TRIMO(x) ((x) << S_PVT_TRIMO)
+#define G_PVT_TRIMO(x) (((x) >> S_PVT_TRIMO) & M_PVT_TRIMO)
+
+#define S_PVT_TRIMG 13
+#define M_PVT_TRIMG 0x1fU
+#define V_PVT_TRIMG(x) ((x) << S_PVT_TRIMG)
+#define G_PVT_TRIMG(x) (((x) >> S_PVT_TRIMG) & M_PVT_TRIMG)
+
+#define S_PVT_VSAMPLE 12
+#define V_PVT_VSAMPLE(x) ((x) << S_PVT_VSAMPLE)
+#define F_PVT_VSAMPLE V_PVT_VSAMPLE(1U)
+
+#define S_PVT_PSAMPLE 10
+#define M_PVT_PSAMPLE 0x3U
+#define V_PVT_PSAMPLE(x) ((x) << S_PVT_PSAMPLE)
+#define G_PVT_PSAMPLE(x) (((x) >> S_PVT_PSAMPLE) & M_PVT_PSAMPLE)
+
+#define S_PVT_ENA 9
+#define V_PVT_ENA(x) ((x) << S_PVT_ENA)
+#define F_PVT_ENA V_PVT_ENA(1U)
+
+#define S_PVT_RESET 8
+#define V_PVT_RESET(x) ((x) << S_PVT_RESET)
+#define F_PVT_RESET V_PVT_RESET(1U)
+
+#define S_PVT_DIV 0
+#define M_PVT_DIV 0xffU
+#define V_PVT_DIV(x) ((x) << S_PVT_DIV)
+#define G_PVT_DIV(x) (((x) >> S_PVT_DIV) & M_PVT_DIV)
+
#define A_DBG_CUST_EFUSE_OUT_EN 0x60ac
+#define A_DBG_PVT_EN2 0x60ac
+
+#define S_PVT_DATA_OUT 1
+#define M_PVT_DATA_OUT 0x3ffU
+#define V_PVT_DATA_OUT(x) ((x) << S_PVT_DATA_OUT)
+#define G_PVT_DATA_OUT(x) (((x) >> S_PVT_DATA_OUT) & M_PVT_DATA_OUT)
+
+#define S_PVT_DATA_VALID 0
+#define V_PVT_DATA_VALID(x) ((x) << S_PVT_DATA_VALID)
+#define F_PVT_DATA_VALID V_PVT_DATA_VALID(1U)
+
#define A_DBG_CUST_EFUSE_SEL1_EN 0x60b0
#define A_DBG_CUST_EFUSE_SEL2_EN 0x60b4
@@ -12638,6 +15807,36 @@
#define V_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_STATIC_M_PLL_FFSLEWRATE)
#define G_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_M_PLL_FFSLEWRATE) & M_STATIC_M_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_M0_PLL_CONF1 0x60b8
+
+#define S_STATIC_M0_PLL_RANGE 22
+#define M_STATIC_M0_PLL_RANGE 0x7U
+#define V_STATIC_M0_PLL_RANGE(x) ((x) << S_STATIC_M0_PLL_RANGE)
+#define G_STATIC_M0_PLL_RANGE(x) (((x) >> S_STATIC_M0_PLL_RANGE) & M_STATIC_M0_PLL_RANGE)
+
+#define S_STATIC_M0_PLL_DIVQ 17
+#define M_STATIC_M0_PLL_DIVQ 0x1fU
+#define V_STATIC_M0_PLL_DIVQ(x) ((x) << S_STATIC_M0_PLL_DIVQ)
+#define G_STATIC_M0_PLL_DIVQ(x) (((x) >> S_STATIC_M0_PLL_DIVQ) & M_STATIC_M0_PLL_DIVQ)
+
+#define S_STATIC_M0_PLL_DIVFI 8
+#define M_STATIC_M0_PLL_DIVFI 0x1ffU
+#define V_STATIC_M0_PLL_DIVFI(x) ((x) << S_STATIC_M0_PLL_DIVFI)
+#define G_STATIC_M0_PLL_DIVFI(x) (((x) >> S_STATIC_M0_PLL_DIVFI) & M_STATIC_M0_PLL_DIVFI)
+
+#define S_STATIC_M0_PLL_DIVR 2
+#define M_STATIC_M0_PLL_DIVR 0x3fU
+#define V_STATIC_M0_PLL_DIVR(x) ((x) << S_STATIC_M0_PLL_DIVR)
+#define G_STATIC_M0_PLL_DIVR(x) (((x) >> S_STATIC_M0_PLL_DIVR) & M_STATIC_M0_PLL_DIVR)
+
+#define S_STATIC_M0_PLL_BYPASS 1
+#define V_STATIC_M0_PLL_BYPASS(x) ((x) << S_STATIC_M0_PLL_BYPASS)
+#define F_STATIC_M0_PLL_BYPASS V_STATIC_M0_PLL_BYPASS(1U)
+
+#define S_STATIC_M0_PLL_RESET 0
+#define V_STATIC_M0_PLL_RESET(x) ((x) << S_STATIC_M0_PLL_RESET)
+#define F_STATIC_M0_PLL_RESET V_STATIC_M0_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF2 0x60bc
#define S_T5_STATIC_M_PLL_DCO_BYPASS 23
@@ -12715,6 +15914,50 @@
#define V_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_STATIC_M_PLL_LOCKTUNE)
#define G_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_STATIC_M_PLL_LOCKTUNE) & M_STATIC_M_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M0_PLL_CONF2 0x60bc
+
+#define S_T7_STATIC_SWMC1RST_ 14
+#define V_T7_STATIC_SWMC1RST_(x) ((x) << S_T7_STATIC_SWMC1RST_)
+#define F_T7_STATIC_SWMC1RST_ V_T7_STATIC_SWMC1RST_(1U)
+
+#define S_T7_STATIC_SWMC1CFGRST_ 13
+#define V_T7_STATIC_SWMC1CFGRST_(x) ((x) << S_T7_STATIC_SWMC1CFGRST_)
+#define F_T7_STATIC_SWMC1CFGRST_ V_T7_STATIC_SWMC1CFGRST_(1U)
+
+#define S_T7_STATIC_PHY0RECRST_ 12
+#define V_T7_STATIC_PHY0RECRST_(x) ((x) << S_T7_STATIC_PHY0RECRST_)
+#define F_T7_STATIC_PHY0RECRST_ V_T7_STATIC_PHY0RECRST_(1U)
+
+#define S_T7_STATIC_PHY1RECRST_ 11
+#define V_T7_STATIC_PHY1RECRST_(x) ((x) << S_T7_STATIC_PHY1RECRST_)
+#define F_T7_STATIC_PHY1RECRST_ V_T7_STATIC_PHY1RECRST_(1U)
+
+#define S_T7_STATIC_SWMC0RST_ 10
+#define V_T7_STATIC_SWMC0RST_(x) ((x) << S_T7_STATIC_SWMC0RST_)
+#define F_T7_STATIC_SWMC0RST_ V_T7_STATIC_SWMC0RST_(1U)
+
+#define S_T7_STATIC_SWMC0CFGRST_ 9
+#define V_T7_STATIC_SWMC0CFGRST_(x) ((x) << S_T7_STATIC_SWMC0CFGRST_)
+#define F_T7_STATIC_SWMC0CFGRST_ V_T7_STATIC_SWMC0CFGRST_(1U)
+
+#define S_STATIC_M0_PLL_SSMF 5
+#define M_STATIC_M0_PLL_SSMF 0xfU
+#define V_STATIC_M0_PLL_SSMF(x) ((x) << S_STATIC_M0_PLL_SSMF)
+#define G_STATIC_M0_PLL_SSMF(x) (((x) >> S_STATIC_M0_PLL_SSMF) & M_STATIC_M0_PLL_SSMF)
+
+#define S_STATIC_M0_PLL_SSMD 2
+#define M_STATIC_M0_PLL_SSMD 0x7U
+#define V_STATIC_M0_PLL_SSMD(x) ((x) << S_STATIC_M0_PLL_SSMD)
+#define G_STATIC_M0_PLL_SSMD(x) (((x) >> S_STATIC_M0_PLL_SSMD) & M_STATIC_M0_PLL_SSMD)
+
+#define S_STATIC_M0_PLL_SSDS 1
+#define V_STATIC_M0_PLL_SSDS(x) ((x) << S_STATIC_M0_PLL_SSDS)
+#define F_STATIC_M0_PLL_SSDS V_STATIC_M0_PLL_SSDS(1U)
+
+#define S_STATIC_M0_PLL_SSE 0
+#define V_STATIC_M0_PLL_SSE(x) ((x) << S_STATIC_M0_PLL_SSE)
+#define F_STATIC_M0_PLL_SSE V_STATIC_M0_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF3 0x60c0
#define S_T5_STATIC_M_PLL_MULTPRE 30
@@ -12778,8 +16021,58 @@
#define V_T6_STATIC_M_PLL_RANGEA(x) ((x) << S_T6_STATIC_M_PLL_RANGEA)
#define G_T6_STATIC_M_PLL_RANGEA(x) (((x) >> S_T6_STATIC_M_PLL_RANGEA) & M_T6_STATIC_M_PLL_RANGEA)
+#define A_DBG_STATIC_MAC_PLL_CONF1 0x60c0
+
+#define S_STATIC_MAC_PLL_RANGE 22
+#define M_STATIC_MAC_PLL_RANGE 0x7U
+#define V_STATIC_MAC_PLL_RANGE(x) ((x) << S_STATIC_MAC_PLL_RANGE)
+#define G_STATIC_MAC_PLL_RANGE(x) (((x) >> S_STATIC_MAC_PLL_RANGE) & M_STATIC_MAC_PLL_RANGE)
+
+#define S_STATIC_MAC_PLL_DIVQ 17
+#define M_STATIC_MAC_PLL_DIVQ 0x1fU
+#define V_STATIC_MAC_PLL_DIVQ(x) ((x) << S_STATIC_MAC_PLL_DIVQ)
+#define G_STATIC_MAC_PLL_DIVQ(x) (((x) >> S_STATIC_MAC_PLL_DIVQ) & M_STATIC_MAC_PLL_DIVQ)
+
+#define S_STATIC_MAC_PLL_DIVFI 8
+#define M_STATIC_MAC_PLL_DIVFI 0x1ffU
+#define V_STATIC_MAC_PLL_DIVFI(x) ((x) << S_STATIC_MAC_PLL_DIVFI)
+#define G_STATIC_MAC_PLL_DIVFI(x) (((x) >> S_STATIC_MAC_PLL_DIVFI) & M_STATIC_MAC_PLL_DIVFI)
+
+#define S_STATIC_MAC_PLL_DIVR 2
+#define M_STATIC_MAC_PLL_DIVR 0x3fU
+#define V_STATIC_MAC_PLL_DIVR(x) ((x) << S_STATIC_MAC_PLL_DIVR)
+#define G_STATIC_MAC_PLL_DIVR(x) (((x) >> S_STATIC_MAC_PLL_DIVR) & M_STATIC_MAC_PLL_DIVR)
+
+#define S_STATIC_MAC_PLL_BYPASS 1
+#define V_STATIC_MAC_PLL_BYPASS(x) ((x) << S_STATIC_MAC_PLL_BYPASS)
+#define F_STATIC_MAC_PLL_BYPASS V_STATIC_MAC_PLL_BYPASS(1U)
+
+#define S_STATIC_MAC_PLL_RESET 0
+#define V_STATIC_MAC_PLL_RESET(x) ((x) << S_STATIC_MAC_PLL_RESET)
+#define F_STATIC_MAC_PLL_RESET V_STATIC_MAC_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF4 0x60c4
#define A_DBG_STATIC_M_PLL_CONF4 0x60c4
+#define A_DBG_STATIC_MAC_PLL_CONF2 0x60c4
+
+#define S_STATIC_MAC_PLL_SSMF 5
+#define M_STATIC_MAC_PLL_SSMF 0xfU
+#define V_STATIC_MAC_PLL_SSMF(x) ((x) << S_STATIC_MAC_PLL_SSMF)
+#define G_STATIC_MAC_PLL_SSMF(x) (((x) >> S_STATIC_MAC_PLL_SSMF) & M_STATIC_MAC_PLL_SSMF)
+
+#define S_STATIC_MAC_PLL_SSMD 2
+#define M_STATIC_MAC_PLL_SSMD 0x7U
+#define V_STATIC_MAC_PLL_SSMD(x) ((x) << S_STATIC_MAC_PLL_SSMD)
+#define G_STATIC_MAC_PLL_SSMD(x) (((x) >> S_STATIC_MAC_PLL_SSMD) & M_STATIC_MAC_PLL_SSMD)
+
+#define S_STATIC_MAC_PLL_SSDS 1
+#define V_STATIC_MAC_PLL_SSDS(x) ((x) << S_STATIC_MAC_PLL_SSDS)
+#define F_STATIC_MAC_PLL_SSDS V_STATIC_MAC_PLL_SSDS(1U)
+
+#define S_STATIC_MAC_PLL_SSE 0
+#define V_STATIC_MAC_PLL_SSE(x) ((x) << S_STATIC_MAC_PLL_SSE)
+#define F_STATIC_MAC_PLL_SSE V_STATIC_MAC_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF5 0x60c8
#define S_T5_STATIC_M_PLL_VCVTUNE 24
@@ -12835,6 +16128,36 @@
#define V_T6_STATIC_M_PLL_MULT(x) ((x) << S_T6_STATIC_M_PLL_MULT)
#define G_T6_STATIC_M_PLL_MULT(x) (((x) >> S_T6_STATIC_M_PLL_MULT) & M_T6_STATIC_M_PLL_MULT)
+#define A_DBG_STATIC_ARM_PLL_CONF1 0x60c8
+
+#define S_STATIC_ARM_PLL_RANGE 22
+#define M_STATIC_ARM_PLL_RANGE 0x7U
+#define V_STATIC_ARM_PLL_RANGE(x) ((x) << S_STATIC_ARM_PLL_RANGE)
+#define G_STATIC_ARM_PLL_RANGE(x) (((x) >> S_STATIC_ARM_PLL_RANGE) & M_STATIC_ARM_PLL_RANGE)
+
+#define S_STATIC_ARM_PLL_DIVQ 17
+#define M_STATIC_ARM_PLL_DIVQ 0x1fU
+#define V_STATIC_ARM_PLL_DIVQ(x) ((x) << S_STATIC_ARM_PLL_DIVQ)
+#define G_STATIC_ARM_PLL_DIVQ(x) (((x) >> S_STATIC_ARM_PLL_DIVQ) & M_STATIC_ARM_PLL_DIVQ)
+
+#define S_STATIC_ARM_PLL_DIVFI 8
+#define M_STATIC_ARM_PLL_DIVFI 0x1ffU
+#define V_STATIC_ARM_PLL_DIVFI(x) ((x) << S_STATIC_ARM_PLL_DIVFI)
+#define G_STATIC_ARM_PLL_DIVFI(x) (((x) >> S_STATIC_ARM_PLL_DIVFI) & M_STATIC_ARM_PLL_DIVFI)
+
+#define S_STATIC_ARM_PLL_DIVR 2
+#define M_STATIC_ARM_PLL_DIVR 0x3fU
+#define V_STATIC_ARM_PLL_DIVR(x) ((x) << S_STATIC_ARM_PLL_DIVR)
+#define G_STATIC_ARM_PLL_DIVR(x) (((x) >> S_STATIC_ARM_PLL_DIVR) & M_STATIC_ARM_PLL_DIVR)
+
+#define S_STATIC_ARM_PLL_BYPASS 1
+#define V_STATIC_ARM_PLL_BYPASS(x) ((x) << S_STATIC_ARM_PLL_BYPASS)
+#define F_STATIC_ARM_PLL_BYPASS V_STATIC_ARM_PLL_BYPASS(1U)
+
+#define S_STATIC_ARM_PLL_RESET 0
+#define V_STATIC_ARM_PLL_RESET(x) ((x) << S_STATIC_ARM_PLL_RESET)
+#define F_STATIC_ARM_PLL_RESET V_STATIC_ARM_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF6 0x60cc
#define S_T5_STATIC_PHY0RECRST_ 5
@@ -12913,6 +16236,26 @@
#define V_STATIC_SWMC1CFGRST_(x) ((x) << S_STATIC_SWMC1CFGRST_)
#define F_STATIC_SWMC1CFGRST_ V_STATIC_SWMC1CFGRST_(1U)
+#define A_DBG_STATIC_ARM_PLL_CONF2 0x60cc
+
+#define S_STATIC_ARM_PLL_SSMF 5
+#define M_STATIC_ARM_PLL_SSMF 0xfU
+#define V_STATIC_ARM_PLL_SSMF(x) ((x) << S_STATIC_ARM_PLL_SSMF)
+#define G_STATIC_ARM_PLL_SSMF(x) (((x) >> S_STATIC_ARM_PLL_SSMF) & M_STATIC_ARM_PLL_SSMF)
+
+#define S_STATIC_ARM_PLL_SSMD 2
+#define M_STATIC_ARM_PLL_SSMD 0x7U
+#define V_STATIC_ARM_PLL_SSMD(x) ((x) << S_STATIC_ARM_PLL_SSMD)
+#define G_STATIC_ARM_PLL_SSMD(x) (((x) >> S_STATIC_ARM_PLL_SSMD) & M_STATIC_ARM_PLL_SSMD)
+
+#define S_STATIC_ARM_PLL_SSDS 1
+#define V_STATIC_ARM_PLL_SSDS(x) ((x) << S_STATIC_ARM_PLL_SSDS)
+#define F_STATIC_ARM_PLL_SSDS V_STATIC_ARM_PLL_SSDS(1U)
+
+#define S_STATIC_ARM_PLL_SSE 0
+#define V_STATIC_ARM_PLL_SSE(x) ((x) << S_STATIC_ARM_PLL_SSE)
+#define F_STATIC_ARM_PLL_SSE V_STATIC_ARM_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF1 0x60d0
#define S_T5_STATIC_C_PLL_MULTFRAC 8
@@ -12937,6 +16280,36 @@
#define V_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_STATIC_C_PLL_FFSLEWRATE)
#define G_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_C_PLL_FFSLEWRATE) & M_STATIC_C_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_USB_PLL_CONF1 0x60d0
+
+#define S_STATIC_USB_PLL_RANGE 22
+#define M_STATIC_USB_PLL_RANGE 0x7U
+#define V_STATIC_USB_PLL_RANGE(x) ((x) << S_STATIC_USB_PLL_RANGE)
+#define G_STATIC_USB_PLL_RANGE(x) (((x) >> S_STATIC_USB_PLL_RANGE) & M_STATIC_USB_PLL_RANGE)
+
+#define S_STATIC_USB_PLL_DIVQ 17
+#define M_STATIC_USB_PLL_DIVQ 0x1fU
+#define V_STATIC_USB_PLL_DIVQ(x) ((x) << S_STATIC_USB_PLL_DIVQ)
+#define G_STATIC_USB_PLL_DIVQ(x) (((x) >> S_STATIC_USB_PLL_DIVQ) & M_STATIC_USB_PLL_DIVQ)
+
+#define S_STATIC_USB_PLL_DIVFI 8
+#define M_STATIC_USB_PLL_DIVFI 0x1ffU
+#define V_STATIC_USB_PLL_DIVFI(x) ((x) << S_STATIC_USB_PLL_DIVFI)
+#define G_STATIC_USB_PLL_DIVFI(x) (((x) >> S_STATIC_USB_PLL_DIVFI) & M_STATIC_USB_PLL_DIVFI)
+
+#define S_STATIC_USB_PLL_DIVR 2
+#define M_STATIC_USB_PLL_DIVR 0x3fU
+#define V_STATIC_USB_PLL_DIVR(x) ((x) << S_STATIC_USB_PLL_DIVR)
+#define G_STATIC_USB_PLL_DIVR(x) (((x) >> S_STATIC_USB_PLL_DIVR) & M_STATIC_USB_PLL_DIVR)
+
+#define S_STATIC_USB_PLL_BYPASS 1
+#define V_STATIC_USB_PLL_BYPASS(x) ((x) << S_STATIC_USB_PLL_BYPASS)
+#define F_STATIC_USB_PLL_BYPASS V_STATIC_USB_PLL_BYPASS(1U)
+
+#define S_STATIC_USB_PLL_RESET 0
+#define V_STATIC_USB_PLL_RESET(x) ((x) << S_STATIC_USB_PLL_RESET)
+#define F_STATIC_USB_PLL_RESET V_STATIC_USB_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF2 0x60d4
#define S_T5_STATIC_C_PLL_DCO_BYPASS 23
@@ -13019,6 +16392,26 @@
#define V_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_STATIC_C_PLL_LOCKTUNE)
#define G_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_STATIC_C_PLL_LOCKTUNE) & M_STATIC_C_PLL_LOCKTUNE)
+#define A_DBG_STATIC_USB_PLL_CONF2 0x60d4
+
+#define S_STATIC_USB_PLL_SSMF 5
+#define M_STATIC_USB_PLL_SSMF 0xfU
+#define V_STATIC_USB_PLL_SSMF(x) ((x) << S_STATIC_USB_PLL_SSMF)
+#define G_STATIC_USB_PLL_SSMF(x) (((x) >> S_STATIC_USB_PLL_SSMF) & M_STATIC_USB_PLL_SSMF)
+
+#define S_STATIC_USB_PLL_SSMD 2
+#define M_STATIC_USB_PLL_SSMD 0x7U
+#define V_STATIC_USB_PLL_SSMD(x) ((x) << S_STATIC_USB_PLL_SSMD)
+#define G_STATIC_USB_PLL_SSMD(x) (((x) >> S_STATIC_USB_PLL_SSMD) & M_STATIC_USB_PLL_SSMD)
+
+#define S_STATIC_USB_PLL_SSDS 1
+#define V_STATIC_USB_PLL_SSDS(x) ((x) << S_STATIC_USB_PLL_SSDS)
+#define F_STATIC_USB_PLL_SSDS V_STATIC_USB_PLL_SSDS(1U)
+
+#define S_STATIC_USB_PLL_SSE 0
+#define V_STATIC_USB_PLL_SSE(x) ((x) << S_STATIC_USB_PLL_SSE)
+#define F_STATIC_USB_PLL_SSE V_STATIC_USB_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF3 0x60d8
#define S_T5_STATIC_C_PLL_MULTPRE 30
@@ -13082,8 +16475,58 @@
#define V_T6_STATIC_C_PLL_RANGEA(x) ((x) << S_T6_STATIC_C_PLL_RANGEA)
#define G_T6_STATIC_C_PLL_RANGEA(x) (((x) >> S_T6_STATIC_C_PLL_RANGEA) & M_T6_STATIC_C_PLL_RANGEA)
+#define A_DBG_STATIC_XGPHY_PLL_CONF1 0x60d8
+
+#define S_STATIC_XGPHY_PLL_RANGE 22
+#define M_STATIC_XGPHY_PLL_RANGE 0x7U
+#define V_STATIC_XGPHY_PLL_RANGE(x) ((x) << S_STATIC_XGPHY_PLL_RANGE)
+#define G_STATIC_XGPHY_PLL_RANGE(x) (((x) >> S_STATIC_XGPHY_PLL_RANGE) & M_STATIC_XGPHY_PLL_RANGE)
+
+#define S_STATIC_XGPHY_PLL_DIVQ 17
+#define M_STATIC_XGPHY_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPHY_PLL_DIVQ(x) ((x) << S_STATIC_XGPHY_PLL_DIVQ)
+#define G_STATIC_XGPHY_PLL_DIVQ(x) (((x) >> S_STATIC_XGPHY_PLL_DIVQ) & M_STATIC_XGPHY_PLL_DIVQ)
+
+#define S_STATIC_XGPHY_PLL_DIVFI 8
+#define M_STATIC_XGPHY_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPHY_PLL_DIVFI(x) ((x) << S_STATIC_XGPHY_PLL_DIVFI)
+#define G_STATIC_XGPHY_PLL_DIVFI(x) (((x) >> S_STATIC_XGPHY_PLL_DIVFI) & M_STATIC_XGPHY_PLL_DIVFI)
+
+#define S_STATIC_XGPHY_PLL_DIVR 2
+#define M_STATIC_XGPHY_PLL_DIVR 0x3fU
+#define V_STATIC_XGPHY_PLL_DIVR(x) ((x) << S_STATIC_XGPHY_PLL_DIVR)
+#define G_STATIC_XGPHY_PLL_DIVR(x) (((x) >> S_STATIC_XGPHY_PLL_DIVR) & M_STATIC_XGPHY_PLL_DIVR)
+
+#define S_STATIC_XGPHY_PLL_BYPASS 1
+#define V_STATIC_XGPHY_PLL_BYPASS(x) ((x) << S_STATIC_XGPHY_PLL_BYPASS)
+#define F_STATIC_XGPHY_PLL_BYPASS V_STATIC_XGPHY_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPHY_PLL_RESET 0
+#define V_STATIC_XGPHY_PLL_RESET(x) ((x) << S_STATIC_XGPHY_PLL_RESET)
+#define F_STATIC_XGPHY_PLL_RESET V_STATIC_XGPHY_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF4 0x60dc
#define A_DBG_STATIC_C_PLL_CONF4 0x60dc
+#define A_DBG_STATIC_XGPHY_PLL_CONF2 0x60dc
+
+#define S_STATIC_XGPHY_PLL_SSMF 5
+#define M_STATIC_XGPHY_PLL_SSMF 0xfU
+#define V_STATIC_XGPHY_PLL_SSMF(x) ((x) << S_STATIC_XGPHY_PLL_SSMF)
+#define G_STATIC_XGPHY_PLL_SSMF(x) (((x) >> S_STATIC_XGPHY_PLL_SSMF) & M_STATIC_XGPHY_PLL_SSMF)
+
+#define S_STATIC_XGPHY_PLL_SSMD 2
+#define M_STATIC_XGPHY_PLL_SSMD 0x7U
+#define V_STATIC_XGPHY_PLL_SSMD(x) ((x) << S_STATIC_XGPHY_PLL_SSMD)
+#define G_STATIC_XGPHY_PLL_SSMD(x) (((x) >> S_STATIC_XGPHY_PLL_SSMD) & M_STATIC_XGPHY_PLL_SSMD)
+
+#define S_STATIC_XGPHY_PLL_SSDS 1
+#define V_STATIC_XGPHY_PLL_SSDS(x) ((x) << S_STATIC_XGPHY_PLL_SSDS)
+#define F_STATIC_XGPHY_PLL_SSDS V_STATIC_XGPHY_PLL_SSDS(1U)
+
+#define S_STATIC_XGPHY_PLL_SSE 0
+#define V_STATIC_XGPHY_PLL_SSE(x) ((x) << S_STATIC_XGPHY_PLL_SSE)
+#define F_STATIC_XGPHY_PLL_SSE V_STATIC_XGPHY_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF5 0x60e0
#define S_T5_STATIC_C_PLL_VCVTUNE 22
@@ -13140,6 +16583,40 @@
#define V_T6_STATIC_C_PLL_MULT(x) ((x) << S_T6_STATIC_C_PLL_MULT)
#define G_T6_STATIC_C_PLL_MULT(x) (((x) >> S_T6_STATIC_C_PLL_MULT) & M_T6_STATIC_C_PLL_MULT)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF1 0x60e0
+
+#define S_STATIC_XGPBUS_SWRST_ 25
+#define V_STATIC_XGPBUS_SWRST_(x) ((x) << S_STATIC_XGPBUS_SWRST_)
+#define F_STATIC_XGPBUS_SWRST_ V_STATIC_XGPBUS_SWRST_(1U)
+
+#define S_STATIC_XGPBUS_PLL_RANGE 22
+#define M_STATIC_XGPBUS_PLL_RANGE 0x7U
+#define V_STATIC_XGPBUS_PLL_RANGE(x) ((x) << S_STATIC_XGPBUS_PLL_RANGE)
+#define G_STATIC_XGPBUS_PLL_RANGE(x) (((x) >> S_STATIC_XGPBUS_PLL_RANGE) & M_STATIC_XGPBUS_PLL_RANGE)
+
+#define S_STATIC_XGPBUS_PLL_DIVQ 17
+#define M_STATIC_XGPBUS_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPBUS_PLL_DIVQ(x) ((x) << S_STATIC_XGPBUS_PLL_DIVQ)
+#define G_STATIC_XGPBUS_PLL_DIVQ(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVQ) & M_STATIC_XGPBUS_PLL_DIVQ)
+
+#define S_STATIC_XGPBUS_PLL_DIVFI 8
+#define M_STATIC_XGPBUS_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPBUS_PLL_DIVFI(x) ((x) << S_STATIC_XGPBUS_PLL_DIVFI)
+#define G_STATIC_XGPBUS_PLL_DIVFI(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVFI) & M_STATIC_XGPBUS_PLL_DIVFI)
+
+#define S_STATIC_XGPBUS_PLL_DIVR 2
+#define M_STATIC_XGPBUS_PLL_DIVR 0x3fU
+#define V_STATIC_XGPBUS_PLL_DIVR(x) ((x) << S_STATIC_XGPBUS_PLL_DIVR)
+#define G_STATIC_XGPBUS_PLL_DIVR(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVR) & M_STATIC_XGPBUS_PLL_DIVR)
+
+#define S_STATIC_XGPBUS_PLL_BYPASS 1
+#define V_STATIC_XGPBUS_PLL_BYPASS(x) ((x) << S_STATIC_XGPBUS_PLL_BYPASS)
+#define F_STATIC_XGPBUS_PLL_BYPASS V_STATIC_XGPBUS_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPBUS_PLL_RESET 0
+#define V_STATIC_XGPBUS_PLL_RESET(x) ((x) << S_STATIC_XGPBUS_PLL_RESET)
+#define F_STATIC_XGPBUS_PLL_RESET V_STATIC_XGPBUS_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF1 0x60e4
#define S_T5_STATIC_U_PLL_MULTFRAC 8
@@ -13164,6 +16641,26 @@
#define V_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_STATIC_U_PLL_FFSLEWRATE)
#define G_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_U_PLL_FFSLEWRATE) & M_STATIC_U_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF2 0x60e4
+
+#define S_STATIC_XGPBUS_PLL_SSMF 5
+#define M_STATIC_XGPBUS_PLL_SSMF 0xfU
+#define V_STATIC_XGPBUS_PLL_SSMF(x) ((x) << S_STATIC_XGPBUS_PLL_SSMF)
+#define G_STATIC_XGPBUS_PLL_SSMF(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMF) & M_STATIC_XGPBUS_PLL_SSMF)
+
+#define S_STATIC_XGPBUS_PLL_SSMD 2
+#define M_STATIC_XGPBUS_PLL_SSMD 0x7U
+#define V_STATIC_XGPBUS_PLL_SSMD(x) ((x) << S_STATIC_XGPBUS_PLL_SSMD)
+#define G_STATIC_XGPBUS_PLL_SSMD(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMD) & M_STATIC_XGPBUS_PLL_SSMD)
+
+#define S_STATIC_XGPBUS_PLL_SSDS 1
+#define V_STATIC_XGPBUS_PLL_SSDS(x) ((x) << S_STATIC_XGPBUS_PLL_SSDS)
+#define F_STATIC_XGPBUS_PLL_SSDS V_STATIC_XGPBUS_PLL_SSDS(1U)
+
+#define S_STATIC_XGPBUS_PLL_SSE 0
+#define V_STATIC_XGPBUS_PLL_SSE(x) ((x) << S_STATIC_XGPBUS_PLL_SSE)
+#define F_STATIC_XGPBUS_PLL_SSE V_STATIC_XGPBUS_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF2 0x60e8
#define S_T5_STATIC_U_PLL_DCO_BYPASS 23
@@ -13246,6 +16743,36 @@
#define V_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_STATIC_U_PLL_LOCKTUNE)
#define G_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_STATIC_U_PLL_LOCKTUNE) & M_STATIC_U_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M1_PLL_CONF1 0x60e8
+
+#define S_STATIC_M1_PLL_RANGE 22
+#define M_STATIC_M1_PLL_RANGE 0x7U
+#define V_STATIC_M1_PLL_RANGE(x) ((x) << S_STATIC_M1_PLL_RANGE)
+#define G_STATIC_M1_PLL_RANGE(x) (((x) >> S_STATIC_M1_PLL_RANGE) & M_STATIC_M1_PLL_RANGE)
+
+#define S_STATIC_M1_PLL_DIVQ 17
+#define M_STATIC_M1_PLL_DIVQ 0x1fU
+#define V_STATIC_M1_PLL_DIVQ(x) ((x) << S_STATIC_M1_PLL_DIVQ)
+#define G_STATIC_M1_PLL_DIVQ(x) (((x) >> S_STATIC_M1_PLL_DIVQ) & M_STATIC_M1_PLL_DIVQ)
+
+#define S_STATIC_M1_PLL_DIVFI 8
+#define M_STATIC_M1_PLL_DIVFI 0x1ffU
+#define V_STATIC_M1_PLL_DIVFI(x) ((x) << S_STATIC_M1_PLL_DIVFI)
+#define G_STATIC_M1_PLL_DIVFI(x) (((x) >> S_STATIC_M1_PLL_DIVFI) & M_STATIC_M1_PLL_DIVFI)
+
+#define S_STATIC_M1_PLL_DIVR 2
+#define M_STATIC_M1_PLL_DIVR 0x3fU
+#define V_STATIC_M1_PLL_DIVR(x) ((x) << S_STATIC_M1_PLL_DIVR)
+#define G_STATIC_M1_PLL_DIVR(x) (((x) >> S_STATIC_M1_PLL_DIVR) & M_STATIC_M1_PLL_DIVR)
+
+#define S_STATIC_M1_PLL_BYPASS 1
+#define V_STATIC_M1_PLL_BYPASS(x) ((x) << S_STATIC_M1_PLL_BYPASS)
+#define F_STATIC_M1_PLL_BYPASS V_STATIC_M1_PLL_BYPASS(1U)
+
+#define S_STATIC_M1_PLL_RESET 0
+#define V_STATIC_M1_PLL_RESET(x) ((x) << S_STATIC_M1_PLL_RESET)
+#define F_STATIC_M1_PLL_RESET V_STATIC_M1_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF3 0x60ec
#define S_T5_STATIC_U_PLL_MULTPRE 30
@@ -13309,6 +16836,26 @@
#define V_T6_STATIC_U_PLL_RANGEA(x) ((x) << S_T6_STATIC_U_PLL_RANGEA)
#define G_T6_STATIC_U_PLL_RANGEA(x) (((x) >> S_T6_STATIC_U_PLL_RANGEA) & M_T6_STATIC_U_PLL_RANGEA)
+#define A_DBG_STATIC_M1_PLL_CONF2 0x60ec
+
+#define S_STATIC_M1_PLL_SSMF 5
+#define M_STATIC_M1_PLL_SSMF 0xfU
+#define V_STATIC_M1_PLL_SSMF(x) ((x) << S_STATIC_M1_PLL_SSMF)
+#define G_STATIC_M1_PLL_SSMF(x) (((x) >> S_STATIC_M1_PLL_SSMF) & M_STATIC_M1_PLL_SSMF)
+
+#define S_STATIC_M1_PLL_SSMD 2
+#define M_STATIC_M1_PLL_SSMD 0x7U
+#define V_STATIC_M1_PLL_SSMD(x) ((x) << S_STATIC_M1_PLL_SSMD)
+#define G_STATIC_M1_PLL_SSMD(x) (((x) >> S_STATIC_M1_PLL_SSMD) & M_STATIC_M1_PLL_SSMD)
+
+#define S_STATIC_M1_PLL_SSDS 1
+#define V_STATIC_M1_PLL_SSDS(x) ((x) << S_STATIC_M1_PLL_SSDS)
+#define F_STATIC_M1_PLL_SSDS V_STATIC_M1_PLL_SSDS(1U)
+
+#define S_STATIC_M1_PLL_SSE 0
+#define V_STATIC_M1_PLL_SSE(x) ((x) << S_STATIC_M1_PLL_SSE)
+#define F_STATIC_M1_PLL_SSE V_STATIC_M1_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_T5_STATIC_U_PLL_CONF5 0x60f4
@@ -13557,6 +17104,104 @@
#define V_GPIO19_OUT_VAL(x) ((x) << S_GPIO19_OUT_VAL)
#define F_GPIO19_OUT_VAL V_GPIO19_OUT_VAL(1U)
+#define A_DBG_GPIO_OEN 0x6100
+
+#define S_GPIO23_OEN 23
+#define V_GPIO23_OEN(x) ((x) << S_GPIO23_OEN)
+#define F_GPIO23_OEN V_GPIO23_OEN(1U)
+
+#define S_GPIO22_OEN 22
+#define V_GPIO22_OEN(x) ((x) << S_GPIO22_OEN)
+#define F_GPIO22_OEN V_GPIO22_OEN(1U)
+
+#define S_GPIO21_OEN 21
+#define V_GPIO21_OEN(x) ((x) << S_GPIO21_OEN)
+#define F_GPIO21_OEN V_GPIO21_OEN(1U)
+
+#define S_GPIO20_OEN 20
+#define V_GPIO20_OEN(x) ((x) << S_GPIO20_OEN)
+#define F_GPIO20_OEN V_GPIO20_OEN(1U)
+
+#define S_T7_GPIO19_OEN 19
+#define V_T7_GPIO19_OEN(x) ((x) << S_T7_GPIO19_OEN)
+#define F_T7_GPIO19_OEN V_T7_GPIO19_OEN(1U)
+
+#define S_T7_GPIO18_OEN 18
+#define V_T7_GPIO18_OEN(x) ((x) << S_T7_GPIO18_OEN)
+#define F_T7_GPIO18_OEN V_T7_GPIO18_OEN(1U)
+
+#define S_T7_GPIO17_OEN 17
+#define V_T7_GPIO17_OEN(x) ((x) << S_T7_GPIO17_OEN)
+#define F_T7_GPIO17_OEN V_T7_GPIO17_OEN(1U)
+
+#define S_T7_GPIO16_OEN 16
+#define V_T7_GPIO16_OEN(x) ((x) << S_T7_GPIO16_OEN)
+#define F_T7_GPIO16_OEN V_T7_GPIO16_OEN(1U)
+
+#define S_T7_GPIO15_OEN 15
+#define V_T7_GPIO15_OEN(x) ((x) << S_T7_GPIO15_OEN)
+#define F_T7_GPIO15_OEN V_T7_GPIO15_OEN(1U)
+
+#define S_T7_GPIO14_OEN 14
+#define V_T7_GPIO14_OEN(x) ((x) << S_T7_GPIO14_OEN)
+#define F_T7_GPIO14_OEN V_T7_GPIO14_OEN(1U)
+
+#define S_T7_GPIO13_OEN 13
+#define V_T7_GPIO13_OEN(x) ((x) << S_T7_GPIO13_OEN)
+#define F_T7_GPIO13_OEN V_T7_GPIO13_OEN(1U)
+
+#define S_T7_GPIO12_OEN 12
+#define V_T7_GPIO12_OEN(x) ((x) << S_T7_GPIO12_OEN)
+#define F_T7_GPIO12_OEN V_T7_GPIO12_OEN(1U)
+
+#define S_T7_GPIO11_OEN 11
+#define V_T7_GPIO11_OEN(x) ((x) << S_T7_GPIO11_OEN)
+#define F_T7_GPIO11_OEN V_T7_GPIO11_OEN(1U)
+
+#define S_T7_GPIO10_OEN 10
+#define V_T7_GPIO10_OEN(x) ((x) << S_T7_GPIO10_OEN)
+#define F_T7_GPIO10_OEN V_T7_GPIO10_OEN(1U)
+
+#define S_T7_GPIO9_OEN 9
+#define V_T7_GPIO9_OEN(x) ((x) << S_T7_GPIO9_OEN)
+#define F_T7_GPIO9_OEN V_T7_GPIO9_OEN(1U)
+
+#define S_T7_GPIO8_OEN 8
+#define V_T7_GPIO8_OEN(x) ((x) << S_T7_GPIO8_OEN)
+#define F_T7_GPIO8_OEN V_T7_GPIO8_OEN(1U)
+
+#define S_T7_GPIO7_OEN 7
+#define V_T7_GPIO7_OEN(x) ((x) << S_T7_GPIO7_OEN)
+#define F_T7_GPIO7_OEN V_T7_GPIO7_OEN(1U)
+
+#define S_T7_GPIO6_OEN 6
+#define V_T7_GPIO6_OEN(x) ((x) << S_T7_GPIO6_OEN)
+#define F_T7_GPIO6_OEN V_T7_GPIO6_OEN(1U)
+
+#define S_T7_GPIO5_OEN 5
+#define V_T7_GPIO5_OEN(x) ((x) << S_T7_GPIO5_OEN)
+#define F_T7_GPIO5_OEN V_T7_GPIO5_OEN(1U)
+
+#define S_T7_GPIO4_OEN 4
+#define V_T7_GPIO4_OEN(x) ((x) << S_T7_GPIO4_OEN)
+#define F_T7_GPIO4_OEN V_T7_GPIO4_OEN(1U)
+
+#define S_T7_GPIO3_OEN 3
+#define V_T7_GPIO3_OEN(x) ((x) << S_T7_GPIO3_OEN)
+#define F_T7_GPIO3_OEN V_T7_GPIO3_OEN(1U)
+
+#define S_T7_GPIO2_OEN 2
+#define V_T7_GPIO2_OEN(x) ((x) << S_T7_GPIO2_OEN)
+#define F_T7_GPIO2_OEN V_T7_GPIO2_OEN(1U)
+
+#define S_T7_GPIO1_OEN 1
+#define V_T7_GPIO1_OEN(x) ((x) << S_T7_GPIO1_OEN)
+#define F_T7_GPIO1_OEN V_T7_GPIO1_OEN(1U)
+
+#define S_T7_GPIO0_OEN 0
+#define V_T7_GPIO0_OEN(x) ((x) << S_T7_GPIO0_OEN)
+#define F_T7_GPIO0_OEN V_T7_GPIO0_OEN(1U)
+
#define A_DBG_PVT_REG_UPDATE_CTL 0x6104
#define S_FAST_UPDATE 8
@@ -13605,6 +17250,104 @@
#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN)
#define F_GPIO16_IN V_GPIO16_IN(1U)
+#define A_DBG_GPIO_CHG_DET 0x6104
+
+#define S_GPIO23_CHG_DET 23
+#define V_GPIO23_CHG_DET(x) ((x) << S_GPIO23_CHG_DET)
+#define F_GPIO23_CHG_DET V_GPIO23_CHG_DET(1U)
+
+#define S_GPIO22_CHG_DET 22
+#define V_GPIO22_CHG_DET(x) ((x) << S_GPIO22_CHG_DET)
+#define F_GPIO22_CHG_DET V_GPIO22_CHG_DET(1U)
+
+#define S_GPIO21_CHG_DET 21
+#define V_GPIO21_CHG_DET(x) ((x) << S_GPIO21_CHG_DET)
+#define F_GPIO21_CHG_DET V_GPIO21_CHG_DET(1U)
+
+#define S_GPIO20_CHG_DET 20
+#define V_GPIO20_CHG_DET(x) ((x) << S_GPIO20_CHG_DET)
+#define F_GPIO20_CHG_DET V_GPIO20_CHG_DET(1U)
+
+#define S_T7_GPIO19_CHG_DET 19
+#define V_T7_GPIO19_CHG_DET(x) ((x) << S_T7_GPIO19_CHG_DET)
+#define F_T7_GPIO19_CHG_DET V_T7_GPIO19_CHG_DET(1U)
+
+#define S_T7_GPIO18_CHG_DET 18
+#define V_T7_GPIO18_CHG_DET(x) ((x) << S_T7_GPIO18_CHG_DET)
+#define F_T7_GPIO18_CHG_DET V_T7_GPIO18_CHG_DET(1U)
+
+#define S_T7_GPIO17_CHG_DET 17
+#define V_T7_GPIO17_CHG_DET(x) ((x) << S_T7_GPIO17_CHG_DET)
+#define F_T7_GPIO17_CHG_DET V_T7_GPIO17_CHG_DET(1U)
+
+#define S_T7_GPIO16_CHG_DET 16
+#define V_T7_GPIO16_CHG_DET(x) ((x) << S_T7_GPIO16_CHG_DET)
+#define F_T7_GPIO16_CHG_DET V_T7_GPIO16_CHG_DET(1U)
+
+#define S_T7_GPIO15_CHG_DET 15
+#define V_T7_GPIO15_CHG_DET(x) ((x) << S_T7_GPIO15_CHG_DET)
+#define F_T7_GPIO15_CHG_DET V_T7_GPIO15_CHG_DET(1U)
+
+#define S_T7_GPIO14_CHG_DET 14
+#define V_T7_GPIO14_CHG_DET(x) ((x) << S_T7_GPIO14_CHG_DET)
+#define F_T7_GPIO14_CHG_DET V_T7_GPIO14_CHG_DET(1U)
+
+#define S_T7_GPIO13_CHG_DET 13
+#define V_T7_GPIO13_CHG_DET(x) ((x) << S_T7_GPIO13_CHG_DET)
+#define F_T7_GPIO13_CHG_DET V_T7_GPIO13_CHG_DET(1U)
+
+#define S_T7_GPIO12_CHG_DET 12
+#define V_T7_GPIO12_CHG_DET(x) ((x) << S_T7_GPIO12_CHG_DET)
+#define F_T7_GPIO12_CHG_DET V_T7_GPIO12_CHG_DET(1U)
+
+#define S_T7_GPIO11_CHG_DET 11
+#define V_T7_GPIO11_CHG_DET(x) ((x) << S_T7_GPIO11_CHG_DET)
+#define F_T7_GPIO11_CHG_DET V_T7_GPIO11_CHG_DET(1U)
+
+#define S_T7_GPIO10_CHG_DET 10
+#define V_T7_GPIO10_CHG_DET(x) ((x) << S_T7_GPIO10_CHG_DET)
+#define F_T7_GPIO10_CHG_DET V_T7_GPIO10_CHG_DET(1U)
+
+#define S_T7_GPIO9_CHG_DET 9
+#define V_T7_GPIO9_CHG_DET(x) ((x) << S_T7_GPIO9_CHG_DET)
+#define F_T7_GPIO9_CHG_DET V_T7_GPIO9_CHG_DET(1U)
+
+#define S_T7_GPIO8_CHG_DET 8
+#define V_T7_GPIO8_CHG_DET(x) ((x) << S_T7_GPIO8_CHG_DET)
+#define F_T7_GPIO8_CHG_DET V_T7_GPIO8_CHG_DET(1U)
+
+#define S_T7_GPIO7_CHG_DET 7
+#define V_T7_GPIO7_CHG_DET(x) ((x) << S_T7_GPIO7_CHG_DET)
+#define F_T7_GPIO7_CHG_DET V_T7_GPIO7_CHG_DET(1U)
+
+#define S_T7_GPIO6_CHG_DET 6
+#define V_T7_GPIO6_CHG_DET(x) ((x) << S_T7_GPIO6_CHG_DET)
+#define F_T7_GPIO6_CHG_DET V_T7_GPIO6_CHG_DET(1U)
+
+#define S_T7_GPIO5_CHG_DET 5
+#define V_T7_GPIO5_CHG_DET(x) ((x) << S_T7_GPIO5_CHG_DET)
+#define F_T7_GPIO5_CHG_DET V_T7_GPIO5_CHG_DET(1U)
+
+#define S_T7_GPIO4_CHG_DET 4
+#define V_T7_GPIO4_CHG_DET(x) ((x) << S_T7_GPIO4_CHG_DET)
+#define F_T7_GPIO4_CHG_DET V_T7_GPIO4_CHG_DET(1U)
+
+#define S_T7_GPIO3_CHG_DET 3
+#define V_T7_GPIO3_CHG_DET(x) ((x) << S_T7_GPIO3_CHG_DET)
+#define F_T7_GPIO3_CHG_DET V_T7_GPIO3_CHG_DET(1U)
+
+#define S_T7_GPIO2_CHG_DET 2
+#define V_T7_GPIO2_CHG_DET(x) ((x) << S_T7_GPIO2_CHG_DET)
+#define F_T7_GPIO2_CHG_DET V_T7_GPIO2_CHG_DET(1U)
+
+#define S_T7_GPIO1_CHG_DET 1
+#define V_T7_GPIO1_CHG_DET(x) ((x) << S_T7_GPIO1_CHG_DET)
+#define F_T7_GPIO1_CHG_DET V_T7_GPIO1_CHG_DET(1U)
+
+#define S_T7_GPIO0_CHG_DET 0
+#define V_T7_GPIO0_CHG_DET(x) ((x) << S_T7_GPIO0_CHG_DET)
+#define F_T7_GPIO0_CHG_DET V_T7_GPIO0_CHG_DET(1U)
+
#define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108
#define S_LAST_MEASUREMENT_SELECT 8
@@ -13964,6 +17707,22 @@
#define V_GPIO0_PE_EN(x) ((x) << S_GPIO0_PE_EN)
#define F_GPIO0_PE_EN V_GPIO0_PE_EN(1U)
+#define S_GPIO23_PE_EN 23
+#define V_GPIO23_PE_EN(x) ((x) << S_GPIO23_PE_EN)
+#define F_GPIO23_PE_EN V_GPIO23_PE_EN(1U)
+
+#define S_GPIO22_PE_EN 22
+#define V_GPIO22_PE_EN(x) ((x) << S_GPIO22_PE_EN)
+#define F_GPIO22_PE_EN V_GPIO22_PE_EN(1U)
+
+#define S_GPIO21_PE_EN 21
+#define V_GPIO21_PE_EN(x) ((x) << S_GPIO21_PE_EN)
+#define F_GPIO21_PE_EN V_GPIO21_PE_EN(1U)
+
+#define S_GPIO20_PE_EN 20
+#define V_GPIO20_PE_EN(x) ((x) << S_GPIO20_PE_EN)
+#define F_GPIO20_PE_EN V_GPIO20_PE_EN(1U)
+
#define A_DBG_PVT_REG_THRESHOLD 0x611c
#define S_PVT_CALIBRATION_DONE 8
@@ -14084,6 +17843,22 @@
#define V_GPIO0_PS_EN(x) ((x) << S_GPIO0_PS_EN)
#define F_GPIO0_PS_EN V_GPIO0_PS_EN(1U)
+#define S_GPIO23_PS_EN 23
+#define V_GPIO23_PS_EN(x) ((x) << S_GPIO23_PS_EN)
+#define F_GPIO23_PS_EN V_GPIO23_PS_EN(1U)
+
+#define S_GPIO22_PS_EN 22
+#define V_GPIO22_PS_EN(x) ((x) << S_GPIO22_PS_EN)
+#define F_GPIO22_PS_EN V_GPIO22_PS_EN(1U)
+
+#define S_GPIO21_PS_EN 21
+#define V_GPIO21_PS_EN(x) ((x) << S_GPIO21_PS_EN)
+#define F_GPIO21_PS_EN V_GPIO21_PS_EN(1U)
+
+#define S_GPIO20_PS_EN 20
+#define V_GPIO20_PS_EN(x) ((x) << S_GPIO20_PS_EN)
+#define F_GPIO20_PS_EN V_GPIO20_PS_EN(1U)
+
#define A_DBG_PVT_REG_IN_TERMP 0x6120
#define S_REG_IN_TERMP_B 4
@@ -14254,6 +18029,17 @@
#define V_STATIC_U_PLL_VREGTUNE(x) ((x) << S_STATIC_U_PLL_VREGTUNE)
#define G_STATIC_U_PLL_VREGTUNE(x) (((x) >> S_STATIC_U_PLL_VREGTUNE) & M_STATIC_U_PLL_VREGTUNE)
+#define A_DBG_STATIC_PLL_LOCK_WAIT_CONF 0x6150
+
+#define S_STATIC_WAIT_LOCK 24
+#define V_STATIC_WAIT_LOCK(x) ((x) << S_STATIC_WAIT_LOCK)
+#define F_STATIC_WAIT_LOCK V_STATIC_WAIT_LOCK(1U)
+
+#define S_STATIC_LOCK_WAIT_TIME 0
+#define M_STATIC_LOCK_WAIT_TIME 0xffffffU
+#define V_STATIC_LOCK_WAIT_TIME(x) ((x) << S_STATIC_LOCK_WAIT_TIME)
+#define G_STATIC_LOCK_WAIT_TIME(x) (((x) >> S_STATIC_LOCK_WAIT_TIME) & M_STATIC_LOCK_WAIT_TIME)
+
#define A_DBG_STATIC_C_PLL_CONF6 0x6154
#define S_STATIC_C_PLL_VREGTUNE 0
@@ -14303,13 +18089,274 @@
#define A_DBG_CUST_EFUSE_BYTE24_27 0x6178
#define A_DBG_CUST_EFUSE_BYTE28_31 0x617c
#define A_DBG_CUST_EFUSE_BYTE32_35 0x6180
+#define A_DBG_GPIO_INT_ENABLE 0x6180
+
+#define S_GPIO23 23
+#define V_GPIO23(x) ((x) << S_GPIO23)
+#define F_GPIO23 V_GPIO23(1U)
+
+#define S_GPIO22 22
+#define V_GPIO22(x) ((x) << S_GPIO22)
+#define F_GPIO22 V_GPIO22(1U)
+
+#define S_GPIO21 21
+#define V_GPIO21(x) ((x) << S_GPIO21)
+#define F_GPIO21 V_GPIO21(1U)
+
+#define S_GPIO20 20
+#define V_GPIO20(x) ((x) << S_GPIO20)
+#define F_GPIO20 V_GPIO20(1U)
+
+#define S_T7_GPIO19 19
+#define V_T7_GPIO19(x) ((x) << S_T7_GPIO19)
+#define F_T7_GPIO19 V_T7_GPIO19(1U)
+
+#define S_T7_GPIO18 18
+#define V_T7_GPIO18(x) ((x) << S_T7_GPIO18)
+#define F_T7_GPIO18 V_T7_GPIO18(1U)
+
+#define S_T7_GPIO17 17
+#define V_T7_GPIO17(x) ((x) << S_T7_GPIO17)
+#define F_T7_GPIO17 V_T7_GPIO17(1U)
+
+#define S_T7_GPIO16 16
+#define V_T7_GPIO16(x) ((x) << S_T7_GPIO16)
+#define F_T7_GPIO16 V_T7_GPIO16(1U)
+
#define A_DBG_CUST_EFUSE_BYTE36_39 0x6184
+#define A_DBG_GPIO_INT_CAUSE 0x6184
#define A_DBG_CUST_EFUSE_BYTE40_43 0x6188
+#define A_T7_DBG_GPIO_ACT_LOW 0x6188
+
+#define S_GPIO23_ACT_LOW 23
+#define V_GPIO23_ACT_LOW(x) ((x) << S_GPIO23_ACT_LOW)
+#define F_GPIO23_ACT_LOW V_GPIO23_ACT_LOW(1U)
+
+#define S_GPIO22_ACT_LOW 22
+#define V_GPIO22_ACT_LOW(x) ((x) << S_GPIO22_ACT_LOW)
+#define F_GPIO22_ACT_LOW V_GPIO22_ACT_LOW(1U)
+
+#define S_GPIO21_ACT_LOW 21
+#define V_GPIO21_ACT_LOW(x) ((x) << S_GPIO21_ACT_LOW)
+#define F_GPIO21_ACT_LOW V_GPIO21_ACT_LOW(1U)
+
+#define S_GPIO20_ACT_LOW 20
+#define V_GPIO20_ACT_LOW(x) ((x) << S_GPIO20_ACT_LOW)
+#define F_GPIO20_ACT_LOW V_GPIO20_ACT_LOW(1U)
+
+#define S_T7_GPIO19_ACT_LOW 19
+#define V_T7_GPIO19_ACT_LOW(x) ((x) << S_T7_GPIO19_ACT_LOW)
+#define F_T7_GPIO19_ACT_LOW V_T7_GPIO19_ACT_LOW(1U)
+
+#define S_T7_GPIO18_ACT_LOW 18
+#define V_T7_GPIO18_ACT_LOW(x) ((x) << S_T7_GPIO18_ACT_LOW)
+#define F_T7_GPIO18_ACT_LOW V_T7_GPIO18_ACT_LOW(1U)
+
+#define S_T7_GPIO17_ACT_LOW 17
+#define V_T7_GPIO17_ACT_LOW(x) ((x) << S_T7_GPIO17_ACT_LOW)
+#define F_T7_GPIO17_ACT_LOW V_T7_GPIO17_ACT_LOW(1U)
+
+#define S_T7_GPIO16_ACT_LOW 16
+#define V_T7_GPIO16_ACT_LOW(x) ((x) << S_T7_GPIO16_ACT_LOW)
+#define F_T7_GPIO16_ACT_LOW V_T7_GPIO16_ACT_LOW(1U)
+
#define A_DBG_CUST_EFUSE_BYTE44_47 0x618c
+#define A_DBG_DDR_CAL 0x618c
+
+#define S_CAL_ENDC 9
+#define V_CAL_ENDC(x) ((x) << S_CAL_ENDC)
+#define F_CAL_ENDC V_CAL_ENDC(1U)
+
+#define S_CAL_MODE 8
+#define V_CAL_MODE(x) ((x) << S_CAL_MODE)
+#define F_CAL_MODE V_CAL_MODE(1U)
+
+#define S_CAL_REFSEL 7
+#define V_CAL_REFSEL(x) ((x) << S_CAL_REFSEL)
+#define F_CAL_REFSEL V_CAL_REFSEL(1U)
+
+#define S_PD 6
+#define V_PD(x) ((x) << S_PD)
+#define F_PD V_PD(1U)
+
+#define S_CAL_RST 5
+#define V_CAL_RST(x) ((x) << S_CAL_RST)
+#define F_CAL_RST V_CAL_RST(1U)
+
+#define S_CAL_READ 4
+#define V_CAL_READ(x) ((x) << S_CAL_READ)
+#define F_CAL_READ V_CAL_READ(1U)
+
+#define S_CAL_SC 3
+#define V_CAL_SC(x) ((x) << S_CAL_SC)
+#define F_CAL_SC V_CAL_SC(1U)
+
+#define S_CAL_LC 2
+#define V_CAL_LC(x) ((x) << S_CAL_LC)
+#define F_CAL_LC V_CAL_LC(1U)
+
+#define S_CAL_CCAL 1
+#define V_CAL_CCAL(x) ((x) << S_CAL_CCAL)
+#define F_CAL_CCAL V_CAL_CCAL(1U)
+
+#define S_CAL_RES 0
+#define V_CAL_RES(x) ((x) << S_CAL_RES)
+#define F_CAL_RES V_CAL_RES(1U)
+
#define A_DBG_CUST_EFUSE_BYTE48_51 0x6190
+#define A_DBG_EFUSE_CTL_0 0x6190
+
+#define S_EFUSE_CSB 31
+#define V_EFUSE_CSB(x) ((x) << S_EFUSE_CSB)
+#define F_EFUSE_CSB V_EFUSE_CSB(1U)
+
+#define S_EFUSE_STROBE 30
+#define V_EFUSE_STROBE(x) ((x) << S_EFUSE_STROBE)
+#define F_EFUSE_STROBE V_EFUSE_STROBE(1U)
+
+#define S_EFUSE_LOAD 29
+#define V_EFUSE_LOAD(x) ((x) << S_EFUSE_LOAD)
+#define F_EFUSE_LOAD V_EFUSE_LOAD(1U)
+
+#define S_EFUSE_PGENB 28
+#define V_EFUSE_PGENB(x) ((x) << S_EFUSE_PGENB)
+#define F_EFUSE_PGENB V_EFUSE_PGENB(1U)
+
+#define S_EFUSE_PS 27
+#define V_EFUSE_PS(x) ((x) << S_EFUSE_PS)
+#define F_EFUSE_PS V_EFUSE_PS(1U)
+
+#define S_EFUSE_MR 26
+#define V_EFUSE_MR(x) ((x) << S_EFUSE_MR)
+#define F_EFUSE_MR V_EFUSE_MR(1U)
+
+#define S_EFUSE_PD 25
+#define V_EFUSE_PD(x) ((x) << S_EFUSE_PD)
+#define F_EFUSE_PD V_EFUSE_PD(1U)
+
+#define S_EFUSE_RWL 24
+#define V_EFUSE_RWL(x) ((x) << S_EFUSE_RWL)
+#define F_EFUSE_RWL V_EFUSE_RWL(1U)
+
+#define S_EFUSE_RSB 23
+#define V_EFUSE_RSB(x) ((x) << S_EFUSE_RSB)
+#define F_EFUSE_RSB V_EFUSE_RSB(1U)
+
+#define S_EFUSE_TRCS 22
+#define V_EFUSE_TRCS(x) ((x) << S_EFUSE_TRCS)
+#define F_EFUSE_TRCS V_EFUSE_TRCS(1U)
+
+#define S_EFUSE_AT 20
+#define M_EFUSE_AT 0x3U
+#define V_EFUSE_AT(x) ((x) << S_EFUSE_AT)
+#define G_EFUSE_AT(x) (((x) >> S_EFUSE_AT) & M_EFUSE_AT)
+
+#define S_EFUSE_RD_STATE 16
+#define M_EFUSE_RD_STATE 0xfU
+#define V_EFUSE_RD_STATE(x) ((x) << S_EFUSE_RD_STATE)
+#define G_EFUSE_RD_STATE(x) (((x) >> S_EFUSE_RD_STATE) & M_EFUSE_RD_STATE)
+
+#define S_EFUSE_BUSY 15
+#define V_EFUSE_BUSY(x) ((x) << S_EFUSE_BUSY)
+#define F_EFUSE_BUSY V_EFUSE_BUSY(1U)
+
+#define S_EFUSE_WR_RD 13
+#define M_EFUSE_WR_RD 0x3U
+#define V_EFUSE_WR_RD(x) ((x) << S_EFUSE_WR_RD)
+#define G_EFUSE_WR_RD(x) (((x) >> S_EFUSE_WR_RD) & M_EFUSE_WR_RD)
+
+#define S_EFUSE_A 0
+#define M_EFUSE_A 0x7ffU
+#define V_EFUSE_A(x) ((x) << S_EFUSE_A)
+#define G_EFUSE_A(x) (((x) >> S_EFUSE_A) & M_EFUSE_A)
+
#define A_DBG_CUST_EFUSE_BYTE52_55 0x6194
+#define A_DBG_EFUSE_CTL_1 0x6194
#define A_DBG_CUST_EFUSE_BYTE56_59 0x6198
+#define A_DBG_EFUSE_RD_CTL 0x6198
+
+#define S_EFUSE_RD_ID 6
+#define M_EFUSE_RD_ID 0x3U
+#define V_EFUSE_RD_ID(x) ((x) << S_EFUSE_RD_ID)
+#define G_EFUSE_RD_ID(x) (((x) >> S_EFUSE_RD_ID) & M_EFUSE_RD_ID)
+
+#define S_EFUSE_RD_ADDR 0
+#define M_EFUSE_RD_ADDR 0x3fU
+#define V_EFUSE_RD_ADDR(x) ((x) << S_EFUSE_RD_ADDR)
+#define G_EFUSE_RD_ADDR(x) (((x) >> S_EFUSE_RD_ADDR) & M_EFUSE_RD_ADDR)
+
#define A_DBG_CUST_EFUSE_BYTE60_63 0x619c
+#define A_DBG_EFUSE_RD_DATA 0x619c
+#define A_DBG_EFUSE_TIME_0 0x61a0
+
+#define S_EFUSE_TIME_1 16
+#define M_EFUSE_TIME_1 0xffffU
+#define V_EFUSE_TIME_1(x) ((x) << S_EFUSE_TIME_1)
+#define G_EFUSE_TIME_1(x) (((x) >> S_EFUSE_TIME_1) & M_EFUSE_TIME_1)
+
+#define S_EFUSE_TIME_0 0
+#define M_EFUSE_TIME_0 0xffffU
+#define V_EFUSE_TIME_0(x) ((x) << S_EFUSE_TIME_0)
+#define G_EFUSE_TIME_0(x) (((x) >> S_EFUSE_TIME_0) & M_EFUSE_TIME_0)
+
+#define A_DBG_EFUSE_TIME_1 0x61a4
+
+#define S_EFUSE_TIME_3 16
+#define M_EFUSE_TIME_3 0xffffU
+#define V_EFUSE_TIME_3(x) ((x) << S_EFUSE_TIME_3)
+#define G_EFUSE_TIME_3(x) (((x) >> S_EFUSE_TIME_3) & M_EFUSE_TIME_3)
+
+#define S_EFUSE_TIME_2 0
+#define M_EFUSE_TIME_2 0xffffU
+#define V_EFUSE_TIME_2(x) ((x) << S_EFUSE_TIME_2)
+#define G_EFUSE_TIME_2(x) (((x) >> S_EFUSE_TIME_2) & M_EFUSE_TIME_2)
+
+#define A_DBG_EFUSE_TIME_2 0x61a8
+
+#define S_EFUSE_TIME_5 16
+#define M_EFUSE_TIME_5 0xffffU
+#define V_EFUSE_TIME_5(x) ((x) << S_EFUSE_TIME_5)
+#define G_EFUSE_TIME_5(x) (((x) >> S_EFUSE_TIME_5) & M_EFUSE_TIME_5)
+
+#define S_EFUSE_TIME_4 0
+#define M_EFUSE_TIME_4 0xffffU
+#define V_EFUSE_TIME_4(x) ((x) << S_EFUSE_TIME_4)
+#define G_EFUSE_TIME_4(x) (((x) >> S_EFUSE_TIME_4) & M_EFUSE_TIME_4)
+
+#define A_DBG_EFUSE_TIME_3 0x61ac
+
+#define S_EFUSE_TIME_7 16
+#define M_EFUSE_TIME_7 0xffffU
+#define V_EFUSE_TIME_7(x) ((x) << S_EFUSE_TIME_7)
+#define G_EFUSE_TIME_7(x) (((x) >> S_EFUSE_TIME_7) & M_EFUSE_TIME_7)
+
+#define S_EFUSE_TIME_6 0
+#define M_EFUSE_TIME_6 0xffffU
+#define V_EFUSE_TIME_6(x) ((x) << S_EFUSE_TIME_6)
+#define G_EFUSE_TIME_6(x) (((x) >> S_EFUSE_TIME_6) & M_EFUSE_TIME_6)
+
+#define A_DBG_VREF_CTL 0x61b0
+
+#define S_VREF_SEL_1 15
+#define V_VREF_SEL_1(x) ((x) << S_VREF_SEL_1)
+#define F_VREF_SEL_1 V_VREF_SEL_1(1U)
+
+#define S_VREF_R_1 8
+#define M_VREF_R_1 0x7fU
+#define V_VREF_R_1(x) ((x) << S_VREF_R_1)
+#define G_VREF_R_1(x) (((x) >> S_VREF_R_1) & M_VREF_R_1)
+
+#define S_VREF_SEL_0 7
+#define V_VREF_SEL_0(x) ((x) << S_VREF_SEL_0)
+#define F_VREF_SEL_0 V_VREF_SEL_0(1U)
+
+#define S_VREF_R_0 0
+#define M_VREF_R_0 0x7fU
+#define V_VREF_R_0(x) ((x) << S_VREF_R_0)
+#define G_VREF_R_0(x) (((x) >> S_VREF_R_0) & M_VREF_R_0)
+
+#define A_DBG_FPGA_EFUSE_CTL 0x61b4
+#define A_DBG_FPGA_EFUSE_DATA 0x61b8
/* registers for module MC */
#define MC_BASE_ADDR 0x6200
@@ -16048,31 +20095,91 @@
#define V_THRESHOLD0_EN(x) ((x) << S_THRESHOLD0_EN)
#define F_THRESHOLD0_EN V_THRESHOLD0_EN(1U)
+#define A_MA_CLIENT0_PR_THRESHOLD 0x7700
+
+#define S_T7_THRESHOLD1_EN 31
+#define V_T7_THRESHOLD1_EN(x) ((x) << S_T7_THRESHOLD1_EN)
+#define F_T7_THRESHOLD1_EN V_T7_THRESHOLD1_EN(1U)
+
+#define S_T7_THRESHOLD1 16
+#define M_T7_THRESHOLD1 0x7fffU
+#define V_T7_THRESHOLD1(x) ((x) << S_T7_THRESHOLD1)
+#define G_T7_THRESHOLD1(x) (((x) >> S_T7_THRESHOLD1) & M_T7_THRESHOLD1)
+
+#define S_T7_THRESHOLD0_EN 15
+#define V_T7_THRESHOLD0_EN(x) ((x) << S_T7_THRESHOLD0_EN)
+#define F_T7_THRESHOLD0_EN V_T7_THRESHOLD0_EN(1U)
+
+#define S_T7_THRESHOLD0 0
+#define M_T7_THRESHOLD0 0x7fffU
+#define V_T7_THRESHOLD0(x) ((x) << S_T7_THRESHOLD0)
+#define G_T7_THRESHOLD0(x) (((x) >> S_T7_THRESHOLD0) & M_T7_THRESHOLD0)
+
#define A_MA_CLIENT0_WR_LATENCY_THRESHOLD 0x7704
+#define A_MA_CLIENT0_CR_THRESHOLD 0x7704
+
+#define S_CREDITSHAPER_EN 31
+#define V_CREDITSHAPER_EN(x) ((x) << S_CREDITSHAPER_EN)
+#define F_CREDITSHAPER_EN V_CREDITSHAPER_EN(1U)
+
+#define S_CREDIT_MAX 16
+#define M_CREDIT_MAX 0xfffU
+#define V_CREDIT_MAX(x) ((x) << S_CREDIT_MAX)
+#define G_CREDIT_MAX(x) (((x) >> S_CREDIT_MAX) & M_CREDIT_MAX)
+
+#define S_CREDIT_VAL 0
+#define M_CREDIT_VAL 0xfffU
+#define V_CREDIT_VAL(x) ((x) << S_CREDIT_VAL)
+#define G_CREDIT_VAL(x) (((x) >> S_CREDIT_VAL) & M_CREDIT_VAL)
+
#define A_MA_CLIENT1_RD_LATENCY_THRESHOLD 0x7708
+#define A_MA_CLIENT1_PR_THRESHOLD 0x7708
#define A_MA_CLIENT1_WR_LATENCY_THRESHOLD 0x770c
+#define A_MA_CLIENT1_CR_THRESHOLD 0x770c
#define A_MA_CLIENT2_RD_LATENCY_THRESHOLD 0x7710
+#define A_MA_CLIENT2_PR_THRESHOLD 0x7710
#define A_MA_CLIENT2_WR_LATENCY_THRESHOLD 0x7714
+#define A_MA_CLIENT2_CR_THRESHOLD 0x7714
#define A_MA_CLIENT3_RD_LATENCY_THRESHOLD 0x7718
+#define A_MA_CLIENT3_PR_THRESHOLD 0x7718
#define A_MA_CLIENT3_WR_LATENCY_THRESHOLD 0x771c
+#define A_MA_CLIENT3_CR_THRESHOLD 0x771c
#define A_MA_CLIENT4_RD_LATENCY_THRESHOLD 0x7720
+#define A_MA_CLIENT4_PR_THRESHOLD 0x7720
#define A_MA_CLIENT4_WR_LATENCY_THRESHOLD 0x7724
+#define A_MA_CLIENT4_CR_THRESHOLD 0x7724
#define A_MA_CLIENT5_RD_LATENCY_THRESHOLD 0x7728
+#define A_MA_CLIENT5_PR_THRESHOLD 0x7728
#define A_MA_CLIENT5_WR_LATENCY_THRESHOLD 0x772c
+#define A_MA_CLIENT5_CR_THRESHOLD 0x772c
#define A_MA_CLIENT6_RD_LATENCY_THRESHOLD 0x7730
+#define A_MA_CLIENT6_PR_THRESHOLD 0x7730
#define A_MA_CLIENT6_WR_LATENCY_THRESHOLD 0x7734
+#define A_MA_CLIENT6_CR_THRESHOLD 0x7734
#define A_MA_CLIENT7_RD_LATENCY_THRESHOLD 0x7738
+#define A_MA_CLIENT7_PR_THRESHOLD 0x7738
#define A_MA_CLIENT7_WR_LATENCY_THRESHOLD 0x773c
+#define A_MA_CLIENT7_CR_THRESHOLD 0x773c
#define A_MA_CLIENT8_RD_LATENCY_THRESHOLD 0x7740
+#define A_MA_CLIENT8_PR_THRESHOLD 0x7740
#define A_MA_CLIENT8_WR_LATENCY_THRESHOLD 0x7744
+#define A_MA_CLIENT8_CR_THRESHOLD 0x7744
#define A_MA_CLIENT9_RD_LATENCY_THRESHOLD 0x7748
+#define A_MA_CLIENT9_PR_THRESHOLD 0x7748
#define A_MA_CLIENT9_WR_LATENCY_THRESHOLD 0x774c
+#define A_MA_CLIENT9_CR_THRESHOLD 0x774c
#define A_MA_CLIENT10_RD_LATENCY_THRESHOLD 0x7750
+#define A_MA_CLIENT10_PR_THRESHOLD 0x7750
#define A_MA_CLIENT10_WR_LATENCY_THRESHOLD 0x7754
+#define A_MA_CLIENT10_CR_THRESHOLD 0x7754
#define A_MA_CLIENT11_RD_LATENCY_THRESHOLD 0x7758
+#define A_MA_CLIENT11_PR_THRESHOLD 0x7758
#define A_MA_CLIENT11_WR_LATENCY_THRESHOLD 0x775c
+#define A_MA_CLIENT11_CR_THRESHOLD 0x775c
#define A_MA_CLIENT12_RD_LATENCY_THRESHOLD 0x7760
+#define A_MA_CLIENT12_PR_THRESHOLD 0x7760
#define A_MA_CLIENT12_WR_LATENCY_THRESHOLD 0x7764
+#define A_MA_CLIENT12_CR_THRESHOLD 0x7764
#define A_MA_SGE_TH0_DEBUG_CNT 0x7768
#define S_DBG_READ_DATA_CNT 24
@@ -16103,10 +20210,359 @@
#define A_MA_TP_TH1_DEBUG_CNT 0x7780
#define A_MA_LE_DEBUG_CNT 0x7784
#define A_MA_CIM_DEBUG_CNT 0x7788
+#define A_MA_CIM_TH0_DEBUG_CNT 0x7788
#define A_MA_PCIE_DEBUG_CNT 0x778c
#define A_MA_PMTX_DEBUG_CNT 0x7790
#define A_MA_PMRX_DEBUG_CNT 0x7794
#define A_MA_HMA_DEBUG_CNT 0x7798
+#define A_MA_COR_ERROR_ENABLE1 0x779c
+
+#define S_ARB4_COR_WRQUEUE_ERROR_EN 9
+#define V_ARB4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR_EN)
+#define F_ARB4_COR_WRQUEUE_ERROR_EN V_ARB4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR_EN 8
+#define V_ARB3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR_EN)
+#define F_ARB3_COR_WRQUEUE_ERROR_EN V_ARB3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR_EN 7
+#define V_ARB2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR_EN)
+#define F_ARB2_COR_WRQUEUE_ERROR_EN V_ARB2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR_EN 6
+#define V_ARB1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR_EN)
+#define F_ARB1_COR_WRQUEUE_ERROR_EN V_ARB1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR_EN 5
+#define V_ARB0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR_EN)
+#define F_ARB0_COR_WRQUEUE_ERROR_EN V_ARB0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR_EN 4
+#define V_ARB4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR_EN)
+#define F_ARB4_COR_RDQUEUE_ERROR_EN V_ARB4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR_EN 3
+#define V_ARB3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR_EN)
+#define F_ARB3_COR_RDQUEUE_ERROR_EN V_ARB3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR_EN 2
+#define V_ARB2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR_EN)
+#define F_ARB2_COR_RDQUEUE_ERROR_EN V_ARB2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR_EN 1
+#define V_ARB1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR_EN)
+#define F_ARB1_COR_RDQUEUE_ERROR_EN V_ARB1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR_EN 0
+#define V_ARB0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR_EN)
+#define F_ARB0_COR_RDQUEUE_ERROR_EN V_ARB0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS1 0x77a0
+
+#define S_ARB4_COR_WRQUEUE_ERROR 9
+#define V_ARB4_COR_WRQUEUE_ERROR(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR)
+#define F_ARB4_COR_WRQUEUE_ERROR V_ARB4_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR 8
+#define V_ARB3_COR_WRQUEUE_ERROR(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR)
+#define F_ARB3_COR_WRQUEUE_ERROR V_ARB3_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR 7
+#define V_ARB2_COR_WRQUEUE_ERROR(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR)
+#define F_ARB2_COR_WRQUEUE_ERROR V_ARB2_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR 6
+#define V_ARB1_COR_WRQUEUE_ERROR(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR)
+#define F_ARB1_COR_WRQUEUE_ERROR V_ARB1_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR 5
+#define V_ARB0_COR_WRQUEUE_ERROR(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR)
+#define F_ARB0_COR_WRQUEUE_ERROR V_ARB0_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR 4
+#define V_ARB4_COR_RDQUEUE_ERROR(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR)
+#define F_ARB4_COR_RDQUEUE_ERROR V_ARB4_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR 3
+#define V_ARB3_COR_RDQUEUE_ERROR(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR)
+#define F_ARB3_COR_RDQUEUE_ERROR V_ARB3_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR 2
+#define V_ARB2_COR_RDQUEUE_ERROR(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR)
+#define F_ARB2_COR_RDQUEUE_ERROR V_ARB2_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR 1
+#define V_ARB1_COR_RDQUEUE_ERROR(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR)
+#define F_ARB1_COR_RDQUEUE_ERROR V_ARB1_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR 0
+#define V_ARB0_COR_RDQUEUE_ERROR(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR)
+#define F_ARB0_COR_RDQUEUE_ERROR V_ARB0_COR_RDQUEUE_ERROR(1U)
+
+#define A_MA_DBG_CTL 0x77a4
+
+#define S_DATAH_SEL 20
+#define V_DATAH_SEL(x) ((x) << S_DATAH_SEL)
+#define F_DATAH_SEL V_DATAH_SEL(1U)
+
+#define S_EN_DBG 16
+#define V_EN_DBG(x) ((x) << S_EN_DBG)
+#define F_EN_DBG V_EN_DBG(1U)
+
+#define S_T7_SEL 0
+#define M_T7_SEL 0xffU
+#define V_T7_SEL(x) ((x) << S_T7_SEL)
+#define G_T7_SEL(x) (((x) >> S_T7_SEL) & M_T7_SEL)
+
+#define A_MA_DBG_DATA 0x77a8
+#define A_MA_COR_ERROR_ENABLE2 0x77b0
+
+#define S_CL14_COR_WRQUEUE_ERROR_EN 14
+#define V_CL14_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_WRQUEUE_ERROR_EN)
+#define F_CL14_COR_WRQUEUE_ERROR_EN V_CL14_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR_EN 13
+#define V_CL13_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_WRQUEUE_ERROR_EN)
+#define F_CL13_COR_WRQUEUE_ERROR_EN V_CL13_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR_EN 12
+#define V_CL12_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_WRQUEUE_ERROR_EN)
+#define F_CL12_COR_WRQUEUE_ERROR_EN V_CL12_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR_EN 11
+#define V_CL11_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_WRQUEUE_ERROR_EN)
+#define F_CL11_COR_WRQUEUE_ERROR_EN V_CL11_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR_EN 10
+#define V_CL10_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_WRQUEUE_ERROR_EN)
+#define F_CL10_COR_WRQUEUE_ERROR_EN V_CL10_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR_EN 9
+#define V_CL9_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_WRQUEUE_ERROR_EN)
+#define F_CL9_COR_WRQUEUE_ERROR_EN V_CL9_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR_EN 8
+#define V_CL8_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_WRQUEUE_ERROR_EN)
+#define F_CL8_COR_WRQUEUE_ERROR_EN V_CL8_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR_EN 7
+#define V_CL7_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_WRQUEUE_ERROR_EN)
+#define F_CL7_COR_WRQUEUE_ERROR_EN V_CL7_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR_EN 6
+#define V_CL6_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_WRQUEUE_ERROR_EN)
+#define F_CL6_COR_WRQUEUE_ERROR_EN V_CL6_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR_EN 5
+#define V_CL5_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_WRQUEUE_ERROR_EN)
+#define F_CL5_COR_WRQUEUE_ERROR_EN V_CL5_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR_EN 4
+#define V_CL4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_WRQUEUE_ERROR_EN)
+#define F_CL4_COR_WRQUEUE_ERROR_EN V_CL4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR_EN 3
+#define V_CL3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_WRQUEUE_ERROR_EN)
+#define F_CL3_COR_WRQUEUE_ERROR_EN V_CL3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR_EN 2
+#define V_CL2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_WRQUEUE_ERROR_EN)
+#define F_CL2_COR_WRQUEUE_ERROR_EN V_CL2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR_EN 1
+#define V_CL1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_WRQUEUE_ERROR_EN)
+#define F_CL1_COR_WRQUEUE_ERROR_EN V_CL1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR_EN 0
+#define V_CL0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_WRQUEUE_ERROR_EN)
+#define F_CL0_COR_WRQUEUE_ERROR_EN V_CL0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS2 0x77b4
+
+#define S_CL14_COR_WRQUEUE_ERROR 14
+#define V_CL14_COR_WRQUEUE_ERROR(x) ((x) << S_CL14_COR_WRQUEUE_ERROR)
+#define F_CL14_COR_WRQUEUE_ERROR V_CL14_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR 13
+#define V_CL13_COR_WRQUEUE_ERROR(x) ((x) << S_CL13_COR_WRQUEUE_ERROR)
+#define F_CL13_COR_WRQUEUE_ERROR V_CL13_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR 12
+#define V_CL12_COR_WRQUEUE_ERROR(x) ((x) << S_CL12_COR_WRQUEUE_ERROR)
+#define F_CL12_COR_WRQUEUE_ERROR V_CL12_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR 11
+#define V_CL11_COR_WRQUEUE_ERROR(x) ((x) << S_CL11_COR_WRQUEUE_ERROR)
+#define F_CL11_COR_WRQUEUE_ERROR V_CL11_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR 10
+#define V_CL10_COR_WRQUEUE_ERROR(x) ((x) << S_CL10_COR_WRQUEUE_ERROR)
+#define F_CL10_COR_WRQUEUE_ERROR V_CL10_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR 9
+#define V_CL9_COR_WRQUEUE_ERROR(x) ((x) << S_CL9_COR_WRQUEUE_ERROR)
+#define F_CL9_COR_WRQUEUE_ERROR V_CL9_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR 8
+#define V_CL8_COR_WRQUEUE_ERROR(x) ((x) << S_CL8_COR_WRQUEUE_ERROR)
+#define F_CL8_COR_WRQUEUE_ERROR V_CL8_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR 7
+#define V_CL7_COR_WRQUEUE_ERROR(x) ((x) << S_CL7_COR_WRQUEUE_ERROR)
+#define F_CL7_COR_WRQUEUE_ERROR V_CL7_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR 6
+#define V_CL6_COR_WRQUEUE_ERROR(x) ((x) << S_CL6_COR_WRQUEUE_ERROR)
+#define F_CL6_COR_WRQUEUE_ERROR V_CL6_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR 5
+#define V_CL5_COR_WRQUEUE_ERROR(x) ((x) << S_CL5_COR_WRQUEUE_ERROR)
+#define F_CL5_COR_WRQUEUE_ERROR V_CL5_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR 4
+#define V_CL4_COR_WRQUEUE_ERROR(x) ((x) << S_CL4_COR_WRQUEUE_ERROR)
+#define F_CL4_COR_WRQUEUE_ERROR V_CL4_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR 3
+#define V_CL3_COR_WRQUEUE_ERROR(x) ((x) << S_CL3_COR_WRQUEUE_ERROR)
+#define F_CL3_COR_WRQUEUE_ERROR V_CL3_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR 2
+#define V_CL2_COR_WRQUEUE_ERROR(x) ((x) << S_CL2_COR_WRQUEUE_ERROR)
+#define F_CL2_COR_WRQUEUE_ERROR V_CL2_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR 1
+#define V_CL1_COR_WRQUEUE_ERROR(x) ((x) << S_CL1_COR_WRQUEUE_ERROR)
+#define F_CL1_COR_WRQUEUE_ERROR V_CL1_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR 0
+#define V_CL0_COR_WRQUEUE_ERROR(x) ((x) << S_CL0_COR_WRQUEUE_ERROR)
+#define F_CL0_COR_WRQUEUE_ERROR V_CL0_COR_WRQUEUE_ERROR(1U)
+
+#define A_MA_COR_ERROR_ENABLE3 0x77b8
+
+#define S_CL14_COR_RDQUEUE_ERROR_EN 14
+#define V_CL14_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_RDQUEUE_ERROR_EN)
+#define F_CL14_COR_RDQUEUE_ERROR_EN V_CL14_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR_EN 13
+#define V_CL13_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_RDQUEUE_ERROR_EN)
+#define F_CL13_COR_RDQUEUE_ERROR_EN V_CL13_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR_EN 12
+#define V_CL12_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_RDQUEUE_ERROR_EN)
+#define F_CL12_COR_RDQUEUE_ERROR_EN V_CL12_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR_EN 11
+#define V_CL11_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_RDQUEUE_ERROR_EN)
+#define F_CL11_COR_RDQUEUE_ERROR_EN V_CL11_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR_EN 10
+#define V_CL10_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_RDQUEUE_ERROR_EN)
+#define F_CL10_COR_RDQUEUE_ERROR_EN V_CL10_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR_EN 9
+#define V_CL9_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_RDQUEUE_ERROR_EN)
+#define F_CL9_COR_RDQUEUE_ERROR_EN V_CL9_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR_EN 8
+#define V_CL8_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_RDQUEUE_ERROR_EN)
+#define F_CL8_COR_RDQUEUE_ERROR_EN V_CL8_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR_EN 7
+#define V_CL7_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_RDQUEUE_ERROR_EN)
+#define F_CL7_COR_RDQUEUE_ERROR_EN V_CL7_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR_EN 6
+#define V_CL6_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_RDQUEUE_ERROR_EN)
+#define F_CL6_COR_RDQUEUE_ERROR_EN V_CL6_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR_EN 5
+#define V_CL5_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_RDQUEUE_ERROR_EN)
+#define F_CL5_COR_RDQUEUE_ERROR_EN V_CL5_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR_EN 4
+#define V_CL4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_RDQUEUE_ERROR_EN)
+#define F_CL4_COR_RDQUEUE_ERROR_EN V_CL4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR_EN 3
+#define V_CL3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_RDQUEUE_ERROR_EN)
+#define F_CL3_COR_RDQUEUE_ERROR_EN V_CL3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR_EN 2
+#define V_CL2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_RDQUEUE_ERROR_EN)
+#define F_CL2_COR_RDQUEUE_ERROR_EN V_CL2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR_EN 1
+#define V_CL1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_RDQUEUE_ERROR_EN)
+#define F_CL1_COR_RDQUEUE_ERROR_EN V_CL1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR_EN 0
+#define V_CL0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_RDQUEUE_ERROR_EN)
+#define F_CL0_COR_RDQUEUE_ERROR_EN V_CL0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS3 0x77bc
+
+#define S_CL14_COR_RDQUEUE_ERROR 14
+#define V_CL14_COR_RDQUEUE_ERROR(x) ((x) << S_CL14_COR_RDQUEUE_ERROR)
+#define F_CL14_COR_RDQUEUE_ERROR V_CL14_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR 13
+#define V_CL13_COR_RDQUEUE_ERROR(x) ((x) << S_CL13_COR_RDQUEUE_ERROR)
+#define F_CL13_COR_RDQUEUE_ERROR V_CL13_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR 12
+#define V_CL12_COR_RDQUEUE_ERROR(x) ((x) << S_CL12_COR_RDQUEUE_ERROR)
+#define F_CL12_COR_RDQUEUE_ERROR V_CL12_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR 11
+#define V_CL11_COR_RDQUEUE_ERROR(x) ((x) << S_CL11_COR_RDQUEUE_ERROR)
+#define F_CL11_COR_RDQUEUE_ERROR V_CL11_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR 10
+#define V_CL10_COR_RDQUEUE_ERROR(x) ((x) << S_CL10_COR_RDQUEUE_ERROR)
+#define F_CL10_COR_RDQUEUE_ERROR V_CL10_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR 9
+#define V_CL9_COR_RDQUEUE_ERROR(x) ((x) << S_CL9_COR_RDQUEUE_ERROR)
+#define F_CL9_COR_RDQUEUE_ERROR V_CL9_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR 8
+#define V_CL8_COR_RDQUEUE_ERROR(x) ((x) << S_CL8_COR_RDQUEUE_ERROR)
+#define F_CL8_COR_RDQUEUE_ERROR V_CL8_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR 7
+#define V_CL7_COR_RDQUEUE_ERROR(x) ((x) << S_CL7_COR_RDQUEUE_ERROR)
+#define F_CL7_COR_RDQUEUE_ERROR V_CL7_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR 6
+#define V_CL6_COR_RDQUEUE_ERROR(x) ((x) << S_CL6_COR_RDQUEUE_ERROR)
+#define F_CL6_COR_RDQUEUE_ERROR V_CL6_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR 5
+#define V_CL5_COR_RDQUEUE_ERROR(x) ((x) << S_CL5_COR_RDQUEUE_ERROR)
+#define F_CL5_COR_RDQUEUE_ERROR V_CL5_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR 4
+#define V_CL4_COR_RDQUEUE_ERROR(x) ((x) << S_CL4_COR_RDQUEUE_ERROR)
+#define F_CL4_COR_RDQUEUE_ERROR V_CL4_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR 3
+#define V_CL3_COR_RDQUEUE_ERROR(x) ((x) << S_CL3_COR_RDQUEUE_ERROR)
+#define F_CL3_COR_RDQUEUE_ERROR V_CL3_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR 2
+#define V_CL2_COR_RDQUEUE_ERROR(x) ((x) << S_CL2_COR_RDQUEUE_ERROR)
+#define F_CL2_COR_RDQUEUE_ERROR V_CL2_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR 1
+#define V_CL1_COR_RDQUEUE_ERROR(x) ((x) << S_CL1_COR_RDQUEUE_ERROR)
+#define F_CL1_COR_RDQUEUE_ERROR V_CL1_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR 0
+#define V_CL0_COR_RDQUEUE_ERROR(x) ((x) << S_CL0_COR_RDQUEUE_ERROR)
+#define F_CL0_COR_RDQUEUE_ERROR V_CL0_COR_RDQUEUE_ERROR(1U)
+
#define A_MA_EDRAM0_BAR 0x77c0
#define S_EDRAM0_BASE 16
@@ -16119,6 +20575,16 @@
#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+#define S_T7_EDRAM0_BASE 16
+#define M_T7_EDRAM0_BASE 0xffffU
+#define V_T7_EDRAM0_BASE(x) ((x) << S_T7_EDRAM0_BASE)
+#define G_T7_EDRAM0_BASE(x) (((x) >> S_T7_EDRAM0_BASE) & M_T7_EDRAM0_BASE)
+
+#define S_T7_EDRAM0_SIZE 0
+#define M_T7_EDRAM0_SIZE 0xffffU
+#define V_T7_EDRAM0_SIZE(x) ((x) << S_T7_EDRAM0_SIZE)
+#define G_T7_EDRAM0_SIZE(x) (((x) >> S_T7_EDRAM0_SIZE) & M_T7_EDRAM0_SIZE)
+
#define A_MA_EDRAM1_BAR 0x77c4
#define S_EDRAM1_BASE 16
@@ -16131,6 +20597,16 @@
#define V_EDRAM1_SIZE(x) ((x) << S_EDRAM1_SIZE)
#define G_EDRAM1_SIZE(x) (((x) >> S_EDRAM1_SIZE) & M_EDRAM1_SIZE)
+#define S_T7_EDRAM1_BASE 16
+#define M_T7_EDRAM1_BASE 0xffffU
+#define V_T7_EDRAM1_BASE(x) ((x) << S_T7_EDRAM1_BASE)
+#define G_T7_EDRAM1_BASE(x) (((x) >> S_T7_EDRAM1_BASE) & M_T7_EDRAM1_BASE)
+
+#define S_T7_EDRAM1_SIZE 0
+#define M_T7_EDRAM1_SIZE 0xffffU
+#define V_T7_EDRAM1_SIZE(x) ((x) << S_T7_EDRAM1_SIZE)
+#define G_T7_EDRAM1_SIZE(x) (((x) >> S_T7_EDRAM1_SIZE) & M_T7_EDRAM1_SIZE)
+
#define A_MA_EXT_MEMORY_BAR 0x77c8
#define S_EXT_MEM_BASE 16
@@ -16155,6 +20631,16 @@
#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+#define S_T7_EXT_MEM0_BASE 16
+#define M_T7_EXT_MEM0_BASE 0xffffU
+#define V_T7_EXT_MEM0_BASE(x) ((x) << S_T7_EXT_MEM0_BASE)
+#define G_T7_EXT_MEM0_BASE(x) (((x) >> S_T7_EXT_MEM0_BASE) & M_T7_EXT_MEM0_BASE)
+
+#define S_T7_EXT_MEM0_SIZE 0
+#define M_T7_EXT_MEM0_SIZE 0xffffU
+#define V_T7_EXT_MEM0_SIZE(x) ((x) << S_T7_EXT_MEM0_SIZE)
+#define G_T7_EXT_MEM0_SIZE(x) (((x) >> S_T7_EXT_MEM0_SIZE) & M_T7_EXT_MEM0_SIZE)
+
#define A_MA_HOST_MEMORY_BAR 0x77cc
#define S_HMA_BASE 16
@@ -16167,6 +20653,16 @@
#define V_HMA_SIZE(x) ((x) << S_HMA_SIZE)
#define G_HMA_SIZE(x) (((x) >> S_HMA_SIZE) & M_HMA_SIZE)
+#define S_HMATARGETBASE 16
+#define M_HMATARGETBASE 0xffffU
+#define V_HMATARGETBASE(x) ((x) << S_HMATARGETBASE)
+#define G_HMATARGETBASE(x) (((x) >> S_HMATARGETBASE) & M_HMATARGETBASE)
+
+#define S_T7_HMA_SIZE 0
+#define M_T7_HMA_SIZE 0xffffU
+#define V_T7_HMA_SIZE(x) ((x) << S_T7_HMA_SIZE)
+#define G_T7_HMA_SIZE(x) (((x) >> S_T7_HMA_SIZE) & M_T7_HMA_SIZE)
+
#define A_MA_EXT_MEM_PAGE_SIZE 0x77d0
#define S_BRC_MODE 2
@@ -16290,6 +20786,14 @@
#define V_MC_SPLIT(x) ((x) << S_MC_SPLIT)
#define F_MC_SPLIT V_MC_SPLIT(1U)
+#define S_EDC512 8
+#define V_EDC512(x) ((x) << S_EDC512)
+#define F_EDC512 V_EDC512(1U)
+
+#define S_MC_SPLIT_BOUNDARY 7
+#define V_MC_SPLIT_BOUNDARY(x) ((x) << S_MC_SPLIT_BOUNDARY)
+#define F_MC_SPLIT_BOUNDARY V_MC_SPLIT_BOUNDARY(1U)
+
#define A_MA_INT_ENABLE 0x77dc
#define S_MEM_PERR_INT_ENABLE 1
@@ -16475,6 +20979,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR_EN V_CL0_PAR_RDQUEUE_ERROR_EN(1U)
#define A_MA_PARITY_ERROR_ENABLE1 0x77f0
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR_EN 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR_EN V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR_EN V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR_EN V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR_EN V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR_EN V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR_EN 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR_EN V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR_EN 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR_EN V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR_EN 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR_EN V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR_EN 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR_EN V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR_EN 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR_EN V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR_EN 1
+#define V_T7_TP_DMARBT_PAR_ERROR_EN(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR_EN)
+#define F_T7_TP_DMARBT_PAR_ERROR_EN V_T7_TP_DMARBT_PAR_ERROR_EN(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR_EN 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR_EN(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR_EN)
+#define F_T7_LOGIC_FIFO_PAR_ERROR_EN V_T7_LOGIC_FIFO_PAR_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS 0x77f4
#define S_TP_DMARBT_PAR_ERROR 31
@@ -16606,6 +21159,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR V_CL0_PAR_RDQUEUE_ERROR(1U)
#define A_MA_PARITY_ERROR_STATUS1 0x77f4
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR V_T7_ARB4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR V_T7_ARB3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR V_T7_ARB2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR V_T7_ARB1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR V_T7_ARB0_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR V_T7_ARB4_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR V_T7_ARB3_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR V_T7_ARB2_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR V_T7_ARB1_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR V_T7_ARB0_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR 1
+#define V_T7_TP_DMARBT_PAR_ERROR(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR)
+#define F_T7_TP_DMARBT_PAR_ERROR V_T7_TP_DMARBT_PAR_ERROR(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR)
+#define F_T7_LOGIC_FIFO_PAR_ERROR V_T7_LOGIC_FIFO_PAR_ERROR(1U)
+
#define A_MA_SGE_PCIE_COHERANCY_CTRL 0x77f8
#define S_BONUS_REG 6
@@ -16653,6 +21255,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR_EN)
#define F_ARB4_PAR_RDQUEUE_ERROR_EN V_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR_EN 14
+#define V_CL14_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR_EN)
+#define F_CL14_PAR_WRQUEUE_ERROR_EN V_CL14_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR_EN 13
+#define V_CL13_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR_EN)
+#define F_CL13_PAR_WRQUEUE_ERROR_EN V_CL13_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR_EN 12
+#define V_CL12_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR_EN)
+#define F_CL12_PAR_WRQUEUE_ERROR_EN V_CL12_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR_EN 11
+#define V_CL11_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR_EN)
+#define F_CL11_PAR_WRQUEUE_ERROR_EN V_CL11_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR_EN V_T7_CL10_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR_EN V_T7_CL9_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR_EN V_T7_CL8_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR_EN V_T7_CL7_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR_EN 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR_EN V_T7_CL6_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR_EN 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR_EN V_T7_CL5_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR_EN 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR_EN V_T7_CL4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR_EN 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR_EN V_T7_CL3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR_EN 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR_EN V_T7_CL2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR_EN 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR_EN V_T7_CL1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR_EN 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR_EN V_T7_CL0_PAR_WRQUEUE_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS2 0x7804
#define S_ARB4_PAR_WRQUEUE_ERROR 1
@@ -16663,6 +21325,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR)
#define F_ARB4_PAR_RDQUEUE_ERROR V_ARB4_PAR_RDQUEUE_ERROR(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR 14
+#define V_CL14_PAR_WRQUEUE_ERROR(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR)
+#define F_CL14_PAR_WRQUEUE_ERROR V_CL14_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR 13
+#define V_CL13_PAR_WRQUEUE_ERROR(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR)
+#define F_CL13_PAR_WRQUEUE_ERROR V_CL13_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR 12
+#define V_CL12_PAR_WRQUEUE_ERROR(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR)
+#define F_CL12_PAR_WRQUEUE_ERROR V_CL12_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR 11
+#define V_CL11_PAR_WRQUEUE_ERROR(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR)
+#define F_CL11_PAR_WRQUEUE_ERROR V_CL11_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR V_T7_CL10_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR V_T7_CL9_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR V_T7_CL8_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR V_T7_CL7_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR V_T7_CL6_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR V_T7_CL5_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR V_T7_CL4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR V_T7_CL3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR V_T7_CL2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR V_T7_CL1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR V_T7_CL0_PAR_WRQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY1_BAR 0x7808
#define S_EXT_MEM1_BASE 16
@@ -16675,6 +21397,16 @@
#define V_EXT_MEM1_SIZE(x) ((x) << S_EXT_MEM1_SIZE)
#define G_EXT_MEM1_SIZE(x) (((x) >> S_EXT_MEM1_SIZE) & M_EXT_MEM1_SIZE)
+#define S_T7_EXT_MEM1_BASE 16
+#define M_T7_EXT_MEM1_BASE 0xffffU
+#define V_T7_EXT_MEM1_BASE(x) ((x) << S_T7_EXT_MEM1_BASE)
+#define G_T7_EXT_MEM1_BASE(x) (((x) >> S_T7_EXT_MEM1_BASE) & M_T7_EXT_MEM1_BASE)
+
+#define S_T7_EXT_MEM1_SIZE 0
+#define M_T7_EXT_MEM1_SIZE 0xffffU
+#define V_T7_EXT_MEM1_SIZE(x) ((x) << S_T7_EXT_MEM1_SIZE)
+#define G_T7_EXT_MEM1_SIZE(x) (((x) >> S_T7_EXT_MEM1_SIZE) & M_T7_EXT_MEM1_SIZE)
+
#define A_MA_PMTX_THROTTLE 0x780c
#define S_FL_ENABLE 31
@@ -16696,6 +21428,7 @@
#define A_MA_TP_TH1_WRDATA_CNT 0x782c
#define A_MA_LE_WRDATA_CNT 0x7830
#define A_MA_CIM_WRDATA_CNT 0x7834
+#define A_MA_CIM_TH0_WRDATA_CNT 0x7834
#define A_MA_PCIE_WRDATA_CNT 0x7838
#define A_MA_PMTX_WRDATA_CNT 0x783c
#define A_MA_PMRX_WRDATA_CNT 0x7840
@@ -16709,6 +21442,7 @@
#define A_MA_TP_TH1_RDDATA_CNT 0x7860
#define A_MA_LE_RDDATA_CNT 0x7864
#define A_MA_CIM_RDDATA_CNT 0x7868
+#define A_MA_CIM_TH0_RDDATA_CNT 0x7868
#define A_MA_PCIE_RDDATA_CNT 0x786c
#define A_MA_PMTX_RDDATA_CNT 0x7870
#define A_MA_PMRX_RDDATA_CNT 0x7874
@@ -16733,7 +21467,43 @@
#define F_DDR_MODE V_DDR_MODE(1U)
#define A_MA_EDRAM1_WRDATA_CNT1 0x7884
+#define A_MA_PARITY_ERROR_ENABLE3 0x7884
+
+#define S_CL14_PAR_RDQUEUE_ERROR_EN 14
+#define V_CL14_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR_EN)
+#define F_CL14_PAR_RDQUEUE_ERROR_EN V_CL14_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR_EN 13
+#define V_CL13_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR_EN)
+#define F_CL13_PAR_RDQUEUE_ERROR_EN V_CL13_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR_EN 12
+#define V_CL12_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR_EN)
+#define F_CL12_PAR_RDQUEUE_ERROR_EN V_CL12_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR_EN 11
+#define V_CL11_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR_EN)
+#define F_CL11_PAR_RDQUEUE_ERROR_EN V_CL11_PAR_RDQUEUE_ERROR_EN(1U)
+
#define A_MA_EDRAM1_WRDATA_CNT0 0x7888
+#define A_MA_PARITY_ERROR_STATUS3 0x7888
+
+#define S_CL14_PAR_RDQUEUE_ERROR 14
+#define V_CL14_PAR_RDQUEUE_ERROR(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR)
+#define F_CL14_PAR_RDQUEUE_ERROR V_CL14_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR 13
+#define V_CL13_PAR_RDQUEUE_ERROR(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR)
+#define F_CL13_PAR_RDQUEUE_ERROR V_CL13_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR 12
+#define V_CL12_PAR_RDQUEUE_ERROR(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR)
+#define F_CL12_PAR_RDQUEUE_ERROR V_CL12_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR 11
+#define V_CL11_PAR_RDQUEUE_ERROR(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR)
+#define F_CL11_PAR_RDQUEUE_ERROR V_CL11_PAR_RDQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY0_WRDATA_CNT1 0x788c
#define A_MA_EXT_MEMORY0_WRDATA_CNT0 0x7890
#define A_MA_HOST_MEMORY_WRDATA_CNT1 0x7894
@@ -16915,6 +21685,30 @@
#define V_FUTURE_DEXPANSION_WTE(x) ((x) << S_FUTURE_DEXPANSION_WTE)
#define G_FUTURE_DEXPANSION_WTE(x) (((x) >> S_FUTURE_DEXPANSION_WTE) & M_FUTURE_DEXPANSION_WTE)
+#define S_T7_FUTURE_CEXPANSION_WTE 31
+#define V_T7_FUTURE_CEXPANSION_WTE(x) ((x) << S_T7_FUTURE_CEXPANSION_WTE)
+#define F_T7_FUTURE_CEXPANSION_WTE V_T7_FUTURE_CEXPANSION_WTE(1U)
+
+#define S_CL14_WR_CMD_TO_EN 30
+#define V_CL14_WR_CMD_TO_EN(x) ((x) << S_CL14_WR_CMD_TO_EN)
+#define F_CL14_WR_CMD_TO_EN V_CL14_WR_CMD_TO_EN(1U)
+
+#define S_CL13_WR_CMD_TO_EN 29
+#define V_CL13_WR_CMD_TO_EN(x) ((x) << S_CL13_WR_CMD_TO_EN)
+#define F_CL13_WR_CMD_TO_EN V_CL13_WR_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTE 15
+#define V_T7_FUTURE_DEXPANSION_WTE(x) ((x) << S_T7_FUTURE_DEXPANSION_WTE)
+#define F_T7_FUTURE_DEXPANSION_WTE V_T7_FUTURE_DEXPANSION_WTE(1U)
+
+#define S_CL14_WR_DATA_TO_EN 14
+#define V_CL14_WR_DATA_TO_EN(x) ((x) << S_CL14_WR_DATA_TO_EN)
+#define F_CL14_WR_DATA_TO_EN V_CL14_WR_DATA_TO_EN(1U)
+
+#define S_CL13_WR_DATA_TO_EN 13
+#define V_CL13_WR_DATA_TO_EN(x) ((x) << S_CL13_WR_DATA_TO_EN)
+#define F_CL13_WR_DATA_TO_EN V_CL13_WR_DATA_TO_EN(1U)
+
#define A_MA_WRITE_TIMEOUT_ERROR_STATUS 0x78d8
#define S_CL12_WR_CMD_TO_ERROR 28
@@ -17031,6 +21825,30 @@
#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS)
#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS)
+#define S_T7_FUTURE_CEXPANSION_WTS 31
+#define V_T7_FUTURE_CEXPANSION_WTS(x) ((x) << S_T7_FUTURE_CEXPANSION_WTS)
+#define F_T7_FUTURE_CEXPANSION_WTS V_T7_FUTURE_CEXPANSION_WTS(1U)
+
+#define S_CL14_WR_CMD_TO_ERROR 30
+#define V_CL14_WR_CMD_TO_ERROR(x) ((x) << S_CL14_WR_CMD_TO_ERROR)
+#define F_CL14_WR_CMD_TO_ERROR V_CL14_WR_CMD_TO_ERROR(1U)
+
+#define S_CL13_WR_CMD_TO_ERROR 29
+#define V_CL13_WR_CMD_TO_ERROR(x) ((x) << S_CL13_WR_CMD_TO_ERROR)
+#define F_CL13_WR_CMD_TO_ERROR V_CL13_WR_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTS 15
+#define V_T7_FUTURE_DEXPANSION_WTS(x) ((x) << S_T7_FUTURE_DEXPANSION_WTS)
+#define F_T7_FUTURE_DEXPANSION_WTS V_T7_FUTURE_DEXPANSION_WTS(1U)
+
+#define S_CL14_WR_DATA_TO_ERROR 14
+#define V_CL14_WR_DATA_TO_ERROR(x) ((x) << S_CL14_WR_DATA_TO_ERROR)
+#define F_CL14_WR_DATA_TO_ERROR V_CL14_WR_DATA_TO_ERROR(1U)
+
+#define S_CL13_WR_DATA_TO_ERROR 13
+#define V_CL13_WR_DATA_TO_ERROR(x) ((x) << S_CL13_WR_DATA_TO_ERROR)
+#define F_CL13_WR_DATA_TO_ERROR V_CL13_WR_DATA_TO_ERROR(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_ENABLE 0x78dc
#define S_CL12_RD_CMD_TO_EN 28
@@ -17147,6 +21965,30 @@
#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE)
#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE)
+#define S_T7_FUTURE_CEXPANSION_RTE 31
+#define V_T7_FUTURE_CEXPANSION_RTE(x) ((x) << S_T7_FUTURE_CEXPANSION_RTE)
+#define F_T7_FUTURE_CEXPANSION_RTE V_T7_FUTURE_CEXPANSION_RTE(1U)
+
+#define S_CL14_RD_CMD_TO_EN 30
+#define V_CL14_RD_CMD_TO_EN(x) ((x) << S_CL14_RD_CMD_TO_EN)
+#define F_CL14_RD_CMD_TO_EN V_CL14_RD_CMD_TO_EN(1U)
+
+#define S_CL13_RD_CMD_TO_EN 29
+#define V_CL13_RD_CMD_TO_EN(x) ((x) << S_CL13_RD_CMD_TO_EN)
+#define F_CL13_RD_CMD_TO_EN V_CL13_RD_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTE 15
+#define V_T7_FUTURE_DEXPANSION_RTE(x) ((x) << S_T7_FUTURE_DEXPANSION_RTE)
+#define F_T7_FUTURE_DEXPANSION_RTE V_T7_FUTURE_DEXPANSION_RTE(1U)
+
+#define S_CL14_RD_DATA_TO_EN 14
+#define V_CL14_RD_DATA_TO_EN(x) ((x) << S_CL14_RD_DATA_TO_EN)
+#define F_CL14_RD_DATA_TO_EN V_CL14_RD_DATA_TO_EN(1U)
+
+#define S_CL13_RD_DATA_TO_EN 13
+#define V_CL13_RD_DATA_TO_EN(x) ((x) << S_CL13_RD_DATA_TO_EN)
+#define F_CL13_RD_DATA_TO_EN V_CL13_RD_DATA_TO_EN(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_STATUS 0x78e0
#define S_CL12_RD_CMD_TO_ERROR 28
@@ -17263,6 +22105,27 @@
#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS)
#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS)
+#define S_T7_FUTURE_CEXPANSION_RTS 31
+#define V_T7_FUTURE_CEXPANSION_RTS(x) ((x) << S_T7_FUTURE_CEXPANSION_RTS)
+#define F_T7_FUTURE_CEXPANSION_RTS V_T7_FUTURE_CEXPANSION_RTS(1U)
+
+#define S_CL14_RD_CMD_TO_ERROR 30
+#define V_CL14_RD_CMD_TO_ERROR(x) ((x) << S_CL14_RD_CMD_TO_ERROR)
+#define F_CL14_RD_CMD_TO_ERROR V_CL14_RD_CMD_TO_ERROR(1U)
+
+#define S_CL13_RD_CMD_TO_ERROR 29
+#define V_CL13_RD_CMD_TO_ERROR(x) ((x) << S_CL13_RD_CMD_TO_ERROR)
+#define F_CL13_RD_CMD_TO_ERROR V_CL13_RD_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTS 14
+#define M_T7_FUTURE_DEXPANSION_RTS 0x3U
+#define V_T7_FUTURE_DEXPANSION_RTS(x) ((x) << S_T7_FUTURE_DEXPANSION_RTS)
+#define G_T7_FUTURE_DEXPANSION_RTS(x) (((x) >> S_T7_FUTURE_DEXPANSION_RTS) & M_T7_FUTURE_DEXPANSION_RTS)
+
+#define S_CL13_RD_DATA_TO_ERROR 13
+#define V_CL13_RD_DATA_TO_ERROR(x) ((x) << S_CL13_RD_DATA_TO_ERROR)
+#define F_CL13_RD_DATA_TO_ERROR V_CL13_RD_DATA_TO_ERROR(1U)
+
#define A_MA_BKP_CNT_SEL 0x78e4
#define S_BKP_CNT_TYPE 30
@@ -17361,12 +22224,16 @@
#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE)
#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE)
-#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
+#define S_T7_FUTURE_DEXPANSION_IPE 14
+#define M_T7_FUTURE_DEXPANSION_IPE 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPE(x) ((x) << S_T7_FUTURE_DEXPANSION_IPE)
+#define G_T7_FUTURE_DEXPANSION_IPE(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPE) & M_T7_FUTURE_DEXPANSION_IPE)
-#define S_T5_FUTURE_DEXPANSION 13
-#define M_T5_FUTURE_DEXPANSION 0x7ffffU
-#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION)
-#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION)
+#define S_CL13_IF_PAR_EN 13
+#define V_CL13_IF_PAR_EN(x) ((x) << S_CL13_IF_PAR_EN)
+#define F_CL13_IF_PAR_EN V_CL13_IF_PAR_EN(1U)
+
+#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
#define S_CL12_IF_PAR_ERROR 12
#define V_CL12_IF_PAR_ERROR(x) ((x) << S_CL12_IF_PAR_ERROR)
@@ -17425,6 +22292,15 @@
#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS)
#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS)
+#define S_T7_FUTURE_DEXPANSION_IPS 14
+#define M_T7_FUTURE_DEXPANSION_IPS 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPS(x) ((x) << S_T7_FUTURE_DEXPANSION_IPS)
+#define G_T7_FUTURE_DEXPANSION_IPS(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPS) & M_T7_FUTURE_DEXPANSION_IPS)
+
+#define S_CL13_IF_PAR_ERROR 13
+#define V_CL13_IF_PAR_ERROR(x) ((x) << S_CL13_IF_PAR_ERROR)
+#define F_CL13_IF_PAR_ERROR V_CL13_IF_PAR_ERROR(1U)
+
#define A_MA_LOCAL_DEBUG_CFG 0x78f8
#define S_DEBUG_OR 15
@@ -17445,6 +22321,131 @@
#define G_DEBUGPAGE(x) (((x) >> S_DEBUGPAGE) & M_DEBUGPAGE)
#define A_MA_LOCAL_DEBUG_RPT 0x78fc
+#define A_MA_CLIENT13_PR_THRESHOLD 0x7900
+#define A_MA_CLIENT13_CR_THRESHOLD 0x7904
+#define A_MA_CRYPTO_DEBUG_CNT 0x7908
+#define A_MA_CRYPTO_WRDATA_CNT 0x790c
+#define A_MA_CRYPTO_RDDATA_CNT 0x7910
+#define A_MA_LOCAL_DEBUG_PERF_CFG 0x7914
+#define A_MA_LOCAL_DEBUG_PERF_RPT 0x7918
+#define A_MA_PCIE_THROTTLE 0x791c
+#define A_MA_CLIENT14_PR_THRESHOLD 0x7920
+#define A_MA_CLIENT14_CR_THRESHOLD 0x7924
+#define A_MA_CIM_TH1_DEBUG_CNT 0x7928
+#define A_MA_CIM_TH1_WRDATA_CNT 0x792c
+#define A_MA_CIM_TH1_RDDATA_CNT 0x7930
+#define A_MA_CIM_THREAD1_MAPPER 0x7934
+
+#define S_CIM_THREAD1_EN 0
+#define M_CIM_THREAD1_EN 0xffU
+#define V_CIM_THREAD1_EN(x) ((x) << S_CIM_THREAD1_EN)
+#define G_CIM_THREAD1_EN(x) (((x) >> S_CIM_THREAD1_EN) & M_CIM_THREAD1_EN)
+
+#define A_MA_PIO_CI_SGE_TH0_BASE 0x7938
+
+#define S_SGE_TH0_BASE 0
+#define M_SGE_TH0_BASE 0xffffU
+#define V_SGE_TH0_BASE(x) ((x) << S_SGE_TH0_BASE)
+#define G_SGE_TH0_BASE(x) (((x) >> S_SGE_TH0_BASE) & M_SGE_TH0_BASE)
+
+#define A_MA_PIO_CI_SGE_TH1_BASE 0x793c
+
+#define S_SGE_TH1_BASE 0
+#define M_SGE_TH1_BASE 0xffffU
+#define V_SGE_TH1_BASE(x) ((x) << S_SGE_TH1_BASE)
+#define G_SGE_TH1_BASE(x) (((x) >> S_SGE_TH1_BASE) & M_SGE_TH1_BASE)
+
+#define A_MA_PIO_CI_ULPTX_BASE 0x7940
+
+#define S_ULPTX_BASE 0
+#define M_ULPTX_BASE 0xffffU
+#define V_ULPTX_BASE(x) ((x) << S_ULPTX_BASE)
+#define G_ULPTX_BASE(x) (((x) >> S_ULPTX_BASE) & M_ULPTX_BASE)
+
+#define A_MA_PIO_CI_ULPRX_BASE 0x7944
+
+#define S_ULPRX_BASE 0
+#define M_ULPRX_BASE 0xffffU
+#define V_ULPRX_BASE(x) ((x) << S_ULPRX_BASE)
+#define G_ULPRX_BASE(x) (((x) >> S_ULPRX_BASE) & M_ULPRX_BASE)
+
+#define A_MA_PIO_CI_ULPTXRX_BASE 0x7948
+
+#define S_ULPTXRX_BASE 0
+#define M_ULPTXRX_BASE 0xffffU
+#define V_ULPTXRX_BASE(x) ((x) << S_ULPTXRX_BASE)
+#define G_ULPTXRX_BASE(x) (((x) >> S_ULPTXRX_BASE) & M_ULPTXRX_BASE)
+
+#define A_MA_PIO_CI_TP_TH0_BASE 0x794c
+
+#define S_TP_TH0_BASE 0
+#define M_TP_TH0_BASE 0xffffU
+#define V_TP_TH0_BASE(x) ((x) << S_TP_TH0_BASE)
+#define G_TP_TH0_BASE(x) (((x) >> S_TP_TH0_BASE) & M_TP_TH0_BASE)
+
+#define A_MA_PIO_CI_TP_TH1_BASE 0x7950
+
+#define S_TP_TH1_BASE 0
+#define M_TP_TH1_BASE 0xffffU
+#define V_TP_TH1_BASE(x) ((x) << S_TP_TH1_BASE)
+#define G_TP_TH1_BASE(x) (((x) >> S_TP_TH1_BASE) & M_TP_TH1_BASE)
+
+#define A_MA_PIO_CI_LE_BASE 0x7954
+
+#define S_LE_BASE 0
+#define M_LE_BASE 0xffffU
+#define V_LE_BASE(x) ((x) << S_LE_BASE)
+#define G_LE_BASE(x) (((x) >> S_LE_BASE) & M_LE_BASE)
+
+#define A_MA_PIO_CI_CIM_TH0_BASE 0x7958
+
+#define S_CIM_TH0_BASE 0
+#define M_CIM_TH0_BASE 0xffffU
+#define V_CIM_TH0_BASE(x) ((x) << S_CIM_TH0_BASE)
+#define G_CIM_TH0_BASE(x) (((x) >> S_CIM_TH0_BASE) & M_CIM_TH0_BASE)
+
+#define A_MA_PIO_CI_PCIE_BASE 0x795c
+
+#define S_PCIE_BASE 0
+#define M_PCIE_BASE 0xffffU
+#define V_PCIE_BASE(x) ((x) << S_PCIE_BASE)
+#define G_PCIE_BASE(x) (((x) >> S_PCIE_BASE) & M_PCIE_BASE)
+
+#define A_MA_PIO_CI_PMTX_BASE 0x7960
+
+#define S_PMTX_BASE 0
+#define M_PMTX_BASE 0xffffU
+#define V_PMTX_BASE(x) ((x) << S_PMTX_BASE)
+#define G_PMTX_BASE(x) (((x) >> S_PMTX_BASE) & M_PMTX_BASE)
+
+#define A_MA_PIO_CI_PMRX_BASE 0x7964
+
+#define S_PMRX_BASE 0
+#define M_PMRX_BASE 0xffffU
+#define V_PMRX_BASE(x) ((x) << S_PMRX_BASE)
+#define G_PMRX_BASE(x) (((x) >> S_PMRX_BASE) & M_PMRX_BASE)
+
+#define A_MA_PIO_CI_HMA_BASE 0x7968
+
+#define S_HMACLIENTBASE 0
+#define M_HMACLIENTBASE 0xffffU
+#define V_HMACLIENTBASE(x) ((x) << S_HMACLIENTBASE)
+#define G_HMACLIENTBASE(x) (((x) >> S_HMACLIENTBASE) & M_HMACLIENTBASE)
+
+#define A_MA_PIO_CI_CRYPTO_BASE 0x796c
+
+#define S_CRYPTO_BASE 0
+#define M_CRYPTO_BASE 0xffffU
+#define V_CRYPTO_BASE(x) ((x) << S_CRYPTO_BASE)
+#define G_CRYPTO_BASE(x) (((x) >> S_CRYPTO_BASE) & M_CRYPTO_BASE)
+
+#define A_MA_PIO_CI_CIM_TH1_BASE 0x7970
+
+#define S_CIM_TH1_BASE 0
+#define M_CIM_TH1_BASE 0xffffU
+#define V_CIM_TH1_BASE(x) ((x) << S_CIM_TH1_BASE)
+#define G_CIM_TH1_BASE(x) (((x) >> S_CIM_TH1_BASE) & M_CIM_TH1_BASE)
+
#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa000
#define S_CMDVLD0 31
@@ -20418,6 +25419,124 @@
#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
+#define A_T7_CIM_PERR_ENABLE 0x7b08
+
+#define S_T7_MA_CIM_INTFPERR 31
+#define V_T7_MA_CIM_INTFPERR(x) ((x) << S_T7_MA_CIM_INTFPERR)
+#define F_T7_MA_CIM_INTFPERR V_T7_MA_CIM_INTFPERR(1U)
+
+#define S_T7_MBHOSTPARERR 30
+#define V_T7_MBHOSTPARERR(x) ((x) << S_T7_MBHOSTPARERR)
+#define F_T7_MBHOSTPARERR V_T7_MBHOSTPARERR(1U)
+
+#define S_MAARBINVRSPTAG 29
+#define V_MAARBINVRSPTAG(x) ((x) << S_MAARBINVRSPTAG)
+#define F_MAARBINVRSPTAG V_MAARBINVRSPTAG(1U)
+
+#define S_MAARBFIFOPARERR 28
+#define V_MAARBFIFOPARERR(x) ((x) << S_MAARBFIFOPARERR)
+#define F_MAARBFIFOPARERR V_MAARBFIFOPARERR(1U)
+
+#define S_SEMSRAMPARERR 27
+#define V_SEMSRAMPARERR(x) ((x) << S_SEMSRAMPARERR)
+#define F_SEMSRAMPARERR V_SEMSRAMPARERR(1U)
+
+#define S_RSACPARERR 26
+#define V_RSACPARERR(x) ((x) << S_RSACPARERR)
+#define F_RSACPARERR V_RSACPARERR(1U)
+
+#define S_RSADPARERR 25
+#define V_RSADPARERR(x) ((x) << S_RSADPARERR)
+#define F_RSADPARERR V_RSADPARERR(1U)
+
+#define S_T7_PLCIM_MSTRSPDATAPARERR 24
+#define V_T7_PLCIM_MSTRSPDATAPARERR(x) ((x) << S_T7_PLCIM_MSTRSPDATAPARERR)
+#define F_T7_PLCIM_MSTRSPDATAPARERR V_T7_PLCIM_MSTRSPDATAPARERR(1U)
+
+#define S_T7_PCIE2CIMINTFPARERR 23
+#define V_T7_PCIE2CIMINTFPARERR(x) ((x) << S_T7_PCIE2CIMINTFPARERR)
+#define F_T7_PCIE2CIMINTFPARERR V_T7_PCIE2CIMINTFPARERR(1U)
+
+#define S_T7_NCSI2CIMINTFPARERR 22
+#define V_T7_NCSI2CIMINTFPARERR(x) ((x) << S_T7_NCSI2CIMINTFPARERR)
+#define F_T7_NCSI2CIMINTFPARERR V_T7_NCSI2CIMINTFPARERR(1U)
+
+#define S_T7_SGE2CIMINTFPARERR 21
+#define V_T7_SGE2CIMINTFPARERR(x) ((x) << S_T7_SGE2CIMINTFPARERR)
+#define F_T7_SGE2CIMINTFPARERR V_T7_SGE2CIMINTFPARERR(1U)
+
+#define S_T7_ULP2CIMINTFPARERR 20
+#define V_T7_ULP2CIMINTFPARERR(x) ((x) << S_T7_ULP2CIMINTFPARERR)
+#define F_T7_ULP2CIMINTFPARERR V_T7_ULP2CIMINTFPARERR(1U)
+
+#define S_T7_TP2CIMINTFPARERR 19
+#define V_T7_TP2CIMINTFPARERR(x) ((x) << S_T7_TP2CIMINTFPARERR)
+#define F_T7_TP2CIMINTFPARERR V_T7_TP2CIMINTFPARERR(1U)
+
+#define S_CORE7PARERR 18
+#define V_CORE7PARERR(x) ((x) << S_CORE7PARERR)
+#define F_CORE7PARERR V_CORE7PARERR(1U)
+
+#define S_CORE6PARERR 17
+#define V_CORE6PARERR(x) ((x) << S_CORE6PARERR)
+#define F_CORE6PARERR V_CORE6PARERR(1U)
+
+#define S_CORE5PARERR 16
+#define V_CORE5PARERR(x) ((x) << S_CORE5PARERR)
+#define F_CORE5PARERR V_CORE5PARERR(1U)
+
+#define S_CORE4PARERR 15
+#define V_CORE4PARERR(x) ((x) << S_CORE4PARERR)
+#define F_CORE4PARERR V_CORE4PARERR(1U)
+
+#define S_CORE3PARERR 14
+#define V_CORE3PARERR(x) ((x) << S_CORE3PARERR)
+#define F_CORE3PARERR V_CORE3PARERR(1U)
+
+#define S_CORE2PARERR 13
+#define V_CORE2PARERR(x) ((x) << S_CORE2PARERR)
+#define F_CORE2PARERR V_CORE2PARERR(1U)
+
+#define S_CORE1PARERR 12
+#define V_CORE1PARERR(x) ((x) << S_CORE1PARERR)
+#define F_CORE1PARERR V_CORE1PARERR(1U)
+
+#define S_GFTPARERR 10
+#define V_GFTPARERR(x) ((x) << S_GFTPARERR)
+#define F_GFTPARERR V_GFTPARERR(1U)
+
+#define S_MPSRSPDATAPARERR 9
+#define V_MPSRSPDATAPARERR(x) ((x) << S_MPSRSPDATAPARERR)
+#define F_MPSRSPDATAPARERR V_MPSRSPDATAPARERR(1U)
+
+#define S_ER_RSPDATAPARERR 8
+#define V_ER_RSPDATAPARERR(x) ((x) << S_ER_RSPDATAPARERR)
+#define F_ER_RSPDATAPARERR V_ER_RSPDATAPARERR(1U)
+
+#define S_FLOWFIFOPARERR 7
+#define V_FLOWFIFOPARERR(x) ((x) << S_FLOWFIFOPARERR)
+#define F_FLOWFIFOPARERR V_FLOWFIFOPARERR(1U)
+
+#define S_OBQSRAMPARERR 6
+#define V_OBQSRAMPARERR(x) ((x) << S_OBQSRAMPARERR)
+#define F_OBQSRAMPARERR V_OBQSRAMPARERR(1U)
+
+#define S_TIEQOUTPARERR 3
+#define V_TIEQOUTPARERR(x) ((x) << S_TIEQOUTPARERR)
+#define F_TIEQOUTPARERR V_TIEQOUTPARERR(1U)
+
+#define S_TIEQINPARERR 2
+#define V_TIEQINPARERR(x) ((x) << S_TIEQINPARERR)
+#define F_TIEQINPARERR V_TIEQINPARERR(1U)
+
+#define S_PIFRSPPARERR 1
+#define V_PIFRSPPARERR(x) ((x) << S_PIFRSPPARERR)
+#define F_PIFRSPPARERR V_PIFRSPPARERR(1U)
+
+#define S_PIFREQPARERR 0
+#define V_PIFREQPARERR(x) ((x) << S_PIFREQPARERR)
+#define F_PIFREQPARERR V_PIFREQPARERR(1U)
+
#define A_CIM_EEPROM_BASE_ADDR 0x7b0c
#define S_EEPROMBASEADDR 6
@@ -20425,6 +25544,7 @@
#define V_EEPROMBASEADDR(x) ((x) << S_EEPROMBASEADDR)
#define G_EEPROMBASEADDR(x) (((x) >> S_EEPROMBASEADDR) & M_EEPROMBASEADDR)
+#define A_CIM_PERR_CAUSE 0x7b0c
#define A_CIM_EEPROM_ADDR_SIZE 0x7b10
#define S_EEPROMADDRSIZE 4
@@ -20593,6 +25713,38 @@
#define V_IBQPCIEPARERR(x) ((x) << S_IBQPCIEPARERR)
#define F_IBQPCIEPARERR V_IBQPCIEPARERR(1U)
+#define S_CORE7ACCINT 22
+#define V_CORE7ACCINT(x) ((x) << S_CORE7ACCINT)
+#define F_CORE7ACCINT V_CORE7ACCINT(1U)
+
+#define S_CORE6ACCINT 21
+#define V_CORE6ACCINT(x) ((x) << S_CORE6ACCINT)
+#define F_CORE6ACCINT V_CORE6ACCINT(1U)
+
+#define S_CORE5ACCINT 20
+#define V_CORE5ACCINT(x) ((x) << S_CORE5ACCINT)
+#define F_CORE5ACCINT V_CORE5ACCINT(1U)
+
+#define S_CORE4ACCINT 19
+#define V_CORE4ACCINT(x) ((x) << S_CORE4ACCINT)
+#define F_CORE4ACCINT V_CORE4ACCINT(1U)
+
+#define S_CORE3ACCINT 18
+#define V_CORE3ACCINT(x) ((x) << S_CORE3ACCINT)
+#define F_CORE3ACCINT V_CORE3ACCINT(1U)
+
+#define S_CORE2ACCINT 17
+#define V_CORE2ACCINT(x) ((x) << S_CORE2ACCINT)
+#define F_CORE2ACCINT V_CORE2ACCINT(1U)
+
+#define S_CORE1ACCINT 16
+#define V_CORE1ACCINT(x) ((x) << S_CORE1ACCINT)
+#define F_CORE1ACCINT V_CORE1ACCINT(1U)
+
+#define S_PERRNONZERO 1
+#define V_PERRNONZERO(x) ((x) << S_PERRNONZERO)
+#define F_PERRNONZERO V_PERRNONZERO(1U)
+
#define A_CIM_HOST_INT_CAUSE 0x7b2c
#define S_TIEQOUTPARERRINT 20
@@ -20745,6 +25897,10 @@
#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+#define S_CONWRERRINTEN 31
+#define V_CONWRERRINTEN(x) ((x) << S_CONWRERRINTEN)
+#define F_CONWRERRINTEN V_CONWRERRINTEN(1U)
+
#define A_CIM_HOST_UPACC_INT_CAUSE 0x7b34
#define S_EEPROMWRINT 30
@@ -20871,12 +26027,32 @@
#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+#define S_CONWRERRINT 31
+#define V_CONWRERRINT(x) ((x) << S_CONWRERRINT)
+#define F_CONWRERRINT V_CONWRERRINT(1U)
+
#define A_CIM_UP_INT_ENABLE 0x7b38
#define S_MSTPLINTEN 4
#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+#define S_SEMINT 8
+#define V_SEMINT(x) ((x) << S_SEMINT)
+#define F_SEMINT V_SEMINT(1U)
+
+#define S_RSAINT 7
+#define V_RSAINT(x) ((x) << S_RSAINT)
+#define F_RSAINT V_RSAINT(1U)
+
+#define S_TRNGINT 6
+#define V_TRNGINT(x) ((x) << S_TRNGINT)
+#define F_TRNGINT V_TRNGINT(1U)
+
+#define S_PEERHALTINT 5
+#define V_PEERHALTINT(x) ((x) << S_PEERHALTINT)
+#define F_PEERHALTINT V_PEERHALTINT(1U)
+
#define A_CIM_UP_INT_CAUSE 0x7b3c
#define S_MSTPLINT 4
@@ -20900,6 +26076,33 @@
#define V_QUENUMSELECT(x) ((x) << S_QUENUMSELECT)
#define G_QUENUMSELECT(x) (((x) >> S_QUENUMSELECT) & M_QUENUMSELECT)
+#define S_MAPOFFSET 11
+#define M_MAPOFFSET 0x1fU
+#define V_MAPOFFSET(x) ((x) << S_MAPOFFSET)
+#define G_MAPOFFSET(x) (((x) >> S_MAPOFFSET) & M_MAPOFFSET)
+
+#define S_MAPSELECT 10
+#define V_MAPSELECT(x) ((x) << S_MAPSELECT)
+#define F_MAPSELECT V_MAPSELECT(1U)
+
+#define S_CORESELECT 6
+#define M_CORESELECT 0xfU
+#define V_CORESELECT(x) ((x) << S_CORESELECT)
+#define G_CORESELECT(x) (((x) >> S_CORESELECT) & M_CORESELECT)
+
+#define S_T7_OBQSELECT 5
+#define V_T7_OBQSELECT(x) ((x) << S_T7_OBQSELECT)
+#define F_T7_OBQSELECT V_T7_OBQSELECT(1U)
+
+#define S_T7_IBQSELECT 4
+#define V_T7_IBQSELECT(x) ((x) << S_T7_IBQSELECT)
+#define F_T7_IBQSELECT V_T7_IBQSELECT(1U)
+
+#define S_T7_QUENUMSELECT 0
+#define M_T7_QUENUMSELECT 0xfU
+#define V_T7_QUENUMSELECT(x) ((x) << S_T7_QUENUMSELECT)
+#define G_T7_QUENUMSELECT(x) (((x) >> S_T7_QUENUMSELECT) & M_T7_QUENUMSELECT)
+
#define A_CIM_QUEUE_CONFIG_CTRL 0x7b4c
#define S_CIMQSIZE 24
@@ -20940,6 +26143,29 @@
#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+#define S_T7_HOSTBUSY 31
+#define V_T7_HOSTBUSY(x) ((x) << S_T7_HOSTBUSY)
+#define F_T7_HOSTBUSY V_T7_HOSTBUSY(1U)
+
+#define S_T7_HOSTWRITE 30
+#define V_T7_HOSTWRITE(x) ((x) << S_T7_HOSTWRITE)
+#define F_T7_HOSTWRITE V_T7_HOSTWRITE(1U)
+
+#define S_HOSTGRPSEL 28
+#define M_HOSTGRPSEL 0x3U
+#define V_HOSTGRPSEL(x) ((x) << S_HOSTGRPSEL)
+#define G_HOSTGRPSEL(x) (((x) >> S_HOSTGRPSEL) & M_HOSTGRPSEL)
+
+#define S_HOSTCORESEL 24
+#define M_HOSTCORESEL 0xfU
+#define V_HOSTCORESEL(x) ((x) << S_HOSTCORESEL)
+#define G_HOSTCORESEL(x) (((x) >> S_HOSTCORESEL) & M_HOSTCORESEL)
+
+#define S_T7_HOSTADDR 0
+#define M_T7_HOSTADDR 0xffffffU
+#define V_T7_HOSTADDR(x) ((x) << S_T7_HOSTADDR)
+#define G_T7_HOSTADDR(x) (((x) >> S_T7_HOSTADDR) & M_T7_HOSTADDR)
+
#define A_CIM_HOST_ACC_DATA 0x7b54
#define A_CIM_CDEBUGDATA 0x7b58
@@ -20953,6 +26179,31 @@
#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+#define A_CIM_DEBUG_CFG 0x7b58
+
+#define S_OR_EN 20
+#define V_OR_EN(x) ((x) << S_OR_EN)
+#define F_OR_EN V_OR_EN(1U)
+
+#define S_USEL 19
+#define V_USEL(x) ((x) << S_USEL)
+#define F_USEL V_USEL(1U)
+
+#define S_HI 18
+#define V_HI(x) ((x) << S_HI)
+#define F_HI V_HI(1U)
+
+#define S_SELH 9
+#define M_SELH 0x1ffU
+#define V_SELH(x) ((x) << S_SELH)
+#define G_SELH(x) (((x) >> S_SELH) & M_SELH)
+
+#define S_SELL 0
+#define M_SELL 0x1ffU
+#define V_SELL(x) ((x) << S_SELL)
+#define G_SELL(x) (((x) >> S_SELL) & M_SELL)
+
+#define A_CIM_DEBUG_DATA 0x7b5c
#define A_CIM_IBQ_DBG_CFG 0x7b60
#define S_IBQDBGADDR 16
@@ -20972,6 +26223,25 @@
#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
#define F_IBQDBGEN V_IBQDBGEN(1U)
+#define S_IBQDBGCORE 28
+#define M_IBQDBGCORE 0xfU
+#define V_IBQDBGCORE(x) ((x) << S_IBQDBGCORE)
+#define G_IBQDBGCORE(x) (((x) >> S_IBQDBGCORE) & M_IBQDBGCORE)
+
+#define S_T7_IBQDBGADDR 12
+#define M_T7_IBQDBGADDR 0x1fffU
+#define V_T7_IBQDBGADDR(x) ((x) << S_T7_IBQDBGADDR)
+#define G_T7_IBQDBGADDR(x) (((x) >> S_T7_IBQDBGADDR) & M_T7_IBQDBGADDR)
+
+#define S_IBQDBGSTATE 4
+#define M_IBQDBGSTATE 0x3U
+#define V_IBQDBGSTATE(x) ((x) << S_IBQDBGSTATE)
+#define G_IBQDBGSTATE(x) (((x) >> S_IBQDBGSTATE) & M_IBQDBGSTATE)
+
+#define S_PERRADDRCLR 3
+#define V_PERRADDRCLR(x) ((x) << S_PERRADDRCLR)
+#define F_PERRADDRCLR V_PERRADDRCLR(1U)
+
#define A_CIM_OBQ_DBG_CFG 0x7b64
#define S_OBQDBGADDR 16
@@ -20991,6 +26261,21 @@
#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
#define F_OBQDBGEN V_OBQDBGEN(1U)
+#define S_OBQDBGCORE 28
+#define M_OBQDBGCORE 0xfU
+#define V_OBQDBGCORE(x) ((x) << S_OBQDBGCORE)
+#define G_OBQDBGCORE(x) (((x) >> S_OBQDBGCORE) & M_OBQDBGCORE)
+
+#define S_T7_OBQDBGADDR 12
+#define M_T7_OBQDBGADDR 0x1fffU
+#define V_T7_OBQDBGADDR(x) ((x) << S_T7_OBQDBGADDR)
+#define G_T7_OBQDBGADDR(x) (((x) >> S_T7_OBQDBGADDR) & M_T7_OBQDBGADDR)
+
+#define S_OBQDBGSTATE 4
+#define M_OBQDBGSTATE 0x3U
+#define V_OBQDBGSTATE(x) ((x) << S_OBQDBGSTATE)
+#define G_OBQDBGSTATE(x) (((x) >> S_OBQDBGSTATE) & M_OBQDBGSTATE)
+
#define A_CIM_IBQ_DBG_DATA 0x7b68
#define A_CIM_OBQ_DBG_DATA 0x7b6c
#define A_CIM_DEBUGCFG 0x7b70
@@ -21075,6 +26360,11 @@
#define V_ZONE_DST(x) ((x) << S_ZONE_DST)
#define G_ZONE_DST(x) (((x) >> S_ZONE_DST) & M_ZONE_DST)
+#define S_THREAD_ID 2
+#define M_THREAD_ID 0x7U
+#define V_THREAD_ID(x) ((x) << S_THREAD_ID)
+#define G_THREAD_ID(x) (((x) >> S_THREAD_ID) & M_THREAD_ID)
+
#define A_CIM_MEM_ZONE0_LEN 0x7b98
#define S_MEM_ZONE_LEN 4
@@ -21207,6 +26497,7 @@
#define G_DUPUACCMASK(x) (((x) >> S_DUPUACCMASK) & M_DUPUACCMASK)
#define A_CIM_PERR_INJECT 0x7c20
+#define A_CIM_FPGA_ROM_EFUSE_CMD 0x7c20
#define A_CIM_PERR_ENABLE 0x7c24
#define S_PERREN 0
@@ -21224,6 +26515,7 @@
#define V_T6_T5_PERREN(x) ((x) << S_T6_T5_PERREN)
#define G_T6_T5_PERREN(x) (((x) >> S_T6_T5_PERREN) & M_T6_T5_PERREN)
+#define A_CIM_FPGA_ROM_EFUSE_DATA 0x7c24
#define A_CIM_EEPROM_BUSY_BIT 0x7c28
#define S_EEPROMBUSY 0
@@ -21240,6 +26532,22 @@
#define V_SLOW_TIMER_ENABLE(x) ((x) << S_SLOW_TIMER_ENABLE)
#define F_SLOW_TIMER_ENABLE V_SLOW_TIMER_ENABLE(1U)
+#define S_FLASHWRPAGEMORE 5
+#define V_FLASHWRPAGEMORE(x) ((x) << S_FLASHWRPAGEMORE)
+#define F_FLASHWRPAGEMORE V_FLASHWRPAGEMORE(1U)
+
+#define S_FLASHWRENABLE 4
+#define V_FLASHWRENABLE(x) ((x) << S_FLASHWRENABLE)
+#define F_FLASHWRENABLE V_FLASHWRENABLE(1U)
+
+#define S_FLASHMOREENABLE 3
+#define V_FLASHMOREENABLE(x) ((x) << S_FLASHMOREENABLE)
+#define F_FLASHMOREENABLE V_FLASHMOREENABLE(1U)
+
+#define S_WR_RESP_ENABLE 2
+#define V_WR_RESP_ENABLE(x) ((x) << S_WR_RESP_ENABLE)
+#define F_WR_RESP_ENABLE V_WR_RESP_ENABLE(1U)
+
#define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30
#define S_UP_PO_SINGLE_OUTSTANDING 0
@@ -21271,6 +26579,18 @@
#define G_CIM_PCIE_PKT_ERR_CODE(x) (((x) >> S_CIM_PCIE_PKT_ERR_CODE) & M_CIM_PCIE_PKT_ERR_CODE)
#define A_CIM_IBQ_DBG_WAIT_COUNTER 0x7c40
+#define A_CIM_QUE_PERR_ADDR 0x7c40
+
+#define S_IBQPERRADDR 16
+#define M_IBQPERRADDR 0xfffU
+#define V_IBQPERRADDR(x) ((x) << S_IBQPERRADDR)
+#define G_IBQPERRADDR(x) (((x) >> S_IBQPERRADDR) & M_IBQPERRADDR)
+
+#define S_OBQPERRADDR 0
+#define M_OBQPERRADDR 0xfffU
+#define V_OBQPERRADDR(x) ((x) << S_OBQPERRADDR)
+#define G_OBQPERRADDR(x) (((x) >> S_OBQPERRADDR) & M_OBQPERRADDR)
+
#define A_CIM_PIO_UP_MST_CFG_SEL 0x7c44
#define S_PIO_UP_MST_CFG_SEL 0
@@ -21309,6 +26629,20 @@
#define V_PCIE_OBQ_IF_DISABLE(x) ((x) << S_PCIE_OBQ_IF_DISABLE)
#define F_PCIE_OBQ_IF_DISABLE V_PCIE_OBQ_IF_DISABLE(1U)
+#define S_ULP_OBQ_SIZE 8
+#define M_ULP_OBQ_SIZE 0x3U
+#define V_ULP_OBQ_SIZE(x) ((x) << S_ULP_OBQ_SIZE)
+#define G_ULP_OBQ_SIZE(x) (((x) >> S_ULP_OBQ_SIZE) & M_ULP_OBQ_SIZE)
+
+#define S_TP_IBQ_SIZE 6
+#define M_TP_IBQ_SIZE 0x3U
+#define V_TP_IBQ_SIZE(x) ((x) << S_TP_IBQ_SIZE)
+#define G_TP_IBQ_SIZE(x) (((x) >> S_TP_IBQ_SIZE) & M_TP_IBQ_SIZE)
+
+#define S_OBQ_EOM_ENABLE 5
+#define V_OBQ_EOM_ENABLE(x) ((x) << S_OBQ_EOM_ENABLE)
+#define F_OBQ_EOM_ENABLE V_OBQ_EOM_ENABLE(1U)
+
#define A_CIM_CGEN_GLOBAL 0x7c50
#define S_CGEN_GLOBAL 0
@@ -21321,6 +26655,77 @@
#define V_PIFDBGLA_DPSLP_EN(x) ((x) << S_PIFDBGLA_DPSLP_EN)
#define F_PIFDBGLA_DPSLP_EN V_PIFDBGLA_DPSLP_EN(1U)
+#define A_CIM_GFT_CMM_CONFIG 0x7c58
+
+#define S_GLFL 31
+#define V_GLFL(x) ((x) << S_GLFL)
+#define F_GLFL V_GLFL(1U)
+
+#define S_T7_WRCNTIDLE 16
+#define M_T7_WRCNTIDLE 0x7fffU
+#define V_T7_WRCNTIDLE(x) ((x) << S_T7_WRCNTIDLE)
+#define G_T7_WRCNTIDLE(x) (((x) >> S_T7_WRCNTIDLE) & M_T7_WRCNTIDLE)
+
+#define A_CIM_GFT_CONFIG 0x7c5c
+
+#define S_GFTMABASE 16
+#define M_GFTMABASE 0xffffU
+#define V_GFTMABASE(x) ((x) << S_GFTMABASE)
+#define G_GFTMABASE(x) (((x) >> S_GFTMABASE) & M_GFTMABASE)
+
+#define S_GFTHASHTBLSIZE 12
+#define M_GFTHASHTBLSIZE 0xfU
+#define V_GFTHASHTBLSIZE(x) ((x) << S_GFTHASHTBLSIZE)
+#define G_GFTHASHTBLSIZE(x) (((x) >> S_GFTHASHTBLSIZE) & M_GFTHASHTBLSIZE)
+
+#define S_GFTTCAMPRIORITY 11
+#define V_GFTTCAMPRIORITY(x) ((x) << S_GFTTCAMPRIORITY)
+#define F_GFTTCAMPRIORITY V_GFTTCAMPRIORITY(1U)
+
+#define S_GFTMATHREADID 8
+#define M_GFTMATHREADID 0x7U
+#define V_GFTMATHREADID(x) ((x) << S_GFTMATHREADID)
+#define G_GFTMATHREADID(x) (((x) >> S_GFTMATHREADID) & M_GFTMATHREADID)
+
+#define S_GFTTCAMINIT 7
+#define V_GFTTCAMINIT(x) ((x) << S_GFTTCAMINIT)
+#define F_GFTTCAMINIT V_GFTTCAMINIT(1U)
+
+#define S_GFTTCAMINITDONE 6
+#define V_GFTTCAMINITDONE(x) ((x) << S_GFTTCAMINITDONE)
+#define F_GFTTCAMINITDONE V_GFTTCAMINITDONE(1U)
+
+#define S_GFTTBLMODEEN 0
+#define V_GFTTBLMODEEN(x) ((x) << S_GFTTBLMODEEN)
+#define F_GFTTBLMODEEN V_GFTTBLMODEEN(1U)
+
+#define A_CIM_TCAM_BIST_CTRL 0x7c60
+
+#define S_RST_CB 31
+#define V_RST_CB(x) ((x) << S_RST_CB)
+#define F_RST_CB V_RST_CB(1U)
+
+#define S_CB_START 0
+#define M_CB_START 0xfffffffU
+#define V_CB_START(x) ((x) << S_CB_START)
+#define G_CB_START(x) (((x) >> S_CB_START) & M_CB_START)
+
+#define A_CIM_TCAM_BIST_CB_PASS 0x7c64
+
+#define S_CB_PASS 0
+#define M_CB_PASS 0xfffffffU
+#define V_CB_PASS(x) ((x) << S_CB_PASS)
+#define G_CB_PASS(x) (((x) >> S_CB_PASS) & M_CB_PASS)
+
+#define A_CIM_TCAM_BIST_CB_BUSY 0x7c68
+
+#define S_CB_BUSY 0
+#define M_CB_BUSY 0xfffffffU
+#define V_CB_BUSY(x) ((x) << S_CB_BUSY)
+#define G_CB_BUSY(x) (((x) >> S_CB_BUSY) & M_CB_BUSY)
+
+#define A_CIM_GFT_MASK 0x7c70
+
/* registers for module TP */
#define TP_BASE_ADDR 0x7d00
@@ -21613,6 +27018,14 @@
#define V_CRXPKTXT(x) ((x) << S_CRXPKTXT)
#define F_CRXPKTXT V_CRXPKTXT(1U)
+#define S_ETOEBYPCSUMNOWAIT 15
+#define V_ETOEBYPCSUMNOWAIT(x) ((x) << S_ETOEBYPCSUMNOWAIT)
+#define F_ETOEBYPCSUMNOWAIT V_ETOEBYPCSUMNOWAIT(1U)
+
+#define S_ENICCSUMNOWAIT 14
+#define V_ENICCSUMNOWAIT(x) ((x) << S_ENICCSUMNOWAIT)
+#define F_ENICCSUMNOWAIT V_ENICCSUMNOWAIT(1U)
+
#define A_TP_GLOBAL_CONFIG 0x7d08
#define S_SYNCOOKIEPARAMS 26
@@ -21703,6 +27116,31 @@
#define V_ACTIVEFILTERCOUNTS(x) ((x) << S_ACTIVEFILTERCOUNTS)
#define F_ACTIVEFILTERCOUNTS V_ACTIVEFILTERCOUNTS(1U)
+#define S_RXSACKPARSE 31
+#define V_RXSACKPARSE(x) ((x) << S_RXSACKPARSE)
+#define F_RXSACKPARSE V_RXSACKPARSE(1U)
+
+#define S_RXSACKFWDMODE 29
+#define M_RXSACKFWDMODE 0x3U
+#define V_RXSACKFWDMODE(x) ((x) << S_RXSACKFWDMODE)
+#define G_RXSACKFWDMODE(x) (((x) >> S_RXSACKFWDMODE) & M_RXSACKFWDMODE)
+
+#define S_SRVRCHRSSEN 26
+#define V_SRVRCHRSSEN(x) ((x) << S_SRVRCHRSSEN)
+#define F_SRVRCHRSSEN V_SRVRCHRSSEN(1U)
+
+#define S_LBCHNDISTEN 23
+#define V_LBCHNDISTEN(x) ((x) << S_LBCHNDISTEN)
+#define F_LBCHNDISTEN V_LBCHNDISTEN(1U)
+
+#define S_ETHTNLLEN2X 20
+#define V_ETHTNLLEN2X(x) ((x) << S_ETHTNLLEN2X)
+#define F_ETHTNLLEN2X V_ETHTNLLEN2X(1U)
+
+#define S_EGLBCHNDISTEN 19
+#define V_EGLBCHNDISTEN(x) ((x) << S_EGLBCHNDISTEN)
+#define F_EGLBCHNDISTEN V_EGLBCHNDISTEN(1U)
+
#define A_TP_DB_CONFIG 0x7d0c
#define S_DBMAXOPCNT 24
@@ -21767,6 +27205,11 @@
#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+#define S_T7_PMRXNUMCHN 29
+#define M_T7_PMRXNUMCHN 0x7U
+#define V_T7_PMRXNUMCHN(x) ((x) << S_T7_PMRXNUMCHN)
+#define G_T7_PMRXNUMCHN(x) (((x) >> S_T7_PMRXNUMCHN) & M_T7_PMRXNUMCHN)
+
#define A_TP_PMM_TX_PAGE_SIZE 0x7d34
#define A_TP_PMM_TX_MAX_PAGE 0x7d38
@@ -21780,6 +27223,83 @@
#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+#define S_T7_PMTXNUMCHN 29
+#define M_T7_PMTXNUMCHN 0x7U
+#define V_T7_PMTXNUMCHN(x) ((x) << S_T7_PMTXNUMCHN)
+#define G_T7_PMTXNUMCHN(x) (((x) >> S_T7_PMTXNUMCHN) & M_T7_PMTXNUMCHN)
+
+#define A_TP_EXT_CONFIG 0x7d3c
+
+#define S_TNLERRORIPSECARW 29
+#define V_TNLERRORIPSECARW(x) ((x) << S_TNLERRORIPSECARW)
+#define F_TNLERRORIPSECARW V_TNLERRORIPSECARW(1U)
+
+#define S_TNLERRORIPSECICV 28
+#define V_TNLERRORIPSECICV(x) ((x) << S_TNLERRORIPSECICV)
+#define F_TNLERRORIPSECICV V_TNLERRORIPSECICV(1U)
+
+#define S_DROPERRORIPSECARW 25
+#define V_DROPERRORIPSECARW(x) ((x) << S_DROPERRORIPSECARW)
+#define F_DROPERRORIPSECARW V_DROPERRORIPSECARW(1U)
+
+#define S_DROPERRORIPSECICV 24
+#define V_DROPERRORIPSECICV(x) ((x) << S_DROPERRORIPSECICV)
+#define F_DROPERRORIPSECICV V_DROPERRORIPSECICV(1U)
+
+#define S_MIBRDMAROCEEN 19
+#define V_MIBRDMAROCEEN(x) ((x) << S_MIBRDMAROCEEN)
+#define F_MIBRDMAROCEEN V_MIBRDMAROCEEN(1U)
+
+#define S_MIBRDMAIWARPEN 18
+#define V_MIBRDMAIWARPEN(x) ((x) << S_MIBRDMAIWARPEN)
+#define F_MIBRDMAIWARPEN V_MIBRDMAIWARPEN(1U)
+
+#define S_BYPTXDATAACKALLEN 17
+#define V_BYPTXDATAACKALLEN(x) ((x) << S_BYPTXDATAACKALLEN)
+#define F_BYPTXDATAACKALLEN V_BYPTXDATAACKALLEN(1U)
+
+#define S_DATAACKEXTEN 16
+#define V_DATAACKEXTEN(x) ((x) << S_DATAACKEXTEN)
+#define F_DATAACKEXTEN V_DATAACKEXTEN(1U)
+
+#define S_MACMATCH11FWD 11
+#define V_MACMATCH11FWD(x) ((x) << S_MACMATCH11FWD)
+#define F_MACMATCH11FWD V_MACMATCH11FWD(1U)
+
+#define S_USERTMSTPEN 10
+#define V_USERTMSTPEN(x) ((x) << S_USERTMSTPEN)
+#define F_USERTMSTPEN V_USERTMSTPEN(1U)
+
+#define S_MMGRCACHEDIS 9
+#define V_MMGRCACHEDIS(x) ((x) << S_MMGRCACHEDIS)
+#define F_MMGRCACHEDIS V_MMGRCACHEDIS(1U)
+
+#define S_TXPKTPACKOUTUDPEN 8
+#define V_TXPKTPACKOUTUDPEN(x) ((x) << S_TXPKTPACKOUTUDPEN)
+#define F_TXPKTPACKOUTUDPEN V_TXPKTPACKOUTUDPEN(1U)
+
+#define S_IPSECROCECRCMODE 6
+#define M_IPSECROCECRCMODE 0x3U
+#define V_IPSECROCECRCMODE(x) ((x) << S_IPSECROCECRCMODE)
+#define G_IPSECROCECRCMODE(x) (((x) >> S_IPSECROCECRCMODE) & M_IPSECROCECRCMODE)
+
+#define S_IPSECIDXLOC 5
+#define V_IPSECIDXLOC(x) ((x) << S_IPSECIDXLOC)
+#define F_IPSECIDXLOC V_IPSECIDXLOC(1U)
+
+#define S_IPSECIDXCAPEN 4
+#define V_IPSECIDXCAPEN(x) ((x) << S_IPSECIDXCAPEN)
+#define F_IPSECIDXCAPEN V_IPSECIDXCAPEN(1U)
+
+#define S_IPSECOFEN 3
+#define V_IPSECOFEN(x) ((x) << S_IPSECOFEN)
+#define F_IPSECOFEN V_IPSECOFEN(1U)
+
+#define S_IPSECCFG 0
+#define M_IPSECCFG 0x7U
+#define V_IPSECCFG(x) ((x) << S_IPSECCFG)
+#define G_IPSECCFG(x) (((x) >> S_IPSECCFG) & M_IPSECCFG)
+
#define A_TP_TCP_OPTIONS 0x7d40
#define S_MTUDEFAULT 16
@@ -22615,10 +28135,6 @@
#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
-#define S_ENABLECBYP 21
-#define V_ENABLECBYP(x) ((x) << S_ENABLECBYP)
-#define F_ENABLECBYP V_ENABLECBYP(1U)
-
#define S_LIMITEDTRANSMIT 20
#define M_LIMITEDTRANSMIT 0xfU
#define V_LIMITEDTRANSMIT(x) ((x) << S_LIMITEDTRANSMIT)
@@ -22779,6 +28295,18 @@
#define V_ECNSYNECT(x) ((x) << S_ECNSYNECT)
#define F_ECNSYNECT V_ECNSYNECT(1U)
+#define A_TP_PARA_REG9 0x7d88
+
+#define S_PMMAXXFERLEN3 16
+#define M_PMMAXXFERLEN3 0xffffU
+#define V_PMMAXXFERLEN3(x) ((x) << S_PMMAXXFERLEN3)
+#define G_PMMAXXFERLEN3(x) (((x) >> S_PMMAXXFERLEN3) & M_PMMAXXFERLEN3)
+
+#define S_PMMAXXFERLEN2 0
+#define M_PMMAXXFERLEN2 0xffffU
+#define V_PMMAXXFERLEN2(x) ((x) << S_PMMAXXFERLEN2)
+#define G_PMMAXXFERLEN2(x) (((x) >> S_PMMAXXFERLEN2) & M_PMMAXXFERLEN2)
+
#define A_TP_ERR_CONFIG 0x7d8c
#define S_TNLERRORPING 30
@@ -22926,6 +28454,11 @@
#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
+#define S_ROCETIMERRESOLUTION 24
+#define M_ROCETIMERRESOLUTION 0xffU
+#define V_ROCETIMERRESOLUTION(x) ((x) << S_ROCETIMERRESOLUTION)
+#define G_ROCETIMERRESOLUTION(x) (((x) >> S_ROCETIMERRESOLUTION) & M_ROCETIMERRESOLUTION)
+
#define A_TP_MSL 0x7d94
#define S_MSL 0
@@ -23423,6 +28956,14 @@
#define V_FRMWRQUEMASK(x) ((x) << S_FRMWRQUEMASK)
#define G_FRMWRQUEMASK(x) (((x) >> S_FRMWRQUEMASK) & M_FRMWRQUEMASK)
+#define S_RRCPLOPT1SMSELEN 11
+#define V_RRCPLOPT1SMSELEN(x) ((x) << S_RRCPLOPT1SMSELEN)
+#define F_RRCPLOPT1SMSELEN V_RRCPLOPT1SMSELEN(1U)
+
+#define S_RRCPLOPT1BQEN 10
+#define V_RRCPLOPT1BQEN(x) ((x) << S_RRCPLOPT1BQEN)
+#define F_RRCPLOPT1BQEN V_RRCPLOPT1BQEN(1U)
+
#define A_TP_RSS_CONFIG_SYN 0x7dfc
#define A_TP_RSS_CONFIG_VRT 0x7e00
@@ -23595,6 +29136,69 @@
#define V_QUEUE(x) ((x) << S_QUEUE)
#define G_QUEUE(x) (((x) >> S_QUEUE) & M_QUEUE)
+#define S_T7_UPDVLD 19
+#define V_T7_UPDVLD(x) ((x) << S_T7_UPDVLD)
+#define F_T7_UPDVLD V_T7_UPDVLD(1U)
+
+#define S_T7_XOFF 18
+#define V_T7_XOFF(x) ((x) << S_T7_XOFF)
+#define F_T7_XOFF V_T7_XOFF(1U)
+
+#define S_T7_UPDCHN3 17
+#define V_T7_UPDCHN3(x) ((x) << S_T7_UPDCHN3)
+#define F_T7_UPDCHN3 V_T7_UPDCHN3(1U)
+
+#define S_T7_UPDCHN2 16
+#define V_T7_UPDCHN2(x) ((x) << S_T7_UPDCHN2)
+#define F_T7_UPDCHN2 V_T7_UPDCHN2(1U)
+
+#define S_T7_UPDCHN1 15
+#define V_T7_UPDCHN1(x) ((x) << S_T7_UPDCHN1)
+#define F_T7_UPDCHN1 V_T7_UPDCHN1(1U)
+
+#define S_T7_UPDCHN0 14
+#define V_T7_UPDCHN0(x) ((x) << S_T7_UPDCHN0)
+#define F_T7_UPDCHN0 V_T7_UPDCHN0(1U)
+
+#define S_T7_QUEUE 0
+#define M_T7_QUEUE 0x3fffU
+#define V_T7_QUEUE(x) ((x) << S_T7_QUEUE)
+#define G_T7_QUEUE(x) (((x) >> S_T7_QUEUE) & M_T7_QUEUE)
+
+#define A_TP_RSS_CONFIG_4CH 0x7e08
+
+#define S_BASEQIDEN 1
+#define V_BASEQIDEN(x) ((x) << S_BASEQIDEN)
+#define F_BASEQIDEN V_BASEQIDEN(1U)
+
+#define S_200GMODE 0
+#define V_200GMODE(x) ((x) << S_200GMODE)
+#define F_200GMODE V_200GMODE(1U)
+
+#define A_TP_RSS_CONFIG_SRAM 0x7e0c
+
+#define S_SRAMRDDIS 20
+#define V_SRAMRDDIS(x) ((x) << S_SRAMRDDIS)
+#define F_SRAMRDDIS V_SRAMRDDIS(1U)
+
+#define S_SRAMSTART 19
+#define V_SRAMSTART(x) ((x) << S_SRAMSTART)
+#define F_SRAMSTART V_SRAMSTART(1U)
+
+#define S_SRAMWRITE 18
+#define V_SRAMWRITE(x) ((x) << S_SRAMWRITE)
+#define F_SRAMWRITE V_SRAMWRITE(1U)
+
+#define S_SRAMSEL 16
+#define M_SRAMSEL 0x3U
+#define V_SRAMSEL(x) ((x) << S_SRAMSEL)
+#define G_SRAMSEL(x) (((x) >> S_SRAMSEL) & M_SRAMSEL)
+
+#define S_SRAMADDR 0
+#define M_SRAMADDR 0x3fffU
+#define V_SRAMADDR(x) ((x) << S_SRAMADDR)
+#define G_SRAMADDR(x) (((x) >> S_SRAMADDR) & M_SRAMADDR)
+
#define A_TP_LA_TABLE_0 0x7e10
#define S_VIRTPORT1TABLE 16
@@ -23621,6 +29225,18 @@
#define A_TP_TM_PIO_ADDR 0x7e18
#define A_TP_TM_PIO_DATA 0x7e1c
+#define A_TP_RX_MOD_CONFIG_CH3_CH2 0x7e20
+
+#define S_RXCHANNELWEIGHT3 8
+#define M_RXCHANNELWEIGHT3 0xffU
+#define V_RXCHANNELWEIGHT3(x) ((x) << S_RXCHANNELWEIGHT3)
+#define G_RXCHANNELWEIGHT3(x) (((x) >> S_RXCHANNELWEIGHT3) & M_RXCHANNELWEIGHT3)
+
+#define S_RXCHANNELWEIGHT2 0
+#define M_RXCHANNELWEIGHT2 0xffU
+#define V_RXCHANNELWEIGHT2(x) ((x) << S_RXCHANNELWEIGHT2)
+#define G_RXCHANNELWEIGHT2(x) (((x) >> S_RXCHANNELWEIGHT2) & M_RXCHANNELWEIGHT2)
+
#define A_TP_MOD_CONFIG 0x7e24
#define S_RXCHANNELWEIGHT1 24
@@ -23887,6 +29503,30 @@
#define V_SRQTABLEPERR(x) ((x) << S_SRQTABLEPERR)
#define F_SRQTABLEPERR V_SRQTABLEPERR(1U)
+#define S_TPCERR 5
+#define V_TPCERR(x) ((x) << S_TPCERR)
+#define F_TPCERR V_TPCERR(1U)
+
+#define S_OTHERPERR 4
+#define V_OTHERPERR(x) ((x) << S_OTHERPERR)
+#define F_OTHERPERR V_OTHERPERR(1U)
+
+#define S_TPEING1PERR 3
+#define V_TPEING1PERR(x) ((x) << S_TPEING1PERR)
+#define F_TPEING1PERR V_TPEING1PERR(1U)
+
+#define S_TPEING0PERR 2
+#define V_TPEING0PERR(x) ((x) << S_TPEING0PERR)
+#define F_TPEING0PERR V_TPEING0PERR(1U)
+
+#define S_TPEEGPERR 1
+#define V_TPEEGPERR(x) ((x) << S_TPEEGPERR)
+#define F_TPEEGPERR V_TPEEGPERR(1U)
+
+#define S_TPCPERR 0
+#define V_TPCPERR(x) ((x) << S_TPCPERR)
+#define F_TPCPERR V_TPCPERR(1U)
+
#define A_TP_INT_CAUSE 0x7e74
#define A_TP_PER_ENABLE 0x7e78
#define A_TP_FLM_FREE_PS_CNT 0x7e80
@@ -23907,6 +29547,11 @@
#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+#define S_T7_FREERXPAGECHN 28
+#define M_T7_FREERXPAGECHN 0x7U
+#define V_T7_FREERXPAGECHN(x) ((x) << S_T7_FREERXPAGECHN)
+#define G_T7_FREERXPAGECHN(x) (((x) >> S_T7_FREERXPAGECHN) & M_T7_FREERXPAGECHN)
+
#define A_TP_FLM_FREE_TX_CNT 0x7e88
#define S_FREETXPAGECHN 28
@@ -23919,6 +29564,11 @@
#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+#define S_T7_FREETXPAGECHN 28
+#define M_T7_FREETXPAGECHN 0x7U
+#define V_T7_FREETXPAGECHN(x) ((x) << S_T7_FREETXPAGECHN)
+#define G_T7_FREETXPAGECHN(x) (((x) >> S_T7_FREETXPAGECHN) & M_T7_FREETXPAGECHN)
+
#define A_TP_TM_HEAP_PUSH_CNT 0x7e8c
#define A_TP_TM_HEAP_POP_CNT 0x7e90
#define A_TP_TM_DACK_PUSH_CNT 0x7e94
@@ -24111,6 +29761,38 @@
#define V_COMMITLIMIT0(x) ((x) << S_COMMITLIMIT0)
#define G_COMMITLIMIT0(x) (((x) >> S_COMMITLIMIT0) & M_COMMITLIMIT0)
+#define S_RXCOMMITRESET3 7
+#define V_RXCOMMITRESET3(x) ((x) << S_RXCOMMITRESET3)
+#define F_RXCOMMITRESET3 V_RXCOMMITRESET3(1U)
+
+#define S_RXCOMMITRESET2 6
+#define V_RXCOMMITRESET2(x) ((x) << S_RXCOMMITRESET2)
+#define F_RXCOMMITRESET2 V_RXCOMMITRESET2(1U)
+
+#define S_T7_RXCOMMITRESET1 5
+#define V_T7_RXCOMMITRESET1(x) ((x) << S_T7_RXCOMMITRESET1)
+#define F_T7_RXCOMMITRESET1 V_T7_RXCOMMITRESET1(1U)
+
+#define S_T7_RXCOMMITRESET0 4
+#define V_T7_RXCOMMITRESET0(x) ((x) << S_T7_RXCOMMITRESET0)
+#define F_T7_RXCOMMITRESET0 V_T7_RXCOMMITRESET0(1U)
+
+#define S_RXFORCECONG3 3
+#define V_RXFORCECONG3(x) ((x) << S_RXFORCECONG3)
+#define F_RXFORCECONG3 V_RXFORCECONG3(1U)
+
+#define S_RXFORCECONG2 2
+#define V_RXFORCECONG2(x) ((x) << S_RXFORCECONG2)
+#define F_RXFORCECONG2 V_RXFORCECONG2(1U)
+
+#define S_T7_RXFORCECONG1 1
+#define V_T7_RXFORCECONG1(x) ((x) << S_T7_RXFORCECONG1)
+#define F_T7_RXFORCECONG1 V_T7_RXFORCECONG1(1U)
+
+#define S_T7_RXFORCECONG0 0
+#define V_T7_RXFORCECONG0(x) ((x) << S_T7_RXFORCECONG0)
+#define F_T7_RXFORCECONG0 V_T7_RXFORCECONG0(1U)
+
#define A_TP_TX_SCHED 0x7eb4
#define S_COMMITRESET3 31
@@ -24229,6 +29911,14 @@
#define V_RXMODXOFF0(x) ((x) << S_RXMODXOFF0)
#define F_RXMODXOFF0 V_RXMODXOFF0(1U)
+#define S_RXMODXOFF3 3
+#define V_RXMODXOFF3(x) ((x) << S_RXMODXOFF3)
+#define F_RXMODXOFF3 V_RXMODXOFF3(1U)
+
+#define S_RXMODXOFF2 2
+#define V_RXMODXOFF2(x) ((x) << S_RXMODXOFF2)
+#define F_RXMODXOFF2 V_RXMODXOFF2(1U)
+
#define A_TP_TX_ORATE 0x7ebc
#define S_OFDRATE3 24
@@ -24313,6 +30003,37 @@
#define A_TP_DBG_LA_DATAL 0x7ed8
#define A_TP_DBG_LA_DATAH 0x7edc
+#define A_TP_DBG_LA_FILTER 0x7ee0
+
+#define S_FILTERTID 12
+#define M_FILTERTID 0xfffffU
+#define V_FILTERTID(x) ((x) << S_FILTERTID)
+#define G_FILTERTID(x) (((x) >> S_FILTERTID) & M_FILTERTID)
+
+#define S_ENTIDFILTER 5
+#define V_ENTIDFILTER(x) ((x) << S_ENTIDFILTER)
+#define F_ENTIDFILTER V_ENTIDFILTER(1U)
+
+#define S_ENOFFLOAD 4
+#define V_ENOFFLOAD(x) ((x) << S_ENOFFLOAD)
+#define F_ENOFFLOAD V_ENOFFLOAD(1U)
+
+#define S_ENTUNNEL 3
+#define V_ENTUNNEL(x) ((x) << S_ENTUNNEL)
+#define F_ENTUNNEL V_ENTUNNEL(1U)
+
+#define S_ENI 2
+#define V_ENI(x) ((x) << S_ENI)
+#define F_ENI V_ENI(1U)
+
+#define S_ENC 1
+#define V_ENC(x) ((x) << S_ENC)
+#define F_ENC V_ENC(1U)
+
+#define S_ENE 0
+#define V_ENE(x) ((x) << S_ENE)
+#define F_ENE V_ENE(1U)
+
#define A_TP_PROTOCOL_CNTRL 0x7ee8
#define S_WRITEENABLE 31
@@ -24348,6 +30069,546 @@
#define V_PROTOCOLDATAFIELD(x) ((x) << S_PROTOCOLDATAFIELD)
#define G_PROTOCOLDATAFIELD(x) (((x) >> S_PROTOCOLDATAFIELD) & M_PROTOCOLDATAFIELD)
+#define A_TP_INIC_CTRL0 0x7f00
+#define A_TP_INIC_DBG 0x7f04
+#define A_TP_INIC_PERR_ENABLE 0x7f08
+
+#define S_INICMAC1_ERR 16
+#define M_INICMAC1_ERR 0x3fU
+#define V_INICMAC1_ERR(x) ((x) << S_INICMAC1_ERR)
+#define G_INICMAC1_ERR(x) (((x) >> S_INICMAC1_ERR) & M_INICMAC1_ERR)
+
+#define S_INICMAC0_ERR 0
+#define M_INICMAC0_ERR 0x3fU
+#define V_INICMAC0_ERR(x) ((x) << S_INICMAC0_ERR)
+#define G_INICMAC0_ERR(x) (((x) >> S_INICMAC0_ERR) & M_INICMAC0_ERR)
+
+#define A_TP_INIC_PERR_CAUSE 0x7f0c
+#define A_TP_PARA_REG10 0x7f20
+
+#define S_DIS39320FIX 20
+#define V_DIS39320FIX(x) ((x) << S_DIS39320FIX)
+#define F_DIS39320FIX V_DIS39320FIX(1U)
+
+#define S_IWARPMAXPDULEN 16
+#define M_IWARPMAXPDULEN 0xfU
+#define V_IWARPMAXPDULEN(x) ((x) << S_IWARPMAXPDULEN)
+#define G_IWARPMAXPDULEN(x) (((x) >> S_IWARPMAXPDULEN) & M_IWARPMAXPDULEN)
+
+#define S_TLSMAXRXDATA 0
+#define M_TLSMAXRXDATA 0xffffU
+#define V_TLSMAXRXDATA(x) ((x) << S_TLSMAXRXDATA)
+#define G_TLSMAXRXDATA(x) (((x) >> S_TLSMAXRXDATA) & M_TLSMAXRXDATA)
+
+#define A_TP_TCAM_BIST_CTRL 0x7f24
+#define A_TP_TCAM_BIST_CB_PASS 0x7f28
+#define A_TP_TCAM_BIST_CB_BUSY 0x7f2c
+#define A_TP_C_PERR_ENABLE 0x7f30
+
+#define S_DMXFIFOOVFL 26
+#define V_DMXFIFOOVFL(x) ((x) << S_DMXFIFOOVFL)
+#define F_DMXFIFOOVFL V_DMXFIFOOVFL(1U)
+
+#define S_URX2TPCDDPINTF 25
+#define V_URX2TPCDDPINTF(x) ((x) << S_URX2TPCDDPINTF)
+#define F_URX2TPCDDPINTF V_URX2TPCDDPINTF(1U)
+
+#define S_TPCDISPTOKENFIFO 24
+#define V_TPCDISPTOKENFIFO(x) ((x) << S_TPCDISPTOKENFIFO)
+#define F_TPCDISPTOKENFIFO V_TPCDISPTOKENFIFO(1U)
+
+#define S_TPCDISPCPLFIFO3 23
+#define V_TPCDISPCPLFIFO3(x) ((x) << S_TPCDISPCPLFIFO3)
+#define F_TPCDISPCPLFIFO3 V_TPCDISPCPLFIFO3(1U)
+
+#define S_TPCDISPCPLFIFO2 22
+#define V_TPCDISPCPLFIFO2(x) ((x) << S_TPCDISPCPLFIFO2)
+#define F_TPCDISPCPLFIFO2 V_TPCDISPCPLFIFO2(1U)
+
+#define S_TPCDISPCPLFIFO1 21
+#define V_TPCDISPCPLFIFO1(x) ((x) << S_TPCDISPCPLFIFO1)
+#define F_TPCDISPCPLFIFO1 V_TPCDISPCPLFIFO1(1U)
+
+#define S_TPCDISPCPLFIFO0 20
+#define V_TPCDISPCPLFIFO0(x) ((x) << S_TPCDISPCPLFIFO0)
+#define F_TPCDISPCPLFIFO0 V_TPCDISPCPLFIFO0(1U)
+
+#define S_URXPLDINTFCRC3 19
+#define V_URXPLDINTFCRC3(x) ((x) << S_URXPLDINTFCRC3)
+#define F_URXPLDINTFCRC3 V_URXPLDINTFCRC3(1U)
+
+#define S_URXPLDINTFCRC2 18
+#define V_URXPLDINTFCRC2(x) ((x) << S_URXPLDINTFCRC2)
+#define F_URXPLDINTFCRC2 V_URXPLDINTFCRC2(1U)
+
+#define S_URXPLDINTFCRC1 17
+#define V_URXPLDINTFCRC1(x) ((x) << S_URXPLDINTFCRC1)
+#define F_URXPLDINTFCRC1 V_URXPLDINTFCRC1(1U)
+
+#define S_URXPLDINTFCRC0 16
+#define V_URXPLDINTFCRC0(x) ((x) << S_URXPLDINTFCRC0)
+#define F_URXPLDINTFCRC0 V_URXPLDINTFCRC0(1U)
+
+#define S_DMXDBFIFO 15
+#define V_DMXDBFIFO(x) ((x) << S_DMXDBFIFO)
+#define F_DMXDBFIFO V_DMXDBFIFO(1U)
+
+#define S_DMXDBSRAM 14
+#define V_DMXDBSRAM(x) ((x) << S_DMXDBSRAM)
+#define F_DMXDBSRAM V_DMXDBSRAM(1U)
+
+#define S_DMXCPLFIFO 13
+#define V_DMXCPLFIFO(x) ((x) << S_DMXCPLFIFO)
+#define F_DMXCPLFIFO V_DMXCPLFIFO(1U)
+
+#define S_DMXCPLSRAM 12
+#define V_DMXCPLSRAM(x) ((x) << S_DMXCPLSRAM)
+#define F_DMXCPLSRAM V_DMXCPLSRAM(1U)
+
+#define S_DMXCSUMFIFO 11
+#define V_DMXCSUMFIFO(x) ((x) << S_DMXCSUMFIFO)
+#define F_DMXCSUMFIFO V_DMXCSUMFIFO(1U)
+
+#define S_DMXLENFIFO 10
+#define V_DMXLENFIFO(x) ((x) << S_DMXLENFIFO)
+#define F_DMXLENFIFO V_DMXLENFIFO(1U)
+
+#define S_DMXCHECKFIFO 9
+#define V_DMXCHECKFIFO(x) ((x) << S_DMXCHECKFIFO)
+#define F_DMXCHECKFIFO V_DMXCHECKFIFO(1U)
+
+#define S_DMXWINFIFO 8
+#define V_DMXWINFIFO(x) ((x) << S_DMXWINFIFO)
+#define F_DMXWINFIFO V_DMXWINFIFO(1U)
+
+#define S_EGTOKENFIFO 7
+#define V_EGTOKENFIFO(x) ((x) << S_EGTOKENFIFO)
+#define F_EGTOKENFIFO V_EGTOKENFIFO(1U)
+
+#define S_EGDATAFIFO 6
+#define V_EGDATAFIFO(x) ((x) << S_EGDATAFIFO)
+#define F_EGDATAFIFO V_EGDATAFIFO(1U)
+
+#define S_UTX2TPCINTF3 5
+#define V_UTX2TPCINTF3(x) ((x) << S_UTX2TPCINTF3)
+#define F_UTX2TPCINTF3 V_UTX2TPCINTF3(1U)
+
+#define S_UTX2TPCINTF2 4
+#define V_UTX2TPCINTF2(x) ((x) << S_UTX2TPCINTF2)
+#define F_UTX2TPCINTF2 V_UTX2TPCINTF2(1U)
+
+#define S_UTX2TPCINTF1 3
+#define V_UTX2TPCINTF1(x) ((x) << S_UTX2TPCINTF1)
+#define F_UTX2TPCINTF1 V_UTX2TPCINTF1(1U)
+
+#define S_UTX2TPCINTF0 2
+#define V_UTX2TPCINTF0(x) ((x) << S_UTX2TPCINTF0)
+#define F_UTX2TPCINTF0 V_UTX2TPCINTF0(1U)
+
+#define S_LBKTOKENFIFO 1
+#define V_LBKTOKENFIFO(x) ((x) << S_LBKTOKENFIFO)
+#define F_LBKTOKENFIFO V_LBKTOKENFIFO(1U)
+
+#define S_LBKDATAFIFO 0
+#define V_LBKDATAFIFO(x) ((x) << S_LBKDATAFIFO)
+#define F_LBKDATAFIFO V_LBKDATAFIFO(1U)
+
+#define A_TP_C_PERR_CAUSE 0x7f34
+#define A_TP_E_EG_PERR_ENABLE 0x7f38
+
+#define S_MPSLPBKTOKENFIFO 25
+#define V_MPSLPBKTOKENFIFO(x) ((x) << S_MPSLPBKTOKENFIFO)
+#define F_MPSLPBKTOKENFIFO V_MPSLPBKTOKENFIFO(1U)
+
+#define S_MPSMACTOKENFIFO 24
+#define V_MPSMACTOKENFIFO(x) ((x) << S_MPSMACTOKENFIFO)
+#define F_MPSMACTOKENFIFO V_MPSMACTOKENFIFO(1U)
+
+#define S_DISPIPSECFIFO3 23
+#define V_DISPIPSECFIFO3(x) ((x) << S_DISPIPSECFIFO3)
+#define F_DISPIPSECFIFO3 V_DISPIPSECFIFO3(1U)
+
+#define S_DISPTCPFIFO3 22
+#define V_DISPTCPFIFO3(x) ((x) << S_DISPTCPFIFO3)
+#define F_DISPTCPFIFO3 V_DISPTCPFIFO3(1U)
+
+#define S_DISPIPFIFO3 21
+#define V_DISPIPFIFO3(x) ((x) << S_DISPIPFIFO3)
+#define F_DISPIPFIFO3 V_DISPIPFIFO3(1U)
+
+#define S_DISPETHFIFO3 20
+#define V_DISPETHFIFO3(x) ((x) << S_DISPETHFIFO3)
+#define F_DISPETHFIFO3 V_DISPETHFIFO3(1U)
+
+#define S_DISPGREFIFO3 19
+#define V_DISPGREFIFO3(x) ((x) << S_DISPGREFIFO3)
+#define F_DISPGREFIFO3 V_DISPGREFIFO3(1U)
+
+#define S_DISPCPL5FIFO3 18
+#define V_DISPCPL5FIFO3(x) ((x) << S_DISPCPL5FIFO3)
+#define F_DISPCPL5FIFO3 V_DISPCPL5FIFO3(1U)
+
+#define S_DISPIPSECFIFO2 17
+#define V_DISPIPSECFIFO2(x) ((x) << S_DISPIPSECFIFO2)
+#define F_DISPIPSECFIFO2 V_DISPIPSECFIFO2(1U)
+
+#define S_DISPTCPFIFO2 16
+#define V_DISPTCPFIFO2(x) ((x) << S_DISPTCPFIFO2)
+#define F_DISPTCPFIFO2 V_DISPTCPFIFO2(1U)
+
+#define S_DISPIPFIFO2 15
+#define V_DISPIPFIFO2(x) ((x) << S_DISPIPFIFO2)
+#define F_DISPIPFIFO2 V_DISPIPFIFO2(1U)
+
+#define S_DISPETHFIFO2 14
+#define V_DISPETHFIFO2(x) ((x) << S_DISPETHFIFO2)
+#define F_DISPETHFIFO2 V_DISPETHFIFO2(1U)
+
+#define S_DISPGREFIFO2 13
+#define V_DISPGREFIFO2(x) ((x) << S_DISPGREFIFO2)
+#define F_DISPGREFIFO2 V_DISPGREFIFO2(1U)
+
+#define S_DISPCPL5FIFO2 12
+#define V_DISPCPL5FIFO2(x) ((x) << S_DISPCPL5FIFO2)
+#define F_DISPCPL5FIFO2 V_DISPCPL5FIFO2(1U)
+
+#define S_DISPIPSECFIFO1 11
+#define V_DISPIPSECFIFO1(x) ((x) << S_DISPIPSECFIFO1)
+#define F_DISPIPSECFIFO1 V_DISPIPSECFIFO1(1U)
+
+#define S_DISPTCPFIFO1 10
+#define V_DISPTCPFIFO1(x) ((x) << S_DISPTCPFIFO1)
+#define F_DISPTCPFIFO1 V_DISPTCPFIFO1(1U)
+
+#define S_DISPIPFIFO1 9
+#define V_DISPIPFIFO1(x) ((x) << S_DISPIPFIFO1)
+#define F_DISPIPFIFO1 V_DISPIPFIFO1(1U)
+
+#define S_DISPETHFIFO1 8
+#define V_DISPETHFIFO1(x) ((x) << S_DISPETHFIFO1)
+#define F_DISPETHFIFO1 V_DISPETHFIFO1(1U)
+
+#define S_DISPGREFIFO1 7
+#define V_DISPGREFIFO1(x) ((x) << S_DISPGREFIFO1)
+#define F_DISPGREFIFO1 V_DISPGREFIFO1(1U)
+
+#define S_DISPCPL5FIFO1 6
+#define V_DISPCPL5FIFO1(x) ((x) << S_DISPCPL5FIFO1)
+#define F_DISPCPL5FIFO1 V_DISPCPL5FIFO1(1U)
+
+#define S_DISPIPSECFIFO0 5
+#define V_DISPIPSECFIFO0(x) ((x) << S_DISPIPSECFIFO0)
+#define F_DISPIPSECFIFO0 V_DISPIPSECFIFO0(1U)
+
+#define S_DISPTCPFIFO0 4
+#define V_DISPTCPFIFO0(x) ((x) << S_DISPTCPFIFO0)
+#define F_DISPTCPFIFO0 V_DISPTCPFIFO0(1U)
+
+#define S_DISPIPFIFO0 3
+#define V_DISPIPFIFO0(x) ((x) << S_DISPIPFIFO0)
+#define F_DISPIPFIFO0 V_DISPIPFIFO0(1U)
+
+#define S_DISPETHFIFO0 2
+#define V_DISPETHFIFO0(x) ((x) << S_DISPETHFIFO0)
+#define F_DISPETHFIFO0 V_DISPETHFIFO0(1U)
+
+#define S_DISPGREFIFO0 1
+#define V_DISPGREFIFO0(x) ((x) << S_DISPGREFIFO0)
+#define F_DISPGREFIFO0 V_DISPGREFIFO0(1U)
+
+#define S_DISPCPL5FIFO0 0
+#define V_DISPCPL5FIFO0(x) ((x) << S_DISPCPL5FIFO0)
+#define F_DISPCPL5FIFO0 V_DISPCPL5FIFO0(1U)
+
+#define A_TP_E_EG_PERR_CAUSE 0x7f3c
+#define A_TP_E_IN0_PERR_ENABLE 0x7f40
+
+#define S_DMXISSFIFO 30
+#define V_DMXISSFIFO(x) ((x) << S_DMXISSFIFO)
+#define F_DMXISSFIFO V_DMXISSFIFO(1U)
+
+#define S_DMXERRFIFO 29
+#define V_DMXERRFIFO(x) ((x) << S_DMXERRFIFO)
+#define F_DMXERRFIFO V_DMXERRFIFO(1U)
+
+#define S_DMXATTFIFO 28
+#define V_DMXATTFIFO(x) ((x) << S_DMXATTFIFO)
+#define F_DMXATTFIFO V_DMXATTFIFO(1U)
+
+#define S_DMXTCPFIFO 27
+#define V_DMXTCPFIFO(x) ((x) << S_DMXTCPFIFO)
+#define F_DMXTCPFIFO V_DMXTCPFIFO(1U)
+
+#define S_DMXMPAFIFO 26
+#define V_DMXMPAFIFO(x) ((x) << S_DMXMPAFIFO)
+#define F_DMXMPAFIFO V_DMXMPAFIFO(1U)
+
+#define S_DMXOPTFIFO 25
+#define V_DMXOPTFIFO(x) ((x) << S_DMXOPTFIFO)
+#define F_DMXOPTFIFO V_DMXOPTFIFO(1U)
+
+#define S_INGTOKENFIFO 24
+#define V_INGTOKENFIFO(x) ((x) << S_INGTOKENFIFO)
+#define F_INGTOKENFIFO V_INGTOKENFIFO(1U)
+
+#define S_DMXPLDCHKOVFL1 21
+#define V_DMXPLDCHKOVFL1(x) ((x) << S_DMXPLDCHKOVFL1)
+#define F_DMXPLDCHKOVFL1 V_DMXPLDCHKOVFL1(1U)
+
+#define S_DMXPLDCHKFIFO1 20
+#define V_DMXPLDCHKFIFO1(x) ((x) << S_DMXPLDCHKFIFO1)
+#define F_DMXPLDCHKFIFO1 V_DMXPLDCHKFIFO1(1U)
+
+#define S_DMXOPTFIFO1 19
+#define V_DMXOPTFIFO1(x) ((x) << S_DMXOPTFIFO1)
+#define F_DMXOPTFIFO1 V_DMXOPTFIFO1(1U)
+
+#define S_DMXMPAFIFO1 18
+#define V_DMXMPAFIFO1(x) ((x) << S_DMXMPAFIFO1)
+#define F_DMXMPAFIFO1 V_DMXMPAFIFO1(1U)
+
+#define S_DMXDBFIFO1 17
+#define V_DMXDBFIFO1(x) ((x) << S_DMXDBFIFO1)
+#define F_DMXDBFIFO1 V_DMXDBFIFO1(1U)
+
+#define S_DMXATTFIFO1 16
+#define V_DMXATTFIFO1(x) ((x) << S_DMXATTFIFO1)
+#define F_DMXATTFIFO1 V_DMXATTFIFO1(1U)
+
+#define S_DMXISSFIFO1 15
+#define V_DMXISSFIFO1(x) ((x) << S_DMXISSFIFO1)
+#define F_DMXISSFIFO1 V_DMXISSFIFO1(1U)
+
+#define S_DMXTCPFIFO1 14
+#define V_DMXTCPFIFO1(x) ((x) << S_DMXTCPFIFO1)
+#define F_DMXTCPFIFO1 V_DMXTCPFIFO1(1U)
+
+#define S_DMXERRFIFO1 13
+#define V_DMXERRFIFO1(x) ((x) << S_DMXERRFIFO1)
+#define F_DMXERRFIFO1 V_DMXERRFIFO1(1U)
+
+#define S_MPS2TPINTF1 12
+#define V_MPS2TPINTF1(x) ((x) << S_MPS2TPINTF1)
+#define F_MPS2TPINTF1 V_MPS2TPINTF1(1U)
+
+#define S_DMXPLDCHKOVFL0 9
+#define V_DMXPLDCHKOVFL0(x) ((x) << S_DMXPLDCHKOVFL0)
+#define F_DMXPLDCHKOVFL0 V_DMXPLDCHKOVFL0(1U)
+
+#define S_DMXPLDCHKFIFO0 8
+#define V_DMXPLDCHKFIFO0(x) ((x) << S_DMXPLDCHKFIFO0)
+#define F_DMXPLDCHKFIFO0 V_DMXPLDCHKFIFO0(1U)
+
+#define S_DMXOPTFIFO0 7
+#define V_DMXOPTFIFO0(x) ((x) << S_DMXOPTFIFO0)
+#define F_DMXOPTFIFO0 V_DMXOPTFIFO0(1U)
+
+#define S_DMXMPAFIFO0 6
+#define V_DMXMPAFIFO0(x) ((x) << S_DMXMPAFIFO0)
+#define F_DMXMPAFIFO0 V_DMXMPAFIFO0(1U)
+
+#define S_DMXDBFIFO0 5
+#define V_DMXDBFIFO0(x) ((x) << S_DMXDBFIFO0)
+#define F_DMXDBFIFO0 V_DMXDBFIFO0(1U)
+
+#define S_DMXATTFIFO0 4
+#define V_DMXATTFIFO0(x) ((x) << S_DMXATTFIFO0)
+#define F_DMXATTFIFO0 V_DMXATTFIFO0(1U)
+
+#define S_DMXISSFIFO0 3
+#define V_DMXISSFIFO0(x) ((x) << S_DMXISSFIFO0)
+#define F_DMXISSFIFO0 V_DMXISSFIFO0(1U)
+
+#define S_DMXTCPFIFO0 2
+#define V_DMXTCPFIFO0(x) ((x) << S_DMXTCPFIFO0)
+#define F_DMXTCPFIFO0 V_DMXTCPFIFO0(1U)
+
+#define S_DMXERRFIFO0 1
+#define V_DMXERRFIFO0(x) ((x) << S_DMXERRFIFO0)
+#define F_DMXERRFIFO0 V_DMXERRFIFO0(1U)
+
+#define S_MPS2TPINTF0 0
+#define V_MPS2TPINTF0(x) ((x) << S_MPS2TPINTF0)
+#define F_MPS2TPINTF0 V_MPS2TPINTF0(1U)
+
+#define A_TP_E_IN0_PERR_CAUSE 0x7f44
+#define A_TP_E_IN1_PERR_ENABLE 0x7f48
+
+#define S_DMXPLDCHKOVFL3 21
+#define V_DMXPLDCHKOVFL3(x) ((x) << S_DMXPLDCHKOVFL3)
+#define F_DMXPLDCHKOVFL3 V_DMXPLDCHKOVFL3(1U)
+
+#define S_DMXPLDCHKFIFO3 20
+#define V_DMXPLDCHKFIFO3(x) ((x) << S_DMXPLDCHKFIFO3)
+#define F_DMXPLDCHKFIFO3 V_DMXPLDCHKFIFO3(1U)
+
+#define S_DMXOPTFIFO3 19
+#define V_DMXOPTFIFO3(x) ((x) << S_DMXOPTFIFO3)
+#define F_DMXOPTFIFO3 V_DMXOPTFIFO3(1U)
+
+#define S_DMXMPAFIFO3 18
+#define V_DMXMPAFIFO3(x) ((x) << S_DMXMPAFIFO3)
+#define F_DMXMPAFIFO3 V_DMXMPAFIFO3(1U)
+
+#define S_DMXDBFIFO3 17
+#define V_DMXDBFIFO3(x) ((x) << S_DMXDBFIFO3)
+#define F_DMXDBFIFO3 V_DMXDBFIFO3(1U)
+
+#define S_DMXATTFIFO3 16
+#define V_DMXATTFIFO3(x) ((x) << S_DMXATTFIFO3)
+#define F_DMXATTFIFO3 V_DMXATTFIFO3(1U)
+
+#define S_DMXISSFIFO3 15
+#define V_DMXISSFIFO3(x) ((x) << S_DMXISSFIFO3)
+#define F_DMXISSFIFO3 V_DMXISSFIFO3(1U)
+
+#define S_DMXTCPFIFO3 14
+#define V_DMXTCPFIFO3(x) ((x) << S_DMXTCPFIFO3)
+#define F_DMXTCPFIFO3 V_DMXTCPFIFO3(1U)
+
+#define S_DMXERRFIFO3 13
+#define V_DMXERRFIFO3(x) ((x) << S_DMXERRFIFO3)
+#define F_DMXERRFIFO3 V_DMXERRFIFO3(1U)
+
+#define S_MPS2TPINTF3 12
+#define V_MPS2TPINTF3(x) ((x) << S_MPS2TPINTF3)
+#define F_MPS2TPINTF3 V_MPS2TPINTF3(1U)
+
+#define S_DMXPLDCHKOVFL2 9
+#define V_DMXPLDCHKOVFL2(x) ((x) << S_DMXPLDCHKOVFL2)
+#define F_DMXPLDCHKOVFL2 V_DMXPLDCHKOVFL2(1U)
+
+#define S_DMXPLDCHKFIFO2 8
+#define V_DMXPLDCHKFIFO2(x) ((x) << S_DMXPLDCHKFIFO2)
+#define F_DMXPLDCHKFIFO2 V_DMXPLDCHKFIFO2(1U)
+
+#define S_DMXOPTFIFO2 7
+#define V_DMXOPTFIFO2(x) ((x) << S_DMXOPTFIFO2)
+#define F_DMXOPTFIFO2 V_DMXOPTFIFO2(1U)
+
+#define S_DMXMPAFIFO2 6
+#define V_DMXMPAFIFO2(x) ((x) << S_DMXMPAFIFO2)
+#define F_DMXMPAFIFO2 V_DMXMPAFIFO2(1U)
+
+#define S_DMXDBFIFO2 5
+#define V_DMXDBFIFO2(x) ((x) << S_DMXDBFIFO2)
+#define F_DMXDBFIFO2 V_DMXDBFIFO2(1U)
+
+#define S_DMXATTFIFO2 4
+#define V_DMXATTFIFO2(x) ((x) << S_DMXATTFIFO2)
+#define F_DMXATTFIFO2 V_DMXATTFIFO2(1U)
+
+#define S_DMXISSFIFO2 3
+#define V_DMXISSFIFO2(x) ((x) << S_DMXISSFIFO2)
+#define F_DMXISSFIFO2 V_DMXISSFIFO2(1U)
+
+#define S_DMXTCPFIFO2 2
+#define V_DMXTCPFIFO2(x) ((x) << S_DMXTCPFIFO2)
+#define F_DMXTCPFIFO2 V_DMXTCPFIFO2(1U)
+
+#define S_DMXERRFIFO2 1
+#define V_DMXERRFIFO2(x) ((x) << S_DMXERRFIFO2)
+#define F_DMXERRFIFO2 V_DMXERRFIFO2(1U)
+
+#define S_MPS2TPINTF2 0
+#define V_MPS2TPINTF2(x) ((x) << S_MPS2TPINTF2)
+#define F_MPS2TPINTF2 V_MPS2TPINTF2(1U)
+
+#define A_TP_E_IN1_PERR_CAUSE 0x7f4c
+#define A_TP_O_PERR_ENABLE 0x7f50
+
+#define S_DMARBTPERR 31
+#define V_DMARBTPERR(x) ((x) << S_DMARBTPERR)
+#define F_DMARBTPERR V_DMARBTPERR(1U)
+
+#define S_MMGRCACHEDATASRAM 24
+#define V_MMGRCACHEDATASRAM(x) ((x) << S_MMGRCACHEDATASRAM)
+#define F_MMGRCACHEDATASRAM V_MMGRCACHEDATASRAM(1U)
+
+#define S_MMGRCACHETAGFIFO 23
+#define V_MMGRCACHETAGFIFO(x) ((x) << S_MMGRCACHETAGFIFO)
+#define F_MMGRCACHETAGFIFO V_MMGRCACHETAGFIFO(1U)
+
+#define S_TPPROTOSRAM 16
+#define V_TPPROTOSRAM(x) ((x) << S_TPPROTOSRAM)
+#define F_TPPROTOSRAM V_TPPROTOSRAM(1U)
+
+#define S_HSPSRAM 15
+#define V_HSPSRAM(x) ((x) << S_HSPSRAM)
+#define F_HSPSRAM V_HSPSRAM(1U)
+
+#define S_RATEGRPSRAM 14
+#define V_RATEGRPSRAM(x) ((x) << S_RATEGRPSRAM)
+#define F_RATEGRPSRAM V_RATEGRPSRAM(1U)
+
+#define S_TXFBSEQFIFO 13
+#define V_TXFBSEQFIFO(x) ((x) << S_TXFBSEQFIFO)
+#define F_TXFBSEQFIFO V_TXFBSEQFIFO(1U)
+
+#define S_CMDATASRAM 12
+#define V_CMDATASRAM(x) ((x) << S_CMDATASRAM)
+#define F_CMDATASRAM V_CMDATASRAM(1U)
+
+#define S_CMTAGFIFO 11
+#define V_CMTAGFIFO(x) ((x) << S_CMTAGFIFO)
+#define F_CMTAGFIFO V_CMTAGFIFO(1U)
+
+#define S_RFCOPFIFO 10
+#define V_RFCOPFIFO(x) ((x) << S_RFCOPFIFO)
+#define F_RFCOPFIFO V_RFCOPFIFO(1U)
+
+#define S_DELINVFIFO 9
+#define V_DELINVFIFO(x) ((x) << S_DELINVFIFO)
+#define F_DELINVFIFO V_DELINVFIFO(1U)
+
+#define S_RSSCFGSRAM 8
+#define V_RSSCFGSRAM(x) ((x) << S_RSSCFGSRAM)
+#define F_RSSCFGSRAM V_RSSCFGSRAM(1U)
+
+#define S_RSSKEYSRAM 7
+#define V_RSSKEYSRAM(x) ((x) << S_RSSKEYSRAM)
+#define F_RSSKEYSRAM V_RSSKEYSRAM(1U)
+
+#define S_RSSLKPSRAM 6
+#define V_RSSLKPSRAM(x) ((x) << S_RSSLKPSRAM)
+#define F_RSSLKPSRAM V_RSSLKPSRAM(1U)
+
+#define S_SRQSRAM 5
+#define V_SRQSRAM(x) ((x) << S_SRQSRAM)
+#define F_SRQSRAM V_SRQSRAM(1U)
+
+#define S_ARPDASRAM 4
+#define V_ARPDASRAM(x) ((x) << S_ARPDASRAM)
+#define F_ARPDASRAM V_ARPDASRAM(1U)
+
+#define S_ARPSASRAM 3
+#define V_ARPSASRAM(x) ((x) << S_ARPSASRAM)
+#define F_ARPSASRAM V_ARPSASRAM(1U)
+
+#define S_ARPGRESRAM 2
+#define V_ARPGRESRAM(x) ((x) << S_ARPGRESRAM)
+#define F_ARPGRESRAM V_ARPGRESRAM(1U)
+
+#define S_ARPIPSECSRAM1 1
+#define V_ARPIPSECSRAM1(x) ((x) << S_ARPIPSECSRAM1)
+#define F_ARPIPSECSRAM1 V_ARPIPSECSRAM1(1U)
+
+#define S_ARPIPSECSRAM0 0
+#define V_ARPIPSECSRAM0(x) ((x) << S_ARPIPSECSRAM0)
+#define F_ARPIPSECSRAM0 V_ARPIPSECSRAM0(1U)
+
+#define A_TP_O_PERR_CAUSE 0x7f54
+#define A_TP_CERR_ENABLE 0x7f58
+
+#define S_TPCEGDATAFIFO 8
+#define V_TPCEGDATAFIFO(x) ((x) << S_TPCEGDATAFIFO)
+#define F_TPCEGDATAFIFO V_TPCEGDATAFIFO(1U)
+
+#define S_TPCLBKDATAFIFO 7
+#define V_TPCLBKDATAFIFO(x) ((x) << S_TPCLBKDATAFIFO)
+#define F_TPCLBKDATAFIFO V_TPCLBKDATAFIFO(1U)
+
+#define A_TP_CERR_CAUSE 0x7f5c
#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
#define S_TXTIMERSEPQ7 16
@@ -24520,6 +30781,137 @@
#define A_TP_TX_MOD_C3_C2_RATE_LIMIT 0xa
#define A_TP_TX_MOD_C1_C0_RATE_LIMIT 0xb
+#define A_TP_RX_MOD_Q3_Q2_TIMER_SEPARATOR 0xc
+
+#define S_RXTIMERSEPQ3 16
+#define M_RXTIMERSEPQ3 0xffffU
+#define V_RXTIMERSEPQ3(x) ((x) << S_RXTIMERSEPQ3)
+#define G_RXTIMERSEPQ3(x) (((x) >> S_RXTIMERSEPQ3) & M_RXTIMERSEPQ3)
+
+#define S_RXTIMERSEPQ2 0
+#define M_RXTIMERSEPQ2 0xffffU
+#define V_RXTIMERSEPQ2(x) ((x) << S_RXTIMERSEPQ2)
+#define G_RXTIMERSEPQ2(x) (((x) >> S_RXTIMERSEPQ2) & M_RXTIMERSEPQ2)
+
+#define A_TP_RX_MOD_Q3_Q2_RATE_LIMIT 0xd
+
+#define S_RXRATEINCQ3 24
+#define M_RXRATEINCQ3 0xffU
+#define V_RXRATEINCQ3(x) ((x) << S_RXRATEINCQ3)
+#define G_RXRATEINCQ3(x) (((x) >> S_RXRATEINCQ3) & M_RXRATEINCQ3)
+
+#define S_RXRATETCKQ3 16
+#define M_RXRATETCKQ3 0xffU
+#define V_RXRATETCKQ3(x) ((x) << S_RXRATETCKQ3)
+#define G_RXRATETCKQ3(x) (((x) >> S_RXRATETCKQ3) & M_RXRATETCKQ3)
+
+#define S_RXRATEINCQ2 8
+#define M_RXRATEINCQ2 0xffU
+#define V_RXRATEINCQ2(x) ((x) << S_RXRATEINCQ2)
+#define G_RXRATEINCQ2(x) (((x) >> S_RXRATEINCQ2) & M_RXRATEINCQ2)
+
+#define S_RXRATETCKQ2 0
+#define M_RXRATETCKQ2 0xffU
+#define V_RXRATETCKQ2(x) ((x) << S_RXRATETCKQ2)
+#define G_RXRATETCKQ2(x) (((x) >> S_RXRATETCKQ2) & M_RXRATETCKQ2)
+
+#define A_TP_RX_LPBK_CONG 0x1c
+#define A_TP_RX_SCHED_MOD 0x1d
+
+#define S_T7_ENABLELPBKFULL1 28
+#define M_T7_ENABLELPBKFULL1 0xfU
+#define V_T7_ENABLELPBKFULL1(x) ((x) << S_T7_ENABLELPBKFULL1)
+#define G_T7_ENABLELPBKFULL1(x) (((x) >> S_T7_ENABLELPBKFULL1) & M_T7_ENABLELPBKFULL1)
+
+#define S_T7_ENABLEFIFOFULL1 24
+#define M_T7_ENABLEFIFOFULL1 0xfU
+#define V_T7_ENABLEFIFOFULL1(x) ((x) << S_T7_ENABLEFIFOFULL1)
+#define G_T7_ENABLEFIFOFULL1(x) (((x) >> S_T7_ENABLEFIFOFULL1) & M_T7_ENABLEFIFOFULL1)
+
+#define S_T7_ENABLEPCMDFULL1 20
+#define M_T7_ENABLEPCMDFULL1 0xfU
+#define V_T7_ENABLEPCMDFULL1(x) ((x) << S_T7_ENABLEPCMDFULL1)
+#define G_T7_ENABLEPCMDFULL1(x) (((x) >> S_T7_ENABLEPCMDFULL1) & M_T7_ENABLEPCMDFULL1)
+
+#define S_T7_ENABLEHDRFULL1 16
+#define M_T7_ENABLEHDRFULL1 0xfU
+#define V_T7_ENABLEHDRFULL1(x) ((x) << S_T7_ENABLEHDRFULL1)
+#define G_T7_ENABLEHDRFULL1(x) (((x) >> S_T7_ENABLEHDRFULL1) & M_T7_ENABLEHDRFULL1)
+
+#define S_T7_ENABLELPBKFULL0 12
+#define M_T7_ENABLELPBKFULL0 0xfU
+#define V_T7_ENABLELPBKFULL0(x) ((x) << S_T7_ENABLELPBKFULL0)
+#define G_T7_ENABLELPBKFULL0(x) (((x) >> S_T7_ENABLELPBKFULL0) & M_T7_ENABLELPBKFULL0)
+
+#define S_T7_ENABLEFIFOFULL0 8
+#define M_T7_ENABLEFIFOFULL0 0xfU
+#define V_T7_ENABLEFIFOFULL0(x) ((x) << S_T7_ENABLEFIFOFULL0)
+#define G_T7_ENABLEFIFOFULL0(x) (((x) >> S_T7_ENABLEFIFOFULL0) & M_T7_ENABLEFIFOFULL0)
+
+#define S_T7_ENABLEPCMDFULL0 4
+#define M_T7_ENABLEPCMDFULL0 0xfU
+#define V_T7_ENABLEPCMDFULL0(x) ((x) << S_T7_ENABLEPCMDFULL0)
+#define G_T7_ENABLEPCMDFULL0(x) (((x) >> S_T7_ENABLEPCMDFULL0) & M_T7_ENABLEPCMDFULL0)
+
+#define S_T7_ENABLEHDRFULL0 0
+#define M_T7_ENABLEHDRFULL0 0xfU
+#define V_T7_ENABLEHDRFULL0(x) ((x) << S_T7_ENABLEHDRFULL0)
+#define G_T7_ENABLEHDRFULL0(x) (((x) >> S_T7_ENABLEHDRFULL0) & M_T7_ENABLEHDRFULL0)
+
+#define A_TP_RX_SCHED_MOD_CH3_CH2 0x1e
+
+#define S_ENABLELPBKFULL3 28
+#define M_ENABLELPBKFULL3 0xfU
+#define V_ENABLELPBKFULL3(x) ((x) << S_ENABLELPBKFULL3)
+#define G_ENABLELPBKFULL3(x) (((x) >> S_ENABLELPBKFULL3) & M_ENABLELPBKFULL3)
+
+#define S_ENABLEFIFOFULL3 24
+#define M_ENABLEFIFOFULL3 0xfU
+#define V_ENABLEFIFOFULL3(x) ((x) << S_ENABLEFIFOFULL3)
+#define G_ENABLEFIFOFULL3(x) (((x) >> S_ENABLEFIFOFULL3) & M_ENABLEFIFOFULL3)
+
+#define S_ENABLEPCMDFULL3 20
+#define M_ENABLEPCMDFULL3 0xfU
+#define V_ENABLEPCMDFULL3(x) ((x) << S_ENABLEPCMDFULL3)
+#define G_ENABLEPCMDFULL3(x) (((x) >> S_ENABLEPCMDFULL3) & M_ENABLEPCMDFULL3)
+
+#define S_ENABLEHDRFULL3 16
+#define M_ENABLEHDRFULL3 0xfU
+#define V_ENABLEHDRFULL3(x) ((x) << S_ENABLEHDRFULL3)
+#define G_ENABLEHDRFULL3(x) (((x) >> S_ENABLEHDRFULL3) & M_ENABLEHDRFULL3)
+
+#define S_ENABLELPBKFULL2 12
+#define M_ENABLELPBKFULL2 0xfU
+#define V_ENABLELPBKFULL2(x) ((x) << S_ENABLELPBKFULL2)
+#define G_ENABLELPBKFULL2(x) (((x) >> S_ENABLELPBKFULL2) & M_ENABLELPBKFULL2)
+
+#define S_ENABLEFIFOFULL2 8
+#define M_ENABLEFIFOFULL2 0xfU
+#define V_ENABLEFIFOFULL2(x) ((x) << S_ENABLEFIFOFULL2)
+#define G_ENABLEFIFOFULL2(x) (((x) >> S_ENABLEFIFOFULL2) & M_ENABLEFIFOFULL2)
+
+#define S_ENABLEPCMDFULL2 4
+#define M_ENABLEPCMDFULL2 0xfU
+#define V_ENABLEPCMDFULL2(x) ((x) << S_ENABLEPCMDFULL2)
+#define G_ENABLEPCMDFULL2(x) (((x) >> S_ENABLEPCMDFULL2) & M_ENABLEPCMDFULL2)
+
+#define S_ENABLEHDRFULL2 0
+#define M_ENABLEHDRFULL2 0xfU
+#define V_ENABLEHDRFULL2(x) ((x) << S_ENABLEHDRFULL2)
+#define G_ENABLEHDRFULL2(x) (((x) >> S_ENABLEHDRFULL2) & M_ENABLEHDRFULL2)
+
+#define A_TP_RX_SCHED_MAP_CH3_CH2 0x1f
+
+#define S_T7_RXMAPCHANNEL3 16
+#define M_T7_RXMAPCHANNEL3 0xffffU
+#define V_T7_RXMAPCHANNEL3(x) ((x) << S_T7_RXMAPCHANNEL3)
+#define G_T7_RXMAPCHANNEL3(x) (((x) >> S_T7_RXMAPCHANNEL3) & M_T7_RXMAPCHANNEL3)
+
+#define S_T7_RXMAPCHANNEL2 0
+#define M_T7_RXMAPCHANNEL2 0xffffU
+#define V_T7_RXMAPCHANNEL2(x) ((x) << S_T7_RXMAPCHANNEL2)
+#define G_T7_RXMAPCHANNEL2(x) (((x) >> S_T7_RXMAPCHANNEL2) & M_T7_RXMAPCHANNEL2)
+
#define A_TP_RX_SCHED_MAP 0x20
#define S_RXMAPCHANNEL3 24
@@ -24542,6 +30934,16 @@
#define V_RXMAPCHANNEL0(x) ((x) << S_RXMAPCHANNEL0)
#define G_RXMAPCHANNEL0(x) (((x) >> S_RXMAPCHANNEL0) & M_RXMAPCHANNEL0)
+#define S_T7_RXMAPCHANNEL1 16
+#define M_T7_RXMAPCHANNEL1 0xffffU
+#define V_T7_RXMAPCHANNEL1(x) ((x) << S_T7_RXMAPCHANNEL1)
+#define G_T7_RXMAPCHANNEL1(x) (((x) >> S_T7_RXMAPCHANNEL1) & M_T7_RXMAPCHANNEL1)
+
+#define S_T7_RXMAPCHANNEL0 0
+#define M_T7_RXMAPCHANNEL0 0xffffU
+#define V_T7_RXMAPCHANNEL0(x) ((x) << S_T7_RXMAPCHANNEL0)
+#define G_T7_RXMAPCHANNEL0(x) (((x) >> S_T7_RXMAPCHANNEL0) & M_T7_RXMAPCHANNEL0)
+
#define A_TP_RX_SCHED_SGE 0x21
#define S_RXSGEMOD1 12
@@ -24570,6 +30972,16 @@
#define V_RXSGECHANNEL0(x) ((x) << S_RXSGECHANNEL0)
#define F_RXSGECHANNEL0 V_RXSGECHANNEL0(1U)
+#define S_RXSGEMOD3 20
+#define M_RXSGEMOD3 0xfU
+#define V_RXSGEMOD3(x) ((x) << S_RXSGEMOD3)
+#define G_RXSGEMOD3(x) (((x) >> S_RXSGEMOD3) & M_RXSGEMOD3)
+
+#define S_RXSGEMOD2 16
+#define M_RXSGEMOD2 0xfU
+#define V_RXSGEMOD2(x) ((x) << S_RXSGEMOD2)
+#define G_RXSGEMOD2(x) (((x) >> S_RXSGEMOD2) & M_RXSGEMOD2)
+
#define A_TP_TX_SCHED_MAP 0x22
#define S_TXMAPCHANNEL3 12
@@ -24600,6 +31012,14 @@
#define V_TXLPKCHANNEL0(x) ((x) << S_TXLPKCHANNEL0)
#define F_TXLPKCHANNEL0 V_TXLPKCHANNEL0(1U)
+#define S_TXLPKCHANNEL3 19
+#define V_TXLPKCHANNEL3(x) ((x) << S_TXLPKCHANNEL3)
+#define F_TXLPKCHANNEL3 V_TXLPKCHANNEL3(1U)
+
+#define S_TXLPKCHANNEL2 18
+#define V_TXLPKCHANNEL2(x) ((x) << S_TXLPKCHANNEL2)
+#define F_TXLPKCHANNEL2 V_TXLPKCHANNEL2(1U)
+
#define A_TP_TX_SCHED_HDR 0x23
#define S_TXMAPHDRCHANNEL7 28
@@ -24827,6 +31247,69 @@
#define V_RXMAPE2CCHANNEL0(x) ((x) << S_RXMAPE2CCHANNEL0)
#define F_RXMAPE2CCHANNEL0 V_RXMAPE2CCHANNEL0(1U)
+#define S_T7_LB_MODE 30
+#define M_T7_LB_MODE 0x3U
+#define V_T7_LB_MODE(x) ((x) << S_T7_LB_MODE)
+#define G_T7_LB_MODE(x) (((x) >> S_T7_LB_MODE) & M_T7_LB_MODE)
+
+#define S_ING_LB_MODE 28
+#define M_ING_LB_MODE 0x3U
+#define V_ING_LB_MODE(x) ((x) << S_ING_LB_MODE)
+#define G_ING_LB_MODE(x) (((x) >> S_ING_LB_MODE) & M_ING_LB_MODE)
+
+#define S_RXC_LB_MODE 26
+#define M_RXC_LB_MODE 0x3U
+#define V_RXC_LB_MODE(x) ((x) << S_RXC_LB_MODE)
+#define G_RXC_LB_MODE(x) (((x) >> S_RXC_LB_MODE) & M_RXC_LB_MODE)
+
+#define S_SINGLERXCHANNEL 25
+#define V_SINGLERXCHANNEL(x) ((x) << S_SINGLERXCHANNEL)
+#define F_SINGLERXCHANNEL V_SINGLERXCHANNEL(1U)
+
+#define S_RXCHANNELCHECK 24
+#define V_RXCHANNELCHECK(x) ((x) << S_RXCHANNELCHECK)
+#define F_RXCHANNELCHECK V_RXCHANNELCHECK(1U)
+
+#define S_T7_RXMAPC2CCHANNEL3 21
+#define M_T7_RXMAPC2CCHANNEL3 0x7U
+#define V_T7_RXMAPC2CCHANNEL3(x) ((x) << S_T7_RXMAPC2CCHANNEL3)
+#define G_T7_RXMAPC2CCHANNEL3(x) (((x) >> S_T7_RXMAPC2CCHANNEL3) & M_T7_RXMAPC2CCHANNEL3)
+
+#define S_T7_RXMAPC2CCHANNEL2 18
+#define M_T7_RXMAPC2CCHANNEL2 0x7U
+#define V_T7_RXMAPC2CCHANNEL2(x) ((x) << S_T7_RXMAPC2CCHANNEL2)
+#define G_T7_RXMAPC2CCHANNEL2(x) (((x) >> S_T7_RXMAPC2CCHANNEL2) & M_T7_RXMAPC2CCHANNEL2)
+
+#define S_T7_RXMAPC2CCHANNEL1 15
+#define M_T7_RXMAPC2CCHANNEL1 0x7U
+#define V_T7_RXMAPC2CCHANNEL1(x) ((x) << S_T7_RXMAPC2CCHANNEL1)
+#define G_T7_RXMAPC2CCHANNEL1(x) (((x) >> S_T7_RXMAPC2CCHANNEL1) & M_T7_RXMAPC2CCHANNEL1)
+
+#define S_T7_RXMAPC2CCHANNEL0 12
+#define M_T7_RXMAPC2CCHANNEL0 0x7U
+#define V_T7_RXMAPC2CCHANNEL0(x) ((x) << S_T7_RXMAPC2CCHANNEL0)
+#define G_T7_RXMAPC2CCHANNEL0(x) (((x) >> S_T7_RXMAPC2CCHANNEL0) & M_T7_RXMAPC2CCHANNEL0)
+
+#define S_T7_RXMAPE2CCHANNEL3 9
+#define M_T7_RXMAPE2CCHANNEL3 0x7U
+#define V_T7_RXMAPE2CCHANNEL3(x) ((x) << S_T7_RXMAPE2CCHANNEL3)
+#define G_T7_RXMAPE2CCHANNEL3(x) (((x) >> S_T7_RXMAPE2CCHANNEL3) & M_T7_RXMAPE2CCHANNEL3)
+
+#define S_T7_RXMAPE2CCHANNEL2 6
+#define M_T7_RXMAPE2CCHANNEL2 0x7U
+#define V_T7_RXMAPE2CCHANNEL2(x) ((x) << S_T7_RXMAPE2CCHANNEL2)
+#define G_T7_RXMAPE2CCHANNEL2(x) (((x) >> S_T7_RXMAPE2CCHANNEL2) & M_T7_RXMAPE2CCHANNEL2)
+
+#define S_T7_RXMAPE2CCHANNEL1 3
+#define M_T7_RXMAPE2CCHANNEL1 0x7U
+#define V_T7_RXMAPE2CCHANNEL1(x) ((x) << S_T7_RXMAPE2CCHANNEL1)
+#define G_T7_RXMAPE2CCHANNEL1(x) (((x) >> S_T7_RXMAPE2CCHANNEL1) & M_T7_RXMAPE2CCHANNEL1)
+
+#define S_T7_RXMAPE2CCHANNEL0 0
+#define M_T7_RXMAPE2CCHANNEL0 0x7U
+#define V_T7_RXMAPE2CCHANNEL0(x) ((x) << S_T7_RXMAPE2CCHANNEL0)
+#define G_T7_RXMAPE2CCHANNEL0(x) (((x) >> S_T7_RXMAPE2CCHANNEL0) & M_T7_RXMAPE2CCHANNEL0)
+
#define A_TP_RX_LPBK 0x28
#define A_TP_TX_LPBK 0x29
#define A_TP_TX_SCHED_PPP 0x2a
@@ -24873,6 +31356,55 @@
#define V_COMMITLIMIT0L(x) ((x) << S_COMMITLIMIT0L)
#define G_COMMITLIMIT0L(x) (((x) >> S_COMMITLIMIT0L) & M_COMMITLIMIT0L)
+#define A_TP_RX_SCHED_FIFO_CH3_CH2 0x2c
+
+#define S_COMMITLIMIT3H 24
+#define M_COMMITLIMIT3H 0xffU
+#define V_COMMITLIMIT3H(x) ((x) << S_COMMITLIMIT3H)
+#define G_COMMITLIMIT3H(x) (((x) >> S_COMMITLIMIT3H) & M_COMMITLIMIT3H)
+
+#define S_COMMITLIMIT3L 16
+#define M_COMMITLIMIT3L 0xffU
+#define V_COMMITLIMIT3L(x) ((x) << S_COMMITLIMIT3L)
+#define G_COMMITLIMIT3L(x) (((x) >> S_COMMITLIMIT3L) & M_COMMITLIMIT3L)
+
+#define S_COMMITLIMIT2H 8
+#define M_COMMITLIMIT2H 0xffU
+#define V_COMMITLIMIT2H(x) ((x) << S_COMMITLIMIT2H)
+#define G_COMMITLIMIT2H(x) (((x) >> S_COMMITLIMIT2H) & M_COMMITLIMIT2H)
+
+#define S_COMMITLIMIT2L 0
+#define M_COMMITLIMIT2L 0xffU
+#define V_COMMITLIMIT2L(x) ((x) << S_COMMITLIMIT2L)
+#define G_COMMITLIMIT2L(x) (((x) >> S_COMMITLIMIT2L) & M_COMMITLIMIT2L)
+
+#define A_TP_CHANNEL_MAP_LPBK 0x2d
+
+#define S_T7_RXMAPCHANNELELN 12
+#define M_T7_RXMAPCHANNELELN 0xfU
+#define V_T7_RXMAPCHANNELELN(x) ((x) << S_T7_RXMAPCHANNELELN)
+#define G_T7_RXMAPCHANNELELN(x) (((x) >> S_T7_RXMAPCHANNELELN) & M_T7_RXMAPCHANNELELN)
+
+#define S_T7_RXMAPE2LCHANNEL3 9
+#define M_T7_RXMAPE2LCHANNEL3 0x7U
+#define V_T7_RXMAPE2LCHANNEL3(x) ((x) << S_T7_RXMAPE2LCHANNEL3)
+#define G_T7_RXMAPE2LCHANNEL3(x) (((x) >> S_T7_RXMAPE2LCHANNEL3) & M_T7_RXMAPE2LCHANNEL3)
+
+#define S_T7_RXMAPE2LCHANNEL2 6
+#define M_T7_RXMAPE2LCHANNEL2 0x7U
+#define V_T7_RXMAPE2LCHANNEL2(x) ((x) << S_T7_RXMAPE2LCHANNEL2)
+#define G_T7_RXMAPE2LCHANNEL2(x) (((x) >> S_T7_RXMAPE2LCHANNEL2) & M_T7_RXMAPE2LCHANNEL2)
+
+#define S_T7_RXMAPE2LCHANNEL1 3
+#define M_T7_RXMAPE2LCHANNEL1 0x7U
+#define V_T7_RXMAPE2LCHANNEL1(x) ((x) << S_T7_RXMAPE2LCHANNEL1)
+#define G_T7_RXMAPE2LCHANNEL1(x) (((x) >> S_T7_RXMAPE2LCHANNEL1) & M_T7_RXMAPE2LCHANNEL1)
+
+#define S_T7_RXMAPE2LCHANNEL0 0
+#define M_T7_RXMAPE2LCHANNEL0 0x7U
+#define V_T7_RXMAPE2LCHANNEL0(x) ((x) << S_T7_RXMAPE2LCHANNEL0)
+#define G_T7_RXMAPE2LCHANNEL0(x) (((x) >> S_T7_RXMAPE2LCHANNEL0) & M_T7_RXMAPE2LCHANNEL0)
+
#define A_TP_IPMI_CFG1 0x2e
#define S_VLANENABLE 31
@@ -24966,47 +31498,12 @@
#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
#define A_TP_RSS_PF1_CONFIG 0x31
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF2_CONFIG 0x32
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF3_CONFIG 0x33
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF4_CONFIG 0x34
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF5_CONFIG 0x35
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF6_CONFIG 0x36
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF7_CONFIG 0x37
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF_MAP 0x38
#define S_LKPIDXSIZE 24
@@ -25097,6 +31594,22 @@
#define G_PF0MSKSIZE(x) (((x) >> S_PF0MSKSIZE) & M_PF0MSKSIZE)
#define A_TP_RSS_VFL_CONFIG 0x3a
+
+#define S_BASEQID 16
+#define M_BASEQID 0xfffU
+#define V_BASEQID(x) ((x) << S_BASEQID)
+#define G_BASEQID(x) (((x) >> S_BASEQID) & M_BASEQID)
+
+#define S_MAXRRQID 8
+#define M_MAXRRQID 0xffU
+#define V_MAXRRQID(x) ((x) << S_MAXRRQID)
+#define G_MAXRRQID(x) (((x) >> S_MAXRRQID) & M_MAXRRQID)
+
+#define S_RRCOUNTER 0
+#define M_RRCOUNTER 0xffU
+#define V_RRCOUNTER(x) ((x) << S_RRCOUNTER)
+#define G_RRCOUNTER(x) (((x) >> S_RRCOUNTER) & M_RRCOUNTER)
+
#define A_TP_RSS_VFH_CONFIG 0x3b
#define S_ENABLEUDPHASH 31
@@ -25150,6 +31663,10 @@
#define V_KEYINDEX(x) ((x) << S_KEYINDEX)
#define G_KEYINDEX(x) (((x) >> S_KEYINDEX) & M_KEYINDEX)
+#define S_ROUNDROBINEN 3
+#define V_ROUNDROBINEN(x) ((x) << S_ROUNDROBINEN)
+#define F_ROUNDROBINEN V_ROUNDROBINEN(1U)
+
#define A_TP_RSS_SECRET_KEY0 0x40
#define A_TP_RSS_SECRET_KEY1 0x41
#define A_TP_RSS_SECRET_KEY2 0x42
@@ -25283,6 +31800,36 @@
#define V_SHAREDXRC(x) ((x) << S_SHAREDXRC)
#define F_SHAREDXRC V_SHAREDXRC(1U)
+#define S_VERIFYRSPOP 25
+#define M_VERIFYRSPOP 0x1fU
+#define V_VERIFYRSPOP(x) ((x) << S_VERIFYRSPOP)
+#define G_VERIFYRSPOP(x) (((x) >> S_VERIFYRSPOP) & M_VERIFYRSPOP)
+
+#define S_VERIFYREQOP 20
+#define M_VERIFYREQOP 0x1fU
+#define V_VERIFYREQOP(x) ((x) << S_VERIFYREQOP)
+#define G_VERIFYREQOP(x) (((x) >> S_VERIFYREQOP) & M_VERIFYREQOP)
+
+#define S_AWRITERSPOP 15
+#define M_AWRITERSPOP 0x1fU
+#define V_AWRITERSPOP(x) ((x) << S_AWRITERSPOP)
+#define G_AWRITERSPOP(x) (((x) >> S_AWRITERSPOP) & M_AWRITERSPOP)
+
+#define S_AWRITEREQOP 10
+#define M_AWRITEREQOP 0x1fU
+#define V_AWRITEREQOP(x) ((x) << S_AWRITEREQOP)
+#define G_AWRITEREQOP(x) (((x) >> S_AWRITEREQOP) & M_AWRITEREQOP)
+
+#define S_FLUSHRSPOP 5
+#define M_FLUSHRSPOP 0x1fU
+#define V_FLUSHRSPOP(x) ((x) << S_FLUSHRSPOP)
+#define G_FLUSHRSPOP(x) (((x) >> S_FLUSHRSPOP) & M_FLUSHRSPOP)
+
+#define S_FLUSHREQOP 0
+#define M_FLUSHREQOP 0x1fU
+#define V_FLUSHREQOP(x) ((x) << S_FLUSHREQOP)
+#define G_FLUSHREQOP(x) (((x) >> S_FLUSHREQOP) & M_FLUSHREQOP)
+
#define A_TP_FRAG_CONFIG 0x56
#define S_TLSMODE 16
@@ -25330,6 +31877,21 @@
#define V_PASSMODE(x) ((x) << S_PASSMODE)
#define G_PASSMODE(x) (((x) >> S_PASSMODE) & M_PASSMODE)
+#define S_NVMTMODE 22
+#define M_NVMTMODE 0x3U
+#define V_NVMTMODE(x) ((x) << S_NVMTMODE)
+#define G_NVMTMODE(x) (((x) >> S_NVMTMODE) & M_NVMTMODE)
+
+#define S_ROCEMODE 20
+#define M_ROCEMODE 0x3U
+#define V_ROCEMODE(x) ((x) << S_ROCEMODE)
+#define G_ROCEMODE(x) (((x) >> S_ROCEMODE) & M_ROCEMODE)
+
+#define S_DTLSMODE 18
+#define M_DTLSMODE 0x3U
+#define V_DTLSMODE(x) ((x) << S_DTLSMODE)
+#define G_DTLSMODE(x) (((x) >> S_DTLSMODE) & M_DTLSMODE)
+
#define A_TP_CMM_CONFIG 0x57
#define S_WRCNTIDLE 16
@@ -25383,6 +31945,7 @@
#define V_GRETYPE(x) ((x) << S_GRETYPE)
#define G_GRETYPE(x) (((x) >> S_GRETYPE) & M_GRETYPE)
+#define A_TP_MMGR_CMM_CONFIG 0x5a
#define A_TP_DBG_CLEAR 0x60
#define A_TP_DBG_CORE_HDR0 0x61
@@ -25843,14 +32406,6 @@
#define V_T5_EPCMDBUSY(x) ((x) << S_T5_EPCMDBUSY)
#define F_T5_EPCMDBUSY V_T5_EPCMDBUSY(1U)
-#define S_T6_ETXBUSY 1
-#define V_T6_ETXBUSY(x) ((x) << S_T6_ETXBUSY)
-#define F_T6_ETXBUSY V_T6_ETXBUSY(1U)
-
-#define S_T6_EPCMDBUSY 0
-#define V_T6_EPCMDBUSY(x) ((x) << S_T6_EPCMDBUSY)
-#define F_T6_EPCMDBUSY V_T6_EPCMDBUSY(1U)
-
#define A_TP_DBG_ENG_RES1 0x67
#define S_RXCPLSRDY 31
@@ -26114,16 +32669,6 @@
#define V_T5_RXPCMDCNG(x) ((x) << S_T5_RXPCMDCNG)
#define G_T5_RXPCMDCNG(x) (((x) >> S_T5_RXPCMDCNG) & M_T5_RXPCMDCNG)
-#define S_T6_RXFIFOCNG 20
-#define M_T6_RXFIFOCNG 0xfU
-#define V_T6_RXFIFOCNG(x) ((x) << S_T6_RXFIFOCNG)
-#define G_T6_RXFIFOCNG(x) (((x) >> S_T6_RXFIFOCNG) & M_T6_RXFIFOCNG)
-
-#define S_T6_RXPCMDCNG 14
-#define M_T6_RXPCMDCNG 0x3U
-#define V_T6_RXPCMDCNG(x) ((x) << S_T6_RXPCMDCNG)
-#define G_T6_RXPCMDCNG(x) (((x) >> S_T6_RXPCMDCNG) & M_T6_RXPCMDCNG)
-
#define A_TP_DBG_ERROR_CNT 0x6c
#define A_TP_DBG_CORE_CPL 0x6d
@@ -26191,6 +32736,244 @@
#define A_TP_DBG_CACHE_RD_HIT 0x73
#define A_TP_DBG_CACHE_MC_REQ 0x74
#define A_TP_DBG_CACHE_MC_RSP 0x75
+#define A_TP_RSS_PF0_CONFIG_CH3_CH2 0x80
+
+#define S_PFMAPALWAYS 22
+#define V_PFMAPALWAYS(x) ((x) << S_PFMAPALWAYS)
+#define F_PFMAPALWAYS V_PFMAPALWAYS(1U)
+
+#define S_PFROUNDROBINEN 21
+#define V_PFROUNDROBINEN(x) ((x) << S_PFROUNDROBINEN)
+#define F_PFROUNDROBINEN V_PFROUNDROBINEN(1U)
+
+#define S_FOURCHNEN 20
+#define V_FOURCHNEN(x) ((x) << S_FOURCHNEN)
+#define F_FOURCHNEN V_FOURCHNEN(1U)
+
+#define S_CH3DEFAULTQUEUE 10
+#define M_CH3DEFAULTQUEUE 0x3ffU
+#define V_CH3DEFAULTQUEUE(x) ((x) << S_CH3DEFAULTQUEUE)
+#define G_CH3DEFAULTQUEUE(x) (((x) >> S_CH3DEFAULTQUEUE) & M_CH3DEFAULTQUEUE)
+
+#define S_CH2DEFAULTQUEUE 0
+#define M_CH2DEFAULTQUEUE 0x3ffU
+#define V_CH2DEFAULTQUEUE(x) ((x) << S_CH2DEFAULTQUEUE)
+#define G_CH2DEFAULTQUEUE(x) (((x) >> S_CH2DEFAULTQUEUE) & M_CH2DEFAULTQUEUE)
+
+#define A_TP_RSS_PF1_CONFIG_CH3_CH2 0x81
+#define A_TP_RSS_PF2_CONFIG_CH3_CH2 0x82
+#define A_TP_RSS_PF3_CONFIG_CH3_CH2 0x83
+#define A_TP_RSS_PF4_CONFIG_CH3_CH2 0x84
+#define A_TP_RSS_PF5_CONFIG_CH3_CH2 0x85
+#define A_TP_RSS_PF6_CONFIG_CH3_CH2 0x86
+#define A_TP_RSS_PF7_CONFIG_CH3_CH2 0x87
+#define A_TP_RSS_PF0_EXT_CONFIG 0x88
+#define A_TP_RSS_PF1_EXT_CONFIG 0x89
+#define A_TP_RSS_PF2_EXT_CONFIG 0x8a
+#define A_TP_RSS_PF3_EXT_CONFIG 0x8b
+#define A_TP_RSS_PF4_EXT_CONFIG 0x8c
+#define A_TP_RSS_PF5_EXT_CONFIG 0x8d
+#define A_TP_RSS_PF6_EXT_CONFIG 0x8e
+#define A_TP_RSS_PF7_EXT_CONFIG 0x8f
+#define A_TP_ROCE_CONFIG 0x90
+
+#define S_IGNAETHMSB 24
+#define V_IGNAETHMSB(x) ((x) << S_IGNAETHMSB)
+#define F_IGNAETHMSB V_IGNAETHMSB(1U)
+
+#define S_XDIDMMCTL 23
+#define V_XDIDMMCTL(x) ((x) << S_XDIDMMCTL)
+#define F_XDIDMMCTL V_XDIDMMCTL(1U)
+
+#define S_WRRETHDBGFWDEN 22
+#define V_WRRETHDBGFWDEN(x) ((x) << S_WRRETHDBGFWDEN)
+#define F_WRRETHDBGFWDEN V_WRRETHDBGFWDEN(1U)
+
+#define S_ACKINTGENCTRL 20
+#define M_ACKINTGENCTRL 0x3U
+#define V_ACKINTGENCTRL(x) ((x) << S_ACKINTGENCTRL)
+#define G_ACKINTGENCTRL(x) (((x) >> S_ACKINTGENCTRL) & M_ACKINTGENCTRL)
+
+#define S_ATOMICALIGNCHKEN 19
+#define V_ATOMICALIGNCHKEN(x) ((x) << S_ATOMICALIGNCHKEN)
+#define F_ATOMICALIGNCHKEN V_ATOMICALIGNCHKEN(1U)
+
+#define S_RDRETHLENCHKEN 18
+#define V_RDRETHLENCHKEN(x) ((x) << S_RDRETHLENCHKEN)
+#define F_RDRETHLENCHKEN V_RDRETHLENCHKEN(1U)
+
+#define S_WRTOTALLENCHKEN 17
+#define V_WRTOTALLENCHKEN(x) ((x) << S_WRTOTALLENCHKEN)
+#define F_WRTOTALLENCHKEN V_WRTOTALLENCHKEN(1U)
+
+#define S_WRRETHLENCHKEN 16
+#define V_WRRETHLENCHKEN(x) ((x) << S_WRRETHLENCHKEN)
+#define F_WRRETHLENCHKEN V_WRRETHLENCHKEN(1U)
+
+#define S_TNLERRORUDPLEN 11
+#define V_TNLERRORUDPLEN(x) ((x) << S_TNLERRORUDPLEN)
+#define F_TNLERRORUDPLEN V_TNLERRORUDPLEN(1U)
+
+#define S_TNLERRORPKEY 10
+#define V_TNLERRORPKEY(x) ((x) << S_TNLERRORPKEY)
+#define F_TNLERRORPKEY V_TNLERRORPKEY(1U)
+
+#define S_TNLERROROPCODE 9
+#define V_TNLERROROPCODE(x) ((x) << S_TNLERROROPCODE)
+#define F_TNLERROROPCODE V_TNLERROROPCODE(1U)
+
+#define S_TNLERRORTVER 8
+#define V_TNLERRORTVER(x) ((x) << S_TNLERRORTVER)
+#define F_TNLERRORTVER V_TNLERRORTVER(1U)
+
+#define S_DROPERRORUDPLEN 3
+#define V_DROPERRORUDPLEN(x) ((x) << S_DROPERRORUDPLEN)
+#define F_DROPERRORUDPLEN V_DROPERRORUDPLEN(1U)
+
+#define S_DROPERRORPKEY 2
+#define V_DROPERRORPKEY(x) ((x) << S_DROPERRORPKEY)
+#define F_DROPERRORPKEY V_DROPERRORPKEY(1U)
+
+#define S_DROPERROROPCODE 1
+#define V_DROPERROROPCODE(x) ((x) << S_DROPERROROPCODE)
+#define F_DROPERROROPCODE V_DROPERROROPCODE(1U)
+
+#define S_DROPERRORTVER 0
+#define V_DROPERRORTVER(x) ((x) << S_DROPERRORTVER)
+#define F_DROPERRORTVER V_DROPERRORTVER(1U)
+
+#define A_TP_NVMT_CONFIG 0x91
+
+#define S_PDACHKEN 2
+#define V_PDACHKEN(x) ((x) << S_PDACHKEN)
+#define F_PDACHKEN V_PDACHKEN(1U)
+
+#define S_FORCERQNONDDP 1
+#define V_FORCERQNONDDP(x) ((x) << S_FORCERQNONDDP)
+#define F_FORCERQNONDDP V_FORCERQNONDDP(1U)
+
+#define S_STRIPHCRC 0
+#define V_STRIPHCRC(x) ((x) << S_STRIPHCRC)
+#define F_STRIPHCRC V_STRIPHCRC(1U)
+
+#define A_TP_NVMT_MAXHDR 0x92
+
+#define S_MAXHDR3 24
+#define M_MAXHDR3 0xffU
+#define V_MAXHDR3(x) ((x) << S_MAXHDR3)
+#define G_MAXHDR3(x) (((x) >> S_MAXHDR3) & M_MAXHDR3)
+
+#define S_MAXHDR2 16
+#define M_MAXHDR2 0xffU
+#define V_MAXHDR2(x) ((x) << S_MAXHDR2)
+#define G_MAXHDR2(x) (((x) >> S_MAXHDR2) & M_MAXHDR2)
+
+#define S_MAXHDR1 8
+#define M_MAXHDR1 0xffU
+#define V_MAXHDR1(x) ((x) << S_MAXHDR1)
+#define G_MAXHDR1(x) (((x) >> S_MAXHDR1) & M_MAXHDR1)
+
+#define S_MAXHDR0 0
+#define M_MAXHDR0 0xffU
+#define V_MAXHDR0(x) ((x) << S_MAXHDR0)
+#define G_MAXHDR0(x) (((x) >> S_MAXHDR0) & M_MAXHDR0)
+
+#define A_TP_NVMT_PDORSVD 0x93
+
+#define S_PDORSVD3 24
+#define M_PDORSVD3 0xffU
+#define V_PDORSVD3(x) ((x) << S_PDORSVD3)
+#define G_PDORSVD3(x) (((x) >> S_PDORSVD3) & M_PDORSVD3)
+
+#define S_PDORSVD2 16
+#define M_PDORSVD2 0xffU
+#define V_PDORSVD2(x) ((x) << S_PDORSVD2)
+#define G_PDORSVD2(x) (((x) >> S_PDORSVD2) & M_PDORSVD2)
+
+#define S_PDORSVD1 8
+#define M_PDORSVD1 0xffU
+#define V_PDORSVD1(x) ((x) << S_PDORSVD1)
+#define G_PDORSVD1(x) (((x) >> S_PDORSVD1) & M_PDORSVD1)
+
+#define S_PDORSVD0 0
+#define M_PDORSVD0 0xffU
+#define V_PDORSVD0(x) ((x) << S_PDORSVD0)
+#define G_PDORSVD0(x) (((x) >> S_PDORSVD0) & M_PDORSVD0)
+
+#define A_TP_RDMA_CONFIG 0x94
+
+#define S_SRQLIMITEN 20
+#define V_SRQLIMITEN(x) ((x) << S_SRQLIMITEN)
+#define F_SRQLIMITEN V_SRQLIMITEN(1U)
+
+#define S_SNDIMMSEOP 15
+#define M_SNDIMMSEOP 0x1fU
+#define V_SNDIMMSEOP(x) ((x) << S_SNDIMMSEOP)
+#define G_SNDIMMSEOP(x) (((x) >> S_SNDIMMSEOP) & M_SNDIMMSEOP)
+
+#define S_SNDIMMOP 10
+#define M_SNDIMMOP 0x1fU
+#define V_SNDIMMOP(x) ((x) << S_SNDIMMOP)
+#define G_SNDIMMOP(x) (((x) >> S_SNDIMMOP) & M_SNDIMMOP)
+
+#define S_IWARPXRCIDCHKEN 4
+#define V_IWARPXRCIDCHKEN(x) ((x) << S_IWARPXRCIDCHKEN)
+#define F_IWARPXRCIDCHKEN V_IWARPXRCIDCHKEN(1U)
+
+#define S_IWARPEXTOPEN 3
+#define V_IWARPEXTOPEN(x) ((x) << S_IWARPEXTOPEN)
+#define F_IWARPEXTOPEN V_IWARPEXTOPEN(1U)
+
+#define S_XRCIMPLTYPE 1
+#define V_XRCIMPLTYPE(x) ((x) << S_XRCIMPLTYPE)
+#define F_XRCIMPLTYPE V_XRCIMPLTYPE(1U)
+
+#define S_XRCEN 0
+#define V_XRCEN(x) ((x) << S_XRCEN)
+#define F_XRCEN V_XRCEN(1U)
+
+#define A_TP_ROCE_RRQ_BASE 0x95
+#define A_TP_FILTER_RATE_CFG 0x96
+
+#define S_GRP_CFG_RD 30
+#define V_GRP_CFG_RD(x) ((x) << S_GRP_CFG_RD)
+#define F_GRP_CFG_RD V_GRP_CFG_RD(1U)
+
+#define S_GRP_CFG_INIT 29
+#define V_GRP_CFG_INIT(x) ((x) << S_GRP_CFG_INIT)
+#define F_GRP_CFG_INIT V_GRP_CFG_INIT(1U)
+
+#define S_GRP_CFG_RST 28
+#define V_GRP_CFG_RST(x) ((x) << S_GRP_CFG_RST)
+#define F_GRP_CFG_RST V_GRP_CFG_RST(1U)
+
+#define S_GRP_CFG_SEL 16
+#define M_GRP_CFG_SEL 0xfffU
+#define V_GRP_CFG_SEL(x) ((x) << S_GRP_CFG_SEL)
+#define G_GRP_CFG_SEL(x) (((x) >> S_GRP_CFG_SEL) & M_GRP_CFG_SEL)
+
+#define S_US_TIMER_TICK 0
+#define M_US_TIMER_TICK 0xffffU
+#define V_US_TIMER_TICK(x) ((x) << S_US_TIMER_TICK)
+#define G_US_TIMER_TICK(x) (((x) >> S_US_TIMER_TICK) & M_US_TIMER_TICK)
+
+#define A_TP_TLS_CONFIG 0x99
+
+#define S_QUIESCETYPE1 24
+#define M_QUIESCETYPE1 0xffU
+#define V_QUIESCETYPE1(x) ((x) << S_QUIESCETYPE1)
+#define G_QUIESCETYPE1(x) (((x) >> S_QUIESCETYPE1) & M_QUIESCETYPE1)
+
+#define S_QUIESCETYPE2 16
+#define M_QUIESCETYPE2 0xffU
+#define V_QUIESCETYPE2(x) ((x) << S_QUIESCETYPE2)
+#define G_QUIESCETYPE2(x) (((x) >> S_QUIESCETYPE2) & M_QUIESCETYPE2)
+
+#define S_QUIESCETYPE3 8
+#define M_QUIESCETYPE3 0xffU
+#define V_QUIESCETYPE3(x) ((x) << S_QUIESCETYPE3)
+#define G_QUIESCETYPE3(x) (((x) >> S_QUIESCETYPE3) & M_QUIESCETYPE3)
+
#define A_TP_T5_TX_DROP_CNT_CH0 0x120
#define A_TP_T5_TX_DROP_CNT_CH1 0x121
#define A_TP_TX_DROP_CNT_CH2 0x122
@@ -26682,10 +33465,6 @@
#define A_TP_DBG_ESIDE_DISP1 0x137
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE1 0
#define V_TXFULL_ESIDE1(x) ((x) << S_TXFULL_ESIDE1)
#define F_TXFULL_ESIDE1 V_TXFULL_ESIDE1(1U)
@@ -26719,20 +33498,12 @@
#define A_TP_DBG_ESIDE_DISP2 0x13a
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE2 0
#define V_TXFULL_ESIDE2(x) ((x) << S_TXFULL_ESIDE2)
#define F_TXFULL_ESIDE2 V_TXFULL_ESIDE2(1U)
#define A_TP_DBG_ESIDE_DISP3 0x13b
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE3 0
#define V_TXFULL_ESIDE3(x) ((x) << S_TXFULL_ESIDE3)
#define F_TXFULL_ESIDE3 V_TXFULL_ESIDE3(1U)
@@ -26836,6 +33607,94 @@
#define V_SRVRSRAM(x) ((x) << S_SRVRSRAM)
#define F_SRVRSRAM V_SRVRSRAM(1U)
+#define S_T7_FILTERMODE 31
+#define V_T7_FILTERMODE(x) ((x) << S_T7_FILTERMODE)
+#define F_T7_FILTERMODE V_T7_FILTERMODE(1U)
+
+#define S_T7_FCOEMASK 30
+#define V_T7_FCOEMASK(x) ((x) << S_T7_FCOEMASK)
+#define F_T7_FCOEMASK V_T7_FCOEMASK(1U)
+
+#define S_T7_SRVRSRAM 29
+#define V_T7_SRVRSRAM(x) ((x) << S_T7_SRVRSRAM)
+#define F_T7_SRVRSRAM V_T7_SRVRSRAM(1U)
+
+#define S_ROCEUDFORCEIPV6 28
+#define V_ROCEUDFORCEIPV6(x) ((x) << S_ROCEUDFORCEIPV6)
+#define F_ROCEUDFORCEIPV6 V_ROCEUDFORCEIPV6(1U)
+
+#define S_TCPFLAGS8 27
+#define V_TCPFLAGS8(x) ((x) << S_TCPFLAGS8)
+#define F_TCPFLAGS8 V_TCPFLAGS8(1U)
+
+#define S_MACMATCH11 26
+#define V_MACMATCH11(x) ((x) << S_MACMATCH11)
+#define F_MACMATCH11 V_MACMATCH11(1U)
+
+#define S_SMACMATCH10 25
+#define V_SMACMATCH10(x) ((x) << S_SMACMATCH10)
+#define F_SMACMATCH10 V_SMACMATCH10(1U)
+
+#define S_SMACMATCH 14
+#define V_SMACMATCH(x) ((x) << S_SMACMATCH)
+#define F_SMACMATCH V_SMACMATCH(1U)
+
+#define S_TCPFLAGS 13
+#define V_TCPFLAGS(x) ((x) << S_TCPFLAGS)
+#define F_TCPFLAGS V_TCPFLAGS(1U)
+
+#define S_SYNONLY 12
+#define V_SYNONLY(x) ((x) << S_SYNONLY)
+#define F_SYNONLY V_SYNONLY(1U)
+
+#define S_ROCE 11
+#define V_ROCE(x) ((x) << S_ROCE)
+#define F_ROCE V_ROCE(1U)
+
+#define S_T7_FRAGMENTATION 10
+#define V_T7_FRAGMENTATION(x) ((x) << S_T7_FRAGMENTATION)
+#define F_T7_FRAGMENTATION V_T7_FRAGMENTATION(1U)
+
+#define S_T7_MPSHITTYPE 9
+#define V_T7_MPSHITTYPE(x) ((x) << S_T7_MPSHITTYPE)
+#define F_T7_MPSHITTYPE V_T7_MPSHITTYPE(1U)
+
+#define S_T7_MACMATCH 8
+#define V_T7_MACMATCH(x) ((x) << S_T7_MACMATCH)
+#define F_T7_MACMATCH V_T7_MACMATCH(1U)
+
+#define S_T7_ETHERTYPE 7
+#define V_T7_ETHERTYPE(x) ((x) << S_T7_ETHERTYPE)
+#define F_T7_ETHERTYPE V_T7_ETHERTYPE(1U)
+
+#define S_T7_PROTOCOL 6
+#define V_T7_PROTOCOL(x) ((x) << S_T7_PROTOCOL)
+#define F_T7_PROTOCOL V_T7_PROTOCOL(1U)
+
+#define S_T7_TOS 5
+#define V_T7_TOS(x) ((x) << S_T7_TOS)
+#define F_T7_TOS V_T7_TOS(1U)
+
+#define S_T7_VLAN 4
+#define V_T7_VLAN(x) ((x) << S_T7_VLAN)
+#define F_T7_VLAN V_T7_VLAN(1U)
+
+#define S_T7_VNIC_ID 3
+#define V_T7_VNIC_ID(x) ((x) << S_T7_VNIC_ID)
+#define F_T7_VNIC_ID V_T7_VNIC_ID(1U)
+
+#define S_T7_PORT 2
+#define V_T7_PORT(x) ((x) << S_T7_PORT)
+#define F_T7_PORT V_T7_PORT(1U)
+
+#define S_T7_FCOE 1
+#define V_T7_FCOE(x) ((x) << S_T7_FCOE)
+#define F_T7_FCOE V_T7_FCOE(1U)
+
+#define S_IPSECIDX 0
+#define V_IPSECIDX(x) ((x) << S_IPSECIDX)
+#define F_IPSECIDX V_IPSECIDX(1U)
+
#define A_TP_INGRESS_CONFIG 0x141
#define S_OPAQUE_TYPE 16
@@ -26888,6 +33747,14 @@
#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX)
#define F_USE_ENC_IDX V_USE_ENC_IDX(1U)
+#define S_USE_MPS_ECN 15
+#define V_USE_MPS_ECN(x) ((x) << S_USE_MPS_ECN)
+#define F_USE_MPS_ECN V_USE_MPS_ECN(1U)
+
+#define S_USE_MPS_CONG 14
+#define V_USE_MPS_CONG(x) ((x) << S_USE_MPS_CONG)
+#define F_USE_MPS_CONG V_USE_MPS_CONG(1U)
+
#define A_TP_TX_DROP_CFG_CH2 0x142
#define A_TP_TX_DROP_CFG_CH3 0x143
#define A_TP_EGRESS_CONFIG 0x145
@@ -27490,6 +34357,51 @@
#define V_ROCEV2UDPPORT(x) ((x) << S_ROCEV2UDPPORT)
#define G_ROCEV2UDPPORT(x) (((x) >> S_ROCEV2UDPPORT) & M_ROCEV2UDPPORT)
+#define S_IPSECTUNETHTRANSEN 29
+#define V_IPSECTUNETHTRANSEN(x) ((x) << S_IPSECTUNETHTRANSEN)
+#define F_IPSECTUNETHTRANSEN V_IPSECTUNETHTRANSEN(1U)
+
+#define S_ROCEV2ZEROUDP6CSUM 28
+#define V_ROCEV2ZEROUDP6CSUM(x) ((x) << S_ROCEV2ZEROUDP6CSUM)
+#define F_ROCEV2ZEROUDP6CSUM V_ROCEV2ZEROUDP6CSUM(1U)
+
+#define S_ROCEV2PROCEN 27
+#define V_ROCEV2PROCEN(x) ((x) << S_ROCEV2PROCEN)
+#define F_ROCEV2PROCEN V_ROCEV2PROCEN(1U)
+
+#define A_TP_ESIDE_ROCE_PORT12 0x161
+
+#define S_ROCEV2UDPPORT2 16
+#define M_ROCEV2UDPPORT2 0xffffU
+#define V_ROCEV2UDPPORT2(x) ((x) << S_ROCEV2UDPPORT2)
+#define G_ROCEV2UDPPORT2(x) (((x) >> S_ROCEV2UDPPORT2) & M_ROCEV2UDPPORT2)
+
+#define S_ROCEV2UDPPORT1 0
+#define M_ROCEV2UDPPORT1 0xffffU
+#define V_ROCEV2UDPPORT1(x) ((x) << S_ROCEV2UDPPORT1)
+#define G_ROCEV2UDPPORT1(x) (((x) >> S_ROCEV2UDPPORT1) & M_ROCEV2UDPPORT1)
+
+#define A_TP_ESIDE_ROCE_PORT34 0x162
+
+#define S_ROCEV2UDPPORT4 16
+#define M_ROCEV2UDPPORT4 0xffffU
+#define V_ROCEV2UDPPORT4(x) ((x) << S_ROCEV2UDPPORT4)
+#define G_ROCEV2UDPPORT4(x) (((x) >> S_ROCEV2UDPPORT4) & M_ROCEV2UDPPORT4)
+
+#define S_ROCEV2UDPPORT3 0
+#define M_ROCEV2UDPPORT3 0xffffU
+#define V_ROCEV2UDPPORT3(x) ((x) << S_ROCEV2UDPPORT3)
+#define G_ROCEV2UDPPORT3(x) (((x) >> S_ROCEV2UDPPORT3) & M_ROCEV2UDPPORT3)
+
+#define A_TP_ESIDE_CONFIG1 0x163
+
+#define S_ROCEV2CRCIGN 0
+#define M_ROCEV2CRCIGN 0xfU
+#define V_ROCEV2CRCIGN(x) ((x) << S_ROCEV2CRCIGN)
+#define G_ROCEV2CRCIGN(x) (((x) >> S_ROCEV2CRCIGN) & M_ROCEV2CRCIGN)
+
+#define A_TP_ESIDE_DEBUG_CFG 0x16c
+#define A_TP_ESIDE_DEBUG_DATA 0x16d
#define A_TP_DBG_CSIDE_RX0 0x230
#define S_CRXSOPCNT 28
@@ -27962,56 +34874,7 @@
#define V_TXFULL2X(x) ((x) << S_TXFULL2X)
#define F_TXFULL2X V_TXFULL2X(1U)
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DISP1 0x23b
-
-#define S_T5_TXFULL 31
-#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL)
-#define F_T5_TXFULL V_T5_TXFULL(1U)
-
-#define S_T5_PLD_RXZEROP_SRDY 25
-#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY)
-#define F_T5_PLD_RXZEROP_SRDY V_T5_PLD_RXZEROP_SRDY(1U)
-
-#define S_T5_DDP_SRDY 22
-#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY)
-#define F_T5_DDP_SRDY V_T5_DDP_SRDY(1U)
-
-#define S_T5_DDP_DRDY 21
-#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY)
-#define F_T5_DDP_DRDY V_T5_DDP_DRDY(1U)
-
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DDP0 0x23c
#define S_DDPMSGLATEST7 28
@@ -28222,6 +35085,59 @@
#define V_ISCSICMDMODE(x) ((x) << S_ISCSICMDMODE)
#define F_ISCSICMDMODE V_ISCSICMDMODE(1U)
+#define S_NVMTOPUPDEN 30
+#define V_NVMTOPUPDEN(x) ((x) << S_NVMTOPUPDEN)
+#define F_NVMTOPUPDEN V_NVMTOPUPDEN(1U)
+
+#define S_NOPDIS 29
+#define V_NOPDIS(x) ((x) << S_NOPDIS)
+#define F_NOPDIS V_NOPDIS(1U)
+
+#define S_IWARPINVREQEN 27
+#define V_IWARPINVREQEN(x) ((x) << S_IWARPINVREQEN)
+#define F_IWARPINVREQEN V_IWARPINVREQEN(1U)
+
+#define S_ROCEINVREQEN 26
+#define V_ROCEINVREQEN(x) ((x) << S_ROCEINVREQEN)
+#define F_ROCEINVREQEN V_ROCEINVREQEN(1U)
+
+#define S_ROCESRQFWEN 25
+#define V_ROCESRQFWEN(x) ((x) << S_ROCESRQFWEN)
+#define F_ROCESRQFWEN V_ROCESRQFWEN(1U)
+
+#define S_T7_WRITEZEROOP 20
+#define M_T7_WRITEZEROOP 0x1fU
+#define V_T7_WRITEZEROOP(x) ((x) << S_T7_WRITEZEROOP)
+#define G_T7_WRITEZEROOP(x) (((x) >> S_T7_WRITEZEROOP) & M_T7_WRITEZEROOP)
+
+#define S_IWARPEXTMODE 9
+#define V_IWARPEXTMODE(x) ((x) << S_IWARPEXTMODE)
+#define F_IWARPEXTMODE V_IWARPEXTMODE(1U)
+
+#define S_IWARPINVFWEN 8
+#define V_IWARPINVFWEN(x) ((x) << S_IWARPINVFWEN)
+#define F_IWARPINVFWEN V_IWARPINVFWEN(1U)
+
+#define S_IWARPSRQFWEN 7
+#define V_IWARPSRQFWEN(x) ((x) << S_IWARPSRQFWEN)
+#define F_IWARPSRQFWEN V_IWARPSRQFWEN(1U)
+
+#define S_T7_STARTSKIPPLD 3
+#define V_T7_STARTSKIPPLD(x) ((x) << S_T7_STARTSKIPPLD)
+#define F_T7_STARTSKIPPLD V_T7_STARTSKIPPLD(1U)
+
+#define S_NVMTFLIMMEN 2
+#define V_NVMTFLIMMEN(x) ((x) << S_NVMTFLIMMEN)
+#define F_NVMTFLIMMEN V_NVMTFLIMMEN(1U)
+
+#define S_NVMTOPCTRLEN 1
+#define V_NVMTOPCTRLEN(x) ((x) << S_NVMTOPCTRLEN)
+#define F_NVMTOPCTRLEN V_NVMTOPCTRLEN(1U)
+
+#define S_T7_WRITEZEROEN 0
+#define V_T7_WRITEZEROEN(x) ((x) << S_T7_WRITEZEROEN)
+#define F_T7_WRITEZEROEN V_T7_WRITEZEROEN(1U)
+
#define A_TP_CSPI_POWER 0x243
#define S_GATECHNTX3 11
@@ -28256,6 +35172,26 @@
#define V_SLEEPREQUTRN(x) ((x) << S_SLEEPREQUTRN)
#define F_SLEEPREQUTRN V_SLEEPREQUTRN(1U)
+#define S_GATECHNRX3 7
+#define V_GATECHNRX3(x) ((x) << S_GATECHNRX3)
+#define F_GATECHNRX3 V_GATECHNRX3(1U)
+
+#define S_GATECHNRX2 6
+#define V_GATECHNRX2(x) ((x) << S_GATECHNRX2)
+#define F_GATECHNRX2 V_GATECHNRX2(1U)
+
+#define S_T7_GATECHNRX1 5
+#define V_T7_GATECHNRX1(x) ((x) << S_T7_GATECHNRX1)
+#define F_T7_GATECHNRX1 V_T7_GATECHNRX1(1U)
+
+#define S_T7_GATECHNRX0 4
+#define V_T7_GATECHNRX0(x) ((x) << S_T7_GATECHNRX0)
+#define F_T7_GATECHNRX0 V_T7_GATECHNRX0(1U)
+
+#define S_T7_SLEEPRDYUTRN 3
+#define V_T7_SLEEPRDYUTRN(x) ((x) << S_T7_SLEEPRDYUTRN)
+#define F_T7_SLEEPRDYUTRN V_T7_SLEEPRDYUTRN(1U)
+
#define A_TP_TRC_CONFIG 0x244
#define S_TRCRR 1
@@ -28266,6 +35202,19 @@
#define V_TRCCH(x) ((x) << S_TRCCH)
#define F_TRCCH V_TRCCH(1U)
+#define S_DEBUGPG 3
+#define V_DEBUGPG(x) ((x) << S_DEBUGPG)
+#define F_DEBUGPG V_DEBUGPG(1U)
+
+#define S_T7_TRCRR 2
+#define V_T7_TRCRR(x) ((x) << S_T7_TRCRR)
+#define F_T7_TRCRR V_T7_TRCRR(1U)
+
+#define S_T7_TRCCH 0
+#define M_T7_TRCCH 0x3U
+#define V_T7_TRCCH(x) ((x) << S_T7_TRCCH)
+#define G_T7_TRCCH(x) (((x) >> S_T7_TRCCH) & M_T7_TRCCH)
+
#define A_TP_TAG_CONFIG 0x245
#define S_ETAGTYPE 16
@@ -28379,26 +35328,6 @@
#define V_T5_CPRSSTATE0(x) ((x) << S_T5_CPRSSTATE0)
#define G_T5_CPRSSTATE0(x) (((x) >> S_T5_CPRSSTATE0) & M_T5_CPRSSTATE0)
-#define S_T6_CPRSSTATE3 24
-#define M_T6_CPRSSTATE3 0xfU
-#define V_T6_CPRSSTATE3(x) ((x) << S_T6_CPRSSTATE3)
-#define G_T6_CPRSSTATE3(x) (((x) >> S_T6_CPRSSTATE3) & M_T6_CPRSSTATE3)
-
-#define S_T6_CPRSSTATE2 16
-#define M_T6_CPRSSTATE2 0xfU
-#define V_T6_CPRSSTATE2(x) ((x) << S_T6_CPRSSTATE2)
-#define G_T6_CPRSSTATE2(x) (((x) >> S_T6_CPRSSTATE2) & M_T6_CPRSSTATE2)
-
-#define S_T6_CPRSSTATE1 8
-#define M_T6_CPRSSTATE1 0xfU
-#define V_T6_CPRSSTATE1(x) ((x) << S_T6_CPRSSTATE1)
-#define G_T6_CPRSSTATE1(x) (((x) >> S_T6_CPRSSTATE1) & M_T6_CPRSSTATE1)
-
-#define S_T6_CPRSSTATE0 0
-#define M_T6_CPRSSTATE0 0xfU
-#define V_T6_CPRSSTATE0(x) ((x) << S_T6_CPRSSTATE0)
-#define G_T6_CPRSSTATE0(x) (((x) >> S_T6_CPRSSTATE0) & M_T6_CPRSSTATE0)
-
#define A_TP_DBG_CSIDE_DEMUX 0x247
#define S_CALLDONE 28
@@ -28630,6 +35559,62 @@
#define A_TP_DBG_CSIDE_ARBIT_WAIT1 0x24e
#define A_TP_DBG_CSIDE_ARBIT_CNT0 0x24f
#define A_TP_DBG_CSIDE_ARBIT_CNT1 0x250
+#define A_TP_CHDR_CONFIG1 0x259
+
+#define S_CH3HIGH 24
+#define M_CH3HIGH 0xffU
+#define V_CH3HIGH(x) ((x) << S_CH3HIGH)
+#define G_CH3HIGH(x) (((x) >> S_CH3HIGH) & M_CH3HIGH)
+
+#define S_CH3LOW 16
+#define M_CH3LOW 0xffU
+#define V_CH3LOW(x) ((x) << S_CH3LOW)
+#define G_CH3LOW(x) (((x) >> S_CH3LOW) & M_CH3LOW)
+
+#define S_CH2HIGH 8
+#define M_CH2HIGH 0xffU
+#define V_CH2HIGH(x) ((x) << S_CH2HIGH)
+#define G_CH2HIGH(x) (((x) >> S_CH2HIGH) & M_CH2HIGH)
+
+#define S_CH2LOW 0
+#define M_CH2LOW 0xffU
+#define V_CH2LOW(x) ((x) << S_CH2LOW)
+#define G_CH2LOW(x) (((x) >> S_CH2LOW) & M_CH2LOW)
+
+#define A_TP_CDSP_RDMA_CONFIG 0x260
+#define A_TP_NVMT_OP_CTRL 0x268
+
+#define S_DEFOPCTRL 30
+#define M_DEFOPCTRL 0x3U
+#define V_DEFOPCTRL(x) ((x) << S_DEFOPCTRL)
+#define G_DEFOPCTRL(x) (((x) >> S_DEFOPCTRL) & M_DEFOPCTRL)
+
+#define S_NVMTOPCTRL 0
+#define M_NVMTOPCTRL 0x3fffffffU
+#define V_NVMTOPCTRL(x) ((x) << S_NVMTOPCTRL)
+#define G_NVMTOPCTRL(x) (((x) >> S_NVMTOPCTRL) & M_NVMTOPCTRL)
+
+#define A_TP_CSIDE_DEBUG_CFG 0x26c
+
+#define S_T7_OR_EN 13
+#define V_T7_OR_EN(x) ((x) << S_T7_OR_EN)
+#define F_T7_OR_EN V_T7_OR_EN(1U)
+
+#define S_T7_HI 12
+#define V_T7_HI(x) ((x) << S_T7_HI)
+#define F_T7_HI V_T7_HI(1U)
+
+#define S_T7_SELH 6
+#define M_T7_SELH 0x3fU
+#define V_T7_SELH(x) ((x) << S_T7_SELH)
+#define G_T7_SELH(x) (((x) >> S_T7_SELH) & M_T7_SELH)
+
+#define S_T7_SELL 0
+#define M_T7_SELL 0x3fU
+#define V_T7_SELL(x) ((x) << S_T7_SELL)
+#define G_T7_SELL(x) (((x) >> S_T7_SELL) & M_T7_SELL)
+
+#define A_TP_CSIDE_DEBUG_DATA 0x26d
#define A_TP_FIFO_CONFIG 0x8c0
#define S_CH1_OUTPUT 27
@@ -28771,6 +35756,174 @@
#define A_TP_MIB_TNL_ERR_1 0x71
#define A_TP_MIB_TNL_ERR_2 0x72
#define A_TP_MIB_TNL_ERR_3 0x73
+#define A_TP_MIB_RDMA_IN_PKT_0 0x80
+#define A_TP_MIB_RDMA_IN_PKT_1 0x81
+#define A_TP_MIB_RDMA_IN_PKT_2 0x82
+#define A_TP_MIB_RDMA_IN_PKT_3 0x83
+#define A_TP_MIB_RDMA_IN_BYTE_HI_0 0x84
+#define A_TP_MIB_RDMA_IN_BYTE_LO_0 0x85
+#define A_TP_MIB_RDMA_IN_BYTE_HI_1 0x86
+#define A_TP_MIB_RDMA_IN_BYTE_LO_1 0x87
+#define A_TP_MIB_RDMA_IN_BYTE_HI_2 0x88
+#define A_TP_MIB_RDMA_IN_BYTE_LO_2 0x89
+#define A_TP_MIB_RDMA_IN_BYTE_HI_3 0x8a
+#define A_TP_MIB_RDMA_IN_BYTE_LO_3 0x8b
+#define A_TP_MIB_RDMA_OUT_PKT_0 0x90
+#define A_TP_MIB_RDMA_OUT_PKT_1 0x91
+#define A_TP_MIB_RDMA_OUT_PKT_2 0x92
+#define A_TP_MIB_RDMA_OUT_PKT_3 0x93
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_0 0x94
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_0 0x95
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_1 0x96
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_1 0x97
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_2 0x98
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_2 0x99
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_3 0x9a
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_3 0x9b
+#define A_TP_MIB_ISCSI_IN_PKT_0 0xa0
+#define A_TP_MIB_ISCSI_IN_PKT_1 0xa1
+#define A_TP_MIB_ISCSI_IN_PKT_2 0xa2
+#define A_TP_MIB_ISCSI_IN_PKT_3 0xa3
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_0 0xa4
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_0 0xa5
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_1 0xa6
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_1 0xa7
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_2 0xa8
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_2 0xa9
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_3 0xaa
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_3 0xab
+#define A_TP_MIB_ISCSI_OUT_PKT_0 0xb0
+#define A_TP_MIB_ISCSI_OUT_PKT_1 0xb1
+#define A_TP_MIB_ISCSI_OUT_PKT_2 0xb2
+#define A_TP_MIB_ISCSI_OUT_PKT_3 0xb3
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_0 0xb4
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_0 0xb5
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_1 0xb6
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_1 0xb7
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_2 0xb8
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_2 0xb9
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_3 0xba
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_3 0xbb
+#define A_TP_MIB_NVMT_IN_PKT_0 0xc0
+#define A_TP_MIB_NVMT_IN_PKT_1 0xc1
+#define A_TP_MIB_NVMT_IN_PKT_2 0xc2
+#define A_TP_MIB_NVMT_IN_PKT_3 0xc3
+#define A_TP_MIB_NVMT_IN_BYTE_HI_0 0xc4
+#define A_TP_MIB_NVMT_IN_BYTE_LO_0 0xc5
+#define A_TP_MIB_NVMT_IN_BYTE_HI_1 0xc6
+#define A_TP_MIB_NVMT_IN_BYTE_LO_1 0xc7
+#define A_TP_MIB_NVMT_IN_BYTE_HI_2 0xc8
+#define A_TP_MIB_NVMT_IN_BYTE_LO_2 0xc9
+#define A_TP_MIB_NVMT_IN_BYTE_HI_3 0xca
+#define A_TP_MIB_NVMT_IN_BYTE_LO_3 0xcb
+#define A_TP_MIB_NVMT_OUT_PKT_0 0xd0
+#define A_TP_MIB_NVMT_OUT_PKT_1 0xd1
+#define A_TP_MIB_NVMT_OUT_PKT_2 0xd2
+#define A_TP_MIB_NVMT_OUT_PKT_3 0xd3
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_0 0xd4
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_0 0xd5
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_1 0xd6
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_1 0xd7
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_2 0xd8
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_2 0xd9
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_3 0xda
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_3 0xdb
+#define A_TP_MIB_TLS_IN_PKT_0 0xe0
+#define A_TP_MIB_TLS_IN_PKT_1 0xe1
+#define A_TP_MIB_TLS_IN_PKT_2 0xe2
+#define A_TP_MIB_TLS_IN_PKT_3 0xe3
+#define A_TP_MIB_TLS_IN_BYTE_HI_0 0xe4
+#define A_TP_MIB_TLS_IN_BYTE_LO_0 0xe5
+#define A_TP_MIB_TLS_IN_BYTE_HI_1 0xe6
+#define A_TP_MIB_TLS_IN_BYTE_LO_1 0xe7
+#define A_TP_MIB_TLS_IN_BYTE_HI_2 0xe8
+#define A_TP_MIB_TLS_IN_BYTE_LO_2 0xe9
+#define A_TP_MIB_TLS_IN_BYTE_HI_3 0xea
+#define A_TP_MIB_TLS_IN_BYTE_LO_3 0xeb
+#define A_TP_MIB_TLS_OUT_PKT_0 0xf0
+#define A_TP_MIB_TLS_OUT_PKT_1 0xf1
+#define A_TP_MIB_TLS_OUT_PKT_2 0xf2
+#define A_TP_MIB_TLS_OUT_PKT_3 0xf3
+#define A_TP_MIB_TLS_OUT_BYTE_HI_0 0xf4
+#define A_TP_MIB_TLS_OUT_BYTE_LO_0 0xf5
+#define A_TP_MIB_TLS_OUT_BYTE_HI_1 0xf6
+#define A_TP_MIB_TLS_OUT_BYTE_LO_1 0xf7
+#define A_TP_MIB_TLS_OUT_BYTE_HI_2 0xf8
+#define A_TP_MIB_TLS_OUT_BYTE_LO_2 0xf9
+#define A_TP_MIB_TLS_OUT_BYTE_HI_3 0xfa
+#define A_TP_MIB_TLS_OUT_BYTE_LO_3 0xfb
+#define A_TP_MIB_ROCE_IN_PKT_0 0x100
+#define A_TP_MIB_ROCE_IN_PKT_1 0x101
+#define A_TP_MIB_ROCE_IN_PKT_2 0x102
+#define A_TP_MIB_ROCE_IN_PKT_3 0x103
+#define A_TP_MIB_ROCE_IN_BYTE_HI_0 0x104
+#define A_TP_MIB_ROCE_IN_BYTE_LO_0 0x105
+#define A_TP_MIB_ROCE_IN_BYTE_HI_1 0x106
+#define A_TP_MIB_ROCE_IN_BYTE_LO_1 0x107
+#define A_TP_MIB_ROCE_IN_BYTE_HI_2 0x108
+#define A_TP_MIB_ROCE_IN_BYTE_LO_2 0x109
+#define A_TP_MIB_ROCE_IN_BYTE_HI_3 0x10a
+#define A_TP_MIB_ROCE_IN_BYTE_LO_3 0x10b
+#define A_TP_MIB_ROCE_OUT_PKT_0 0x110
+#define A_TP_MIB_ROCE_OUT_PKT_1 0x111
+#define A_TP_MIB_ROCE_OUT_PKT_2 0x112
+#define A_TP_MIB_ROCE_OUT_PKT_3 0x113
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_0 0x114
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_0 0x115
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_1 0x116
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_1 0x117
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_2 0x118
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_2 0x119
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_3 0x11a
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_3 0x11b
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_0 0x120
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_1 0x121
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_2 0x122
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_3 0x123
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_0 0x124
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_0 0x125
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_1 0x126
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_1 0x127
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_2 0x128
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_2 0x129
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_3 0x12a
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_3 0x12b
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_0 0x130
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_1 0x131
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_2 0x132
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_3 0x133
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_0 0x134
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_0 0x135
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_1 0x136
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_1 0x137
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_2 0x138
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_2 0x139
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_3 0x13a
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_3 0x13b
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_0 0x140
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_1 0x141
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_2 0x142
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_3 0x143
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_0 0x144
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_0 0x145
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_1 0x146
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_1 0x147
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_2 0x148
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_2 0x149
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_3 0x14a
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_3 0x14b
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_0 0x150
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_1 0x151
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_2 0x152
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_3 0x153
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_0 0x154
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_0 0x155
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_1 0x156
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_1 0x157
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_2 0x158
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_2 0x159
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_3 0x15a
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_3 0x15b
/* registers for module ULP_TX */
#define ULP_TX_BASE_ADDR 0x8dc0
@@ -28853,7 +36006,58 @@
#define V_ATOMIC_FIX_DIS(x) ((x) << S_ATOMIC_FIX_DIS)
#define F_ATOMIC_FIX_DIS V_ATOMIC_FIX_DIS(1U)
+#define S_LB_LEN_SEL 28
+#define V_LB_LEN_SEL(x) ((x) << S_LB_LEN_SEL)
+#define F_LB_LEN_SEL V_LB_LEN_SEL(1U)
+
+#define S_DISABLE_TPT_CREDIT_CHK 27
+#define V_DISABLE_TPT_CREDIT_CHK(x) ((x) << S_DISABLE_TPT_CREDIT_CHK)
+#define F_DISABLE_TPT_CREDIT_CHK V_DISABLE_TPT_CREDIT_CHK(1U)
+
+#define S_REQSRC 26
+#define V_REQSRC(x) ((x) << S_REQSRC)
+#define F_REQSRC V_REQSRC(1U)
+
+#define S_ERR2UP 25
+#define V_ERR2UP(x) ((x) << S_ERR2UP)
+#define F_ERR2UP V_ERR2UP(1U)
+
+#define S_SGE_INVALIDATE_DIS 24
+#define V_SGE_INVALIDATE_DIS(x) ((x) << S_SGE_INVALIDATE_DIS)
+#define F_SGE_INVALIDATE_DIS V_SGE_INVALIDATE_DIS(1U)
+
+#define S_ROCE_ACKREQ_CTRL 23
+#define V_ROCE_ACKREQ_CTRL(x) ((x) << S_ROCE_ACKREQ_CTRL)
+#define F_ROCE_ACKREQ_CTRL V_ROCE_ACKREQ_CTRL(1U)
+
+#define S_MEM_ADDR_CTRL 21
+#define M_MEM_ADDR_CTRL 0x3U
+#define V_MEM_ADDR_CTRL(x) ((x) << S_MEM_ADDR_CTRL)
+#define G_MEM_ADDR_CTRL(x) (((x) >> S_MEM_ADDR_CTRL) & M_MEM_ADDR_CTRL)
+
+#define S_TPT_EXTENSION_MODE 20
+#define V_TPT_EXTENSION_MODE(x) ((x) << S_TPT_EXTENSION_MODE)
+#define F_TPT_EXTENSION_MODE V_TPT_EXTENSION_MODE(1U)
+
+#define S_XRC_INDICATION 19
+#define V_XRC_INDICATION(x) ((x) << S_XRC_INDICATION)
+#define F_XRC_INDICATION V_XRC_INDICATION(1U)
+
+#define S_LSO_1SEG_LEN_UPD_EN 18
+#define V_LSO_1SEG_LEN_UPD_EN(x) ((x) << S_LSO_1SEG_LEN_UPD_EN)
+#define F_LSO_1SEG_LEN_UPD_EN V_LSO_1SEG_LEN_UPD_EN(1U)
+
+#define S_PKT_ISGL_ERR_ST_EN 17
+#define V_PKT_ISGL_ERR_ST_EN(x) ((x) << S_PKT_ISGL_ERR_ST_EN)
+#define F_PKT_ISGL_ERR_ST_EN V_PKT_ISGL_ERR_ST_EN(1U)
+
#define A_ULP_TX_PERR_INJECT 0x8dc4
+
+#define S_T7_1_MEMSEL 1
+#define M_T7_1_MEMSEL 0x7fU
+#define V_T7_1_MEMSEL(x) ((x) << S_T7_1_MEMSEL)
+#define G_T7_1_MEMSEL(x) (((x) >> S_T7_1_MEMSEL) & M_T7_1_MEMSEL)
+
#define A_ULP_TX_INT_ENABLE 0x8dc8
#define S_PBL_BOUND_ERR_CH3 31
@@ -28984,8 +36188,28 @@
#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0)
#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U)
+#define A_ULP_TX_INT_ENABLE_1 0x8dc8
+
+#define S_TLS_DSGL_PARERR3 3
+#define V_TLS_DSGL_PARERR3(x) ((x) << S_TLS_DSGL_PARERR3)
+#define F_TLS_DSGL_PARERR3 V_TLS_DSGL_PARERR3(1U)
+
+#define S_TLS_DSGL_PARERR2 2
+#define V_TLS_DSGL_PARERR2(x) ((x) << S_TLS_DSGL_PARERR2)
+#define F_TLS_DSGL_PARERR2 V_TLS_DSGL_PARERR2(1U)
+
+#define S_TLS_DSGL_PARERR1 1
+#define V_TLS_DSGL_PARERR1(x) ((x) << S_TLS_DSGL_PARERR1)
+#define F_TLS_DSGL_PARERR1 V_TLS_DSGL_PARERR1(1U)
+
+#define S_TLS_DSGL_PARERR0 0
+#define V_TLS_DSGL_PARERR0(x) ((x) << S_TLS_DSGL_PARERR0)
+#define F_TLS_DSGL_PARERR0 V_TLS_DSGL_PARERR0(1U)
+
#define A_ULP_TX_INT_CAUSE 0x8dcc
+#define A_ULP_TX_INT_CAUSE_1 0x8dcc
#define A_ULP_TX_PERR_ENABLE 0x8dd0
+#define A_ULP_TX_PERR_ENABLE_1 0x8dd0
#define A_ULP_TX_TPT_LLIMIT 0x8dd4
#define A_ULP_TX_TPT_ULIMIT 0x8dd8
#define A_ULP_TX_PBL_LLIMIT 0x8ddc
@@ -29014,6 +36238,13 @@
#define F_TLSDISABLE V_TLSDISABLE(1U)
#define A_ULP_TX_CPL_ERR_MASK_L 0x8de8
+#define A_ULP_TX_FID_1 0x8de8
+
+#define S_FID_1 0
+#define M_FID_1 0x7ffU
+#define V_FID_1(x) ((x) << S_FID_1)
+#define G_FID_1(x) (((x) >> S_FID_1) & M_FID_1)
+
#define A_ULP_TX_CPL_ERR_MASK_H 0x8dec
#define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0
#define A_ULP_TX_CPL_ERR_VALUE_H 0x8df4
@@ -29166,6 +36397,15 @@
#define V_WRREQ_SZ(x) ((x) << S_WRREQ_SZ)
#define G_WRREQ_SZ(x) (((x) >> S_WRREQ_SZ) & M_WRREQ_SZ)
+#define S_T7_GLOBALENABLE 31
+#define V_T7_GLOBALENABLE(x) ((x) << S_T7_GLOBALENABLE)
+#define F_T7_GLOBALENABLE V_T7_GLOBALENABLE(1U)
+
+#define S_RDREQ_SZ 3
+#define M_RDREQ_SZ 0x7U
+#define V_RDREQ_SZ(x) ((x) << S_RDREQ_SZ)
+#define G_RDREQ_SZ(x) (((x) >> S_RDREQ_SZ) & M_RDREQ_SZ)
+
#define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34
#define A_ULP_TX_PERR_INJECT_2 0x8e34
@@ -29385,6 +36625,200 @@
#define A_ULP_TX_INT_CAUSE_2 0x8e80
#define A_ULP_TX_PERR_ENABLE_2 0x8e84
+#define A_ULP_TX_INT_ENABLE_3 0x8e88
+
+#define S_GF_SGE_FIFO_PARERR3 31
+#define V_GF_SGE_FIFO_PARERR3(x) ((x) << S_GF_SGE_FIFO_PARERR3)
+#define F_GF_SGE_FIFO_PARERR3 V_GF_SGE_FIFO_PARERR3(1U)
+
+#define S_GF_SGE_FIFO_PARERR2 30
+#define V_GF_SGE_FIFO_PARERR2(x) ((x) << S_GF_SGE_FIFO_PARERR2)
+#define F_GF_SGE_FIFO_PARERR2 V_GF_SGE_FIFO_PARERR2(1U)
+
+#define S_GF_SGE_FIFO_PARERR1 29
+#define V_GF_SGE_FIFO_PARERR1(x) ((x) << S_GF_SGE_FIFO_PARERR1)
+#define F_GF_SGE_FIFO_PARERR1 V_GF_SGE_FIFO_PARERR1(1U)
+
+#define S_GF_SGE_FIFO_PARERR0 28
+#define V_GF_SGE_FIFO_PARERR0(x) ((x) << S_GF_SGE_FIFO_PARERR0)
+#define F_GF_SGE_FIFO_PARERR0 V_GF_SGE_FIFO_PARERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR3 27
+#define V_DEDUPE_SGE_FIFO_PARERR3(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR3)
+#define F_DEDUPE_SGE_FIFO_PARERR3 V_DEDUPE_SGE_FIFO_PARERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR2 26
+#define V_DEDUPE_SGE_FIFO_PARERR2(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR2)
+#define F_DEDUPE_SGE_FIFO_PARERR2 V_DEDUPE_SGE_FIFO_PARERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR1 25
+#define V_DEDUPE_SGE_FIFO_PARERR1(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR1)
+#define F_DEDUPE_SGE_FIFO_PARERR1 V_DEDUPE_SGE_FIFO_PARERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR0 24
+#define V_DEDUPE_SGE_FIFO_PARERR0(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR0)
+#define F_DEDUPE_SGE_FIFO_PARERR0 V_DEDUPE_SGE_FIFO_PARERR0(1U)
+
+#define S_GF3_DSGL_FIFO_PARERR 23
+#define V_GF3_DSGL_FIFO_PARERR(x) ((x) << S_GF3_DSGL_FIFO_PARERR)
+#define F_GF3_DSGL_FIFO_PARERR V_GF3_DSGL_FIFO_PARERR(1U)
+
+#define S_GF2_DSGL_FIFO_PARERR 22
+#define V_GF2_DSGL_FIFO_PARERR(x) ((x) << S_GF2_DSGL_FIFO_PARERR)
+#define F_GF2_DSGL_FIFO_PARERR V_GF2_DSGL_FIFO_PARERR(1U)
+
+#define S_GF1_DSGL_FIFO_PARERR 21
+#define V_GF1_DSGL_FIFO_PARERR(x) ((x) << S_GF1_DSGL_FIFO_PARERR)
+#define F_GF1_DSGL_FIFO_PARERR V_GF1_DSGL_FIFO_PARERR(1U)
+
+#define S_GF0_DSGL_FIFO_PARERR 20
+#define V_GF0_DSGL_FIFO_PARERR(x) ((x) << S_GF0_DSGL_FIFO_PARERR)
+#define F_GF0_DSGL_FIFO_PARERR V_GF0_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE3_DSGL_FIFO_PARERR 19
+#define V_DEDUPE3_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE3_DSGL_FIFO_PARERR)
+#define F_DEDUPE3_DSGL_FIFO_PARERR V_DEDUPE3_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE2_DSGL_FIFO_PARERR 18
+#define V_DEDUPE2_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE2_DSGL_FIFO_PARERR)
+#define F_DEDUPE2_DSGL_FIFO_PARERR V_DEDUPE2_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE1_DSGL_FIFO_PARERR 17
+#define V_DEDUPE1_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE1_DSGL_FIFO_PARERR)
+#define F_DEDUPE1_DSGL_FIFO_PARERR V_DEDUPE1_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE0_DSGL_FIFO_PARERR 16
+#define V_DEDUPE0_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE0_DSGL_FIFO_PARERR)
+#define F_DEDUPE0_DSGL_FIFO_PARERR V_DEDUPE0_DSGL_FIFO_PARERR(1U)
+
+#define S_XP10_SGE_FIFO_PARERR 15
+#define V_XP10_SGE_FIFO_PARERR(x) ((x) << S_XP10_SGE_FIFO_PARERR)
+#define F_XP10_SGE_FIFO_PARERR V_XP10_SGE_FIFO_PARERR(1U)
+
+#define S_DSGL_PAR_ERR 14
+#define V_DSGL_PAR_ERR(x) ((x) << S_DSGL_PAR_ERR)
+#define F_DSGL_PAR_ERR V_DSGL_PAR_ERR(1U)
+
+#define S_CDDIP_INT 13
+#define V_CDDIP_INT(x) ((x) << S_CDDIP_INT)
+#define F_CDDIP_INT V_CDDIP_INT(1U)
+
+#define S_CCEIP_INT 12
+#define V_CCEIP_INT(x) ((x) << S_CCEIP_INT)
+#define F_CCEIP_INT V_CCEIP_INT(1U)
+
+#define S_TLS_SGE_FIFO_PARERR3 11
+#define V_TLS_SGE_FIFO_PARERR3(x) ((x) << S_TLS_SGE_FIFO_PARERR3)
+#define F_TLS_SGE_FIFO_PARERR3 V_TLS_SGE_FIFO_PARERR3(1U)
+
+#define S_TLS_SGE_FIFO_PARERR2 10
+#define V_TLS_SGE_FIFO_PARERR2(x) ((x) << S_TLS_SGE_FIFO_PARERR2)
+#define F_TLS_SGE_FIFO_PARERR2 V_TLS_SGE_FIFO_PARERR2(1U)
+
+#define S_TLS_SGE_FIFO_PARERR1 9
+#define V_TLS_SGE_FIFO_PARERR1(x) ((x) << S_TLS_SGE_FIFO_PARERR1)
+#define F_TLS_SGE_FIFO_PARERR1 V_TLS_SGE_FIFO_PARERR1(1U)
+
+#define S_TLS_SGE_FIFO_PARERR0 8
+#define V_TLS_SGE_FIFO_PARERR0(x) ((x) << S_TLS_SGE_FIFO_PARERR0)
+#define F_TLS_SGE_FIFO_PARERR0 V_TLS_SGE_FIFO_PARERR0(1U)
+
+#define S_ULP2SMARBT_RSP_PERR 6
+#define V_ULP2SMARBT_RSP_PERR(x) ((x) << S_ULP2SMARBT_RSP_PERR)
+#define F_ULP2SMARBT_RSP_PERR V_ULP2SMARBT_RSP_PERR(1U)
+
+#define S_ULPTX2MA_RSP_PERR 5
+#define V_ULPTX2MA_RSP_PERR(x) ((x) << S_ULPTX2MA_RSP_PERR)
+#define F_ULPTX2MA_RSP_PERR V_ULPTX2MA_RSP_PERR(1U)
+
+#define S_PCIE2ULP_PERR3 4
+#define V_PCIE2ULP_PERR3(x) ((x) << S_PCIE2ULP_PERR3)
+#define F_PCIE2ULP_PERR3 V_PCIE2ULP_PERR3(1U)
+
+#define S_PCIE2ULP_PERR2 3
+#define V_PCIE2ULP_PERR2(x) ((x) << S_PCIE2ULP_PERR2)
+#define F_PCIE2ULP_PERR2 V_PCIE2ULP_PERR2(1U)
+
+#define S_PCIE2ULP_PERR1 2
+#define V_PCIE2ULP_PERR1(x) ((x) << S_PCIE2ULP_PERR1)
+#define F_PCIE2ULP_PERR1 V_PCIE2ULP_PERR1(1U)
+
+#define S_PCIE2ULP_PERR0 1
+#define V_PCIE2ULP_PERR0(x) ((x) << S_PCIE2ULP_PERR0)
+#define F_PCIE2ULP_PERR0 V_PCIE2ULP_PERR0(1U)
+
+#define S_CIM2ULP_PERR 0
+#define V_CIM2ULP_PERR(x) ((x) << S_CIM2ULP_PERR)
+#define F_CIM2ULP_PERR V_CIM2ULP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_3 0x8e8c
+#define A_ULP_TX_PERR_ENABLE_3 0x8e90
+#define A_ULP_TX_INT_ENABLE_4 0x8e94
+
+#define S_DMA_PAR_ERR3 28
+#define M_DMA_PAR_ERR3 0xfU
+#define V_DMA_PAR_ERR3(x) ((x) << S_DMA_PAR_ERR3)
+#define G_DMA_PAR_ERR3(x) (((x) >> S_DMA_PAR_ERR3) & M_DMA_PAR_ERR3)
+
+#define S_DMA_PAR_ERR2 24
+#define M_DMA_PAR_ERR2 0xfU
+#define V_DMA_PAR_ERR2(x) ((x) << S_DMA_PAR_ERR2)
+#define G_DMA_PAR_ERR2(x) (((x) >> S_DMA_PAR_ERR2) & M_DMA_PAR_ERR2)
+
+#define S_DMA_PAR_ERR1 20
+#define M_DMA_PAR_ERR1 0xfU
+#define V_DMA_PAR_ERR1(x) ((x) << S_DMA_PAR_ERR1)
+#define G_DMA_PAR_ERR1(x) (((x) >> S_DMA_PAR_ERR1) & M_DMA_PAR_ERR1)
+
+#define S_DMA_PAR_ERR0 16
+#define M_DMA_PAR_ERR0 0xfU
+#define V_DMA_PAR_ERR0(x) ((x) << S_DMA_PAR_ERR0)
+#define G_DMA_PAR_ERR0(x) (((x) >> S_DMA_PAR_ERR0) & M_DMA_PAR_ERR0)
+
+#define S_CORE_CMD_FIFO_LB1 12
+#define M_CORE_CMD_FIFO_LB1 0xfU
+#define V_CORE_CMD_FIFO_LB1(x) ((x) << S_CORE_CMD_FIFO_LB1)
+#define G_CORE_CMD_FIFO_LB1(x) (((x) >> S_CORE_CMD_FIFO_LB1) & M_CORE_CMD_FIFO_LB1)
+
+#define S_CORE_CMD_FIFO_LB0 8
+#define M_CORE_CMD_FIFO_LB0 0xfU
+#define V_CORE_CMD_FIFO_LB0(x) ((x) << S_CORE_CMD_FIFO_LB0)
+#define G_CORE_CMD_FIFO_LB0(x) (((x) >> S_CORE_CMD_FIFO_LB0) & M_CORE_CMD_FIFO_LB0)
+
+#define S_XP10_2_ULP_PERR 7
+#define V_XP10_2_ULP_PERR(x) ((x) << S_XP10_2_ULP_PERR)
+#define F_XP10_2_ULP_PERR V_XP10_2_ULP_PERR(1U)
+
+#define S_ULP_2_XP10_PERR 6
+#define V_ULP_2_XP10_PERR(x) ((x) << S_ULP_2_XP10_PERR)
+#define F_ULP_2_XP10_PERR V_ULP_2_XP10_PERR(1U)
+
+#define S_CMD_FIFO_LB1 5
+#define V_CMD_FIFO_LB1(x) ((x) << S_CMD_FIFO_LB1)
+#define F_CMD_FIFO_LB1 V_CMD_FIFO_LB1(1U)
+
+#define S_CMD_FIFO_LB0 4
+#define V_CMD_FIFO_LB0(x) ((x) << S_CMD_FIFO_LB0)
+#define F_CMD_FIFO_LB0 V_CMD_FIFO_LB0(1U)
+
+#define S_TF_TP_PERR 3
+#define V_TF_TP_PERR(x) ((x) << S_TF_TP_PERR)
+#define F_TF_TP_PERR V_TF_TP_PERR(1U)
+
+#define S_TF_SGE_PERR 2
+#define V_TF_SGE_PERR(x) ((x) << S_TF_SGE_PERR)
+#define F_TF_SGE_PERR V_TF_SGE_PERR(1U)
+
+#define S_TF_MEM_PERR 1
+#define V_TF_MEM_PERR(x) ((x) << S_TF_MEM_PERR)
+#define F_TF_MEM_PERR V_TF_MEM_PERR(1U)
+
+#define S_TF_MP_PERR 0
+#define V_TF_MP_PERR(x) ((x) << S_TF_MP_PERR)
+#define F_TF_MP_PERR V_TF_MP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_4 0x8e98
+#define A_ULP_TX_PERR_ENABLE_4 0x8e9c
#define A_ULP_TX_SE_CNT_ERR 0x8ea0
#define S_ERR_CH3 12
@@ -29531,16 +36965,381 @@
#define A_ULP_TX_CSU_REVISION 0x8ebc
#define A_ULP_TX_LA_RDPTR_0 0x8ec0
+#define A_ULP_TX_PL2APB_INFO 0x8ec0
+
+#define S_PL2APB_BRIDGE_HUNG 27
+#define V_PL2APB_BRIDGE_HUNG(x) ((x) << S_PL2APB_BRIDGE_HUNG)
+#define F_PL2APB_BRIDGE_HUNG V_PL2APB_BRIDGE_HUNG(1U)
+
+#define S_PL2APB_BRIDGE_STATE 26
+#define V_PL2APB_BRIDGE_STATE(x) ((x) << S_PL2APB_BRIDGE_STATE)
+#define F_PL2APB_BRIDGE_STATE V_PL2APB_BRIDGE_STATE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_TYPE 25
+#define V_PL2APB_BRIDGE_HUNG_TYPE(x) ((x) << S_PL2APB_BRIDGE_HUNG_TYPE)
+#define F_PL2APB_BRIDGE_HUNG_TYPE V_PL2APB_BRIDGE_HUNG_TYPE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ID 24
+#define V_PL2APB_BRIDGE_HUNG_ID(x) ((x) << S_PL2APB_BRIDGE_HUNG_ID)
+#define F_PL2APB_BRIDGE_HUNG_ID V_PL2APB_BRIDGE_HUNG_ID(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ADDR 0
+#define M_PL2APB_BRIDGE_HUNG_ADDR 0xfffffU
+#define V_PL2APB_BRIDGE_HUNG_ADDR(x) ((x) << S_PL2APB_BRIDGE_HUNG_ADDR)
+#define G_PL2APB_BRIDGE_HUNG_ADDR(x) (((x) >> S_PL2APB_BRIDGE_HUNG_ADDR) & M_PL2APB_BRIDGE_HUNG_ADDR)
+
#define A_ULP_TX_LA_RDDATA_0 0x8ec4
+#define A_ULP_TX_INT_ENABLE_5 0x8ec4
+
+#define S_DEDUPE_PERR3 23
+#define V_DEDUPE_PERR3(x) ((x) << S_DEDUPE_PERR3)
+#define F_DEDUPE_PERR3 V_DEDUPE_PERR3(1U)
+
+#define S_DEDUPE_PERR2 22
+#define V_DEDUPE_PERR2(x) ((x) << S_DEDUPE_PERR2)
+#define F_DEDUPE_PERR2 V_DEDUPE_PERR2(1U)
+
+#define S_DEDUPE_PERR1 21
+#define V_DEDUPE_PERR1(x) ((x) << S_DEDUPE_PERR1)
+#define F_DEDUPE_PERR1 V_DEDUPE_PERR1(1U)
+
+#define S_DEDUPE_PERR0 20
+#define V_DEDUPE_PERR0(x) ((x) << S_DEDUPE_PERR0)
+#define F_DEDUPE_PERR0 V_DEDUPE_PERR0(1U)
+
+#define S_GF_PERR3 19
+#define V_GF_PERR3(x) ((x) << S_GF_PERR3)
+#define F_GF_PERR3 V_GF_PERR3(1U)
+
+#define S_GF_PERR2 18
+#define V_GF_PERR2(x) ((x) << S_GF_PERR2)
+#define F_GF_PERR2 V_GF_PERR2(1U)
+
+#define S_GF_PERR1 17
+#define V_GF_PERR1(x) ((x) << S_GF_PERR1)
+#define F_GF_PERR1 V_GF_PERR1(1U)
+
+#define S_GF_PERR0 16
+#define V_GF_PERR0(x) ((x) << S_GF_PERR0)
+#define F_GF_PERR0 V_GF_PERR0(1U)
+
+#define S_SGE2ULP_INV_PERR 13
+#define V_SGE2ULP_INV_PERR(x) ((x) << S_SGE2ULP_INV_PERR)
+#define F_SGE2ULP_INV_PERR V_SGE2ULP_INV_PERR(1U)
+
+#define S_T7_PL_BUSPERR 12
+#define V_T7_PL_BUSPERR(x) ((x) << S_T7_PL_BUSPERR)
+#define F_T7_PL_BUSPERR V_T7_PL_BUSPERR(1U)
+
+#define S_TLSTX2ULPTX_PERR3 11
+#define V_TLSTX2ULPTX_PERR3(x) ((x) << S_TLSTX2ULPTX_PERR3)
+#define F_TLSTX2ULPTX_PERR3 V_TLSTX2ULPTX_PERR3(1U)
+
+#define S_TLSTX2ULPTX_PERR2 10
+#define V_TLSTX2ULPTX_PERR2(x) ((x) << S_TLSTX2ULPTX_PERR2)
+#define F_TLSTX2ULPTX_PERR2 V_TLSTX2ULPTX_PERR2(1U)
+
+#define S_TLSTX2ULPTX_PERR1 9
+#define V_TLSTX2ULPTX_PERR1(x) ((x) << S_TLSTX2ULPTX_PERR1)
+#define F_TLSTX2ULPTX_PERR1 V_TLSTX2ULPTX_PERR1(1U)
+
+#define S_TLSTX2ULPTX_PERR0 8
+#define V_TLSTX2ULPTX_PERR0(x) ((x) << S_TLSTX2ULPTX_PERR0)
+#define F_TLSTX2ULPTX_PERR0 V_TLSTX2ULPTX_PERR0(1U)
+
+#define S_XP10_2_ULP_PL_PERR 1
+#define V_XP10_2_ULP_PL_PERR(x) ((x) << S_XP10_2_ULP_PL_PERR)
+#define F_XP10_2_ULP_PL_PERR V_XP10_2_ULP_PL_PERR(1U)
+
+#define S_ULP_2_XP10_PL_PERR 0
+#define V_ULP_2_XP10_PL_PERR(x) ((x) << S_ULP_2_XP10_PL_PERR)
+#define F_ULP_2_XP10_PL_PERR V_ULP_2_XP10_PL_PERR(1U)
+
#define A_ULP_TX_LA_WRPTR_0 0x8ec8
+#define A_ULP_TX_INT_CAUSE_5 0x8ec8
#define A_ULP_TX_LA_RESERVED_0 0x8ecc
+#define A_ULP_TX_PERR_ENABLE_5 0x8ecc
#define A_ULP_TX_LA_RDPTR_1 0x8ed0
+#define A_ULP_TX_INT_CAUSE_6 0x8ed0
+
+#define S_DDR_HDR_FIFO_PERR_SET3 12
+#define V_DDR_HDR_FIFO_PERR_SET3(x) ((x) << S_DDR_HDR_FIFO_PERR_SET3)
+#define F_DDR_HDR_FIFO_PERR_SET3 V_DDR_HDR_FIFO_PERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET2 11
+#define V_DDR_HDR_FIFO_PERR_SET2(x) ((x) << S_DDR_HDR_FIFO_PERR_SET2)
+#define F_DDR_HDR_FIFO_PERR_SET2 V_DDR_HDR_FIFO_PERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET1 10
+#define V_DDR_HDR_FIFO_PERR_SET1(x) ((x) << S_DDR_HDR_FIFO_PERR_SET1)
+#define F_DDR_HDR_FIFO_PERR_SET1 V_DDR_HDR_FIFO_PERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET0 9
+#define V_DDR_HDR_FIFO_PERR_SET0(x) ((x) << S_DDR_HDR_FIFO_PERR_SET0)
+#define F_DDR_HDR_FIFO_PERR_SET0 V_DDR_HDR_FIFO_PERR_SET0(1U)
+
+#define S_PRE_MP_RSP_PERR_SET3 8
+#define V_PRE_MP_RSP_PERR_SET3(x) ((x) << S_PRE_MP_RSP_PERR_SET3)
+#define F_PRE_MP_RSP_PERR_SET3 V_PRE_MP_RSP_PERR_SET3(1U)
+
+#define S_PRE_MP_RSP_PERR_SET2 7
+#define V_PRE_MP_RSP_PERR_SET2(x) ((x) << S_PRE_MP_RSP_PERR_SET2)
+#define F_PRE_MP_RSP_PERR_SET2 V_PRE_MP_RSP_PERR_SET2(1U)
+
+#define S_PRE_MP_RSP_PERR_SET1 6
+#define V_PRE_MP_RSP_PERR_SET1(x) ((x) << S_PRE_MP_RSP_PERR_SET1)
+#define F_PRE_MP_RSP_PERR_SET1 V_PRE_MP_RSP_PERR_SET1(1U)
+
+#define S_PRE_MP_RSP_PERR_SET0 5
+#define V_PRE_MP_RSP_PERR_SET0(x) ((x) << S_PRE_MP_RSP_PERR_SET0)
+#define F_PRE_MP_RSP_PERR_SET0 V_PRE_MP_RSP_PERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET3 4
+#define V_PRE_CQE_FIFO_PERR_SET3(x) ((x) << S_PRE_CQE_FIFO_PERR_SET3)
+#define F_PRE_CQE_FIFO_PERR_SET3 V_PRE_CQE_FIFO_PERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET2 3
+#define V_PRE_CQE_FIFO_PERR_SET2(x) ((x) << S_PRE_CQE_FIFO_PERR_SET2)
+#define F_PRE_CQE_FIFO_PERR_SET2 V_PRE_CQE_FIFO_PERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET1 2
+#define V_PRE_CQE_FIFO_PERR_SET1(x) ((x) << S_PRE_CQE_FIFO_PERR_SET1)
+#define F_PRE_CQE_FIFO_PERR_SET1 V_PRE_CQE_FIFO_PERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET0 1
+#define V_PRE_CQE_FIFO_PERR_SET0(x) ((x) << S_PRE_CQE_FIFO_PERR_SET0)
+#define F_PRE_CQE_FIFO_PERR_SET0 V_PRE_CQE_FIFO_PERR_SET0(1U)
+
+#define S_RSP_FIFO_PERR_SET 0
+#define V_RSP_FIFO_PERR_SET(x) ((x) << S_RSP_FIFO_PERR_SET)
+#define F_RSP_FIFO_PERR_SET V_RSP_FIFO_PERR_SET(1U)
+
#define A_ULP_TX_LA_RDDATA_1 0x8ed4
+#define A_ULP_TX_INT_ENABLE_6 0x8ed4
#define A_ULP_TX_LA_WRPTR_1 0x8ed8
+#define A_ULP_TX_PERR_ENABLE_6 0x8ed8
#define A_ULP_TX_LA_RESERVED_1 0x8edc
+#define A_ULP_TX_INT_CAUSE_7 0x8edc
+
+#define S_TLS_SGE_FIFO_CORERR3 23
+#define V_TLS_SGE_FIFO_CORERR3(x) ((x) << S_TLS_SGE_FIFO_CORERR3)
+#define F_TLS_SGE_FIFO_CORERR3 V_TLS_SGE_FIFO_CORERR3(1U)
+
+#define S_TLS_SGE_FIFO_CORERR2 22
+#define V_TLS_SGE_FIFO_CORERR2(x) ((x) << S_TLS_SGE_FIFO_CORERR2)
+#define F_TLS_SGE_FIFO_CORERR2 V_TLS_SGE_FIFO_CORERR2(1U)
+
+#define S_TLS_SGE_FIFO_CORERR1 21
+#define V_TLS_SGE_FIFO_CORERR1(x) ((x) << S_TLS_SGE_FIFO_CORERR1)
+#define F_TLS_SGE_FIFO_CORERR1 V_TLS_SGE_FIFO_CORERR1(1U)
+
+#define S_TLS_SGE_FIFO_CORERR0 20
+#define V_TLS_SGE_FIFO_CORERR0(x) ((x) << S_TLS_SGE_FIFO_CORERR0)
+#define F_TLS_SGE_FIFO_CORERR0 V_TLS_SGE_FIFO_CORERR0(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET3 19
+#define V_LSO_HDR_SRAM_CERR_SET3(x) ((x) << S_LSO_HDR_SRAM_CERR_SET3)
+#define F_LSO_HDR_SRAM_CERR_SET3 V_LSO_HDR_SRAM_CERR_SET3(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET2 18
+#define V_LSO_HDR_SRAM_CERR_SET2(x) ((x) << S_LSO_HDR_SRAM_CERR_SET2)
+#define F_LSO_HDR_SRAM_CERR_SET2 V_LSO_HDR_SRAM_CERR_SET2(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET1 17
+#define V_LSO_HDR_SRAM_CERR_SET1(x) ((x) << S_LSO_HDR_SRAM_CERR_SET1)
+#define F_LSO_HDR_SRAM_CERR_SET1 V_LSO_HDR_SRAM_CERR_SET1(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET0 16
+#define V_LSO_HDR_SRAM_CERR_SET0(x) ((x) << S_LSO_HDR_SRAM_CERR_SET0)
+#define F_LSO_HDR_SRAM_CERR_SET0 V_LSO_HDR_SRAM_CERR_SET0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB1 15
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB1 V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB1 14
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB1 V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB1 13
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB1 V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB1 12
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB1 V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB0 11
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB0 V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB0 10
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB0 V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB0 9
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB0 V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB0 8
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB0 V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(1U)
+
+#define S_CQE_FIFO_CERR_SET3 7
+#define V_CQE_FIFO_CERR_SET3(x) ((x) << S_CQE_FIFO_CERR_SET3)
+#define F_CQE_FIFO_CERR_SET3 V_CQE_FIFO_CERR_SET3(1U)
+
+#define S_CQE_FIFO_CERR_SET2 6
+#define V_CQE_FIFO_CERR_SET2(x) ((x) << S_CQE_FIFO_CERR_SET2)
+#define F_CQE_FIFO_CERR_SET2 V_CQE_FIFO_CERR_SET2(1U)
+
+#define S_CQE_FIFO_CERR_SET1 5
+#define V_CQE_FIFO_CERR_SET1(x) ((x) << S_CQE_FIFO_CERR_SET1)
+#define F_CQE_FIFO_CERR_SET1 V_CQE_FIFO_CERR_SET1(1U)
+
+#define S_CQE_FIFO_CERR_SET0 4
+#define V_CQE_FIFO_CERR_SET0(x) ((x) << S_CQE_FIFO_CERR_SET0)
+#define F_CQE_FIFO_CERR_SET0 V_CQE_FIFO_CERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET3 3
+#define V_PRE_CQE_FIFO_CERR_SET3(x) ((x) << S_PRE_CQE_FIFO_CERR_SET3)
+#define F_PRE_CQE_FIFO_CERR_SET3 V_PRE_CQE_FIFO_CERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET2 2
+#define V_PRE_CQE_FIFO_CERR_SET2(x) ((x) << S_PRE_CQE_FIFO_CERR_SET2)
+#define F_PRE_CQE_FIFO_CERR_SET2 V_PRE_CQE_FIFO_CERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET1 1
+#define V_PRE_CQE_FIFO_CERR_SET1(x) ((x) << S_PRE_CQE_FIFO_CERR_SET1)
+#define F_PRE_CQE_FIFO_CERR_SET1 V_PRE_CQE_FIFO_CERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET0 0
+#define V_PRE_CQE_FIFO_CERR_SET0(x) ((x) << S_PRE_CQE_FIFO_CERR_SET0)
+#define F_PRE_CQE_FIFO_CERR_SET0 V_PRE_CQE_FIFO_CERR_SET0(1U)
+
#define A_ULP_TX_LA_RDPTR_2 0x8ee0
+#define A_ULP_TX_INT_ENABLE_7 0x8ee0
#define A_ULP_TX_LA_RDDATA_2 0x8ee4
+#define A_ULP_TX_INT_CAUSE_8 0x8ee4
+
+#define S_MEM_RSP_FIFO_CERR_SET3 28
+#define V_MEM_RSP_FIFO_CERR_SET3(x) ((x) << S_MEM_RSP_FIFO_CERR_SET3)
+#define F_MEM_RSP_FIFO_CERR_SET3 V_MEM_RSP_FIFO_CERR_SET3(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET2 27
+#define V_MEM_RSP_FIFO_CERR_SET2(x) ((x) << S_MEM_RSP_FIFO_CERR_SET2)
+#define F_MEM_RSP_FIFO_CERR_SET2 V_MEM_RSP_FIFO_CERR_SET2(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET1 26
+#define V_MEM_RSP_FIFO_CERR_SET1(x) ((x) << S_MEM_RSP_FIFO_CERR_SET1)
+#define F_MEM_RSP_FIFO_CERR_SET1 V_MEM_RSP_FIFO_CERR_SET1(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET0 25
+#define V_MEM_RSP_FIFO_CERR_SET0(x) ((x) << S_MEM_RSP_FIFO_CERR_SET0)
+#define F_MEM_RSP_FIFO_CERR_SET0 V_MEM_RSP_FIFO_CERR_SET0(1U)
+
+#define S_PI_SRAM_CERR_SET3 24
+#define V_PI_SRAM_CERR_SET3(x) ((x) << S_PI_SRAM_CERR_SET3)
+#define F_PI_SRAM_CERR_SET3 V_PI_SRAM_CERR_SET3(1U)
+
+#define S_PI_SRAM_CERR_SET2 23
+#define V_PI_SRAM_CERR_SET2(x) ((x) << S_PI_SRAM_CERR_SET2)
+#define F_PI_SRAM_CERR_SET2 V_PI_SRAM_CERR_SET2(1U)
+
+#define S_PI_SRAM_CERR_SET1 22
+#define V_PI_SRAM_CERR_SET1(x) ((x) << S_PI_SRAM_CERR_SET1)
+#define F_PI_SRAM_CERR_SET1 V_PI_SRAM_CERR_SET1(1U)
+
+#define S_PI_SRAM_CERR_SET0 21
+#define V_PI_SRAM_CERR_SET0(x) ((x) << S_PI_SRAM_CERR_SET0)
+#define F_PI_SRAM_CERR_SET0 V_PI_SRAM_CERR_SET0(1U)
+
+#define S_PRE_MP_RSP_CERR_SET3 20
+#define V_PRE_MP_RSP_CERR_SET3(x) ((x) << S_PRE_MP_RSP_CERR_SET3)
+#define F_PRE_MP_RSP_CERR_SET3 V_PRE_MP_RSP_CERR_SET3(1U)
+
+#define S_PRE_MP_RSP_CERR_SET2 19
+#define V_PRE_MP_RSP_CERR_SET2(x) ((x) << S_PRE_MP_RSP_CERR_SET2)
+#define F_PRE_MP_RSP_CERR_SET2 V_PRE_MP_RSP_CERR_SET2(1U)
+
+#define S_PRE_MP_RSP_CERR_SET1 18
+#define V_PRE_MP_RSP_CERR_SET1(x) ((x) << S_PRE_MP_RSP_CERR_SET1)
+#define F_PRE_MP_RSP_CERR_SET1 V_PRE_MP_RSP_CERR_SET1(1U)
+
+#define S_PRE_MP_RSP_CERR_SET0 17
+#define V_PRE_MP_RSP_CERR_SET0(x) ((x) << S_PRE_MP_RSP_CERR_SET0)
+#define F_PRE_MP_RSP_CERR_SET0 V_PRE_MP_RSP_CERR_SET0(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET3 16
+#define V_DDR_HDR_FIFO_CERR_SET3(x) ((x) << S_DDR_HDR_FIFO_CERR_SET3)
+#define F_DDR_HDR_FIFO_CERR_SET3 V_DDR_HDR_FIFO_CERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET2 15
+#define V_DDR_HDR_FIFO_CERR_SET2(x) ((x) << S_DDR_HDR_FIFO_CERR_SET2)
+#define F_DDR_HDR_FIFO_CERR_SET2 V_DDR_HDR_FIFO_CERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET1 14
+#define V_DDR_HDR_FIFO_CERR_SET1(x) ((x) << S_DDR_HDR_FIFO_CERR_SET1)
+#define F_DDR_HDR_FIFO_CERR_SET1 V_DDR_HDR_FIFO_CERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET0 13
+#define V_DDR_HDR_FIFO_CERR_SET0(x) ((x) << S_DDR_HDR_FIFO_CERR_SET0)
+#define F_DDR_HDR_FIFO_CERR_SET0 V_DDR_HDR_FIFO_CERR_SET0(1U)
+
+#define S_CMD_FIFO_CERR_SET3 12
+#define V_CMD_FIFO_CERR_SET3(x) ((x) << S_CMD_FIFO_CERR_SET3)
+#define F_CMD_FIFO_CERR_SET3 V_CMD_FIFO_CERR_SET3(1U)
+
+#define S_CMD_FIFO_CERR_SET2 11
+#define V_CMD_FIFO_CERR_SET2(x) ((x) << S_CMD_FIFO_CERR_SET2)
+#define F_CMD_FIFO_CERR_SET2 V_CMD_FIFO_CERR_SET2(1U)
+
+#define S_CMD_FIFO_CERR_SET1 10
+#define V_CMD_FIFO_CERR_SET1(x) ((x) << S_CMD_FIFO_CERR_SET1)
+#define F_CMD_FIFO_CERR_SET1 V_CMD_FIFO_CERR_SET1(1U)
+
+#define S_CMD_FIFO_CERR_SET0 9
+#define V_CMD_FIFO_CERR_SET0(x) ((x) << S_CMD_FIFO_CERR_SET0)
+#define F_CMD_FIFO_CERR_SET0 V_CMD_FIFO_CERR_SET0(1U)
+
+#define S_GF_SGE_FIFO_CORERR3 8
+#define V_GF_SGE_FIFO_CORERR3(x) ((x) << S_GF_SGE_FIFO_CORERR3)
+#define F_GF_SGE_FIFO_CORERR3 V_GF_SGE_FIFO_CORERR3(1U)
+
+#define S_GF_SGE_FIFO_CORERR2 7
+#define V_GF_SGE_FIFO_CORERR2(x) ((x) << S_GF_SGE_FIFO_CORERR2)
+#define F_GF_SGE_FIFO_CORERR2 V_GF_SGE_FIFO_CORERR2(1U)
+
+#define S_GF_SGE_FIFO_CORERR1 6
+#define V_GF_SGE_FIFO_CORERR1(x) ((x) << S_GF_SGE_FIFO_CORERR1)
+#define F_GF_SGE_FIFO_CORERR1 V_GF_SGE_FIFO_CORERR1(1U)
+
+#define S_GF_SGE_FIFO_CORERR0 5
+#define V_GF_SGE_FIFO_CORERR0(x) ((x) << S_GF_SGE_FIFO_CORERR0)
+#define F_GF_SGE_FIFO_CORERR0 V_GF_SGE_FIFO_CORERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR3 4
+#define V_DEDUPE_SGE_FIFO_CORERR3(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR3)
+#define F_DEDUPE_SGE_FIFO_CORERR3 V_DEDUPE_SGE_FIFO_CORERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR2 3
+#define V_DEDUPE_SGE_FIFO_CORERR2(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR2)
+#define F_DEDUPE_SGE_FIFO_CORERR2 V_DEDUPE_SGE_FIFO_CORERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR1 2
+#define V_DEDUPE_SGE_FIFO_CORERR1(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR1)
+#define F_DEDUPE_SGE_FIFO_CORERR1 V_DEDUPE_SGE_FIFO_CORERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR0 1
+#define V_DEDUPE_SGE_FIFO_CORERR0(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR0)
+#define F_DEDUPE_SGE_FIFO_CORERR0 V_DEDUPE_SGE_FIFO_CORERR0(1U)
+
+#define S_RSP_FIFO_CERR_SET 0
+#define V_RSP_FIFO_CERR_SET(x) ((x) << S_RSP_FIFO_CERR_SET)
+#define F_RSP_FIFO_CERR_SET V_RSP_FIFO_CERR_SET(1U)
+
#define A_ULP_TX_LA_WRPTR_2 0x8ee8
+#define A_ULP_TX_INT_ENABLE_8 0x8ee8
#define A_ULP_TX_LA_RESERVED_2 0x8eec
#define A_ULP_TX_LA_RDPTR_3 0x8ef0
#define A_ULP_TX_LA_RDDATA_3 0x8ef4
@@ -29671,6 +37470,97 @@
#define V_SHOVE_LAST(x) ((x) << S_SHOVE_LAST)
#define F_SHOVE_LAST V_SHOVE_LAST(1U)
+#define A_ULP_TX_ACCELERATOR_CTL 0x8f90
+
+#define S_FIFO_THRESHOLD 8
+#define M_FIFO_THRESHOLD 0x1fU
+#define V_FIFO_THRESHOLD(x) ((x) << S_FIFO_THRESHOLD)
+#define G_FIFO_THRESHOLD(x) (((x) >> S_FIFO_THRESHOLD) & M_FIFO_THRESHOLD)
+
+#define S_COMPRESSION_XP10DISABLECFUSE 5
+#define V_COMPRESSION_XP10DISABLECFUSE(x) ((x) << S_COMPRESSION_XP10DISABLECFUSE)
+#define F_COMPRESSION_XP10DISABLECFUSE V_COMPRESSION_XP10DISABLECFUSE(1U)
+
+#define S_COMPRESSION_XP10DISABLE 4
+#define V_COMPRESSION_XP10DISABLE(x) ((x) << S_COMPRESSION_XP10DISABLE)
+#define F_COMPRESSION_XP10DISABLE V_COMPRESSION_XP10DISABLE(1U)
+
+#define S_DEDUPEDISABLECFUSE 3
+#define V_DEDUPEDISABLECFUSE(x) ((x) << S_DEDUPEDISABLECFUSE)
+#define F_DEDUPEDISABLECFUSE V_DEDUPEDISABLECFUSE(1U)
+
+#define S_DEDUPEDISABLE 2
+#define V_DEDUPEDISABLE(x) ((x) << S_DEDUPEDISABLE)
+#define F_DEDUPEDISABLE V_DEDUPEDISABLE(1U)
+
+#define S_GFDISABLECFUSE 1
+#define V_GFDISABLECFUSE(x) ((x) << S_GFDISABLECFUSE)
+#define F_GFDISABLECFUSE V_GFDISABLECFUSE(1U)
+
+#define S_GFDISABLE 0
+#define V_GFDISABLE(x) ((x) << S_GFDISABLE)
+#define F_GFDISABLE V_GFDISABLE(1U)
+
+#define A_ULP_TX_XP10_IND_ADDR 0x8f94
+
+#define S_XP10_CONTROL 31
+#define V_XP10_CONTROL(x) ((x) << S_XP10_CONTROL)
+#define F_XP10_CONTROL V_XP10_CONTROL(1U)
+
+#define S_XP10_ADDR 0
+#define M_XP10_ADDR 0xfffffU
+#define V_XP10_ADDR(x) ((x) << S_XP10_ADDR)
+#define G_XP10_ADDR(x) (((x) >> S_XP10_ADDR) & M_XP10_ADDR)
+
+#define A_ULP_TX_XP10_IND_DATA 0x8f98
+#define A_ULP_TX_IWARP_PMOF_OPCODES_1 0x8f9c
+
+#define S_RDMA_VERIFY_RESPONSE 24
+#define M_RDMA_VERIFY_RESPONSE 0x1fU
+#define V_RDMA_VERIFY_RESPONSE(x) ((x) << S_RDMA_VERIFY_RESPONSE)
+#define G_RDMA_VERIFY_RESPONSE(x) (((x) >> S_RDMA_VERIFY_RESPONSE) & M_RDMA_VERIFY_RESPONSE)
+
+#define S_RDMA_VERIFY_REQUEST 16
+#define M_RDMA_VERIFY_REQUEST 0x1fU
+#define V_RDMA_VERIFY_REQUEST(x) ((x) << S_RDMA_VERIFY_REQUEST)
+#define G_RDMA_VERIFY_REQUEST(x) (((x) >> S_RDMA_VERIFY_REQUEST) & M_RDMA_VERIFY_REQUEST)
+
+#define S_RDMA_FLUSH_RESPONSE 8
+#define M_RDMA_FLUSH_RESPONSE 0x1fU
+#define V_RDMA_FLUSH_RESPONSE(x) ((x) << S_RDMA_FLUSH_RESPONSE)
+#define G_RDMA_FLUSH_RESPONSE(x) (((x) >> S_RDMA_FLUSH_RESPONSE) & M_RDMA_FLUSH_RESPONSE)
+
+#define S_RDMA_FLUSH_REQUEST 0
+#define M_RDMA_FLUSH_REQUEST 0x1fU
+#define V_RDMA_FLUSH_REQUEST(x) ((x) << S_RDMA_FLUSH_REQUEST)
+#define G_RDMA_FLUSH_REQUEST(x) (((x) >> S_RDMA_FLUSH_REQUEST) & M_RDMA_FLUSH_REQUEST)
+
+#define A_ULP_TX_IWARP_PMOF_OPCODES_2 0x8fa0
+
+#define S_RDMA_SEND_WITH_SE_IMMEDIATE 24
+#define M_RDMA_SEND_WITH_SE_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_SE_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_SE_IMMEDIATE)
+#define G_RDMA_SEND_WITH_SE_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_SE_IMMEDIATE) & M_RDMA_SEND_WITH_SE_IMMEDIATE)
+
+#define S_RDMA_SEND_WITH_IMMEDIATE 16
+#define M_RDMA_SEND_WITH_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_IMMEDIATE)
+#define G_RDMA_SEND_WITH_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_IMMEDIATE) & M_RDMA_SEND_WITH_IMMEDIATE)
+
+#define S_RDMA_ATOMIC_WRITE_RESPONSE 8
+#define M_RDMA_ATOMIC_WRITE_RESPONSE 0x1fU
+#define V_RDMA_ATOMIC_WRITE_RESPONSE(x) ((x) << S_RDMA_ATOMIC_WRITE_RESPONSE)
+#define G_RDMA_ATOMIC_WRITE_RESPONSE(x) (((x) >> S_RDMA_ATOMIC_WRITE_RESPONSE) & M_RDMA_ATOMIC_WRITE_RESPONSE)
+
+#define S_RDMA_ATOMIC_WRITE_REQUEST 0
+#define M_RDMA_ATOMIC_WRITE_REQUEST 0x1fU
+#define V_RDMA_ATOMIC_WRITE_REQUEST(x) ((x) << S_RDMA_ATOMIC_WRITE_REQUEST)
+#define G_RDMA_ATOMIC_WRITE_REQUEST(x) (((x) >> S_RDMA_ATOMIC_WRITE_REQUEST) & M_RDMA_ATOMIC_WRITE_REQUEST)
+
+#define A_ULP_TX_NVME_TCP_TPT_LLIMIT 0x8fa4
+#define A_ULP_TX_NVME_TCP_TPT_ULIMIT 0x8fa8
+#define A_ULP_TX_NVME_TCP_PBL_LLIMIT 0x8fac
+#define A_ULP_TX_NVME_TCP_PBL_ULIMIT 0x8fb0
#define A_ULP_TX_TLS_IND_CMD 0x8fb8
#define S_TLS_TX_REG_OFF_ADDR 0
@@ -29678,7 +37568,48 @@
#define V_TLS_TX_REG_OFF_ADDR(x) ((x) << S_TLS_TX_REG_OFF_ADDR)
#define G_TLS_TX_REG_OFF_ADDR(x) (((x) >> S_TLS_TX_REG_OFF_ADDR) & M_TLS_TX_REG_OFF_ADDR)
+#define A_ULP_TX_DBG_CTL 0x8fb8
#define A_ULP_TX_TLS_IND_DATA 0x8fbc
+#define A_ULP_TX_DBG_DATA 0x8fbc
+#define A_ULP_TX_TLS_CH0_PERR_CAUSE 0xc
+
+#define S_GLUE_PERR 3
+#define V_GLUE_PERR(x) ((x) << S_GLUE_PERR)
+#define F_GLUE_PERR V_GLUE_PERR(1U)
+
+#define S_DSGL_PERR 2
+#define V_DSGL_PERR(x) ((x) << S_DSGL_PERR)
+#define F_DSGL_PERR V_DSGL_PERR(1U)
+
+#define S_SGE_PERR 1
+#define V_SGE_PERR(x) ((x) << S_SGE_PERR)
+#define F_SGE_PERR V_SGE_PERR(1U)
+
+#define S_KEX_PERR 0
+#define V_KEX_PERR(x) ((x) << S_KEX_PERR)
+#define F_KEX_PERR V_KEX_PERR(1U)
+
+#define A_ULP_TX_TLS_CH0_PERR_ENABLE 0x10
+#define A_ULP_TX_TLS_CH0_HMACCTRL_CFG 0x20
+
+#define S_HMAC_CFG6 12
+#define M_HMAC_CFG6 0x3fU
+#define V_HMAC_CFG6(x) ((x) << S_HMAC_CFG6)
+#define G_HMAC_CFG6(x) (((x) >> S_HMAC_CFG6) & M_HMAC_CFG6)
+
+#define S_HMAC_CFG5 6
+#define M_HMAC_CFG5 0x3fU
+#define V_HMAC_CFG5(x) ((x) << S_HMAC_CFG5)
+#define G_HMAC_CFG5(x) (((x) >> S_HMAC_CFG5) & M_HMAC_CFG5)
+
+#define S_HMAC_CFG4 0
+#define M_HMAC_CFG4 0x3fU
+#define V_HMAC_CFG4(x) ((x) << S_HMAC_CFG4)
+#define G_HMAC_CFG4(x) (((x) >> S_HMAC_CFG4) & M_HMAC_CFG4)
+
+#define A_ULP_TX_TLS_CH1_PERR_CAUSE 0x4c
+#define A_ULP_TX_TLS_CH1_PERR_ENABLE 0x50
+#define A_ULP_TX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module PM_RX */
#define PM_RX_BASE_ADDR 0x8fc0
@@ -29703,6 +37634,31 @@
#define V_PREFETCH_ENABLE(x) ((x) << S_PREFETCH_ENABLE)
#define F_PREFETCH_ENABLE V_PREFETCH_ENABLE(1U)
+#define S_CACHE_HOLD 13
+#define V_CACHE_HOLD(x) ((x) << S_CACHE_HOLD)
+#define F_CACHE_HOLD V_CACHE_HOLD(1U)
+
+#define S_CACHE_INIT_DONE 12
+#define V_CACHE_INIT_DONE(x) ((x) << S_CACHE_INIT_DONE)
+#define F_CACHE_INIT_DONE V_CACHE_INIT_DONE(1U)
+
+#define S_CACHE_DEPTH 8
+#define M_CACHE_DEPTH 0xfU
+#define V_CACHE_DEPTH(x) ((x) << S_CACHE_DEPTH)
+#define G_CACHE_DEPTH(x) (((x) >> S_CACHE_DEPTH) & M_CACHE_DEPTH)
+
+#define S_CACHE_INIT 7
+#define V_CACHE_INIT(x) ((x) << S_CACHE_INIT)
+#define F_CACHE_INIT V_CACHE_INIT(1U)
+
+#define S_CACHE_SLEEP 6
+#define V_CACHE_SLEEP(x) ((x) << S_CACHE_SLEEP)
+#define F_CACHE_SLEEP V_CACHE_SLEEP(1U)
+
+#define S_CACHE_BYPASS 5
+#define V_CACHE_BYPASS(x) ((x) << S_CACHE_BYPASS)
+#define F_CACHE_BYPASS V_CACHE_BYPASS(1U)
+
#define A_PM_RX_STAT_CONFIG 0x8fc8
#define A_PM_RX_STAT_COUNT 0x8fcc
#define A_PM_RX_STAT_LSB 0x8fd0
@@ -29723,6 +37679,11 @@
#define V_PMDBGADDR(x) ((x) << S_PMDBGADDR)
#define G_PMDBGADDR(x) (((x) >> S_PMDBGADDR) & M_PMDBGADDR)
+#define S_T7_OSPIWRBUSY_T5 21
+#define M_T7_OSPIWRBUSY_T5 0xfU
+#define V_T7_OSPIWRBUSY_T5(x) ((x) << S_T7_OSPIWRBUSY_T5)
+#define G_T7_OSPIWRBUSY_T5(x) (((x) >> S_T7_OSPIWRBUSY_T5) & M_T7_OSPIWRBUSY_T5)
+
#define A_PM_RX_STAT_MSB 0x8fd4
#define A_PM_RX_DBG_DATA 0x8fd4
#define A_PM_RX_INT_ENABLE 0x8fd8
@@ -29843,7 +37804,36 @@
#define V_SDC_ERR(x) ((x) << S_SDC_ERR)
#define F_SDC_ERR V_SDC_ERR(1U)
+#define S_MASTER_PERR 31
+#define V_MASTER_PERR(x) ((x) << S_MASTER_PERR)
+#define F_MASTER_PERR V_MASTER_PERR(1U)
+
+#define S_T7_OSPI_OVERFLOW3 30
+#define V_T7_OSPI_OVERFLOW3(x) ((x) << S_T7_OSPI_OVERFLOW3)
+#define F_T7_OSPI_OVERFLOW3 V_T7_OSPI_OVERFLOW3(1U)
+
+#define S_T7_OSPI_OVERFLOW2 29
+#define V_T7_OSPI_OVERFLOW2(x) ((x) << S_T7_OSPI_OVERFLOW2)
+#define F_T7_OSPI_OVERFLOW2 V_T7_OSPI_OVERFLOW2(1U)
+
#define A_PM_RX_INT_CAUSE 0x8fdc
+
+#define S_CACHE_SRAM_ERROR 3
+#define V_CACHE_SRAM_ERROR(x) ((x) << S_CACHE_SRAM_ERROR)
+#define F_CACHE_SRAM_ERROR V_CACHE_SRAM_ERROR(1U)
+
+#define S_CACHE_LRU_ERROR 2
+#define V_CACHE_LRU_ERROR(x) ((x) << S_CACHE_LRU_ERROR)
+#define F_CACHE_LRU_ERROR V_CACHE_LRU_ERROR(1U)
+
+#define S_CACHE_ISLAND_ERROR 1
+#define V_CACHE_ISLAND_ERROR(x) ((x) << S_CACHE_ISLAND_ERROR)
+#define F_CACHE_ISLAND_ERROR V_CACHE_ISLAND_ERROR(1U)
+
+#define S_CACHE_CTRL_ERROR 0
+#define V_CACHE_CTRL_ERROR(x) ((x) << S_CACHE_CTRL_ERROR)
+#define F_CACHE_CTRL_ERROR V_CACHE_CTRL_ERROR(1U)
+
#define A_PM_RX_ISPI_DBG_4B_DATA0 0x10000
#define A_PM_RX_ISPI_DBG_4B_DATA1 0x10001
#define A_PM_RX_ISPI_DBG_4B_DATA2 0x10002
@@ -29959,12 +37949,25 @@
#define V_CHNL0_MAX_DEFICIT_CNT(x) ((x) << S_CHNL0_MAX_DEFICIT_CNT)
#define G_CHNL0_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL0_MAX_DEFICIT_CNT) & M_CHNL0_MAX_DEFICIT_CNT)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT0 0x1001c
#define A_PM_RX_FEATURE_EN 0x1001d
#define S_PIO_CH_DEFICIT_CTL_EN_RX 0
#define V_PIO_CH_DEFICIT_CTL_EN_RX(x) ((x) << S_PIO_CH_DEFICIT_CTL_EN_RX)
#define F_PIO_CH_DEFICIT_CTL_EN_RX V_PIO_CH_DEFICIT_CTL_EN_RX(1U)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT1 0x1001d
+
+#define S_CHNL3_MAX_DEFICIT_CNT 16
+#define M_CHNL3_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL3_MAX_DEFICIT_CNT(x) ((x) << S_CHNL3_MAX_DEFICIT_CNT)
+#define G_CHNL3_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL3_MAX_DEFICIT_CNT) & M_CHNL3_MAX_DEFICIT_CNT)
+
+#define S_CHNL2_MAX_DEFICIT_CNT 0
+#define M_CHNL2_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL2_MAX_DEFICIT_CNT(x) ((x) << S_CHNL2_MAX_DEFICIT_CNT)
+#define G_CHNL2_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL2_MAX_DEFICIT_CNT) & M_CHNL2_MAX_DEFICIT_CNT)
+
#define A_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x1001e
#define S_CH0_OSPI_DEFICIT_THRSHLD 0
@@ -30245,16 +38248,6 @@
#define V_RX_C_TXAFULL(x) ((x) << S_RX_C_TXAFULL)
#define G_RX_C_TXAFULL(x) (((x) >> S_RX_C_TXAFULL) & M_RX_C_TXAFULL)
-#define S_T6_RX_PCMD_DRDY 26
-#define M_T6_RX_PCMD_DRDY 0x3U
-#define V_T6_RX_PCMD_DRDY(x) ((x) << S_T6_RX_PCMD_DRDY)
-#define G_T6_RX_PCMD_DRDY(x) (((x) >> S_T6_RX_PCMD_DRDY) & M_T6_RX_PCMD_DRDY)
-
-#define S_T6_RX_PCMD_SRDY 24
-#define M_T6_RX_PCMD_SRDY 0x3U
-#define V_T6_RX_PCMD_SRDY(x) ((x) << S_T6_RX_PCMD_SRDY)
-#define G_T6_RX_PCMD_SRDY(x) (((x) >> S_T6_RX_PCMD_SRDY) & M_T6_RX_PCMD_SRDY)
-
#define A_PM_RX_DBG_STAT6 0x10027
#define S_RX_M_INTRNL_FIFO_CNT 4
@@ -30434,6 +38427,179 @@
#define V_RX_BUNDLE_LEN0(x) ((x) << S_RX_BUNDLE_LEN0)
#define G_RX_BUNDLE_LEN0(x) (((x) >> S_RX_BUNDLE_LEN0) & M_RX_BUNDLE_LEN0)
+#define A_PM_RX_INT_CAUSE_MASK_HALT_2 0x10049
+#define A_PM_RX_INT_ENABLE_2 0x10060
+
+#define S_CACHE_SRAM_ODD_CERR 12
+#define V_CACHE_SRAM_ODD_CERR(x) ((x) << S_CACHE_SRAM_ODD_CERR)
+#define F_CACHE_SRAM_ODD_CERR V_CACHE_SRAM_ODD_CERR(1U)
+
+#define S_CACHE_SRAM_EVEN_CERR 11
+#define V_CACHE_SRAM_EVEN_CERR(x) ((x) << S_CACHE_SRAM_EVEN_CERR)
+#define F_CACHE_SRAM_EVEN_CERR V_CACHE_SRAM_EVEN_CERR(1U)
+
+#define S_CACHE_LRU_LEFT_CERR 10
+#define V_CACHE_LRU_LEFT_CERR(x) ((x) << S_CACHE_LRU_LEFT_CERR)
+#define F_CACHE_LRU_LEFT_CERR V_CACHE_LRU_LEFT_CERR(1U)
+
+#define S_CACHE_LRU_RIGHT_CERR 9
+#define V_CACHE_LRU_RIGHT_CERR(x) ((x) << S_CACHE_LRU_RIGHT_CERR)
+#define F_CACHE_LRU_RIGHT_CERR V_CACHE_LRU_RIGHT_CERR(1U)
+
+#define S_CACHE_ISLAND_CERR 8
+#define V_CACHE_ISLAND_CERR(x) ((x) << S_CACHE_ISLAND_CERR)
+#define F_CACHE_ISLAND_CERR V_CACHE_ISLAND_CERR(1U)
+
+#define S_OCSPI_CERR 7
+#define V_OCSPI_CERR(x) ((x) << S_OCSPI_CERR)
+#define F_OCSPI_CERR V_OCSPI_CERR(1U)
+
+#define S_IESPI_CERR 6
+#define V_IESPI_CERR(x) ((x) << S_IESPI_CERR)
+#define F_IESPI_CERR V_IESPI_CERR(1U)
+
+#define S_OCSPI2_RX_FRAMING_ERROR 5
+#define V_OCSPI2_RX_FRAMING_ERROR(x) ((x) << S_OCSPI2_RX_FRAMING_ERROR)
+#define F_OCSPI2_RX_FRAMING_ERROR V_OCSPI2_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_RX_FRAMING_ERROR 4
+#define V_OCSPI3_RX_FRAMING_ERROR(x) ((x) << S_OCSPI3_RX_FRAMING_ERROR)
+#define F_OCSPI3_RX_FRAMING_ERROR V_OCSPI3_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_TX_FRAMING_ERROR 3
+#define V_OCSPI2_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_TX_FRAMING_ERROR)
+#define F_OCSPI2_TX_FRAMING_ERROR V_OCSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_TX_FRAMING_ERROR 2
+#define V_OCSPI3_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_TX_FRAMING_ERROR)
+#define F_OCSPI3_TX_FRAMING_ERROR V_OCSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI2_OFIFO2X_TX_FRAMING_ERROR V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI3_OFIFO2X_TX_FRAMING_ERROR V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define A_PM_RX_INT_CAUSE_2 0x10061
+#define A_PM_RX_PERR_ENABLE 0x10062
+
+#define S_T7_SDC_ERR 31
+#define V_T7_SDC_ERR(x) ((x) << S_T7_SDC_ERR)
+#define F_T7_SDC_ERR V_T7_SDC_ERR(1U)
+
+#define S_T7_MA_INTF_SDC_ERR 30
+#define V_T7_MA_INTF_SDC_ERR(x) ((x) << S_T7_MA_INTF_SDC_ERR)
+#define F_T7_MA_INTF_SDC_ERR V_T7_MA_INTF_SDC_ERR(1U)
+
+#define S_E_PCMD_PERR 21
+#define V_E_PCMD_PERR(x) ((x) << S_E_PCMD_PERR)
+#define F_E_PCMD_PERR V_E_PCMD_PERR(1U)
+
+#define S_CACHE_RSP_DFIFO_PERR 20
+#define V_CACHE_RSP_DFIFO_PERR(x) ((x) << S_CACHE_RSP_DFIFO_PERR)
+#define F_CACHE_RSP_DFIFO_PERR V_CACHE_RSP_DFIFO_PERR(1U)
+
+#define S_CACHE_SRAM_ODD_PERR 19
+#define V_CACHE_SRAM_ODD_PERR(x) ((x) << S_CACHE_SRAM_ODD_PERR)
+#define F_CACHE_SRAM_ODD_PERR V_CACHE_SRAM_ODD_PERR(1U)
+
+#define S_CACHE_SRAM_EVEN_PERR 18
+#define V_CACHE_SRAM_EVEN_PERR(x) ((x) << S_CACHE_SRAM_EVEN_PERR)
+#define F_CACHE_SRAM_EVEN_PERR V_CACHE_SRAM_EVEN_PERR(1U)
+
+#define S_CACHE_RSVD_PERR 17
+#define V_CACHE_RSVD_PERR(x) ((x) << S_CACHE_RSVD_PERR)
+#define F_CACHE_RSVD_PERR V_CACHE_RSVD_PERR(1U)
+
+#define S_CACHE_LRU_LEFT_PERR 16
+#define V_CACHE_LRU_LEFT_PERR(x) ((x) << S_CACHE_LRU_LEFT_PERR)
+#define F_CACHE_LRU_LEFT_PERR V_CACHE_LRU_LEFT_PERR(1U)
+
+#define S_CACHE_LRU_RIGHT_PERR 15
+#define V_CACHE_LRU_RIGHT_PERR(x) ((x) << S_CACHE_LRU_RIGHT_PERR)
+#define F_CACHE_LRU_RIGHT_PERR V_CACHE_LRU_RIGHT_PERR(1U)
+
+#define S_CACHE_RSP_CMD_PERR 14
+#define V_CACHE_RSP_CMD_PERR(x) ((x) << S_CACHE_RSP_CMD_PERR)
+#define F_CACHE_RSP_CMD_PERR V_CACHE_RSP_CMD_PERR(1U)
+
+#define S_CACHE_SRAM_CMD_PERR 13
+#define V_CACHE_SRAM_CMD_PERR(x) ((x) << S_CACHE_SRAM_CMD_PERR)
+#define F_CACHE_SRAM_CMD_PERR V_CACHE_SRAM_CMD_PERR(1U)
+
+#define S_CACHE_MA_CMD_PERR 12
+#define V_CACHE_MA_CMD_PERR(x) ((x) << S_CACHE_MA_CMD_PERR)
+#define F_CACHE_MA_CMD_PERR V_CACHE_MA_CMD_PERR(1U)
+
+#define S_CACHE_TCAM_PERR 11
+#define V_CACHE_TCAM_PERR(x) ((x) << S_CACHE_TCAM_PERR)
+#define F_CACHE_TCAM_PERR V_CACHE_TCAM_PERR(1U)
+
+#define S_CACHE_ISLAND_PERR 10
+#define V_CACHE_ISLAND_PERR(x) ((x) << S_CACHE_ISLAND_PERR)
+#define F_CACHE_ISLAND_PERR V_CACHE_ISLAND_PERR(1U)
+
+#define S_MC_WCNT_FIFO_PERR 9
+#define V_MC_WCNT_FIFO_PERR(x) ((x) << S_MC_WCNT_FIFO_PERR)
+#define F_MC_WCNT_FIFO_PERR V_MC_WCNT_FIFO_PERR(1U)
+
+#define S_MC_WDATA_FIFO_PERR 8
+#define V_MC_WDATA_FIFO_PERR(x) ((x) << S_MC_WDATA_FIFO_PERR)
+#define F_MC_WDATA_FIFO_PERR V_MC_WDATA_FIFO_PERR(1U)
+
+#define S_MC_RCNT_FIFO_PERR 7
+#define V_MC_RCNT_FIFO_PERR(x) ((x) << S_MC_RCNT_FIFO_PERR)
+#define F_MC_RCNT_FIFO_PERR V_MC_RCNT_FIFO_PERR(1U)
+
+#define S_MC_RDATA_FIFO_PERR 6
+#define V_MC_RDATA_FIFO_PERR(x) ((x) << S_MC_RDATA_FIFO_PERR)
+#define F_MC_RDATA_FIFO_PERR V_MC_RDATA_FIFO_PERR(1U)
+
+#define S_TOKEN_FIFO_PERR 5
+#define V_TOKEN_FIFO_PERR(x) ((x) << S_TOKEN_FIFO_PERR)
+#define F_TOKEN_FIFO_PERR V_TOKEN_FIFO_PERR(1U)
+
+#define S_T7_BUNDLE_LEN_PARERR 4
+#define V_T7_BUNDLE_LEN_PARERR(x) ((x) << S_T7_BUNDLE_LEN_PARERR)
+#define F_T7_BUNDLE_LEN_PARERR V_T7_BUNDLE_LEN_PARERR(1U)
+
+#define A_PM_RX_PERR_CAUSE 0x10063
+#define A_PM_RX_EXT_CFIFO_CONFIG0 0x10070
+
+#define S_CH1_PTR_MAX 17
+#define M_CH1_PTR_MAX 0x7fffU
+#define V_CH1_PTR_MAX(x) ((x) << S_CH1_PTR_MAX)
+#define G_CH1_PTR_MAX(x) (((x) >> S_CH1_PTR_MAX) & M_CH1_PTR_MAX)
+
+#define S_CH0_PTR_MAX 1
+#define M_CH0_PTR_MAX 0x7fffU
+#define V_CH0_PTR_MAX(x) ((x) << S_CH0_PTR_MAX)
+#define G_CH0_PTR_MAX(x) (((x) >> S_CH0_PTR_MAX) & M_CH0_PTR_MAX)
+
+#define S_STROBE 0
+#define V_STROBE(x) ((x) << S_STROBE)
+#define F_STROBE V_STROBE(1U)
+
+#define A_PM_RX_EXT_CFIFO_CONFIG1 0x10071
+
+#define S_CH2_PTR_MAX 1
+#define M_CH2_PTR_MAX 0x7fffU
+#define V_CH2_PTR_MAX(x) ((x) << S_CH2_PTR_MAX)
+#define G_CH2_PTR_MAX(x) (((x) >> S_CH2_PTR_MAX) & M_CH2_PTR_MAX)
+
+#define A_PM_RX_EXT_EFIFO_CONFIG0 0x10072
+#define A_PM_RX_EXT_EFIFO_CONFIG1 0x10073
+#define A_T7_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x10074
+#define A_T7_PM_RX_CH1_OSPI_DEFICIT_THRSHLD 0x10075
+#define A_PM_RX_CH2_OSPI_DEFICIT_THRSHLD 0x10076
+#define A_PM_RX_CH3_OSPI_DEFICIT_THRSHLD 0x10077
+#define A_T7_PM_RX_FEATURE_EN 0x10078
+#define A_PM_RX_TCAM_BIST_CTRL 0x10080
+#define A_PM_RX_TCAM_BIST_CB_PASS 0x10081
+#define A_PM_RX_TCAM_BIST_CB_BUSY 0x10082
+
/* registers for module PM_TX */
#define PM_TX_BASE_ADDR 0x8fe0
@@ -30613,6 +38779,118 @@
#define V_C_PCMD_PAR_ERROR(x) ((x) << S_C_PCMD_PAR_ERROR)
#define F_C_PCMD_PAR_ERROR V_C_PCMD_PAR_ERROR(1U)
+#define S_T7_ZERO_C_CMD_ERROR 30
+#define V_T7_ZERO_C_CMD_ERROR(x) ((x) << S_T7_ZERO_C_CMD_ERROR)
+#define F_T7_ZERO_C_CMD_ERROR V_T7_ZERO_C_CMD_ERROR(1U)
+
+#define S_OESPI_COR_ERR 29
+#define V_OESPI_COR_ERR(x) ((x) << S_OESPI_COR_ERR)
+#define F_OESPI_COR_ERR V_OESPI_COR_ERR(1U)
+
+#define S_ICSPI_COR_ERR 28
+#define V_ICSPI_COR_ERR(x) ((x) << S_ICSPI_COR_ERR)
+#define F_ICSPI_COR_ERR V_ICSPI_COR_ERR(1U)
+
+#define S_ICSPI_OVFL 24
+#define V_ICSPI_OVFL(x) ((x) << S_ICSPI_OVFL)
+#define F_ICSPI_OVFL V_ICSPI_OVFL(1U)
+
+#define S_PCMD_LEN_OVFL3 23
+#define V_PCMD_LEN_OVFL3(x) ((x) << S_PCMD_LEN_OVFL3)
+#define F_PCMD_LEN_OVFL3 V_PCMD_LEN_OVFL3(1U)
+
+#define S_T7_PCMD_LEN_OVFL2 22
+#define V_T7_PCMD_LEN_OVFL2(x) ((x) << S_T7_PCMD_LEN_OVFL2)
+#define F_T7_PCMD_LEN_OVFL2 V_T7_PCMD_LEN_OVFL2(1U)
+
+#define S_T7_PCMD_LEN_OVFL1 21
+#define V_T7_PCMD_LEN_OVFL1(x) ((x) << S_T7_PCMD_LEN_OVFL1)
+#define F_T7_PCMD_LEN_OVFL1 V_T7_PCMD_LEN_OVFL1(1U)
+
+#define S_T7_PCMD_LEN_OVFL0 20
+#define V_T7_PCMD_LEN_OVFL0(x) ((x) << S_T7_PCMD_LEN_OVFL0)
+#define F_T7_PCMD_LEN_OVFL0 V_T7_PCMD_LEN_OVFL0(1U)
+
+#define S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR 19
+#define V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR 18
+#define V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR 17
+#define V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR 16
+#define V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI0_TX_FRAMING_ERROR 15
+#define V_T7_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_TX_FRAMING_ERROR)
+#define F_T7_ICSPI0_TX_FRAMING_ERROR V_T7_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_TX_FRAMING_ERROR 14
+#define V_T7_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_TX_FRAMING_ERROR)
+#define F_T7_ICSPI1_TX_FRAMING_ERROR V_T7_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_TX_FRAMING_ERROR 13
+#define V_T7_ICSPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_TX_FRAMING_ERROR)
+#define F_T7_ICSPI2_TX_FRAMING_ERROR V_T7_ICSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_TX_FRAMING_ERROR 12
+#define V_T7_ICSPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_TX_FRAMING_ERROR)
+#define F_T7_ICSPI3_TX_FRAMING_ERROR V_T7_ICSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_RX_FRAMING_ERROR 11
+#define V_T7_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_RX_FRAMING_ERROR)
+#define F_T7_OESPI0_RX_FRAMING_ERROR V_T7_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_RX_FRAMING_ERROR 10
+#define V_T7_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_RX_FRAMING_ERROR)
+#define F_T7_OESPI1_RX_FRAMING_ERROR V_T7_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_RX_FRAMING_ERROR 9
+#define V_T7_OESPI2_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_RX_FRAMING_ERROR)
+#define F_T7_OESPI2_RX_FRAMING_ERROR V_T7_OESPI2_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_RX_FRAMING_ERROR 8
+#define V_T7_OESPI3_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_RX_FRAMING_ERROR)
+#define F_T7_OESPI3_RX_FRAMING_ERROR V_T7_OESPI3_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_TX_FRAMING_ERROR 7
+#define V_T7_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_TX_FRAMING_ERROR V_T7_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_TX_FRAMING_ERROR 6
+#define V_T7_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_TX_FRAMING_ERROR V_T7_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_TX_FRAMING_ERROR 5
+#define V_T7_OESPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_TX_FRAMING_ERROR V_T7_OESPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_TX_FRAMING_ERROR 4
+#define V_T7_OESPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_TX_FRAMING_ERROR V_T7_OESPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR 3
+#define V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR 2
+#define V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
#define A_PM_TX_INT_CAUSE 0x8ffc
#define S_ZERO_C_CMD_ERROR 28
@@ -30624,23 +38902,51 @@
#define F_OSPI_OR_BUNDLE_LEN_PAR_ERR V_OSPI_OR_BUNDLE_LEN_PAR_ERR(1U)
#define A_PM_TX_ISPI_DBG_4B_DATA0 0x10000
+#define A_T7_PM_TX_DBG_STAT_MSB 0x10000
#define A_PM_TX_ISPI_DBG_4B_DATA1 0x10001
+#define A_T7_PM_TX_DBG_STAT_LSB 0x10001
#define A_PM_TX_ISPI_DBG_4B_DATA2 0x10002
+#define A_T7_PM_TX_DBG_RSVD_FLIT_CNT 0x10002
#define A_PM_TX_ISPI_DBG_4B_DATA3 0x10003
+#define A_T7_PM_TX_SDC_EN 0x10003
#define A_PM_TX_ISPI_DBG_4B_DATA4 0x10004
+#define A_T7_PM_TX_INOUT_FIFO_DBG_CHNL_SEL 0x10004
#define A_PM_TX_ISPI_DBG_4B_DATA5 0x10005
+#define A_T7_PM_TX_INOUT_FIFO_DBG_WR 0x10005
#define A_PM_TX_ISPI_DBG_4B_DATA6 0x10006
+#define A_T7_PM_TX_INPUT_FIFO_STR_FWD_EN 0x10006
#define A_PM_TX_ISPI_DBG_4B_DATA7 0x10007
+#define A_T7_PM_TX_FEATURE_EN 0x10007
+
+#define S_IN_AFULL_TH 5
+#define M_IN_AFULL_TH 0x3U
+#define V_IN_AFULL_TH(x) ((x) << S_IN_AFULL_TH)
+#define G_IN_AFULL_TH(x) (((x) >> S_IN_AFULL_TH) & M_IN_AFULL_TH)
+
+#define S_PIO_FROM_CH_EN 4
+#define V_PIO_FROM_CH_EN(x) ((x) << S_PIO_FROM_CH_EN)
+#define F_PIO_FROM_CH_EN V_PIO_FROM_CH_EN(1U)
+
#define A_PM_TX_ISPI_DBG_4B_DATA8 0x10008
+#define A_T7_PM_TX_T5_PM_TX_INT_ENABLE 0x10008
#define A_PM_TX_OSPI_DBG_4B_DATA0 0x10009
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD0 0x10009
#define A_PM_TX_OSPI_DBG_4B_DATA1 0x1000a
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD1 0x1000a
#define A_PM_TX_OSPI_DBG_4B_DATA2 0x1000b
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD2 0x1000b
#define A_PM_TX_OSPI_DBG_4B_DATA3 0x1000c
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x1000c
#define A_PM_TX_OSPI_DBG_4B_DATA4 0x1000d
+#define A_T7_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x1000d
#define A_PM_TX_OSPI_DBG_4B_DATA5 0x1000e
+#define A_T7_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x1000e
#define A_PM_TX_OSPI_DBG_4B_DATA6 0x1000f
+#define A_T7_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x1000f
#define A_PM_TX_OSPI_DBG_4B_DATA7 0x10010
+#define A_T7_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x10010
#define A_PM_TX_OSPI_DBG_4B_DATA8 0x10011
+#define A_T7_PM_TX_INT_CAUSE_MASK_HALT 0x10011
#define A_PM_TX_OSPI_DBG_4B_DATA9 0x10012
#define A_PM_TX_OSPI_DBG_4B_DATA10 0x10013
#define A_PM_TX_OSPI_DBG_4B_DATA11 0x10014
@@ -30722,6 +39028,48 @@
#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x10026
#define A_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x10027
#define A_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x10028
+#define A_PM_TX_PERR_ENABLE 0x10028
+
+#define S_T7_1_OSPI_OVERFLOW3 23
+#define V_T7_1_OSPI_OVERFLOW3(x) ((x) << S_T7_1_OSPI_OVERFLOW3)
+#define F_T7_1_OSPI_OVERFLOW3 V_T7_1_OSPI_OVERFLOW3(1U)
+
+#define S_T7_1_OSPI_OVERFLOW2 22
+#define V_T7_1_OSPI_OVERFLOW2(x) ((x) << S_T7_1_OSPI_OVERFLOW2)
+#define F_T7_1_OSPI_OVERFLOW2 V_T7_1_OSPI_OVERFLOW2(1U)
+
+#define S_T7_1_OSPI_OVERFLOW1 21
+#define V_T7_1_OSPI_OVERFLOW1(x) ((x) << S_T7_1_OSPI_OVERFLOW1)
+#define F_T7_1_OSPI_OVERFLOW1 V_T7_1_OSPI_OVERFLOW1(1U)
+
+#define S_T7_1_OSPI_OVERFLOW0 20
+#define V_T7_1_OSPI_OVERFLOW0(x) ((x) << S_T7_1_OSPI_OVERFLOW0)
+#define F_T7_1_OSPI_OVERFLOW0 V_T7_1_OSPI_OVERFLOW0(1U)
+
+#define S_T7_BUNDLE_LEN_OVFL_EN 18
+#define V_T7_BUNDLE_LEN_OVFL_EN(x) ((x) << S_T7_BUNDLE_LEN_OVFL_EN)
+#define F_T7_BUNDLE_LEN_OVFL_EN V_T7_BUNDLE_LEN_OVFL_EN(1U)
+
+#define S_T7_M_INTFPERREN 17
+#define V_T7_M_INTFPERREN(x) ((x) << S_T7_M_INTFPERREN)
+#define F_T7_M_INTFPERREN V_T7_M_INTFPERREN(1U)
+
+#define S_T7_1_SDC_ERR 16
+#define V_T7_1_SDC_ERR(x) ((x) << S_T7_1_SDC_ERR)
+#define F_T7_1_SDC_ERR V_T7_1_SDC_ERR(1U)
+
+#define S_TOKEN_PAR_ERROR 5
+#define V_TOKEN_PAR_ERROR(x) ((x) << S_TOKEN_PAR_ERROR)
+#define F_TOKEN_PAR_ERROR V_TOKEN_PAR_ERROR(1U)
+
+#define S_BUNDLE_LEN_PAR_ERROR 4
+#define V_BUNDLE_LEN_PAR_ERROR(x) ((x) << S_BUNDLE_LEN_PAR_ERROR)
+#define F_BUNDLE_LEN_PAR_ERROR V_BUNDLE_LEN_PAR_ERROR(1U)
+
+#define S_C_PCMD_TOKEN_PAR_ERROR 0
+#define V_C_PCMD_TOKEN_PAR_ERROR(x) ((x) << S_C_PCMD_TOKEN_PAR_ERROR)
+#define F_C_PCMD_TOKEN_PAR_ERROR V_C_PCMD_TOKEN_PAR_ERROR(1U)
+
#define A_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x10029
#define S_CH2_OSPI_DEFICIT_THRSHLD 0
@@ -30729,6 +39077,7 @@
#define V_CH2_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH2_OSPI_DEFICIT_THRSHLD)
#define G_CH2_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH2_OSPI_DEFICIT_THRSHLD) & M_CH2_OSPI_DEFICIT_THRSHLD)
+#define A_PM_TX_PERR_CAUSE 0x10029
#define A_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x1002a
#define S_CH3_OSPI_DEFICIT_THRSHLD 0
@@ -31462,6 +39811,7 @@
#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
#define A_MPS_PORT_TX_PAUSE_SOURCE_L 0x24
+#define A_MPS_VF_TX_MAC_DROP_PP 0x24
#define A_MPS_PORT_TX_PAUSE_SOURCE_H 0x28
#define A_MPS_PORT_PRTY_BUFFER_GROUP_MAP 0x2c
@@ -31547,6 +39897,24 @@
#define V_TXPRTY0(x) ((x) << S_TXPRTY0)
#define G_TXPRTY0(x) (((x) >> S_TXPRTY0) & M_TXPRTY0)
+#define A_MPS_PORT_PRTY_GROUP_MAP 0x34
+#define A_MPS_PORT_TRACE_MAX_CAPTURE_SIZE 0x38
+
+#define S_TX2RX 6
+#define M_TX2RX 0x7U
+#define V_TX2RX(x) ((x) << S_TX2RX)
+#define G_TX2RX(x) (((x) >> S_TX2RX) & M_TX2RX)
+
+#define S_MAC2MPS 3
+#define M_MAC2MPS 0x7U
+#define V_MAC2MPS(x) ((x) << S_MAC2MPS)
+#define G_MAC2MPS(x) (((x) >> S_MAC2MPS) & M_MAC2MPS)
+
+#define S_MPS2MAC 0
+#define M_MPS2MAC 0x7U
+#define V_MPS2MAC(x) ((x) << S_MPS2MAC)
+#define G_MPS2MAC(x) (((x) >> S_MPS2MAC) & M_MPS2MAC)
+
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_H 0x84
#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
@@ -31578,7 +39946,9 @@
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_H 0xf4
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_L 0xf8
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_H 0xfc
#define A_MPS_PORT_RX_CTL 0x100
#define S_NO_RPLCT_M 20
@@ -31682,6 +40052,26 @@
#define V_HASH_EN_MAC(x) ((x) << S_HASH_EN_MAC)
#define F_HASH_EN_MAC V_HASH_EN_MAC(1U)
+#define S_TRANS_ENCAP_EN 30
+#define V_TRANS_ENCAP_EN(x) ((x) << S_TRANS_ENCAP_EN)
+#define F_TRANS_ENCAP_EN V_TRANS_ENCAP_EN(1U)
+
+#define S_CRYPTO_DUMMY_PKT_CHK_EN 29
+#define V_CRYPTO_DUMMY_PKT_CHK_EN(x) ((x) << S_CRYPTO_DUMMY_PKT_CHK_EN)
+#define F_CRYPTO_DUMMY_PKT_CHK_EN V_CRYPTO_DUMMY_PKT_CHK_EN(1U)
+
+#define S_PASS_HPROM 28
+#define V_PASS_HPROM(x) ((x) << S_PASS_HPROM)
+#define F_PASS_HPROM V_PASS_HPROM(1U)
+
+#define S_PASS_PROM 27
+#define V_PASS_PROM(x) ((x) << S_PASS_PROM)
+#define F_PASS_PROM V_PASS_PROM(1U)
+
+#define S_ENCAP_ONLY_IF_OUTER_HIT 26
+#define V_ENCAP_ONLY_IF_OUTER_HIT(x) ((x) << S_ENCAP_ONLY_IF_OUTER_HIT)
+#define F_ENCAP_ONLY_IF_OUTER_HIT V_ENCAP_ONLY_IF_OUTER_HIT(1U)
+
#define A_MPS_PORT_RX_MTU 0x104
#define A_MPS_PORT_RX_PF_MAP 0x108
#define A_MPS_PORT_RX_VF_MAP0 0x10c
@@ -31924,6 +40314,23 @@
#define V_REPL_VECT_SEL(x) ((x) << S_REPL_VECT_SEL)
#define G_REPL_VECT_SEL(x) (((x) >> S_REPL_VECT_SEL) & M_REPL_VECT_SEL)
+#define A_MPS_PORT_MAC_RX_DROP_EN_PP 0x16c
+
+#define S_PRIO 0
+#define M_PRIO 0xffU
+#define V_PRIO(x) ((x) << S_PRIO)
+#define G_PRIO(x) (((x) >> S_PRIO) & M_PRIO)
+
+#define A_MPS_PORT_RX_INT_RSS_HASH 0x170
+#define A_MPS_PORT_RX_INT_RSS_CONTROL 0x174
+#define A_MPS_PORT_RX_CNT_DBG_CTL 0x178
+
+#define S_DBG_TYPE 0
+#define M_DBG_TYPE 0x1fU
+#define V_DBG_TYPE(x) ((x) << S_DBG_TYPE)
+#define G_DBG_TYPE(x) (((x) >> S_DBG_TYPE) & M_DBG_TYPE)
+
+#define A_MPS_PORT_RX_CNT_DBG 0x17c
#define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190
#define S_CREDIT 0
@@ -31984,6 +40391,10 @@
#define V_ON_PENDING(x) ((x) << S_ON_PENDING)
#define G_ON_PENDING(x) (((x) >> S_ON_PENDING) & M_ON_PENDING)
+#define A_MPS_PORT_TX_MAC_DROP_PP 0x1d4
+#define A_MPS_PORT_TX_LPBK_DROP_PP 0x1d8
+#define A_MPS_PORT_TX_MAC_DROP_CNT 0x1dc
+#define A_MPS_PORT_TX_LPBK_DROP_CNT 0x1e0
#define A_MPS_PORT_CLS_HASH_SRAM 0x200
#define S_VALID 20
@@ -32097,6 +40508,13 @@
#define V_TAG(x) ((x) << S_TAG)
#define G_TAG(x) (((x) >> S_TAG) & M_TAG)
+#define A_MPS_PF_TX_MAC_DROP_PP 0x2e4
+
+#define S_T7_DROPEN 0
+#define M_T7_DROPEN 0xffU
+#define V_T7_DROPEN(x) ((x) << S_T7_DROPEN)
+#define G_T7_DROPEN(x) (((x) >> S_T7_DROPEN) & M_T7_DROPEN)
+
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_L 0x300
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_H 0x304
#define A_MPS_PORT_CLS_HASH_CTL 0x304
@@ -32112,35 +40530,9 @@
#define V_PROMISCEN(x) ((x) << S_PROMISCEN)
#define F_PROMISCEN V_PROMISCEN(1U)
-#define S_T6_MULTILISTEN 16
-#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN)
-#define F_T6_MULTILISTEN V_T6_MULTILISTEN(1U)
-
-#define S_T6_PRIORITY 13
-#define M_T6_PRIORITY 0x7U
-#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY)
-#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY)
-
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_L 0x30c
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_H 0x310
@@ -32156,6 +40548,7 @@
#define V_MATCHALL(x) ((x) << S_MATCHALL)
#define F_MATCHALL V_MATCHALL(1U)
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_H 0x310
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314
#define A_MPS_PORT_CLS_BMC_VLAN 0x314
@@ -32167,6 +40560,7 @@
#define V_VLAN_VLD(x) ((x) << S_VLAN_VLD)
#define F_VLAN_VLD V_VLAN_VLD(1U)
+#define A_MPS_PORT_CLS_BMC_VLAN0 0x314
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_L 0x318
#define A_MPS_PORT_CLS_CTL 0x318
@@ -32218,6 +40612,18 @@
#define V_DMAC_TCAM_SEL(x) ((x) << S_DMAC_TCAM_SEL)
#define G_DMAC_TCAM_SEL(x) (((x) >> S_DMAC_TCAM_SEL) & M_DMAC_TCAM_SEL)
+#define S_SMAC_INDEX_EN 17
+#define V_SMAC_INDEX_EN(x) ((x) << S_SMAC_INDEX_EN)
+#define F_SMAC_INDEX_EN V_SMAC_INDEX_EN(1U)
+
+#define S_LPBK_TCAM2_HIT_PRIORITY 16
+#define V_LPBK_TCAM2_HIT_PRIORITY(x) ((x) << S_LPBK_TCAM2_HIT_PRIORITY)
+#define F_LPBK_TCAM2_HIT_PRIORITY V_LPBK_TCAM2_HIT_PRIORITY(1U)
+
+#define S_TCAM2_HIT_PRIORITY 15
+#define V_TCAM2_HIT_PRIORITY(x) ((x) << S_TCAM2_HIT_PRIORITY)
+#define F_TCAM2_HIT_PRIORITY V_TCAM2_HIT_PRIORITY(1U)
+
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c
#define A_MPS_PORT_CLS_NCSI_ETH_TYPE 0x31c
@@ -32238,14 +40644,23 @@
#define F_EN2 V_EN2(1U)
#define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_L 0x324
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_H 0x328
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_L 0x32c
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_L 0x330
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_H 0x330
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_H 0x334
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_L 0x334
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_L 0x338
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_H 0x338
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_H 0x33c
+#define A_MPS_PORT_CLS_BMC_VLAN1 0x33c
#define A_MPS_PF_STAT_RX_PF_BYTES_L 0x340
+#define A_MPS_PORT_CLS_BMC_VLAN2 0x340
#define A_MPS_PF_STAT_RX_PF_BYTES_H 0x344
+#define A_MPS_PORT_CLS_BMC_VLAN3 0x344
#define A_MPS_PF_STAT_RX_PF_FRAMES_L 0x348
#define A_MPS_PF_STAT_RX_PF_FRAMES_H 0x34c
#define A_MPS_PF_STAT_RX_PF_BCAST_BYTES_L 0x350
@@ -32261,7 +40676,9 @@
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_L 0x378
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_H 0x37c
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_L 0x380
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_L 0x380
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_H 0x384
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_H 0x384
#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
@@ -32393,6 +40810,22 @@
#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_L 0x618
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_H 0x61c
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_L 0x620
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_H 0x624
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_L 0x628
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_H 0x62c
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_L 0x630
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_H 0x634
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_L 0x638
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_H 0x63c
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_L 0x640
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_H 0x644
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_L 0x648
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_H 0x64c
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_L 0x650
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_H 0x654
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_L 0x658
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_H 0x65c
#define A_MPS_CMN_CTL 0x9000
#define S_DETECT8023 3
@@ -32425,6 +40858,46 @@
#define V_SPEEDMODE(x) ((x) << S_SPEEDMODE)
#define G_SPEEDMODE(x) (((x) >> S_SPEEDMODE) & M_SPEEDMODE)
+#define S_PT1_SEL_CFG 21
+#define V_PT1_SEL_CFG(x) ((x) << S_PT1_SEL_CFG)
+#define F_PT1_SEL_CFG V_PT1_SEL_CFG(1U)
+
+#define S_BUG_42938_EN 20
+#define V_BUG_42938_EN(x) ((x) << S_BUG_42938_EN)
+#define F_BUG_42938_EN V_BUG_42938_EN(1U)
+
+#define S_NO_BYPASS_PAUSE 19
+#define V_NO_BYPASS_PAUSE(x) ((x) << S_NO_BYPASS_PAUSE)
+#define F_NO_BYPASS_PAUSE V_NO_BYPASS_PAUSE(1U)
+
+#define S_BYPASS_PAUSE 18
+#define V_BYPASS_PAUSE(x) ((x) << S_BYPASS_PAUSE)
+#define F_BYPASS_PAUSE V_BYPASS_PAUSE(1U)
+
+#define S_PBUS_EN 16
+#define M_PBUS_EN 0x3U
+#define V_PBUS_EN(x) ((x) << S_PBUS_EN)
+#define G_PBUS_EN(x) (((x) >> S_PBUS_EN) & M_PBUS_EN)
+
+#define S_INIC_EN 14
+#define M_INIC_EN 0x3U
+#define V_INIC_EN(x) ((x) << S_INIC_EN)
+#define G_INIC_EN(x) (((x) >> S_INIC_EN) & M_INIC_EN)
+
+#define S_SBA_EN 12
+#define M_SBA_EN 0x3U
+#define V_SBA_EN(x) ((x) << S_SBA_EN)
+#define G_SBA_EN(x) (((x) >> S_SBA_EN) & M_SBA_EN)
+
+#define S_BG2TP_MAP_MODE 11
+#define V_BG2TP_MAP_MODE(x) ((x) << S_BG2TP_MAP_MODE)
+#define F_BG2TP_MAP_MODE V_BG2TP_MAP_MODE(1U)
+
+#define S_MPS_LB_MODE 9
+#define M_MPS_LB_MODE 0x3U
+#define V_MPS_LB_MODE(x) ((x) << S_MPS_LB_MODE)
+#define G_MPS_LB_MODE(x) (((x) >> S_MPS_LB_MODE) & M_MPS_LB_MODE)
+
#define A_MPS_INT_ENABLE 0x9004
#define S_STATINTENB 5
@@ -32618,6 +41091,17 @@
#define A_MPS_T5_BUILD_REVISION 0x9078
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH0 0x907c
+
+#define S_VALUE_1 16
+#define M_VALUE_1 0xffffU
+#define V_VALUE_1(x) ((x) << S_VALUE_1)
+#define G_VALUE_1(x) (((x) >> S_VALUE_1) & M_VALUE_1)
+
+#define S_VALUE_0 0
+#define M_VALUE_0 0xffffU
+#define V_VALUE_0(x) ((x) << S_VALUE_0)
+#define G_VALUE_0(x) (((x) >> S_VALUE_0) & M_VALUE_0)
+
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH1 0x9080
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH2 0x9084
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH3 0x9088
@@ -32671,11 +41155,130 @@
#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
#define A_MPS_FPGA_BIST_CFG_P1 0x9124
-
-#define S_T6_BASEADDR 0
-#define M_T6_BASEADDR 0xffffU
-#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR)
-#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
+#define A_MPS_FPGA_BIST_CFG_P2 0x9128
+#define A_MPS_FPGA_BIST_CFG_P3 0x912c
+#define A_MPS_INIC_CTL 0x9130
+
+#define S_T7_RD_WRN 16
+#define V_T7_RD_WRN(x) ((x) << S_T7_RD_WRN)
+#define F_T7_RD_WRN V_T7_RD_WRN(1U)
+
+#define A_MPS_INIC_DATA 0x9134
+#define A_MPS_TP_CSIDE_MUX_CTL_P2 0x9138
+#define A_MPS_TP_CSIDE_MUX_CTL_P3 0x913c
+#define A_MPS_RED_CTL 0x9140
+
+#define S_LPBK_SHIFT_0 28
+#define M_LPBK_SHIFT_0 0xfU
+#define V_LPBK_SHIFT_0(x) ((x) << S_LPBK_SHIFT_0)
+#define G_LPBK_SHIFT_0(x) (((x) >> S_LPBK_SHIFT_0) & M_LPBK_SHIFT_0)
+
+#define S_LPBK_SHIFT_1 24
+#define M_LPBK_SHIFT_1 0xfU
+#define V_LPBK_SHIFT_1(x) ((x) << S_LPBK_SHIFT_1)
+#define G_LPBK_SHIFT_1(x) (((x) >> S_LPBK_SHIFT_1) & M_LPBK_SHIFT_1)
+
+#define S_LPBK_SHIFT_2 20
+#define M_LPBK_SHIFT_2 0xfU
+#define V_LPBK_SHIFT_2(x) ((x) << S_LPBK_SHIFT_2)
+#define G_LPBK_SHIFT_2(x) (((x) >> S_LPBK_SHIFT_2) & M_LPBK_SHIFT_2)
+
+#define S_LPBK_SHIFT_3 16
+#define M_LPBK_SHIFT_3 0xfU
+#define V_LPBK_SHIFT_3(x) ((x) << S_LPBK_SHIFT_3)
+#define G_LPBK_SHIFT_3(x) (((x) >> S_LPBK_SHIFT_3) & M_LPBK_SHIFT_3)
+
+#define S_MAC_SHIFT_0 12
+#define M_MAC_SHIFT_0 0xfU
+#define V_MAC_SHIFT_0(x) ((x) << S_MAC_SHIFT_0)
+#define G_MAC_SHIFT_0(x) (((x) >> S_MAC_SHIFT_0) & M_MAC_SHIFT_0)
+
+#define S_MAC_SHIFT_1 8
+#define M_MAC_SHIFT_1 0xfU
+#define V_MAC_SHIFT_1(x) ((x) << S_MAC_SHIFT_1)
+#define G_MAC_SHIFT_1(x) (((x) >> S_MAC_SHIFT_1) & M_MAC_SHIFT_1)
+
+#define S_MAC_SHIFT_2 4
+#define M_MAC_SHIFT_2 0xfU
+#define V_MAC_SHIFT_2(x) ((x) << S_MAC_SHIFT_2)
+#define G_MAC_SHIFT_2(x) (((x) >> S_MAC_SHIFT_2) & M_MAC_SHIFT_2)
+
+#define S_MAC_SHIFT_3 0
+#define M_MAC_SHIFT_3 0xfU
+#define V_MAC_SHIFT_3(x) ((x) << S_MAC_SHIFT_3)
+#define G_MAC_SHIFT_3(x) (((x) >> S_MAC_SHIFT_3) & M_MAC_SHIFT_3)
+
+#define A_MPS_RED_EN 0x9144
+
+#define S_LPBK_EN3 7
+#define V_LPBK_EN3(x) ((x) << S_LPBK_EN3)
+#define F_LPBK_EN3 V_LPBK_EN3(1U)
+
+#define S_LPBK_EN2 6
+#define V_LPBK_EN2(x) ((x) << S_LPBK_EN2)
+#define F_LPBK_EN2 V_LPBK_EN2(1U)
+
+#define S_LPBK_EN1 5
+#define V_LPBK_EN1(x) ((x) << S_LPBK_EN1)
+#define F_LPBK_EN1 V_LPBK_EN1(1U)
+
+#define S_LPBK_EN0 4
+#define V_LPBK_EN0(x) ((x) << S_LPBK_EN0)
+#define F_LPBK_EN0 V_LPBK_EN0(1U)
+
+#define S_MAC_EN3 3
+#define V_MAC_EN3(x) ((x) << S_MAC_EN3)
+#define F_MAC_EN3 V_MAC_EN3(1U)
+
+#define S_MAC_EN2 2
+#define V_MAC_EN2(x) ((x) << S_MAC_EN2)
+#define F_MAC_EN2 V_MAC_EN2(1U)
+
+#define S_MAC_EN1 1
+#define V_MAC_EN1(x) ((x) << S_MAC_EN1)
+#define F_MAC_EN1 V_MAC_EN1(1U)
+
+#define S_MAC_EN0 0
+#define V_MAC_EN0(x) ((x) << S_MAC_EN0)
+#define F_MAC_EN0 V_MAC_EN0(1U)
+
+#define A_MPS_MAC0_RED_DROP_CNT_H 0x9148
+#define A_MPS_MAC0_RED_DROP_CNT_L 0x914c
+#define A_MPS_MAC1_RED_DROP_CNT_H 0x9150
+#define A_MPS_MAC1_RED_DROP_CNT_L 0x9154
+#define A_MPS_MAC2_RED_DROP_CNT_H 0x9158
+#define A_MPS_MAC2_RED_DROP_CNT_L 0x915c
+#define A_MPS_MAC3_RED_DROP_CNT_H 0x9160
+#define A_MPS_MAC3_RED_DROP_CNT_L 0x9164
+#define A_MPS_LPBK0_RED_DROP_CNT_H 0x9168
+#define A_MPS_LPBK0_RED_DROP_CNT_L 0x916c
+#define A_MPS_LPBK1_RED_DROP_CNT_H 0x9170
+#define A_MPS_LPBK1_RED_DROP_CNT_L 0x9174
+#define A_MPS_LPBK2_RED_DROP_CNT_H 0x9178
+#define A_MPS_LPBK2_RED_DROP_CNT_L 0x917c
+#define A_MPS_LPBK3_RED_DROP_CNT_H 0x9180
+#define A_MPS_LPBK3_RED_DROP_CNT_L 0x9184
+#define A_MPS_MAC_RED_PP_DROP_EN 0x9188
+
+#define S_T7_MAC3 24
+#define M_T7_MAC3 0xffU
+#define V_T7_MAC3(x) ((x) << S_T7_MAC3)
+#define G_T7_MAC3(x) (((x) >> S_T7_MAC3) & M_T7_MAC3)
+
+#define S_T7_MAC2 16
+#define M_T7_MAC2 0xffU
+#define V_T7_MAC2(x) ((x) << S_T7_MAC2)
+#define G_T7_MAC2(x) (((x) >> S_T7_MAC2) & M_T7_MAC2)
+
+#define S_T7_MAC1 8
+#define M_T7_MAC1 0xffU
+#define V_T7_MAC1(x) ((x) << S_T7_MAC1)
+#define G_T7_MAC1(x) (((x) >> S_T7_MAC1) & M_T7_MAC1)
+
+#define S_T7_MAC0 0
+#define M_T7_MAC0 0xffU
+#define V_T7_MAC0(x) ((x) << S_T7_MAC0)
+#define G_T7_MAC0(x) (((x) >> S_T7_MAC0) & M_T7_MAC0)
#define A_MPS_TX_PRTY_SEL 0x9400
@@ -32714,6 +41317,26 @@
#define V_NCSI_SOURCE(x) ((x) << S_NCSI_SOURCE)
#define G_NCSI_SOURCE(x) (((x) >> S_NCSI_SOURCE) & M_NCSI_SOURCE)
+#define S_T7_CH4_PRTY 16
+#define M_T7_CH4_PRTY 0x7U
+#define V_T7_CH4_PRTY(x) ((x) << S_T7_CH4_PRTY)
+#define G_T7_CH4_PRTY(x) (((x) >> S_T7_CH4_PRTY) & M_T7_CH4_PRTY)
+
+#define S_T7_CH3_PRTY 13
+#define M_T7_CH3_PRTY 0x7U
+#define V_T7_CH3_PRTY(x) ((x) << S_T7_CH3_PRTY)
+#define G_T7_CH3_PRTY(x) (((x) >> S_T7_CH3_PRTY) & M_T7_CH3_PRTY)
+
+#define S_T7_CH2_PRTY 10
+#define M_T7_CH2_PRTY 0x7U
+#define V_T7_CH2_PRTY(x) ((x) << S_T7_CH2_PRTY)
+#define G_T7_CH2_PRTY(x) (((x) >> S_T7_CH2_PRTY) & M_T7_CH2_PRTY)
+
+#define S_T7_CH1_PRTY 7
+#define M_T7_CH1_PRTY 0x7U
+#define V_T7_CH1_PRTY(x) ((x) << S_T7_CH1_PRTY)
+#define G_T7_CH1_PRTY(x) (((x) >> S_T7_CH1_PRTY) & M_T7_CH1_PRTY)
+
#define A_MPS_TX_INT_ENABLE 0x9404
#define S_PORTERR 16
@@ -32751,9 +41374,52 @@
#define V_TPFIFO(x) ((x) << S_TPFIFO)
#define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO)
+#define S_T7_PORTERR 28
+#define V_T7_PORTERR(x) ((x) << S_T7_PORTERR)
+#define F_T7_PORTERR V_T7_PORTERR(1U)
+
+#define S_T7_FRMERR 27
+#define V_T7_FRMERR(x) ((x) << S_T7_FRMERR)
+#define F_T7_FRMERR V_T7_FRMERR(1U)
+
+#define S_T7_SECNTERR 26
+#define V_T7_SECNTERR(x) ((x) << S_T7_SECNTERR)
+#define F_T7_SECNTERR V_T7_SECNTERR(1U)
+
+#define S_T7_BUBBLE 25
+#define V_T7_BUBBLE(x) ((x) << S_T7_BUBBLE)
+#define F_T7_BUBBLE V_T7_BUBBLE(1U)
+
+#define S_TXTOKENFIFO 15
+#define M_TXTOKENFIFO 0x3ffU
+#define V_TXTOKENFIFO(x) ((x) << S_TXTOKENFIFO)
+#define G_TXTOKENFIFO(x) (((x) >> S_TXTOKENFIFO) & M_TXTOKENFIFO)
+
+#define S_PERR_TP2MPS_TFIFO 13
+#define M_PERR_TP2MPS_TFIFO 0x3U
+#define V_PERR_TP2MPS_TFIFO(x) ((x) << S_PERR_TP2MPS_TFIFO)
+#define G_PERR_TP2MPS_TFIFO(x) (((x) >> S_PERR_TP2MPS_TFIFO) & M_PERR_TP2MPS_TFIFO)
+
#define A_MPS_TX_INT_CAUSE 0x9408
#define A_MPS_TX_NCSI2MPS_CNT 0x940c
#define A_MPS_TX_PERR_ENABLE 0x9410
+
+#define S_PORTERRINT 28
+#define V_PORTERRINT(x) ((x) << S_PORTERRINT)
+#define F_PORTERRINT V_PORTERRINT(1U)
+
+#define S_FRAMINGERRINT 27
+#define V_FRAMINGERRINT(x) ((x) << S_FRAMINGERRINT)
+#define F_FRAMINGERRINT V_FRAMINGERRINT(1U)
+
+#define S_SECNTERRINT 26
+#define V_SECNTERRINT(x) ((x) << S_SECNTERRINT)
+#define F_SECNTERRINT V_SECNTERRINT(1U)
+
+#define S_BUBBLEERRINT 25
+#define V_BUBBLEERRINT(x) ((x) << S_BUBBLEERRINT)
+#define F_BUBBLEERRINT V_BUBBLEERRINT(1U)
+
#define A_MPS_TX_PERR_INJECT 0x9414
#define S_MPSTXMEMSEL 1
@@ -33481,6 +42147,41 @@
#define F_TXINCH0_CGEN V_TXINCH0_CGEN(1U)
#define A_MPS_TX_CGEN_DYNAMIC 0x9470
+#define A_MPS_TX2RX_CH_MAP 0x9474
+
+#define S_ENABLELBK_CH3 3
+#define V_ENABLELBK_CH3(x) ((x) << S_ENABLELBK_CH3)
+#define F_ENABLELBK_CH3 V_ENABLELBK_CH3(1U)
+
+#define S_ENABLELBK_CH2 2
+#define V_ENABLELBK_CH2(x) ((x) << S_ENABLELBK_CH2)
+#define F_ENABLELBK_CH2 V_ENABLELBK_CH2(1U)
+
+#define S_ENABLELBK_CH1 1
+#define V_ENABLELBK_CH1(x) ((x) << S_ENABLELBK_CH1)
+#define F_ENABLELBK_CH1 V_ENABLELBK_CH1(1U)
+
+#define S_ENABLELBK_CH0 0
+#define V_ENABLELBK_CH0(x) ((x) << S_ENABLELBK_CH0)
+#define F_ENABLELBK_CH0 V_ENABLELBK_CH0(1U)
+
+#define A_MPS_TX_DBG_CNT_CTL 0x9478
+
+#define S_DBG_CNT_CTL 0
+#define M_DBG_CNT_CTL 0xffU
+#define V_DBG_CNT_CTL(x) ((x) << S_DBG_CNT_CTL)
+#define G_DBG_CNT_CTL(x) (((x) >> S_DBG_CNT_CTL) & M_DBG_CNT_CTL)
+
+#define A_MPS_TX_DBG_CNT 0x947c
+#define A_MPS_TX_INT2_ENABLE 0x9498
+#define A_MPS_TX_INT2_CAUSE 0x949c
+#define A_MPS_TX_PERR2_ENABLE 0x94a0
+#define A_MPS_TX_INT3_ENABLE 0x94a4
+#define A_MPS_TX_INT3_CAUSE 0x94a8
+#define A_MPS_TX_PERR3_ENABLE 0x94ac
+#define A_MPS_TX_INT4_ENABLE 0x94b0
+#define A_MPS_TX_INT4_CAUSE 0x94b4
+#define A_MPS_TX_PERR4_ENABLE 0x94b8
#define A_MPS_STAT_CTL 0x9600
#define S_COUNTVFINPF 1
@@ -33810,6 +42511,7 @@
#define A_MPS_TRC_RSS_HASH 0x9804
#define A_MPS_TRC_FILTER0_RSS_HASH 0x9804
+#define A_T7_MPS_TRC_PERR_INJECT 0x9804
#define A_MPS_TRC_RSS_CONTROL 0x9808
#define S_RSSCONTROL 16
@@ -33939,6 +42641,20 @@
#define V_FILTMEM(x) ((x) << S_FILTMEM)
#define G_FILTMEM(x) (((x) >> S_FILTMEM) & M_FILTMEM)
+#define S_T7_MISCPERR 16
+#define V_T7_MISCPERR(x) ((x) << S_T7_MISCPERR)
+#define F_T7_MISCPERR V_T7_MISCPERR(1U)
+
+#define S_T7_PKTFIFO 8
+#define M_T7_PKTFIFO 0xffU
+#define V_T7_PKTFIFO(x) ((x) << S_T7_PKTFIFO)
+#define G_T7_PKTFIFO(x) (((x) >> S_T7_PKTFIFO) & M_T7_PKTFIFO)
+
+#define S_T7_FILTMEM 0
+#define M_T7_FILTMEM 0xffU
+#define V_T7_FILTMEM(x) ((x) << S_T7_FILTMEM)
+#define G_T7_FILTMEM(x) (((x) >> S_T7_FILTMEM) & M_T7_FILTMEM)
+
#define A_MPS_TRC_INT_ENABLE 0x9858
#define S_TRCPLERRENB 9
@@ -33961,6 +42677,7 @@
#define A_MPS_TRC_FILTER2_RSS_HASH 0x9ff8
#define A_MPS_TRC_FILTER2_RSS_CONTROL 0x9ffc
#define A_MPS_TRC_FILTER3_RSS_HASH 0xa000
+#define A_MPS_TRC_FILTER4_MATCH 0xa000
#define A_MPS_TRC_FILTER3_RSS_CONTROL 0xa004
#define A_MPS_T5_TRC_RSS_HASH 0xa008
#define A_MPS_T5_TRC_RSS_CONTROL 0xa00c
@@ -34043,125 +42760,8 @@
#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
#define A_MPS_TRC_VF_OFF_FILTER_1 0xa014
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_2 0xa018
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_3 0xa01c
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_CGEN 0xa020
#define S_MPSTRCCGEN 0
@@ -34169,6 +42769,129 @@
#define V_MPSTRCCGEN(x) ((x) << S_MPSTRCCGEN)
#define G_MPSTRCCGEN(x) (((x) >> S_MPSTRCCGEN) & M_MPSTRCCGEN)
+#define A_MPS_TRC_FILTER4_DONT_CARE 0xa080
+#define A_MPS_TRC_FILTER5_MATCH 0xa100
+#define A_MPS_TRC_FILTER5_DONT_CARE 0xa180
+#define A_MPS_TRC_FILTER6_MATCH 0xa200
+#define A_MPS_TRC_FILTER6_DONT_CARE 0xa280
+#define A_MPS_TRC_FILTER7_MATCH 0xa300
+#define A_MPS_TRC_FILTER7_DONT_CARE 0xa380
+#define A_T7_MPS_TRC_FILTER0_RSS_HASH 0xa3f0
+#define A_T7_MPS_TRC_FILTER0_RSS_CONTROL 0xa3f4
+#define A_T7_MPS_TRC_FILTER1_RSS_HASH 0xa3f8
+#define A_T7_MPS_TRC_FILTER1_RSS_CONTROL 0xa3fc
+#define A_T7_MPS_TRC_FILTER2_RSS_HASH 0xa400
+#define A_T7_MPS_TRC_FILTER2_RSS_CONTROL 0xa404
+#define A_T7_MPS_TRC_FILTER3_RSS_HASH 0xa408
+#define A_T7_MPS_TRC_FILTER3_RSS_CONTROL 0xa40c
+#define A_MPS_TRC_FILTER4_RSS_HASH 0xa410
+#define A_MPS_TRC_FILTER4_RSS_CONTROL 0xa414
+#define A_MPS_TRC_FILTER5_RSS_HASH 0xa418
+#define A_MPS_TRC_FILTER5_RSS_CONTROL 0xa41c
+#define A_MPS_TRC_FILTER6_RSS_HASH 0xa420
+#define A_MPS_TRC_FILTER6_RSS_CONTROL 0xa424
+#define A_MPS_TRC_FILTER7_RSS_HASH 0xa428
+#define A_MPS_TRC_FILTER7_RSS_CONTROL 0xa42c
+#define A_T7_MPS_T5_TRC_RSS_HASH 0xa430
+#define A_T7_MPS_T5_TRC_RSS_CONTROL 0xa434
+#define A_T7_MPS_TRC_VF_OFF_FILTER_0 0xa438
+#define A_T7_MPS_TRC_VF_OFF_FILTER_1 0xa43c
+#define A_T7_MPS_TRC_VF_OFF_FILTER_2 0xa440
+#define A_T7_MPS_TRC_VF_OFF_FILTER_3 0xa444
+#define A_MPS_TRC_VF_OFF_FILTER_4 0xa448
+#define A_MPS_TRC_VF_OFF_FILTER_5 0xa44c
+#define A_MPS_TRC_VF_OFF_FILTER_6 0xa450
+#define A_MPS_TRC_VF_OFF_FILTER_7 0xa454
+#define A_T7_MPS_TRC_CGEN 0xa458
+
+#define S_T7_MPSTRCCGEN 0
+#define M_T7_MPSTRCCGEN 0xffU
+#define V_T7_MPSTRCCGEN(x) ((x) << S_T7_MPSTRCCGEN)
+#define G_T7_MPSTRCCGEN(x) (((x) >> S_T7_MPSTRCCGEN) & M_T7_MPSTRCCGEN)
+
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_A 0xa460
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_B 0xa480
+#define A_T7_MPS_TRC_FILTER_RUNT_CTL 0xa4a0
+#define A_T7_MPS_TRC_FILTER_DROP 0xa4c0
+#define A_T7_MPS_TRC_INT_ENABLE 0xa4e0
+
+#define S_T7_TRCPLERRENB 17
+#define V_T7_TRCPLERRENB(x) ((x) << S_T7_TRCPLERRENB)
+#define F_T7_TRCPLERRENB V_T7_TRCPLERRENB(1U)
+
+#define A_T7_MPS_TRC_INT_CAUSE 0xa4e4
+#define A_T7_MPS_TRC_TIMESTAMP_L 0xa4e8
+#define A_T7_MPS_TRC_TIMESTAMP_H 0xa4ec
+#define A_MPS_TRC_PERR_ENABLE2 0xa4f0
+
+#define S_TRC_TF_ECC 24
+#define M_TRC_TF_ECC 0xffU
+#define V_TRC_TF_ECC(x) ((x) << S_TRC_TF_ECC)
+#define G_TRC_TF_ECC(x) (((x) >> S_TRC_TF_ECC) & M_TRC_TF_ECC)
+
+#define S_MPS2MAC_CONV_TRC_CERR 22
+#define M_MPS2MAC_CONV_TRC_CERR 0x3U
+#define V_MPS2MAC_CONV_TRC_CERR(x) ((x) << S_MPS2MAC_CONV_TRC_CERR)
+#define G_MPS2MAC_CONV_TRC_CERR(x) (((x) >> S_MPS2MAC_CONV_TRC_CERR) & M_MPS2MAC_CONV_TRC_CERR)
+
+#define S_MPS2MAC_CONV_TRC 18
+#define M_MPS2MAC_CONV_TRC 0xfU
+#define V_MPS2MAC_CONV_TRC(x) ((x) << S_MPS2MAC_CONV_TRC)
+#define G_MPS2MAC_CONV_TRC(x) (((x) >> S_MPS2MAC_CONV_TRC) & M_MPS2MAC_CONV_TRC)
+
+#define S_TF0_PERR_1 17
+#define V_TF0_PERR_1(x) ((x) << S_TF0_PERR_1)
+#define F_TF0_PERR_1 V_TF0_PERR_1(1U)
+
+#define S_TF1_PERR_1 16
+#define V_TF1_PERR_1(x) ((x) << S_TF1_PERR_1)
+#define F_TF1_PERR_1 V_TF1_PERR_1(1U)
+
+#define S_TF2_PERR_1 15
+#define V_TF2_PERR_1(x) ((x) << S_TF2_PERR_1)
+#define F_TF2_PERR_1 V_TF2_PERR_1(1U)
+
+#define S_TF3_PERR_1 14
+#define V_TF3_PERR_1(x) ((x) << S_TF3_PERR_1)
+#define F_TF3_PERR_1 V_TF3_PERR_1(1U)
+
+#define S_TF4_PERR_1 13
+#define V_TF4_PERR_1(x) ((x) << S_TF4_PERR_1)
+#define F_TF4_PERR_1 V_TF4_PERR_1(1U)
+
+#define S_TF0_PERR_0 12
+#define V_TF0_PERR_0(x) ((x) << S_TF0_PERR_0)
+#define F_TF0_PERR_0 V_TF0_PERR_0(1U)
+
+#define S_TF1_PERR_0 11
+#define V_TF1_PERR_0(x) ((x) << S_TF1_PERR_0)
+#define F_TF1_PERR_0 V_TF1_PERR_0(1U)
+
+#define S_TF2_PERR_0 10
+#define V_TF2_PERR_0(x) ((x) << S_TF2_PERR_0)
+#define F_TF2_PERR_0 V_TF2_PERR_0(1U)
+
+#define S_TF3_PERR_0 9
+#define V_TF3_PERR_0(x) ((x) << S_TF3_PERR_0)
+#define F_TF3_PERR_0 V_TF3_PERR_0(1U)
+
+#define S_TF4_PERR_0 8
+#define V_TF4_PERR_0(x) ((x) << S_TF4_PERR_0)
+#define F_TF4_PERR_0 V_TF4_PERR_0(1U)
+
+#define S_PERR_TF_IN_CTL 0
+#define M_PERR_TF_IN_CTL 0xffU
+#define V_PERR_TF_IN_CTL(x) ((x) << S_PERR_TF_IN_CTL)
+#define G_PERR_TF_IN_CTL(x) (((x) >> S_PERR_TF_IN_CTL) & M_PERR_TF_IN_CTL)
+
+#define A_MPS_TRC_INT_ENABLE2 0xa4f4
+#define A_MPS_TRC_INT_CAUSE2 0xa4f8
+
+#define S_T7_TRC_TF_ECC 22
+#define M_T7_TRC_TF_ECC 0xffU
+#define V_T7_TRC_TF_ECC(x) ((x) << S_T7_TRC_TF_ECC)
+#define G_T7_TRC_TF_ECC(x) (((x) >> S_T7_TRC_TF_ECC) & M_T7_TRC_TF_ECC)
+
#define A_MPS_CLS_CTL 0xd000
#define S_MEMWRITEFAULT 4
@@ -34246,12 +42969,24 @@
#define V_MATCHSRAM(x) ((x) << S_MATCHSRAM)
#define F_MATCHSRAM V_MATCHSRAM(1U)
+#define S_CIM2MPS_INTF_PAR 4
+#define V_CIM2MPS_INTF_PAR(x) ((x) << S_CIM2MPS_INTF_PAR)
+#define F_CIM2MPS_INTF_PAR V_CIM2MPS_INTF_PAR(1U)
+
+#define S_TCAM_CRC_SRAM 3
+#define V_TCAM_CRC_SRAM(x) ((x) << S_TCAM_CRC_SRAM)
+#define F_TCAM_CRC_SRAM V_TCAM_CRC_SRAM(1U)
+
#define A_MPS_CLS_INT_ENABLE 0xd024
#define S_PLERRENB 3
#define V_PLERRENB(x) ((x) << S_PLERRENB)
#define F_PLERRENB V_PLERRENB(1U)
+#define S_T7_PLERRENB 5
+#define V_T7_PLERRENB(x) ((x) << S_T7_PLERRENB)
+#define F_T7_PLERRENB V_T7_PLERRENB(1U)
+
#define A_MPS_CLS_INT_CAUSE 0xd028
#define A_MPS_CLS_PL_TEST_DATA_L 0xd02c
#define A_MPS_CLS_PL_TEST_DATA_H 0xd030
@@ -34314,6 +43049,25 @@
#define V_T6_CLS_VF(x) ((x) << S_T6_CLS_VF)
#define G_T6_CLS_VF(x) (((x) >> S_T6_CLS_VF) & M_T6_CLS_VF)
+#define S_T7_CLS_SPARE 30
+#define M_T7_CLS_SPARE 0x3U
+#define V_T7_CLS_SPARE(x) ((x) << S_T7_CLS_SPARE)
+#define G_T7_CLS_SPARE(x) (((x) >> S_T7_CLS_SPARE) & M_T7_CLS_SPARE)
+
+#define S_T7_1_CLS_PRIORITY 27
+#define M_T7_1_CLS_PRIORITY 0x7U
+#define V_T7_1_CLS_PRIORITY(x) ((x) << S_T7_1_CLS_PRIORITY)
+#define G_T7_1_CLS_PRIORITY(x) (((x) >> S_T7_1_CLS_PRIORITY) & M_T7_1_CLS_PRIORITY)
+
+#define S_T7_1_CLS_REPLICATE 26
+#define V_T7_1_CLS_REPLICATE(x) ((x) << S_T7_1_CLS_REPLICATE)
+#define F_T7_1_CLS_REPLICATE V_T7_1_CLS_REPLICATE(1U)
+
+#define S_T7_1_CLS_INDEX 15
+#define M_T7_1_CLS_INDEX 0x7ffU
+#define V_T7_1_CLS_INDEX(x) ((x) << S_T7_1_CLS_INDEX)
+#define G_T7_1_CLS_INDEX(x) (((x) >> S_T7_1_CLS_INDEX) & M_T7_1_CLS_INDEX)
+
#define A_MPS_CLS_PL_TEST_CTL 0xd038
#define S_PLTESTCTL 0
@@ -34327,12 +43081,26 @@
#define F_PRTBMCCTL V_PRTBMCCTL(1U)
#define A_MPS_CLS_MATCH_CNT_TCAM 0xd100
+#define A_MPS_CLS0_MATCH_CNT_TCAM 0xd100
#define A_MPS_CLS_MATCH_CNT_HASH 0xd104
+#define A_MPS_CLS0_MATCH_CNT_HASH 0xd104
#define A_MPS_CLS_MATCH_CNT_BCAST 0xd108
+#define A_MPS_CLS0_MATCH_CNT_BCAST 0xd108
#define A_MPS_CLS_MATCH_CNT_BMC 0xd10c
+#define A_MPS_CLS0_MATCH_CNT_BMC 0xd10c
#define A_MPS_CLS_MATCH_CNT_PROM 0xd110
+#define A_MPS_CLS0_MATCH_CNT_PROM 0xd110
#define A_MPS_CLS_MATCH_CNT_HPROM 0xd114
+#define A_MPS_CLS0_MATCH_CNT_HPROM 0xd114
#define A_MPS_CLS_MISS_CNT 0xd118
+#define A_MPS_CLS0_MISS_CNT 0xd118
+#define A_MPS_CLS1_MATCH_CNT_TCAM 0xd11c
+#define A_MPS_CLS1_MATCH_CNT_HASH 0xd120
+#define A_MPS_CLS1_MATCH_CNT_BCAST 0xd124
+#define A_MPS_CLS1_MATCH_CNT_BMC 0xd128
+#define A_MPS_CLS1_MATCH_CNT_PROM 0xd12c
+#define A_MPS_CLS1_MATCH_CNT_HPROM 0xd130
+#define A_MPS_CLS1_MISS_CNT 0xd134
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_L 0xd200
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_H 0xd204
@@ -34428,6 +43196,15 @@
#define V_CLSTRCVF(x) ((x) << S_CLSTRCVF)
#define G_CLSTRCVF(x) (((x) >> S_CLSTRCVF) & M_CLSTRCVF)
+#define S_T7_CLSTRCMATCH 23
+#define V_T7_CLSTRCMATCH(x) ((x) << S_T7_CLSTRCMATCH)
+#define F_T7_CLSTRCMATCH V_T7_CLSTRCMATCH(1U)
+
+#define S_T7_CLSTRCINDEX 12
+#define M_T7_CLSTRCINDEX 0x7ffU
+#define V_T7_CLSTRCINDEX(x) ((x) << S_T7_CLSTRCINDEX)
+#define G_T7_CLSTRCINDEX(x) (((x) >> S_T7_CLSTRCINDEX) & M_T7_CLSTRCINDEX)
+
#define A_MPS_CLS_VLAN_TABLE 0xdfc0
#define S_VLAN_MASK 16
@@ -34536,24 +43313,6 @@
#define V_T6_SRAM_VLD(x) ((x) << S_T6_SRAM_VLD)
#define F_T6_SRAM_VLD V_T6_SRAM_VLD(1U)
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_CLS_SRAM_H 0xe004
#define S_MACPARITY1 9
@@ -34580,6 +43339,41 @@
#define V_MACPARITY2(x) ((x) << S_MACPARITY2)
#define F_MACPARITY2 V_MACPARITY2(1U)
+#define S_SRAMWRN 31
+#define V_SRAMWRN(x) ((x) << S_SRAMWRN)
+#define F_SRAMWRN V_SRAMWRN(1U)
+
+#define S_SRAMSPARE 27
+#define M_SRAMSPARE 0xfU
+#define V_SRAMSPARE(x) ((x) << S_SRAMSPARE)
+#define G_SRAMSPARE(x) (((x) >> S_SRAMSPARE) & M_SRAMSPARE)
+
+#define S_SRAMINDEX 16
+#define M_SRAMINDEX 0x7ffU
+#define V_SRAMINDEX(x) ((x) << S_SRAMINDEX)
+#define G_SRAMINDEX(x) (((x) >> S_SRAMINDEX) & M_SRAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_CTL 0xe008
+
+#define S_T7_CTLCMDTYPE 15
+#define V_T7_CTLCMDTYPE(x) ((x) << S_T7_CTLCMDTYPE)
+#define F_T7_CTLCMDTYPE V_T7_CTLCMDTYPE(1U)
+
+#define S_T7_CTLXYBITSEL 12
+#define V_T7_CTLXYBITSEL(x) ((x) << S_T7_CTLXYBITSEL)
+#define F_T7_CTLXYBITSEL V_T7_CTLXYBITSEL(1U)
+
+#define S_T7_CTLTCAMINDEX 0
+#define M_T7_CTLTCAMINDEX 0x1ffU
+#define V_T7_CTLTCAMINDEX(x) ((x) << S_T7_CTLTCAMINDEX)
+#define G_T7_CTLTCAMINDEX(x) (((x) >> S_T7_CTLTCAMINDEX) & M_T7_CTLTCAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_DATA 0xe00c
+
+#define S_LKPTYPE 24
+#define V_LKPTYPE(x) ((x) << S_LKPTYPE)
+#define F_LKPTYPE V_LKPTYPE(1U)
+
#define A_MPS_CLS_TCAM_Y_L 0xf000
#define A_MPS_CLS_TCAM_DATA0 0xf000
#define A_MPS_CLS_TCAM_Y_H 0xf004
@@ -34648,6 +43442,16 @@
#define V_DATAVIDH1(x) ((x) << S_DATAVIDH1)
#define G_DATAVIDH1(x) (((x) >> S_DATAVIDH1) & M_DATAVIDH1)
+#define S_T7_CTLTCAMSEL 26
+#define M_T7_CTLTCAMSEL 0x3U
+#define V_T7_CTLTCAMSEL(x) ((x) << S_T7_CTLTCAMSEL)
+#define G_T7_CTLTCAMSEL(x) (((x) >> S_T7_CTLTCAMSEL) & M_T7_CTLTCAMSEL)
+
+#define S_T7_1_CTLTCAMINDEX 17
+#define M_T7_1_CTLTCAMINDEX 0x1ffU
+#define V_T7_1_CTLTCAMINDEX(x) ((x) << S_T7_1_CTLTCAMINDEX)
+#define G_T7_1_CTLTCAMINDEX(x) (((x) >> S_T7_1_CTLTCAMINDEX) & M_T7_1_CTLTCAMINDEX)
+
#define A_MPS_CLS_TCAM_X_H 0xf00c
#define S_TCAMXH 0
@@ -34656,11 +43460,47 @@
#define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH)
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID0 0xf010
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID0 0xf010
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID0 0xf014
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID0 0xf014
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID1 0xf01c
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1 0xf020
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID1 0xf020
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1 0xf024
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID1 0xf024
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1 0xf028
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID0 0xf028
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID0 0xf02c
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID0 0xf030
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID1 0xf034
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID1 0xf038
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID1 0xf03c
+#define A_MPS_CLS_TCAM0_MASK_REG0 0xf040
+#define A_MPS_CLS_TCAM0_MASK_REG1 0xf044
+#define A_MPS_CLS_TCAM0_MASK_REG2 0xf048
+
+#define S_MASK_0_2 0
+#define M_MASK_0_2 0xffffU
+#define V_MASK_0_2(x) ((x) << S_MASK_0_2)
+#define G_MASK_0_2(x) (((x) >> S_MASK_0_2) & M_MASK_0_2)
+
+#define A_MPS_CLS_TCAM1_MASK_REG0 0xf04c
+#define A_MPS_CLS_TCAM1_MASK_REG1 0xf050
+#define A_MPS_CLS_TCAM1_MASK_REG2 0xf054
+
+#define S_MASK_1_2 0
+#define M_MASK_1_2 0xffffU
+#define V_MASK_1_2(x) ((x) << S_MASK_1_2)
+#define G_MASK_1_2(x) (((x) >> S_MASK_1_2) & M_MASK_1_2)
+
+#define A_MPS_CLS_TCAM_BIST_CTRL 0xf058
+#define A_MPS_CLS_TCAM_BIST_CB_PASS 0xf05c
+#define A_MPS_CLS_TCAM_BIST_CB_BUSY 0xf060
+#define A_MPS_CLS_TCAM2_MASK_REG0 0xf064
+#define A_MPS_CLS_TCAM2_MASK_REG1 0xf068
+#define A_MPS_CLS_TCAM2_MASK_REG2 0xf06c
#define A_MPS_RX_CTL 0x11000
#define S_FILT_VLAN_SEL 17
@@ -34686,6 +43526,14 @@
#define V_SNF(x) ((x) << S_SNF)
#define G_SNF(x) (((x) >> S_SNF) & M_SNF)
+#define S_HASH_TCAM_EN 19
+#define V_HASH_TCAM_EN(x) ((x) << S_HASH_TCAM_EN)
+#define F_HASH_TCAM_EN V_HASH_TCAM_EN(1U)
+
+#define S_SND_ORG_PFVF 18
+#define V_SND_ORG_PFVF(x) ((x) << S_SND_ORG_PFVF)
+#define F_SND_ORG_PFVF V_SND_ORG_PFVF(1U)
+
#define A_MPS_RX_PORT_MUX_CTL 0x11004
#define S_CTL_P3 12
@@ -34877,6 +43725,11 @@
#define V_THRESH(x) ((x) << S_THRESH)
#define G_THRESH(x) (((x) >> S_THRESH) & M_THRESH)
+#define S_T7_THRESH 0
+#define M_T7_THRESH 0xfffU
+#define V_T7_THRESH(x) ((x) << S_T7_THRESH)
+#define G_T7_THRESH(x) (((x) >> S_T7_THRESH) & M_T7_THRESH)
+
#define A_MPS_RX_LPBK_BP1 0x11060
#define A_MPS_RX_LPBK_BP2 0x11064
#define A_MPS_RX_LPBK_BP3 0x11068
@@ -34888,6 +43741,12 @@
#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
#define A_MPS_RX_CHMN_CNT 0x11070
+#define A_MPS_CTL_STAT 0x11070
+
+#define S_T7_CTL 0
+#define V_T7_CTL(x) ((x) << S_T7_CTL)
+#define F_T7_CTL V_T7_CTL(1U)
+
#define A_MPS_RX_PERR_INT_CAUSE 0x11074
#define S_FF 23
@@ -34990,18 +43849,54 @@
#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+#define S_MAC_IN_FIFO_768B 30
+#define V_MAC_IN_FIFO_768B(x) ((x) << S_MAC_IN_FIFO_768B)
+#define F_MAC_IN_FIFO_768B V_MAC_IN_FIFO_768B(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_T7_1_INT_ERR_INT 29
+#define V_T7_1_INT_ERR_INT(x) ((x) << S_T7_1_INT_ERR_INT)
+#define F_T7_1_INT_ERR_INT V_T7_1_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_ENABLE 0x1107c
+#define S_FLOP_PERR 28
+#define V_FLOP_PERR(x) ((x) << S_FLOP_PERR)
+#define F_FLOP_PERR V_FLOP_PERR(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_RPLC_MAP 13
+#define M_RPLC_MAP 0x1fU
+#define V_RPLC_MAP(x) ((x) << S_RPLC_MAP)
+#define G_RPLC_MAP(x) (((x) >> S_RPLC_MAP) & M_RPLC_MAP)
+
+#define S_TKN_RUNT_DROP_FIFO 12
+#define V_TKN_RUNT_DROP_FIFO(x) ((x) << S_TKN_RUNT_DROP_FIFO)
+#define F_TKN_RUNT_DROP_FIFO V_TKN_RUNT_DROP_FIFO(1U)
+
+#define S_T7_PPM3 9
+#define M_T7_PPM3 0x7U
+#define V_T7_PPM3(x) ((x) << S_T7_PPM3)
+#define G_T7_PPM3(x) (((x) >> S_T7_PPM3) & M_T7_PPM3)
+#define S_T7_PPM2 6
+#define M_T7_PPM2 0x7U
+#define V_T7_PPM2(x) ((x) << S_T7_PPM2)
+#define G_T7_PPM2(x) (((x) >> S_T7_PPM2) & M_T7_PPM2)
+
+#define S_T7_PPM1 3
+#define M_T7_PPM1 0x7U
+#define V_T7_PPM1(x) ((x) << S_T7_PPM1)
+#define G_T7_PPM1(x) (((x) >> S_T7_PPM1) & M_T7_PPM1)
+
+#define S_T7_PPM0 0
+#define M_T7_PPM0 0x7U
+#define V_T7_PPM0(x) ((x) << S_T7_PPM0)
+#define G_T7_PPM0(x) (((x) >> S_T7_PPM0) & M_T7_PPM0)
+
+#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+
+#define S_T7_2_INT_ERR_INT 30
+#define V_T7_2_INT_ERR_INT(x) ((x) << S_T7_2_INT_ERR_INT)
+#define F_T7_2_INT_ERR_INT V_T7_2_INT_ERR_INT(1U)
+
+#define A_MPS_RX_PERR_ENABLE 0x1107c
#define A_MPS_RX_PERR_INJECT 0x11080
#define A_MPS_RX_FUNC_INT_CAUSE 0x11084
@@ -35083,8 +43978,43 @@
#define V_TH_LOW(x) ((x) << S_TH_LOW)
#define G_TH_LOW(x) (((x) >> S_TH_LOW) & M_TH_LOW)
+#define A_MPS_RX_PERR_INT_CAUSE2 0x1108c
+
+#define S_CRYPT2MPS_RX_INTF_FIFO 28
+#define M_CRYPT2MPS_RX_INTF_FIFO 0xfU
+#define V_CRYPT2MPS_RX_INTF_FIFO(x) ((x) << S_CRYPT2MPS_RX_INTF_FIFO)
+#define G_CRYPT2MPS_RX_INTF_FIFO(x) (((x) >> S_CRYPT2MPS_RX_INTF_FIFO) & M_CRYPT2MPS_RX_INTF_FIFO)
+
+#define S_INIC2MPS_TX0_PERR 27
+#define V_INIC2MPS_TX0_PERR(x) ((x) << S_INIC2MPS_TX0_PERR)
+#define F_INIC2MPS_TX0_PERR V_INIC2MPS_TX0_PERR(1U)
+
+#define S_INIC2MPS_TX1_PERR 26
+#define V_INIC2MPS_TX1_PERR(x) ((x) << S_INIC2MPS_TX1_PERR)
+#define F_INIC2MPS_TX1_PERR V_INIC2MPS_TX1_PERR(1U)
+
+#define S_XGMAC2MPS_RX0_PERR 25
+#define V_XGMAC2MPS_RX0_PERR(x) ((x) << S_XGMAC2MPS_RX0_PERR)
+#define F_XGMAC2MPS_RX0_PERR V_XGMAC2MPS_RX0_PERR(1U)
+
+#define S_XGMAC2MPS_RX1_PERR 24
+#define V_XGMAC2MPS_RX1_PERR(x) ((x) << S_XGMAC2MPS_RX1_PERR)
+#define F_XGMAC2MPS_RX1_PERR V_XGMAC2MPS_RX1_PERR(1U)
+
+#define S_MPS2CRYPTO_RX_INTF_FIFO 20
+#define M_MPS2CRYPTO_RX_INTF_FIFO 0xfU
+#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
+#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
+
+#define S_RX_PRE_PROC_PERR 9
+#define M_RX_PRE_PROC_PERR 0x7ffU
+#define V_RX_PRE_PROC_PERR(x) ((x) << S_RX_PRE_PROC_PERR)
+#define G_RX_PRE_PROC_PERR(x) (((x) >> S_RX_PRE_PROC_PERR) & M_RX_PRE_PROC_PERR)
+
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
+#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
#define A_MPS_RX_PAUSE_GEN_TH_2 0x11094
+#define A_MPS_RX_PERR_ENABLE2 0x11094
#define A_MPS_RX_PAUSE_GEN_TH_3 0x11098
#define A_MPS_RX_REPL_CTL 0x11098
@@ -35126,10 +44056,13 @@
#define A_MPS_RX_PT_ARB1 0x110ac
#define A_MPS_RX_PT_ARB2 0x110b0
+#define A_T7_MPS_RX_PT_ARB4 0x110b0
#define A_MPS_RX_PT_ARB3 0x110b4
#define A_T6_MPS_PF_OUT_EN 0x110b4
+#define A_T7_MPS_PF_OUT_EN 0x110b4
#define A_MPS_RX_PT_ARB4 0x110b8
#define A_T6_MPS_BMC_MTU 0x110b8
+#define A_T7_MPS_BMC_MTU 0x110b8
#define A_MPS_PF_OUT_EN 0x110bc
#define S_OUTEN 0
@@ -35138,6 +44071,7 @@
#define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN)
#define A_T6_MPS_BMC_PKT_CNT 0x110bc
+#define A_T7_MPS_BMC_PKT_CNT 0x110bc
#define A_MPS_BMC_MTU 0x110c0
#define S_MTU 0
@@ -35146,6 +44080,7 @@
#define G_MTU(x) (((x) >> S_MTU) & M_MTU)
#define A_T6_MPS_BMC_BYTE_CNT 0x110c0
+#define A_T7_MPS_BMC_BYTE_CNT 0x110c0
#define A_MPS_BMC_PKT_CNT 0x110c4
#define A_T6_MPS_PFVF_ATRB_CTL 0x110c4
@@ -35154,6 +44089,7 @@
#define V_T6_PFVF(x) ((x) << S_T6_PFVF)
#define G_T6_PFVF(x) (((x) >> S_T6_PFVF) & M_T6_PFVF)
+#define A_T7_MPS_PFVF_ATRB_CTL 0x110c4
#define A_MPS_BMC_BYTE_CNT 0x110c8
#define A_T6_MPS_PFVF_ATRB 0x110c8
@@ -35161,6 +44097,12 @@
#define V_FULL_FRAME_MODE(x) ((x) << S_FULL_FRAME_MODE)
#define F_FULL_FRAME_MODE V_FULL_FRAME_MODE(1U)
+#define A_T7_MPS_PFVF_ATRB 0x110c8
+
+#define S_EXTRACT_DEL_VLAN 31
+#define V_EXTRACT_DEL_VLAN(x) ((x) << S_EXTRACT_DEL_VLAN)
+#define F_EXTRACT_DEL_VLAN V_EXTRACT_DEL_VLAN(1U)
+
#define A_MPS_PFVF_ATRB_CTL 0x110cc
#define S_RD_WRN 31
@@ -35173,6 +44115,7 @@
#define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF)
#define A_T6_MPS_PFVF_ATRB_FLTR0 0x110cc
+#define A_T7_MPS_PFVF_ATRB_FLTR0 0x110cc
#define A_MPS_PFVF_ATRB 0x110d0
#define S_ATTR_PF 28
@@ -35193,6 +44136,7 @@
#define F_ATTR_MODE V_ATTR_MODE(1U)
#define A_T6_MPS_PFVF_ATRB_FLTR1 0x110d0
+#define A_T7_MPS_PFVF_ATRB_FLTR1 0x110d0
#define A_MPS_PFVF_ATRB_FLTR0 0x110d4
#define S_VLAN_EN 16
@@ -35205,36 +44149,58 @@
#define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID)
#define A_T6_MPS_PFVF_ATRB_FLTR2 0x110d4
+#define A_T7_MPS_PFVF_ATRB_FLTR2 0x110d4
#define A_MPS_PFVF_ATRB_FLTR1 0x110d8
#define A_T6_MPS_PFVF_ATRB_FLTR3 0x110d8
+#define A_T7_MPS_PFVF_ATRB_FLTR3 0x110d8
#define A_MPS_PFVF_ATRB_FLTR2 0x110dc
#define A_T6_MPS_PFVF_ATRB_FLTR4 0x110dc
+#define A_T7_MPS_PFVF_ATRB_FLTR4 0x110dc
#define A_MPS_PFVF_ATRB_FLTR3 0x110e0
#define A_T6_MPS_PFVF_ATRB_FLTR5 0x110e0
+#define A_T7_MPS_PFVF_ATRB_FLTR5 0x110e0
#define A_MPS_PFVF_ATRB_FLTR4 0x110e4
#define A_T6_MPS_PFVF_ATRB_FLTR6 0x110e4
+#define A_T7_MPS_PFVF_ATRB_FLTR6 0x110e4
#define A_MPS_PFVF_ATRB_FLTR5 0x110e8
#define A_T6_MPS_PFVF_ATRB_FLTR7 0x110e8
+#define A_T7_MPS_PFVF_ATRB_FLTR7 0x110e8
#define A_MPS_PFVF_ATRB_FLTR6 0x110ec
#define A_T6_MPS_PFVF_ATRB_FLTR8 0x110ec
+#define A_T7_MPS_PFVF_ATRB_FLTR8 0x110ec
#define A_MPS_PFVF_ATRB_FLTR7 0x110f0
#define A_T6_MPS_PFVF_ATRB_FLTR9 0x110f0
+#define A_T7_MPS_PFVF_ATRB_FLTR9 0x110f0
#define A_MPS_PFVF_ATRB_FLTR8 0x110f4
#define A_T6_MPS_PFVF_ATRB_FLTR10 0x110f4
+#define A_T7_MPS_PFVF_ATRB_FLTR10 0x110f4
#define A_MPS_PFVF_ATRB_FLTR9 0x110f8
#define A_T6_MPS_PFVF_ATRB_FLTR11 0x110f8
+#define A_T7_MPS_PFVF_ATRB_FLTR11 0x110f8
#define A_MPS_PFVF_ATRB_FLTR10 0x110fc
#define A_T6_MPS_PFVF_ATRB_FLTR12 0x110fc
+#define A_T7_MPS_PFVF_ATRB_FLTR12 0x110fc
#define A_MPS_PFVF_ATRB_FLTR11 0x11100
#define A_T6_MPS_PFVF_ATRB_FLTR13 0x11100
+#define A_T7_MPS_PFVF_ATRB_FLTR13 0x11100
#define A_MPS_PFVF_ATRB_FLTR12 0x11104
#define A_T6_MPS_PFVF_ATRB_FLTR14 0x11104
+#define A_T7_MPS_PFVF_ATRB_FLTR14 0x11104
#define A_MPS_PFVF_ATRB_FLTR13 0x11108
#define A_T6_MPS_PFVF_ATRB_FLTR15 0x11108
+#define A_T7_MPS_PFVF_ATRB_FLTR15 0x11108
#define A_MPS_PFVF_ATRB_FLTR14 0x1110c
#define A_T6_MPS_RPLC_MAP_CTL 0x1110c
+#define A_T7_MPS_RPLC_MAP_CTL 0x1110c
+
+#define S_T7_RPLC_MAP_ADDR 0
+#define M_T7_RPLC_MAP_ADDR 0xfffU
+#define V_T7_RPLC_MAP_ADDR(x) ((x) << S_T7_RPLC_MAP_ADDR)
+#define G_T7_RPLC_MAP_ADDR(x) (((x) >> S_T7_RPLC_MAP_ADDR) & M_T7_RPLC_MAP_ADDR)
+
#define A_MPS_PFVF_ATRB_FLTR15 0x11110
#define A_T6_MPS_PF_RPLCT_MAP 0x11110
+#define A_T7_MPS_PF_RPLCT_MAP 0x11110
#define A_MPS_RPLC_MAP_CTL 0x11114
#define S_RPLC_MAP_ADDR 0
@@ -35243,6 +44209,7 @@
#define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR)
#define A_T6_MPS_VF_RPLCT_MAP0 0x11114
+#define A_T7_MPS_VF_RPLCT_MAP0 0x11114
#define A_MPS_PF_RPLCT_MAP 0x11118
#define S_PF_EN 0
@@ -35251,10 +44218,13 @@
#define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN)
#define A_T6_MPS_VF_RPLCT_MAP1 0x11118
+#define A_T7_MPS_VF_RPLCT_MAP1 0x11118
#define A_MPS_VF_RPLCT_MAP0 0x1111c
#define A_T6_MPS_VF_RPLCT_MAP2 0x1111c
+#define A_T7_MPS_VF_RPLCT_MAP2 0x1111c
#define A_MPS_VF_RPLCT_MAP1 0x11120
#define A_T6_MPS_VF_RPLCT_MAP3 0x11120
+#define A_T7_MPS_VF_RPLCT_MAP3 0x11120
#define A_MPS_VF_RPLCT_MAP2 0x11124
#define A_MPS_VF_RPLCT_MAP3 0x11128
#define A_MPS_MEM_DBG_CTL 0x1112c
@@ -35629,9 +44599,13 @@
#define V_CONG_TH(x) ((x) << S_CONG_TH)
#define G_CONG_TH(x) (((x) >> S_CONG_TH) & M_CONG_TH)
+#define A_MPS_RX_LPBK_BG_PG_CNT2 0x11220
#define A_MPS_RX_CONGESTION_THRESHOLD_BG1 0x11224
+#define A_MPS_RX_LPBK_BG_PG_CNT3 0x11224
#define A_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11228
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG0 0x11228
#define A_MPS_RX_CONGESTION_THRESHOLD_BG3 0x1122c
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG1 0x1122c
#define A_MPS_RX_GRE_PROT_TYPE 0x11230
#define S_NVGRE_EN 9
@@ -35647,6 +44621,7 @@
#define V_GRE(x) ((x) << S_GRE)
#define G_GRE(x) (((x) >> S_GRE) & M_GRE)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11230
#define A_MPS_RX_VXLAN_TYPE 0x11234
#define S_VXLAN_EN 16
@@ -35658,6 +44633,7 @@
#define V_VXLAN(x) ((x) << S_VXLAN)
#define G_VXLAN(x) (((x) >> S_VXLAN) & M_VXLAN)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG3 0x11234
#define A_MPS_RX_GENEVE_TYPE 0x11238
#define S_GENEVE_EN 16
@@ -35669,12 +44645,14 @@
#define V_GENEVE(x) ((x) << S_GENEVE)
#define G_GENEVE(x) (((x) >> S_GENEVE) & M_GENEVE)
+#define A_T7_MPS_RX_GRE_PROT_TYPE 0x11238
#define A_MPS_RX_INNER_HDR_IVLAN 0x1123c
#define S_T6_IVLAN_EN 16
#define V_T6_IVLAN_EN(x) ((x) << S_T6_IVLAN_EN)
#define F_T6_IVLAN_EN V_T6_IVLAN_EN(1U)
+#define A_T7_MPS_RX_VXLAN_TYPE 0x1123c
#define A_MPS_RX_ENCAP_NVGRE 0x11240
#define S_ETYPE_EN 16
@@ -35686,13 +44664,9 @@
#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
+#define A_T7_MPS_RX_GENEVE_TYPE 0x11240
#define A_MPS_RX_ENCAP_GENEVE 0x11244
-
-#define S_T6_ETYPE 0
-#define M_T6_ETYPE 0xffffU
-#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
-#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
-
+#define A_T7_MPS_RX_INNER_HDR_IVLAN 0x11244
#define A_MPS_RX_TCP 0x11248
#define S_PROT_TYPE_EN 8
@@ -35704,8 +44678,11 @@
#define V_PROT_TYPE(x) ((x) << S_PROT_TYPE)
#define G_PROT_TYPE(x) (((x) >> S_PROT_TYPE) & M_PROT_TYPE)
+#define A_T7_MPS_RX_ENCAP_NVGRE 0x11248
#define A_MPS_RX_UDP 0x1124c
+#define A_T7_MPS_RX_ENCAP_GENEVE 0x1124c
#define A_MPS_RX_PAUSE 0x11250
+#define A_T7_MPS_RX_TCP 0x11250
#define A_MPS_RX_LENGTH 0x11254
#define S_SAP_VALUE 16
@@ -35718,6 +44695,7 @@
#define V_LENGTH_ETYPE(x) ((x) << S_LENGTH_ETYPE)
#define G_LENGTH_ETYPE(x) (((x) >> S_LENGTH_ETYPE) & M_LENGTH_ETYPE)
+#define A_T7_MPS_RX_UDP 0x11254
#define A_MPS_RX_CTL_ORG 0x11258
#define S_CTL_VALUE 24
@@ -35730,6 +44708,7 @@
#define V_ORG_VALUE(x) ((x) << S_ORG_VALUE)
#define G_ORG_VALUE(x) (((x) >> S_ORG_VALUE) & M_ORG_VALUE)
+#define A_T7_MPS_RX_PAUSE 0x11258
#define A_MPS_RX_IPV4 0x1125c
#define S_ETYPE_IPV4 0
@@ -35737,6 +44716,7 @@
#define V_ETYPE_IPV4(x) ((x) << S_ETYPE_IPV4)
#define G_ETYPE_IPV4(x) (((x) >> S_ETYPE_IPV4) & M_ETYPE_IPV4)
+#define A_T7_MPS_RX_LENGTH 0x1125c
#define A_MPS_RX_IPV6 0x11260
#define S_ETYPE_IPV6 0
@@ -35744,6 +44724,7 @@
#define V_ETYPE_IPV6(x) ((x) << S_ETYPE_IPV6)
#define G_ETYPE_IPV6(x) (((x) >> S_ETYPE_IPV6) & M_ETYPE_IPV6)
+#define A_T7_MPS_RX_CTL_ORG 0x11260
#define A_MPS_RX_TTL 0x11264
#define S_TTL_IPV4 10
@@ -35764,6 +44745,7 @@
#define V_TTL_CHK_EN_IPV6(x) ((x) << S_TTL_CHK_EN_IPV6)
#define F_TTL_CHK_EN_IPV6 V_TTL_CHK_EN_IPV6(1U)
+#define A_T7_MPS_RX_IPV4 0x11264
#define A_MPS_RX_DEFAULT_VNI 0x11268
#define S_VNI 0
@@ -35771,6 +44753,7 @@
#define V_VNI(x) ((x) << S_VNI)
#define G_VNI(x) (((x) >> S_VNI) & M_VNI)
+#define A_T7_MPS_RX_IPV6 0x11268
#define A_MPS_RX_PRS_CTL 0x1126c
#define S_CTL_CHK_EN 28
@@ -35821,6 +44804,7 @@
#define V_DIP_EN(x) ((x) << S_DIP_EN)
#define F_DIP_EN V_DIP_EN(1U)
+#define A_T7_MPS_RX_TTL 0x1126c
#define A_MPS_RX_PRS_CTL_2 0x11270
#define S_EN_UDP_CSUM_CHK 4
@@ -35843,7 +44827,9 @@
#define V_T6_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_T6_IPV6_UDP_CSUM_COMPAT)
#define F_T6_IPV6_UDP_CSUM_COMPAT V_T6_IPV6_UDP_CSUM_COMPAT(1U)
+#define A_T7_MPS_RX_DEFAULT_VNI 0x11270
#define A_MPS_RX_MPS2NCSI_CNT 0x11274
+#define A_T7_MPS_RX_PRS_CTL 0x11274
#define A_MPS_RX_MAX_TNL_HDR_LEN 0x11278
#define S_T6_LEN 0
@@ -35851,38 +44837,222 @@
#define V_T6_LEN(x) ((x) << S_T6_LEN)
#define G_T6_LEN(x) (((x) >> S_T6_LEN) & M_T6_LEN)
+#define A_T7_MPS_RX_PRS_CTL_2 0x11278
+
+#define S_IP_EXT_HDR_EN 5
+#define V_IP_EXT_HDR_EN(x) ((x) << S_IP_EXT_HDR_EN)
+#define F_IP_EXT_HDR_EN V_IP_EXT_HDR_EN(1U)
+
#define A_MPS_RX_PAUSE_DA_H 0x1127c
+#define A_T7_MPS_RX_MPS2NCSI_CNT 0x1127c
#define A_MPS_RX_PAUSE_DA_L 0x11280
+#define A_T7_MPS_RX_MAX_TNL_HDR_LEN 0x11280
+
+#define S_MPS_TNL_HDR_LEN_MODE 9
+#define V_MPS_TNL_HDR_LEN_MODE(x) ((x) << S_MPS_TNL_HDR_LEN_MODE)
+#define F_MPS_TNL_HDR_LEN_MODE V_MPS_TNL_HDR_LEN_MODE(1U)
+
+#define S_MPS_MAX_TNL_HDR_LEN 0
+#define M_MPS_MAX_TNL_HDR_LEN 0x1ffU
+#define V_MPS_MAX_TNL_HDR_LEN(x) ((x) << S_MPS_MAX_TNL_HDR_LEN)
+#define G_MPS_MAX_TNL_HDR_LEN(x) (((x) >> S_MPS_MAX_TNL_HDR_LEN) & M_MPS_MAX_TNL_HDR_LEN)
+
#define A_MPS_RX_CNT_NVGRE_PKT_MAC0 0x11284
+#define A_T7_MPS_RX_PAUSE_DA_H 0x11284
#define A_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11288
+#define A_T7_MPS_RX_PAUSE_DA_L 0x11288
#define A_MPS_RX_CNT_GENEVE_PKT_MAC0 0x1128c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC0 0x1128c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11290
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11290
#define A_MPS_RX_CNT_NVGRE_PKT_MAC1 0x11294
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC0 0x11294
#define A_MPS_RX_CNT_VXLAN_PKT_MAC1 0x11298
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11298
#define A_MPS_RX_CNT_GENEVE_PKT_MAC1 0x1129c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC1 0x1129c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC1 0x112a0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112a4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC1 0x112a4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112a8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112ac
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112ac
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112b0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112b4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112b4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112b8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112bc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112bc
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112c0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112c4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112c4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112c8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112cc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112cc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112d0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112d4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112d4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112d8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112dc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112dc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112e4
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e8
+#define A_MPS_RX_ESP 0x112ec
+#define A_MPS_EN_LPBK_BLK_SNDR 0x112f0
+
+#define S_EN_CH3 3
+#define V_EN_CH3(x) ((x) << S_EN_CH3)
+#define F_EN_CH3 V_EN_CH3(1U)
+
+#define S_EN_CH2 2
+#define V_EN_CH2(x) ((x) << S_EN_CH2)
+#define F_EN_CH2 V_EN_CH2(1U)
+
+#define S_EN_CH1 1
+#define V_EN_CH1(x) ((x) << S_EN_CH1)
+#define F_EN_CH1 V_EN_CH1(1U)
+
+#define S_EN_CH0 0
+#define V_EN_CH0(x) ((x) << S_EN_CH0)
+#define F_EN_CH0 V_EN_CH0(1U)
+
#define A_MPS_VF_RPLCT_MAP4 0x11300
#define A_MPS_VF_RPLCT_MAP5 0x11304
#define A_MPS_VF_RPLCT_MAP6 0x11308
#define A_MPS_VF_RPLCT_MAP7 0x1130c
+#define A_MPS_RX_PERR_INT_CAUSE3 0x11310
+#define A_MPS_RX_PERR_INT_ENABLE3 0x11314
+#define A_MPS_RX_PERR_ENABLE3 0x11318
+#define A_MPS_RX_PERR_INT_CAUSE4 0x1131c
+
+#define S_CLS 20
+#define M_CLS 0x3fU
+#define V_CLS(x) ((x) << S_CLS)
+#define G_CLS(x) (((x) >> S_CLS) & M_CLS)
+
+#define S_RX_PRE_PROC 16
+#define M_RX_PRE_PROC 0xfU
+#define V_RX_PRE_PROC(x) ((x) << S_RX_PRE_PROC)
+#define G_RX_PRE_PROC(x) (((x) >> S_RX_PRE_PROC) & M_RX_PRE_PROC)
+
+#define S_PPROC3 12
+#define M_PPROC3 0xfU
+#define V_PPROC3(x) ((x) << S_PPROC3)
+#define G_PPROC3(x) (((x) >> S_PPROC3) & M_PPROC3)
+
+#define S_PPROC2 8
+#define M_PPROC2 0xfU
+#define V_PPROC2(x) ((x) << S_PPROC2)
+#define G_PPROC2(x) (((x) >> S_PPROC2) & M_PPROC2)
+
+#define S_PPROC1 4
+#define M_PPROC1 0xfU
+#define V_PPROC1(x) ((x) << S_PPROC1)
+#define G_PPROC1(x) (((x) >> S_PPROC1) & M_PPROC1)
+
+#define S_PPROC0 0
+#define M_PPROC0 0xfU
+#define V_PPROC0(x) ((x) << S_PPROC0)
+#define G_PPROC0(x) (((x) >> S_PPROC0) & M_PPROC0)
+
+#define A_MPS_RX_PERR_INT_ENABLE4 0x11320
+#define A_MPS_RX_PERR_ENABLE4 0x11324
+#define A_MPS_RX_PERR_INT_CAUSE5 0x11328
+
+#define S_MPS2CRYP_RX_FIFO 26
+#define M_MPS2CRYP_RX_FIFO 0xfU
+#define V_MPS2CRYP_RX_FIFO(x) ((x) << S_MPS2CRYP_RX_FIFO)
+#define G_MPS2CRYP_RX_FIFO(x) (((x) >> S_MPS2CRYP_RX_FIFO) & M_MPS2CRYP_RX_FIFO)
+
+#define S_RX_OUT 20
+#define M_RX_OUT 0x3fU
+#define V_RX_OUT(x) ((x) << S_RX_OUT)
+#define G_RX_OUT(x) (((x) >> S_RX_OUT) & M_RX_OUT)
+
+#define S_MEM_WRAP 0
+#define M_MEM_WRAP 0xfffffU
+#define V_MEM_WRAP(x) ((x) << S_MEM_WRAP)
+#define G_MEM_WRAP(x) (((x) >> S_MEM_WRAP) & M_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE5 0x1132c
+#define A_MPS_RX_PERR_ENABLE5 0x11330
+#define A_MPS_RX_PERR_INT_CAUSE6 0x11334
+
+#define S_MPS_RX_MEM_WRAP 0
+#define M_MPS_RX_MEM_WRAP 0x1ffffffU
+#define V_MPS_RX_MEM_WRAP(x) ((x) << S_MPS_RX_MEM_WRAP)
+#define G_MPS_RX_MEM_WRAP(x) (((x) >> S_MPS_RX_MEM_WRAP) & M_MPS_RX_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE6 0x11338
+#define A_MPS_RX_PERR_ENABLE6 0x1133c
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC2 0x11408
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC2 0x1140c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC2 0x11410
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC2 0x11414
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC3 0x11418
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC3 0x1141c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC3 0x11420
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC3 0x11424
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK2 0x11428
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK2 0x1142c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK2 0x11430
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK2 0x11434
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK3 0x11438
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK3 0x1143c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK3 0x11440
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK3 0x11444
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP2 0x11448
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP2 0x1144c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP2 0x11450
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP2 0x11454
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP3 0x11458
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP3 0x1145c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP3 0x11460
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP3 0x11464
+#define A_T7_MPS_RX_PT_ARB2 0x11468
+#define A_T7_MPS_RX_PT_ARB3 0x1146c
#define A_MPS_CLS_DIPIPV4_ID_TABLE 0x12000
+#define A_MPS_CLS_DIP_ID_TABLE_CTL 0x12000
+
+#define S_DIP_VLD 12
+#define V_DIP_VLD(x) ((x) << S_DIP_VLD)
+#define F_DIP_VLD V_DIP_VLD(1U)
+
+#define S_DIP_TYPE 11
+#define V_DIP_TYPE(x) ((x) << S_DIP_TYPE)
+#define F_DIP_TYPE V_DIP_TYPE(1U)
+
+#define S_DIP_WRN 10
+#define V_DIP_WRN(x) ((x) << S_DIP_WRN)
+#define F_DIP_WRN V_DIP_WRN(1U)
+
+#define S_DIP_SEG 8
+#define M_DIP_SEG 0x3U
+#define V_DIP_SEG(x) ((x) << S_DIP_SEG)
+#define G_DIP_SEG(x) (((x) >> S_DIP_SEG) & M_DIP_SEG)
+
+#define S_DIP_TBL_RSVD1 5
+#define M_DIP_TBL_RSVD1 0x7U
+#define V_DIP_TBL_RSVD1(x) ((x) << S_DIP_TBL_RSVD1)
+#define G_DIP_TBL_RSVD1(x) (((x) >> S_DIP_TBL_RSVD1) & M_DIP_TBL_RSVD1)
+
+#define S_DIP_TBL_ADDR 0
+#define M_DIP_TBL_ADDR 0x1fU
+#define V_DIP_TBL_ADDR(x) ((x) << S_DIP_TBL_ADDR)
+#define G_DIP_TBL_ADDR(x) (((x) >> S_DIP_TBL_ADDR) & M_DIP_TBL_ADDR)
+
#define A_MPS_CLS_DIPIPV4_MASK_TABLE 0x12004
+#define A_MPS_CLS_DIP_ID_TABLE_DATA 0x12004
#define A_MPS_CLS_DIPIPV6ID_0_TABLE 0x12020
#define A_MPS_CLS_DIPIPV6ID_1_TABLE 0x12024
#define A_MPS_CLS_DIPIPV6ID_2_TABLE 0x12028
@@ -35892,6 +45062,226 @@
#define A_MPS_CLS_DIPIPV6MASK_2_TABLE 0x12038
#define A_MPS_CLS_DIPIPV6MASK_3_TABLE 0x1203c
#define A_MPS_RX_HASH_LKP_TABLE 0x12060
+#define A_MPS_CLS_DROP_DMAC0_L 0x12070
+#define A_MPS_CLS_DROP_DMAC0_H 0x12074
+
+#define S_DMAC 0
+#define M_DMAC 0xffffU
+#define V_DMAC(x) ((x) << S_DMAC)
+#define G_DMAC(x) (((x) >> S_DMAC) & M_DMAC)
+
+#define A_MPS_CLS_DROP_DMAC1_L 0x12078
+#define A_MPS_CLS_DROP_DMAC1_H 0x1207c
+#define A_MPS_CLS_DROP_DMAC2_L 0x12080
+#define A_MPS_CLS_DROP_DMAC2_H 0x12084
+#define A_MPS_CLS_DROP_DMAC3_L 0x12088
+#define A_MPS_CLS_DROP_DMAC3_H 0x1208c
+#define A_MPS_CLS_DROP_DMAC4_L 0x12090
+#define A_MPS_CLS_DROP_DMAC4_H 0x12094
+#define A_MPS_CLS_DROP_DMAC5_L 0x12098
+#define A_MPS_CLS_DROP_DMAC5_H 0x1209c
+#define A_MPS_CLS_DROP_DMAC6_L 0x120a0
+#define A_MPS_CLS_DROP_DMAC6_H 0x120a4
+#define A_MPS_CLS_DROP_DMAC7_L 0x120a8
+#define A_MPS_CLS_DROP_DMAC7_H 0x120ac
+#define A_MPS_CLS_DROP_DMAC8_L 0x120b0
+#define A_MPS_CLS_DROP_DMAC8_H 0x120b4
+#define A_MPS_CLS_DROP_DMAC9_L 0x120b8
+#define A_MPS_CLS_DROP_DMAC9_H 0x120bc
+#define A_MPS_CLS_DROP_DMAC10_L 0x120c0
+#define A_MPS_CLS_DROP_DMAC10_H 0x120c4
+#define A_MPS_CLS_DROP_DMAC11_L 0x120c8
+#define A_MPS_CLS_DROP_DMAC11_H 0x120cc
+#define A_MPS_CLS_DROP_DMAC12_L 0x120d0
+#define A_MPS_CLS_DROP_DMAC12_H 0x120d4
+#define A_MPS_CLS_DROP_DMAC13_L 0x120d8
+#define A_MPS_CLS_DROP_DMAC13_H 0x120dc
+#define A_MPS_CLS_DROP_DMAC14_L 0x120e0
+#define A_MPS_CLS_DROP_DMAC14_H 0x120e4
+#define A_MPS_CLS_DROP_DMAC15_L 0x120e8
+#define A_MPS_CLS_DROP_DMAC15_H 0x120ec
+#define A_MPS_RX_ENCAP_VXLAN 0x120f0
+#define A_MPS_RX_INT_VXLAN 0x120f4
+
+#define S_INT_TYPE_EN 16
+#define V_INT_TYPE_EN(x) ((x) << S_INT_TYPE_EN)
+#define F_INT_TYPE_EN V_INT_TYPE_EN(1U)
+
+#define S_INT_TYPE 0
+#define M_INT_TYPE 0xffffU
+#define V_INT_TYPE(x) ((x) << S_INT_TYPE)
+#define G_INT_TYPE(x) (((x) >> S_INT_TYPE) & M_INT_TYPE)
+
+#define A_MPS_RX_INT_GENEVE 0x120f8
+#define A_MPS_PFVF_ATRB2 0x120fc
+
+#define S_EXTRACT_DEL_ENCAP 31
+#define V_EXTRACT_DEL_ENCAP(x) ((x) << S_EXTRACT_DEL_ENCAP)
+#define F_EXTRACT_DEL_ENCAP V_EXTRACT_DEL_ENCAP(1U)
+
+#define A_MPS_RX_TRANS_ENCAP_FLTR_CTL 0x12100
+
+#define S_TIMEOUT_FLT_CLR_EN 8
+#define V_TIMEOUT_FLT_CLR_EN(x) ((x) << S_TIMEOUT_FLT_CLR_EN)
+#define F_TIMEOUT_FLT_CLR_EN V_TIMEOUT_FLT_CLR_EN(1U)
+
+#define S_FLTR_TIMOUT_VAL 0
+#define M_FLTR_TIMOUT_VAL 0xffU
+#define V_FLTR_TIMOUT_VAL(x) ((x) << S_FLTR_TIMOUT_VAL)
+#define G_FLTR_TIMOUT_VAL(x) (((x) >> S_FLTR_TIMOUT_VAL) & M_FLTR_TIMOUT_VAL)
+
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_0 0x12104
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_1 0x12108
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_2 0x1210c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_3 0x12110
+#define A_MPS_RX_PAUSE_GEN_TH_0_4 0x12114
+#define A_MPS_RX_PAUSE_GEN_TH_0_5 0x12118
+#define A_MPS_RX_PAUSE_GEN_TH_0_6 0x1211c
+#define A_MPS_RX_PAUSE_GEN_TH_0_7 0x12120
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_0 0x12124
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_1 0x12128
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_2 0x1212c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_3 0x12130
+#define A_MPS_RX_PAUSE_GEN_TH_1_4 0x12134
+#define A_MPS_RX_PAUSE_GEN_TH_1_5 0x12138
+#define A_MPS_RX_PAUSE_GEN_TH_1_6 0x1213c
+#define A_MPS_RX_PAUSE_GEN_TH_1_7 0x12140
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_0 0x12144
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_1 0x12148
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_2 0x1214c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_3 0x12150
+#define A_MPS_RX_PAUSE_GEN_TH_2_4 0x12154
+#define A_MPS_RX_PAUSE_GEN_TH_2_5 0x12158
+#define A_MPS_RX_PAUSE_GEN_TH_2_6 0x1215c
+#define A_MPS_RX_PAUSE_GEN_TH_2_7 0x12160
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_0 0x12164
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_1 0x12168
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_2 0x1216c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_3 0x12170
+#define A_MPS_RX_PAUSE_GEN_TH_3_4 0x12174
+#define A_MPS_RX_PAUSE_GEN_TH_3_5 0x12178
+#define A_MPS_RX_PAUSE_GEN_TH_3_6 0x1217c
+#define A_MPS_RX_PAUSE_GEN_TH_3_7 0x12180
+#define A_MPS_RX_DROP_0_0 0x12184
+
+#define S_DROP_TH 0
+#define M_DROP_TH 0xffffU
+#define V_DROP_TH(x) ((x) << S_DROP_TH)
+#define G_DROP_TH(x) (((x) >> S_DROP_TH) & M_DROP_TH)
+
+#define A_MPS_RX_DROP_0_1 0x12188
+#define A_MPS_RX_DROP_0_2 0x1218c
+#define A_MPS_RX_DROP_0_3 0x12190
+#define A_MPS_RX_DROP_0_4 0x12194
+#define A_MPS_RX_DROP_0_5 0x12198
+#define A_MPS_RX_DROP_0_6 0x1219c
+#define A_MPS_RX_DROP_0_7 0x121a0
+#define A_MPS_RX_DROP_1_0 0x121a4
+#define A_MPS_RX_DROP_1_1 0x121a8
+#define A_MPS_RX_DROP_1_2 0x121ac
+#define A_MPS_RX_DROP_1_3 0x121b0
+#define A_MPS_RX_DROP_1_4 0x121b4
+#define A_MPS_RX_DROP_1_5 0x121b8
+#define A_MPS_RX_DROP_1_6 0x121bc
+#define A_MPS_RX_DROP_1_7 0x121c0
+#define A_MPS_RX_DROP_2_0 0x121c4
+#define A_MPS_RX_DROP_2_1 0x121c8
+#define A_MPS_RX_DROP_2_2 0x121cc
+#define A_MPS_RX_DROP_2_3 0x121d0
+#define A_MPS_RX_DROP_2_4 0x121d4
+#define A_MPS_RX_DROP_2_5 0x121d8
+#define A_MPS_RX_DROP_2_6 0x121dc
+#define A_MPS_RX_DROP_2_7 0x121e0
+#define A_MPS_RX_DROP_3_0 0x121e4
+#define A_MPS_RX_DROP_3_1 0x121e8
+#define A_MPS_RX_DROP_3_2 0x121ec
+#define A_MPS_RX_DROP_3_3 0x121f0
+#define A_MPS_RX_DROP_3_4 0x121f4
+#define A_MPS_RX_DROP_3_5 0x121f8
+#define A_MPS_RX_DROP_3_6 0x121fc
+#define A_MPS_RX_DROP_3_7 0x12200
+#define A_MPS_RX_MAC_BG_PG_CNT0_0 0x12204
+#define A_MPS_RX_MAC_BG_PG_CNT0_1 0x12208
+#define A_MPS_RX_MAC_BG_PG_CNT0_2 0x1220c
+#define A_MPS_RX_MAC_BG_PG_CNT0_3 0x12210
+#define A_MPS_RX_MAC_BG_PG_CNT0_4 0x12214
+#define A_MPS_RX_MAC_BG_PG_CNT0_5 0x12218
+#define A_MPS_RX_MAC_BG_PG_CNT0_6 0x1221c
+#define A_MPS_RX_MAC_BG_PG_CNT0_7 0x12220
+#define A_MPS_RX_MAC_BG_PG_CNT1_0 0x12224
+#define A_MPS_RX_MAC_BG_PG_CNT1_1 0x12228
+#define A_MPS_RX_MAC_BG_PG_CNT1_2 0x1222c
+#define A_MPS_RX_MAC_BG_PG_CNT1_3 0x12230
+#define A_MPS_RX_MAC_BG_PG_CNT1_4 0x12234
+#define A_MPS_RX_MAC_BG_PG_CNT1_5 0x12238
+#define A_MPS_RX_MAC_BG_PG_CNT1_6 0x1223c
+#define A_MPS_RX_MAC_BG_PG_CNT1_7 0x12240
+#define A_MPS_RX_MAC_BG_PG_CNT2_0 0x12244
+#define A_MPS_RX_MAC_BG_PG_CNT2_1 0x12248
+#define A_MPS_RX_MAC_BG_PG_CNT2_2 0x1224c
+#define A_MPS_RX_MAC_BG_PG_CNT2_3 0x12250
+#define A_MPS_RX_MAC_BG_PG_CNT2_4 0x12254
+#define A_MPS_RX_MAC_BG_PG_CNT2_5 0x12258
+#define A_MPS_RX_MAC_BG_PG_CNT2_6 0x1225c
+#define A_MPS_RX_MAC_BG_PG_CNT2_7 0x12260
+#define A_MPS_RX_MAC_BG_PG_CNT3_0 0x12264
+#define A_MPS_RX_MAC_BG_PG_CNT3_1 0x12268
+#define A_MPS_RX_MAC_BG_PG_CNT3_2 0x1226c
+#define A_MPS_RX_MAC_BG_PG_CNT3_3 0x12270
+#define A_MPS_RX_MAC_BG_PG_CNT3_4 0x12274
+#define A_MPS_RX_MAC_BG_PG_CNT3_5 0x12278
+#define A_MPS_RX_MAC_BG_PG_CNT3_6 0x1227c
+#define A_MPS_RX_MAC_BG_PG_CNT3_7 0x12280
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0 0x12284
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1 0x12288
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2 0x1228c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3 0x12290
+#define A_MPS_RX_BG0_IPSEC_CNT 0x12294
+#define A_MPS_RX_BG1_IPSEC_CNT 0x12298
+#define A_MPS_RX_BG2_IPSEC_CNT 0x1229c
+#define A_MPS_RX_BG3_IPSEC_CNT 0x122a0
+#define A_MPS_RX_MEM_FIFO_CONFIG0 0x122a4
+
+#define S_FIFO_CONFIG2 16
+#define M_FIFO_CONFIG2 0xffffU
+#define V_FIFO_CONFIG2(x) ((x) << S_FIFO_CONFIG2)
+#define G_FIFO_CONFIG2(x) (((x) >> S_FIFO_CONFIG2) & M_FIFO_CONFIG2)
+
+#define S_FIFO_CONFIG1 0
+#define M_FIFO_CONFIG1 0xffffU
+#define V_FIFO_CONFIG1(x) ((x) << S_FIFO_CONFIG1)
+#define G_FIFO_CONFIG1(x) (((x) >> S_FIFO_CONFIG1) & M_FIFO_CONFIG1)
+
+#define A_MPS_RX_MEM_FIFO_CONFIG1 0x122a8
+
+#define S_FIFO_CONFIG3 0
+#define M_FIFO_CONFIG3 0xffffU
+#define V_FIFO_CONFIG3(x) ((x) << S_FIFO_CONFIG3)
+#define G_FIFO_CONFIG3(x) (((x) >> S_FIFO_CONFIG3) & M_FIFO_CONFIG3)
+
+#define A_MPS_LPBK_MEM_FIFO_CONFIG0 0x122ac
+#define A_MPS_LPBK_MEM_FIFO_CONFIG1 0x122b0
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG0 0x122b4
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG1 0x122b8
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG2 0x122bc
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG3 0x122c0
+#define A_MPS_BG_PAUSE_CTL 0x122c4
+
+#define S_BG0_PAUSE_EN 3
+#define V_BG0_PAUSE_EN(x) ((x) << S_BG0_PAUSE_EN)
+#define F_BG0_PAUSE_EN V_BG0_PAUSE_EN(1U)
+
+#define S_BG1_PAUSE_EN 2
+#define V_BG1_PAUSE_EN(x) ((x) << S_BG1_PAUSE_EN)
+#define F_BG1_PAUSE_EN V_BG1_PAUSE_EN(1U)
+
+#define S_BG2_PAUSE_EN 1
+#define V_BG2_PAUSE_EN(x) ((x) << S_BG2_PAUSE_EN)
+#define F_BG2_PAUSE_EN V_BG2_PAUSE_EN(1U)
+
+#define S_BG3_PAUSE_EN 0
+#define V_BG3_PAUSE_EN(x) ((x) << S_BG3_PAUSE_EN)
+#define F_BG3_PAUSE_EN V_BG3_PAUSE_EN(1U)
/* registers for module CPL_SWITCH */
#define CPL_SWITCH_BASE_ADDR 0x19040
@@ -35931,6 +45321,7 @@
#define V_CIM_SPLIT_ENABLE(x) ((x) << S_CIM_SPLIT_ENABLE)
#define F_CIM_SPLIT_ENABLE V_CIM_SPLIT_ENABLE(1U)
+#define A_CNTRL 0x19040
#define A_CPL_SWITCH_TBL_IDX 0x19044
#define S_SWITCH_TBL_IDX 0
@@ -35938,7 +45329,9 @@
#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+#define A_TBL_IDX 0x19044
#define A_CPL_SWITCH_TBL_DATA 0x19048
+#define A_TBL_DATA 0x19048
#define A_CPL_SWITCH_ZERO_ERROR 0x1904c
#define S_ZERO_CMD_CH1 8
@@ -35951,6 +45344,18 @@
#define V_ZERO_CMD_CH0(x) ((x) << S_ZERO_CMD_CH0)
#define G_ZERO_CMD_CH0(x) (((x) >> S_ZERO_CMD_CH0) & M_ZERO_CMD_CH0)
+#define A_ZERO_ERROR 0x1904c
+
+#define S_ZERO_CMD_CH3 24
+#define M_ZERO_CMD_CH3 0xffU
+#define V_ZERO_CMD_CH3(x) ((x) << S_ZERO_CMD_CH3)
+#define G_ZERO_CMD_CH3(x) (((x) >> S_ZERO_CMD_CH3) & M_ZERO_CMD_CH3)
+
+#define S_ZERO_CMD_CH2 16
+#define M_ZERO_CMD_CH2 0xffU
+#define V_ZERO_CMD_CH2(x) ((x) << S_ZERO_CMD_CH2)
+#define G_ZERO_CMD_CH2(x) (((x) >> S_ZERO_CMD_CH2) & M_ZERO_CMD_CH2)
+
#define A_CPL_INTR_ENABLE 0x19050
#define S_CIM_OP_MAP_PERR 5
@@ -35985,7 +45390,18 @@
#define V_PERR_CPL_128TO128_0(x) ((x) << S_PERR_CPL_128TO128_0)
#define F_PERR_CPL_128TO128_0 V_PERR_CPL_128TO128_0(1U)
+#define A_INTR_ENABLE 0x19050
+
+#define S_PERR_CPL_128TO128_3 9
+#define V_PERR_CPL_128TO128_3(x) ((x) << S_PERR_CPL_128TO128_3)
+#define F_PERR_CPL_128TO128_3 V_PERR_CPL_128TO128_3(1U)
+
+#define S_PERR_CPL_128TO128_2 8
+#define V_PERR_CPL_128TO128_2(x) ((x) << S_PERR_CPL_128TO128_2)
+#define F_PERR_CPL_128TO128_2 V_PERR_CPL_128TO128_2(1U)
+
#define A_CPL_INTR_CAUSE 0x19054
+#define A_INTR_CAUSE 0x19054
#define A_CPL_MAP_TBL_IDX 0x19058
#define S_MAP_TBL_IDX 0
@@ -35997,6 +45413,13 @@
#define V_CIM_SPLIT_OPCODE_PROGRAM(x) ((x) << S_CIM_SPLIT_OPCODE_PROGRAM)
#define F_CIM_SPLIT_OPCODE_PROGRAM V_CIM_SPLIT_OPCODE_PROGRAM(1U)
+#define A_MAP_TBL_IDX 0x19058
+
+#define S_CPL_MAP_TBL_SEL 9
+#define M_CPL_MAP_TBL_SEL 0x3U
+#define V_CPL_MAP_TBL_SEL(x) ((x) << S_CPL_MAP_TBL_SEL)
+#define G_CPL_MAP_TBL_SEL(x) (((x) >> S_CPL_MAP_TBL_SEL) & M_CPL_MAP_TBL_SEL)
+
#define A_CPL_MAP_TBL_DATA 0x1905c
#define S_MAP_TBL_DATA 0
@@ -36004,6 +45427,8 @@
#define V_MAP_TBL_DATA(x) ((x) << S_MAP_TBL_DATA)
#define G_MAP_TBL_DATA(x) (((x) >> S_MAP_TBL_DATA) & M_MAP_TBL_DATA)
+#define A_MAP_TBL_DATA 0x1905c
+
/* registers for module SMB */
#define SMB_BASE_ADDR 0x19060
@@ -36019,6 +45444,16 @@
#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+#define S_T7_MACROCNTCFG 12
+#define M_T7_MACROCNTCFG 0x1fU
+#define V_T7_MACROCNTCFG(x) ((x) << S_T7_MACROCNTCFG)
+#define G_T7_MACROCNTCFG(x) (((x) >> S_T7_MACROCNTCFG) & M_T7_MACROCNTCFG)
+
+#define S_T7_MICROCNTCFG 0
+#define M_T7_MICROCNTCFG 0xfffU
+#define V_T7_MICROCNTCFG(x) ((x) << S_T7_MICROCNTCFG)
+#define G_T7_MICROCNTCFG(x) (((x) >> S_T7_MICROCNTCFG) & M_T7_MICROCNTCFG)
+
#define A_SMB_MST_TIMEOUT_CFG 0x19064
#define S_MSTTIMEOUTCFG 0
@@ -36685,6 +46120,26 @@
#define V_UART_CLKDIV(x) ((x) << S_UART_CLKDIV)
#define G_UART_CLKDIV(x) (((x) >> S_UART_CLKDIV) & M_UART_CLKDIV)
+#define S_T7_STOPBITS 25
+#define M_T7_STOPBITS 0x3U
+#define V_T7_STOPBITS(x) ((x) << S_T7_STOPBITS)
+#define G_T7_STOPBITS(x) (((x) >> S_T7_STOPBITS) & M_T7_STOPBITS)
+
+#define S_T7_PARITY 23
+#define M_T7_PARITY 0x3U
+#define V_T7_PARITY(x) ((x) << S_T7_PARITY)
+#define G_T7_PARITY(x) (((x) >> S_T7_PARITY) & M_T7_PARITY)
+
+#define S_T7_DATABITS 19
+#define M_T7_DATABITS 0xfU
+#define V_T7_DATABITS(x) ((x) << S_T7_DATABITS)
+#define G_T7_DATABITS(x) (((x) >> S_T7_DATABITS) & M_T7_DATABITS)
+
+#define S_T7_UART_CLKDIV 0
+#define M_T7_UART_CLKDIV 0x3ffffU
+#define V_T7_UART_CLKDIV(x) ((x) << S_T7_UART_CLKDIV)
+#define G_T7_UART_CLKDIV(x) (((x) >> S_T7_UART_CLKDIV) & M_T7_UART_CLKDIV)
+
/* registers for module PMU */
#define PMU_BASE_ADDR 0x19120
@@ -36767,6 +46222,26 @@
#define V_PL_DIS_PRTY_CHK(x) ((x) << S_PL_DIS_PRTY_CHK)
#define F_PL_DIS_PRTY_CHK V_PL_DIS_PRTY_CHK(1U)
+#define S_ARM_PART_CGEN 19
+#define V_ARM_PART_CGEN(x) ((x) << S_ARM_PART_CGEN)
+#define F_ARM_PART_CGEN V_ARM_PART_CGEN(1U)
+
+#define S_CRYPTO_PART_CGEN 14
+#define V_CRYPTO_PART_CGEN(x) ((x) << S_CRYPTO_PART_CGEN)
+#define F_CRYPTO_PART_CGEN V_CRYPTO_PART_CGEN(1U)
+
+#define S_NVME_PART_CGEN 9
+#define V_NVME_PART_CGEN(x) ((x) << S_NVME_PART_CGEN)
+#define F_NVME_PART_CGEN V_NVME_PART_CGEN(1U)
+
+#define S_XP10_PART_CGEN 8
+#define V_XP10_PART_CGEN(x) ((x) << S_XP10_PART_CGEN)
+#define F_XP10_PART_CGEN V_XP10_PART_CGEN(1U)
+
+#define S_GPEX_PART_CGEN 7
+#define V_GPEX_PART_CGEN(x) ((x) << S_GPEX_PART_CGEN)
+#define F_GPEX_PART_CGEN V_GPEX_PART_CGEN(1U)
+
#define A_PMU_SLEEPMODE_WAKEUP 0x19124
#define S_HWWAKEUPEN 5
@@ -36861,6 +46336,72 @@
#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+#define S_ISCSI_PAGE_SIZE_CHK_ENB 31
+#define V_ISCSI_PAGE_SIZE_CHK_ENB(x) ((x) << S_ISCSI_PAGE_SIZE_CHK_ENB)
+#define F_ISCSI_PAGE_SIZE_CHK_ENB V_ISCSI_PAGE_SIZE_CHK_ENB(1U)
+
+#define S_RDMA_0B_WR_OPCODE_HI 29
+#define V_RDMA_0B_WR_OPCODE_HI(x) ((x) << S_RDMA_0B_WR_OPCODE_HI)
+#define F_RDMA_0B_WR_OPCODE_HI V_RDMA_0B_WR_OPCODE_HI(1U)
+
+#define S_RDMA_IMMEDIATE_CQE 28
+#define V_RDMA_IMMEDIATE_CQE(x) ((x) << S_RDMA_IMMEDIATE_CQE)
+#define F_RDMA_IMMEDIATE_CQE V_RDMA_IMMEDIATE_CQE(1U)
+
+#define S_RDMA_ATOMIC_WR_RSP_CQE 27
+#define V_RDMA_ATOMIC_WR_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_WR_RSP_CQE)
+#define F_RDMA_ATOMIC_WR_RSP_CQE V_RDMA_ATOMIC_WR_RSP_CQE(1U)
+
+#define S_RDMA_VERIFY_RSP_FLUSH 26
+#define V_RDMA_VERIFY_RSP_FLUSH(x) ((x) << S_RDMA_VERIFY_RSP_FLUSH)
+#define F_RDMA_VERIFY_RSP_FLUSH V_RDMA_VERIFY_RSP_FLUSH(1U)
+
+#define S_RDMA_VERIFY_RSP_CQE 25
+#define V_RDMA_VERIFY_RSP_CQE(x) ((x) << S_RDMA_VERIFY_RSP_CQE)
+#define F_RDMA_VERIFY_RSP_CQE V_RDMA_VERIFY_RSP_CQE(1U)
+
+#define S_RDMA_FLUSH_RSP_CQE 24
+#define V_RDMA_FLUSH_RSP_CQE(x) ((x) << S_RDMA_FLUSH_RSP_CQE)
+#define F_RDMA_FLUSH_RSP_CQE V_RDMA_FLUSH_RSP_CQE(1U)
+
+#define S_RDMA_ATOMIC_RSP_CQE 23
+#define V_RDMA_ATOMIC_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_RSP_CQE)
+#define F_RDMA_ATOMIC_RSP_CQE V_RDMA_ATOMIC_RSP_CQE(1U)
+
+#define S_T7_TPT_EXTENSION_MODE 22
+#define V_T7_TPT_EXTENSION_MODE(x) ((x) << S_T7_TPT_EXTENSION_MODE)
+#define F_T7_TPT_EXTENSION_MODE V_T7_TPT_EXTENSION_MODE(1U)
+
+#define S_NVME_TCP_DDP_VAL_EN 21
+#define V_NVME_TCP_DDP_VAL_EN(x) ((x) << S_NVME_TCP_DDP_VAL_EN)
+#define F_NVME_TCP_DDP_VAL_EN V_NVME_TCP_DDP_VAL_EN(1U)
+
+#define S_NVME_TCP_REMOVE_HDR_CRC 20
+#define V_NVME_TCP_REMOVE_HDR_CRC(x) ((x) << S_NVME_TCP_REMOVE_HDR_CRC)
+#define F_NVME_TCP_REMOVE_HDR_CRC V_NVME_TCP_REMOVE_HDR_CRC(1U)
+
+#define S_NVME_TCP_LAST_PDU_CHECK_ENB 19
+#define V_NVME_TCP_LAST_PDU_CHECK_ENB(x) ((x) << S_NVME_TCP_LAST_PDU_CHECK_ENB)
+#define F_NVME_TCP_LAST_PDU_CHECK_ENB V_NVME_TCP_LAST_PDU_CHECK_ENB(1U)
+
+#define S_NVME_TCP_OFFSET_SUBMODE 17
+#define M_NVME_TCP_OFFSET_SUBMODE 0x3U
+#define V_NVME_TCP_OFFSET_SUBMODE(x) ((x) << S_NVME_TCP_OFFSET_SUBMODE)
+#define G_NVME_TCP_OFFSET_SUBMODE(x) (((x) >> S_NVME_TCP_OFFSET_SUBMODE) & M_NVME_TCP_OFFSET_SUBMODE)
+
+#define S_NVME_TCP_OFFSET_MODE 16
+#define V_NVME_TCP_OFFSET_MODE(x) ((x) << S_NVME_TCP_OFFSET_MODE)
+#define F_NVME_TCP_OFFSET_MODE V_NVME_TCP_OFFSET_MODE(1U)
+
+#define S_QPID_CHECK_DISABLE_FOR_SEND 15
+#define V_QPID_CHECK_DISABLE_FOR_SEND(x) ((x) << S_QPID_CHECK_DISABLE_FOR_SEND)
+#define F_QPID_CHECK_DISABLE_FOR_SEND V_QPID_CHECK_DISABLE_FOR_SEND(1U)
+
+#define S_RDMA_0B_WR_OPCODE_LO 10
+#define M_RDMA_0B_WR_OPCODE_LO 0xfU
+#define V_RDMA_0B_WR_OPCODE_LO(x) ((x) << S_RDMA_0B_WR_OPCODE_LO)
+#define G_RDMA_0B_WR_OPCODE_LO(x) (((x) >> S_RDMA_0B_WR_OPCODE_LO) & M_RDMA_0B_WR_OPCODE_LO)
+
#define A_ULP_RX_INT_ENABLE 0x19154
#define S_ENABLE_CTX_1 24
@@ -36971,6 +46512,86 @@
#define V_SE_CNT_MISMATCH_0(x) ((x) << S_SE_CNT_MISMATCH_0)
#define F_SE_CNT_MISMATCH_0 V_SE_CNT_MISMATCH_0(1U)
+#define S_CERR_PCMD_FIFO_3 19
+#define V_CERR_PCMD_FIFO_3(x) ((x) << S_CERR_PCMD_FIFO_3)
+#define F_CERR_PCMD_FIFO_3 V_CERR_PCMD_FIFO_3(1U)
+
+#define S_CERR_PCMD_FIFO_2 18
+#define V_CERR_PCMD_FIFO_2(x) ((x) << S_CERR_PCMD_FIFO_2)
+#define F_CERR_PCMD_FIFO_2 V_CERR_PCMD_FIFO_2(1U)
+
+#define S_CERR_PCMD_FIFO_1 17
+#define V_CERR_PCMD_FIFO_1(x) ((x) << S_CERR_PCMD_FIFO_1)
+#define F_CERR_PCMD_FIFO_1 V_CERR_PCMD_FIFO_1(1U)
+
+#define S_CERR_PCMD_FIFO_0 16
+#define V_CERR_PCMD_FIFO_0(x) ((x) << S_CERR_PCMD_FIFO_0)
+#define F_CERR_PCMD_FIFO_0 V_CERR_PCMD_FIFO_0(1U)
+
+#define S_CERR_DATA_FIFO_3 15
+#define V_CERR_DATA_FIFO_3(x) ((x) << S_CERR_DATA_FIFO_3)
+#define F_CERR_DATA_FIFO_3 V_CERR_DATA_FIFO_3(1U)
+
+#define S_CERR_DATA_FIFO_2 14
+#define V_CERR_DATA_FIFO_2(x) ((x) << S_CERR_DATA_FIFO_2)
+#define F_CERR_DATA_FIFO_2 V_CERR_DATA_FIFO_2(1U)
+
+#define S_CERR_DATA_FIFO_1 13
+#define V_CERR_DATA_FIFO_1(x) ((x) << S_CERR_DATA_FIFO_1)
+#define F_CERR_DATA_FIFO_1 V_CERR_DATA_FIFO_1(1U)
+
+#define S_CERR_DATA_FIFO_0 12
+#define V_CERR_DATA_FIFO_0(x) ((x) << S_CERR_DATA_FIFO_0)
+#define F_CERR_DATA_FIFO_0 V_CERR_DATA_FIFO_0(1U)
+
+#define S_SE_CNT_MISMATCH_3 11
+#define V_SE_CNT_MISMATCH_3(x) ((x) << S_SE_CNT_MISMATCH_3)
+#define F_SE_CNT_MISMATCH_3 V_SE_CNT_MISMATCH_3(1U)
+
+#define S_SE_CNT_MISMATCH_2 10
+#define V_SE_CNT_MISMATCH_2(x) ((x) << S_SE_CNT_MISMATCH_2)
+#define F_SE_CNT_MISMATCH_2 V_SE_CNT_MISMATCH_2(1U)
+
+#define S_T7_SE_CNT_MISMATCH_1 9
+#define V_T7_SE_CNT_MISMATCH_1(x) ((x) << S_T7_SE_CNT_MISMATCH_1)
+#define F_T7_SE_CNT_MISMATCH_1 V_T7_SE_CNT_MISMATCH_1(1U)
+
+#define S_T7_SE_CNT_MISMATCH_0 8
+#define V_T7_SE_CNT_MISMATCH_0(x) ((x) << S_T7_SE_CNT_MISMATCH_0)
+#define F_T7_SE_CNT_MISMATCH_0 V_T7_SE_CNT_MISMATCH_0(1U)
+
+#define S_ENABLE_CTX_3 7
+#define V_ENABLE_CTX_3(x) ((x) << S_ENABLE_CTX_3)
+#define F_ENABLE_CTX_3 V_ENABLE_CTX_3(1U)
+
+#define S_ENABLE_CTX_2 6
+#define V_ENABLE_CTX_2(x) ((x) << S_ENABLE_CTX_2)
+#define F_ENABLE_CTX_2 V_ENABLE_CTX_2(1U)
+
+#define S_T7_ENABLE_CTX_1 5
+#define V_T7_ENABLE_CTX_1(x) ((x) << S_T7_ENABLE_CTX_1)
+#define F_T7_ENABLE_CTX_1 V_T7_ENABLE_CTX_1(1U)
+
+#define S_T7_ENABLE_CTX_0 4
+#define V_T7_ENABLE_CTX_0(x) ((x) << S_T7_ENABLE_CTX_0)
+#define F_T7_ENABLE_CTX_0 V_T7_ENABLE_CTX_0(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_3 3
+#define V_ENABLE_ALN_SDC_ERR_3(x) ((x) << S_ENABLE_ALN_SDC_ERR_3)
+#define F_ENABLE_ALN_SDC_ERR_3 V_ENABLE_ALN_SDC_ERR_3(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_2 2
+#define V_ENABLE_ALN_SDC_ERR_2(x) ((x) << S_ENABLE_ALN_SDC_ERR_2)
+#define F_ENABLE_ALN_SDC_ERR_2 V_ENABLE_ALN_SDC_ERR_2(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_1 1
+#define V_T7_ENABLE_ALN_SDC_ERR_1(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_1)
+#define F_T7_ENABLE_ALN_SDC_ERR_1 V_T7_ENABLE_ALN_SDC_ERR_1(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_0 0
+#define V_T7_ENABLE_ALN_SDC_ERR_0(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_0)
+#define F_T7_ENABLE_ALN_SDC_ERR_0 V_T7_ENABLE_ALN_SDC_ERR_0(1U)
+
#define A_ULP_RX_INT_CAUSE 0x19158
#define S_CAUSE_CTX_1 24
@@ -37282,6 +46903,312 @@
#define G_ULPRX_TID(x) (((x) >> S_ULPRX_TID) & M_ULPRX_TID)
#define A_ULP_RX_CTX_ACC_CH1 0x191b0
+#define A_ULP_RX_CTX_ACC_CH2 0x191b4
+#define A_ULP_RX_CTX_ACC_CH3 0x191b8
+#define A_ULP_RX_CTL2 0x191bc
+
+#define S_PCMD3THRESHOLD 24
+#define M_PCMD3THRESHOLD 0xffU
+#define V_PCMD3THRESHOLD(x) ((x) << S_PCMD3THRESHOLD)
+#define G_PCMD3THRESHOLD(x) (((x) >> S_PCMD3THRESHOLD) & M_PCMD3THRESHOLD)
+
+#define S_PCMD2THRESHOLD 16
+#define M_PCMD2THRESHOLD 0xffU
+#define V_PCMD2THRESHOLD(x) ((x) << S_PCMD2THRESHOLD)
+#define G_PCMD2THRESHOLD(x) (((x) >> S_PCMD2THRESHOLD) & M_PCMD2THRESHOLD)
+
+#define S_T7_PCMD1THRESHOLD 8
+#define M_T7_PCMD1THRESHOLD 0xffU
+#define V_T7_PCMD1THRESHOLD(x) ((x) << S_T7_PCMD1THRESHOLD)
+#define G_T7_PCMD1THRESHOLD(x) (((x) >> S_T7_PCMD1THRESHOLD) & M_T7_PCMD1THRESHOLD)
+
+#define S_T7_PCMD0THRESHOLD 0
+#define M_T7_PCMD0THRESHOLD 0xffU
+#define V_T7_PCMD0THRESHOLD(x) ((x) << S_T7_PCMD0THRESHOLD)
+#define G_T7_PCMD0THRESHOLD(x) (((x) >> S_T7_PCMD0THRESHOLD) & M_T7_PCMD0THRESHOLD)
+
+#define A_ULP_RX_INT_ENABLE_INTERFACE 0x191c0
+
+#define S_ENABLE_ULPRX2SBT_RSPPERR 31
+#define V_ENABLE_ULPRX2SBT_RSPPERR(x) ((x) << S_ENABLE_ULPRX2SBT_RSPPERR)
+#define F_ENABLE_ULPRX2SBT_RSPPERR V_ENABLE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_ENABLE_ULPRX2MA_RSPPERR 30
+#define V_ENABLE_ULPRX2MA_RSPPERR(x) ((x) << S_ENABLE_ULPRX2MA_RSPPERR)
+#define F_ENABLE_ULPRX2MA_RSPPERR V_ENABLE_ULPRX2MA_RSPPERR(1U)
+
+#define S_ENABME_PIO_BUS_PERR 29
+#define V_ENABME_PIO_BUS_PERR(x) ((x) << S_ENABME_PIO_BUS_PERR)
+#define F_ENABME_PIO_BUS_PERR V_ENABME_PIO_BUS_PERR(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_3 19
+#define V_ENABLE_PM2ULP_SNOOPDATA_3(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_3)
+#define F_ENABLE_PM2ULP_SNOOPDATA_3 V_ENABLE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_2 18
+#define V_ENABLE_PM2ULP_SNOOPDATA_2(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_2)
+#define F_ENABLE_PM2ULP_SNOOPDATA_2 V_ENABLE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_1 17
+#define V_ENABLE_PM2ULP_SNOOPDATA_1(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_1)
+#define F_ENABLE_PM2ULP_SNOOPDATA_1 V_ENABLE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_0 16
+#define V_ENABLE_PM2ULP_SNOOPDATA_0(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_0)
+#define F_ENABLE_PM2ULP_SNOOPDATA_0 V_ENABLE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_3 15
+#define V_ENABLE_TLS2ULP_DATA_3(x) ((x) << S_ENABLE_TLS2ULP_DATA_3)
+#define F_ENABLE_TLS2ULP_DATA_3 V_ENABLE_TLS2ULP_DATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_2 14
+#define V_ENABLE_TLS2ULP_DATA_2(x) ((x) << S_ENABLE_TLS2ULP_DATA_2)
+#define F_ENABLE_TLS2ULP_DATA_2 V_ENABLE_TLS2ULP_DATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_1 13
+#define V_ENABLE_TLS2ULP_DATA_1(x) ((x) << S_ENABLE_TLS2ULP_DATA_1)
+#define F_ENABLE_TLS2ULP_DATA_1 V_ENABLE_TLS2ULP_DATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_0 12
+#define V_ENABLE_TLS2ULP_DATA_0(x) ((x) << S_ENABLE_TLS2ULP_DATA_0)
+#define F_ENABLE_TLS2ULP_DATA_0 V_ENABLE_TLS2ULP_DATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_3 11
+#define V_ENABLE_TLS2ULP_PLENDATA_3(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_3)
+#define F_ENABLE_TLS2ULP_PLENDATA_3 V_ENABLE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_2 10
+#define V_ENABLE_TLS2ULP_PLENDATA_2(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_2)
+#define F_ENABLE_TLS2ULP_PLENDATA_2 V_ENABLE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_1 9
+#define V_ENABLE_TLS2ULP_PLENDATA_1(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_1)
+#define F_ENABLE_TLS2ULP_PLENDATA_1 V_ENABLE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_0 8
+#define V_ENABLE_TLS2ULP_PLENDATA_0(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_0)
+#define F_ENABLE_TLS2ULP_PLENDATA_0 V_ENABLE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_ENABLE_PM2ULP_DATA_3 7
+#define V_ENABLE_PM2ULP_DATA_3(x) ((x) << S_ENABLE_PM2ULP_DATA_3)
+#define F_ENABLE_PM2ULP_DATA_3 V_ENABLE_PM2ULP_DATA_3(1U)
+
+#define S_ENABLE_PM2ULP_DATA_2 6
+#define V_ENABLE_PM2ULP_DATA_2(x) ((x) << S_ENABLE_PM2ULP_DATA_2)
+#define F_ENABLE_PM2ULP_DATA_2 V_ENABLE_PM2ULP_DATA_2(1U)
+
+#define S_ENABLE_PM2ULP_DATA_1 5
+#define V_ENABLE_PM2ULP_DATA_1(x) ((x) << S_ENABLE_PM2ULP_DATA_1)
+#define F_ENABLE_PM2ULP_DATA_1 V_ENABLE_PM2ULP_DATA_1(1U)
+
+#define S_ENABLE_PM2ULP_DATA_0 4
+#define V_ENABLE_PM2ULP_DATA_0(x) ((x) << S_ENABLE_PM2ULP_DATA_0)
+#define F_ENABLE_PM2ULP_DATA_0 V_ENABLE_PM2ULP_DATA_0(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_3 3
+#define V_ENABLE_TP2ULP_PCMD_3(x) ((x) << S_ENABLE_TP2ULP_PCMD_3)
+#define F_ENABLE_TP2ULP_PCMD_3 V_ENABLE_TP2ULP_PCMD_3(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_2 2
+#define V_ENABLE_TP2ULP_PCMD_2(x) ((x) << S_ENABLE_TP2ULP_PCMD_2)
+#define F_ENABLE_TP2ULP_PCMD_2 V_ENABLE_TP2ULP_PCMD_2(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_1 1
+#define V_ENABLE_TP2ULP_PCMD_1(x) ((x) << S_ENABLE_TP2ULP_PCMD_1)
+#define F_ENABLE_TP2ULP_PCMD_1 V_ENABLE_TP2ULP_PCMD_1(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_0 0
+#define V_ENABLE_TP2ULP_PCMD_0(x) ((x) << S_ENABLE_TP2ULP_PCMD_0)
+#define F_ENABLE_TP2ULP_PCMD_0 V_ENABLE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_INTERFACE 0x191c4
+
+#define S_CAUSE_ULPRX2SBT_RSPPERR 31
+#define V_CAUSE_ULPRX2SBT_RSPPERR(x) ((x) << S_CAUSE_ULPRX2SBT_RSPPERR)
+#define F_CAUSE_ULPRX2SBT_RSPPERR V_CAUSE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_CAUSE_ULPRX2MA_RSPPERR 30
+#define V_CAUSE_ULPRX2MA_RSPPERR(x) ((x) << S_CAUSE_ULPRX2MA_RSPPERR)
+#define F_CAUSE_ULPRX2MA_RSPPERR V_CAUSE_ULPRX2MA_RSPPERR(1U)
+
+#define S_CAUSE_PIO_BUS_PERR 29
+#define V_CAUSE_PIO_BUS_PERR(x) ((x) << S_CAUSE_PIO_BUS_PERR)
+#define F_CAUSE_PIO_BUS_PERR V_CAUSE_PIO_BUS_PERR(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_3 19
+#define V_CAUSE_PM2ULP_SNOOPDATA_3(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_3)
+#define F_CAUSE_PM2ULP_SNOOPDATA_3 V_CAUSE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_2 18
+#define V_CAUSE_PM2ULP_SNOOPDATA_2(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_2)
+#define F_CAUSE_PM2ULP_SNOOPDATA_2 V_CAUSE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_1 17
+#define V_CAUSE_PM2ULP_SNOOPDATA_1(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_1)
+#define F_CAUSE_PM2ULP_SNOOPDATA_1 V_CAUSE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_0 16
+#define V_CAUSE_PM2ULP_SNOOPDATA_0(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_0)
+#define F_CAUSE_PM2ULP_SNOOPDATA_0 V_CAUSE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_3 15
+#define V_CAUSE_TLS2ULP_DATA_3(x) ((x) << S_CAUSE_TLS2ULP_DATA_3)
+#define F_CAUSE_TLS2ULP_DATA_3 V_CAUSE_TLS2ULP_DATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_2 14
+#define V_CAUSE_TLS2ULP_DATA_2(x) ((x) << S_CAUSE_TLS2ULP_DATA_2)
+#define F_CAUSE_TLS2ULP_DATA_2 V_CAUSE_TLS2ULP_DATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_1 13
+#define V_CAUSE_TLS2ULP_DATA_1(x) ((x) << S_CAUSE_TLS2ULP_DATA_1)
+#define F_CAUSE_TLS2ULP_DATA_1 V_CAUSE_TLS2ULP_DATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_0 12
+#define V_CAUSE_TLS2ULP_DATA_0(x) ((x) << S_CAUSE_TLS2ULP_DATA_0)
+#define F_CAUSE_TLS2ULP_DATA_0 V_CAUSE_TLS2ULP_DATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_3 11
+#define V_CAUSE_TLS2ULP_PLENDATA_3(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_3)
+#define F_CAUSE_TLS2ULP_PLENDATA_3 V_CAUSE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_2 10
+#define V_CAUSE_TLS2ULP_PLENDATA_2(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_2)
+#define F_CAUSE_TLS2ULP_PLENDATA_2 V_CAUSE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_1 9
+#define V_CAUSE_TLS2ULP_PLENDATA_1(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_1)
+#define F_CAUSE_TLS2ULP_PLENDATA_1 V_CAUSE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_0 8
+#define V_CAUSE_TLS2ULP_PLENDATA_0(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_0)
+#define F_CAUSE_TLS2ULP_PLENDATA_0 V_CAUSE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_CAUSE_PM2ULP_DATA_3 7
+#define V_CAUSE_PM2ULP_DATA_3(x) ((x) << S_CAUSE_PM2ULP_DATA_3)
+#define F_CAUSE_PM2ULP_DATA_3 V_CAUSE_PM2ULP_DATA_3(1U)
+
+#define S_CAUSE_PM2ULP_DATA_2 6
+#define V_CAUSE_PM2ULP_DATA_2(x) ((x) << S_CAUSE_PM2ULP_DATA_2)
+#define F_CAUSE_PM2ULP_DATA_2 V_CAUSE_PM2ULP_DATA_2(1U)
+
+#define S_CAUSE_PM2ULP_DATA_1 5
+#define V_CAUSE_PM2ULP_DATA_1(x) ((x) << S_CAUSE_PM2ULP_DATA_1)
+#define F_CAUSE_PM2ULP_DATA_1 V_CAUSE_PM2ULP_DATA_1(1U)
+
+#define S_CAUSE_PM2ULP_DATA_0 4
+#define V_CAUSE_PM2ULP_DATA_0(x) ((x) << S_CAUSE_PM2ULP_DATA_0)
+#define F_CAUSE_PM2ULP_DATA_0 V_CAUSE_PM2ULP_DATA_0(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_3 3
+#define V_CAUSE_TP2ULP_PCMD_3(x) ((x) << S_CAUSE_TP2ULP_PCMD_3)
+#define F_CAUSE_TP2ULP_PCMD_3 V_CAUSE_TP2ULP_PCMD_3(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_2 2
+#define V_CAUSE_TP2ULP_PCMD_2(x) ((x) << S_CAUSE_TP2ULP_PCMD_2)
+#define F_CAUSE_TP2ULP_PCMD_2 V_CAUSE_TP2ULP_PCMD_2(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_1 1
+#define V_CAUSE_TP2ULP_PCMD_1(x) ((x) << S_CAUSE_TP2ULP_PCMD_1)
+#define F_CAUSE_TP2ULP_PCMD_1 V_CAUSE_TP2ULP_PCMD_1(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_0 0
+#define V_CAUSE_TP2ULP_PCMD_0(x) ((x) << S_CAUSE_TP2ULP_PCMD_0)
+#define F_CAUSE_TP2ULP_PCMD_0 V_CAUSE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_INTERFACE 0x191c8
+
+#define S_PERR_ULPRX2SBT_RSPPERR 31
+#define V_PERR_ULPRX2SBT_RSPPERR(x) ((x) << S_PERR_ULPRX2SBT_RSPPERR)
+#define F_PERR_ULPRX2SBT_RSPPERR V_PERR_ULPRX2SBT_RSPPERR(1U)
+
+#define S_PERR_ULPRX2MA_RSPPERR 30
+#define V_PERR_ULPRX2MA_RSPPERR(x) ((x) << S_PERR_ULPRX2MA_RSPPERR)
+#define F_PERR_ULPRX2MA_RSPPERR V_PERR_ULPRX2MA_RSPPERR(1U)
+
+#define S_PERR_PIO_BUS_PERR 29
+#define V_PERR_PIO_BUS_PERR(x) ((x) << S_PERR_PIO_BUS_PERR)
+#define F_PERR_PIO_BUS_PERR V_PERR_PIO_BUS_PERR(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_3 19
+#define V_PERR_PM2ULP_SNOOPDATA_3(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_3)
+#define F_PERR_PM2ULP_SNOOPDATA_3 V_PERR_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_2 18
+#define V_PERR_PM2ULP_SNOOPDATA_2(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_2)
+#define F_PERR_PM2ULP_SNOOPDATA_2 V_PERR_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_1 17
+#define V_PERR_PM2ULP_SNOOPDATA_1(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_1)
+#define F_PERR_PM2ULP_SNOOPDATA_1 V_PERR_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_0 16
+#define V_PERR_PM2ULP_SNOOPDATA_0(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_0)
+#define F_PERR_PM2ULP_SNOOPDATA_0 V_PERR_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_PERR_TLS2ULP_DATA_3 15
+#define V_PERR_TLS2ULP_DATA_3(x) ((x) << S_PERR_TLS2ULP_DATA_3)
+#define F_PERR_TLS2ULP_DATA_3 V_PERR_TLS2ULP_DATA_3(1U)
+
+#define S_PERR_TLS2ULP_DATA_2 14
+#define V_PERR_TLS2ULP_DATA_2(x) ((x) << S_PERR_TLS2ULP_DATA_2)
+#define F_PERR_TLS2ULP_DATA_2 V_PERR_TLS2ULP_DATA_2(1U)
+
+#define S_PERR_TLS2ULP_DATA_1 13
+#define V_PERR_TLS2ULP_DATA_1(x) ((x) << S_PERR_TLS2ULP_DATA_1)
+#define F_PERR_TLS2ULP_DATA_1 V_PERR_TLS2ULP_DATA_1(1U)
+
+#define S_PERR_TLS2ULP_DATA_0 12
+#define V_PERR_TLS2ULP_DATA_0(x) ((x) << S_PERR_TLS2ULP_DATA_0)
+#define F_PERR_TLS2ULP_DATA_0 V_PERR_TLS2ULP_DATA_0(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_3 11
+#define V_PERR_TLS2ULP_PLENDATA_3(x) ((x) << S_PERR_TLS2ULP_PLENDATA_3)
+#define F_PERR_TLS2ULP_PLENDATA_3 V_PERR_TLS2ULP_PLENDATA_3(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_2 10
+#define V_PERR_TLS2ULP_PLENDATA_2(x) ((x) << S_PERR_TLS2ULP_PLENDATA_2)
+#define F_PERR_TLS2ULP_PLENDATA_2 V_PERR_TLS2ULP_PLENDATA_2(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_1 9
+#define V_PERR_TLS2ULP_PLENDATA_1(x) ((x) << S_PERR_TLS2ULP_PLENDATA_1)
+#define F_PERR_TLS2ULP_PLENDATA_1 V_PERR_TLS2ULP_PLENDATA_1(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_0 8
+#define V_PERR_TLS2ULP_PLENDATA_0(x) ((x) << S_PERR_TLS2ULP_PLENDATA_0)
+#define F_PERR_TLS2ULP_PLENDATA_0 V_PERR_TLS2ULP_PLENDATA_0(1U)
+
+#define S_PERR_PM2ULP_DATA_3 7
+#define V_PERR_PM2ULP_DATA_3(x) ((x) << S_PERR_PM2ULP_DATA_3)
+#define F_PERR_PM2ULP_DATA_3 V_PERR_PM2ULP_DATA_3(1U)
+
+#define S_PERR_PM2ULP_DATA_2 6
+#define V_PERR_PM2ULP_DATA_2(x) ((x) << S_PERR_PM2ULP_DATA_2)
+#define F_PERR_PM2ULP_DATA_2 V_PERR_PM2ULP_DATA_2(1U)
+
+#define S_PERR_PM2ULP_DATA_1 5
+#define V_PERR_PM2ULP_DATA_1(x) ((x) << S_PERR_PM2ULP_DATA_1)
+#define F_PERR_PM2ULP_DATA_1 V_PERR_PM2ULP_DATA_1(1U)
+
+#define S_PERR_PM2ULP_DATA_0 4
+#define V_PERR_PM2ULP_DATA_0(x) ((x) << S_PERR_PM2ULP_DATA_0)
+#define F_PERR_PM2ULP_DATA_0 V_PERR_PM2ULP_DATA_0(1U)
+
+#define S_PERR_TP2ULP_PCMD_3 3
+#define V_PERR_TP2ULP_PCMD_3(x) ((x) << S_PERR_TP2ULP_PCMD_3)
+#define F_PERR_TP2ULP_PCMD_3 V_PERR_TP2ULP_PCMD_3(1U)
+
+#define S_PERR_TP2ULP_PCMD_2 2
+#define V_PERR_TP2ULP_PCMD_2(x) ((x) << S_PERR_TP2ULP_PCMD_2)
+#define F_PERR_TP2ULP_PCMD_2 V_PERR_TP2ULP_PCMD_2(1U)
+
+#define S_PERR_TP2ULP_PCMD_1 1
+#define V_PERR_TP2ULP_PCMD_1(x) ((x) << S_PERR_TP2ULP_PCMD_1)
+#define F_PERR_TP2ULP_PCMD_1 V_PERR_TP2ULP_PCMD_1(1U)
+
+#define S_PERR_TP2ULP_PCMD_0 0
+#define V_PERR_TP2ULP_PCMD_0(x) ((x) << S_PERR_TP2ULP_PCMD_0)
+#define F_PERR_TP2ULP_PCMD_0 V_PERR_TP2ULP_PCMD_0(1U)
+
#define A_ULP_RX_SE_CNT_ERR 0x191d0
#define A_ULP_RX_SE_CNT_CLR 0x191d4
@@ -37295,6 +47222,26 @@
#define V_CLRCHAN1(x) ((x) << S_CLRCHAN1)
#define G_CLRCHAN1(x) (((x) >> S_CLRCHAN1) & M_CLRCHAN1)
+#define S_CLRCHAN3 12
+#define M_CLRCHAN3 0xfU
+#define V_CLRCHAN3(x) ((x) << S_CLRCHAN3)
+#define G_CLRCHAN3(x) (((x) >> S_CLRCHAN3) & M_CLRCHAN3)
+
+#define S_CLRCHAN2 8
+#define M_CLRCHAN2 0xfU
+#define V_CLRCHAN2(x) ((x) << S_CLRCHAN2)
+#define G_CLRCHAN2(x) (((x) >> S_CLRCHAN2) & M_CLRCHAN2)
+
+#define S_T7_CLRCHAN1 4
+#define M_T7_CLRCHAN1 0xfU
+#define V_T7_CLRCHAN1(x) ((x) << S_T7_CLRCHAN1)
+#define G_T7_CLRCHAN1(x) (((x) >> S_T7_CLRCHAN1) & M_T7_CLRCHAN1)
+
+#define S_T7_CLRCHAN0 0
+#define M_T7_CLRCHAN0 0xfU
+#define V_T7_CLRCHAN0(x) ((x) << S_T7_CLRCHAN0)
+#define G_T7_CLRCHAN0(x) (((x) >> S_T7_CLRCHAN0) & M_T7_CLRCHAN0)
+
#define A_ULP_RX_SE_CNT_CH0 0x191d8
#define S_SOP_CNT_OUT0 28
@@ -37400,6 +47347,7 @@
#define G_SEL_L(x) (((x) >> S_SEL_L) & M_SEL_L)
#define A_ULP_RX_DBG_DATAH 0x191e4
+#define A_ULP_RX_DBG_DATA 0x191e4
#define A_ULP_RX_DBG_DATAL 0x191e8
#define A_ULP_RX_LA_CHNL 0x19238
@@ -37581,6 +47529,11 @@
#define V_PIO_RDMA_SEND_RQE(x) ((x) << S_PIO_RDMA_SEND_RQE)
#define F_PIO_RDMA_SEND_RQE V_PIO_RDMA_SEND_RQE(1U)
+#define S_TLS_KEYSIZECONF 26
+#define M_TLS_KEYSIZECONF 0x3U
+#define V_TLS_KEYSIZECONF(x) ((x) << S_TLS_KEYSIZECONF)
+#define G_TLS_KEYSIZECONF(x) (((x) >> S_TLS_KEYSIZECONF) & M_TLS_KEYSIZECONF)
+
#define A_ULP_RX_CH0_CGEN 0x19260
#define S_BYPASS_CGEN 7
@@ -37615,7 +47568,61 @@
#define V_RDMA_DATAPATH_CGEN(x) ((x) << S_RDMA_DATAPATH_CGEN)
#define F_RDMA_DATAPATH_CGEN V_RDMA_DATAPATH_CGEN(1U)
+#define A_ULP_RX_CH_CGEN 0x19260
+
+#define S_T7_BYPASS_CGEN 28
+#define M_T7_BYPASS_CGEN 0xfU
+#define V_T7_BYPASS_CGEN(x) ((x) << S_T7_BYPASS_CGEN)
+#define G_T7_BYPASS_CGEN(x) (((x) >> S_T7_BYPASS_CGEN) & M_T7_BYPASS_CGEN)
+
+#define S_T7_TDDP_CGEN 24
+#define M_T7_TDDP_CGEN 0xfU
+#define V_T7_TDDP_CGEN(x) ((x) << S_T7_TDDP_CGEN)
+#define G_T7_TDDP_CGEN(x) (((x) >> S_T7_TDDP_CGEN) & M_T7_TDDP_CGEN)
+
+#define S_T7_ISCSI_CGEN 20
+#define M_T7_ISCSI_CGEN 0xfU
+#define V_T7_ISCSI_CGEN(x) ((x) << S_T7_ISCSI_CGEN)
+#define G_T7_ISCSI_CGEN(x) (((x) >> S_T7_ISCSI_CGEN) & M_T7_ISCSI_CGEN)
+
+#define S_T7_RDMA_CGEN 16
+#define M_T7_RDMA_CGEN 0xfU
+#define V_T7_RDMA_CGEN(x) ((x) << S_T7_RDMA_CGEN)
+#define G_T7_RDMA_CGEN(x) (((x) >> S_T7_RDMA_CGEN) & M_T7_RDMA_CGEN)
+
+#define S_T7_CHANNEL_CGEN 12
+#define M_T7_CHANNEL_CGEN 0xfU
+#define V_T7_CHANNEL_CGEN(x) ((x) << S_T7_CHANNEL_CGEN)
+#define G_T7_CHANNEL_CGEN(x) (((x) >> S_T7_CHANNEL_CGEN) & M_T7_CHANNEL_CGEN)
+
+#define S_T7_ALL_DATAPATH_CGEN 8
+#define M_T7_ALL_DATAPATH_CGEN 0xfU
+#define V_T7_ALL_DATAPATH_CGEN(x) ((x) << S_T7_ALL_DATAPATH_CGEN)
+#define G_T7_ALL_DATAPATH_CGEN(x) (((x) >> S_T7_ALL_DATAPATH_CGEN) & M_T7_ALL_DATAPATH_CGEN)
+
+#define S_T7_T10DIFF_DATAPATH_CGEN 4
+#define M_T7_T10DIFF_DATAPATH_CGEN 0xfU
+#define V_T7_T10DIFF_DATAPATH_CGEN(x) ((x) << S_T7_T10DIFF_DATAPATH_CGEN)
+#define G_T7_T10DIFF_DATAPATH_CGEN(x) (((x) >> S_T7_T10DIFF_DATAPATH_CGEN) & M_T7_T10DIFF_DATAPATH_CGEN)
+
+#define S_T7_RDMA_DATAPATH_CGEN 0
+#define M_T7_RDMA_DATAPATH_CGEN 0xfU
+#define V_T7_RDMA_DATAPATH_CGEN(x) ((x) << S_T7_RDMA_DATAPATH_CGEN)
+#define G_T7_RDMA_DATAPATH_CGEN(x) (((x) >> S_T7_RDMA_DATAPATH_CGEN) & M_T7_RDMA_DATAPATH_CGEN)
+
#define A_ULP_RX_CH1_CGEN 0x19264
+#define A_ULP_RX_CH_CGEN_1 0x19264
+
+#define S_NVME_TCP_CGEN 4
+#define M_NVME_TCP_CGEN 0xfU
+#define V_NVME_TCP_CGEN(x) ((x) << S_NVME_TCP_CGEN)
+#define G_NVME_TCP_CGEN(x) (((x) >> S_NVME_TCP_CGEN) & M_NVME_TCP_CGEN)
+
+#define S_ROCE_CGEN 0
+#define M_ROCE_CGEN 0xfU
+#define V_ROCE_CGEN(x) ((x) << S_ROCE_CGEN)
+#define G_ROCE_CGEN(x) (((x) >> S_ROCE_CGEN) & M_ROCE_CGEN)
+
#define A_ULP_RX_RFE_DISABLE 0x19268
#define S_RQE_LIM_CHECK_RFE_DISABLE 0
@@ -37742,6 +47749,30 @@
#define V_SKIP_MA_REQ_EN0(x) ((x) << S_SKIP_MA_REQ_EN0)
#define F_SKIP_MA_REQ_EN0 V_SKIP_MA_REQ_EN0(1U)
+#define S_CLEAR_CTX_ERR_CNT3 7
+#define V_CLEAR_CTX_ERR_CNT3(x) ((x) << S_CLEAR_CTX_ERR_CNT3)
+#define F_CLEAR_CTX_ERR_CNT3 V_CLEAR_CTX_ERR_CNT3(1U)
+
+#define S_CLEAR_CTX_ERR_CNT2 6
+#define V_CLEAR_CTX_ERR_CNT2(x) ((x) << S_CLEAR_CTX_ERR_CNT2)
+#define F_CLEAR_CTX_ERR_CNT2 V_CLEAR_CTX_ERR_CNT2(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT1 5
+#define V_T7_CLEAR_CTX_ERR_CNT1(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT1)
+#define F_T7_CLEAR_CTX_ERR_CNT1 V_T7_CLEAR_CTX_ERR_CNT1(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT0 4
+#define V_T7_CLEAR_CTX_ERR_CNT0(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT0)
+#define F_T7_CLEAR_CTX_ERR_CNT0 V_T7_CLEAR_CTX_ERR_CNT0(1U)
+
+#define S_SKIP_MA_REQ_EN3 3
+#define V_SKIP_MA_REQ_EN3(x) ((x) << S_SKIP_MA_REQ_EN3)
+#define F_SKIP_MA_REQ_EN3 V_SKIP_MA_REQ_EN3(1U)
+
+#define S_SKIP_MA_REQ_EN2 2
+#define V_SKIP_MA_REQ_EN2(x) ((x) << S_SKIP_MA_REQ_EN2)
+#define F_SKIP_MA_REQ_EN2 V_SKIP_MA_REQ_EN2(1U)
+
#define A_ULP_RX_CHNL0_CTX_ERROR_COUNT_PER_TID 0x19288
#define A_ULP_RX_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1928c
#define A_ULP_RX_MSN_CHECK_ENABLE 0x19290
@@ -37758,6 +47789,92 @@
#define V_SEND_MSN_CHECK_ENABLE(x) ((x) << S_SEND_MSN_CHECK_ENABLE)
#define F_SEND_MSN_CHECK_ENABLE V_SEND_MSN_CHECK_ENABLE(1U)
+#define A_ULP_RX_SE_CNT_CH2 0x19294
+
+#define S_SOP_CNT_OUT2 28
+#define M_SOP_CNT_OUT2 0xfU
+#define V_SOP_CNT_OUT2(x) ((x) << S_SOP_CNT_OUT2)
+#define G_SOP_CNT_OUT2(x) (((x) >> S_SOP_CNT_OUT2) & M_SOP_CNT_OUT2)
+
+#define S_EOP_CNT_OUT2 24
+#define M_EOP_CNT_OUT2 0xfU
+#define V_EOP_CNT_OUT2(x) ((x) << S_EOP_CNT_OUT2)
+#define G_EOP_CNT_OUT2(x) (((x) >> S_EOP_CNT_OUT2) & M_EOP_CNT_OUT2)
+
+#define S_SOP_CNT_AL2 20
+#define M_SOP_CNT_AL2 0xfU
+#define V_SOP_CNT_AL2(x) ((x) << S_SOP_CNT_AL2)
+#define G_SOP_CNT_AL2(x) (((x) >> S_SOP_CNT_AL2) & M_SOP_CNT_AL2)
+
+#define S_EOP_CNT_AL2 16
+#define M_EOP_CNT_AL2 0xfU
+#define V_EOP_CNT_AL2(x) ((x) << S_EOP_CNT_AL2)
+#define G_EOP_CNT_AL2(x) (((x) >> S_EOP_CNT_AL2) & M_EOP_CNT_AL2)
+
+#define S_SOP_CNT_MR2 12
+#define M_SOP_CNT_MR2 0xfU
+#define V_SOP_CNT_MR2(x) ((x) << S_SOP_CNT_MR2)
+#define G_SOP_CNT_MR2(x) (((x) >> S_SOP_CNT_MR2) & M_SOP_CNT_MR2)
+
+#define S_EOP_CNT_MR2 8
+#define M_EOP_CNT_MR2 0xfU
+#define V_EOP_CNT_MR2(x) ((x) << S_EOP_CNT_MR2)
+#define G_EOP_CNT_MR2(x) (((x) >> S_EOP_CNT_MR2) & M_EOP_CNT_MR2)
+
+#define S_SOP_CNT_IN2 4
+#define M_SOP_CNT_IN2 0xfU
+#define V_SOP_CNT_IN2(x) ((x) << S_SOP_CNT_IN2)
+#define G_SOP_CNT_IN2(x) (((x) >> S_SOP_CNT_IN2) & M_SOP_CNT_IN2)
+
+#define S_EOP_CNT_IN2 0
+#define M_EOP_CNT_IN2 0xfU
+#define V_EOP_CNT_IN2(x) ((x) << S_EOP_CNT_IN2)
+#define G_EOP_CNT_IN2(x) (((x) >> S_EOP_CNT_IN2) & M_EOP_CNT_IN2)
+
+#define A_ULP_RX_SE_CNT_CH3 0x19298
+
+#define S_SOP_CNT_OUT3 28
+#define M_SOP_CNT_OUT3 0xfU
+#define V_SOP_CNT_OUT3(x) ((x) << S_SOP_CNT_OUT3)
+#define G_SOP_CNT_OUT3(x) (((x) >> S_SOP_CNT_OUT3) & M_SOP_CNT_OUT3)
+
+#define S_EOP_CNT_OUT3 24
+#define M_EOP_CNT_OUT3 0xfU
+#define V_EOP_CNT_OUT3(x) ((x) << S_EOP_CNT_OUT3)
+#define G_EOP_CNT_OUT3(x) (((x) >> S_EOP_CNT_OUT3) & M_EOP_CNT_OUT3)
+
+#define S_SOP_CNT_AL3 20
+#define M_SOP_CNT_AL3 0xfU
+#define V_SOP_CNT_AL3(x) ((x) << S_SOP_CNT_AL3)
+#define G_SOP_CNT_AL3(x) (((x) >> S_SOP_CNT_AL3) & M_SOP_CNT_AL3)
+
+#define S_EOP_CNT_AL3 16
+#define M_EOP_CNT_AL3 0xfU
+#define V_EOP_CNT_AL3(x) ((x) << S_EOP_CNT_AL3)
+#define G_EOP_CNT_AL3(x) (((x) >> S_EOP_CNT_AL3) & M_EOP_CNT_AL3)
+
+#define S_SOP_CNT_MR3 12
+#define M_SOP_CNT_MR3 0xfU
+#define V_SOP_CNT_MR3(x) ((x) << S_SOP_CNT_MR3)
+#define G_SOP_CNT_MR3(x) (((x) >> S_SOP_CNT_MR3) & M_SOP_CNT_MR3)
+
+#define S_EOP_CNT_MR3 8
+#define M_EOP_CNT_MR3 0xfU
+#define V_EOP_CNT_MR3(x) ((x) << S_EOP_CNT_MR3)
+#define G_EOP_CNT_MR3(x) (((x) >> S_EOP_CNT_MR3) & M_EOP_CNT_MR3)
+
+#define S_SOP_CNT_IN3 4
+#define M_SOP_CNT_IN3 0xfU
+#define V_SOP_CNT_IN3(x) ((x) << S_SOP_CNT_IN3)
+#define G_SOP_CNT_IN3(x) (((x) >> S_SOP_CNT_IN3) & M_SOP_CNT_IN3)
+
+#define S_EOP_CNT_IN3 0
+#define M_EOP_CNT_IN3 0xfU
+#define V_EOP_CNT_IN3(x) ((x) << S_EOP_CNT_IN3)
+#define G_EOP_CNT_IN3(x) (((x) >> S_EOP_CNT_IN3) & M_EOP_CNT_IN3)
+
+#define A_ULP_RX_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1929c
+#define A_ULP_RX_CHNL3_CTX_ERROR_COUNT_PER_TID 0x192a0
#define A_ULP_RX_TLS_PP_LLIMIT 0x192a4
#define S_TLSPPLLIMIT 6
@@ -37787,6 +47904,933 @@
#define G_TLSKEYULIMIT(x) (((x) >> S_TLSKEYULIMIT) & M_TLSKEYULIMIT)
#define A_ULP_RX_TLS_CTL 0x192bc
+#define A_ULP_RX_RRQ_LLIMIT 0x192c0
+#define A_ULP_RX_RRQ_ULIMIT 0x192c4
+#define A_ULP_RX_NVME_TCP_STAG_LLIMIT 0x192c8
+#define A_ULP_RX_NVME_TCP_STAG_ULIMIT 0x192cc
+#define A_ULP_RX_NVME_TCP_RQ_LLIMIT 0x192d0
+#define A_ULP_RX_NVME_TCP_RQ_ULIMIT 0x192d4
+#define A_ULP_RX_NVME_TCP_PBL_LLIMIT 0x192d8
+#define A_ULP_RX_NVME_TCP_PBL_ULIMIT 0x192dc
+#define A_ULP_RX_NVME_TCP_MAX_LENGTH 0x192e0
+
+#define S_NVME_TCP_MAX_PLEN01 24
+#define M_NVME_TCP_MAX_PLEN01 0xffU
+#define V_NVME_TCP_MAX_PLEN01(x) ((x) << S_NVME_TCP_MAX_PLEN01)
+#define G_NVME_TCP_MAX_PLEN01(x) (((x) >> S_NVME_TCP_MAX_PLEN01) & M_NVME_TCP_MAX_PLEN01)
+
+#define S_NVME_TCP_MAX_PLEN23 16
+#define M_NVME_TCP_MAX_PLEN23 0xffU
+#define V_NVME_TCP_MAX_PLEN23(x) ((x) << S_NVME_TCP_MAX_PLEN23)
+#define G_NVME_TCP_MAX_PLEN23(x) (((x) >> S_NVME_TCP_MAX_PLEN23) & M_NVME_TCP_MAX_PLEN23)
+
+#define S_NVME_TCP_MAX_CMD_PDU_LENGTH 0
+#define M_NVME_TCP_MAX_CMD_PDU_LENGTH 0xffffU
+#define V_NVME_TCP_MAX_CMD_PDU_LENGTH(x) ((x) << S_NVME_TCP_MAX_CMD_PDU_LENGTH)
+#define G_NVME_TCP_MAX_CMD_PDU_LENGTH(x) (((x) >> S_NVME_TCP_MAX_CMD_PDU_LENGTH) & M_NVME_TCP_MAX_CMD_PDU_LENGTH)
+
+#define A_ULP_RX_NVME_TCP_IQE_SIZE 0x192e4
+#define A_ULP_RX_NVME_TCP_NEW_PDU_TYPES 0x192e8
+#define A_ULP_RX_IWARP_PMOF_OPCODES_1 0x192ec
+#define A_ULP_RX_IWARP_PMOF_OPCODES_2 0x192f0
+#define A_ULP_RX_INT_ENABLE_PCMD 0x19300
+
+#define S_ENABLE_PCMD_SFIFO_3 30
+#define V_ENABLE_PCMD_SFIFO_3(x) ((x) << S_ENABLE_PCMD_SFIFO_3)
+#define F_ENABLE_PCMD_SFIFO_3 V_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_ENABLE_PCMD_FIFO_3 29
+#define V_ENABLE_PCMD_FIFO_3(x) ((x) << S_ENABLE_PCMD_FIFO_3)
+#define F_ENABLE_PCMD_FIFO_3 V_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_3 28
+#define V_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_ENABLE_PCMD_DDP_HINT_3)
+#define F_ENABLE_PCMD_DDP_HINT_3 V_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_ENABLE_PCMD_TPT_3 27
+#define V_ENABLE_PCMD_TPT_3(x) ((x) << S_ENABLE_PCMD_TPT_3)
+#define F_ENABLE_PCMD_TPT_3 V_ENABLE_PCMD_TPT_3(1U)
+
+#define S_ENABLE_PCMD_DDP_3 26
+#define V_ENABLE_PCMD_DDP_3(x) ((x) << S_ENABLE_PCMD_DDP_3)
+#define F_ENABLE_PCMD_DDP_3 V_ENABLE_PCMD_DDP_3(1U)
+
+#define S_ENABLE_PCMD_MPAR_3 25
+#define V_ENABLE_PCMD_MPAR_3(x) ((x) << S_ENABLE_PCMD_MPAR_3)
+#define F_ENABLE_PCMD_MPAR_3 V_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_ENABLE_PCMD_MPAC_3 24
+#define V_ENABLE_PCMD_MPAC_3(x) ((x) << S_ENABLE_PCMD_MPAC_3)
+#define F_ENABLE_PCMD_MPAC_3 V_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_ENABLE_PCMD_SFIFO_2 22
+#define V_ENABLE_PCMD_SFIFO_2(x) ((x) << S_ENABLE_PCMD_SFIFO_2)
+#define F_ENABLE_PCMD_SFIFO_2 V_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_ENABLE_PCMD_FIFO_2 21
+#define V_ENABLE_PCMD_FIFO_2(x) ((x) << S_ENABLE_PCMD_FIFO_2)
+#define F_ENABLE_PCMD_FIFO_2 V_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_2 20
+#define V_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_ENABLE_PCMD_DDP_HINT_2)
+#define F_ENABLE_PCMD_DDP_HINT_2 V_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_ENABLE_PCMD_TPT_2 19
+#define V_ENABLE_PCMD_TPT_2(x) ((x) << S_ENABLE_PCMD_TPT_2)
+#define F_ENABLE_PCMD_TPT_2 V_ENABLE_PCMD_TPT_2(1U)
+
+#define S_ENABLE_PCMD_DDP_2 18
+#define V_ENABLE_PCMD_DDP_2(x) ((x) << S_ENABLE_PCMD_DDP_2)
+#define F_ENABLE_PCMD_DDP_2 V_ENABLE_PCMD_DDP_2(1U)
+
+#define S_ENABLE_PCMD_MPAR_2 17
+#define V_ENABLE_PCMD_MPAR_2(x) ((x) << S_ENABLE_PCMD_MPAR_2)
+#define F_ENABLE_PCMD_MPAR_2 V_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_ENABLE_PCMD_MPAC_2 16
+#define V_ENABLE_PCMD_MPAC_2(x) ((x) << S_ENABLE_PCMD_MPAC_2)
+#define F_ENABLE_PCMD_MPAC_2 V_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_ENABLE_PCMD_SFIFO_1 14
+#define V_ENABLE_PCMD_SFIFO_1(x) ((x) << S_ENABLE_PCMD_SFIFO_1)
+#define F_ENABLE_PCMD_SFIFO_1 V_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_ENABLE_PCMD_FIFO_1 13
+#define V_ENABLE_PCMD_FIFO_1(x) ((x) << S_ENABLE_PCMD_FIFO_1)
+#define F_ENABLE_PCMD_FIFO_1 V_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_1 12
+#define V_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_ENABLE_PCMD_DDP_HINT_1)
+#define F_ENABLE_PCMD_DDP_HINT_1 V_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_ENABLE_PCMD_TPT_1 11
+#define V_ENABLE_PCMD_TPT_1(x) ((x) << S_ENABLE_PCMD_TPT_1)
+#define F_ENABLE_PCMD_TPT_1 V_ENABLE_PCMD_TPT_1(1U)
+
+#define S_ENABLE_PCMD_DDP_1 10
+#define V_ENABLE_PCMD_DDP_1(x) ((x) << S_ENABLE_PCMD_DDP_1)
+#define F_ENABLE_PCMD_DDP_1 V_ENABLE_PCMD_DDP_1(1U)
+
+#define S_ENABLE_PCMD_MPAR_1 9
+#define V_ENABLE_PCMD_MPAR_1(x) ((x) << S_ENABLE_PCMD_MPAR_1)
+#define F_ENABLE_PCMD_MPAR_1 V_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_ENABLE_PCMD_MPAC_1 8
+#define V_ENABLE_PCMD_MPAC_1(x) ((x) << S_ENABLE_PCMD_MPAC_1)
+#define F_ENABLE_PCMD_MPAC_1 V_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_ENABLE_PCMD_SFIFO_0 6
+#define V_ENABLE_PCMD_SFIFO_0(x) ((x) << S_ENABLE_PCMD_SFIFO_0)
+#define F_ENABLE_PCMD_SFIFO_0 V_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_ENABLE_PCMD_FIFO_0 5
+#define V_ENABLE_PCMD_FIFO_0(x) ((x) << S_ENABLE_PCMD_FIFO_0)
+#define F_ENABLE_PCMD_FIFO_0 V_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_0 4
+#define V_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_ENABLE_PCMD_DDP_HINT_0)
+#define F_ENABLE_PCMD_DDP_HINT_0 V_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_ENABLE_PCMD_TPT_0 3
+#define V_ENABLE_PCMD_TPT_0(x) ((x) << S_ENABLE_PCMD_TPT_0)
+#define F_ENABLE_PCMD_TPT_0 V_ENABLE_PCMD_TPT_0(1U)
+
+#define S_ENABLE_PCMD_DDP_0 2
+#define V_ENABLE_PCMD_DDP_0(x) ((x) << S_ENABLE_PCMD_DDP_0)
+#define F_ENABLE_PCMD_DDP_0 V_ENABLE_PCMD_DDP_0(1U)
+
+#define S_ENABLE_PCMD_MPAR_0 1
+#define V_ENABLE_PCMD_MPAR_0(x) ((x) << S_ENABLE_PCMD_MPAR_0)
+#define F_ENABLE_PCMD_MPAR_0 V_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_ENABLE_PCMD_MPAC_0 0
+#define V_ENABLE_PCMD_MPAC_0(x) ((x) << S_ENABLE_PCMD_MPAC_0)
+#define F_ENABLE_PCMD_MPAC_0 V_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_PCMD 0x19304
+
+#define S_CAUSE_PCMD_SFIFO_3 30
+#define V_CAUSE_PCMD_SFIFO_3(x) ((x) << S_CAUSE_PCMD_SFIFO_3)
+#define F_CAUSE_PCMD_SFIFO_3 V_CAUSE_PCMD_SFIFO_3(1U)
+
+#define S_CAUSE_PCMD_FIFO_3 29
+#define V_CAUSE_PCMD_FIFO_3(x) ((x) << S_CAUSE_PCMD_FIFO_3)
+#define F_CAUSE_PCMD_FIFO_3 V_CAUSE_PCMD_FIFO_3(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_3 28
+#define V_CAUSE_PCMD_DDP_HINT_3(x) ((x) << S_CAUSE_PCMD_DDP_HINT_3)
+#define F_CAUSE_PCMD_DDP_HINT_3 V_CAUSE_PCMD_DDP_HINT_3(1U)
+
+#define S_CAUSE_PCMD_TPT_3 27
+#define V_CAUSE_PCMD_TPT_3(x) ((x) << S_CAUSE_PCMD_TPT_3)
+#define F_CAUSE_PCMD_TPT_3 V_CAUSE_PCMD_TPT_3(1U)
+
+#define S_CAUSE_PCMD_DDP_3 26
+#define V_CAUSE_PCMD_DDP_3(x) ((x) << S_CAUSE_PCMD_DDP_3)
+#define F_CAUSE_PCMD_DDP_3 V_CAUSE_PCMD_DDP_3(1U)
+
+#define S_CAUSE_PCMD_MPAR_3 25
+#define V_CAUSE_PCMD_MPAR_3(x) ((x) << S_CAUSE_PCMD_MPAR_3)
+#define F_CAUSE_PCMD_MPAR_3 V_CAUSE_PCMD_MPAR_3(1U)
+
+#define S_CAUSE_PCMD_MPAC_3 24
+#define V_CAUSE_PCMD_MPAC_3(x) ((x) << S_CAUSE_PCMD_MPAC_3)
+#define F_CAUSE_PCMD_MPAC_3 V_CAUSE_PCMD_MPAC_3(1U)
+
+#define S_CAUSE_PCMD_SFIFO_2 22
+#define V_CAUSE_PCMD_SFIFO_2(x) ((x) << S_CAUSE_PCMD_SFIFO_2)
+#define F_CAUSE_PCMD_SFIFO_2 V_CAUSE_PCMD_SFIFO_2(1U)
+
+#define S_CAUSE_PCMD_FIFO_2 21
+#define V_CAUSE_PCMD_FIFO_2(x) ((x) << S_CAUSE_PCMD_FIFO_2)
+#define F_CAUSE_PCMD_FIFO_2 V_CAUSE_PCMD_FIFO_2(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_2 20
+#define V_CAUSE_PCMD_DDP_HINT_2(x) ((x) << S_CAUSE_PCMD_DDP_HINT_2)
+#define F_CAUSE_PCMD_DDP_HINT_2 V_CAUSE_PCMD_DDP_HINT_2(1U)
+
+#define S_CAUSE_PCMD_TPT_2 19
+#define V_CAUSE_PCMD_TPT_2(x) ((x) << S_CAUSE_PCMD_TPT_2)
+#define F_CAUSE_PCMD_TPT_2 V_CAUSE_PCMD_TPT_2(1U)
+
+#define S_CAUSE_PCMD_DDP_2 18
+#define V_CAUSE_PCMD_DDP_2(x) ((x) << S_CAUSE_PCMD_DDP_2)
+#define F_CAUSE_PCMD_DDP_2 V_CAUSE_PCMD_DDP_2(1U)
+
+#define S_CAUSE_PCMD_MPAR_2 17
+#define V_CAUSE_PCMD_MPAR_2(x) ((x) << S_CAUSE_PCMD_MPAR_2)
+#define F_CAUSE_PCMD_MPAR_2 V_CAUSE_PCMD_MPAR_2(1U)
+
+#define S_CAUSE_PCMD_MPAC_2 16
+#define V_CAUSE_PCMD_MPAC_2(x) ((x) << S_CAUSE_PCMD_MPAC_2)
+#define F_CAUSE_PCMD_MPAC_2 V_CAUSE_PCMD_MPAC_2(1U)
+
+#define S_CAUSE_PCMD_SFIFO_1 14
+#define V_CAUSE_PCMD_SFIFO_1(x) ((x) << S_CAUSE_PCMD_SFIFO_1)
+#define F_CAUSE_PCMD_SFIFO_1 V_CAUSE_PCMD_SFIFO_1(1U)
+
+#define S_CAUSE_PCMD_FIFO_1 13
+#define V_CAUSE_PCMD_FIFO_1(x) ((x) << S_CAUSE_PCMD_FIFO_1)
+#define F_CAUSE_PCMD_FIFO_1 V_CAUSE_PCMD_FIFO_1(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_1 12
+#define V_CAUSE_PCMD_DDP_HINT_1(x) ((x) << S_CAUSE_PCMD_DDP_HINT_1)
+#define F_CAUSE_PCMD_DDP_HINT_1 V_CAUSE_PCMD_DDP_HINT_1(1U)
+
+#define S_CAUSE_PCMD_TPT_1 11
+#define V_CAUSE_PCMD_TPT_1(x) ((x) << S_CAUSE_PCMD_TPT_1)
+#define F_CAUSE_PCMD_TPT_1 V_CAUSE_PCMD_TPT_1(1U)
+
+#define S_CAUSE_PCMD_DDP_1 10
+#define V_CAUSE_PCMD_DDP_1(x) ((x) << S_CAUSE_PCMD_DDP_1)
+#define F_CAUSE_PCMD_DDP_1 V_CAUSE_PCMD_DDP_1(1U)
+
+#define S_CAUSE_PCMD_MPAR_1 9
+#define V_CAUSE_PCMD_MPAR_1(x) ((x) << S_CAUSE_PCMD_MPAR_1)
+#define F_CAUSE_PCMD_MPAR_1 V_CAUSE_PCMD_MPAR_1(1U)
+
+#define S_CAUSE_PCMD_MPAC_1 8
+#define V_CAUSE_PCMD_MPAC_1(x) ((x) << S_CAUSE_PCMD_MPAC_1)
+#define F_CAUSE_PCMD_MPAC_1 V_CAUSE_PCMD_MPAC_1(1U)
+
+#define S_CAUSE_PCMD_SFIFO_0 6
+#define V_CAUSE_PCMD_SFIFO_0(x) ((x) << S_CAUSE_PCMD_SFIFO_0)
+#define F_CAUSE_PCMD_SFIFO_0 V_CAUSE_PCMD_SFIFO_0(1U)
+
+#define S_CAUSE_PCMD_FIFO_0 5
+#define V_CAUSE_PCMD_FIFO_0(x) ((x) << S_CAUSE_PCMD_FIFO_0)
+#define F_CAUSE_PCMD_FIFO_0 V_CAUSE_PCMD_FIFO_0(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_0 4
+#define V_CAUSE_PCMD_DDP_HINT_0(x) ((x) << S_CAUSE_PCMD_DDP_HINT_0)
+#define F_CAUSE_PCMD_DDP_HINT_0 V_CAUSE_PCMD_DDP_HINT_0(1U)
+
+#define S_CAUSE_PCMD_TPT_0 3
+#define V_CAUSE_PCMD_TPT_0(x) ((x) << S_CAUSE_PCMD_TPT_0)
+#define F_CAUSE_PCMD_TPT_0 V_CAUSE_PCMD_TPT_0(1U)
+
+#define S_CAUSE_PCMD_DDP_0 2
+#define V_CAUSE_PCMD_DDP_0(x) ((x) << S_CAUSE_PCMD_DDP_0)
+#define F_CAUSE_PCMD_DDP_0 V_CAUSE_PCMD_DDP_0(1U)
+
+#define S_CAUSE_PCMD_MPAR_0 1
+#define V_CAUSE_PCMD_MPAR_0(x) ((x) << S_CAUSE_PCMD_MPAR_0)
+#define F_CAUSE_PCMD_MPAR_0 V_CAUSE_PCMD_MPAR_0(1U)
+
+#define S_CAUSE_PCMD_MPAC_0 0
+#define V_CAUSE_PCMD_MPAC_0(x) ((x) << S_CAUSE_PCMD_MPAC_0)
+#define F_CAUSE_PCMD_MPAC_0 V_CAUSE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_PCMD 0x19308
+
+#define S_PERR_ENABLE_PCMD_SFIFO_3 30
+#define V_PERR_ENABLE_PCMD_SFIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_3)
+#define F_PERR_ENABLE_PCMD_SFIFO_3 V_PERR_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_3 29
+#define V_PERR_ENABLE_PCMD_FIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_3)
+#define F_PERR_ENABLE_PCMD_FIFO_3 V_PERR_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_3 28
+#define V_PERR_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_3)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_3 V_PERR_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_3 27
+#define V_PERR_ENABLE_PCMD_TPT_3(x) ((x) << S_PERR_ENABLE_PCMD_TPT_3)
+#define F_PERR_ENABLE_PCMD_TPT_3 V_PERR_ENABLE_PCMD_TPT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_3 26
+#define V_PERR_ENABLE_PCMD_DDP_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_3)
+#define F_PERR_ENABLE_PCMD_DDP_3 V_PERR_ENABLE_PCMD_DDP_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_3 25
+#define V_PERR_ENABLE_PCMD_MPAR_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_3)
+#define F_PERR_ENABLE_PCMD_MPAR_3 V_PERR_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_3 24
+#define V_PERR_ENABLE_PCMD_MPAC_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_3)
+#define F_PERR_ENABLE_PCMD_MPAC_3 V_PERR_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_2 22
+#define V_PERR_ENABLE_PCMD_SFIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_2)
+#define F_PERR_ENABLE_PCMD_SFIFO_2 V_PERR_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_2 21
+#define V_PERR_ENABLE_PCMD_FIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_2)
+#define F_PERR_ENABLE_PCMD_FIFO_2 V_PERR_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_2 20
+#define V_PERR_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_2)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_2 V_PERR_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_2 19
+#define V_PERR_ENABLE_PCMD_TPT_2(x) ((x) << S_PERR_ENABLE_PCMD_TPT_2)
+#define F_PERR_ENABLE_PCMD_TPT_2 V_PERR_ENABLE_PCMD_TPT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_2 18
+#define V_PERR_ENABLE_PCMD_DDP_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_2)
+#define F_PERR_ENABLE_PCMD_DDP_2 V_PERR_ENABLE_PCMD_DDP_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_2 17
+#define V_PERR_ENABLE_PCMD_MPAR_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_2)
+#define F_PERR_ENABLE_PCMD_MPAR_2 V_PERR_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_2 16
+#define V_PERR_ENABLE_PCMD_MPAC_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_2)
+#define F_PERR_ENABLE_PCMD_MPAC_2 V_PERR_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_1 14
+#define V_PERR_ENABLE_PCMD_SFIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_1)
+#define F_PERR_ENABLE_PCMD_SFIFO_1 V_PERR_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_1 13
+#define V_PERR_ENABLE_PCMD_FIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_1)
+#define F_PERR_ENABLE_PCMD_FIFO_1 V_PERR_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_1 12
+#define V_PERR_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_1)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_1 V_PERR_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_1 11
+#define V_PERR_ENABLE_PCMD_TPT_1(x) ((x) << S_PERR_ENABLE_PCMD_TPT_1)
+#define F_PERR_ENABLE_PCMD_TPT_1 V_PERR_ENABLE_PCMD_TPT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_1 10
+#define V_PERR_ENABLE_PCMD_DDP_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_1)
+#define F_PERR_ENABLE_PCMD_DDP_1 V_PERR_ENABLE_PCMD_DDP_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_1 9
+#define V_PERR_ENABLE_PCMD_MPAR_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_1)
+#define F_PERR_ENABLE_PCMD_MPAR_1 V_PERR_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_1 8
+#define V_PERR_ENABLE_PCMD_MPAC_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_1)
+#define F_PERR_ENABLE_PCMD_MPAC_1 V_PERR_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_0 6
+#define V_PERR_ENABLE_PCMD_SFIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_0)
+#define F_PERR_ENABLE_PCMD_SFIFO_0 V_PERR_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_0 5
+#define V_PERR_ENABLE_PCMD_FIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_0)
+#define F_PERR_ENABLE_PCMD_FIFO_0 V_PERR_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_0 4
+#define V_PERR_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_0)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_0 V_PERR_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_0 3
+#define V_PERR_ENABLE_PCMD_TPT_0(x) ((x) << S_PERR_ENABLE_PCMD_TPT_0)
+#define F_PERR_ENABLE_PCMD_TPT_0 V_PERR_ENABLE_PCMD_TPT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_0 2
+#define V_PERR_ENABLE_PCMD_DDP_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_0)
+#define F_PERR_ENABLE_PCMD_DDP_0 V_PERR_ENABLE_PCMD_DDP_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_0 1
+#define V_PERR_ENABLE_PCMD_MPAR_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_0)
+#define F_PERR_ENABLE_PCMD_MPAR_0 V_PERR_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_0 0
+#define V_PERR_ENABLE_PCMD_MPAC_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_0)
+#define F_PERR_ENABLE_PCMD_MPAC_0 V_PERR_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_DATA 0x19310
+
+#define S_ENABLE_DATA_SNOOP_3 29
+#define V_ENABLE_DATA_SNOOP_3(x) ((x) << S_ENABLE_DATA_SNOOP_3)
+#define F_ENABLE_DATA_SNOOP_3 V_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_ENABLE_DATA_SFIFO_3 28
+#define V_ENABLE_DATA_SFIFO_3(x) ((x) << S_ENABLE_DATA_SFIFO_3)
+#define F_ENABLE_DATA_SFIFO_3 V_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_ENABLE_DATA_FIFO_3 27
+#define V_ENABLE_DATA_FIFO_3(x) ((x) << S_ENABLE_DATA_FIFO_3)
+#define F_ENABLE_DATA_FIFO_3 V_ENABLE_DATA_FIFO_3(1U)
+
+#define S_ENABLE_DATA_DDP_3 26
+#define V_ENABLE_DATA_DDP_3(x) ((x) << S_ENABLE_DATA_DDP_3)
+#define F_ENABLE_DATA_DDP_3 V_ENABLE_DATA_DDP_3(1U)
+
+#define S_ENABLE_DATA_CTX_3 25
+#define V_ENABLE_DATA_CTX_3(x) ((x) << S_ENABLE_DATA_CTX_3)
+#define F_ENABLE_DATA_CTX_3 V_ENABLE_DATA_CTX_3(1U)
+
+#define S_ENABLE_DATA_PARSER_3 24
+#define V_ENABLE_DATA_PARSER_3(x) ((x) << S_ENABLE_DATA_PARSER_3)
+#define F_ENABLE_DATA_PARSER_3 V_ENABLE_DATA_PARSER_3(1U)
+
+#define S_ENABLE_DATA_SNOOP_2 21
+#define V_ENABLE_DATA_SNOOP_2(x) ((x) << S_ENABLE_DATA_SNOOP_2)
+#define F_ENABLE_DATA_SNOOP_2 V_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_ENABLE_DATA_SFIFO_2 20
+#define V_ENABLE_DATA_SFIFO_2(x) ((x) << S_ENABLE_DATA_SFIFO_2)
+#define F_ENABLE_DATA_SFIFO_2 V_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_ENABLE_DATA_FIFO_2 19
+#define V_ENABLE_DATA_FIFO_2(x) ((x) << S_ENABLE_DATA_FIFO_2)
+#define F_ENABLE_DATA_FIFO_2 V_ENABLE_DATA_FIFO_2(1U)
+
+#define S_ENABLE_DATA_DDP_2 18
+#define V_ENABLE_DATA_DDP_2(x) ((x) << S_ENABLE_DATA_DDP_2)
+#define F_ENABLE_DATA_DDP_2 V_ENABLE_DATA_DDP_2(1U)
+
+#define S_ENABLE_DATA_CTX_2 17
+#define V_ENABLE_DATA_CTX_2(x) ((x) << S_ENABLE_DATA_CTX_2)
+#define F_ENABLE_DATA_CTX_2 V_ENABLE_DATA_CTX_2(1U)
+
+#define S_ENABLE_DATA_PARSER_2 16
+#define V_ENABLE_DATA_PARSER_2(x) ((x) << S_ENABLE_DATA_PARSER_2)
+#define F_ENABLE_DATA_PARSER_2 V_ENABLE_DATA_PARSER_2(1U)
+
+#define S_ENABLE_DATA_SNOOP_1 13
+#define V_ENABLE_DATA_SNOOP_1(x) ((x) << S_ENABLE_DATA_SNOOP_1)
+#define F_ENABLE_DATA_SNOOP_1 V_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_ENABLE_DATA_SFIFO_1 12
+#define V_ENABLE_DATA_SFIFO_1(x) ((x) << S_ENABLE_DATA_SFIFO_1)
+#define F_ENABLE_DATA_SFIFO_1 V_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_ENABLE_DATA_FIFO_1 11
+#define V_ENABLE_DATA_FIFO_1(x) ((x) << S_ENABLE_DATA_FIFO_1)
+#define F_ENABLE_DATA_FIFO_1 V_ENABLE_DATA_FIFO_1(1U)
+
+#define S_ENABLE_DATA_DDP_1 10
+#define V_ENABLE_DATA_DDP_1(x) ((x) << S_ENABLE_DATA_DDP_1)
+#define F_ENABLE_DATA_DDP_1 V_ENABLE_DATA_DDP_1(1U)
+
+#define S_ENABLE_DATA_CTX_1 9
+#define V_ENABLE_DATA_CTX_1(x) ((x) << S_ENABLE_DATA_CTX_1)
+#define F_ENABLE_DATA_CTX_1 V_ENABLE_DATA_CTX_1(1U)
+
+#define S_ENABLE_DATA_PARSER_1 8
+#define V_ENABLE_DATA_PARSER_1(x) ((x) << S_ENABLE_DATA_PARSER_1)
+#define F_ENABLE_DATA_PARSER_1 V_ENABLE_DATA_PARSER_1(1U)
+
+#define S_ENABLE_DATA_SNOOP_0 5
+#define V_ENABLE_DATA_SNOOP_0(x) ((x) << S_ENABLE_DATA_SNOOP_0)
+#define F_ENABLE_DATA_SNOOP_0 V_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_ENABLE_DATA_SFIFO_0 4
+#define V_ENABLE_DATA_SFIFO_0(x) ((x) << S_ENABLE_DATA_SFIFO_0)
+#define F_ENABLE_DATA_SFIFO_0 V_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_ENABLE_DATA_FIFO_0 3
+#define V_ENABLE_DATA_FIFO_0(x) ((x) << S_ENABLE_DATA_FIFO_0)
+#define F_ENABLE_DATA_FIFO_0 V_ENABLE_DATA_FIFO_0(1U)
+
+#define S_ENABLE_DATA_DDP_0 2
+#define V_ENABLE_DATA_DDP_0(x) ((x) << S_ENABLE_DATA_DDP_0)
+#define F_ENABLE_DATA_DDP_0 V_ENABLE_DATA_DDP_0(1U)
+
+#define S_ENABLE_DATA_CTX_0 1
+#define V_ENABLE_DATA_CTX_0(x) ((x) << S_ENABLE_DATA_CTX_0)
+#define F_ENABLE_DATA_CTX_0 V_ENABLE_DATA_CTX_0(1U)
+
+#define S_ENABLE_DATA_PARSER_0 0
+#define V_ENABLE_DATA_PARSER_0(x) ((x) << S_ENABLE_DATA_PARSER_0)
+#define F_ENABLE_DATA_PARSER_0 V_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_DATA 0x19314
+
+#define S_CAUSE_DATA_SNOOP_3 29
+#define V_CAUSE_DATA_SNOOP_3(x) ((x) << S_CAUSE_DATA_SNOOP_3)
+#define F_CAUSE_DATA_SNOOP_3 V_CAUSE_DATA_SNOOP_3(1U)
+
+#define S_CAUSE_DATA_SFIFO_3 28
+#define V_CAUSE_DATA_SFIFO_3(x) ((x) << S_CAUSE_DATA_SFIFO_3)
+#define F_CAUSE_DATA_SFIFO_3 V_CAUSE_DATA_SFIFO_3(1U)
+
+#define S_CAUSE_DATA_FIFO_3 27
+#define V_CAUSE_DATA_FIFO_3(x) ((x) << S_CAUSE_DATA_FIFO_3)
+#define F_CAUSE_DATA_FIFO_3 V_CAUSE_DATA_FIFO_3(1U)
+
+#define S_CAUSE_DATA_DDP_3 26
+#define V_CAUSE_DATA_DDP_3(x) ((x) << S_CAUSE_DATA_DDP_3)
+#define F_CAUSE_DATA_DDP_3 V_CAUSE_DATA_DDP_3(1U)
+
+#define S_CAUSE_DATA_CTX_3 25
+#define V_CAUSE_DATA_CTX_3(x) ((x) << S_CAUSE_DATA_CTX_3)
+#define F_CAUSE_DATA_CTX_3 V_CAUSE_DATA_CTX_3(1U)
+
+#define S_CAUSE_DATA_PARSER_3 24
+#define V_CAUSE_DATA_PARSER_3(x) ((x) << S_CAUSE_DATA_PARSER_3)
+#define F_CAUSE_DATA_PARSER_3 V_CAUSE_DATA_PARSER_3(1U)
+
+#define S_CAUSE_DATA_SNOOP_2 21
+#define V_CAUSE_DATA_SNOOP_2(x) ((x) << S_CAUSE_DATA_SNOOP_2)
+#define F_CAUSE_DATA_SNOOP_2 V_CAUSE_DATA_SNOOP_2(1U)
+
+#define S_CAUSE_DATA_SFIFO_2 20
+#define V_CAUSE_DATA_SFIFO_2(x) ((x) << S_CAUSE_DATA_SFIFO_2)
+#define F_CAUSE_DATA_SFIFO_2 V_CAUSE_DATA_SFIFO_2(1U)
+
+#define S_CAUSE_DATA_FIFO_2 19
+#define V_CAUSE_DATA_FIFO_2(x) ((x) << S_CAUSE_DATA_FIFO_2)
+#define F_CAUSE_DATA_FIFO_2 V_CAUSE_DATA_FIFO_2(1U)
+
+#define S_CAUSE_DATA_DDP_2 18
+#define V_CAUSE_DATA_DDP_2(x) ((x) << S_CAUSE_DATA_DDP_2)
+#define F_CAUSE_DATA_DDP_2 V_CAUSE_DATA_DDP_2(1U)
+
+#define S_CAUSE_DATA_CTX_2 17
+#define V_CAUSE_DATA_CTX_2(x) ((x) << S_CAUSE_DATA_CTX_2)
+#define F_CAUSE_DATA_CTX_2 V_CAUSE_DATA_CTX_2(1U)
+
+#define S_CAUSE_DATA_PARSER_2 16
+#define V_CAUSE_DATA_PARSER_2(x) ((x) << S_CAUSE_DATA_PARSER_2)
+#define F_CAUSE_DATA_PARSER_2 V_CAUSE_DATA_PARSER_2(1U)
+
+#define S_CAUSE_DATA_SNOOP_1 13
+#define V_CAUSE_DATA_SNOOP_1(x) ((x) << S_CAUSE_DATA_SNOOP_1)
+#define F_CAUSE_DATA_SNOOP_1 V_CAUSE_DATA_SNOOP_1(1U)
+
+#define S_CAUSE_DATA_SFIFO_1 12
+#define V_CAUSE_DATA_SFIFO_1(x) ((x) << S_CAUSE_DATA_SFIFO_1)
+#define F_CAUSE_DATA_SFIFO_1 V_CAUSE_DATA_SFIFO_1(1U)
+
+#define S_CAUSE_DATA_FIFO_1 11
+#define V_CAUSE_DATA_FIFO_1(x) ((x) << S_CAUSE_DATA_FIFO_1)
+#define F_CAUSE_DATA_FIFO_1 V_CAUSE_DATA_FIFO_1(1U)
+
+#define S_CAUSE_DATA_DDP_1 10
+#define V_CAUSE_DATA_DDP_1(x) ((x) << S_CAUSE_DATA_DDP_1)
+#define F_CAUSE_DATA_DDP_1 V_CAUSE_DATA_DDP_1(1U)
+
+#define S_CAUSE_DATA_CTX_1 9
+#define V_CAUSE_DATA_CTX_1(x) ((x) << S_CAUSE_DATA_CTX_1)
+#define F_CAUSE_DATA_CTX_1 V_CAUSE_DATA_CTX_1(1U)
+
+#define S_CAUSE_DATA_PARSER_1 8
+#define V_CAUSE_DATA_PARSER_1(x) ((x) << S_CAUSE_DATA_PARSER_1)
+#define F_CAUSE_DATA_PARSER_1 V_CAUSE_DATA_PARSER_1(1U)
+
+#define S_CAUSE_DATA_SNOOP_0 5
+#define V_CAUSE_DATA_SNOOP_0(x) ((x) << S_CAUSE_DATA_SNOOP_0)
+#define F_CAUSE_DATA_SNOOP_0 V_CAUSE_DATA_SNOOP_0(1U)
+
+#define S_CAUSE_DATA_SFIFO_0 4
+#define V_CAUSE_DATA_SFIFO_0(x) ((x) << S_CAUSE_DATA_SFIFO_0)
+#define F_CAUSE_DATA_SFIFO_0 V_CAUSE_DATA_SFIFO_0(1U)
+
+#define S_CAUSE_DATA_FIFO_0 3
+#define V_CAUSE_DATA_FIFO_0(x) ((x) << S_CAUSE_DATA_FIFO_0)
+#define F_CAUSE_DATA_FIFO_0 V_CAUSE_DATA_FIFO_0(1U)
+
+#define S_CAUSE_DATA_DDP_0 2
+#define V_CAUSE_DATA_DDP_0(x) ((x) << S_CAUSE_DATA_DDP_0)
+#define F_CAUSE_DATA_DDP_0 V_CAUSE_DATA_DDP_0(1U)
+
+#define S_CAUSE_DATA_CTX_0 1
+#define V_CAUSE_DATA_CTX_0(x) ((x) << S_CAUSE_DATA_CTX_0)
+#define F_CAUSE_DATA_CTX_0 V_CAUSE_DATA_CTX_0(1U)
+
+#define S_CAUSE_DATA_PARSER_0 0
+#define V_CAUSE_DATA_PARSER_0(x) ((x) << S_CAUSE_DATA_PARSER_0)
+#define F_CAUSE_DATA_PARSER_0 V_CAUSE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_DATA 0x19318
+
+#define S_PERR_ENABLE_DATA_SNOOP_3 29
+#define V_PERR_ENABLE_DATA_SNOOP_3(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_3)
+#define F_PERR_ENABLE_DATA_SNOOP_3 V_PERR_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_3 28
+#define V_PERR_ENABLE_DATA_SFIFO_3(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_3)
+#define F_PERR_ENABLE_DATA_SFIFO_3 V_PERR_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_3 27
+#define V_PERR_ENABLE_DATA_FIFO_3(x) ((x) << S_PERR_ENABLE_DATA_FIFO_3)
+#define F_PERR_ENABLE_DATA_FIFO_3 V_PERR_ENABLE_DATA_FIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_3 26
+#define V_PERR_ENABLE_DATA_DDP_3(x) ((x) << S_PERR_ENABLE_DATA_DDP_3)
+#define F_PERR_ENABLE_DATA_DDP_3 V_PERR_ENABLE_DATA_DDP_3(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_3 25
+#define V_PERR_ENABLE_DATA_CTX_3(x) ((x) << S_PERR_ENABLE_DATA_CTX_3)
+#define F_PERR_ENABLE_DATA_CTX_3 V_PERR_ENABLE_DATA_CTX_3(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_3 24
+#define V_PERR_ENABLE_DATA_PARSER_3(x) ((x) << S_PERR_ENABLE_DATA_PARSER_3)
+#define F_PERR_ENABLE_DATA_PARSER_3 V_PERR_ENABLE_DATA_PARSER_3(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_2 21
+#define V_PERR_ENABLE_DATA_SNOOP_2(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_2)
+#define F_PERR_ENABLE_DATA_SNOOP_2 V_PERR_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_2 20
+#define V_PERR_ENABLE_DATA_SFIFO_2(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_2)
+#define F_PERR_ENABLE_DATA_SFIFO_2 V_PERR_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_2 19
+#define V_PERR_ENABLE_DATA_FIFO_2(x) ((x) << S_PERR_ENABLE_DATA_FIFO_2)
+#define F_PERR_ENABLE_DATA_FIFO_2 V_PERR_ENABLE_DATA_FIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_2 18
+#define V_PERR_ENABLE_DATA_DDP_2(x) ((x) << S_PERR_ENABLE_DATA_DDP_2)
+#define F_PERR_ENABLE_DATA_DDP_2 V_PERR_ENABLE_DATA_DDP_2(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_2 17
+#define V_PERR_ENABLE_DATA_CTX_2(x) ((x) << S_PERR_ENABLE_DATA_CTX_2)
+#define F_PERR_ENABLE_DATA_CTX_2 V_PERR_ENABLE_DATA_CTX_2(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_2 16
+#define V_PERR_ENABLE_DATA_PARSER_2(x) ((x) << S_PERR_ENABLE_DATA_PARSER_2)
+#define F_PERR_ENABLE_DATA_PARSER_2 V_PERR_ENABLE_DATA_PARSER_2(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_1 13
+#define V_PERR_ENABLE_DATA_SNOOP_1(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_1)
+#define F_PERR_ENABLE_DATA_SNOOP_1 V_PERR_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_1 12
+#define V_PERR_ENABLE_DATA_SFIFO_1(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_1)
+#define F_PERR_ENABLE_DATA_SFIFO_1 V_PERR_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_1 11
+#define V_PERR_ENABLE_DATA_FIFO_1(x) ((x) << S_PERR_ENABLE_DATA_FIFO_1)
+#define F_PERR_ENABLE_DATA_FIFO_1 V_PERR_ENABLE_DATA_FIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_1 10
+#define V_PERR_ENABLE_DATA_DDP_1(x) ((x) << S_PERR_ENABLE_DATA_DDP_1)
+#define F_PERR_ENABLE_DATA_DDP_1 V_PERR_ENABLE_DATA_DDP_1(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_1 9
+#define V_PERR_ENABLE_DATA_CTX_1(x) ((x) << S_PERR_ENABLE_DATA_CTX_1)
+#define F_PERR_ENABLE_DATA_CTX_1 V_PERR_ENABLE_DATA_CTX_1(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_1 8
+#define V_PERR_ENABLE_DATA_PARSER_1(x) ((x) << S_PERR_ENABLE_DATA_PARSER_1)
+#define F_PERR_ENABLE_DATA_PARSER_1 V_PERR_ENABLE_DATA_PARSER_1(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_0 5
+#define V_PERR_ENABLE_DATA_SNOOP_0(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_0)
+#define F_PERR_ENABLE_DATA_SNOOP_0 V_PERR_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_0 4
+#define V_PERR_ENABLE_DATA_SFIFO_0(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_0)
+#define F_PERR_ENABLE_DATA_SFIFO_0 V_PERR_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_0 3
+#define V_PERR_ENABLE_DATA_FIFO_0(x) ((x) << S_PERR_ENABLE_DATA_FIFO_0)
+#define F_PERR_ENABLE_DATA_FIFO_0 V_PERR_ENABLE_DATA_FIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_0 2
+#define V_PERR_ENABLE_DATA_DDP_0(x) ((x) << S_PERR_ENABLE_DATA_DDP_0)
+#define F_PERR_ENABLE_DATA_DDP_0 V_PERR_ENABLE_DATA_DDP_0(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_0 1
+#define V_PERR_ENABLE_DATA_CTX_0(x) ((x) << S_PERR_ENABLE_DATA_CTX_0)
+#define F_PERR_ENABLE_DATA_CTX_0 V_PERR_ENABLE_DATA_CTX_0(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_0 0
+#define V_PERR_ENABLE_DATA_PARSER_0(x) ((x) << S_PERR_ENABLE_DATA_PARSER_0)
+#define F_PERR_ENABLE_DATA_PARSER_0 V_PERR_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_ARB 0x19320
+
+#define S_ENABLE_ARB_PBL_PF_3 27
+#define V_ENABLE_ARB_PBL_PF_3(x) ((x) << S_ENABLE_ARB_PBL_PF_3)
+#define F_ENABLE_ARB_PBL_PF_3 V_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_ENABLE_ARB_PF_3 26
+#define V_ENABLE_ARB_PF_3(x) ((x) << S_ENABLE_ARB_PF_3)
+#define F_ENABLE_ARB_PF_3 V_ENABLE_ARB_PF_3(1U)
+
+#define S_ENABLE_ARB_TPT_PF_3 25
+#define V_ENABLE_ARB_TPT_PF_3(x) ((x) << S_ENABLE_ARB_TPT_PF_3)
+#define F_ENABLE_ARB_TPT_PF_3 V_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_ENABLE_ARB_F_3 24
+#define V_ENABLE_ARB_F_3(x) ((x) << S_ENABLE_ARB_F_3)
+#define F_ENABLE_ARB_F_3 V_ENABLE_ARB_F_3(1U)
+
+#define S_ENABLE_ARB_PBL_PF_2 19
+#define V_ENABLE_ARB_PBL_PF_2(x) ((x) << S_ENABLE_ARB_PBL_PF_2)
+#define F_ENABLE_ARB_PBL_PF_2 V_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_ENABLE_ARB_PF_2 18
+#define V_ENABLE_ARB_PF_2(x) ((x) << S_ENABLE_ARB_PF_2)
+#define F_ENABLE_ARB_PF_2 V_ENABLE_ARB_PF_2(1U)
+
+#define S_ENABLE_ARB_TPT_PF_2 17
+#define V_ENABLE_ARB_TPT_PF_2(x) ((x) << S_ENABLE_ARB_TPT_PF_2)
+#define F_ENABLE_ARB_TPT_PF_2 V_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_ENABLE_ARB_F_2 16
+#define V_ENABLE_ARB_F_2(x) ((x) << S_ENABLE_ARB_F_2)
+#define F_ENABLE_ARB_F_2 V_ENABLE_ARB_F_2(1U)
+
+#define S_ENABLE_ARB_PBL_PF_1 11
+#define V_ENABLE_ARB_PBL_PF_1(x) ((x) << S_ENABLE_ARB_PBL_PF_1)
+#define F_ENABLE_ARB_PBL_PF_1 V_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_ENABLE_ARB_PF_1 10
+#define V_ENABLE_ARB_PF_1(x) ((x) << S_ENABLE_ARB_PF_1)
+#define F_ENABLE_ARB_PF_1 V_ENABLE_ARB_PF_1(1U)
+
+#define S_ENABLE_ARB_TPT_PF_1 9
+#define V_ENABLE_ARB_TPT_PF_1(x) ((x) << S_ENABLE_ARB_TPT_PF_1)
+#define F_ENABLE_ARB_TPT_PF_1 V_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_ENABLE_ARB_F_1 8
+#define V_ENABLE_ARB_F_1(x) ((x) << S_ENABLE_ARB_F_1)
+#define F_ENABLE_ARB_F_1 V_ENABLE_ARB_F_1(1U)
+
+#define S_ENABLE_ARB_PBL_PF_0 3
+#define V_ENABLE_ARB_PBL_PF_0(x) ((x) << S_ENABLE_ARB_PBL_PF_0)
+#define F_ENABLE_ARB_PBL_PF_0 V_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_ENABLE_ARB_PF_0 2
+#define V_ENABLE_ARB_PF_0(x) ((x) << S_ENABLE_ARB_PF_0)
+#define F_ENABLE_ARB_PF_0 V_ENABLE_ARB_PF_0(1U)
+
+#define S_ENABLE_ARB_TPT_PF_0 1
+#define V_ENABLE_ARB_TPT_PF_0(x) ((x) << S_ENABLE_ARB_TPT_PF_0)
+#define F_ENABLE_ARB_TPT_PF_0 V_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_ENABLE_ARB_F_0 0
+#define V_ENABLE_ARB_F_0(x) ((x) << S_ENABLE_ARB_F_0)
+#define F_ENABLE_ARB_F_0 V_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_ARB 0x19324
+
+#define S_CAUSE_ARB_PBL_PF_3 27
+#define V_CAUSE_ARB_PBL_PF_3(x) ((x) << S_CAUSE_ARB_PBL_PF_3)
+#define F_CAUSE_ARB_PBL_PF_3 V_CAUSE_ARB_PBL_PF_3(1U)
+
+#define S_CAUSE_ARB_PF_3 26
+#define V_CAUSE_ARB_PF_3(x) ((x) << S_CAUSE_ARB_PF_3)
+#define F_CAUSE_ARB_PF_3 V_CAUSE_ARB_PF_3(1U)
+
+#define S_CAUSE_ARB_TPT_PF_3 25
+#define V_CAUSE_ARB_TPT_PF_3(x) ((x) << S_CAUSE_ARB_TPT_PF_3)
+#define F_CAUSE_ARB_TPT_PF_3 V_CAUSE_ARB_TPT_PF_3(1U)
+
+#define S_CAUSE_ARB_F_3 24
+#define V_CAUSE_ARB_F_3(x) ((x) << S_CAUSE_ARB_F_3)
+#define F_CAUSE_ARB_F_3 V_CAUSE_ARB_F_3(1U)
+
+#define S_CAUSE_ARB_PBL_PF_2 19
+#define V_CAUSE_ARB_PBL_PF_2(x) ((x) << S_CAUSE_ARB_PBL_PF_2)
+#define F_CAUSE_ARB_PBL_PF_2 V_CAUSE_ARB_PBL_PF_2(1U)
+
+#define S_CAUSE_ARB_PF_2 18
+#define V_CAUSE_ARB_PF_2(x) ((x) << S_CAUSE_ARB_PF_2)
+#define F_CAUSE_ARB_PF_2 V_CAUSE_ARB_PF_2(1U)
+
+#define S_CAUSE_ARB_TPT_PF_2 17
+#define V_CAUSE_ARB_TPT_PF_2(x) ((x) << S_CAUSE_ARB_TPT_PF_2)
+#define F_CAUSE_ARB_TPT_PF_2 V_CAUSE_ARB_TPT_PF_2(1U)
+
+#define S_CAUSE_ARB_F_2 16
+#define V_CAUSE_ARB_F_2(x) ((x) << S_CAUSE_ARB_F_2)
+#define F_CAUSE_ARB_F_2 V_CAUSE_ARB_F_2(1U)
+
+#define S_CAUSE_ARB_PBL_PF_1 11
+#define V_CAUSE_ARB_PBL_PF_1(x) ((x) << S_CAUSE_ARB_PBL_PF_1)
+#define F_CAUSE_ARB_PBL_PF_1 V_CAUSE_ARB_PBL_PF_1(1U)
+
+#define S_CAUSE_ARB_PF_1 10
+#define V_CAUSE_ARB_PF_1(x) ((x) << S_CAUSE_ARB_PF_1)
+#define F_CAUSE_ARB_PF_1 V_CAUSE_ARB_PF_1(1U)
+
+#define S_CAUSE_ARB_TPT_PF_1 9
+#define V_CAUSE_ARB_TPT_PF_1(x) ((x) << S_CAUSE_ARB_TPT_PF_1)
+#define F_CAUSE_ARB_TPT_PF_1 V_CAUSE_ARB_TPT_PF_1(1U)
+
+#define S_CAUSE_ARB_F_1 8
+#define V_CAUSE_ARB_F_1(x) ((x) << S_CAUSE_ARB_F_1)
+#define F_CAUSE_ARB_F_1 V_CAUSE_ARB_F_1(1U)
+
+#define S_CAUSE_ARB_PBL_PF_0 3
+#define V_CAUSE_ARB_PBL_PF_0(x) ((x) << S_CAUSE_ARB_PBL_PF_0)
+#define F_CAUSE_ARB_PBL_PF_0 V_CAUSE_ARB_PBL_PF_0(1U)
+
+#define S_CAUSE_ARB_PF_0 2
+#define V_CAUSE_ARB_PF_0(x) ((x) << S_CAUSE_ARB_PF_0)
+#define F_CAUSE_ARB_PF_0 V_CAUSE_ARB_PF_0(1U)
+
+#define S_CAUSE_ARB_TPT_PF_0 1
+#define V_CAUSE_ARB_TPT_PF_0(x) ((x) << S_CAUSE_ARB_TPT_PF_0)
+#define F_CAUSE_ARB_TPT_PF_0 V_CAUSE_ARB_TPT_PF_0(1U)
+
+#define S_CAUSE_ARB_F_0 0
+#define V_CAUSE_ARB_F_0(x) ((x) << S_CAUSE_ARB_F_0)
+#define F_CAUSE_ARB_F_0 V_CAUSE_ARB_F_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_ARB 0x19328
+
+#define S_PERR_ENABLE_ARB_PBL_PF_3 27
+#define V_PERR_ENABLE_ARB_PBL_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_3)
+#define F_PERR_ENABLE_ARB_PBL_PF_3 V_PERR_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_PF_3 26
+#define V_PERR_ENABLE_ARB_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PF_3)
+#define F_PERR_ENABLE_ARB_PF_3 V_PERR_ENABLE_ARB_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_3 25
+#define V_PERR_ENABLE_ARB_TPT_PF_3(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_3)
+#define F_PERR_ENABLE_ARB_TPT_PF_3 V_PERR_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_F_3 24
+#define V_PERR_ENABLE_ARB_F_3(x) ((x) << S_PERR_ENABLE_ARB_F_3)
+#define F_PERR_ENABLE_ARB_F_3 V_PERR_ENABLE_ARB_F_3(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_2 19
+#define V_PERR_ENABLE_ARB_PBL_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_2)
+#define F_PERR_ENABLE_ARB_PBL_PF_2 V_PERR_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_PF_2 18
+#define V_PERR_ENABLE_ARB_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PF_2)
+#define F_PERR_ENABLE_ARB_PF_2 V_PERR_ENABLE_ARB_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_2 17
+#define V_PERR_ENABLE_ARB_TPT_PF_2(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_2)
+#define F_PERR_ENABLE_ARB_TPT_PF_2 V_PERR_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_F_2 16
+#define V_PERR_ENABLE_ARB_F_2(x) ((x) << S_PERR_ENABLE_ARB_F_2)
+#define F_PERR_ENABLE_ARB_F_2 V_PERR_ENABLE_ARB_F_2(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_1 11
+#define V_PERR_ENABLE_ARB_PBL_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_1)
+#define F_PERR_ENABLE_ARB_PBL_PF_1 V_PERR_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_PF_1 10
+#define V_PERR_ENABLE_ARB_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PF_1)
+#define F_PERR_ENABLE_ARB_PF_1 V_PERR_ENABLE_ARB_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_1 9
+#define V_PERR_ENABLE_ARB_TPT_PF_1(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_1)
+#define F_PERR_ENABLE_ARB_TPT_PF_1 V_PERR_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_F_1 8
+#define V_PERR_ENABLE_ARB_F_1(x) ((x) << S_PERR_ENABLE_ARB_F_1)
+#define F_PERR_ENABLE_ARB_F_1 V_PERR_ENABLE_ARB_F_1(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_0 3
+#define V_PERR_ENABLE_ARB_PBL_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_0)
+#define F_PERR_ENABLE_ARB_PBL_PF_0 V_PERR_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_PF_0 2
+#define V_PERR_ENABLE_ARB_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PF_0)
+#define F_PERR_ENABLE_ARB_PF_0 V_PERR_ENABLE_ARB_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_0 1
+#define V_PERR_ENABLE_ARB_TPT_PF_0(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_0)
+#define F_PERR_ENABLE_ARB_TPT_PF_0 V_PERR_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_F_0 0
+#define V_PERR_ENABLE_ARB_F_0(x) ((x) << S_PERR_ENABLE_ARB_F_0)
+#define F_PERR_ENABLE_ARB_F_0 V_PERR_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_CTL1 0x19330
+
+#define S_ISCSI_CTL2 27
+#define V_ISCSI_CTL2(x) ((x) << S_ISCSI_CTL2)
+#define F_ISCSI_CTL2 V_ISCSI_CTL2(1U)
+
+#define S_ISCSI_CTL1 26
+#define V_ISCSI_CTL1(x) ((x) << S_ISCSI_CTL1)
+#define F_ISCSI_CTL1 V_ISCSI_CTL1(1U)
+
+#define S_ISCSI_CTL0 25
+#define V_ISCSI_CTL0(x) ((x) << S_ISCSI_CTL0)
+#define F_ISCSI_CTL0 V_ISCSI_CTL0(1U)
+
+#define S_NVME_TCP_DATA_ALIGNMENT 16
+#define M_NVME_TCP_DATA_ALIGNMENT 0x1ffU
+#define V_NVME_TCP_DATA_ALIGNMENT(x) ((x) << S_NVME_TCP_DATA_ALIGNMENT)
+#define G_NVME_TCP_DATA_ALIGNMENT(x) (((x) >> S_NVME_TCP_DATA_ALIGNMENT) & M_NVME_TCP_DATA_ALIGNMENT)
+
+#define S_NVME_TCP_INVLD_MSG_DIS 14
+#define M_NVME_TCP_INVLD_MSG_DIS 0x3U
+#define V_NVME_TCP_INVLD_MSG_DIS(x) ((x) << S_NVME_TCP_INVLD_MSG_DIS)
+#define G_NVME_TCP_INVLD_MSG_DIS(x) (((x) >> S_NVME_TCP_INVLD_MSG_DIS) & M_NVME_TCP_INVLD_MSG_DIS)
+
+#define S_NVME_TCP_DDP_PDU_CHK_TYPE 13
+#define V_NVME_TCP_DDP_PDU_CHK_TYPE(x) ((x) << S_NVME_TCP_DDP_PDU_CHK_TYPE)
+#define F_NVME_TCP_DDP_PDU_CHK_TYPE V_NVME_TCP_DDP_PDU_CHK_TYPE(1U)
+
+#define S_T10_CONFIG_ENB 12
+#define V_T10_CONFIG_ENB(x) ((x) << S_T10_CONFIG_ENB)
+#define F_T10_CONFIG_ENB V_T10_CONFIG_ENB(1U)
+
+#define S_NVME_TCP_COLOUR_ENB 10
+#define M_NVME_TCP_COLOUR_ENB 0x3U
+#define V_NVME_TCP_COLOUR_ENB(x) ((x) << S_NVME_TCP_COLOUR_ENB)
+#define G_NVME_TCP_COLOUR_ENB(x) (((x) >> S_NVME_TCP_COLOUR_ENB) & M_NVME_TCP_COLOUR_ENB)
+
+#define S_ROCE_SEND_RQE 8
+#define V_ROCE_SEND_RQE(x) ((x) << S_ROCE_SEND_RQE)
+#define F_ROCE_SEND_RQE V_ROCE_SEND_RQE(1U)
+
+#define S_RDMA_INVLD_MSG_DIS 6
+#define M_RDMA_INVLD_MSG_DIS 0x3U
+#define V_RDMA_INVLD_MSG_DIS(x) ((x) << S_RDMA_INVLD_MSG_DIS)
+#define G_RDMA_INVLD_MSG_DIS(x) (((x) >> S_RDMA_INVLD_MSG_DIS) & M_RDMA_INVLD_MSG_DIS)
+
+#define S_ROCE_INVLD_MSG_DIS 4
+#define M_ROCE_INVLD_MSG_DIS 0x3U
+#define V_ROCE_INVLD_MSG_DIS(x) ((x) << S_ROCE_INVLD_MSG_DIS)
+#define G_ROCE_INVLD_MSG_DIS(x) (((x) >> S_ROCE_INVLD_MSG_DIS) & M_ROCE_INVLD_MSG_DIS)
+
+#define S_T7_MEM_ADDR_CTRL 2
+#define M_T7_MEM_ADDR_CTRL 0x3U
+#define V_T7_MEM_ADDR_CTRL(x) ((x) << S_T7_MEM_ADDR_CTRL)
+#define G_T7_MEM_ADDR_CTRL(x) (((x) >> S_T7_MEM_ADDR_CTRL) & M_T7_MEM_ADDR_CTRL)
+
+#define S_ENB_32K_PDU 1
+#define V_ENB_32K_PDU(x) ((x) << S_ENB_32K_PDU)
+#define F_ENB_32K_PDU V_ENB_32K_PDU(1U)
+
+#define S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS 0
+#define V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(x) ((x) << S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS)
+#define F_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(1U)
+
#define A_ULP_RX_TLS_IND_CMD 0x19348
#define S_TLS_RX_REG_OFF_ADDR 0
@@ -37795,6 +48839,8 @@
#define G_TLS_RX_REG_OFF_ADDR(x) (((x) >> S_TLS_RX_REG_OFF_ADDR) & M_TLS_RX_REG_OFF_ADDR)
#define A_ULP_RX_TLS_IND_DATA 0x1934c
+#define A_ULP_RX_TLS_CH0_HMACCTRL_CFG 0x20
+#define A_ULP_RX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module SF */
#define SF_BASE_ADDR 0x193f8
@@ -37815,6 +48861,39 @@
#define V_BYTECNT(x) ((x) << S_BYTECNT)
#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+#define S_EN32BADDR 30
+#define V_EN32BADDR(x) ((x) << S_EN32BADDR)
+#define F_EN32BADDR V_EN32BADDR(1U)
+
+#define S_NUM_OF_BYTES 1
+#define M_NUM_OF_BYTES 0x3U
+#define V_NUM_OF_BYTES(x) ((x) << S_NUM_OF_BYTES)
+#define G_NUM_OF_BYTES(x) (((x) >> S_NUM_OF_BYTES) & M_NUM_OF_BYTES)
+
+#define S_QUADREADDISABLE 5
+#define V_QUADREADDISABLE(x) ((x) << S_QUADREADDISABLE)
+#define F_QUADREADDISABLE V_QUADREADDISABLE(1U)
+
+#define S_EXIT4B 6
+#define V_EXIT4B(x) ((x) << S_EXIT4B)
+#define F_EXIT4B V_EXIT4B(1U)
+
+#define S_ENTER4B 7
+#define V_ENTER4B(x) ((x) << S_ENTER4B)
+#define F_ENTER4B V_ENTER4B(1U)
+
+#define S_QUADWRENABLE 8
+#define V_QUADWRENABLE(x) ((x) << S_QUADWRENABLE)
+#define F_QUADWRENABLE V_QUADWRENABLE(1U)
+
+#define S_REGDBG_SEL 9
+#define V_REGDBG_SEL(x) ((x) << S_REGDBG_SEL)
+#define F_REGDBG_SEL V_REGDBG_SEL(1U)
+
+#define S_REGDBG_MODE 10
+#define V_REGDBG_MODE(x) ((x) << S_REGDBG_MODE)
+#define F_REGDBG_MODE V_REGDBG_MODE(1U)
+
/* registers for module PL */
#define PL_BASE_ADDR 0x19400
@@ -37892,21 +48971,6 @@
#define F_SWINT V_SWINT(1U)
#define A_PL_WHOAMI 0x19400
-
-#define S_T6_SOURCEPF 9
-#define M_T6_SOURCEPF 0x7U
-#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
-#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
-
-#define S_T6_ISVF 8
-#define V_T6_ISVF(x) ((x) << S_T6_ISVF)
-#define F_T6_ISVF V_T6_ISVF(1U)
-
-#define S_T6_VFID 0
-#define M_T6_VFID 0xffU
-#define V_T6_VFID(x) ((x) << S_T6_VFID)
-#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)
-
#define A_PL_PERR_CAUSE 0x19404
#define S_UART 28
@@ -38037,6 +49101,134 @@
#define V_ANYMAC(x) ((x) << S_ANYMAC)
#define F_ANYMAC V_ANYMAC(1U)
+#define S_T7_PL_PERR_CRYPTO_KEY 31
+#define V_T7_PL_PERR_CRYPTO_KEY(x) ((x) << S_T7_PL_PERR_CRYPTO_KEY)
+#define F_T7_PL_PERR_CRYPTO_KEY V_T7_PL_PERR_CRYPTO_KEY(1U)
+
+#define S_T7_PL_PERR_CRYPTO1 30
+#define V_T7_PL_PERR_CRYPTO1(x) ((x) << S_T7_PL_PERR_CRYPTO1)
+#define F_T7_PL_PERR_CRYPTO1 V_T7_PL_PERR_CRYPTO1(1U)
+
+#define S_T7_PL_PERR_CRYPTO0 29
+#define V_T7_PL_PERR_CRYPTO0(x) ((x) << S_T7_PL_PERR_CRYPTO0)
+#define F_T7_PL_PERR_CRYPTO0 V_T7_PL_PERR_CRYPTO0(1U)
+
+#define S_T7_PL_PERR_GCACHE 28
+#define V_T7_PL_PERR_GCACHE(x) ((x) << S_T7_PL_PERR_GCACHE)
+#define F_T7_PL_PERR_GCACHE V_T7_PL_PERR_GCACHE(1U)
+
+#define S_T7_PL_PERR_ARM 27
+#define V_T7_PL_PERR_ARM(x) ((x) << S_T7_PL_PERR_ARM)
+#define F_T7_PL_PERR_ARM V_T7_PL_PERR_ARM(1U)
+
+#define S_T7_PL_PERR_ULP_TX 26
+#define V_T7_PL_PERR_ULP_TX(x) ((x) << S_T7_PL_PERR_ULP_TX)
+#define F_T7_PL_PERR_ULP_TX V_T7_PL_PERR_ULP_TX(1U)
+
+#define S_T7_PL_PERR_SGE 25
+#define V_T7_PL_PERR_SGE(x) ((x) << S_T7_PL_PERR_SGE)
+#define F_T7_PL_PERR_SGE V_T7_PL_PERR_SGE(1U)
+
+#define S_T7_PL_PERR_HMA 24
+#define V_T7_PL_PERR_HMA(x) ((x) << S_T7_PL_PERR_HMA)
+#define F_T7_PL_PERR_HMA V_T7_PL_PERR_HMA(1U)
+
+#define S_T7_PL_PERR_CPL_SWITCH 23
+#define V_T7_PL_PERR_CPL_SWITCH(x) ((x) << S_T7_PL_PERR_CPL_SWITCH)
+#define F_T7_PL_PERR_CPL_SWITCH V_T7_PL_PERR_CPL_SWITCH(1U)
+
+#define S_T7_PL_PERR_ULP_RX 22
+#define V_T7_PL_PERR_ULP_RX(x) ((x) << S_T7_PL_PERR_ULP_RX)
+#define F_T7_PL_PERR_ULP_RX V_T7_PL_PERR_ULP_RX(1U)
+
+#define S_T7_PL_PERR_PM_RX 21
+#define V_T7_PL_PERR_PM_RX(x) ((x) << S_T7_PL_PERR_PM_RX)
+#define F_T7_PL_PERR_PM_RX V_T7_PL_PERR_PM_RX(1U)
+
+#define S_T7_PL_PERR_PM_TX 20
+#define V_T7_PL_PERR_PM_TX(x) ((x) << S_T7_PL_PERR_PM_TX)
+#define F_T7_PL_PERR_PM_TX V_T7_PL_PERR_PM_TX(1U)
+
+#define S_T7_PL_PERR_MA 19
+#define V_T7_PL_PERR_MA(x) ((x) << S_T7_PL_PERR_MA)
+#define F_T7_PL_PERR_MA V_T7_PL_PERR_MA(1U)
+
+#define S_T7_PL_PERR_TP 18
+#define V_T7_PL_PERR_TP(x) ((x) << S_T7_PL_PERR_TP)
+#define F_T7_PL_PERR_TP V_T7_PL_PERR_TP(1U)
+
+#define S_T7_PL_PERR_LE 17
+#define V_T7_PL_PERR_LE(x) ((x) << S_T7_PL_PERR_LE)
+#define F_T7_PL_PERR_LE V_T7_PL_PERR_LE(1U)
+
+#define S_T7_PL_PERR_EDC1 16
+#define V_T7_PL_PERR_EDC1(x) ((x) << S_T7_PL_PERR_EDC1)
+#define F_T7_PL_PERR_EDC1 V_T7_PL_PERR_EDC1(1U)
+
+#define S_T7_PL_PERR_EDC0 15
+#define V_T7_PL_PERR_EDC0(x) ((x) << S_T7_PL_PERR_EDC0)
+#define F_T7_PL_PERR_EDC0 V_T7_PL_PERR_EDC0(1U)
+
+#define S_T7_PL_PERR_MC1 14
+#define V_T7_PL_PERR_MC1(x) ((x) << S_T7_PL_PERR_MC1)
+#define F_T7_PL_PERR_MC1 V_T7_PL_PERR_MC1(1U)
+
+#define S_T7_PL_PERR_MC0 13
+#define V_T7_PL_PERR_MC0(x) ((x) << S_T7_PL_PERR_MC0)
+#define F_T7_PL_PERR_MC0 V_T7_PL_PERR_MC0(1U)
+
+#define S_T7_PL_PERR_PCIE 12
+#define V_T7_PL_PERR_PCIE(x) ((x) << S_T7_PL_PERR_PCIE)
+#define F_T7_PL_PERR_PCIE V_T7_PL_PERR_PCIE(1U)
+
+#define S_T7_PL_PERR_UART 11
+#define V_T7_PL_PERR_UART(x) ((x) << S_T7_PL_PERR_UART)
+#define F_T7_PL_PERR_UART V_T7_PL_PERR_UART(1U)
+
+#define S_T7_PL_PERR_PMU 10
+#define V_T7_PL_PERR_PMU(x) ((x) << S_T7_PL_PERR_PMU)
+#define F_T7_PL_PERR_PMU V_T7_PL_PERR_PMU(1U)
+
+#define S_T7_PL_PERR_MAC 9
+#define V_T7_PL_PERR_MAC(x) ((x) << S_T7_PL_PERR_MAC)
+#define F_T7_PL_PERR_MAC V_T7_PL_PERR_MAC(1U)
+
+#define S_T7_PL_PERR_SMB 8
+#define V_T7_PL_PERR_SMB(x) ((x) << S_T7_PL_PERR_SMB)
+#define F_T7_PL_PERR_SMB V_T7_PL_PERR_SMB(1U)
+
+#define S_T7_PL_PERR_SF 7
+#define V_T7_PL_PERR_SF(x) ((x) << S_T7_PL_PERR_SF)
+#define F_T7_PL_PERR_SF V_T7_PL_PERR_SF(1U)
+
+#define S_T7_PL_PERR_PL 6
+#define V_T7_PL_PERR_PL(x) ((x) << S_T7_PL_PERR_PL)
+#define F_T7_PL_PERR_PL V_T7_PL_PERR_PL(1U)
+
+#define S_T7_PL_PERR_NCSI 5
+#define V_T7_PL_PERR_NCSI(x) ((x) << S_T7_PL_PERR_NCSI)
+#define F_T7_PL_PERR_NCSI V_T7_PL_PERR_NCSI(1U)
+
+#define S_T7_PL_PERR_MPS 4
+#define V_T7_PL_PERR_MPS(x) ((x) << S_T7_PL_PERR_MPS)
+#define F_T7_PL_PERR_MPS V_T7_PL_PERR_MPS(1U)
+
+#define S_T7_PL_PERR_MI 3
+#define V_T7_PL_PERR_MI(x) ((x) << S_T7_PL_PERR_MI)
+#define F_T7_PL_PERR_MI V_T7_PL_PERR_MI(1U)
+
+#define S_T7_PL_PERR_DBG 2
+#define V_T7_PL_PERR_DBG(x) ((x) << S_T7_PL_PERR_DBG)
+#define F_T7_PL_PERR_DBG V_T7_PL_PERR_DBG(1U)
+
+#define S_T7_PL_PERR_I2CM 1
+#define V_T7_PL_PERR_I2CM(x) ((x) << S_T7_PL_PERR_I2CM)
+#define F_T7_PL_PERR_I2CM V_T7_PL_PERR_I2CM(1U)
+
+#define S_T7_PL_PERR_CIM 0
+#define V_T7_PL_PERR_CIM(x) ((x) << S_T7_PL_PERR_CIM)
+#define F_T7_PL_PERR_CIM V_T7_PL_PERR_CIM(1U)
+
#define A_PL_PERR_ENABLE 0x19408
#define A_PL_INT_CAUSE 0x1940c
@@ -38064,6 +49256,78 @@
#define V_MAC0(x) ((x) << S_MAC0)
#define F_MAC0 V_MAC0(1U)
+#define S_T7_FLR 31
+#define V_T7_FLR(x) ((x) << S_T7_FLR)
+#define F_T7_FLR V_T7_FLR(1U)
+
+#define S_T7_SW_CIM 30
+#define V_T7_SW_CIM(x) ((x) << S_T7_SW_CIM)
+#define F_T7_SW_CIM V_T7_SW_CIM(1U)
+
+#define S_T7_ULP_TX 29
+#define V_T7_ULP_TX(x) ((x) << S_T7_ULP_TX)
+#define F_T7_ULP_TX V_T7_ULP_TX(1U)
+
+#define S_T7_SGE 28
+#define V_T7_SGE(x) ((x) << S_T7_SGE)
+#define F_T7_SGE V_T7_SGE(1U)
+
+#define S_T7_HMA 27
+#define V_T7_HMA(x) ((x) << S_T7_HMA)
+#define F_T7_HMA V_T7_HMA(1U)
+
+#define S_T7_CPL_SWITCH 26
+#define V_T7_CPL_SWITCH(x) ((x) << S_T7_CPL_SWITCH)
+#define F_T7_CPL_SWITCH V_T7_CPL_SWITCH(1U)
+
+#define S_T7_ULP_RX 25
+#define V_T7_ULP_RX(x) ((x) << S_T7_ULP_RX)
+#define F_T7_ULP_RX V_T7_ULP_RX(1U)
+
+#define S_T7_PM_RX 24
+#define V_T7_PM_RX(x) ((x) << S_T7_PM_RX)
+#define F_T7_PM_RX V_T7_PM_RX(1U)
+
+#define S_T7_PM_TX 23
+#define V_T7_PM_TX(x) ((x) << S_T7_PM_TX)
+#define F_T7_PM_TX V_T7_PM_TX(1U)
+
+#define S_T7_MA 22
+#define V_T7_MA(x) ((x) << S_T7_MA)
+#define F_T7_MA V_T7_MA(1U)
+
+#define S_T7_TP 21
+#define V_T7_TP(x) ((x) << S_T7_TP)
+#define F_T7_TP V_T7_TP(1U)
+
+#define S_T7_LE 20
+#define V_T7_LE(x) ((x) << S_T7_LE)
+#define F_T7_LE V_T7_LE(1U)
+
+#define S_T7_EDC1 19
+#define V_T7_EDC1(x) ((x) << S_T7_EDC1)
+#define F_T7_EDC1 V_T7_EDC1(1U)
+
+#define S_T7_EDC0 18
+#define V_T7_EDC0(x) ((x) << S_T7_EDC0)
+#define F_T7_EDC0 V_T7_EDC0(1U)
+
+#define S_T7_MC1 17
+#define V_T7_MC1(x) ((x) << S_T7_MC1)
+#define F_T7_MC1 V_T7_MC1(1U)
+
+#define S_T7_MC0 16
+#define V_T7_MC0(x) ((x) << S_T7_MC0)
+#define F_T7_MC0 V_T7_MC0(1U)
+
+#define S_T7_PCIE 15
+#define V_T7_PCIE(x) ((x) << S_T7_PCIE)
+#define F_T7_PCIE V_T7_PCIE(1U)
+
+#define S_T7_UART 14
+#define V_T7_UART(x) ((x) << S_T7_UART)
+#define F_T7_UART V_T7_UART(1U)
+
#define A_PL_INT_ENABLE 0x19410
#define A_PL_INT_MAP0 0x19414
@@ -38262,15 +49526,10 @@
#define V_T6_LN0_AECMD(x) ((x) << S_T6_LN0_AECMD)
#define G_T6_LN0_AECMD(x) (((x) >> S_T6_LN0_AECMD) & M_T6_LN0_AECMD)
-#define S_T6_STATECFGINITF 16
-#define M_T6_STATECFGINITF 0xffU
-#define V_T6_STATECFGINITF(x) ((x) << S_T6_STATECFGINITF)
-#define G_T6_STATECFGINITF(x) (((x) >> S_T6_STATECFGINITF) & M_T6_STATECFGINITF)
-
-#define S_T6_STATECFGINIT 12
-#define M_T6_STATECFGINIT 0xfU
-#define V_T6_STATECFGINIT(x) ((x) << S_T6_STATECFGINIT)
-#define G_T6_STATECFGINIT(x) (((x) >> S_T6_STATECFGINIT) & M_T6_STATECFGINIT)
+#define S_T6_1_STATECFGINITF 16
+#define M_T6_1_STATECFGINITF 0xffU
+#define V_T6_1_STATECFGINITF(x) ((x) << S_T6_1_STATECFGINITF)
+#define G_T6_1_STATECFGINITF(x) (((x) >> S_T6_1_STATECFGINITF) & M_T6_1_STATECFGINITF)
#define S_PHY_STATUS 10
#define V_PHY_STATUS(x) ((x) << S_PHY_STATUS)
@@ -38285,9 +49544,9 @@
#define V_PERSTTIMEOUT_PL(x) ((x) << S_PERSTTIMEOUT_PL)
#define F_PERSTTIMEOUT_PL V_PERSTTIMEOUT_PL(1U)
-#define S_T6_LTSSMENABLE 6
-#define V_T6_LTSSMENABLE(x) ((x) << S_T6_LTSSMENABLE)
-#define F_T6_LTSSMENABLE V_T6_LTSSMENABLE(1U)
+#define S_SPEEDMS 30
+#define V_SPEEDMS(x) ((x) << S_SPEEDMS)
+#define F_SPEEDMS V_SPEEDMS(1U)
#define A_PL_PCIE_CTL_STAT 0x19444
@@ -38382,6 +49641,37 @@
#define V_MAP0(x) ((x) << S_MAP0)
#define G_MAP0(x) (((x) >> S_MAP0) & M_MAP0)
+#define A_PL_INT_CAUSE2 0x19478
+
+#define S_CRYPTO_KEY 4
+#define V_CRYPTO_KEY(x) ((x) << S_CRYPTO_KEY)
+#define F_CRYPTO_KEY V_CRYPTO_KEY(1U)
+
+#define S_CRYPTO1 3
+#define V_CRYPTO1(x) ((x) << S_CRYPTO1)
+#define F_CRYPTO1 V_CRYPTO1(1U)
+
+#define S_CRYPTO0 2
+#define V_CRYPTO0(x) ((x) << S_CRYPTO0)
+#define F_CRYPTO0 V_CRYPTO0(1U)
+
+#define S_GCACHE 1
+#define V_GCACHE(x) ((x) << S_GCACHE)
+#define F_GCACHE V_GCACHE(1U)
+
+#define S_ARM 0
+#define V_ARM(x) ((x) << S_ARM)
+#define F_ARM V_ARM(1U)
+
+#define A_PL_INT_ENABLE2 0x1947c
+#define A_PL_ER_CMD 0x19488
+
+#define S_ER_ADDR 2
+#define M_ER_ADDR 0x3fffffffU
+#define V_ER_ADDR(x) ((x) << S_ER_ADDR)
+#define G_ER_ADDR(x) (((x) >> S_ER_ADDR) & M_ER_ADDR)
+
+#define A_PL_ER_DATA 0x1948c
#define A_PL_VF_SLICE_L 0x19490
#define S_LIMITADDR 16
@@ -38638,6 +49928,10 @@
#define V_REGION_EN(x) ((x) << S_REGION_EN)
#define G_REGION_EN(x) (((x) >> S_REGION_EN) & M_REGION_EN)
+#define S_CACHEBYPASS 28
+#define V_CACHEBYPASS(x) ((x) << S_CACHEBYPASS)
+#define F_CACHEBYPASS V_CACHEBYPASS(1U)
+
#define A_LE_MISC 0x19c08
#define S_CMPUNVAIL 0
@@ -38830,6 +50124,10 @@
#define V_TCAM_SIZE(x) ((x) << S_TCAM_SIZE)
#define G_TCAM_SIZE(x) (((x) >> S_TCAM_SIZE) & M_TCAM_SIZE)
+#define S_MLL_MASK 2
+#define V_MLL_MASK(x) ((x) << S_MLL_MASK)
+#define F_MLL_MASK V_MLL_MASK(1U)
+
#define A_LE_DB_INT_ENABLE 0x19c38
#define S_MSGSEL 27
@@ -39045,40 +50343,15 @@
#define V_PIPELINEERR(x) ((x) << S_PIPELINEERR)
#define F_PIPELINEERR V_PIPELINEERR(1U)
-#define A_LE_DB_INT_CAUSE 0x19c3c
-
-#define S_T6_ACTRGNFULL 21
-#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL)
-#define F_T6_ACTRGNFULL V_T6_ACTRGNFULL(1U)
+#define S_CACHEINTPERR 31
+#define V_CACHEINTPERR(x) ((x) << S_CACHEINTPERR)
+#define F_CACHEINTPERR V_CACHEINTPERR(1U)
-#define S_T6_ACTCNTIPV6TZERO 20
-#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO)
-#define F_T6_ACTCNTIPV6TZERO V_T6_ACTCNTIPV6TZERO(1U)
-
-#define S_T6_ACTCNTIPV4TZERO 19
-#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO)
-#define F_T6_ACTCNTIPV4TZERO V_T6_ACTCNTIPV4TZERO(1U)
-
-#define S_T6_ACTCNTIPV6ZERO 18
-#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO)
-#define F_T6_ACTCNTIPV6ZERO V_T6_ACTCNTIPV6ZERO(1U)
-
-#define S_T6_ACTCNTIPV4ZERO 17
-#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO)
-#define F_T6_ACTCNTIPV4ZERO V_T6_ACTCNTIPV4ZERO(1U)
-
-#define S_T6_UNKNOWNCMD 3
-#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD)
-#define F_T6_UNKNOWNCMD V_T6_UNKNOWNCMD(1U)
-
-#define S_T6_LIP0 2
-#define V_T6_LIP0(x) ((x) << S_T6_LIP0)
-#define F_T6_LIP0 V_T6_LIP0(1U)
-
-#define S_T6_LIPMISS 1
-#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS)
-#define F_T6_LIPMISS V_T6_LIPMISS(1U)
+#define S_CACHESRAMPERR 30
+#define V_CACHESRAMPERR(x) ((x) << S_CACHESRAMPERR)
+#define F_CACHESRAMPERR V_CACHESRAMPERR(1U)
+#define A_LE_DB_INT_CAUSE 0x19c3c
#define A_LE_DB_INT_TID 0x19c40
#define S_INTTID 0
@@ -39287,6 +50560,14 @@
#define A_LE_DB_MASK_IPV6 0x19ca0
#define A_LE_DB_DBG_MATCH_DATA 0x19ca0
+#define A_LE_CMM_CONFIG 0x19cc0
+#define A_LE_CACHE_DBG 0x19cc4
+#define A_LE_CACHE_WR_ALL_CNT 0x19cc8
+#define A_LE_CACHE_WR_HIT_CNT 0x19ccc
+#define A_LE_CACHE_RD_ALL_CNT 0x19cd0
+#define A_LE_CACHE_RD_HIT_CNT 0x19cd4
+#define A_LE_CACHE_MC_WR_CNT 0x19cd8
+#define A_LE_CACHE_MC_RD_CNT 0x19cdc
#define A_LE_DB_REQ_RSP_CNT 0x19ce4
#define S_T4_RSPCNT 16
@@ -39309,6 +50590,14 @@
#define V_REQCNTLE(x) ((x) << S_REQCNTLE)
#define G_REQCNTLE(x) (((x) >> S_REQCNTLE) & M_REQCNTLE)
+#define A_LE_IND_ADDR 0x19ce8
+
+#define S_T7_1_ADDR 0
+#define M_T7_1_ADDR 0xffU
+#define V_T7_1_ADDR(x) ((x) << S_T7_1_ADDR)
+#define G_T7_1_ADDR(x) (((x) >> S_T7_1_ADDR) & M_T7_1_ADDR)
+
+#define A_LE_IND_DATA 0x19cec
#define A_LE_DB_DBGI_CONFIG 0x19cf0
#define S_DBGICMDPERR 31
@@ -39436,6 +50725,11 @@
#define V_T6_HASHTBLMEMCRCERR(x) ((x) << S_T6_HASHTBLMEMCRCERR)
#define F_T6_HASHTBLMEMCRCERR V_T6_HASHTBLMEMCRCERR(1U)
+#define S_T7_BKCHKPERIOD 22
+#define M_T7_BKCHKPERIOD 0xffU
+#define V_T7_BKCHKPERIOD(x) ((x) << S_T7_BKCHKPERIOD)
+#define G_T7_BKCHKPERIOD(x) (((x) >> S_T7_BKCHKPERIOD) & M_T7_BKCHKPERIOD)
+
#define A_LE_SPARE 0x19cfc
#define A_LE_DB_DBGI_REQ_DATA 0x19d00
#define A_LE_DB_DBGI_REQ_MASK 0x19d50
@@ -39551,6 +50845,7 @@
#define V_HASH_TID_BASE(x) ((x) << S_HASH_TID_BASE)
#define G_HASH_TID_BASE(x) (((x) >> S_HASH_TID_BASE) & M_HASH_TID_BASE)
+#define A_T7_LE_DB_HASH_TID_BASE 0x19df8
#define A_LE_PERR_INJECT 0x19dfc
#define S_LEMEMSEL 1
@@ -39573,6 +50868,7 @@
#define A_LE_HASH_MASK_GEN_IPV6 0x19eb0
#define A_LE_HASH_MASK_GEN_IPV6T5 0x19eb4
#define A_T6_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
+#define A_T7_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
#define A_LE_HASH_MASK_CMP_IPV4 0x19ee0
#define A_LE_HASH_MASK_CMP_IPV4T5 0x19ee4
#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 0x19ee4
@@ -39677,6 +50973,9 @@
#define A_LE_TCAM_DEBUG_LA_DATA 0x19f4c
#define A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 0x19f90
#define A_LE_DB_SECOND_CMP_HASH_MASK_IPV4 0x19fa4
+#define A_LE_TCAM_BIST_CTRL 0x19fb0
+#define A_LE_TCAM_BIST_CB_PASS 0x19fb4
+#define A_LE_TCAM_BIST_CB_BUSY 0x19fbc
#define A_LE_HASH_COLLISION 0x19fc4
#define A_LE_GLOBAL_COLLISION 0x19fc8
#define A_LE_FULL_CNT_COLLISION 0x19fcc
@@ -39686,6 +50985,38 @@
#define A_LE_RSP_DEBUG_LA_DATAT5 0x19fdc
#define A_LE_RSP_DEBUG_LA_WRPTRT5 0x19fe0
#define A_LE_DEBUG_LA_SEL_DATA 0x19fe4
+#define A_LE_TCAM_NEG_CTRL0 0x0
+#define A_LE_TCAM_NEG_CTRL1 0x1
+#define A_LE_TCAM_NEG_CTRL2 0x2
+#define A_LE_TCAM_NEG_CTRL3 0x3
+#define A_LE_TCAM_NEG_CTRL4 0x4
+#define A_LE_TCAM_NEG_CTRL5 0x5
+#define A_LE_TCAM_NEG_CTRL6 0x6
+#define A_LE_TCAM_NEG_CTRL7 0x7
+#define A_LE_TCAM_NEG_CTRL8 0x8
+#define A_LE_TCAM_NEG_CTRL9 0x9
+#define A_LE_TCAM_NEG_CTRL10 0xa
+#define A_LE_TCAM_NEG_CTRL11 0xb
+#define A_LE_TCAM_NEG_CTRL12 0xc
+#define A_LE_TCAM_NEG_CTRL13 0xd
+#define A_LE_TCAM_NEG_CTRL14 0xe
+#define A_LE_TCAM_NEG_CTRL15 0xf
+#define A_LE_TCAM_NEG_CTRL16 0x10
+#define A_LE_TCAM_NEG_CTRL17 0x11
+#define A_LE_TCAM_NEG_CTRL18 0x12
+#define A_LE_TCAM_NEG_CTRL19 0x13
+#define A_LE_TCAM_NEG_CTRL20 0x14
+#define A_LE_TCAM_NEG_CTRL21 0x15
+#define A_LE_TCAM_NEG_CTRL22 0x16
+#define A_LE_TCAM_NEG_CTRL23 0x17
+#define A_LE_TCAM_NEG_CTRL24 0x18
+#define A_LE_TCAM_NEG_CTRL25 0x19
+#define A_LE_TCAM_NEG_CTRL26 0x1a
+#define A_LE_TCAM_NEG_CTRL27 0x1b
+#define A_LE_TCAM_NEG_CTRL28 0x1c
+#define A_LE_TCAM_NEG_CTRL29 0x1d
+#define A_LE_TCAM_NEG_CTRL30 0x1e
+#define A_LE_TCAM_NEG_CTRL31 0x1f
/* registers for module NCSI */
#define NCSI_BASE_ADDR 0x1a000
@@ -39735,6 +51066,10 @@
#define V_TX_BYTE_SWAP(x) ((x) << S_TX_BYTE_SWAP)
#define F_TX_BYTE_SWAP V_TX_BYTE_SWAP(1U)
+#define S_XGMAC0_EN 0
+#define V_XGMAC0_EN(x) ((x) << S_XGMAC0_EN)
+#define F_XGMAC0_EN V_XGMAC0_EN(1U)
+
#define A_NCSI_RST_CTRL 0x1a004
#define S_MAC_REF_RST 2
@@ -39991,6 +51326,10 @@
#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
#define F_RXFIFO_PRTY_ERR V_RXFIFO_PRTY_ERR(1U)
+#define S_CIM2NC_PERR 9
+#define V_CIM2NC_PERR(x) ((x) << S_CIM2NC_PERR)
+#define F_CIM2NC_PERR V_CIM2NC_PERR(1U)
+
#define A_NCSI_INT_CAUSE 0x1a0d8
#define A_NCSI_STATUS 0x1a0dc
@@ -40048,6 +51387,12 @@
#define F_MCSIMELSEL V_MCSIMELSEL(1U)
#define A_NCSI_PERR_ENABLE 0x1a0f8
+#define A_NCSI_MODE_SEL 0x1a0fc
+
+#define S_XGMAC_MODE 0
+#define V_XGMAC_MODE(x) ((x) << S_XGMAC_MODE)
+#define F_XGMAC_MODE V_XGMAC_MODE(1U)
+
#define A_NCSI_MACB_NETWORK_CTRL 0x1a100
#define S_TXSNDZEROPAUSE 12
@@ -40550,6 +51895,832 @@
#define V_DESREV(x) ((x) << S_DESREV)
#define G_DESREV(x) (((x) >> S_DESREV) & M_DESREV)
+#define A_NCSI_TX_CTRL 0x1a200
+
+#define S_T7_TXEN 0
+#define V_T7_TXEN(x) ((x) << S_T7_TXEN)
+#define F_T7_TXEN V_T7_TXEN(1U)
+
+#define A_NCSI_TX_CFG 0x1a204
+#define A_NCSI_TX_PAUSE_QUANTA 0x1a208
+#define A_NCSI_RX_CTRL 0x1a20c
+#define A_NCSI_RX_CFG 0x1a210
+#define A_NCSI_RX_HASH_LOW 0x1a214
+#define A_NCSI_RX_HASH_HIGH 0x1a218
+#define A_NCSI_RX_EXACT_MATCH_LOW_1 0x1a21c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_1 0x1a220
+#define A_NCSI_RX_EXACT_MATCH_LOW_2 0x1a224
+#define A_NCSI_RX_EXACT_MATCH_HIGH_2 0x1a228
+#define A_NCSI_RX_EXACT_MATCH_LOW_3 0x1a22c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_3 0x1a230
+#define A_NCSI_RX_EXACT_MATCH_LOW_4 0x1a234
+#define A_NCSI_RX_EXACT_MATCH_HIGH_4 0x1a238
+#define A_NCSI_RX_EXACT_MATCH_LOW_5 0x1a23c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_5 0x1a240
+#define A_NCSI_RX_EXACT_MATCH_LOW_6 0x1a244
+#define A_NCSI_RX_EXACT_MATCH_HIGH_6 0x1a248
+#define A_NCSI_RX_EXACT_MATCH_LOW_7 0x1a24c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_7 0x1a250
+#define A_NCSI_RX_EXACT_MATCH_LOW_8 0x1a254
+#define A_NCSI_RX_EXACT_MATCH_HIGH_8 0x1a258
+#define A_NCSI_RX_TYPE_MATCH_1 0x1a25c
+#define A_NCSI_RX_TYPE_MATCH_2 0x1a260
+#define A_NCSI_RX_TYPE_MATCH_3 0x1a264
+#define A_NCSI_RX_TYPE_MATCH_4 0x1a268
+#define A_NCSI_INT_STATUS 0x1a26c
+#define A_NCSI_XGM_INT_MASK 0x1a270
+#define A_NCSI_XGM_INT_ENABLE 0x1a274
+#define A_NCSI_XGM_INT_DISABLE 0x1a278
+#define A_NCSI_TX_PAUSE_TIMER 0x1a27c
+#define A_NCSI_STAT_CTRL 0x1a280
+#define A_NCSI_RXFIFO_CFG 0x1a284
+
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFO_FULL 30
+#define V_RXFIFO_FULL(x) ((x) << S_RXFIFO_FULL)
+#define F_RXFIFO_FULL V_RXFIFO_FULL(1U)
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfffU
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfffU
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_FORCEDPAUSE 4
+#define V_FORCEDPAUSE(x) ((x) << S_FORCEDPAUSE)
+#define F_FORCEDPAUSE V_FORCEDPAUSE(1U)
+
+#define S_EXTERNLOOPBACK 3
+#define V_EXTERNLOOPBACK(x) ((x) << S_EXTERNLOOPBACK)
+#define F_EXTERNLOOPBACK V_EXTERNLOOPBACK(1U)
+
+#define S_RXBYTESWAP 2
+#define V_RXBYTESWAP(x) ((x) << S_RXBYTESWAP)
+#define F_RXBYTESWAP V_RXBYTESWAP(1U)
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
+#define A_NCSI_TXFIFO_CFG 0x1a288
+
+#define S_T7_TXFIFO_EMPTY 31
+#define V_T7_TXFIFO_EMPTY(x) ((x) << S_T7_TXFIFO_EMPTY)
+#define F_T7_TXFIFO_EMPTY V_T7_TXFIFO_EMPTY(1U)
+
+#define S_T7_TXFIFO_FULL 30
+#define V_T7_TXFIFO_FULL(x) ((x) << S_T7_TXFIFO_FULL)
+#define F_T7_TXFIFO_FULL V_T7_TXFIFO_FULL(1U)
+
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define S_TXIPG 13
+#define M_TXIPG 0xffU
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ffU
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define G_TXFIFOTHRESH(x) (((x) >> S_TXFIFOTHRESH) & M_TXFIFOTHRESH)
+
+#define S_INTERNLOOPBACK 3
+#define V_INTERNLOOPBACK(x) ((x) << S_INTERNLOOPBACK)
+#define F_INTERNLOOPBACK V_INTERNLOOPBACK(1U)
+
+#define S_TXBYTESWAP 2
+#define V_TXBYTESWAP(x) ((x) << S_TXBYTESWAP)
+#define F_TXBYTESWAP V_TXBYTESWAP(1U)
+
+#define S_DISCRC 1
+#define V_DISCRC(x) ((x) << S_DISCRC)
+#define F_DISCRC V_DISCRC(1U)
+
+#define S_DISPREAMBLE 0
+#define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE)
+#define F_DISPREAMBLE V_DISPREAMBLE(1U)
+
+#define A_NCSI_SLOW_TIMER 0x1a28c
+
+#define S_PAUSESLOWTIMEREN 31
+#define V_PAUSESLOWTIMEREN(x) ((x) << S_PAUSESLOWTIMEREN)
+#define F_PAUSESLOWTIMEREN V_PAUSESLOWTIMEREN(1U)
+
+#define S_PAUSESLOWTIMER 0
+#define M_PAUSESLOWTIMER 0xfffffU
+#define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER)
+#define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER)
+
+#define A_NCSI_PAUSE_TIMER 0x1a290
+
+#define S_PAUSETIMER 0
+#define M_PAUSETIMER 0xfffffU
+#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER)
+#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER)
+
+#define A_NCSI_XAUI_PCS_TEST 0x1a294
+
+#define S_TESTPATTERN 1
+#define M_TESTPATTERN 0x3U
+#define V_TESTPATTERN(x) ((x) << S_TESTPATTERN)
+#define G_TESTPATTERN(x) (((x) >> S_TESTPATTERN) & M_TESTPATTERN)
+
+#define S_ENTEST 0
+#define V_ENTEST(x) ((x) << S_ENTEST)
+#define F_ENTEST V_ENTEST(1U)
+
+#define A_NCSI_RGMII_CTRL 0x1a298
+
+#define S_PHALIGNFIFOTHRESH 1
+#define M_PHALIGNFIFOTHRESH 0x3U
+#define V_PHALIGNFIFOTHRESH(x) ((x) << S_PHALIGNFIFOTHRESH)
+#define G_PHALIGNFIFOTHRESH(x) (((x) >> S_PHALIGNFIFOTHRESH) & M_PHALIGNFIFOTHRESH)
+
+#define S_TXCLK90SHIFT 0
+#define V_TXCLK90SHIFT(x) ((x) << S_TXCLK90SHIFT)
+#define F_TXCLK90SHIFT V_TXCLK90SHIFT(1U)
+
+#define A_NCSI_RGMII_IMP 0x1a29c
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define S_IMPSETUPDATE 6
+#define V_IMPSETUPDATE(x) ((x) << S_IMPSETUPDATE)
+#define F_IMPSETUPDATE V_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7U
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+#define G_RGMIIIMPPD(x) (((x) >> S_RGMIIIMPPD) & M_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7U
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+#define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU)
+
+#define A_NCSI_RX_MAX_PKT_SIZE 0x1a2a8
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fffU
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENERRORGATHER 16
+#define V_RXENERRORGATHER(x) ((x) << S_RXENERRORGATHER)
+#define F_RXENERRORGATHER V_RXENERRORGATHER(1U)
+
+#define S_RXENSINGLEFLIT 15
+#define V_RXENSINGLEFLIT(x) ((x) << S_RXENSINGLEFLIT)
+#define F_RXENSINGLEFLIT V_RXENSINGLEFLIT(1U)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fffU
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_NCSI_RESET_CTRL 0x1a2ac
+
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
+#define A_NCSI_XAUI1G_CTRL 0x1a2b0
+
+#define S_XAUI1GLINKID 0
+#define M_XAUI1GLINKID 0x3U
+#define V_XAUI1GLINKID(x) ((x) << S_XAUI1GLINKID)
+#define G_XAUI1GLINKID(x) (((x) >> S_XAUI1GLINKID) & M_XAUI1GLINKID)
+
+#define A_NCSI_SERDES_LANE_CTRL 0x1a2b4
+
+#define S_LANEREVERSAL 8
+#define V_LANEREVERSAL(x) ((x) << S_LANEREVERSAL)
+#define F_LANEREVERSAL V_LANEREVERSAL(1U)
+
+#define S_TXPOLARITY 4
+#define M_TXPOLARITY 0xfU
+#define V_TXPOLARITY(x) ((x) << S_TXPOLARITY)
+#define G_TXPOLARITY(x) (((x) >> S_TXPOLARITY) & M_TXPOLARITY)
+
+#define S_RXPOLARITY 0
+#define M_RXPOLARITY 0xfU
+#define V_RXPOLARITY(x) ((x) << S_RXPOLARITY)
+#define G_RXPOLARITY(x) (((x) >> S_RXPOLARITY) & M_RXPOLARITY)
+
+#define A_NCSI_PORT_CFG 0x1a2b8
+
+#define S_NCSI_SAFESPEEDCHANGE 4
+#define V_NCSI_SAFESPEEDCHANGE(x) ((x) << S_NCSI_SAFESPEEDCHANGE)
+#define F_NCSI_SAFESPEEDCHANGE V_NCSI_SAFESPEEDCHANGE(1U)
+
+#define S_NCSI_CLKDIVRESET_ 3
+#define V_NCSI_CLKDIVRESET_(x) ((x) << S_NCSI_CLKDIVRESET_)
+#define F_NCSI_CLKDIVRESET_ V_NCSI_CLKDIVRESET_(1U)
+
+#define S_NCSI_PORTSPEED 1
+#define M_NCSI_PORTSPEED 0x3U
+#define V_NCSI_PORTSPEED(x) ((x) << S_NCSI_PORTSPEED)
+#define G_NCSI_PORTSPEED(x) (((x) >> S_NCSI_PORTSPEED) & M_NCSI_PORTSPEED)
+
+#define S_NCSI_ENRGMII 0
+#define V_NCSI_ENRGMII(x) ((x) << S_NCSI_ENRGMII)
+#define F_NCSI_ENRGMII V_NCSI_ENRGMII(1U)
+
+#define A_NCSI_EPIO_DATA0 0x1a2c0
+#define A_NCSI_EPIO_DATA1 0x1a2c4
+#define A_NCSI_EPIO_DATA2 0x1a2c8
+#define A_NCSI_EPIO_DATA3 0x1a2cc
+#define A_NCSI_EPIO_OP 0x1a2d0
+
+#define S_PIO_READY 31
+#define V_PIO_READY(x) ((x) << S_PIO_READY)
+#define F_PIO_READY V_PIO_READY(1U)
+
+#define S_PIO_WRRD 24
+#define V_PIO_WRRD(x) ((x) << S_PIO_WRRD)
+#define F_PIO_WRRD V_PIO_WRRD(1U)
+
+#define S_PIO_ADDRESS 0
+#define M_PIO_ADDRESS 0xffU
+#define V_PIO_ADDRESS(x) ((x) << S_PIO_ADDRESS)
+#define G_PIO_ADDRESS(x) (((x) >> S_PIO_ADDRESS) & M_PIO_ADDRESS)
+
+#define A_NCSI_XGMAC0_INT_ENABLE 0x1a2d4
+
+#define S_XAUIPCSDECERR 24
+#define V_XAUIPCSDECERR(x) ((x) << S_XAUIPCSDECERR)
+#define F_XAUIPCSDECERR V_XAUIPCSDECERR(1U)
+
+#define S_RGMIIRXFIFOOVERFLOW 23
+#define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW)
+#define F_RGMIIRXFIFOOVERFLOW V_RGMIIRXFIFOOVERFLOW(1U)
+
+#define S_RGMIIRXFIFOUNDERFLOW 22
+#define V_RGMIIRXFIFOUNDERFLOW(x) ((x) << S_RGMIIRXFIFOUNDERFLOW)
+#define F_RGMIIRXFIFOUNDERFLOW V_RGMIIRXFIFOUNDERFLOW(1U)
+
+#define S_RXPKTSIZEERROR 21
+#define V_RXPKTSIZEERROR(x) ((x) << S_RXPKTSIZEERROR)
+#define F_RXPKTSIZEERROR V_RXPKTSIZEERROR(1U)
+
+#define S_WOLPATDETECTED 20
+#define V_WOLPATDETECTED(x) ((x) << S_WOLPATDETECTED)
+#define F_WOLPATDETECTED V_WOLPATDETECTED(1U)
+
+#define S_T7_TXFIFO_PRTY_ERR 17
+#define M_T7_TXFIFO_PRTY_ERR 0x7U
+#define V_T7_TXFIFO_PRTY_ERR(x) ((x) << S_T7_TXFIFO_PRTY_ERR)
+#define G_T7_TXFIFO_PRTY_ERR(x) (((x) >> S_T7_TXFIFO_PRTY_ERR) & M_T7_TXFIFO_PRTY_ERR)
+
+#define S_T7_RXFIFO_PRTY_ERR 14
+#define M_T7_RXFIFO_PRTY_ERR 0x7U
+#define V_T7_RXFIFO_PRTY_ERR(x) ((x) << S_T7_RXFIFO_PRTY_ERR)
+#define G_T7_RXFIFO_PRTY_ERR(x) (((x) >> S_T7_RXFIFO_PRTY_ERR) & M_T7_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDESBISTERR 8
+#define M_SERDESBISTERR 0xfU
+#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR)
+#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR)
+
+#define S_SERDESLOWSIGCHANGE 4
+#define M_SERDESLOWSIGCHANGE 0xfU
+#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE)
+#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_RGMIILINKSTSCHANGE 1
+#define V_RGMIILINKSTSCHANGE(x) ((x) << S_RGMIILINKSTSCHANGE)
+#define F_RGMIILINKSTSCHANGE V_RGMIILINKSTSCHANGE(1U)
+
+#define S_T7_XGM_INT 0
+#define V_T7_XGM_INT(x) ((x) << S_T7_XGM_INT)
+#define F_T7_XGM_INT V_T7_XGM_INT(1U)
+
+#define A_NCSI_XGMAC0_INT_CAUSE 0x1a2d8
+#define A_NCSI_XAUI_ACT_CTRL 0x1a2dc
+#define A_NCSI_SERDES_CTRL0 0x1a2e0
+
+#define S_INTSERLPBK3 27
+#define V_INTSERLPBK3(x) ((x) << S_INTSERLPBK3)
+#define F_INTSERLPBK3 V_INTSERLPBK3(1U)
+
+#define S_INTSERLPBK2 26
+#define V_INTSERLPBK2(x) ((x) << S_INTSERLPBK2)
+#define F_INTSERLPBK2 V_INTSERLPBK2(1U)
+
+#define S_INTSERLPBK1 25
+#define V_INTSERLPBK1(x) ((x) << S_INTSERLPBK1)
+#define F_INTSERLPBK1 V_INTSERLPBK1(1U)
+
+#define S_INTSERLPBK0 24
+#define V_INTSERLPBK0(x) ((x) << S_INTSERLPBK0)
+#define F_INTSERLPBK0 V_INTSERLPBK0(1U)
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define S_PW23 12
+#define M_PW23 0x3U
+#define V_PW23(x) ((x) << S_PW23)
+#define G_PW23(x) (((x) >> S_PW23) & M_PW23)
+
+#define S_PW01 10
+#define M_PW01 0x3U
+#define V_PW01(x) ((x) << S_PW01)
+#define G_PW01(x) (((x) >> S_PW01) & M_PW01)
+
+#define S_DEQ 6
+#define M_DEQ 0xfU
+#define V_DEQ(x) ((x) << S_DEQ)
+#define G_DEQ(x) (((x) >> S_DEQ) & M_DEQ)
+
+#define S_DTX 2
+#define M_DTX 0xfU
+#define V_DTX(x) ((x) << S_DTX)
+#define G_DTX(x) (((x) >> S_DTX) & M_DTX)
+
+#define S_LODRV 1
+#define V_LODRV(x) ((x) << S_LODRV)
+#define F_LODRV V_LODRV(1U)
+
+#define S_HIDRV 0
+#define V_HIDRV(x) ((x) << S_HIDRV)
+#define F_HIDRV V_HIDRV(1U)
+
+#define A_NCSI_SERDES_CTRL1 0x1a2e4
+
+#define S_FMOFFSET3 19
+#define M_FMOFFSET3 0x1fU
+#define V_FMOFFSET3(x) ((x) << S_FMOFFSET3)
+#define G_FMOFFSET3(x) (((x) >> S_FMOFFSET3) & M_FMOFFSET3)
+
+#define S_FMOFFSETEN3 18
+#define V_FMOFFSETEN3(x) ((x) << S_FMOFFSETEN3)
+#define F_FMOFFSETEN3 V_FMOFFSETEN3(1U)
+
+#define S_FMOFFSET2 13
+#define M_FMOFFSET2 0x1fU
+#define V_FMOFFSET2(x) ((x) << S_FMOFFSET2)
+#define G_FMOFFSET2(x) (((x) >> S_FMOFFSET2) & M_FMOFFSET2)
+
+#define S_FMOFFSETEN2 12
+#define V_FMOFFSETEN2(x) ((x) << S_FMOFFSETEN2)
+#define F_FMOFFSETEN2 V_FMOFFSETEN2(1U)
+
+#define S_FMOFFSET1 7
+#define M_FMOFFSET1 0x1fU
+#define V_FMOFFSET1(x) ((x) << S_FMOFFSET1)
+#define G_FMOFFSET1(x) (((x) >> S_FMOFFSET1) & M_FMOFFSET1)
+
+#define S_FMOFFSETEN1 6
+#define V_FMOFFSETEN1(x) ((x) << S_FMOFFSETEN1)
+#define F_FMOFFSETEN1 V_FMOFFSETEN1(1U)
+
+#define S_FMOFFSET0 1
+#define M_FMOFFSET0 0x1fU
+#define V_FMOFFSET0(x) ((x) << S_FMOFFSET0)
+#define G_FMOFFSET0(x) (((x) >> S_FMOFFSET0) & M_FMOFFSET0)
+
+#define S_FMOFFSETEN0 0
+#define V_FMOFFSETEN0(x) ((x) << S_FMOFFSETEN0)
+#define F_FMOFFSETEN0 V_FMOFFSETEN0(1U)
+
+#define A_NCSI_SERDES_CTRL2 0x1a2e8
+
+#define S_DNIN3 11
+#define V_DNIN3(x) ((x) << S_DNIN3)
+#define F_DNIN3 V_DNIN3(1U)
+
+#define S_UPIN3 10
+#define V_UPIN3(x) ((x) << S_UPIN3)
+#define F_UPIN3 V_UPIN3(1U)
+
+#define S_RXSLAVE3 9
+#define V_RXSLAVE3(x) ((x) << S_RXSLAVE3)
+#define F_RXSLAVE3 V_RXSLAVE3(1U)
+
+#define S_DNIN2 8
+#define V_DNIN2(x) ((x) << S_DNIN2)
+#define F_DNIN2 V_DNIN2(1U)
+
+#define S_UPIN2 7
+#define V_UPIN2(x) ((x) << S_UPIN2)
+#define F_UPIN2 V_UPIN2(1U)
+
+#define S_RXSLAVE2 6
+#define V_RXSLAVE2(x) ((x) << S_RXSLAVE2)
+#define F_RXSLAVE2 V_RXSLAVE2(1U)
+
+#define S_DNIN1 5
+#define V_DNIN1(x) ((x) << S_DNIN1)
+#define F_DNIN1 V_DNIN1(1U)
+
+#define S_UPIN1 4
+#define V_UPIN1(x) ((x) << S_UPIN1)
+#define F_UPIN1 V_UPIN1(1U)
+
+#define S_RXSLAVE1 3
+#define V_RXSLAVE1(x) ((x) << S_RXSLAVE1)
+#define F_RXSLAVE1 V_RXSLAVE1(1U)
+
+#define S_DNIN0 2
+#define V_DNIN0(x) ((x) << S_DNIN0)
+#define F_DNIN0 V_DNIN0(1U)
+
+#define S_UPIN0 1
+#define V_UPIN0(x) ((x) << S_UPIN0)
+#define F_UPIN0 V_UPIN0(1U)
+
+#define S_RXSLAVE0 0
+#define V_RXSLAVE0(x) ((x) << S_RXSLAVE0)
+#define F_RXSLAVE0 V_RXSLAVE0(1U)
+
+#define A_NCSI_SERDES_CTRL3 0x1a2ec
+
+#define S_EXTBISTCHKERRCLR3 31
+#define V_EXTBISTCHKERRCLR3(x) ((x) << S_EXTBISTCHKERRCLR3)
+#define F_EXTBISTCHKERRCLR3 V_EXTBISTCHKERRCLR3(1U)
+
+#define S_EXTBISTCHKEN3 30
+#define V_EXTBISTCHKEN3(x) ((x) << S_EXTBISTCHKEN3)
+#define F_EXTBISTCHKEN3 V_EXTBISTCHKEN3(1U)
+
+#define S_EXTBISTGENEN3 29
+#define V_EXTBISTGENEN3(x) ((x) << S_EXTBISTGENEN3)
+#define F_EXTBISTGENEN3 V_EXTBISTGENEN3(1U)
+
+#define S_EXTBISTPAT3 26
+#define M_EXTBISTPAT3 0x7U
+#define V_EXTBISTPAT3(x) ((x) << S_EXTBISTPAT3)
+#define G_EXTBISTPAT3(x) (((x) >> S_EXTBISTPAT3) & M_EXTBISTPAT3)
+
+#define S_EXTPARRESET3 25
+#define V_EXTPARRESET3(x) ((x) << S_EXTPARRESET3)
+#define F_EXTPARRESET3 V_EXTPARRESET3(1U)
+
+#define S_EXTPARLPBK3 24
+#define V_EXTPARLPBK3(x) ((x) << S_EXTPARLPBK3)
+#define F_EXTPARLPBK3 V_EXTPARLPBK3(1U)
+
+#define S_EXTBISTCHKERRCLR2 23
+#define V_EXTBISTCHKERRCLR2(x) ((x) << S_EXTBISTCHKERRCLR2)
+#define F_EXTBISTCHKERRCLR2 V_EXTBISTCHKERRCLR2(1U)
+
+#define S_EXTBISTCHKEN2 22
+#define V_EXTBISTCHKEN2(x) ((x) << S_EXTBISTCHKEN2)
+#define F_EXTBISTCHKEN2 V_EXTBISTCHKEN2(1U)
+
+#define S_EXTBISTGENEN2 21
+#define V_EXTBISTGENEN2(x) ((x) << S_EXTBISTGENEN2)
+#define F_EXTBISTGENEN2 V_EXTBISTGENEN2(1U)
+
+#define S_EXTBISTPAT2 18
+#define M_EXTBISTPAT2 0x7U
+#define V_EXTBISTPAT2(x) ((x) << S_EXTBISTPAT2)
+#define G_EXTBISTPAT2(x) (((x) >> S_EXTBISTPAT2) & M_EXTBISTPAT2)
+
+#define S_EXTPARRESET2 17
+#define V_EXTPARRESET2(x) ((x) << S_EXTPARRESET2)
+#define F_EXTPARRESET2 V_EXTPARRESET2(1U)
+
+#define S_EXTPARLPBK2 16
+#define V_EXTPARLPBK2(x) ((x) << S_EXTPARLPBK2)
+#define F_EXTPARLPBK2 V_EXTPARLPBK2(1U)
+
+#define S_EXTBISTCHKERRCLR1 15
+#define V_EXTBISTCHKERRCLR1(x) ((x) << S_EXTBISTCHKERRCLR1)
+#define F_EXTBISTCHKERRCLR1 V_EXTBISTCHKERRCLR1(1U)
+
+#define S_EXTBISTCHKEN1 14
+#define V_EXTBISTCHKEN1(x) ((x) << S_EXTBISTCHKEN1)
+#define F_EXTBISTCHKEN1 V_EXTBISTCHKEN1(1U)
+
+#define S_EXTBISTGENEN1 13
+#define V_EXTBISTGENEN1(x) ((x) << S_EXTBISTGENEN1)
+#define F_EXTBISTGENEN1 V_EXTBISTGENEN1(1U)
+
+#define S_EXTBISTPAT1 10
+#define M_EXTBISTPAT1 0x7U
+#define V_EXTBISTPAT1(x) ((x) << S_EXTBISTPAT1)
+#define G_EXTBISTPAT1(x) (((x) >> S_EXTBISTPAT1) & M_EXTBISTPAT1)
+
+#define S_EXTPARRESET1 9
+#define V_EXTPARRESET1(x) ((x) << S_EXTPARRESET1)
+#define F_EXTPARRESET1 V_EXTPARRESET1(1U)
+
+#define S_EXTPARLPBK1 8
+#define V_EXTPARLPBK1(x) ((x) << S_EXTPARLPBK1)
+#define F_EXTPARLPBK1 V_EXTPARLPBK1(1U)
+
+#define S_EXTBISTCHKERRCLR0 7
+#define V_EXTBISTCHKERRCLR0(x) ((x) << S_EXTBISTCHKERRCLR0)
+#define F_EXTBISTCHKERRCLR0 V_EXTBISTCHKERRCLR0(1U)
+
+#define S_EXTBISTCHKEN0 6
+#define V_EXTBISTCHKEN0(x) ((x) << S_EXTBISTCHKEN0)
+#define F_EXTBISTCHKEN0 V_EXTBISTCHKEN0(1U)
+
+#define S_EXTBISTGENEN0 5
+#define V_EXTBISTGENEN0(x) ((x) << S_EXTBISTGENEN0)
+#define F_EXTBISTGENEN0 V_EXTBISTGENEN0(1U)
+
+#define S_EXTBISTPAT0 2
+#define M_EXTBISTPAT0 0x7U
+#define V_EXTBISTPAT0(x) ((x) << S_EXTBISTPAT0)
+#define G_EXTBISTPAT0(x) (((x) >> S_EXTBISTPAT0) & M_EXTBISTPAT0)
+
+#define S_EXTPARRESET0 1
+#define V_EXTPARRESET0(x) ((x) << S_EXTPARRESET0)
+#define F_EXTPARRESET0 V_EXTPARRESET0(1U)
+
+#define S_EXTPARLPBK0 0
+#define V_EXTPARLPBK0(x) ((x) << S_EXTPARLPBK0)
+#define F_EXTPARLPBK0 V_EXTPARLPBK0(1U)
+
+#define A_NCSI_SERDES_STAT0 0x1a2f0
+
+#define S_EXTBISTCHKERRCNT0 4
+#define M_EXTBISTCHKERRCNT0 0xffffffU
+#define V_EXTBISTCHKERRCNT0(x) ((x) << S_EXTBISTCHKERRCNT0)
+#define G_EXTBISTCHKERRCNT0(x) (((x) >> S_EXTBISTCHKERRCNT0) & M_EXTBISTCHKERRCNT0)
+
+#define S_EXTBISTCHKFMD0 3
+#define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0)
+#define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U)
+
+#define S_LOWSIGFORCEEN0 2
+#define V_LOWSIGFORCEEN0(x) ((x) << S_LOWSIGFORCEEN0)
+#define F_LOWSIGFORCEEN0 V_LOWSIGFORCEEN0(1U)
+
+#define S_LOWSIGFORCEVALUE0 1
+#define V_LOWSIGFORCEVALUE0(x) ((x) << S_LOWSIGFORCEVALUE0)
+#define F_LOWSIGFORCEVALUE0 V_LOWSIGFORCEVALUE0(1U)
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_NCSI_SERDES_STAT1 0x1a2f4
+
+#define S_EXTBISTCHKERRCNT1 4
+#define M_EXTBISTCHKERRCNT1 0xffffffU
+#define V_EXTBISTCHKERRCNT1(x) ((x) << S_EXTBISTCHKERRCNT1)
+#define G_EXTBISTCHKERRCNT1(x) (((x) >> S_EXTBISTCHKERRCNT1) & M_EXTBISTCHKERRCNT1)
+
+#define S_EXTBISTCHKFMD1 3
+#define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1)
+#define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U)
+
+#define S_LOWSIGFORCEEN1 2
+#define V_LOWSIGFORCEEN1(x) ((x) << S_LOWSIGFORCEEN1)
+#define F_LOWSIGFORCEEN1 V_LOWSIGFORCEEN1(1U)
+
+#define S_LOWSIGFORCEVALUE1 1
+#define V_LOWSIGFORCEVALUE1(x) ((x) << S_LOWSIGFORCEVALUE1)
+#define F_LOWSIGFORCEVALUE1 V_LOWSIGFORCEVALUE1(1U)
+
+#define S_LOWSIG1 0
+#define V_LOWSIG1(x) ((x) << S_LOWSIG1)
+#define F_LOWSIG1 V_LOWSIG1(1U)
+
+#define A_NCSI_SERDES_STAT2 0x1a2f8
+
+#define S_EXTBISTCHKERRCNT2 4
+#define M_EXTBISTCHKERRCNT2 0xffffffU
+#define V_EXTBISTCHKERRCNT2(x) ((x) << S_EXTBISTCHKERRCNT2)
+#define G_EXTBISTCHKERRCNT2(x) (((x) >> S_EXTBISTCHKERRCNT2) & M_EXTBISTCHKERRCNT2)
+
+#define S_EXTBISTCHKFMD2 3
+#define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2)
+#define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U)
+
+#define S_LOWSIGFORCEEN2 2
+#define V_LOWSIGFORCEEN2(x) ((x) << S_LOWSIGFORCEEN2)
+#define F_LOWSIGFORCEEN2 V_LOWSIGFORCEEN2(1U)
+
+#define S_LOWSIGFORCEVALUE2 1
+#define V_LOWSIGFORCEVALUE2(x) ((x) << S_LOWSIGFORCEVALUE2)
+#define F_LOWSIGFORCEVALUE2 V_LOWSIGFORCEVALUE2(1U)
+
+#define S_LOWSIG2 0
+#define V_LOWSIG2(x) ((x) << S_LOWSIG2)
+#define F_LOWSIG2 V_LOWSIG2(1U)
+
+#define A_NCSI_SERDES_STAT3 0x1a2fc
+
+#define S_EXTBISTCHKERRCNT3 4
+#define M_EXTBISTCHKERRCNT3 0xffffffU
+#define V_EXTBISTCHKERRCNT3(x) ((x) << S_EXTBISTCHKERRCNT3)
+#define G_EXTBISTCHKERRCNT3(x) (((x) >> S_EXTBISTCHKERRCNT3) & M_EXTBISTCHKERRCNT3)
+
+#define S_EXTBISTCHKFMD3 3
+#define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3)
+#define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U)
+
+#define S_LOWSIGFORCEEN3 2
+#define V_LOWSIGFORCEEN3(x) ((x) << S_LOWSIGFORCEEN3)
+#define F_LOWSIGFORCEEN3 V_LOWSIGFORCEEN3(1U)
+
+#define S_LOWSIGFORCEVALUE3 1
+#define V_LOWSIGFORCEVALUE3(x) ((x) << S_LOWSIGFORCEVALUE3)
+#define F_LOWSIGFORCEVALUE3 V_LOWSIGFORCEVALUE3(1U)
+
+#define S_LOWSIG3 0
+#define V_LOWSIG3(x) ((x) << S_LOWSIG3)
+#define F_LOWSIG3 V_LOWSIG3(1U)
+
+#define A_NCSI_STAT_TX_BYTE_LOW 0x1a300
+#define A_NCSI_STAT_TX_BYTE_HIGH 0x1a304
+#define A_NCSI_STAT_TX_FRAME_LOW 0x1a308
+#define A_NCSI_STAT_TX_FRAME_HIGH 0x1a30c
+#define A_NCSI_STAT_TX_BCAST 0x1a310
+#define A_NCSI_STAT_TX_MCAST 0x1a314
+#define A_NCSI_STAT_TX_PAUSE 0x1a318
+#define A_NCSI_STAT_TX_64B_FRAMES 0x1a31c
+#define A_NCSI_STAT_TX_65_127B_FRAMES 0x1a320
+#define A_NCSI_STAT_TX_128_255B_FRAMES 0x1a324
+#define A_NCSI_STAT_TX_256_511B_FRAMES 0x1a328
+#define A_NCSI_STAT_TX_512_1023B_FRAMES 0x1a32c
+#define A_NCSI_STAT_TX_1024_1518B_FRAMES 0x1a330
+#define A_NCSI_STAT_TX_1519_MAXB_FRAMES 0x1a334
+#define A_NCSI_STAT_TX_ERR_FRAMES 0x1a338
+#define A_NCSI_STAT_RX_BYTES_LOW 0x1a33c
+#define A_NCSI_STAT_RX_BYTES_HIGH 0x1a340
+#define A_NCSI_STAT_RX_FRAMES_LOW 0x1a344
+#define A_NCSI_STAT_RX_FRAMES_HIGH 0x1a348
+#define A_NCSI_STAT_RX_BCAST_FRAMES 0x1a34c
+#define A_NCSI_STAT_RX_MCAST_FRAMES 0x1a350
+#define A_NCSI_STAT_RX_PAUSE_FRAMES 0x1a354
+#define A_NCSI_STAT_RX_64B_FRAMES 0x1a358
+#define A_NCSI_STAT_RX_65_127B_FRAMES 0x1a35c
+#define A_NCSI_STAT_RX_128_255B_FRAMES 0x1a360
+#define A_NCSI_STAT_RX_256_511B_FRAMES 0x1a364
+#define A_NCSI_STAT_RX_512_1023B_FRAMES 0x1a368
+#define A_NCSI_STAT_RX_1024_1518B_FRAMES 0x1a36c
+#define A_NCSI_STAT_RX_1519_MAXB_FRAMES 0x1a370
+#define A_NCSI_STAT_RX_SHORT_FRAMES 0x1a374
+#define A_NCSI_STAT_RX_OVERSIZE_FRAMES 0x1a378
+#define A_NCSI_STAT_RX_JABBER_FRAMES 0x1a37c
+#define A_NCSI_STAT_RX_CRC_ERR_FRAMES 0x1a380
+#define A_NCSI_STAT_RX_LENGTH_ERR_FRAMES 0x1a384
+#define A_NCSI_STAT_RX_SYM_CODE_ERR_FRAMES 0x1a388
+#define A_NCSI_XAUI_PCS_ERR 0x1a398
+
+#define S_PCS_SYNCSTATUS 5
+#define M_PCS_SYNCSTATUS 0xfU
+#define V_PCS_SYNCSTATUS(x) ((x) << S_PCS_SYNCSTATUS)
+#define G_PCS_SYNCSTATUS(x) (((x) >> S_PCS_SYNCSTATUS) & M_PCS_SYNCSTATUS)
+
+#define S_PCS_CTCFIFOERR 1
+#define M_PCS_CTCFIFOERR 0xfU
+#define V_PCS_CTCFIFOERR(x) ((x) << S_PCS_CTCFIFOERR)
+#define G_PCS_CTCFIFOERR(x) (((x) >> S_PCS_CTCFIFOERR) & M_PCS_CTCFIFOERR)
+
+#define S_PCS_NOTALIGNED 0
+#define V_PCS_NOTALIGNED(x) ((x) << S_PCS_NOTALIGNED)
+#define F_PCS_NOTALIGNED V_PCS_NOTALIGNED(1U)
+
+#define A_NCSI_RGMII_STATUS 0x1a39c
+
+#define S_GMIIDUPLEX 3
+#define V_GMIIDUPLEX(x) ((x) << S_GMIIDUPLEX)
+#define F_GMIIDUPLEX V_GMIIDUPLEX(1U)
+
+#define S_GMIISPEED 1
+#define M_GMIISPEED 0x3U
+#define V_GMIISPEED(x) ((x) << S_GMIISPEED)
+#define G_GMIISPEED(x) (((x) >> S_GMIISPEED) & M_GMIISPEED)
+
+#define S_GMIILINKSTATUS 0
+#define V_GMIILINKSTATUS(x) ((x) << S_GMIILINKSTATUS)
+#define F_GMIILINKSTATUS V_GMIILINKSTATUS(1U)
+
+#define A_NCSI_WOL_STATUS 0x1a3a0
+
+#define S_T7_PATDETECTED 31
+#define V_T7_PATDETECTED(x) ((x) << S_T7_PATDETECTED)
+#define F_T7_PATDETECTED V_T7_PATDETECTED(1U)
+
+#define A_NCSI_RX_MAX_PKT_SIZE_ERR_CNT 0x1a3a4
+#define A_NCSI_TX_SPI4_SOP_EOP_CNT 0x1a3a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffffU
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define S_TXSPI4EOPCNT 0
+#define M_TXSPI4EOPCNT 0xffffU
+#define V_TXSPI4EOPCNT(x) ((x) << S_TXSPI4EOPCNT)
+#define G_TXSPI4EOPCNT(x) (((x) >> S_TXSPI4EOPCNT) & M_TXSPI4EOPCNT)
+
+#define A_NCSI_RX_SPI4_SOP_EOP_CNT 0x1a3ac
+
+#define S_RXSPI4SOPCNT 16
+#define M_RXSPI4SOPCNT 0xffffU
+#define V_RXSPI4SOPCNT(x) ((x) << S_RXSPI4SOPCNT)
+#define G_RXSPI4SOPCNT(x) (((x) >> S_RXSPI4SOPCNT) & M_RXSPI4SOPCNT)
+
+#define S_RXSPI4EOPCNT 0
+#define M_RXSPI4EOPCNT 0xffffU
+#define V_RXSPI4EOPCNT(x) ((x) << S_RXSPI4EOPCNT)
+#define G_RXSPI4EOPCNT(x) (((x) >> S_RXSPI4EOPCNT) & M_RXSPI4EOPCNT)
+
/* registers for module XGMAC */
#define XGMAC_BASE_ADDR 0x0
@@ -44054,6 +56225,16 @@
#define V_IBQEMPTY(x) ((x) << S_IBQEMPTY)
#define G_IBQEMPTY(x) (((x) >> S_IBQEMPTY) & M_IBQEMPTY)
+#define S_T7_IBQGEN1 10
+#define M_T7_IBQGEN1 0x3fU
+#define V_T7_IBQGEN1(x) ((x) << S_T7_IBQGEN1)
+#define G_T7_IBQGEN1(x) (((x) >> S_T7_IBQGEN1) & M_T7_IBQGEN1)
+
+#define S_T7_IBQEMPTY 0
+#define M_T7_IBQEMPTY 0x3ffU
+#define V_T7_IBQEMPTY(x) ((x) << S_T7_IBQEMPTY)
+#define G_T7_IBQEMPTY(x) (((x) >> S_T7_IBQEMPTY) & M_T7_IBQEMPTY)
+
#define A_UP_OBQ_GEN 0xc
#define S_OBQGEN 6
@@ -44076,6 +56257,16 @@
#define V_T5_OBQFULL(x) ((x) << S_T5_OBQFULL)
#define G_T5_OBQFULL(x) (((x) >> S_T5_OBQFULL) & M_T5_OBQFULL)
+#define S_T7_T5_OBQGEN 16
+#define M_T7_T5_OBQGEN 0xffffU
+#define V_T7_T5_OBQGEN(x) ((x) << S_T7_T5_OBQGEN)
+#define G_T7_T5_OBQGEN(x) (((x) >> S_T7_T5_OBQGEN) & M_T7_T5_OBQGEN)
+
+#define S_T7_T5_OBQFULL 0
+#define M_T7_T5_OBQFULL 0xffffU
+#define V_T7_T5_OBQFULL(x) ((x) << S_T7_T5_OBQFULL)
+#define G_T7_T5_OBQFULL(x) (((x) >> S_T7_T5_OBQFULL) & M_T7_T5_OBQFULL)
+
#define A_UP_IBQ_0_RDADDR 0x10
#define S_QUEID 13
@@ -44088,6 +56279,13 @@
#define V_IBQRDADDR(x) ((x) << S_IBQRDADDR)
#define G_IBQRDADDR(x) (((x) >> S_IBQRDADDR) & M_IBQRDADDR)
+#define A_UP_IBQ_GEN_IPC 0x10
+
+#define S_IPCEMPTY 0
+#define M_IPCEMPTY 0x7fU
+#define V_IPCEMPTY(x) ((x) << S_IPCEMPTY)
+#define G_IPCEMPTY(x) (((x) >> S_IPCEMPTY) & M_IPCEMPTY)
+
#define A_UP_IBQ_0_WRADDR 0x14
#define S_IBQWRADDR 0
@@ -44160,10 +56358,15 @@
#define A_UP_OBQ_0_STATUS 0x78
#define A_UP_OBQ_0_PKTCNT 0x7c
#define A_UP_OBQ_1_RDADDR 0x80
+#define A_UP_NXT_FLOWADDR0 0x80
#define A_UP_OBQ_1_WRADDR 0x84
+#define A_UP_NXT_FLOWADDR1 0x84
#define A_UP_OBQ_1_STATUS 0x88
+#define A_UP_NXT_FLOWADDR2 0x88
#define A_UP_OBQ_1_PKTCNT 0x8c
+#define A_UP_NXT_FLOWADDR3 0x8c
#define A_UP_OBQ_2_RDADDR 0x90
+#define A_UP_DFT_FLOWADDR 0x90
#define A_UP_OBQ_2_WRADDR 0x94
#define A_UP_OBQ_2_STATUS 0x98
#define A_UP_OBQ_2_PKTCNT 0x9c
@@ -44176,9 +56379,33 @@
#define A_UP_OBQ_4_STATUS 0xb8
#define A_UP_OBQ_4_PKTCNT 0xbc
#define A_UP_OBQ_5_RDADDR 0xc0
+#define A_UP_MAX_SEQ_NUM 0xc0
#define A_UP_OBQ_5_WRADDR 0xc4
+#define A_UP_UNACK_SEQ_NUM 0xc4
#define A_UP_OBQ_5_STATUS 0xc8
+#define A_UP_SEARCH_SEQ_NUM 0xc8
#define A_UP_OBQ_5_PKTCNT 0xcc
+#define A_UP_SEQ_SEARCH_CTRL 0xcc
+
+#define S_FIFO_SIZE 29
+#define M_FIFO_SIZE 0x7U
+#define V_FIFO_SIZE(x) ((x) << S_FIFO_SIZE)
+#define G_FIFO_SIZE(x) (((x) >> S_FIFO_SIZE) & M_FIFO_SIZE)
+
+#define S_ROCE_MODE 28
+#define V_ROCE_MODE(x) ((x) << S_ROCE_MODE)
+#define F_ROCE_MODE V_ROCE_MODE(1U)
+
+#define S_SEQ_WR_PTR 16
+#define M_SEQ_WR_PTR 0xfffU
+#define V_SEQ_WR_PTR(x) ((x) << S_SEQ_WR_PTR)
+#define G_SEQ_WR_PTR(x) (((x) >> S_SEQ_WR_PTR) & M_SEQ_WR_PTR)
+
+#define S_SEQ_RD_PTR 0
+#define M_SEQ_RD_PTR 0xfffU
+#define V_SEQ_RD_PTR(x) ((x) << S_SEQ_RD_PTR)
+#define G_SEQ_RD_PTR(x) (((x) >> S_SEQ_RD_PTR) & M_SEQ_RD_PTR)
+
#define A_UP_IBQ_0_CONFIG 0xd0
#define S_QUESIZE 26
@@ -44203,6 +56430,25 @@
#define V_QUE1KEN(x) ((x) << S_QUE1KEN)
#define F_QUE1KEN V_QUE1KEN(1U)
+#define A_UP_SEQ_SEARCH_RES0 0xd0
+
+#define S_INV_SEQ 18
+#define V_INV_SEQ(x) ((x) << S_INV_SEQ)
+#define F_INV_SEQ V_INV_SEQ(1U)
+
+#define S_DUP_SEQ 17
+#define V_DUP_SEQ(x) ((x) << S_DUP_SEQ)
+#define F_DUP_SEQ V_DUP_SEQ(1U)
+
+#define S_MATCH_VLD 16
+#define V_MATCH_VLD(x) ((x) << S_MATCH_VLD)
+#define F_MATCH_VLD V_MATCH_VLD(1U)
+
+#define S_MATCH_INDEX 0
+#define M_MATCH_INDEX 0xffffU
+#define V_MATCH_INDEX(x) ((x) << S_MATCH_INDEX)
+#define G_MATCH_INDEX(x) (((x) >> S_MATCH_INDEX) & M_MATCH_INDEX)
+
#define A_UP_IBQ_0_REALADDR 0xd4
#define S_QUERDADDRWRAP 31
@@ -44218,6 +56464,7 @@
#define V_QUEMEMADDR(x) ((x) << S_QUEMEMADDR)
#define G_QUEMEMADDR(x) (((x) >> S_QUEMEMADDR) & M_QUEMEMADDR)
+#define A_UP_SEQ_SEARCH_RES1 0xd4
#define A_UP_IBQ_1_CONFIG 0xd8
#define A_UP_IBQ_1_REALADDR 0xdc
#define A_UP_IBQ_2_CONFIG 0xe0
@@ -44229,14 +56476,34 @@
#define A_UP_IBQ_5_CONFIG 0xf8
#define A_UP_IBQ_5_REALADDR 0xfc
#define A_UP_OBQ_0_CONFIG 0x100
+#define A_UP_PEER_HALT_STAT0 0x100
+
+#define S_HALTINFO 1
+#define M_HALTINFO 0x7fffffffU
+#define V_HALTINFO(x) ((x) << S_HALTINFO)
+#define G_HALTINFO(x) (((x) >> S_HALTINFO) & M_HALTINFO)
+
#define A_UP_OBQ_0_REALADDR 0x104
+#define A_UP_PEER_HALT_STAT1 0x104
#define A_UP_OBQ_1_CONFIG 0x108
+#define A_UP_PEER_HALT_STAT2 0x108
#define A_UP_OBQ_1_REALADDR 0x10c
+#define A_UP_PEER_HALT_STAT3 0x10c
#define A_UP_OBQ_2_CONFIG 0x110
+#define A_UP_PEER_HALT_STAT4 0x110
#define A_UP_OBQ_2_REALADDR 0x114
+#define A_UP_PEER_HALT_STAT5 0x114
#define A_UP_OBQ_3_CONFIG 0x118
+#define A_UP_PEER_HALT_STAT6 0x118
#define A_UP_OBQ_3_REALADDR 0x11c
+#define A_UP_PEER_HALT_STAT7 0x11c
#define A_UP_OBQ_4_CONFIG 0x120
+#define A_UP_PEER_HALT_CTL 0x120
+
+#define S_HALTREQ 0
+#define V_HALTREQ(x) ((x) << S_HALTREQ)
+#define F_HALTREQ V_HALTREQ(1U)
+
#define A_UP_OBQ_4_REALADDR 0x124
#define A_UP_OBQ_5_CONFIG 0x128
#define A_UP_OBQ_5_REALADDR 0x12c
@@ -44516,6 +56783,204 @@
#define A_UP_OBQ_6_SHADOW_REALADDR 0x3c4
#define A_UP_OBQ_7_SHADOW_CONFIG 0x3c8
#define A_UP_OBQ_7_SHADOW_REALADDR 0x3cc
+#define A_T7_UP_IBQ_0_SHADOW_RDADDR 0x400
+#define A_T7_UP_IBQ_0_SHADOW_WRADDR 0x404
+#define A_T7_UP_IBQ_0_SHADOW_STATUS 0x408
+
+#define S_T7_QUEREMFLITS 0
+#define M_T7_QUEREMFLITS 0xfffU
+#define V_T7_QUEREMFLITS(x) ((x) << S_T7_QUEREMFLITS)
+#define G_T7_QUEREMFLITS(x) (((x) >> S_T7_QUEREMFLITS) & M_T7_QUEREMFLITS)
+
+#define A_T7_UP_IBQ_0_SHADOW_PKTCNT 0x40c
+#define A_T7_UP_IBQ_1_SHADOW_RDADDR 0x410
+#define A_T7_UP_IBQ_1_SHADOW_WRADDR 0x414
+#define A_T7_UP_IBQ_1_SHADOW_STATUS 0x418
+#define A_T7_UP_IBQ_1_SHADOW_PKTCNT 0x41c
+#define A_T7_UP_IBQ_2_SHADOW_RDADDR 0x420
+#define A_T7_UP_IBQ_2_SHADOW_WRADDR 0x424
+#define A_T7_UP_IBQ_2_SHADOW_STATUS 0x428
+#define A_T7_UP_IBQ_2_SHADOW_PKTCNT 0x42c
+#define A_T7_UP_IBQ_3_SHADOW_RDADDR 0x430
+#define A_T7_UP_IBQ_3_SHADOW_WRADDR 0x434
+#define A_T7_UP_IBQ_3_SHADOW_STATUS 0x438
+#define A_T7_UP_IBQ_3_SHADOW_PKTCNT 0x43c
+#define A_T7_UP_IBQ_4_SHADOW_RDADDR 0x440
+#define A_T7_UP_IBQ_4_SHADOW_WRADDR 0x444
+#define A_T7_UP_IBQ_4_SHADOW_STATUS 0x448
+#define A_T7_UP_IBQ_4_SHADOW_PKTCNT 0x44c
+#define A_T7_UP_IBQ_5_SHADOW_RDADDR 0x450
+#define A_T7_UP_IBQ_5_SHADOW_WRADDR 0x454
+#define A_T7_UP_IBQ_5_SHADOW_STATUS 0x458
+#define A_T7_UP_IBQ_5_SHADOW_PKTCNT 0x45c
+#define A_UP_IBQ_6_SHADOW_RDADDR 0x460
+#define A_UP_IBQ_6_SHADOW_WRADDR 0x464
+#define A_UP_IBQ_6_SHADOW_STATUS 0x468
+#define A_UP_IBQ_6_SHADOW_PKTCNT 0x46c
+#define A_UP_IBQ_7_SHADOW_RDADDR 0x470
+#define A_UP_IBQ_7_SHADOW_WRADDR 0x474
+#define A_UP_IBQ_7_SHADOW_STATUS 0x478
+#define A_UP_IBQ_7_SHADOW_PKTCNT 0x47c
+#define A_UP_IBQ_8_SHADOW_RDADDR 0x480
+#define A_UP_IBQ_8_SHADOW_WRADDR 0x484
+#define A_UP_IBQ_8_SHADOW_STATUS 0x488
+#define A_UP_IBQ_8_SHADOW_PKTCNT 0x48c
+#define A_UP_IBQ_9_SHADOW_RDADDR 0x490
+#define A_UP_IBQ_9_SHADOW_WRADDR 0x494
+#define A_UP_IBQ_9_SHADOW_STATUS 0x498
+#define A_UP_IBQ_9_SHADOW_PKTCNT 0x49c
+#define A_UP_IBQ_10_SHADOW_RDADDR 0x4a0
+#define A_UP_IBQ_10_SHADOW_WRADDR 0x4a4
+#define A_UP_IBQ_10_SHADOW_STATUS 0x4a8
+#define A_UP_IBQ_10_SHADOW_PKTCNT 0x4ac
+#define A_UP_IBQ_11_SHADOW_RDADDR 0x4b0
+#define A_UP_IBQ_11_SHADOW_WRADDR 0x4b4
+#define A_UP_IBQ_11_SHADOW_STATUS 0x4b8
+#define A_UP_IBQ_11_SHADOW_PKTCNT 0x4bc
+#define A_UP_IBQ_12_SHADOW_RDADDR 0x4c0
+#define A_UP_IBQ_12_SHADOW_WRADDR 0x4c4
+#define A_UP_IBQ_12_SHADOW_STATUS 0x4c8
+#define A_UP_IBQ_12_SHADOW_PKTCNT 0x4cc
+#define A_UP_IBQ_13_SHADOW_RDADDR 0x4d0
+#define A_UP_IBQ_13_SHADOW_WRADDR 0x4d4
+#define A_UP_IBQ_13_SHADOW_STATUS 0x4d8
+#define A_UP_IBQ_13_SHADOW_PKTCNT 0x4dc
+#define A_UP_IBQ_14_SHADOW_RDADDR 0x4e0
+#define A_UP_IBQ_14_SHADOW_WRADDR 0x4e4
+#define A_UP_IBQ_14_SHADOW_STATUS 0x4e8
+#define A_UP_IBQ_14_SHADOW_PKTCNT 0x4ec
+#define A_UP_IBQ_15_SHADOW_RDADDR 0x4f0
+#define A_UP_IBQ_15_SHADOW_WRADDR 0x4f4
+#define A_UP_IBQ_15_SHADOW_STATUS 0x4f8
+#define A_UP_IBQ_15_SHADOW_PKTCNT 0x4fc
+#define A_T7_UP_IBQ_0_SHADOW_CONFIG 0x500
+#define A_T7_UP_IBQ_0_SHADOW_REALADDR 0x504
+#define A_T7_UP_IBQ_1_SHADOW_CONFIG 0x510
+#define A_T7_UP_IBQ_1_SHADOW_REALADDR 0x514
+#define A_T7_UP_IBQ_2_SHADOW_CONFIG 0x520
+#define A_T7_UP_IBQ_2_SHADOW_REALADDR 0x524
+#define A_T7_UP_IBQ_3_SHADOW_CONFIG 0x530
+#define A_T7_UP_IBQ_3_SHADOW_REALADDR 0x534
+#define A_T7_UP_IBQ_4_SHADOW_CONFIG 0x540
+#define A_T7_UP_IBQ_4_SHADOW_REALADDR 0x544
+#define A_T7_UP_IBQ_5_SHADOW_CONFIG 0x550
+#define A_T7_UP_IBQ_5_SHADOW_REALADDR 0x554
+#define A_UP_IBQ_6_SHADOW_CONFIG 0x560
+#define A_UP_IBQ_6_SHADOW_REALADDR 0x564
+#define A_UP_IBQ_7_SHADOW_CONFIG 0x570
+#define A_UP_IBQ_7_SHADOW_REALADDR 0x574
+#define A_UP_IBQ_8_SHADOW_CONFIG 0x580
+#define A_UP_IBQ_8_SHADOW_REALADDR 0x584
+#define A_UP_IBQ_9_SHADOW_CONFIG 0x590
+#define A_UP_IBQ_9_SHADOW_REALADDR 0x594
+#define A_UP_IBQ_10_SHADOW_CONFIG 0x5a0
+#define A_UP_IBQ_10_SHADOW_REALADDR 0x5a4
+#define A_UP_IBQ_11_SHADOW_CONFIG 0x5b0
+#define A_UP_IBQ_11_SHADOW_REALADDR 0x5b4
+#define A_UP_IBQ_12_SHADOW_CONFIG 0x5c0
+#define A_UP_IBQ_12_SHADOW_REALADDR 0x5c4
+#define A_UP_IBQ_13_SHADOW_CONFIG 0x5d0
+#define A_UP_IBQ_13_SHADOW_REALADDR 0x5d4
+#define A_UP_IBQ_14_SHADOW_CONFIG 0x5e0
+#define A_UP_IBQ_14_SHADOW_REALADDR 0x5e4
+#define A_UP_IBQ_15_SHADOW_CONFIG 0x5f0
+#define A_UP_IBQ_15_SHADOW_REALADDR 0x5f4
+#define A_T7_UP_OBQ_0_SHADOW_RDADDR 0x600
+#define A_T7_UP_OBQ_0_SHADOW_WRADDR 0x604
+#define A_T7_UP_OBQ_0_SHADOW_STATUS 0x608
+#define A_T7_UP_OBQ_0_SHADOW_PKTCNT 0x60c
+#define A_T7_UP_OBQ_1_SHADOW_RDADDR 0x610
+#define A_T7_UP_OBQ_1_SHADOW_WRADDR 0x614
+#define A_T7_UP_OBQ_1_SHADOW_STATUS 0x618
+#define A_T7_UP_OBQ_1_SHADOW_PKTCNT 0x61c
+#define A_T7_UP_OBQ_2_SHADOW_RDADDR 0x620
+#define A_T7_UP_OBQ_2_SHADOW_WRADDR 0x624
+#define A_T7_UP_OBQ_2_SHADOW_STATUS 0x628
+#define A_T7_UP_OBQ_2_SHADOW_PKTCNT 0x62c
+#define A_T7_UP_OBQ_3_SHADOW_RDADDR 0x630
+#define A_T7_UP_OBQ_3_SHADOW_WRADDR 0x634
+#define A_T7_UP_OBQ_3_SHADOW_STATUS 0x638
+#define A_T7_UP_OBQ_3_SHADOW_PKTCNT 0x63c
+#define A_T7_UP_OBQ_4_SHADOW_RDADDR 0x640
+#define A_T7_UP_OBQ_4_SHADOW_WRADDR 0x644
+#define A_T7_UP_OBQ_4_SHADOW_STATUS 0x648
+#define A_T7_UP_OBQ_4_SHADOW_PKTCNT 0x64c
+#define A_T7_UP_OBQ_5_SHADOW_RDADDR 0x650
+#define A_T7_UP_OBQ_5_SHADOW_WRADDR 0x654
+#define A_T7_UP_OBQ_5_SHADOW_STATUS 0x658
+#define A_T7_UP_OBQ_5_SHADOW_PKTCNT 0x65c
+#define A_T7_UP_OBQ_6_SHADOW_RDADDR 0x660
+#define A_T7_UP_OBQ_6_SHADOW_WRADDR 0x664
+#define A_T7_UP_OBQ_6_SHADOW_STATUS 0x668
+#define A_T7_UP_OBQ_6_SHADOW_PKTCNT 0x66c
+#define A_T7_UP_OBQ_7_SHADOW_RDADDR 0x670
+#define A_T7_UP_OBQ_7_SHADOW_WRADDR 0x674
+#define A_T7_UP_OBQ_7_SHADOW_STATUS 0x678
+#define A_T7_UP_OBQ_7_SHADOW_PKTCNT 0x67c
+#define A_UP_OBQ_8_SHADOW_RDADDR 0x680
+#define A_UP_OBQ_8_SHADOW_WRADDR 0x684
+#define A_UP_OBQ_8_SHADOW_STATUS 0x688
+#define A_UP_OBQ_8_SHADOW_PKTCNT 0x68c
+#define A_UP_OBQ_9_SHADOW_RDADDR 0x690
+#define A_UP_OBQ_9_SHADOW_WRADDR 0x694
+#define A_UP_OBQ_9_SHADOW_STATUS 0x698
+#define A_UP_OBQ_9_SHADOW_PKTCNT 0x69c
+#define A_UP_OBQ_10_SHADOW_RDADDR 0x6a0
+#define A_UP_OBQ_10_SHADOW_WRADDR 0x6a4
+#define A_UP_OBQ_10_SHADOW_STATUS 0x6a8
+#define A_UP_OBQ_10_SHADOW_PKTCNT 0x6ac
+#define A_UP_OBQ_11_SHADOW_RDADDR 0x6b0
+#define A_UP_OBQ_11_SHADOW_WRADDR 0x6b4
+#define A_UP_OBQ_11_SHADOW_STATUS 0x6b8
+#define A_UP_OBQ_11_SHADOW_PKTCNT 0x6bc
+#define A_UP_OBQ_12_SHADOW_RDADDR 0x6c0
+#define A_UP_OBQ_12_SHADOW_WRADDR 0x6c4
+#define A_UP_OBQ_12_SHADOW_STATUS 0x6c8
+#define A_UP_OBQ_12_SHADOW_PKTCNT 0x6cc
+#define A_UP_OBQ_13_SHADOW_RDADDR 0x6d0
+#define A_UP_OBQ_13_SHADOW_WRADDR 0x6d4
+#define A_UP_OBQ_13_SHADOW_STATUS 0x6d8
+#define A_UP_OBQ_13_SHADOW_PKTCNT 0x6dc
+#define A_UP_OBQ_14_SHADOW_RDADDR 0x6e0
+#define A_UP_OBQ_14_SHADOW_WRADDR 0x6e4
+#define A_UP_OBQ_14_SHADOW_STATUS 0x6e8
+#define A_UP_OBQ_14_SHADOW_PKTCNT 0x6ec
+#define A_UP_OBQ_15_SHADOW_RDADDR 0x6f0
+#define A_UP_OBQ_15_SHADOW_WRADDR 0x6f4
+#define A_UP_OBQ_15_SHADOW_STATUS 0x6f8
+#define A_UP_OBQ_15_SHADOW_PKTCNT 0x6fc
+#define A_T7_UP_OBQ_0_SHADOW_CONFIG 0x700
+#define A_T7_UP_OBQ_0_SHADOW_REALADDR 0x704
+#define A_T7_UP_OBQ_1_SHADOW_CONFIG 0x710
+#define A_T7_UP_OBQ_1_SHADOW_REALADDR 0x714
+#define A_T7_UP_OBQ_2_SHADOW_CONFIG 0x720
+#define A_T7_UP_OBQ_2_SHADOW_REALADDR 0x724
+#define A_T7_UP_OBQ_3_SHADOW_CONFIG 0x730
+#define A_T7_UP_OBQ_3_SHADOW_REALADDR 0x734
+#define A_T7_UP_OBQ_4_SHADOW_CONFIG 0x740
+#define A_T7_UP_OBQ_4_SHADOW_REALADDR 0x744
+#define A_T7_UP_OBQ_5_SHADOW_CONFIG 0x750
+#define A_T7_UP_OBQ_5_SHADOW_REALADDR 0x754
+#define A_T7_UP_OBQ_6_SHADOW_CONFIG 0x760
+#define A_T7_UP_OBQ_6_SHADOW_REALADDR 0x764
+#define A_T7_UP_OBQ_7_SHADOW_CONFIG 0x770
+#define A_T7_UP_OBQ_7_SHADOW_REALADDR 0x774
+#define A_UP_OBQ_8_SHADOW_CONFIG 0x780
+#define A_UP_OBQ_8_SHADOW_REALADDR 0x784
+#define A_UP_OBQ_9_SHADOW_CONFIG 0x790
+#define A_UP_OBQ_9_SHADOW_REALADDR 0x794
+#define A_UP_OBQ_10_SHADOW_CONFIG 0x7a0
+#define A_UP_OBQ_10_SHADOW_REALADDR 0x7a4
+#define A_UP_OBQ_11_SHADOW_CONFIG 0x7b0
+#define A_UP_OBQ_11_SHADOW_REALADDR 0x7b4
+#define A_UP_OBQ_12_SHADOW_CONFIG 0x7c0
+#define A_UP_OBQ_12_SHADOW_REALADDR 0x7c4
+#define A_UP_OBQ_13_SHADOW_CONFIG 0x7d0
+#define A_UP_OBQ_13_SHADOW_REALADDR 0x7d4
+#define A_UP_OBQ_14_SHADOW_CONFIG 0x7e0
+#define A_UP_OBQ_14_SHADOW_REALADDR 0x7e4
+#define A_UP_OBQ_15_SHADOW_CONFIG 0x7f0
+#define A_UP_OBQ_15_SHADOW_REALADDR 0x7f4
/* registers for module CIM_CTL */
#define CIM_CTL_BASE_ADDR 0x0
@@ -44579,17 +57044,63 @@
#define A_CIM_CTL_STATIC_PREFADDR10 0x38
#define A_CIM_CTL_STATIC_PREFADDR11 0x3c
#define A_CIM_CTL_STATIC_PREFADDR12 0x40
+#define A_CIM_CTL_SEM_CFG 0x40
+
+#define S_SEMINIT 31
+#define V_SEMINIT(x) ((x) << S_SEMINIT)
+#define F_SEMINIT V_SEMINIT(1U)
+
+#define S_NUMSEM 0
+#define M_NUMSEM 0x3ffffU
+#define V_NUMSEM(x) ((x) << S_NUMSEM)
+#define G_NUMSEM(x) (((x) >> S_NUMSEM) & M_NUMSEM)
+
#define A_CIM_CTL_STATIC_PREFADDR13 0x44
+#define A_CIM_CTL_SEM_MA_CFG 0x44
+
+#define S_SEMMABASE 4
+#define M_SEMMABASE 0xfffffffU
+#define V_SEMMABASE(x) ((x) << S_SEMMABASE)
+#define G_SEMMABASE(x) (((x) >> S_SEMMABASE) & M_SEMMABASE)
+
+#define S_SEMMATHREADID 0
+#define M_SEMMATHREADID 0x7U
+#define V_SEMMATHREADID(x) ((x) << S_SEMMATHREADID)
+#define G_SEMMATHREADID(x) (((x) >> S_SEMMATHREADID) & M_SEMMATHREADID)
+
#define A_CIM_CTL_STATIC_PREFADDR14 0x48
#define A_CIM_CTL_STATIC_PREFADDR15 0x4c
#define A_CIM_CTL_STATIC_ALLOCADDR0 0x50
+#define A_CIM_CTL_LOCK_CFG 0x50
+
+#define S_NUMLOCK 0
+#define M_NUMLOCK 0x3ffffU
+#define V_NUMLOCK(x) ((x) << S_NUMLOCK)
+#define G_NUMLOCK(x) (((x) >> S_NUMLOCK) & M_NUMLOCK)
+
#define A_CIM_CTL_STATIC_ALLOCADDR1 0x54
+#define A_CIM_CTL_LOCK_MA_CFG 0x54
+
+#define S_LOCKMABASE 4
+#define M_LOCKMABASE 0xfffffffU
+#define V_LOCKMABASE(x) ((x) << S_LOCKMABASE)
+#define G_LOCKMABASE(x) (((x) >> S_LOCKMABASE) & M_LOCKMABASE)
+
+#define S_LOCKMATHREADID 0
+#define M_LOCKMATHREADID 0x7U
+#define V_LOCKMATHREADID(x) ((x) << S_LOCKMATHREADID)
+#define G_LOCKMATHREADID(x) (((x) >> S_LOCKMATHREADID) & M_LOCKMATHREADID)
+
#define A_CIM_CTL_STATIC_ALLOCADDR2 0x58
#define A_CIM_CTL_STATIC_ALLOCADDR3 0x5c
#define A_CIM_CTL_STATIC_ALLOCADDR4 0x60
+#define A_CIM_CTL_RSA_INT 0x60
#define A_CIM_CTL_STATIC_ALLOCADDR5 0x64
+#define A_CIM_CTL_RSA_BUSY 0x64
#define A_CIM_CTL_STATIC_ALLOCADDR6 0x68
+#define A_CIM_CTL_RSA_CPERR 0x68
#define A_CIM_CTL_STATIC_ALLOCADDR7 0x6c
+#define A_CIM_CTL_RSA_DPERR 0x6c
#define A_CIM_CTL_STATIC_ALLOCADDR8 0x70
#define A_CIM_CTL_STATIC_ALLOCADDR9 0x74
#define A_CIM_CTL_STATIC_ALLOCADDR10 0x78
@@ -44650,6 +57161,66 @@
#define A_CIM_CTL_GEN_TIMER3 0xd0
#define A_CIM_CTL_MAILBOX_VF_STATUS 0xe0
#define A_CIM_CTL_MAILBOX_VFN_CTL 0x100
+#define A_CIM_CTL_TID_MAP_EN 0x500
+#define A_CIM_CTL_TID_MAP_CORE 0x520
+#define A_CIM_CTL_TID_MAP_CONFIG 0x540
+
+#define S_TIDDEFCORE 4
+#define M_TIDDEFCORE 0xfU
+#define V_TIDDEFCORE(x) ((x) << S_TIDDEFCORE)
+#define G_TIDDEFCORE(x) (((x) >> S_TIDDEFCORE) & M_TIDDEFCORE)
+
+#define S_TIDVECBASE 0
+#define M_TIDVECBASE 0x7U
+#define V_TIDVECBASE(x) ((x) << S_TIDVECBASE)
+#define G_TIDVECBASE(x) (((x) >> S_TIDVECBASE) & M_TIDVECBASE)
+
+#define A_CIM_CTL_CRYPTO_KEY_DATA 0x600
+#define A_CIM_CTL_SECURE_CONFIG 0x6f8
+#define A_CIM_CTL_CRYPTO_KEY_CTRL 0x6fc
+
+#define S_CRYPTOKEYDATAREGNUM 8
+#define M_CRYPTOKEYDATAREGNUM 0xffU
+#define V_CRYPTOKEYDATAREGNUM(x) ((x) << S_CRYPTOKEYDATAREGNUM)
+#define G_CRYPTOKEYDATAREGNUM(x) (((x) >> S_CRYPTOKEYDATAREGNUM) & M_CRYPTOKEYDATAREGNUM)
+
+#define S_CRYPTOKEYSTARTBUSY 0
+#define V_CRYPTOKEYSTARTBUSY(x) ((x) << S_CRYPTOKEYSTARTBUSY)
+#define F_CRYPTOKEYSTARTBUSY V_CRYPTOKEYSTARTBUSY(1U)
+
+#define A_CIM_CTL_FLOWID_OP_VALID 0x700
+#define A_CIM_CTL_FLOWID_CTL 0x720
+
+#define S_FLOWBASEADDR 8
+#define M_FLOWBASEADDR 0xffffffU
+#define V_FLOWBASEADDR(x) ((x) << S_FLOWBASEADDR)
+#define G_FLOWBASEADDR(x) (((x) >> S_FLOWBASEADDR) & M_FLOWBASEADDR)
+
+#define S_SEQSRCHALIGNCFG 4
+#define M_SEQSRCHALIGNCFG 0x3U
+#define V_SEQSRCHALIGNCFG(x) ((x) << S_SEQSRCHALIGNCFG)
+#define G_SEQSRCHALIGNCFG(x) (((x) >> S_SEQSRCHALIGNCFG) & M_SEQSRCHALIGNCFG)
+
+#define S_FLOWADDRSIZE 1
+#define M_FLOWADDRSIZE 0x3U
+#define V_FLOWADDRSIZE(x) ((x) << S_FLOWADDRSIZE)
+#define G_FLOWADDRSIZE(x) (((x) >> S_FLOWADDRSIZE) & M_FLOWADDRSIZE)
+
+#define S_FLOWIDEN 0
+#define V_FLOWIDEN(x) ((x) << S_FLOWIDEN)
+#define F_FLOWIDEN V_FLOWIDEN(1U)
+
+#define A_CIM_CTL_FLOWID_MAX 0x724
+
+#define S_MAXFLOWID 0
+#define M_MAXFLOWID 0xffffffU
+#define V_MAXFLOWID(x) ((x) << S_MAXFLOWID)
+#define G_MAXFLOWID(x) (((x) >> S_MAXFLOWID) & M_MAXFLOWID)
+
+#define A_CIM_CTL_FLOWID_HINT0 0x728
+#define A_CIM_CTL_EFUSE_CTRL 0x780
+#define A_CIM_CTL_EFUSE_QOUT 0x784
+#define A_CIM_CTL_EFUSE_RFOUT 0x788
#define A_CIM_CTL_TSCH_CHNLN_CTL 0x900
#define S_TSCHNLEN 31
@@ -45001,14 +57572,19 @@
#define A_CIM_CTL_TSCH_TICK3 0xd8c
#define A_CIM_CTL_MAILBOX_PF3_CTL 0xd90
#define A_T6_CIM_CTL_MAILBOX_PF0_CTL 0xd90
+#define A_T7_CIM_CTL_MAILBOX_PF0_CTL 0xd90
#define A_CIM_CTL_MAILBOX_PF4_CTL 0xd94
#define A_T6_CIM_CTL_MAILBOX_PF1_CTL 0xd94
+#define A_T7_CIM_CTL_MAILBOX_PF1_CTL 0xd94
#define A_CIM_CTL_MAILBOX_PF5_CTL 0xd98
#define A_T6_CIM_CTL_MAILBOX_PF2_CTL 0xd98
+#define A_T7_CIM_CTL_MAILBOX_PF2_CTL 0xd98
#define A_CIM_CTL_MAILBOX_PF6_CTL 0xd9c
#define A_T6_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
+#define A_T7_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
#define A_CIM_CTL_MAILBOX_PF7_CTL 0xda0
#define A_T6_CIM_CTL_MAILBOX_PF4_CTL 0xda0
+#define A_T7_CIM_CTL_MAILBOX_PF4_CTL 0xda0
#define A_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xda4
#define S_PF7_OWNER_PL 15
@@ -45076,6 +57652,7 @@
#define F_PF0_OWNER_UP V_PF0_OWNER_UP(1U)
#define A_T6_CIM_CTL_MAILBOX_PF5_CTL 0xda4
+#define A_T7_CIM_CTL_MAILBOX_PF5_CTL 0xda4
#define A_CIM_CTL_PIO_MST_CONFIG 0xda8
#define S_T5_CTLRID 0
@@ -45084,15 +57661,13 @@
#define G_T5_CTLRID(x) (((x) >> S_T5_CTLRID) & M_T5_CTLRID)
#define A_T6_CIM_CTL_MAILBOX_PF6_CTL 0xda8
+#define A_T7_CIM_CTL_MAILBOX_PF6_CTL 0xda8
#define A_T6_CIM_CTL_MAILBOX_PF7_CTL 0xdac
+#define A_T7_CIM_CTL_MAILBOX_PF7_CTL 0xdac
#define A_T6_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
+#define A_T7_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
#define A_T6_CIM_CTL_PIO_MST_CONFIG 0xdb4
-
-#define S_T6_UPRID 0
-#define M_T6_UPRID 0x1ffU
-#define V_T6_UPRID(x) ((x) << S_T6_UPRID)
-#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID)
-
+#define A_T7_CIM_CTL_PIO_MST_CONFIG 0xdb4
#define A_CIM_CTL_ULP_OBQ0_PAUSE_MASK 0xe00
#define A_CIM_CTL_ULP_OBQ1_PAUSE_MASK 0xe04
#define A_CIM_CTL_ULP_OBQ2_PAUSE_MASK 0xe08
@@ -45119,6 +57694,64 @@
#define V_MA_TIMEOUT(x) ((x) << S_MA_TIMEOUT)
#define G_MA_TIMEOUT(x) (((x) >> S_MA_TIMEOUT) & M_MA_TIMEOUT)
+#define A_CIM_CTL_BREAK 0xf00
+
+#define S_XOCDMODE 8
+#define M_XOCDMODE 0xffU
+#define V_XOCDMODE(x) ((x) << S_XOCDMODE)
+#define G_XOCDMODE(x) (((x) >> S_XOCDMODE) & M_XOCDMODE)
+
+#define S_BREAKIN_CONTROL 0
+#define M_BREAKIN_CONTROL 0xffU
+#define V_BREAKIN_CONTROL(x) ((x) << S_BREAKIN_CONTROL)
+#define G_BREAKIN_CONTROL(x) (((x) >> S_BREAKIN_CONTROL) & M_BREAKIN_CONTROL)
+
+#define A_CIM_CTL_SLV_BOOT_CFG 0x4000
+
+#define S_T7_UPGEN 3
+#define M_T7_UPGEN 0x1fU
+#define V_T7_UPGEN(x) ((x) << S_T7_UPGEN)
+#define G_T7_UPGEN(x) (((x) >> S_T7_UPGEN) & M_T7_UPGEN)
+
+#define S_UPCLKEN 2
+#define V_UPCLKEN(x) ((x) << S_UPCLKEN)
+#define F_UPCLKEN V_UPCLKEN(1U)
+
+#define A_CIM_CTL_SLV_BOOT_LEN 0x4004
+#define A_CIM_CTL_SLV_ACC_INT_ENABLE 0x4008
+#define A_CIM_CTL_SLV_ACC_INT_CAUSE 0x400c
+#define A_CIM_CTL_SLV_INT_ENABLE 0x4010
+#define A_CIM_CTL_SLV_INT_CAUSE 0x4014
+#define A_CIM_CTL_SLV_PERR_ENABLE 0x4018
+#define A_CIM_CTL_SLV_PERR_CAUSE 0x401c
+#define A_CIM_CTL_SLV_ADDR_TIMEOUT 0x4028
+#define A_CIM_CTL_SLV_ADDR_ILLEGAL 0x402c
+#define A_CIM_CTL_SLV_PIO_MST_CONFIG 0x4030
+#define A_CIM_CTL_SLV_MEM_ZONE0_VA 0x4040
+#define A_CIM_CTL_SLV_MEM_ZONE0_BA 0x4044
+#define A_CIM_CTL_SLV_MEM_ZONE0_LEN 0x4048
+#define A_CIM_CTL_SLV_MEM_ZONE1_VA 0x404c
+#define A_CIM_CTL_SLV_MEM_ZONE1_BA 0x4050
+#define A_CIM_CTL_SLV_MEM_ZONE1_LEN 0x4054
+#define A_CIM_CTL_SLV_MEM_ZONE2_VA 0x4058
+#define A_CIM_CTL_SLV_MEM_ZONE2_BA 0x405c
+#define A_CIM_CTL_SLV_MEM_ZONE2_LEN 0x4060
+#define A_CIM_CTL_SLV_MEM_ZONE3_VA 0x4064
+#define A_CIM_CTL_SLV_MEM_ZONE3_BA 0x4068
+#define A_CIM_CTL_SLV_MEM_ZONE3_LEN 0x406c
+#define A_CIM_CTL_SLV_MEM_ZONE4_VA 0x4070
+#define A_CIM_CTL_SLV_MEM_ZONE4_BA 0x4074
+#define A_CIM_CTL_SLV_MEM_ZONE4_LEN 0x4078
+#define A_CIM_CTL_SLV_MEM_ZONE5_VA 0x407c
+#define A_CIM_CTL_SLV_MEM_ZONE5_BA 0x4080
+#define A_CIM_CTL_SLV_MEM_ZONE5_LEN 0x4084
+#define A_CIM_CTL_SLV_MEM_ZONE6_VA 0x4088
+#define A_CIM_CTL_SLV_MEM_ZONE6_BA 0x408c
+#define A_CIM_CTL_SLV_MEM_ZONE6_LEN 0x4090
+#define A_CIM_CTL_SLV_MEM_ZONE7_VA 0x4094
+#define A_CIM_CTL_SLV_MEM_ZONE7_BA 0x4098
+#define A_CIM_CTL_SLV_MEM_ZONE7_LEN 0x409c
+
/* registers for module MAC */
#define MAC_BASE_ADDR 0x0
@@ -46613,33 +59246,7 @@
#define F_PERR_TX_PCS1G V_PERR_TX_PCS1G(1U)
#define A_MAC_PORT_PERR_INT_CAUSE 0x8e4
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_ENABLE 0x8e8
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_INJECT 0x8ec
#define S_MEMSEL_PERR 1
@@ -47304,10 +59911,12 @@
#define A_MAC_PORT_PTP_DRIFT_ADJUST_COUNT 0x9a0
#define A_MAC_PORT_PTP_OFFSET_ADJUST_FINE 0x9a4
+#if 0
#define S_B 16
-#define CXGBE_M_B 0xffffU
+#define M_B 0xffffU
#define V_B(x) ((x) << S_B)
-#define G_B(x) (((x) >> S_B) & CXGBE_M_B)
+#define G_B(x) (((x) >> S_B) & M_B)
+#endif
#define S_A 0
#define M_A 0xffffU
@@ -48454,10 +61063,6 @@
#define V_LOW_POWER(x) ((x) << S_LOW_POWER)
#define F_LOW_POWER V_LOW_POWER(1U)
-#define S_T6_SPEED_SEL1 6
-#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1)
-#define F_T6_SPEED_SEL1 V_T6_SPEED_SEL1(1U)
-
#define S_SPEED_SEL2 2
#define M_SPEED_SEL2 0xfU
#define V_SPEED_SEL2(x) ((x) << S_SPEED_SEL2)
@@ -49016,7 +61621,7 @@
#define S_VLANTAG 0
#define CXGBE_M_VLANTAG 0xffffU
#define V_VLANTAG(x) ((x) << S_VLANTAG)
-#define G_VLANTAG(x) (((x) >> S_VLANTAG) & CXGBE_M_VLANTAG)
+#define G_VLANTAG(x) (((x) >> S_VLANTAG) & M_VLANTAG)
#define A_MAC_PORT_MTIP_VLAN_TPID_1 0x1a04
#define A_MAC_PORT_MTIP_VLAN_TPID_2 0x1a08
@@ -51279,75 +63884,24 @@
#define G_DPC_TIME_LIM(x) (((x) >> S_DPC_TIME_LIM) & M_DPC_TIME_LIM)
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_1 0x2b20
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_1 0x2b24
#define A_MAC_PORT_AET_ZFE_LIMITS_1 0x2b28
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_1 0x2b2c
#define A_MAC_PORT_AET_STATUS_1 0x2b30
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_21 0x2b34
#define A_MAC_PORT_AET_LIMITS1 0x2b38
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_2 0x2b40
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_2 0x2b44
#define A_MAC_PORT_AET_ZFE_LIMITS_2 0x2b48
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_2 0x2b4c
#define A_MAC_PORT_AET_STATUS_2 0x2b50
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_22 0x2b54
#define A_MAC_PORT_AET_LIMITS2 0x2b58
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_3 0x2b60
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_3 0x2b64
#define A_MAC_PORT_AET_ZFE_LIMITS_3 0x2b68
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_3 0x2b6c
#define A_MAC_PORT_AET_STATUS_3 0x2b70
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_23 0x2b74
#define A_MAC_PORT_AET_LIMITS3 0x2b78
#define A_T6_MAC_PORT_BEAN_CTL 0x2c00
@@ -52384,103 +64938,21 @@
#define F_BSOUTP V_BSOUTP(1U)
#define A_MAC_PORT_TX_LINKB_TRANSMIT_CONFIGURATION_MODE 0x3100
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TEST_CONTROL 0x3104
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_COEFFICIENT_CONTROL 0x3108
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_MODE_CONTROL 0x310c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3110
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3114
#define A_MAC_PORT_TX_LINKB_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3118
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x311c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT 0x3120
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT 0x3124
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT 0x3128
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_3_COEFFICIENT 0x312c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AMPLITUDE 0x3130
#define A_MAC_PORT_TX_LINKB_TRANSMIT_POLARITY 0x3134
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3138
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x313c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3144
@@ -52503,12 +64975,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3174
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3178
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x317c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3180
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3184
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3188
@@ -52521,21 +64987,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AZ_CONTROL 0x319c
#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x31a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKB_DCCSTEP_CTL 6
#define M_TX_LINKB_DCCSTEP_CTL 0x3U
#define V_TX_LINKB_DCCSTEP_CTL(x) ((x) << S_TX_LINKB_DCCSTEP_CTL)
@@ -52553,20 +65004,9 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x31e0
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_5 0x31ec
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_4 0x31f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_3 0x31f4
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_2 0x31f8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_1 0x31fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKA_RECEIVER_CONFIGURATION_MODE 0x3200
#define S_T5_RX_LINKEN 15
@@ -54442,56 +66882,15 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_TEST_CONTROL 0x3304
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_CONTROL 0x3308
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_OFFSET_CONTROL 0x330c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_1 0x3310
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_2 0x3314
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3318
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x331c
#define A_MAC_PORT_RX_LINKB_DFE_CONTROL 0x3320
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_1 0x3324
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_2 0x3328
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_1 0x332c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_2 0x3330
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_3 0x3334
#define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_1 0x3338
#define A_MAC_PORT_RX_LINKB_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3338
@@ -54515,12 +66914,6 @@
#define A_MAC_PORT_RX_LINKB_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x336c
#define A_MAC_PORT_RX_LINKB_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3370
#define A_MAC_PORT_RX_LINKB_DYNAMIC_DATA_CENTERING_DDC 0x3374
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS 0x3378
#define S_RX_LINKB_ACCCMP_RIS 11
@@ -54550,20 +66943,6 @@
#define A_MAC_PORT_RX_LINKB_INTEGRATOR_DAC_OFFSET 0x33a4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_CONTROL 0x33a8
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS 0x33ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_ERROR_COUNT 0x33b0
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x33b4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x33b8
@@ -54611,103 +66990,21 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x33f8
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_1 0x33fc
#define A_MAC_PORT_TX_LINKC_TRANSMIT_CONFIGURATION_MODE 0x3400
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TEST_CONTROL 0x3404
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_COEFFICIENT_CONTROL 0x3408
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_MODE_CONTROL 0x340c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3410
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3414
#define A_MAC_PORT_TX_LINKC_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3418
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x341c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT 0x3420
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT 0x3424
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT 0x3428
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_3_COEFFICIENT 0x342c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AMPLITUDE 0x3430
#define A_MAC_PORT_TX_LINKC_TRANSMIT_POLARITY 0x3434
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3438
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x343c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3444
@@ -54730,12 +67027,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3474
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3478
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x347c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3480
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3484
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3488
@@ -54748,21 +67039,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AZ_CONTROL 0x349c
#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x34a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKC_DCCSTEP_CTL 6
#define M_TX_LINKC_DCCSTEP_CTL 0x3U
#define V_TX_LINKC_DCCSTEP_CTL(x) ((x) << S_TX_LINKC_DCCSTEP_CTL)
@@ -54780,118 +67056,25 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x34e0
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_5 0x34ec
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_4 0x34f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_3 0x34f4
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_2 0x34f8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_1 0x34fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_CONFIGURATION_MODE 0x3500
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TEST_CONTROL 0x3504
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_COEFFICIENT_CONTROL 0x3508
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_MODE_CONTROL 0x350c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3510
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3514
#define A_MAC_PORT_TX_LINKD_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3518
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x351c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT 0x3520
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT 0x3524
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT 0x3528
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_3_COEFFICIENT 0x352c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AMPLITUDE 0x3530
#define A_MAC_PORT_TX_LINKD_TRANSMIT_POLARITY 0x3534
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3538
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x353c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3544
@@ -54914,12 +67097,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3574
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3578
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x357c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3580
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3584
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3588
@@ -54932,21 +67109,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AZ_CONTROL 0x359c
#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x35a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKD_DCCSTEP_CTL 6
#define M_TX_LINKD_DCCSTEP_CTL 0x3U
#define V_TX_LINKD_DCCSTEP_CTL(x) ((x) << S_TX_LINKD_DCCSTEP_CTL)
@@ -54964,74 +67126,22 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x35e0
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_5 0x35ec
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_4 0x35f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_3 0x35f4
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_2 0x35f8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_1 0x35fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_CONFIGURATION_MODE 0x3600
#define A_MAC_PORT_RX_LINKC_RECEIVER_TEST_CONTROL 0x3604
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_CONTROL 0x3608
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_OFFSET_CONTROL 0x360c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_1 0x3610
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_2 0x3614
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3618
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x361c
#define A_MAC_PORT_RX_LINKC_DFE_CONTROL 0x3620
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_1 0x3624
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_2 0x3628
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_1 0x362c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_2 0x3630
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_3 0x3634
#define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_1 0x3638
#define A_MAC_PORT_RX_LINKC_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3638
@@ -55055,12 +67165,6 @@
#define A_MAC_PORT_RX_LINKC_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x366c
#define A_MAC_PORT_RX_LINKC_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3670
#define A_MAC_PORT_RX_LINKC_DYNAMIC_DATA_CENTERING_DDC 0x3674
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS 0x3678
#define S_RX_LINKC_ACCCMP_RIS 11
@@ -55090,20 +67194,6 @@
#define A_MAC_PORT_RX_LINKC_INTEGRATOR_DAC_OFFSET 0x36a4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_CONTROL 0x36a8
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS 0x36ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_ERROR_COUNT 0x36b0
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x36b4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x36b8
@@ -55154,56 +67244,15 @@
#define A_MAC_PORT_RX_LINKD_RECEIVER_TEST_CONTROL 0x3704
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_CONTROL 0x3708
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_OFFSET_CONTROL 0x370c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_1 0x3710
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_2 0x3714
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3718
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x371c
#define A_MAC_PORT_RX_LINKD_DFE_CONTROL 0x3720
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_1 0x3724
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_2 0x3728
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_1 0x372c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_2 0x3730
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_3 0x3734
#define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_1 0x3738
#define A_MAC_PORT_RX_LINKD_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3738
@@ -55227,12 +67276,6 @@
#define A_MAC_PORT_RX_LINKD_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x376c
#define A_MAC_PORT_RX_LINKD_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3770
#define A_MAC_PORT_RX_LINKD_DYNAMIC_DATA_CENTERING_DDC 0x3774
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS 0x3778
#define S_RX_LINKD_ACCCMP_RIS 11
@@ -55262,20 +67305,6 @@
#define A_MAC_PORT_RX_LINKD_INTEGRATOR_DAC_OFFSET 0x37a4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_CONTROL 0x37a8
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS 0x37ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_ERROR_COUNT 0x37b0
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x37b4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x37b8
@@ -55597,103 +67626,21 @@
#define F_MACROTEST V_MACROTEST(1U)
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_CONFIGURATION_MODE 0x3900
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TEST_CONTROL 0x3904
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_COEFFICIENT_CONTROL 0x3908
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_MODE_CONTROL 0x390c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3910
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3914
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3918
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x391c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT 0x3920
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT 0x3924
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT 0x3928
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_3_COEFFICIENT 0x392c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AMPLITUDE 0x3930
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_POLARITY 0x3934
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3938
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x393c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3944
@@ -55716,12 +67663,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3974
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3978
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x397c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3980
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3984
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3988
@@ -55734,21 +67675,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AZ_CONTROL 0x399c
#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x39a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINK_BCST_DCCSTEP_CTL 6
#define M_TX_LINK_BCST_DCCSTEP_CTL 0x3U
#define V_TX_LINK_BCST_DCCSTEP_CTL(x) ((x) << S_TX_LINK_BCST_DCCSTEP_CTL)
@@ -55766,74 +67692,22 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x39e0
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_5 0x39ec
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_4 0x39f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_3 0x39f4
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_2 0x39f8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_1 0x39fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_CONFIGURATION_MODE 0x3a00
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_TEST_CONTROL 0x3a04
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_CONTROL 0x3a08
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_OFFSET_CONTROL 0x3a0c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_1 0x3a10
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_2 0x3a14
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3a18
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x3a1c
#define A_MAC_PORT_RX_LINK_BCST_DFE_CONTROL 0x3a20
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_1 0x3a24
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_2 0x3a28
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_1 0x3a2c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_2 0x3a30
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_3 0x3a34
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_1 0x3a38
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3a38
@@ -55857,12 +67731,6 @@
#define A_MAC_PORT_RX_LINK_BCST_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x3a6c
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3a70
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_DATA_CENTERING_DDC 0x3a74
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS 0x3a78
#define S_RX_LINK_BCST_ACCCMP_RIS 11
@@ -55892,20 +67760,6 @@
#define A_MAC_PORT_RX_LINK_BCST_INTEGRATOR_DAC_OFFSET 0x3aa4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_CONTROL 0x3aa8
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS 0x3aac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_ERROR_COUNT 0x3ab0
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x3ab4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x3ab8
@@ -56304,17 +68158,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56323,17 +68166,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56349,17 +68181,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56368,17 +68189,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56394,17 +68204,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56413,17 +68212,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56439,17 +68227,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56458,17 +68235,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56639,17 +68405,6 @@
#define G_RX_LINKB_INDEX_DFE_EN(x) (((x) >> S_RX_LINKB_INDEX_DFE_EN) & M_RX_LINKB_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKB_DFE_H1 0x2b04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKB_DFE_H2 0x2b08
#define A_T6_MAC_PORT_RX_LINKB_DFE_H3 0x2b0c
#define A_T6_MAC_PORT_RX_LINKB_DFE_H4 0x2b10
@@ -56668,17 +68423,6 @@
#define G_RX_LINKC_INDEX_DFE_EN(x) (((x) >> S_RX_LINKC_INDEX_DFE_EN) & M_RX_LINKC_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKC_DFE_H1 0x2e04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKC_DFE_H2 0x2e08
#define A_T6_MAC_PORT_RX_LINKC_DFE_H3 0x2e0c
#define A_T6_MAC_PORT_RX_LINKC_DFE_H4 0x2e10
@@ -56697,17 +68441,6 @@
#define G_RX_LINKD_INDEX_DFE_EN(x) (((x) >> S_RX_LINKD_INDEX_DFE_EN) & M_RX_LINKD_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKD_DFE_H1 0x2f04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKD_DFE_H2 0x2f08
#define A_T6_MAC_PORT_RX_LINKD_DFE_H3 0x2f0c
#define A_T6_MAC_PORT_RX_LINKD_DFE_H4 0x2f10
@@ -56726,17 +68459,6 @@
#define G_RX_LINK_BCST_INDEX_DFE_EN(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_EN) & M_RX_LINK_BCST_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3204
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3208
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H3 0x320c
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3210
@@ -57294,69 +69016,21 @@
#define G_BANK(x) (((x) >> S_BANK) & M_BANK)
#define A_MC_LMC_INITSEQ1 0x40148
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD1 0x4014c
#define A_MC_LMC_INITSEQ2 0x40150
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD2 0x40154
#define A_MC_LMC_INITSEQ3 0x40158
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD3 0x4015c
#define A_MC_LMC_INITSEQ4 0x40160
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD4 0x40164
#define A_MC_LMC_INITSEQ5 0x40168
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD5 0x4016c
#define A_MC_LMC_INITSEQ6 0x40170
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD6 0x40174
#define A_MC_LMC_INITSEQ7 0x40178
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD7 0x4017c
#define A_MC_UPCTL_ECCCFG 0x40180
#define A_MC_LMC_INITSEQ8 0x40180
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCTST 0x40184
#define S_ECC_TEST_MASK0 0
@@ -57367,61 +69041,19 @@
#define A_MC_LMC_CMD8 0x40184
#define A_MC_UPCTL_ECCCLR 0x40188
#define A_MC_LMC_INITSEQ9 0x40188
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCLOG 0x4018c
#define A_MC_LMC_CMD9 0x4018c
#define A_MC_LMC_INITSEQ10 0x40190
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD10 0x40194
#define A_MC_LMC_INITSEQ11 0x40198
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD11 0x4019c
#define A_MC_LMC_INITSEQ12 0x401a0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD12 0x401a4
#define A_MC_LMC_INITSEQ13 0x401a8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD13 0x401ac
#define A_MC_LMC_INITSEQ14 0x401b0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD14 0x401b4
#define A_MC_LMC_INITSEQ15 0x401b8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD15 0x401bc
#define A_MC_UPCTL_DTUWACTL 0x40200
@@ -61990,6 +73622,11 @@
#define V_NUMPIPESTAGES(x) ((x) << S_NUMPIPESTAGES)
#define G_NUMPIPESTAGES(x) (((x) >> S_NUMPIPESTAGES) & M_NUMPIPESTAGES)
+#define S_DRAMREFENABLE 27
+#define M_DRAMREFENABLE 0x3U
+#define V_DRAMREFENABLE(x) ((x) << S_DRAMREFENABLE)
+#define G_DRAMREFENABLE(x) (((x) >> S_DRAMREFENABLE) & M_DRAMREFENABLE)
+
#define A_EDC_H_DBG_MA_CMD_INTF 0x50300
#define S_MCMDADDR 12
@@ -62372,12 +74009,51 @@
#define V_REFCNT(x) ((x) << S_REFCNT)
#define G_REFCNT(x) (((x) >> S_REFCNT) & M_REFCNT)
+#define A_EDC_H_PAR_CAUSE 0x50404
+
+#define S_STG_CMDQ_PARERR_CAUSE 7
+#define V_STG_CMDQ_PARERR_CAUSE(x) ((x) << S_STG_CMDQ_PARERR_CAUSE)
+#define F_STG_CMDQ_PARERR_CAUSE V_STG_CMDQ_PARERR_CAUSE(1U)
+
+#define S_STG_WRDQ_PARERR_CAUSE 6
+#define V_STG_WRDQ_PARERR_CAUSE(x) ((x) << S_STG_WRDQ_PARERR_CAUSE)
+#define F_STG_WRDQ_PARERR_CAUSE V_STG_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_CMDQ_PARERR_CAUSE 5
+#define V_INP_CMDQ_PARERR_CAUSE(x) ((x) << S_INP_CMDQ_PARERR_CAUSE)
+#define F_INP_CMDQ_PARERR_CAUSE V_INP_CMDQ_PARERR_CAUSE(1U)
+
+#define S_INP_WRDQ_PARERR_CAUSE 4
+#define V_INP_WRDQ_PARERR_CAUSE(x) ((x) << S_INP_WRDQ_PARERR_CAUSE)
+#define F_INP_WRDQ_PARERR_CAUSE V_INP_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_BEQ_PARERR_CAUSE 3
+#define V_INP_BEQ_PARERR_CAUSE(x) ((x) << S_INP_BEQ_PARERR_CAUSE)
+#define F_INP_BEQ_PARERR_CAUSE V_INP_BEQ_PARERR_CAUSE(1U)
+
+#define S_ECC_CE_PAR_ENABLE_CAUSE 2
+#define V_ECC_CE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_CE_PAR_ENABLE_CAUSE)
+#define F_ECC_CE_PAR_ENABLE_CAUSE V_ECC_CE_PAR_ENABLE_CAUSE(1U)
+
+#define S_ECC_UE_PAR_ENABLE_CAUSE 1
+#define V_ECC_UE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_UE_PAR_ENABLE_CAUSE)
+#define F_ECC_UE_PAR_ENABLE_CAUSE V_ECC_UE_PAR_ENABLE_CAUSE(1U)
+
+#define S_RDDQ_PARERR_CAUSE 0
+#define V_RDDQ_PARERR_CAUSE(x) ((x) << S_RDDQ_PARERR_CAUSE)
+#define F_RDDQ_PARERR_CAUSE V_RDDQ_PARERR_CAUSE(1U)
+
/* registers for module EDC_T61 */
#define EDC_T61_BASE_ADDR 0x50800
/* registers for module HMA_T6 */
#define HMA_T6_BASE_ADDR 0x51000
+#define S_T7_CLIENT_EN 0
+#define M_T7_CLIENT_EN 0x7fffU
+#define V_T7_CLIENT_EN(x) ((x) << S_T7_CLIENT_EN)
+#define G_T7_CLIENT_EN(x) (((x) >> S_T7_CLIENT_EN) & M_T7_CLIENT_EN)
+
#define S_TPH 12
#define M_TPH 0x3U
#define V_TPH(x) ((x) << S_TPH)
@@ -62398,6 +74074,14 @@
#define V_OP_MODE(x) ((x) << S_OP_MODE)
#define F_OP_MODE V_OP_MODE(1U)
+#define S_GK_ENABLE 30
+#define V_GK_ENABLE(x) ((x) << S_GK_ENABLE)
+#define F_GK_ENABLE V_GK_ENABLE(1U)
+
+#define S_DBGCNTRST 29
+#define V_DBGCNTRST(x) ((x) << S_DBGCNTRST)
+#define F_DBGCNTRST V_DBGCNTRST(1U)
+
#define A_HMA_TLB_ACCESS 0x51028
#define S_INV_ALL 29
@@ -62437,6 +74121,11 @@
#define V_REGION(x) ((x) << S_REGION)
#define G_REGION(x) (((x) >> S_REGION) & M_REGION)
+#define S_T7_VA 8
+#define M_T7_VA 0xffffffU
+#define V_T7_VA(x) ((x) << S_T7_VA)
+#define G_T7_VA(x) (((x) >> S_T7_VA) & M_T7_VA)
+
#define A_HMA_TLB_DESC_0_H 0x51030
#define A_HMA_TLB_DESC_0_L 0x51034
#define A_HMA_TLB_DESC_1_H 0x51038
@@ -62460,6 +74149,11 @@
#define V_ADDR0_MIN(x) ((x) << S_ADDR0_MIN)
#define G_ADDR0_MIN(x) (((x) >> S_ADDR0_MIN) & M_ADDR0_MIN)
+#define S_REG0MINADDR0MIN 8
+#define M_REG0MINADDR0MIN 0xffffffU
+#define V_REG0MINADDR0MIN(x) ((x) << S_REG0MINADDR0MIN)
+#define G_REG0MINADDR0MIN(x) (((x) >> S_REG0MINADDR0MIN) & M_REG0MINADDR0MIN)
+
#define A_HMA_REG0_MAX 0x51074
#define S_ADDR0_MAX 12
@@ -62467,6 +74161,11 @@
#define V_ADDR0_MAX(x) ((x) << S_ADDR0_MAX)
#define G_ADDR0_MAX(x) (((x) >> S_ADDR0_MAX) & M_ADDR0_MAX)
+#define S_REG0MAXADDR0MAX 8
+#define M_REG0MAXADDR0MAX 0xffffffU
+#define V_REG0MAXADDR0MAX(x) ((x) << S_REG0MAXADDR0MAX)
+#define G_REG0MAXADDR0MAX(x) (((x) >> S_REG0MAXADDR0MAX) & M_REG0MAXADDR0MAX)
+
#define A_HMA_REG0_MASK 0x51078
#define S_PAGE_SIZE0 12
@@ -62475,6 +74174,7 @@
#define G_PAGE_SIZE0(x) (((x) >> S_PAGE_SIZE0) & M_PAGE_SIZE0)
#define A_HMA_REG0_BASE 0x5107c
+#define A_HMA_REG0_BASE_LSB 0x5107c
#define A_HMA_REG1_MIN 0x51080
#define S_ADDR1_MIN 12
@@ -62482,6 +74182,11 @@
#define V_ADDR1_MIN(x) ((x) << S_ADDR1_MIN)
#define G_ADDR1_MIN(x) (((x) >> S_ADDR1_MIN) & M_ADDR1_MIN)
+#define S_REG1MINADDR1MIN 8
+#define M_REG1MINADDR1MIN 0xffffffU
+#define V_REG1MINADDR1MIN(x) ((x) << S_REG1MINADDR1MIN)
+#define G_REG1MINADDR1MIN(x) (((x) >> S_REG1MINADDR1MIN) & M_REG1MINADDR1MIN)
+
#define A_HMA_REG1_MAX 0x51084
#define S_ADDR1_MAX 12
@@ -62489,6 +74194,11 @@
#define V_ADDR1_MAX(x) ((x) << S_ADDR1_MAX)
#define G_ADDR1_MAX(x) (((x) >> S_ADDR1_MAX) & M_ADDR1_MAX)
+#define S_REG1MAXADDR1MAX 8
+#define M_REG1MAXADDR1MAX 0xffffffU
+#define V_REG1MAXADDR1MAX(x) ((x) << S_REG1MAXADDR1MAX)
+#define G_REG1MAXADDR1MAX(x) (((x) >> S_REG1MAXADDR1MAX) & M_REG1MAXADDR1MAX)
+
#define A_HMA_REG1_MASK 0x51088
#define S_PAGE_SIZE1 12
@@ -62497,6 +74207,7 @@
#define G_PAGE_SIZE1(x) (((x) >> S_PAGE_SIZE1) & M_PAGE_SIZE1)
#define A_HMA_REG1_BASE 0x5108c
+#define A_HMA_REG1_BASE_LSB 0x5108c
#define A_HMA_REG2_MIN 0x51090
#define S_ADDR2_MIN 12
@@ -62504,6 +74215,11 @@
#define V_ADDR2_MIN(x) ((x) << S_ADDR2_MIN)
#define G_ADDR2_MIN(x) (((x) >> S_ADDR2_MIN) & M_ADDR2_MIN)
+#define S_REG2MINADDR2MIN 8
+#define M_REG2MINADDR2MIN 0xffffffU
+#define V_REG2MINADDR2MIN(x) ((x) << S_REG2MINADDR2MIN)
+#define G_REG2MINADDR2MIN(x) (((x) >> S_REG2MINADDR2MIN) & M_REG2MINADDR2MIN)
+
#define A_HMA_REG2_MAX 0x51094
#define S_ADDR2_MAX 12
@@ -62511,6 +74227,11 @@
#define V_ADDR2_MAX(x) ((x) << S_ADDR2_MAX)
#define G_ADDR2_MAX(x) (((x) >> S_ADDR2_MAX) & M_ADDR2_MAX)
+#define S_REG2MAXADDR2MAX 8
+#define M_REG2MAXADDR2MAX 0xffffffU
+#define V_REG2MAXADDR2MAX(x) ((x) << S_REG2MAXADDR2MAX)
+#define G_REG2MAXADDR2MAX(x) (((x) >> S_REG2MAXADDR2MAX) & M_REG2MAXADDR2MAX)
+
#define A_HMA_REG2_MASK 0x51098
#define S_PAGE_SIZE2 12
@@ -62519,6 +74240,7 @@
#define G_PAGE_SIZE2(x) (((x) >> S_PAGE_SIZE2) & M_PAGE_SIZE2)
#define A_HMA_REG2_BASE 0x5109c
+#define A_HMA_REG2_BASE_LSB 0x5109c
#define A_HMA_REG3_MIN 0x510a0
#define S_ADDR3_MIN 12
@@ -62526,6 +74248,11 @@
#define V_ADDR3_MIN(x) ((x) << S_ADDR3_MIN)
#define G_ADDR3_MIN(x) (((x) >> S_ADDR3_MIN) & M_ADDR3_MIN)
+#define S_REG3MINADDR3MIN 8
+#define M_REG3MINADDR3MIN 0xffffffU
+#define V_REG3MINADDR3MIN(x) ((x) << S_REG3MINADDR3MIN)
+#define G_REG3MINADDR3MIN(x) (((x) >> S_REG3MINADDR3MIN) & M_REG3MINADDR3MIN)
+
#define A_HMA_REG3_MAX 0x510a4
#define S_ADDR3_MAX 12
@@ -62533,6 +74260,11 @@
#define V_ADDR3_MAX(x) ((x) << S_ADDR3_MAX)
#define G_ADDR3_MAX(x) (((x) >> S_ADDR3_MAX) & M_ADDR3_MAX)
+#define S_REG3MAXADDR3MAX 8
+#define M_REG3MAXADDR3MAX 0xffffffU
+#define V_REG3MAXADDR3MAX(x) ((x) << S_REG3MAXADDR3MAX)
+#define G_REG3MAXADDR3MAX(x) (((x) >> S_REG3MAXADDR3MAX) & M_REG3MAXADDR3MAX)
+
#define A_HMA_REG3_MASK 0x510a8
#define S_PAGE_SIZE3 12
@@ -62541,6 +74273,7 @@
#define G_PAGE_SIZE3(x) (((x) >> S_PAGE_SIZE3) & M_PAGE_SIZE3)
#define A_HMA_REG3_BASE 0x510ac
+#define A_HMA_REG3_BASE_LSB 0x510ac
#define A_HMA_SW_SYNC 0x510b0
#define S_ENTER_SYNC 31
@@ -62551,6 +74284,84 @@
#define V_EXIT_SYNC(x) ((x) << S_EXIT_SYNC)
#define F_EXIT_SYNC V_EXIT_SYNC(1U)
+#define A_HMA_GC_MODE_SEL 0x510b4
+
+#define S_MODE_SEL 8
+#define M_MODE_SEL 0x3U
+#define V_MODE_SEL(x) ((x) << S_MODE_SEL)
+#define G_MODE_SEL(x) (((x) >> S_MODE_SEL) & M_MODE_SEL)
+
+#define S_FLUSH_REQ 4
+#define V_FLUSH_REQ(x) ((x) << S_FLUSH_REQ)
+#define F_FLUSH_REQ V_FLUSH_REQ(1U)
+
+#define S_CLEAR_REQ 0
+#define V_CLEAR_REQ(x) ((x) << S_CLEAR_REQ)
+#define F_CLEAR_REQ V_CLEAR_REQ(1U)
+
+#define A_HMA_REG0_BASE_MSB 0x510b8
+
+#define S_BASE0_MSB 0
+#define M_BASE0_MSB 0xfU
+#define V_BASE0_MSB(x) ((x) << S_BASE0_MSB)
+#define G_BASE0_MSB(x) (((x) >> S_BASE0_MSB) & M_BASE0_MSB)
+
+#define A_HMA_REG1_BASE_MSB 0x510bc
+
+#define S_BASE1_MSB 0
+#define M_BASE1_MSB 0xfU
+#define V_BASE1_MSB(x) ((x) << S_BASE1_MSB)
+#define G_BASE1_MSB(x) (((x) >> S_BASE1_MSB) & M_BASE1_MSB)
+
+#define A_HMA_REG2_BASE_MSB 0x510c0
+
+#define S_BASE2_MSB 0
+#define M_BASE2_MSB 0xfU
+#define V_BASE2_MSB(x) ((x) << S_BASE2_MSB)
+#define G_BASE2_MSB(x) (((x) >> S_BASE2_MSB) & M_BASE2_MSB)
+
+#define A_HMA_REG3_BASE_MSB 0x510c4
+
+#define S_BASE3_MSB 0
+#define M_BASE3_MSB 0xfU
+#define V_BASE3_MSB(x) ((x) << S_BASE3_MSB)
+#define G_BASE3_MSB(x) (((x) >> S_BASE3_MSB) & M_BASE3_MSB)
+
+#define A_HMA_DBG_CTL 0x51104
+#define A_HMA_DBG_DATA 0x51108
+#define A_HMA_H_BIST_CMD 0x51200
+#define A_HMA_H_BIST_CMD_ADDR 0x51204
+#define A_HMA_H_BIST_CMD_LEN 0x51208
+#define A_HMA_H_BIST_DATA_PATTERN 0x5120c
+#define A_HMA_H_BIST_USER_WDATA0 0x51210
+#define A_HMA_H_BIST_USER_WDATA1 0x51214
+#define A_HMA_H_BIST_USER_WDATA2 0x51218
+#define A_HMA_H_BIST_NUM_ERR 0x5121c
+#define A_HMA_H_BIST_ERR_FIRST_ADDR 0x51220
+#define A_HMA_H_BIST_STATUS_RDATA 0x51224
+#define A_HMA_H_BIST_CRC_SEED 0x5126c
+#define A_HMA_TABLE_LINE1_MSB 0x51270
+
+#define S_STARTA 0
+#define M_STARTA 0xfU
+#define V_STARTA(x) ((x) << S_STARTA)
+#define G_STARTA(x) (((x) >> S_STARTA) & M_STARTA)
+
+#define A_HMA_TABLE_LINE2_MSB 0x51274
+
+#define S_ENDA 0
+#define M_ENDA 0xfU
+#define V_ENDA(x) ((x) << S_ENDA)
+#define G_ENDA(x) (((x) >> S_ENDA) & M_ENDA)
+
+#define S_GK_UF_PAR_ENABLE 6
+#define V_GK_UF_PAR_ENABLE(x) ((x) << S_GK_UF_PAR_ENABLE)
+#define F_GK_UF_PAR_ENABLE V_GK_UF_PAR_ENABLE(1U)
+
+#define S_PCIEMST_PAR_ENABLE 2
+#define V_PCIEMST_PAR_ENABLE(x) ((x) << S_PCIEMST_PAR_ENABLE)
+#define F_PCIEMST_PAR_ENABLE V_PCIEMST_PAR_ENABLE(1U)
+
#define S_IDTF_INT_ENABLE 5
#define V_IDTF_INT_ENABLE(x) ((x) << S_IDTF_INT_ENABLE)
#define F_IDTF_INT_ENABLE V_IDTF_INT_ENABLE(1U)
@@ -62571,6 +74382,10 @@
#define V_MAMST_INT_ENABLE(x) ((x) << S_MAMST_INT_ENABLE)
#define F_MAMST_INT_ENABLE V_MAMST_INT_ENABLE(1U)
+#define S_GK_UF_INT_ENABLE 6
+#define V_GK_UF_INT_ENABLE(x) ((x) << S_GK_UF_INT_ENABLE)
+#define F_GK_UF_INT_ENABLE V_GK_UF_INT_ENABLE(1U)
+
#define S_IDTF_INT_CAUSE 5
#define V_IDTF_INT_CAUSE(x) ((x) << S_IDTF_INT_CAUSE)
#define F_IDTF_INT_CAUSE V_IDTF_INT_CAUSE(1U)
@@ -62591,6 +74406,10 @@
#define V_MAMST_INT_CAUSE(x) ((x) << S_MAMST_INT_CAUSE)
#define F_MAMST_INT_CAUSE V_MAMST_INT_CAUSE(1U)
+#define S_GK_UF_INT_CAUSE 6
+#define V_GK_UF_INT_CAUSE(x) ((x) << S_GK_UF_INT_CAUSE)
+#define F_GK_UF_INT_CAUSE V_GK_UF_INT_CAUSE(1U)
+
#define A_HMA_MA_MST_ERR 0x5130c
#define A_HMA_RTF_ERR 0x51310
#define A_HMA_OTF_ERR 0x51314
@@ -62904,3 +74723,12365 @@
#define M_RD_EOP_CNT 0xffU
#define V_RD_EOP_CNT(x) ((x) << S_RD_EOP_CNT)
#define G_RD_EOP_CNT(x) (((x) >> S_RD_EOP_CNT) & M_RD_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 16
+#define M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 8
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+
+/* registers for module MAC_T7 */
+#define MAC_T7_BASE_ADDR 0x38000
+
+#define S_T7_PORT_MAP 21
+#define M_T7_PORT_MAP 0x7U
+#define V_T7_PORT_MAP(x) ((x) << S_T7_PORT_MAP)
+#define G_T7_PORT_MAP(x) (((x) >> S_T7_PORT_MAP) & M_T7_PORT_MAP)
+
+#define S_T7_SMUX_RX_LOOP 17
+#define M_T7_SMUX_RX_LOOP 0xfU
+#define V_T7_SMUX_RX_LOOP(x) ((x) << S_T7_SMUX_RX_LOOP)
+#define G_T7_SMUX_RX_LOOP(x) (((x) >> S_T7_SMUX_RX_LOOP) & M_T7_SMUX_RX_LOOP)
+
+#define S_T7_SIGNAL_DET 15
+#define V_T7_SIGNAL_DET(x) ((x) << S_T7_SIGNAL_DET)
+#define F_T7_SIGNAL_DET V_T7_SIGNAL_DET(1U)
+
+#define S_CFG_MAC_2_MPS_FULL 13
+#define V_CFG_MAC_2_MPS_FULL(x) ((x) << S_CFG_MAC_2_MPS_FULL)
+#define F_CFG_MAC_2_MPS_FULL V_CFG_MAC_2_MPS_FULL(1U)
+
+#define S_MPS_FULL_SEL 12
+#define V_MPS_FULL_SEL(x) ((x) << S_MPS_FULL_SEL)
+#define F_MPS_FULL_SEL V_MPS_FULL_SEL(1U)
+
+#define S_T7_SMUXTXSEL 8
+#define M_T7_SMUXTXSEL 0xfU
+#define V_T7_SMUXTXSEL(x) ((x) << S_T7_SMUXTXSEL)
+#define G_T7_SMUXTXSEL(x) (((x) >> S_T7_SMUXTXSEL) & M_T7_SMUXTXSEL)
+
+#define S_T7_PORTSPEED 4
+#define M_T7_PORTSPEED 0xfU
+#define V_T7_PORTSPEED(x) ((x) << S_T7_PORTSPEED)
+#define G_T7_PORTSPEED(x) (((x) >> S_T7_PORTSPEED) & M_T7_PORTSPEED)
+
+#define S_MTIP_REG_RESET 25
+#define V_MTIP_REG_RESET(x) ((x) << S_MTIP_REG_RESET)
+#define F_MTIP_REG_RESET V_MTIP_REG_RESET(1U)
+
+#define S_RESET_REG_CLK_I 24
+#define V_RESET_REG_CLK_I(x) ((x) << S_RESET_REG_CLK_I)
+#define F_RESET_REG_CLK_I V_RESET_REG_CLK_I(1U)
+
+#define S_T7_LED1_CFG1 15
+#define M_T7_LED1_CFG1 0x7U
+#define V_T7_LED1_CFG1(x) ((x) << S_T7_LED1_CFG1)
+#define G_T7_LED1_CFG1(x) (((x) >> S_T7_LED1_CFG1) & M_T7_LED1_CFG1)
+
+#define S_T7_LED0_CFG1 12
+#define M_T7_LED0_CFG1 0x7U
+#define V_T7_LED0_CFG1(x) ((x) << S_T7_LED0_CFG1)
+#define G_T7_LED0_CFG1(x) (((x) >> S_T7_LED0_CFG1) & M_T7_LED0_CFG1)
+
+#define A_T7_MAC_PORT_MAGIC_MACID_LO 0x820
+#define A_T7_MAC_PORT_MAGIC_MACID_HI 0x824
+#define A_T7_MAC_PORT_LINK_STATUS 0x828
+
+#define S_EGR_SE_CNT_ERR 9
+#define V_EGR_SE_CNT_ERR(x) ((x) << S_EGR_SE_CNT_ERR)
+#define F_EGR_SE_CNT_ERR V_EGR_SE_CNT_ERR(1U)
+
+#define S_INGR_SE_CNT_ERR 8
+#define V_INGR_SE_CNT_ERR(x) ((x) << S_INGR_SE_CNT_ERR)
+#define F_INGR_SE_CNT_ERR V_INGR_SE_CNT_ERR(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN_100G 0x82c
+
+#define S_PERR_PCSR_FDM_3 21
+#define V_PERR_PCSR_FDM_3(x) ((x) << S_PERR_PCSR_FDM_3)
+#define F_PERR_PCSR_FDM_3 V_PERR_PCSR_FDM_3(1U)
+
+#define S_PERR_PCSR_FDM_2 20
+#define V_PERR_PCSR_FDM_2(x) ((x) << S_PERR_PCSR_FDM_2)
+#define F_PERR_PCSR_FDM_2 V_PERR_PCSR_FDM_2(1U)
+
+#define S_PERR_PCSR_FDM_1 19
+#define V_PERR_PCSR_FDM_1(x) ((x) << S_PERR_PCSR_FDM_1)
+#define F_PERR_PCSR_FDM_1 V_PERR_PCSR_FDM_1(1U)
+
+#define S_PERR_PCSR_FDM_0 18
+#define V_PERR_PCSR_FDM_0(x) ((x) << S_PERR_PCSR_FDM_0)
+#define F_PERR_PCSR_FDM_0 V_PERR_PCSR_FDM_0(1U)
+
+#define S_PERR_PCSR_FM_3 17
+#define V_PERR_PCSR_FM_3(x) ((x) << S_PERR_PCSR_FM_3)
+#define F_PERR_PCSR_FM_3 V_PERR_PCSR_FM_3(1U)
+
+#define S_PERR_PCSR_FM_2 16
+#define V_PERR_PCSR_FM_2(x) ((x) << S_PERR_PCSR_FM_2)
+#define F_PERR_PCSR_FM_2 V_PERR_PCSR_FM_2(1U)
+
+#define S_PERR_PCSR_FM_1 15
+#define V_PERR_PCSR_FM_1(x) ((x) << S_PERR_PCSR_FM_1)
+#define F_PERR_PCSR_FM_1 V_PERR_PCSR_FM_1(1U)
+
+#define S_PERR_PCSR_FM_0 14
+#define V_PERR_PCSR_FM_0(x) ((x) << S_PERR_PCSR_FM_0)
+#define F_PERR_PCSR_FM_0 V_PERR_PCSR_FM_0(1U)
+
+#define S_PERR_PCSR_DM_1 13
+#define V_PERR_PCSR_DM_1(x) ((x) << S_PERR_PCSR_DM_1)
+#define F_PERR_PCSR_DM_1 V_PERR_PCSR_DM_1(1U)
+
+#define S_PERR_PCSR_DM_0 12
+#define V_PERR_PCSR_DM_0(x) ((x) << S_PERR_PCSR_DM_0)
+#define F_PERR_PCSR_DM_0 V_PERR_PCSR_DM_0(1U)
+
+#define S_PERR_PCSR_DK_3 11
+#define V_PERR_PCSR_DK_3(x) ((x) << S_PERR_PCSR_DK_3)
+#define F_PERR_PCSR_DK_3 V_PERR_PCSR_DK_3(1U)
+
+#define S_PERR_PCSR_DK_2 10
+#define V_PERR_PCSR_DK_2(x) ((x) << S_PERR_PCSR_DK_2)
+#define F_PERR_PCSR_DK_2 V_PERR_PCSR_DK_2(1U)
+
+#define S_PERR_PCSR_DK_1 9
+#define V_PERR_PCSR_DK_1(x) ((x) << S_PERR_PCSR_DK_1)
+#define F_PERR_PCSR_DK_1 V_PERR_PCSR_DK_1(1U)
+
+#define S_PERR_PCSR_DK_0 8
+#define V_PERR_PCSR_DK_0(x) ((x) << S_PERR_PCSR_DK_0)
+#define F_PERR_PCSR_DK_0 V_PERR_PCSR_DK_0(1U)
+
+#define S_PERR_F91RO_1 7
+#define V_PERR_F91RO_1(x) ((x) << S_PERR_F91RO_1)
+#define F_PERR_F91RO_1 V_PERR_F91RO_1(1U)
+
+#define S_PERR_F91RO_0 6
+#define V_PERR_F91RO_0(x) ((x) << S_PERR_F91RO_0)
+#define F_PERR_F91RO_0 V_PERR_F91RO_0(1U)
+
+#define S_PERR_PCSR_F91DM 5
+#define V_PERR_PCSR_F91DM(x) ((x) << S_PERR_PCSR_F91DM)
+#define F_PERR_PCSR_F91DM V_PERR_PCSR_F91DM(1U)
+
+#define S_PERR_PCSR_F91TI 4
+#define V_PERR_PCSR_F91TI(x) ((x) << S_PERR_PCSR_F91TI)
+#define F_PERR_PCSR_F91TI V_PERR_PCSR_F91TI(1U)
+
+#define S_PERR_PCSR_F91TO 3
+#define V_PERR_PCSR_F91TO(x) ((x) << S_PERR_PCSR_F91TO)
+#define F_PERR_PCSR_F91TO V_PERR_PCSR_F91TO(1U)
+
+#define S_PERR_PCSR_F91M 2
+#define V_PERR_PCSR_F91M(x) ((x) << S_PERR_PCSR_F91M)
+#define F_PERR_PCSR_F91M V_PERR_PCSR_F91M(1U)
+
+#define S_PERR_PCSR_80_16_1 1
+#define V_PERR_PCSR_80_16_1(x) ((x) << S_PERR_PCSR_80_16_1)
+#define F_PERR_PCSR_80_16_1 V_PERR_PCSR_80_16_1(1U)
+
+#define S_PERR_PCSR_80_16_0 0
+#define V_PERR_PCSR_80_16_0(x) ((x) << S_PERR_PCSR_80_16_0)
+#define F_PERR_PCSR_80_16_0 V_PERR_PCSR_80_16_0(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_CAUSE_100G 0x830
+#define A_T7_MAC_PORT_PERR_ENABLE_100G 0x834
+#define A_MAC_PORT_MAC10G100G_CONFIG_0 0x838
+
+#define S_PEER_DELAY_VAL 31
+#define V_PEER_DELAY_VAL(x) ((x) << S_PEER_DELAY_VAL)
+#define F_PEER_DELAY_VAL V_PEER_DELAY_VAL(1U)
+
+#define S_PEER_DELAY 1
+#define M_PEER_DELAY 0x3fffffffU
+#define V_PEER_DELAY(x) ((x) << S_PEER_DELAY)
+#define G_PEER_DELAY(x) (((x) >> S_PEER_DELAY) & M_PEER_DELAY)
+
+#define S_MODE1S_ENA 0
+#define V_MODE1S_ENA(x) ((x) << S_MODE1S_ENA)
+#define F_MODE1S_ENA V_MODE1S_ENA(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_1 0x83c
+
+#define S_TX_STOP 25
+#define V_TX_STOP(x) ((x) << S_TX_STOP)
+#define F_TX_STOP V_TX_STOP(1U)
+
+#define S_T7_MODE1S_ENA 24
+#define V_T7_MODE1S_ENA(x) ((x) << S_T7_MODE1S_ENA)
+#define F_T7_MODE1S_ENA V_T7_MODE1S_ENA(1U)
+
+#define S_TX_TS_ID 12
+#define M_TX_TS_ID 0xfffU
+#define V_TX_TS_ID(x) ((x) << S_TX_TS_ID)
+#define G_TX_TS_ID(x) (((x) >> S_TX_TS_ID) & M_TX_TS_ID)
+
+#define S_T7_TX_LI_FAULT 11
+#define V_T7_TX_LI_FAULT(x) ((x) << S_T7_TX_LI_FAULT)
+#define F_T7_TX_LI_FAULT V_T7_TX_LI_FAULT(1U)
+
+#define S_XOFF_GEN 3
+#define M_XOFF_GEN 0xffU
+#define V_XOFF_GEN(x) ((x) << S_XOFF_GEN)
+#define G_XOFF_GEN(x) (((x) >> S_XOFF_GEN) & M_XOFF_GEN)
+
+#define S_TX_REM_FAULT 1
+#define V_TX_REM_FAULT(x) ((x) << S_TX_REM_FAULT)
+#define F_TX_REM_FAULT V_TX_REM_FAULT(1U)
+
+#define S_TX_LOC_FAULT 0
+#define V_TX_LOC_FAULT(x) ((x) << S_TX_LOC_FAULT)
+#define F_TX_LOC_FAULT V_TX_LOC_FAULT(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_2 0x840
+
+#define S_FF_TX_RX_TS_NS 0
+#define M_FF_TX_RX_TS_NS 0x3fffffffU
+#define V_FF_TX_RX_TS_NS(x) ((x) << S_FF_TX_RX_TS_NS)
+#define G_FF_TX_RX_TS_NS(x) (((x) >> S_FF_TX_RX_TS_NS) & M_FF_TX_RX_TS_NS)
+
+#define A_MAC_PORT_MAC10G100G_STATUS 0x844
+
+#define S_REG_LOWP 21
+#define V_REG_LOWP(x) ((x) << S_REG_LOWP)
+#define F_REG_LOWP V_REG_LOWP(1U)
+
+#define S_LI_FAULT 20
+#define V_LI_FAULT(x) ((x) << S_LI_FAULT)
+#define F_LI_FAULT V_LI_FAULT(1U)
+
+#define S_TX_ISIDLE 19
+#define V_TX_ISIDLE(x) ((x) << S_TX_ISIDLE)
+#define F_TX_ISIDLE V_TX_ISIDLE(1U)
+
+#define S_TX_UNDERFLOW 18
+#define V_TX_UNDERFLOW(x) ((x) << S_TX_UNDERFLOW)
+#define F_TX_UNDERFLOW V_TX_UNDERFLOW(1U)
+
+#define S_T7_TX_EMPTY 17
+#define V_T7_TX_EMPTY(x) ((x) << S_T7_TX_EMPTY)
+#define F_T7_TX_EMPTY V_T7_TX_EMPTY(1U)
+
+#define S_T7_1_REM_FAULT 16
+#define V_T7_1_REM_FAULT(x) ((x) << S_T7_1_REM_FAULT)
+#define F_T7_1_REM_FAULT V_T7_1_REM_FAULT(1U)
+
+#define S_REG_TS_AVAIL 15
+#define V_REG_TS_AVAIL(x) ((x) << S_REG_TS_AVAIL)
+#define F_REG_TS_AVAIL V_REG_TS_AVAIL(1U)
+
+#define S_T7_PHY_TXENA 14
+#define V_T7_PHY_TXENA(x) ((x) << S_T7_PHY_TXENA)
+#define F_T7_PHY_TXENA V_T7_PHY_TXENA(1U)
+
+#define S_T7_PFC_MODE 13
+#define V_T7_PFC_MODE(x) ((x) << S_T7_PFC_MODE)
+#define F_T7_PFC_MODE V_T7_PFC_MODE(1U)
+
+#define S_PAUSE_ON 5
+#define M_PAUSE_ON 0xffU
+#define V_PAUSE_ON(x) ((x) << S_PAUSE_ON)
+#define G_PAUSE_ON(x) (((x) >> S_PAUSE_ON) & M_PAUSE_ON)
+
+#define S_MAC_PAUSE_EN 4
+#define V_MAC_PAUSE_EN(x) ((x) << S_MAC_PAUSE_EN)
+#define F_MAC_PAUSE_EN V_MAC_PAUSE_EN(1U)
+
+#define S_MAC_ENABLE 3
+#define V_MAC_ENABLE(x) ((x) << S_MAC_ENABLE)
+#define F_MAC_ENABLE V_MAC_ENABLE(1U)
+
+#define S_LOOP_ENA 2
+#define V_LOOP_ENA(x) ((x) << S_LOOP_ENA)
+#define F_LOOP_ENA V_LOOP_ENA(1U)
+
+#define S_LOC_FAULT 1
+#define V_LOC_FAULT(x) ((x) << S_LOC_FAULT)
+#define F_LOC_FAULT V_LOC_FAULT(1U)
+
+#define S_FF_RX_EMPTY 0
+#define V_FF_RX_EMPTY(x) ((x) << S_FF_RX_EMPTY)
+#define F_FF_RX_EMPTY V_FF_RX_EMPTY(1U)
+
+#define A_MAC_PORT_MAC_AN_STATE_STATUS0 0x848
+
+#define S_AN_VAL_AN 15
+#define V_AN_VAL_AN(x) ((x) << S_AN_VAL_AN)
+#define F_AN_VAL_AN V_AN_VAL_AN(1U)
+
+#define S_AN_TR_DIS_STATUS_AN 14
+#define V_AN_TR_DIS_STATUS_AN(x) ((x) << S_AN_TR_DIS_STATUS_AN)
+#define F_AN_TR_DIS_STATUS_AN V_AN_TR_DIS_STATUS_AN(1U)
+
+#define S_AN_STATUS_AN 13
+#define V_AN_STATUS_AN(x) ((x) << S_AN_STATUS_AN)
+#define F_AN_STATUS_AN V_AN_STATUS_AN(1U)
+
+#define S_AN_SELECT_AN 8
+#define M_AN_SELECT_AN 0x1fU
+#define V_AN_SELECT_AN(x) ((x) << S_AN_SELECT_AN)
+#define G_AN_SELECT_AN(x) (((x) >> S_AN_SELECT_AN) & M_AN_SELECT_AN)
+
+#define S_AN_RS_FEC_ENA_AN 7
+#define V_AN_RS_FEC_ENA_AN(x) ((x) << S_AN_RS_FEC_ENA_AN)
+#define F_AN_RS_FEC_ENA_AN V_AN_RS_FEC_ENA_AN(1U)
+
+#define S_AN_INT_AN 6
+#define V_AN_INT_AN(x) ((x) << S_AN_INT_AN)
+#define F_AN_INT_AN V_AN_INT_AN(1U)
+
+#define S_AN_FEC_ENA_AN 5
+#define V_AN_FEC_ENA_AN(x) ((x) << S_AN_FEC_ENA_AN)
+#define F_AN_FEC_ENA_AN V_AN_FEC_ENA_AN(1U)
+
+#define S_AN_DONE_AN 4
+#define V_AN_DONE_AN(x) ((x) << S_AN_DONE_AN)
+#define F_AN_DONE_AN V_AN_DONE_AN(1U)
+
+#define S_AN_STATE 0
+#define M_AN_STATE 0xfU
+#define V_AN_STATE(x) ((x) << S_AN_STATE)
+#define G_AN_STATE(x) (((x) >> S_AN_STATE) & M_AN_STATE)
+
+#define A_MAC_PORT_MAC_AN_STATE_STATUS1 0x84c
+#define A_T7_MAC_PORT_EPIO_DATA0 0x850
+#define A_T7_MAC_PORT_EPIO_DATA1 0x854
+#define A_T7_MAC_PORT_EPIO_DATA2 0x858
+#define A_T7_MAC_PORT_EPIO_DATA3 0x85c
+#define A_T7_MAC_PORT_EPIO_OP 0x860
+#define A_T7_MAC_PORT_WOL_STATUS 0x864
+#define A_T7_MAC_PORT_INT_EN 0x868
+
+#define S_MAC2MPS_PERR 31
+#define V_MAC2MPS_PERR(x) ((x) << S_MAC2MPS_PERR)
+#define F_MAC2MPS_PERR V_MAC2MPS_PERR(1U)
+
+#define S_MAC_PPS_INT_EN 30
+#define V_MAC_PPS_INT_EN(x) ((x) << S_MAC_PPS_INT_EN)
+#define F_MAC_PPS_INT_EN V_MAC_PPS_INT_EN(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_EN 29
+#define V_MAC_TX_TS_AVAIL_INT_EN(x) ((x) << S_MAC_TX_TS_AVAIL_INT_EN)
+#define F_MAC_TX_TS_AVAIL_INT_EN V_MAC_TX_TS_AVAIL_INT_EN(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_EN 28
+#define V_MAC_SINGLE_ALARM_INT_EN(x) ((x) << S_MAC_SINGLE_ALARM_INT_EN)
+#define F_MAC_SINGLE_ALARM_INT_EN V_MAC_SINGLE_ALARM_INT_EN(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_EN 27
+#define V_MAC_PERIODIC_ALARM_INT_EN(x) ((x) << S_MAC_PERIODIC_ALARM_INT_EN)
+#define F_MAC_PERIODIC_ALARM_INT_EN V_MAC_PERIODIC_ALARM_INT_EN(1U)
+
+#define S_MAC_PATDETWAKE_INT_EN 26
+#define V_MAC_PATDETWAKE_INT_EN(x) ((x) << S_MAC_PATDETWAKE_INT_EN)
+#define F_MAC_PATDETWAKE_INT_EN V_MAC_PATDETWAKE_INT_EN(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_EN 25
+#define V_MAC_MAGIC_WAKE_INT_EN(x) ((x) << S_MAC_MAGIC_WAKE_INT_EN)
+#define F_MAC_MAGIC_WAKE_INT_EN V_MAC_MAGIC_WAKE_INT_EN(1U)
+
+#define S_MAC_SIGDETCHG_INT_EN 24
+#define V_MAC_SIGDETCHG_INT_EN(x) ((x) << S_MAC_SIGDETCHG_INT_EN)
+#define F_MAC_SIGDETCHG_INT_EN V_MAC_SIGDETCHG_INT_EN(1U)
+
+#define S_MAC_PCS_LINK_GOOD_EN 12
+#define V_MAC_PCS_LINK_GOOD_EN(x) ((x) << S_MAC_PCS_LINK_GOOD_EN)
+#define F_MAC_PCS_LINK_GOOD_EN V_MAC_PCS_LINK_GOOD_EN(1U)
+
+#define S_MAC_PCS_LINK_FAIL_EN 11
+#define V_MAC_PCS_LINK_FAIL_EN(x) ((x) << S_MAC_PCS_LINK_FAIL_EN)
+#define F_MAC_PCS_LINK_FAIL_EN V_MAC_PCS_LINK_FAIL_EN(1U)
+
+#define S_MAC_OVRFLOW_INT_EN 10
+#define V_MAC_OVRFLOW_INT_EN(x) ((x) << S_MAC_OVRFLOW_INT_EN)
+#define F_MAC_OVRFLOW_INT_EN V_MAC_OVRFLOW_INT_EN(1U)
+
+#define S_MAC_REM_FAULT_INT_EN 7
+#define V_MAC_REM_FAULT_INT_EN(x) ((x) << S_MAC_REM_FAULT_INT_EN)
+#define F_MAC_REM_FAULT_INT_EN V_MAC_REM_FAULT_INT_EN(1U)
+
+#define S_MAC_LOC_FAULT_INT_EN 6
+#define V_MAC_LOC_FAULT_INT_EN(x) ((x) << S_MAC_LOC_FAULT_INT_EN)
+#define F_MAC_LOC_FAULT_INT_EN V_MAC_LOC_FAULT_INT_EN(1U)
+
+#define S_MAC_LINK_DOWN_INT_EN 5
+#define V_MAC_LINK_DOWN_INT_EN(x) ((x) << S_MAC_LINK_DOWN_INT_EN)
+#define F_MAC_LINK_DOWN_INT_EN V_MAC_LINK_DOWN_INT_EN(1U)
+
+#define S_MAC_LINK_UP_INT_EN 4
+#define V_MAC_LINK_UP_INT_EN(x) ((x) << S_MAC_LINK_UP_INT_EN)
+#define F_MAC_LINK_UP_INT_EN V_MAC_LINK_UP_INT_EN(1U)
+
+#define S_MAC_AN_DONE_INT_EN 3
+#define V_MAC_AN_DONE_INT_EN(x) ((x) << S_MAC_AN_DONE_INT_EN)
+#define F_MAC_AN_DONE_INT_EN V_MAC_AN_DONE_INT_EN(1U)
+
+#define S_MAC_AN_PGRD_INT_EN 2
+#define V_MAC_AN_PGRD_INT_EN(x) ((x) << S_MAC_AN_PGRD_INT_EN)
+#define F_MAC_AN_PGRD_INT_EN V_MAC_AN_PGRD_INT_EN(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_EN 1
+#define V_MAC_TXFIFO_ERR_INT_EN(x) ((x) << S_MAC_TXFIFO_ERR_INT_EN)
+#define F_MAC_TXFIFO_ERR_INT_EN V_MAC_TXFIFO_ERR_INT_EN(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_EN 0
+#define V_MAC_RXFIFO_ERR_INT_EN(x) ((x) << S_MAC_RXFIFO_ERR_INT_EN)
+#define F_MAC_RXFIFO_ERR_INT_EN V_MAC_RXFIFO_ERR_INT_EN(1U)
+
+#define A_T7_MAC_PORT_INT_CAUSE 0x86c
+
+#define S_MAC2MPS_PERR_CAUSE 31
+#define V_MAC2MPS_PERR_CAUSE(x) ((x) << S_MAC2MPS_PERR_CAUSE)
+#define F_MAC2MPS_PERR_CAUSE V_MAC2MPS_PERR_CAUSE(1U)
+
+#define S_MAC_PPS_INT_CAUSE 30
+#define V_MAC_PPS_INT_CAUSE(x) ((x) << S_MAC_PPS_INT_CAUSE)
+#define F_MAC_PPS_INT_CAUSE V_MAC_PPS_INT_CAUSE(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_CAUSE 29
+#define V_MAC_TX_TS_AVAIL_INT_CAUSE(x) ((x) << S_MAC_TX_TS_AVAIL_INT_CAUSE)
+#define F_MAC_TX_TS_AVAIL_INT_CAUSE V_MAC_TX_TS_AVAIL_INT_CAUSE(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_CAUSE 28
+#define V_MAC_SINGLE_ALARM_INT_CAUSE(x) ((x) << S_MAC_SINGLE_ALARM_INT_CAUSE)
+#define F_MAC_SINGLE_ALARM_INT_CAUSE V_MAC_SINGLE_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_CAUSE 27
+#define V_MAC_PERIODIC_ALARM_INT_CAUSE(x) ((x) << S_MAC_PERIODIC_ALARM_INT_CAUSE)
+#define F_MAC_PERIODIC_ALARM_INT_CAUSE V_MAC_PERIODIC_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PATDETWAKE_INT_CAUSE 26
+#define V_MAC_PATDETWAKE_INT_CAUSE(x) ((x) << S_MAC_PATDETWAKE_INT_CAUSE)
+#define F_MAC_PATDETWAKE_INT_CAUSE V_MAC_PATDETWAKE_INT_CAUSE(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_CAUSE 25
+#define V_MAC_MAGIC_WAKE_INT_CAUSE(x) ((x) << S_MAC_MAGIC_WAKE_INT_CAUSE)
+#define F_MAC_MAGIC_WAKE_INT_CAUSE V_MAC_MAGIC_WAKE_INT_CAUSE(1U)
+
+#define S_MAC_SIGDETCHG_INT_CAUSE 24
+#define V_MAC_SIGDETCHG_INT_CAUSE(x) ((x) << S_MAC_SIGDETCHG_INT_CAUSE)
+#define F_MAC_SIGDETCHG_INT_CAUSE V_MAC_SIGDETCHG_INT_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_GOOD_CAUSE 12
+#define V_MAC_PCS_LINK_GOOD_CAUSE(x) ((x) << S_MAC_PCS_LINK_GOOD_CAUSE)
+#define F_MAC_PCS_LINK_GOOD_CAUSE V_MAC_PCS_LINK_GOOD_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_FAIL_CAUSE 11
+#define V_MAC_PCS_LINK_FAIL_CAUSE(x) ((x) << S_MAC_PCS_LINK_FAIL_CAUSE)
+#define F_MAC_PCS_LINK_FAIL_CAUSE V_MAC_PCS_LINK_FAIL_CAUSE(1U)
+
+#define S_MAC_OVRFLOW_INT_CAUSE 10
+#define V_MAC_OVRFLOW_INT_CAUSE(x) ((x) << S_MAC_OVRFLOW_INT_CAUSE)
+#define F_MAC_OVRFLOW_INT_CAUSE V_MAC_OVRFLOW_INT_CAUSE(1U)
+
+#define S_MAC_REM_FAULT_INT_CAUSE 7
+#define V_MAC_REM_FAULT_INT_CAUSE(x) ((x) << S_MAC_REM_FAULT_INT_CAUSE)
+#define F_MAC_REM_FAULT_INT_CAUSE V_MAC_REM_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LOC_FAULT_INT_CAUSE 6
+#define V_MAC_LOC_FAULT_INT_CAUSE(x) ((x) << S_MAC_LOC_FAULT_INT_CAUSE)
+#define F_MAC_LOC_FAULT_INT_CAUSE V_MAC_LOC_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LINK_DOWN_INT_CAUSE 5
+#define V_MAC_LINK_DOWN_INT_CAUSE(x) ((x) << S_MAC_LINK_DOWN_INT_CAUSE)
+#define F_MAC_LINK_DOWN_INT_CAUSE V_MAC_LINK_DOWN_INT_CAUSE(1U)
+
+#define S_MAC_LINK_UP_INT_CAUSE 4
+#define V_MAC_LINK_UP_INT_CAUSE(x) ((x) << S_MAC_LINK_UP_INT_CAUSE)
+#define F_MAC_LINK_UP_INT_CAUSE V_MAC_LINK_UP_INT_CAUSE(1U)
+
+#define S_MAC_AN_DONE_INT_CAUSE 3
+#define V_MAC_AN_DONE_INT_CAUSE(x) ((x) << S_MAC_AN_DONE_INT_CAUSE)
+#define F_MAC_AN_DONE_INT_CAUSE V_MAC_AN_DONE_INT_CAUSE(1U)
+
+#define S_MAC_AN_PGRD_INT_CAUSE 2
+#define V_MAC_AN_PGRD_INT_CAUSE(x) ((x) << S_MAC_AN_PGRD_INT_CAUSE)
+#define F_MAC_AN_PGRD_INT_CAUSE V_MAC_AN_PGRD_INT_CAUSE(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_CAUSE 1
+#define V_MAC_TXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_TXFIFO_ERR_INT_CAUSE)
+#define F_MAC_TXFIFO_ERR_INT_CAUSE V_MAC_TXFIFO_ERR_INT_CAUSE(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_CAUSE 0
+#define V_MAC_RXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_RXFIFO_ERR_INT_CAUSE)
+#define F_MAC_RXFIFO_ERR_INT_CAUSE V_MAC_RXFIFO_ERR_INT_CAUSE(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN 0x870
+#define A_T7_MAC_PORT_PERR_INT_CAUSE 0x874
+#define A_T7_MAC_PORT_PERR_ENABLE 0x878
+#define A_T7_MAC_PORT_PERR_INJECT 0x87c
+
+#define S_T7_MEMSEL_PERR 1
+#define M_T7_MEMSEL_PERR 0xffU
+#define V_T7_MEMSEL_PERR(x) ((x) << S_T7_MEMSEL_PERR)
+#define G_T7_MEMSEL_PERR(x) (((x) >> S_T7_MEMSEL_PERR) & M_T7_MEMSEL_PERR)
+
+#define A_T7_MAC_PORT_RUNT_FRAME 0x880
+#define A_T7_MAC_PORT_EEE_STATUS 0x884
+#define A_T7_MAC_PORT_TX_TS_ID 0x888
+
+#define S_TS_ID_MSB 3
+#define V_TS_ID_MSB(x) ((x) << S_TS_ID_MSB)
+#define F_TS_ID_MSB V_TS_ID_MSB(1U)
+
+#define A_T7_MAC_PORT_TX_TS_VAL_LO 0x88c
+#define A_T7_MAC_PORT_TX_TS_VAL_HI 0x890
+#define A_T7_MAC_PORT_EEE_CTL 0x894
+#define A_T7_MAC_PORT_EEE_TX_CTL 0x898
+#define A_T7_MAC_PORT_EEE_RX_CTL 0x89c
+#define A_T7_MAC_PORT_EEE_TX_10G_SLEEP_TIMER 0x8a0
+#define A_T7_MAC_PORT_EEE_TX_10G_QUIET_TIMER 0x8a4
+#define A_T7_MAC_PORT_EEE_TX_10G_WAKE_TIMER 0x8a8
+#define A_T7_MAC_PORT_EEE_RX_10G_QUIET_TIMER 0x8b8
+#define A_T7_MAC_PORT_EEE_RX_10G_WAKE_TIMER 0x8bc
+#define A_T7_MAC_PORT_EEE_RX_10G_WF_TIMER 0x8c0
+#define A_T7_MAC_PORT_EEE_WF_COUNT 0x8cc
+#define A_MAC_PORT_WOL_EN 0x8d0
+
+#define S_WOL_ENABLE 1
+#define V_WOL_ENABLE(x) ((x) << S_WOL_ENABLE)
+#define F_WOL_ENABLE V_WOL_ENABLE(1U)
+
+#define S_WOL_INDICATOR 0
+#define V_WOL_INDICATOR(x) ((x) << S_WOL_INDICATOR)
+#define F_WOL_INDICATOR V_WOL_INDICATOR(1U)
+
+#define A_MAC_PORT_INT_TRACE 0x8d4
+
+#define S_INTERRUPT 0
+#define M_INTERRUPT 0x7fffffffU
+#define V_INTERRUPT(x) ((x) << S_INTERRUPT)
+#define G_INTERRUPT(x) (((x) >> S_INTERRUPT) & M_INTERRUPT)
+
+#define A_MAC_PORT_TRACE_TS_LO 0x8d8
+#define A_MAC_PORT_TRACE_TS_HI 0x8dc
+#define A_MAC_PORT_MTIP_10G100G_REVISION 0x900
+
+#define S_VER_10G100G 8
+#define M_VER_10G100G 0xffU
+#define V_VER_10G100G(x) ((x) << S_VER_10G100G)
+#define G_VER_10G100G(x) (((x) >> S_VER_10G100G) & M_VER_10G100G)
+
+#define S_REV_10G100G 0
+#define M_REV_10G100G 0xffU
+#define V_REV_10G100G(x) ((x) << S_REV_10G100G)
+#define G_REV_10G100G(x) (((x) >> S_REV_10G100G) & M_REV_10G100G)
+
+#define A_MAC_PORT_MTIP_10G100G_SCRATCH 0x904
+#define A_MAC_PORT_MTIP_10G100G_COMMAND_CONFIG 0x908
+
+#define S_NO_PREAM 31
+#define V_NO_PREAM(x) ((x) << S_NO_PREAM)
+#define F_NO_PREAM V_NO_PREAM(1U)
+
+#define S_SHORT_PREAM 30
+#define V_SHORT_PREAM(x) ((x) << S_SHORT_PREAM)
+#define F_SHORT_PREAM V_SHORT_PREAM(1U)
+
+#define S_FLT_HDL_DIS 27
+#define V_FLT_HDL_DIS(x) ((x) << S_FLT_HDL_DIS)
+#define F_FLT_HDL_DIS V_FLT_HDL_DIS(1U)
+
+#define S_TX_FIFO_RESET 26
+#define V_TX_FIFO_RESET(x) ((x) << S_TX_FIFO_RESET)
+#define F_TX_FIFO_RESET V_TX_FIFO_RESET(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_0 0x90c
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_1 0x910
+#define A_MAC_PORT_MTIP_10G100G_FRM_LENGTH_TX_MTU 0x914
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_SECTIONS 0x91c
+
+#define S_RX10G100G_EMPTY 16
+#define M_RX10G100G_EMPTY 0xffffU
+#define V_RX10G100G_EMPTY(x) ((x) << S_RX10G100G_EMPTY)
+#define G_RX10G100G_EMPTY(x) (((x) >> S_RX10G100G_EMPTY) & M_RX10G100G_EMPTY)
+
+#define S_RX10G100G_AVAIL 0
+#define M_RX10G100G_AVAIL 0xffffU
+#define V_RX10G100G_AVAIL(x) ((x) << S_RX10G100G_AVAIL)
+#define G_RX10G100G_AVAIL(x) (((x) >> S_RX10G100G_AVAIL) & M_RX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_SECTIONS 0x920
+
+#define S_TX10G100G_EMPTY 16
+#define M_TX10G100G_EMPTY 0xffffU
+#define V_TX10G100G_EMPTY(x) ((x) << S_TX10G100G_EMPTY)
+#define G_TX10G100G_EMPTY(x) (((x) >> S_TX10G100G_EMPTY) & M_TX10G100G_EMPTY)
+
+#define S_TX10G100G_AVAIL 0
+#define M_TX10G100G_AVAIL 0xffffU
+#define V_TX10G100G_AVAIL(x) ((x) << S_TX10G100G_AVAIL)
+#define G_TX10G100G_AVAIL(x) (((x) >> S_TX10G100G_AVAIL) & M_TX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_ALMOST_F_E 0x924
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_ALMOST_F_E 0x928
+#define A_MAC_PORT_MTIP_10G100G_MDIO_CFG_STATUS 0x930
+#define A_MAC_PORT_MTIP_10G100G_MDIO_COMMAND 0x934
+#define A_MAC_PORT_MTIP_10G100G_MDIO_DATA 0x938
+#define A_MAC_PORT_MTIP_10G100G_MDIO_REGADDR 0x93c
+#define A_MAC_PORT_MTIP_10G100G_STATUS 0x940
+
+#define S_T7_TX_ISIDLE 8
+#define V_T7_TX_ISIDLE(x) ((x) << S_T7_TX_ISIDLE)
+#define F_T7_TX_ISIDLE V_T7_TX_ISIDLE(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_IPG_LENGTH 0x944
+
+#define S_IPG_COMP_CNT 16
+#define M_IPG_COMP_CNT 0xffffU
+#define V_IPG_COMP_CNT(x) ((x) << S_IPG_COMP_CNT)
+#define G_IPG_COMP_CNT(x) (((x) >> S_IPG_COMP_CNT) & M_IPG_COMP_CNT)
+
+#define S_AVG_IPG_LEN 2
+#define M_AVG_IPG_LEN 0xfU
+#define V_AVG_IPG_LEN(x) ((x) << S_AVG_IPG_LEN)
+#define G_AVG_IPG_LEN(x) (((x) >> S_AVG_IPG_LEN) & M_AVG_IPG_LEN)
+
+#define S_DSBL_DIC 0
+#define V_DSBL_DIC(x) ((x) << S_DSBL_DIC)
+#define F_DSBL_DIC V_DSBL_DIC(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_CRC_MODE 0x948
+#define A_MAC_PORT_MTIP_10G100G_CL01_PAUSE_QUANTA 0x954
+#define A_MAC_PORT_MTIP_10G100G_CL23_PAUSE_QUANTA 0x958
+#define A_MAC_PORT_MTIP_10G100G_CL45_PAUSE_QUANTA 0x95c
+#define A_MAC_PORT_MTIP_10G100G_CL67_PAUSE_QUANTA 0x960
+#define A_MAC_PORT_MTIP_10G100G_CL01_QUANTA_THRESH 0x964
+#define A_MAC_PORT_MTIP_10G100G_CL23_QUANTA_THRESH 0x968
+#define A_MAC_PORT_MTIP_10G100G_CL45_QUANTA_THRESH 0x96c
+#define A_MAC_PORT_MTIP_10G100G_CL67_QUANTA_THRESH 0x970
+#define A_MAC_PORT_MTIP_10G100G_RX_PAUSE_STATUS 0x974
+#define A_MAC_PORT_MTIP_10G100G_TS_TIMESTAMP 0x97c
+#define A_MAC_PORT_MTIP_10G100G_XIF_MODE 0x980
+
+#define S_RX_CNT_MODE 16
+#define V_RX_CNT_MODE(x) ((x) << S_RX_CNT_MODE)
+#define F_RX_CNT_MODE V_RX_CNT_MODE(1U)
+
+#define S_TS_UPD64_MODE 12
+#define V_TS_UPD64_MODE(x) ((x) << S_TS_UPD64_MODE)
+#define F_TS_UPD64_MODE V_TS_UPD64_MODE(1U)
+
+#define S_TS_BINARY_MODE 11
+#define V_TS_BINARY_MODE(x) ((x) << S_TS_BINARY_MODE)
+#define F_TS_BINARY_MODE V_TS_BINARY_MODE(1U)
+
+#define S_TS_DELAY_MODE 10
+#define V_TS_DELAY_MODE(x) ((x) << S_TS_DELAY_MODE)
+#define F_TS_DELAY_MODE V_TS_DELAY_MODE(1U)
+
+#define S_TS_DELTA_MODE 9
+#define V_TS_DELTA_MODE(x) ((x) << S_TS_DELTA_MODE)
+#define F_TS_DELTA_MODE V_TS_DELTA_MODE(1U)
+
+#define S_TX_MAC_RS_ERR 8
+#define V_TX_MAC_RS_ERR(x) ((x) << S_TX_MAC_RS_ERR)
+#define F_TX_MAC_RS_ERR V_TX_MAC_RS_ERR(1U)
+
+#define S_RX_PAUSE_BYPASS 6
+#define V_RX_PAUSE_BYPASS(x) ((x) << S_RX_PAUSE_BYPASS)
+#define F_RX_PAUSE_BYPASS V_RX_PAUSE_BYPASS(1U)
+
+#define S_ONE_STEP_ENA 5
+#define V_ONE_STEP_ENA(x) ((x) << S_ONE_STEP_ENA)
+#define F_ONE_STEP_ENA V_ONE_STEP_ENA(1U)
+
+#define S_PAUSETIMERX8 4
+#define V_PAUSETIMERX8(x) ((x) << S_PAUSETIMERX8)
+#define F_PAUSETIMERX8 V_PAUSETIMERX8(1U)
+
+#define S_XGMII_ENA 0
+#define V_XGMII_ENA(x) ((x) << S_XGMII_ENA)
+#define F_XGMII_ENA V_XGMII_ENA(1U)
+
+/*
+ * MTIP CR4 PCS, instance 0: register map 0xa00-0xd14.
+ * The layout appears to mirror an IEEE 802.3 Clause 45 PCS MMD
+ * (control/status, device IDs, speed ability, EEE control, BASE-R status,
+ * scrambler seeds, multi-lane alignment and BIP error counters, lane
+ * mapping, virtual-lane markers) -- TODO confirm against the MTIP CR4
+ * core documentation.  Auto-generated; do not hand-edit names or offsets.
+ */
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_1 0xa00
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_1 0xa04
+
+#define S_CR4_0_RX_LINK_STATUS 2
+#define V_CR4_0_RX_LINK_STATUS(x) ((x) << S_CR4_0_RX_LINK_STATUS)
+#define F_CR4_0_RX_LINK_STATUS V_CR4_0_RX_LINK_STATUS(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID0 0xa08
+
+#define S_CR4_0_DEVICE_ID0 0
+#define M_CR4_0_DEVICE_ID0 0xffffU
+#define V_CR4_0_DEVICE_ID0(x) ((x) << S_CR4_0_DEVICE_ID0)
+#define G_CR4_0_DEVICE_ID0(x) (((x) >> S_CR4_0_DEVICE_ID0) & M_CR4_0_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID1 0xa0c
+
+#define S_CR4_0_DEVICE_ID1 0
+#define M_CR4_0_DEVICE_ID1 0xffffU
+#define V_CR4_0_DEVICE_ID1(x) ((x) << S_CR4_0_DEVICE_ID1)
+#define G_CR4_0_DEVICE_ID1(x) (((x) >> S_CR4_0_DEVICE_ID1) & M_CR4_0_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SPEED_ABILITY 0xa10
+
+#define S_50G_CAPABLE 5
+#define V_50G_CAPABLE(x) ((x) << S_50G_CAPABLE)
+#define F_50G_CAPABLE V_50G_CAPABLE(1U)
+
+#define S_25G_CAPABLE 4
+#define V_25G_CAPABLE(x) ((x) << S_25G_CAPABLE)
+#define F_25G_CAPABLE V_25G_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG1 0xa14
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG2 0xa18
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_2 0xa1c
+
+#define S_T7_PCS_TYPE_SELECTION 0
+#define M_T7_PCS_TYPE_SELECTION 0xfU
+#define V_T7_PCS_TYPE_SELECTION(x) ((x) << S_T7_PCS_TYPE_SELECTION)
+#define G_T7_PCS_TYPE_SELECTION(x) (((x) >> S_T7_PCS_TYPE_SELECTION) & M_T7_PCS_TYPE_SELECTION)
+
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_2 0xa20
+
+#define S_50GBASE_R_CAPABLE 8
+#define V_50GBASE_R_CAPABLE(x) ((x) << S_50GBASE_R_CAPABLE)
+#define F_50GBASE_R_CAPABLE V_50GBASE_R_CAPABLE(1U)
+
+#define S_25GBASE_R_CAPABLE 7
+#define V_25GBASE_R_CAPABLE(x) ((x) << S_25GBASE_R_CAPABLE)
+#define F_25GBASE_R_CAPABLE V_25GBASE_R_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID0 0xa38
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID1 0xa3c
+#define A_MAC_PORT_MTIP_CR4_0_EEE_CTRL 0xa50
+
+#define S_50GBASE_R_FW 14
+#define V_50GBASE_R_FW(x) ((x) << S_50GBASE_R_FW)
+#define F_50GBASE_R_FW V_50GBASE_R_FW(1U)
+
+#define S_100GBASE_R_DS 13
+#define V_100GBASE_R_DS(x) ((x) << S_100GBASE_R_DS)
+#define F_100GBASE_R_DS V_100GBASE_R_DS(1U)
+
+#define S_100GBASE_R_FW 12
+#define V_100GBASE_R_FW(x) ((x) << S_100GBASE_R_FW)
+#define F_100GBASE_R_FW V_100GBASE_R_FW(1U)
+
+#define S_25GBASE_R_DS 11
+#define V_25GBASE_R_DS(x) ((x) << S_25GBASE_R_DS)
+#define F_25GBASE_R_DS V_25GBASE_R_DS(1U)
+
+#define S_25GBASE_R_FW 10
+#define V_25GBASE_R_FW(x) ((x) << S_25GBASE_R_FW)
+#define F_25GBASE_R_FW V_25GBASE_R_FW(1U)
+
+#define S_40GBASE_R_DS 9
+#define V_40GBASE_R_DS(x) ((x) << S_40GBASE_R_DS)
+#define F_40GBASE_R_DS V_40GBASE_R_DS(1U)
+
+#define S_40GBASE_R_FW 8
+#define V_40GBASE_R_FW(x) ((x) << S_40GBASE_R_FW)
+#define F_40GBASE_R_FW V_40GBASE_R_FW(1U)
+
+#define S_10GBASE_KE_EEE 6
+#define V_10GBASE_KE_EEE(x) ((x) << S_10GBASE_KE_EEE)
+#define F_10GBASE_KE_EEE V_10GBASE_KE_EEE(1U)
+
+#define S_FAST_WAKE 1
+#define M_FAST_WAKE 0x1fU
+#define V_FAST_WAKE(x) ((x) << S_FAST_WAKE)
+#define G_FAST_WAKE(x) (((x) >> S_FAST_WAKE) & M_FAST_WAKE)
+
+#define S_DEEP_SLEEP 0
+#define V_DEEP_SLEEP(x) ((x) << S_DEEP_SLEEP)
+#define F_DEEP_SLEEP V_DEEP_SLEEP(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_WAKE_ERROR_COUNTER 0xa58
+
+#define S_WAKE_ERROR_COUNTER 0
+#define M_WAKE_ERROR_COUNTER 0x1ffffU
+#define V_WAKE_ERROR_COUNTER(x) ((x) << S_WAKE_ERROR_COUNTER)
+#define G_WAKE_ERROR_COUNTER(x) (((x) >> S_WAKE_ERROR_COUNTER) & M_WAKE_ERROR_COUNTER)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_1 0xa80
+
+#define S_CR4_0_BR_BLOCK_LOCK 0
+#define V_CR4_0_BR_BLOCK_LOCK(x) ((x) << S_CR4_0_BR_BLOCK_LOCK)
+#define F_CR4_0_BR_BLOCK_LOCK V_CR4_0_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_2 0xa84
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_0 0xa88
+
+#define S_SEED_A_0 0
+#define M_SEED_A_0 0xffffU
+#define V_SEED_A_0(x) ((x) << S_SEED_A_0)
+#define G_SEED_A_0(x) (((x) >> S_SEED_A_0) & M_SEED_A_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_1 0xa8c
+
+#define S_SEED_A_1 0
+#define M_SEED_A_1 0xffffU
+#define V_SEED_A_1(x) ((x) << S_SEED_A_1)
+#define G_SEED_A_1(x) (((x) >> S_SEED_A_1) & M_SEED_A_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_2 0xa90
+
+#define S_SEED_A_2 0
+#define M_SEED_A_2 0xffffU
+#define V_SEED_A_2(x) ((x) << S_SEED_A_2)
+#define G_SEED_A_2(x) (((x) >> S_SEED_A_2) & M_SEED_A_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_3 0xa94
+
+#define S_SEED_A_3 0
+#define M_SEED_A_3 0xffffU
+#define V_SEED_A_3(x) ((x) << S_SEED_A_3)
+#define G_SEED_A_3(x) (((x) >> S_SEED_A_3) & M_SEED_A_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_0 0xa98
+
+#define S_SEED_B_0 0
+#define M_SEED_B_0 0xffffU
+#define V_SEED_B_0(x) ((x) << S_SEED_B_0)
+#define G_SEED_B_0(x) (((x) >> S_SEED_B_0) & M_SEED_B_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_1 0xa9c
+
+#define S_SEED_B_1 0
+#define M_SEED_B_1 0xffffU
+#define V_SEED_B_1(x) ((x) << S_SEED_B_1)
+#define G_SEED_B_1(x) (((x) >> S_SEED_B_1) & M_SEED_B_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_2 0xaa0
+
+#define S_SEED_B_2 0
+#define M_SEED_B_2 0xffffU
+#define V_SEED_B_2(x) ((x) << S_SEED_B_2)
+#define G_SEED_B_2(x) (((x) >> S_SEED_B_2) & M_SEED_B_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_3 0xaa4
+
+#define S_SEED_B_3 0
+#define M_SEED_B_3 0xffffU
+#define V_SEED_B_3(x) ((x) << S_SEED_B_3)
+#define G_SEED_B_3(x) (((x) >> S_SEED_B_3) & M_SEED_B_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_PATTERN_CONTROL 0xaa8
+
+#define S_TEST_PATTERN_40G 7
+#define V_TEST_PATTERN_40G(x) ((x) << S_TEST_PATTERN_40G)
+#define F_TEST_PATTERN_40G V_TEST_PATTERN_40G(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_ERR_CNT 0xaac
+#define A_MAC_PORT_MTIP_CR4_0_BER_HIGH_ORDER_CNT 0xab0
+
+#define S_BASE_R_BER_HIGH_ORDER_CNT 0
+#define M_BASE_R_BER_HIGH_ORDER_CNT 0xffffU
+#define V_BASE_R_BER_HIGH_ORDER_CNT(x) ((x) << S_BASE_R_BER_HIGH_ORDER_CNT)
+#define G_BASE_R_BER_HIGH_ORDER_CNT(x) (((x) >> S_BASE_R_BER_HIGH_ORDER_CNT) & M_BASE_R_BER_HIGH_ORDER_CNT)
+
+#define A_MAC_PORT_MTIP_CR4_0_ERR_BLK_HIGH_ORDER_CNT 0xab4
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_1 0xac8
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_2 0xacc
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_3 0xad0
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_4 0xad4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_0 0xad8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_1 0xadc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_2 0xae0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_3 0xae4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_4 0xae8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_5 0xaec
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_6 0xaf0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_7 0xaf4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_8 0xaf8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_9 0xafc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_10 0xb00
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_11 0xb04
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_12 0xb08
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_13 0xb0c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_14 0xb10
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_15 0xb14
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_16 0xb18
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_17 0xb1c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_18 0xb20
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_19 0xb24
+#define A_MAC_PORT_MTIP_CR4_0_LANE_0_MAPPING 0xb28
+#define A_MAC_PORT_MTIP_CR4_0_LANE_1_MAPPING 0xb2c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_2_MAPPING 0xb30
+#define A_MAC_PORT_MTIP_CR4_0_LANE_3_MAPPING 0xb34
+#define A_MAC_PORT_MTIP_CR4_0_LANE_4_MAPPING 0xb38
+#define A_MAC_PORT_MTIP_CR4_0_LANE_5_MAPPING 0xb3c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_6_MAPPING 0xb40
+#define A_MAC_PORT_MTIP_CR4_0_LANE_7_MAPPING 0xb44
+#define A_MAC_PORT_MTIP_CR4_0_LANE_8_MAPPING 0xb48
+#define A_MAC_PORT_MTIP_CR4_0_LANE_9_MAPPING 0xb4c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_10_MAPPING 0xb50
+#define A_MAC_PORT_MTIP_CR4_0_LANE_11_MAPPING 0xb54
+#define A_MAC_PORT_MTIP_CR4_0_LANE_12_MAPPING 0xb58
+#define A_MAC_PORT_MTIP_CR4_0_LANE_13_MAPPING 0xb5c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_14_MAPPING 0xb60
+#define A_MAC_PORT_MTIP_CR4_0_LANE_15_MAPPING 0xb64
+#define A_MAC_PORT_MTIP_CR4_0_LANE_16_MAPPING 0xb68
+#define A_MAC_PORT_MTIP_CR4_0_LANE_17_MAPPING 0xb6c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_18_MAPPING 0xb70
+#define A_MAC_PORT_MTIP_CR4_0_LANE_19_MAPPING 0xb74
+#define A_MAC_PORT_MTIP_CR4_0_SCRATCH 0xb78
+#define A_MAC_PORT_MTIP_CR4_0_CORE_REVISION 0xb7c
+#define A_MAC_PORT_MTIP_CR4_0_VL_INTVL 0xb80
+
+/*
+ * NOTE(review): the field macros below spell "INTCL" while the register is
+ * "VL_INTVL" -- presumably a typo in the register-header generator.  Left
+ * as-is: renaming would break every existing user of these macros.
+ */
+#define S_VL_INTCL 0
+#define M_VL_INTCL 0xffffU
+#define V_VL_INTCL(x) ((x) << S_VL_INTCL)
+#define G_VL_INTCL(x) (((x) >> S_VL_INTCL) & M_VL_INTCL)
+
+#define A_MAC_PORT_MTIP_CR4_0_TX_LANE_THRESH 0xb84
+
+#define S_LANE6_LANE7 12
+#define M_LANE6_LANE7 0xfU
+#define V_LANE6_LANE7(x) ((x) << S_LANE6_LANE7)
+#define G_LANE6_LANE7(x) (((x) >> S_LANE6_LANE7) & M_LANE6_LANE7)
+
+#define S_LANE4_LANE5 8
+#define M_LANE4_LANE5 0xfU
+#define V_LANE4_LANE5(x) ((x) << S_LANE4_LANE5)
+#define G_LANE4_LANE5(x) (((x) >> S_LANE4_LANE5) & M_LANE4_LANE5)
+
+#define S_LANE2_LANE3 4
+#define M_LANE2_LANE3 0xfU
+#define V_LANE2_LANE3(x) ((x) << S_LANE2_LANE3)
+#define G_LANE2_LANE3(x) (((x) >> S_LANE2_LANE3) & M_LANE2_LANE3)
+
+#define S_LANE0_LANE1 0
+#define M_LANE0_LANE1 0xfU
+#define V_LANE0_LANE1(x) ((x) << S_LANE0_LANE1)
+#define G_LANE0_LANE1(x) (((x) >> S_LANE0_LANE1) & M_LANE0_LANE1)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_0 0xb98
+
+#define S_M1 8
+#define M_M1 0xffU
+#define V_M1(x) ((x) << S_M1)
+#define G_M1(x) (((x) >> S_M1) & M_M1)
+
+#define S_M0 0
+#define M_M0 0xffU
+#define V_M0(x) ((x) << S_M0)
+#define G_M0(x) (((x) >> S_M0) & M_M0)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_1 0xb9c
+
+#define S_M2 0
+#define M_M2 0xffU
+#define V_M2(x) ((x) << S_M2)
+#define G_M2(x) (((x) >> S_M2) & M_M2)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL1_0 0xba0
+#define A_MAC_PORT_MTIP_CR4_0_VL1_1 0xba4
+#define A_MAC_PORT_MTIP_CR4_0_VL2_0 0xba8
+#define A_MAC_PORT_MTIP_CR4_0_VL2_1 0xbac
+#define A_MAC_PORT_MTIP_CR4_0_VL3_0 0xbb0
+#define A_MAC_PORT_MTIP_CR4_0_VL3_1 0xbb4
+#define A_MAC_PORT_MTIP_CR4_0_PCS_MODE 0xbb8
+
+#define S_ST_DISABLE_MLD 9
+#define V_ST_DISABLE_MLD(x) ((x) << S_ST_DISABLE_MLD)
+#define F_ST_DISABLE_MLD V_ST_DISABLE_MLD(1U)
+
+#define S_ST_EN_CLAUSE49 8
+#define V_ST_EN_CLAUSE49(x) ((x) << S_ST_EN_CLAUSE49)
+#define F_ST_EN_CLAUSE49 V_ST_EN_CLAUSE49(1U)
+
+#define S_HI_BER25 2
+#define V_HI_BER25(x) ((x) << S_HI_BER25)
+#define F_HI_BER25 V_HI_BER25(1U)
+
+#define S_DISABLE_MLD 1
+#define V_DISABLE_MLD(x) ((x) << S_DISABLE_MLD)
+#define F_DISABLE_MLD V_DISABLE_MLD(1U)
+
+#define S_ENA_CLAUSE49 0
+#define V_ENA_CLAUSE49(x) ((x) << S_ENA_CLAUSE49)
+#define F_ENA_CLAUSE49 V_ENA_CLAUSE49(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL4_0 0xc98
+#define A_MAC_PORT_MTIP_CR4_0_VL4_1 0xc9c
+#define A_MAC_PORT_MTIP_CR4_0_VL5_0 0xca0
+#define A_MAC_PORT_MTIP_CR4_0_VL5_1 0xca4
+#define A_MAC_PORT_MTIP_CR4_0_VL6_0 0xca8
+#define A_MAC_PORT_MTIP_CR4_0_VL6_1 0xcac
+#define A_MAC_PORT_MTIP_CR4_0_VL7_0 0xcb0
+#define A_MAC_PORT_MTIP_CR4_0_VL7_1 0xcb4
+#define A_MAC_PORT_MTIP_CR4_0_VL8_0 0xcb8
+#define A_MAC_PORT_MTIP_CR4_0_VL8_1 0xcbc
+#define A_MAC_PORT_MTIP_CR4_0_VL9_0 0xcc0
+#define A_MAC_PORT_MTIP_CR4_0_VL9_1 0xcc4
+#define A_MAC_PORT_MTIP_CR4_0_VL10_0 0xcc8
+#define A_MAC_PORT_MTIP_CR4_0_VL10_1 0xccc
+#define A_MAC_PORT_MTIP_CR4_0_VL11_0 0xcd0
+#define A_MAC_PORT_MTIP_CR4_0_VL11_1 0xcd4
+#define A_MAC_PORT_MTIP_CR4_0_VL12_0 0xcd8
+#define A_MAC_PORT_MTIP_CR4_0_VL12_1 0xcdc
+#define A_MAC_PORT_MTIP_CR4_0_VL13_0 0xce0
+#define A_MAC_PORT_MTIP_CR4_0_VL13_1 0xce4
+#define A_MAC_PORT_MTIP_CR4_0_VL14_0 0xce8
+#define A_MAC_PORT_MTIP_CR4_0_VL14_1 0xcec
+#define A_MAC_PORT_MTIP_CR4_0_VL15_0 0xcf0
+#define A_MAC_PORT_MTIP_CR4_0_VL15_1 0xcf4
+#define A_MAC_PORT_MTIP_CR4_0_VL16_0 0xcf8
+#define A_MAC_PORT_MTIP_CR4_0_VL16_1 0xcfc
+#define A_MAC_PORT_MTIP_CR4_0_VL17_0 0xd00
+#define A_MAC_PORT_MTIP_CR4_0_VL17_1 0xd04
+#define A_MAC_PORT_MTIP_CR4_0_VL18_0 0xd08
+#define A_MAC_PORT_MTIP_CR4_0_VL18_1 0xd0c
+#define A_MAC_PORT_MTIP_CR4_0_VL19_0 0xd10
+#define A_MAC_PORT_MTIP_CR4_0_VL19_1 0xd14
+/*
+ * MTIP CR4 PCS, instance 1: register map 0x1000-0x11b8.  Mirrors the
+ * instance-0 block above at a different base; shared field macros
+ * (SEED_*, LANE*_LANE*, M0/M1/M2, ...) are defined once in the
+ * instance-0 section and reused here.
+ */
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_1 0x1000
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_1 0x1004
+
+#define S_CR4_RX_LINK_STATUS_1 2
+#define V_CR4_RX_LINK_STATUS_1(x) ((x) << S_CR4_RX_LINK_STATUS_1)
+#define F_CR4_RX_LINK_STATUS_1 V_CR4_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID0 0x1008
+
+#define S_CR4_1_DEVICE_ID0 0
+#define M_CR4_1_DEVICE_ID0 0xffffU
+#define V_CR4_1_DEVICE_ID0(x) ((x) << S_CR4_1_DEVICE_ID0)
+#define G_CR4_1_DEVICE_ID0(x) (((x) >> S_CR4_1_DEVICE_ID0) & M_CR4_1_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID1 0x100c
+
+#define S_CR4_1_DEVICE_ID1 0
+#define M_CR4_1_DEVICE_ID1 0xffffU
+#define V_CR4_1_DEVICE_ID1(x) ((x) << S_CR4_1_DEVICE_ID1)
+#define G_CR4_1_DEVICE_ID1(x) (((x) >> S_CR4_1_DEVICE_ID1) & M_CR4_1_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_1_SPEED_ABILITY 0x1010
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG1 0x1014
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG2 0x1018
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_2 0x101c
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_2 0x1020
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID0 0x1038
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID1 0x103c
+#define A_MAC_PORT_MTIP_CR4_1_EEE_CTRL 0x1050
+#define A_MAC_PORT_MTIP_CR4_1_WAKE_ERROR_COUNTER 0x1058
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_1 0x1080
+
+#define S_CR4_1_BR_BLOCK_LOCK 0
+#define V_CR4_1_BR_BLOCK_LOCK(x) ((x) << S_CR4_1_BR_BLOCK_LOCK)
+#define F_CR4_1_BR_BLOCK_LOCK V_CR4_1_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_2 0x1084
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_0 0x1088
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_1 0x108c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_2 0x1090
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_3 0x1094
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_0 0x1098
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_1 0x109c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_2 0x10a0
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_3 0x10a4
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_PATTERN_CONTROL 0x10a8
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_ERR_CNT 0x10ac
+#define A_MAC_PORT_MTIP_CR4_1_BER_HIGH_ORDER_CNT 0x10b0
+#define A_MAC_PORT_MTIP_CR4_1_ERR_BLK_HIGH_ORDER_CNT 0x10b4
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_1 0x10c8
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_2 0x10cc
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_3 0x10d0
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_4 0x10d4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_0 0x10d8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_1 0x10dc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_2 0x10e0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_3 0x10e4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_4 0x10e8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_5 0x10ec
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_6 0x10f0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_7 0x10f4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_8 0x10f8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_9 0x10fc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_10 0x1100
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_11 0x1104
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_12 0x1108
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_13 0x110c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_14 0x1110
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_15 0x1114
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_16 0x1118
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_17 0x111c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_18 0x1120
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_19 0x1124
+#define A_MAC_PORT_MTIP_CR4_1_LANE_0_MAPPING 0x1128
+#define A_MAC_PORT_MTIP_CR4_1_LANE_1_MAPPING 0x112c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_2_MAPPING 0x1130
+#define A_MAC_PORT_MTIP_CR4_1_LANE_3_MAPPING 0x1134
+#define A_MAC_PORT_MTIP_CR4_1_LANE_4_MAPPING 0x1138
+#define A_MAC_PORT_MTIP_CR4_1_LANE_5_MAPPING 0x113c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_6_MAPPING 0x1140
+#define A_MAC_PORT_MTIP_CR4_1_LANE_7_MAPPING 0x1144
+#define A_MAC_PORT_MTIP_CR4_1_LANE_8_MAPPING 0x1148
+#define A_MAC_PORT_MTIP_CR4_1_LANE_9_MAPPING 0x114c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_10_MAPPING 0x1150
+#define A_MAC_PORT_MTIP_CR4_1_LANE_11_MAPPING 0x1154
+#define A_MAC_PORT_MTIP_CR4_1_LANE_12_MAPPING 0x1158
+#define A_MAC_PORT_MTIP_CR4_1_LANE_13_MAPPING 0x115c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_14_MAPPING 0x1160
+#define A_MAC_PORT_MTIP_CR4_1_LANE_15_MAPPING 0x1164
+#define A_MAC_PORT_MTIP_CR4_1_LANE_16_MAPPING 0x1168
+#define A_MAC_PORT_MTIP_CR4_1_LANE_17_MAPPING 0x116c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_18_MAPPING 0x1170
+#define A_MAC_PORT_MTIP_CR4_1_LANE_19_MAPPING 0x1174
+#define A_MAC_PORT_MTIP_CR4_1_SCRATCH 0x1178
+#define A_MAC_PORT_MTIP_CR4_1_CORE_REVISION 0x117c
+#define A_MAC_PORT_MTIP_CR4_1_VL_INTVL 0x1180
+#define A_MAC_PORT_MTIP_CR4_1_TX_LANE_THRESH 0x1184
+#define A_MAC_PORT_MTIP_CR4_1_VL0_0 0x1198
+#define A_MAC_PORT_MTIP_CR4_1_VL0_1 0x119c
+#define A_MAC_PORT_MTIP_CR4_1_VL1_0 0x11a0
+#define A_MAC_PORT_MTIP_CR4_1_VL1_1 0x11a4
+#define A_MAC_PORT_MTIP_CR4_1_VL2_0 0x11a8
+#define A_MAC_PORT_MTIP_CR4_1_VL2_1 0x11ac
+#define A_MAC_PORT_MTIP_CR4_1_VL3_0 0x11b0
+#define A_MAC_PORT_MTIP_CR4_1_VL3_1 0x11b4
+#define A_MAC_PORT_MTIP_CR4_1_PCS_MODE 0x11b8
+/*
+ * MAC-common glue registers, base 0x38000: per-lane TX/RX polarity
+ * inversion and debug-port selection (CFG_0), per-clock-domain reset
+ * controls (RESET_CTRL_0..2), and per-clock controls (CLK_CTRL_0).
+ * Each reset/clock bit names the specific clock domain it drives; the
+ * "_G" suffix on CLK_CTRL bits presumably means "gate" -- TODO confirm.
+ */
+#define A_MAC_COMMON_CFG_0 0x38000
+
+#define S_T7_RX_POLARITY_INV 24
+#define M_T7_RX_POLARITY_INV 0xffU
+#define V_T7_RX_POLARITY_INV(x) ((x) << S_T7_RX_POLARITY_INV)
+#define G_T7_RX_POLARITY_INV(x) (((x) >> S_T7_RX_POLARITY_INV) & M_T7_RX_POLARITY_INV)
+
+#define S_T7_TX_POLARITY_INV 16
+#define M_T7_TX_POLARITY_INV 0xffU
+#define V_T7_TX_POLARITY_INV(x) ((x) << S_T7_TX_POLARITY_INV)
+#define G_T7_TX_POLARITY_INV(x) (((x) >> S_T7_TX_POLARITY_INV) & M_T7_TX_POLARITY_INV)
+
+#define S_T7_DEBUG_PORT_SEL 14
+#define M_T7_DEBUG_PORT_SEL 0x3U
+#define V_T7_DEBUG_PORT_SEL(x) ((x) << S_T7_DEBUG_PORT_SEL)
+#define G_T7_DEBUG_PORT_SEL(x) (((x) >> S_T7_DEBUG_PORT_SEL) & M_T7_DEBUG_PORT_SEL)
+
+#define S_MAC_SEPTY_CTL 8
+#define M_MAC_SEPTY_CTL 0x3fU
+#define V_MAC_SEPTY_CTL(x) ((x) << S_MAC_SEPTY_CTL)
+#define G_MAC_SEPTY_CTL(x) (((x) >> S_MAC_SEPTY_CTL) & M_MAC_SEPTY_CTL)
+
+#define S_T7_DEBUG_TX_RX_SEL 7
+#define V_T7_DEBUG_TX_RX_SEL(x) ((x) << S_T7_DEBUG_TX_RX_SEL)
+#define F_T7_DEBUG_TX_RX_SEL V_T7_DEBUG_TX_RX_SEL(1U)
+
+#define S_MAC_RDY_CTL 0
+#define M_MAC_RDY_CTL 0x3fU
+#define V_MAC_RDY_CTL(x) ((x) << S_MAC_RDY_CTL)
+#define G_MAC_RDY_CTL(x) (((x) >> S_MAC_RDY_CTL) & M_MAC_RDY_CTL)
+
+#define A_MAC_MTIP_RESET_CTRL_0 0x38004
+
+#define S_RESET_F91_REF_CLK_I 31
+#define V_RESET_F91_REF_CLK_I(x) ((x) << S_RESET_F91_REF_CLK_I)
+#define F_RESET_F91_REF_CLK_I V_RESET_F91_REF_CLK_I(1U)
+
+#define S_RESET_PCS000_REF_CLK_I 30
+#define V_RESET_PCS000_REF_CLK_I(x) ((x) << S_RESET_PCS000_REF_CLK_I)
+#define F_RESET_PCS000_REF_CLK_I V_RESET_PCS000_REF_CLK_I(1U)
+
+#define S_RESET_REF_CLK_I 29
+#define V_RESET_REF_CLK_I(x) ((x) << S_RESET_REF_CLK_I)
+#define F_RESET_REF_CLK_I V_RESET_REF_CLK_I(1U)
+
+#define S_RESET_SD_RX_CLK_I_0 28
+#define V_RESET_SD_RX_CLK_I_0(x) ((x) << S_RESET_SD_RX_CLK_I_0)
+#define F_RESET_SD_RX_CLK_I_0 V_RESET_SD_RX_CLK_I_0(1U)
+
+#define S_RESET_SD_RX_CLK_I_1 27
+#define V_RESET_SD_RX_CLK_I_1(x) ((x) << S_RESET_SD_RX_CLK_I_1)
+#define F_RESET_SD_RX_CLK_I_1 V_RESET_SD_RX_CLK_I_1(1U)
+
+#define S_RESET_SD_RX_CLK_I_2 26
+#define V_RESET_SD_RX_CLK_I_2(x) ((x) << S_RESET_SD_RX_CLK_I_2)
+#define F_RESET_SD_RX_CLK_I_2 V_RESET_SD_RX_CLK_I_2(1U)
+
+#define S_RESET_SD_RX_CLK_I_3 25
+#define V_RESET_SD_RX_CLK_I_3(x) ((x) << S_RESET_SD_RX_CLK_I_3)
+#define F_RESET_SD_RX_CLK_I_3 V_RESET_SD_RX_CLK_I_3(1U)
+
+#define S_RESET_SD_RX_CLK_I_4 24
+#define V_RESET_SD_RX_CLK_I_4(x) ((x) << S_RESET_SD_RX_CLK_I_4)
+#define F_RESET_SD_RX_CLK_I_4 V_RESET_SD_RX_CLK_I_4(1U)
+
+#define S_RESET_SD_RX_CLK_I_5 23
+#define V_RESET_SD_RX_CLK_I_5(x) ((x) << S_RESET_SD_RX_CLK_I_5)
+#define F_RESET_SD_RX_CLK_I_5 V_RESET_SD_RX_CLK_I_5(1U)
+
+#define S_RESET_SD_RX_CLK_I_6 22
+#define V_RESET_SD_RX_CLK_I_6(x) ((x) << S_RESET_SD_RX_CLK_I_6)
+#define F_RESET_SD_RX_CLK_I_6 V_RESET_SD_RX_CLK_I_6(1U)
+
+#define S_RESET_SD_RX_CLK_I_7 21
+#define V_RESET_SD_RX_CLK_I_7(x) ((x) << S_RESET_SD_RX_CLK_I_7)
+#define F_RESET_SD_RX_CLK_I_7 V_RESET_SD_RX_CLK_I_7(1U)
+
+#define S_RESET_SD_TX_CLK_I_0 20
+#define V_RESET_SD_TX_CLK_I_0(x) ((x) << S_RESET_SD_TX_CLK_I_0)
+#define F_RESET_SD_TX_CLK_I_0 V_RESET_SD_TX_CLK_I_0(1U)
+
+#define S_RESET_SD_TX_CLK_I_1 19
+#define V_RESET_SD_TX_CLK_I_1(x) ((x) << S_RESET_SD_TX_CLK_I_1)
+#define F_RESET_SD_TX_CLK_I_1 V_RESET_SD_TX_CLK_I_1(1U)
+
+#define S_RESET_SD_TX_CLK_I_2 18
+#define V_RESET_SD_TX_CLK_I_2(x) ((x) << S_RESET_SD_TX_CLK_I_2)
+#define F_RESET_SD_TX_CLK_I_2 V_RESET_SD_TX_CLK_I_2(1U)
+
+#define S_RESET_SD_TX_CLK_I_3 17
+#define V_RESET_SD_TX_CLK_I_3(x) ((x) << S_RESET_SD_TX_CLK_I_3)
+#define F_RESET_SD_TX_CLK_I_3 V_RESET_SD_TX_CLK_I_3(1U)
+
+#define S_RESET_SD_TX_CLK_I_4 16
+#define V_RESET_SD_TX_CLK_I_4(x) ((x) << S_RESET_SD_TX_CLK_I_4)
+#define F_RESET_SD_TX_CLK_I_4 V_RESET_SD_TX_CLK_I_4(1U)
+
+#define S_RESET_SD_TX_CLK_I_5 15
+#define V_RESET_SD_TX_CLK_I_5(x) ((x) << S_RESET_SD_TX_CLK_I_5)
+#define F_RESET_SD_TX_CLK_I_5 V_RESET_SD_TX_CLK_I_5(1U)
+
+#define S_RESET_SD_TX_CLK_I_6 14
+#define V_RESET_SD_TX_CLK_I_6(x) ((x) << S_RESET_SD_TX_CLK_I_6)
+#define F_RESET_SD_TX_CLK_I_6 V_RESET_SD_TX_CLK_I_6(1U)
+
+#define S_RESET_SD_TX_CLK_I_7 13
+#define V_RESET_SD_TX_CLK_I_7(x) ((x) << S_RESET_SD_TX_CLK_I_7)
+#define F_RESET_SD_TX_CLK_I_7 V_RESET_SD_TX_CLK_I_7(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_0 12
+#define V_RESET_XPCS_REF_CLK_I_0(x) ((x) << S_RESET_XPCS_REF_CLK_I_0)
+#define F_RESET_XPCS_REF_CLK_I_0 V_RESET_XPCS_REF_CLK_I_0(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_1 11
+#define V_RESET_XPCS_REF_CLK_I_1(x) ((x) << S_RESET_XPCS_REF_CLK_I_1)
+#define F_RESET_XPCS_REF_CLK_I_1 V_RESET_XPCS_REF_CLK_I_1(1U)
+
+#define S_RESET_FF_RX_CLK_0_I 9
+#define V_RESET_FF_RX_CLK_0_I(x) ((x) << S_RESET_FF_RX_CLK_0_I)
+#define F_RESET_FF_RX_CLK_0_I V_RESET_FF_RX_CLK_0_I(1U)
+
+#define S_RESET_FF_TX_CLK_0_I 8
+#define V_RESET_FF_TX_CLK_0_I(x) ((x) << S_RESET_FF_TX_CLK_0_I)
+#define F_RESET_FF_TX_CLK_0_I V_RESET_FF_TX_CLK_0_I(1U)
+
+#define S_RESET_RXCLK_0_I 7
+#define V_RESET_RXCLK_0_I(x) ((x) << S_RESET_RXCLK_0_I)
+#define F_RESET_RXCLK_0_I V_RESET_RXCLK_0_I(1U)
+
+#define S_RESET_TXCLK_0_I 6
+#define V_RESET_TXCLK_0_I(x) ((x) << S_RESET_TXCLK_0_I)
+#define F_RESET_TXCLK_0_I V_RESET_TXCLK_0_I(1U)
+
+#define S_RESET_FF_RX_CLK_1_I 5
+#define V_RESET_FF_RX_CLK_1_I(x) ((x) << S_RESET_FF_RX_CLK_1_I)
+#define F_RESET_FF_RX_CLK_1_I V_RESET_FF_RX_CLK_1_I(1U)
+
+#define S_RESET_FF_TX_CLK_1_I 4
+#define V_RESET_FF_TX_CLK_1_I(x) ((x) << S_RESET_FF_TX_CLK_1_I)
+#define F_RESET_FF_TX_CLK_1_I V_RESET_FF_TX_CLK_1_I(1U)
+
+#define S_RESET_RXCLK_1_I 3
+#define V_RESET_RXCLK_1_I(x) ((x) << S_RESET_RXCLK_1_I)
+#define F_RESET_RXCLK_1_I V_RESET_RXCLK_1_I(1U)
+
+#define S_RESET_TXCLK_1_I 2
+#define V_RESET_TXCLK_1_I(x) ((x) << S_RESET_TXCLK_1_I)
+#define F_RESET_TXCLK_1_I V_RESET_TXCLK_1_I(1U)
+
+#define S_XGMII_CLK_RESET_0 0
+#define V_XGMII_CLK_RESET_0(x) ((x) << S_XGMII_CLK_RESET_0)
+#define F_XGMII_CLK_RESET_0 V_XGMII_CLK_RESET_0(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_1 0x38008
+
+#define S_RESET_FF_RX_CLK_2_I 31
+#define V_RESET_FF_RX_CLK_2_I(x) ((x) << S_RESET_FF_RX_CLK_2_I)
+#define F_RESET_FF_RX_CLK_2_I V_RESET_FF_RX_CLK_2_I(1U)
+
+#define S_RESET_FF_TX_CLK_2_I 30
+#define V_RESET_FF_TX_CLK_2_I(x) ((x) << S_RESET_FF_TX_CLK_2_I)
+#define F_RESET_FF_TX_CLK_2_I V_RESET_FF_TX_CLK_2_I(1U)
+
+#define S_RESET_RXCLK_2_I 29
+#define V_RESET_RXCLK_2_I(x) ((x) << S_RESET_RXCLK_2_I)
+#define F_RESET_RXCLK_2_I V_RESET_RXCLK_2_I(1U)
+
+#define S_RESET_TXCLK_2_I 28
+#define V_RESET_TXCLK_2_I(x) ((x) << S_RESET_TXCLK_2_I)
+#define F_RESET_TXCLK_2_I V_RESET_TXCLK_2_I(1U)
+
+#define S_RESET_FF_RX_CLK_3_I 27
+#define V_RESET_FF_RX_CLK_3_I(x) ((x) << S_RESET_FF_RX_CLK_3_I)
+#define F_RESET_FF_RX_CLK_3_I V_RESET_FF_RX_CLK_3_I(1U)
+
+#define S_RESET_FF_TX_CLK_3_I 26
+#define V_RESET_FF_TX_CLK_3_I(x) ((x) << S_RESET_FF_TX_CLK_3_I)
+#define F_RESET_FF_TX_CLK_3_I V_RESET_FF_TX_CLK_3_I(1U)
+
+#define S_RESET_RXCLK_3_I 25
+#define V_RESET_RXCLK_3_I(x) ((x) << S_RESET_RXCLK_3_I)
+#define F_RESET_RXCLK_3_I V_RESET_RXCLK_3_I(1U)
+
+#define S_RESET_TXCLK_3_I 24
+#define V_RESET_TXCLK_3_I(x) ((x) << S_RESET_TXCLK_3_I)
+#define F_RESET_TXCLK_3_I V_RESET_TXCLK_3_I(1U)
+
+#define S_RESET_FF_RX_CLK_4_I 23
+#define V_RESET_FF_RX_CLK_4_I(x) ((x) << S_RESET_FF_RX_CLK_4_I)
+#define F_RESET_FF_RX_CLK_4_I V_RESET_FF_RX_CLK_4_I(1U)
+
+#define S_RESET_FF_TX_CLK_4_I 22
+#define V_RESET_FF_TX_CLK_4_I(x) ((x) << S_RESET_FF_TX_CLK_4_I)
+#define F_RESET_FF_TX_CLK_4_I V_RESET_FF_TX_CLK_4_I(1U)
+
+#define S_RESET_RXCLK_4_I 21
+#define V_RESET_RXCLK_4_I(x) ((x) << S_RESET_RXCLK_4_I)
+#define F_RESET_RXCLK_4_I V_RESET_RXCLK_4_I(1U)
+
+#define S_RESET_TXCLK_4_I 20
+#define V_RESET_TXCLK_4_I(x) ((x) << S_RESET_TXCLK_4_I)
+#define F_RESET_TXCLK_4_I V_RESET_TXCLK_4_I(1U)
+
+#define S_RESET_FF_RX_CLK_5_I 19
+#define V_RESET_FF_RX_CLK_5_I(x) ((x) << S_RESET_FF_RX_CLK_5_I)
+#define F_RESET_FF_RX_CLK_5_I V_RESET_FF_RX_CLK_5_I(1U)
+
+#define S_RESET_FF_TX_CLK_5_I 18
+#define V_RESET_FF_TX_CLK_5_I(x) ((x) << S_RESET_FF_TX_CLK_5_I)
+#define F_RESET_FF_TX_CLK_5_I V_RESET_FF_TX_CLK_5_I(1U)
+
+#define S_RESET_RXCLK_5_I 17
+#define V_RESET_RXCLK_5_I(x) ((x) << S_RESET_RXCLK_5_I)
+#define F_RESET_RXCLK_5_I V_RESET_RXCLK_5_I(1U)
+
+#define S_RESET_TXCLK_5_I 16
+#define V_RESET_TXCLK_5_I(x) ((x) << S_RESET_TXCLK_5_I)
+#define F_RESET_TXCLK_5_I V_RESET_TXCLK_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_0_I 15
+#define V_RESET_SD_RX_CLK_AN_0_I(x) ((x) << S_RESET_SD_RX_CLK_AN_0_I)
+#define F_RESET_SD_RX_CLK_AN_0_I V_RESET_SD_RX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_0_I 14
+#define V_RESET_SD_TX_CLK_AN_0_I(x) ((x) << S_RESET_SD_TX_CLK_AN_0_I)
+#define F_RESET_SD_TX_CLK_AN_0_I V_RESET_SD_TX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_1_I 13
+#define V_RESET_SD_RX_CLK_AN_1_I(x) ((x) << S_RESET_SD_RX_CLK_AN_1_I)
+#define F_RESET_SD_RX_CLK_AN_1_I V_RESET_SD_RX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_1_I 12
+#define V_RESET_SD_TX_CLK_AN_1_I(x) ((x) << S_RESET_SD_TX_CLK_AN_1_I)
+#define F_RESET_SD_TX_CLK_AN_1_I V_RESET_SD_TX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_2_I 11
+#define V_RESET_SD_RX_CLK_AN_2_I(x) ((x) << S_RESET_SD_RX_CLK_AN_2_I)
+#define F_RESET_SD_RX_CLK_AN_2_I V_RESET_SD_RX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_2_I 10
+#define V_RESET_SD_TX_CLK_AN_2_I(x) ((x) << S_RESET_SD_TX_CLK_AN_2_I)
+#define F_RESET_SD_TX_CLK_AN_2_I V_RESET_SD_TX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_3_I 9
+#define V_RESET_SD_RX_CLK_AN_3_I(x) ((x) << S_RESET_SD_RX_CLK_AN_3_I)
+#define F_RESET_SD_RX_CLK_AN_3_I V_RESET_SD_RX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_3_I 8
+#define V_RESET_SD_TX_CLK_AN_3_I(x) ((x) << S_RESET_SD_TX_CLK_AN_3_I)
+#define F_RESET_SD_TX_CLK_AN_3_I V_RESET_SD_TX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_4_I 7
+#define V_RESET_SD_RX_CLK_AN_4_I(x) ((x) << S_RESET_SD_RX_CLK_AN_4_I)
+#define F_RESET_SD_RX_CLK_AN_4_I V_RESET_SD_RX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_4_I 6
+#define V_RESET_SD_TX_CLK_AN_4_I(x) ((x) << S_RESET_SD_TX_CLK_AN_4_I)
+#define F_RESET_SD_TX_CLK_AN_4_I V_RESET_SD_TX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_5_I 5
+#define V_RESET_SD_RX_CLK_AN_5_I(x) ((x) << S_RESET_SD_RX_CLK_AN_5_I)
+#define F_RESET_SD_RX_CLK_AN_5_I V_RESET_SD_RX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_5_I 4
+#define V_RESET_SD_TX_CLK_AN_5_I(x) ((x) << S_RESET_SD_TX_CLK_AN_5_I)
+#define F_RESET_SD_TX_CLK_AN_5_I V_RESET_SD_TX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_6_I 3
+#define V_RESET_SD_RX_CLK_AN_6_I(x) ((x) << S_RESET_SD_RX_CLK_AN_6_I)
+#define F_RESET_SD_RX_CLK_AN_6_I V_RESET_SD_RX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_6_I 2
+#define V_RESET_SD_TX_CLK_AN_6_I(x) ((x) << S_RESET_SD_TX_CLK_AN_6_I)
+#define F_RESET_SD_TX_CLK_AN_6_I V_RESET_SD_TX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_7_I 1
+#define V_RESET_SD_RX_CLK_AN_7_I(x) ((x) << S_RESET_SD_RX_CLK_AN_7_I)
+#define F_RESET_SD_RX_CLK_AN_7_I V_RESET_SD_RX_CLK_AN_7_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_7_I 0
+#define V_RESET_SD_TX_CLK_AN_7_I(x) ((x) << S_RESET_SD_TX_CLK_AN_7_I)
+#define F_RESET_SD_TX_CLK_AN_7_I V_RESET_SD_TX_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_2 0x3800c
+
+#define S_RESET_SGMII_TXCLK_I_3 31
+#define V_RESET_SGMII_TXCLK_I_3(x) ((x) << S_RESET_SGMII_TXCLK_I_3)
+#define F_RESET_SGMII_TXCLK_I_3 V_RESET_SGMII_TXCLK_I_3(1U)
+
+#define S_RESET_SGMII_RXCLK_I_3 30
+#define V_RESET_SGMII_RXCLK_I_3(x) ((x) << S_RESET_SGMII_RXCLK_I_3)
+#define F_RESET_SGMII_RXCLK_I_3 V_RESET_SGMII_RXCLK_I_3(1U)
+
+#define S_RESET_SGMII_TXCLK_I_2 29
+#define V_RESET_SGMII_TXCLK_I_2(x) ((x) << S_RESET_SGMII_TXCLK_I_2)
+#define F_RESET_SGMII_TXCLK_I_2 V_RESET_SGMII_TXCLK_I_2(1U)
+
+#define S_RESET_SGMII_RXCLK_I_2 28
+#define V_RESET_SGMII_RXCLK_I_2(x) ((x) << S_RESET_SGMII_RXCLK_I_2)
+#define F_RESET_SGMII_RXCLK_I_2 V_RESET_SGMII_RXCLK_I_2(1U)
+
+#define S_RESET_SGMII_TXCLK_I_1 27
+#define V_RESET_SGMII_TXCLK_I_1(x) ((x) << S_RESET_SGMII_TXCLK_I_1)
+#define F_RESET_SGMII_TXCLK_I_1 V_RESET_SGMII_TXCLK_I_1(1U)
+
+#define S_RESET_SGMII_RXCLK_I_1 26
+#define V_RESET_SGMII_RXCLK_I_1(x) ((x) << S_RESET_SGMII_RXCLK_I_1)
+#define F_RESET_SGMII_RXCLK_I_1 V_RESET_SGMII_RXCLK_I_1(1U)
+
+#define S_RESET_SGMII_TXCLK_I_0 25
+#define V_RESET_SGMII_TXCLK_I_0(x) ((x) << S_RESET_SGMII_TXCLK_I_0)
+#define F_RESET_SGMII_TXCLK_I_0 V_RESET_SGMII_TXCLK_I_0(1U)
+
+#define S_RESET_SGMII_RXCLK_I_0 24
+#define V_RESET_SGMII_RXCLK_I_0(x) ((x) << S_RESET_SGMII_RXCLK_I_0)
+#define F_RESET_SGMII_RXCLK_I_0 V_RESET_SGMII_RXCLK_I_0(1U)
+
+#define S_MTIPSD7TXRST 23
+#define V_MTIPSD7TXRST(x) ((x) << S_MTIPSD7TXRST)
+#define F_MTIPSD7TXRST V_MTIPSD7TXRST(1U)
+
+#define S_MTIPSD6TXRST 22
+#define V_MTIPSD6TXRST(x) ((x) << S_MTIPSD6TXRST)
+#define F_MTIPSD6TXRST V_MTIPSD6TXRST(1U)
+
+#define S_MTIPSD5TXRST 21
+#define V_MTIPSD5TXRST(x) ((x) << S_MTIPSD5TXRST)
+#define F_MTIPSD5TXRST V_MTIPSD5TXRST(1U)
+
+#define S_MTIPSD4TXRST 20
+#define V_MTIPSD4TXRST(x) ((x) << S_MTIPSD4TXRST)
+#define F_MTIPSD4TXRST V_MTIPSD4TXRST(1U)
+
+#define S_T7_MTIPSD3TXRST 19
+#define V_T7_MTIPSD3TXRST(x) ((x) << S_T7_MTIPSD3TXRST)
+#define F_T7_MTIPSD3TXRST V_T7_MTIPSD3TXRST(1U)
+
+#define S_T7_MTIPSD2TXRST 18
+#define V_T7_MTIPSD2TXRST(x) ((x) << S_T7_MTIPSD2TXRST)
+#define F_T7_MTIPSD2TXRST V_T7_MTIPSD2TXRST(1U)
+
+#define S_T7_MTIPSD1TXRST 17
+#define V_T7_MTIPSD1TXRST(x) ((x) << S_T7_MTIPSD1TXRST)
+#define F_T7_MTIPSD1TXRST V_T7_MTIPSD1TXRST(1U)
+
+#define S_T7_MTIPSD0TXRST 16
+#define V_T7_MTIPSD0TXRST(x) ((x) << S_T7_MTIPSD0TXRST)
+#define F_T7_MTIPSD0TXRST V_T7_MTIPSD0TXRST(1U)
+
+#define S_MTIPSD7RXRST 15
+#define V_MTIPSD7RXRST(x) ((x) << S_MTIPSD7RXRST)
+#define F_MTIPSD7RXRST V_MTIPSD7RXRST(1U)
+
+#define S_MTIPSD6RXRST 14
+#define V_MTIPSD6RXRST(x) ((x) << S_MTIPSD6RXRST)
+#define F_MTIPSD6RXRST V_MTIPSD6RXRST(1U)
+
+#define S_MTIPSD5RXRST 13
+#define V_MTIPSD5RXRST(x) ((x) << S_MTIPSD5RXRST)
+#define F_MTIPSD5RXRST V_MTIPSD5RXRST(1U)
+
+#define S_MTIPSD4RXRST 12
+#define V_MTIPSD4RXRST(x) ((x) << S_MTIPSD4RXRST)
+#define F_MTIPSD4RXRST V_MTIPSD4RXRST(1U)
+
+#define S_T7_MTIPSD3RXRST 11
+#define V_T7_MTIPSD3RXRST(x) ((x) << S_T7_MTIPSD3RXRST)
+#define F_T7_MTIPSD3RXRST V_T7_MTIPSD3RXRST(1U)
+
+#define S_T7_MTIPSD2RXRST 10
+#define V_T7_MTIPSD2RXRST(x) ((x) << S_T7_MTIPSD2RXRST)
+#define F_T7_MTIPSD2RXRST V_T7_MTIPSD2RXRST(1U)
+
+#define S_T7_MTIPSD1RXRST 9
+#define V_T7_MTIPSD1RXRST(x) ((x) << S_T7_MTIPSD1RXRST)
+#define F_T7_MTIPSD1RXRST V_T7_MTIPSD1RXRST(1U)
+
+#define S_T7_MTIPSD0RXRST 8
+#define V_T7_MTIPSD0RXRST(x) ((x) << S_T7_MTIPSD0RXRST)
+#define F_T7_MTIPSD0RXRST V_T7_MTIPSD0RXRST(1U)
+
+#define S_RESET_REG_CLK_AN_0_I 7
+#define V_RESET_REG_CLK_AN_0_I(x) ((x) << S_RESET_REG_CLK_AN_0_I)
+#define F_RESET_REG_CLK_AN_0_I V_RESET_REG_CLK_AN_0_I(1U)
+
+#define S_RESET_REG_CLK_AN_1_I 6
+#define V_RESET_REG_CLK_AN_1_I(x) ((x) << S_RESET_REG_CLK_AN_1_I)
+#define F_RESET_REG_CLK_AN_1_I V_RESET_REG_CLK_AN_1_I(1U)
+
+#define S_RESET_REG_CLK_AN_2_I 5
+#define V_RESET_REG_CLK_AN_2_I(x) ((x) << S_RESET_REG_CLK_AN_2_I)
+#define F_RESET_REG_CLK_AN_2_I V_RESET_REG_CLK_AN_2_I(1U)
+
+#define S_RESET_REG_CLK_AN_3_I 4
+#define V_RESET_REG_CLK_AN_3_I(x) ((x) << S_RESET_REG_CLK_AN_3_I)
+#define F_RESET_REG_CLK_AN_3_I V_RESET_REG_CLK_AN_3_I(1U)
+
+#define S_RESET_REG_CLK_AN_4_I 3
+#define V_RESET_REG_CLK_AN_4_I(x) ((x) << S_RESET_REG_CLK_AN_4_I)
+#define F_RESET_REG_CLK_AN_4_I V_RESET_REG_CLK_AN_4_I(1U)
+
+#define S_RESET_REG_CLK_AN_5_I 2
+#define V_RESET_REG_CLK_AN_5_I(x) ((x) << S_RESET_REG_CLK_AN_5_I)
+#define F_RESET_REG_CLK_AN_5_I V_RESET_REG_CLK_AN_5_I(1U)
+
+#define S_RESET_REG_CLK_AN_6_I 1
+#define V_RESET_REG_CLK_AN_6_I(x) ((x) << S_RESET_REG_CLK_AN_6_I)
+#define F_RESET_REG_CLK_AN_6_I V_RESET_REG_CLK_AN_6_I(1U)
+
+#define S_RESET_REG_CLK_AN_7_I 0
+#define V_RESET_REG_CLK_AN_7_I(x) ((x) << S_RESET_REG_CLK_AN_7_I)
+#define F_RESET_REG_CLK_AN_7_I V_RESET_REG_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_0 0x38010
+
+#define S_F91_REF_CLK_I_G 31
+#define V_F91_REF_CLK_I_G(x) ((x) << S_F91_REF_CLK_I_G)
+#define F_F91_REF_CLK_I_G V_F91_REF_CLK_I_G(1U)
+
+#define S_PCS000_REF_CLK_I_G 30
+#define V_PCS000_REF_CLK_I_G(x) ((x) << S_PCS000_REF_CLK_I_G)
+#define F_PCS000_REF_CLK_I_G V_PCS000_REF_CLK_I_G(1U)
+
+#define S_REF_CLK_I_G 29
+#define V_REF_CLK_I_G(x) ((x) << S_REF_CLK_I_G)
+#define F_REF_CLK_I_G V_REF_CLK_I_G(1U)
+
+#define S_SD_RX_CLK_I_0_G 28
+#define V_SD_RX_CLK_I_0_G(x) ((x) << S_SD_RX_CLK_I_0_G)
+#define F_SD_RX_CLK_I_0_G V_SD_RX_CLK_I_0_G(1U)
+
+#define S_SD_RX_CLK_I_1_G 27
+#define V_SD_RX_CLK_I_1_G(x) ((x) << S_SD_RX_CLK_I_1_G)
+#define F_SD_RX_CLK_I_1_G V_SD_RX_CLK_I_1_G(1U)
+
+#define S_SD_RX_CLK_I_2_G 26
+#define V_SD_RX_CLK_I_2_G(x) ((x) << S_SD_RX_CLK_I_2_G)
+#define F_SD_RX_CLK_I_2_G V_SD_RX_CLK_I_2_G(1U)
+
+#define S_SD_RX_CLK_I_3_G 25
+#define V_SD_RX_CLK_I_3_G(x) ((x) << S_SD_RX_CLK_I_3_G)
+#define F_SD_RX_CLK_I_3_G V_SD_RX_CLK_I_3_G(1U)
+
+#define S_SD_RX_CLK_I_4_G 24
+#define V_SD_RX_CLK_I_4_G(x) ((x) << S_SD_RX_CLK_I_4_G)
+#define F_SD_RX_CLK_I_4_G V_SD_RX_CLK_I_4_G(1U)
+
+#define S_SD_RX_CLK_I_5_G 23
+#define V_SD_RX_CLK_I_5_G(x) ((x) << S_SD_RX_CLK_I_5_G)
+#define F_SD_RX_CLK_I_5_G V_SD_RX_CLK_I_5_G(1U)
+
+#define S_SD_RX_CLK_I_6_G 22
+#define V_SD_RX_CLK_I_6_G(x) ((x) << S_SD_RX_CLK_I_6_G)
+#define F_SD_RX_CLK_I_6_G V_SD_RX_CLK_I_6_G(1U)
+
+#define S_SD_RX_CLK_I_7_G 21
+#define V_SD_RX_CLK_I_7_G(x) ((x) << S_SD_RX_CLK_I_7_G)
+#define F_SD_RX_CLK_I_7_G V_SD_RX_CLK_I_7_G(1U)
+
+#define S_SD_TX_CLK_I_0_G 20
+#define V_SD_TX_CLK_I_0_G(x) ((x) << S_SD_TX_CLK_I_0_G)
+#define F_SD_TX_CLK_I_0_G V_SD_TX_CLK_I_0_G(1U)
+
+#define S_SD_TX_CLK_I_1_G 19
+#define V_SD_TX_CLK_I_1_G(x) ((x) << S_SD_TX_CLK_I_1_G)
+#define F_SD_TX_CLK_I_1_G V_SD_TX_CLK_I_1_G(1U)
+
+#define S_SD_TX_CLK_I_2_G 18
+#define V_SD_TX_CLK_I_2_G(x) ((x) << S_SD_TX_CLK_I_2_G)
+#define F_SD_TX_CLK_I_2_G V_SD_TX_CLK_I_2_G(1U)
+
+#define S_SD_TX_CLK_I_3_G 17
+#define V_SD_TX_CLK_I_3_G(x) ((x) << S_SD_TX_CLK_I_3_G)
+#define F_SD_TX_CLK_I_3_G V_SD_TX_CLK_I_3_G(1U)
+
+#define S_SD_TX_CLK_I_4_G 16
+#define V_SD_TX_CLK_I_4_G(x) ((x) << S_SD_TX_CLK_I_4_G)
+#define F_SD_TX_CLK_I_4_G V_SD_TX_CLK_I_4_G(1U)
+
+#define S_SD_TX_CLK_I_5_G 15
+#define V_SD_TX_CLK_I_5_G(x) ((x) << S_SD_TX_CLK_I_5_G)
+#define F_SD_TX_CLK_I_5_G V_SD_TX_CLK_I_5_G(1U)
+
+#define S_SD_TX_CLK_I_6_G 14
+#define V_SD_TX_CLK_I_6_G(x) ((x) << S_SD_TX_CLK_I_6_G)
+#define F_SD_TX_CLK_I_6_G V_SD_TX_CLK_I_6_G(1U)
+
+#define S_SD_TX_CLK_I_7_G 13
+#define V_SD_TX_CLK_I_7_G(x) ((x) << S_SD_TX_CLK_I_7_G)
+#define F_SD_TX_CLK_I_7_G V_SD_TX_CLK_I_7_G(1U)
+
+#define S_XPCS_REF_CLK_I_0_G 12
+#define V_XPCS_REF_CLK_I_0_G(x) ((x) << S_XPCS_REF_CLK_I_0_G)
+#define F_XPCS_REF_CLK_I_0_G V_XPCS_REF_CLK_I_0_G(1U)
+
+#define S_XPCS_REF_CLK_I_1_G 11
+#define V_XPCS_REF_CLK_I_1_G(x) ((x) << S_XPCS_REF_CLK_I_1_G)
+#define F_XPCS_REF_CLK_I_1_G V_XPCS_REF_CLK_I_1_G(1U)
+
+#define S_REG_CLK_I_G 10
+#define V_REG_CLK_I_G(x) ((x) << S_REG_CLK_I_G)
+#define F_REG_CLK_I_G V_REG_CLK_I_G(1U)
+
+#define S_FF_RX_CLK_0_I_G 9
+#define V_FF_RX_CLK_0_I_G(x) ((x) << S_FF_RX_CLK_0_I_G)
+#define F_FF_RX_CLK_0_I_G V_FF_RX_CLK_0_I_G(1U)
+
+#define S_FF_TX_CLK_0_I_G 8
+#define V_FF_TX_CLK_0_I_G(x) ((x) << S_FF_TX_CLK_0_I_G)
+#define F_FF_TX_CLK_0_I_G V_FF_TX_CLK_0_I_G(1U)
+
+#define S_RXCLK_0_I_G 7
+#define V_RXCLK_0_I_G(x) ((x) << S_RXCLK_0_I_G)
+#define F_RXCLK_0_I_G V_RXCLK_0_I_G(1U)
+
+#define S_TXCLK_0_I_G 6
+#define V_TXCLK_0_I_G(x) ((x) << S_TXCLK_0_I_G)
+#define F_TXCLK_0_I_G V_TXCLK_0_I_G(1U)
+
+#define S_FF_RX_CLK_1_I_G 5
+#define V_FF_RX_CLK_1_I_G(x) ((x) << S_FF_RX_CLK_1_I_G)
+#define F_FF_RX_CLK_1_I_G V_FF_RX_CLK_1_I_G(1U)
+
+#define S_FF_TX_CLK_1_I_G 4
+#define V_FF_TX_CLK_1_I_G(x) ((x) << S_FF_TX_CLK_1_I_G)
+#define F_FF_TX_CLK_1_I_G V_FF_TX_CLK_1_I_G(1U)
+
+#define S_RXCLK_1_I_G 3
+#define V_RXCLK_1_I_G(x) ((x) << S_RXCLK_1_I_G)
+#define F_RXCLK_1_I_G V_RXCLK_1_I_G(1U)
+
+#define S_TXCLK_1_I_G 2
+#define V_TXCLK_1_I_G(x) ((x) << S_TXCLK_1_I_G)
+#define F_TXCLK_1_I_G V_TXCLK_1_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_1 0x38014
+
+#define S_FF_RX_CLK_2_I_G 31
+#define V_FF_RX_CLK_2_I_G(x) ((x) << S_FF_RX_CLK_2_I_G)
+#define F_FF_RX_CLK_2_I_G V_FF_RX_CLK_2_I_G(1U)
+
+#define S_FF_TX_CLK_2_I_G 30
+#define V_FF_TX_CLK_2_I_G(x) ((x) << S_FF_TX_CLK_2_I_G)
+#define F_FF_TX_CLK_2_I_G V_FF_TX_CLK_2_I_G(1U)
+
+#define S_RXCLK_2_I_G 29
+#define V_RXCLK_2_I_G(x) ((x) << S_RXCLK_2_I_G)
+#define F_RXCLK_2_I_G V_RXCLK_2_I_G(1U)
+
+#define S_TXCLK_2_I_G 28
+#define V_TXCLK_2_I_G(x) ((x) << S_TXCLK_2_I_G)
+#define F_TXCLK_2_I_G V_TXCLK_2_I_G(1U)
+
+#define S_FF_RX_CLK_3_I_G 27
+#define V_FF_RX_CLK_3_I_G(x) ((x) << S_FF_RX_CLK_3_I_G)
+#define F_FF_RX_CLK_3_I_G V_FF_RX_CLK_3_I_G(1U)
+
+#define S_FF_TX_CLK_3_I_G 26
+#define V_FF_TX_CLK_3_I_G(x) ((x) << S_FF_TX_CLK_3_I_G)
+#define F_FF_TX_CLK_3_I_G V_FF_TX_CLK_3_I_G(1U)
+
+#define S_RXCLK_3_I_G 25
+#define V_RXCLK_3_I_G(x) ((x) << S_RXCLK_3_I_G)
+#define F_RXCLK_3_I_G V_RXCLK_3_I_G(1U)
+
+#define S_TXCLK_3_I_G 24
+#define V_TXCLK_3_I_G(x) ((x) << S_TXCLK_3_I_G)
+#define F_TXCLK_3_I_G V_TXCLK_3_I_G(1U)
+
+#define S_FF_RX_CLK_4_I_G 23
+#define V_FF_RX_CLK_4_I_G(x) ((x) << S_FF_RX_CLK_4_I_G)
+#define F_FF_RX_CLK_4_I_G V_FF_RX_CLK_4_I_G(1U)
+
+#define S_FF_TX_CLK_4_I_G 22
+#define V_FF_TX_CLK_4_I_G(x) ((x) << S_FF_TX_CLK_4_I_G)
+#define F_FF_TX_CLK_4_I_G V_FF_TX_CLK_4_I_G(1U)
+
+#define S_RXCLK_4_I_G 21
+#define V_RXCLK_4_I_G(x) ((x) << S_RXCLK_4_I_G)
+#define F_RXCLK_4_I_G V_RXCLK_4_I_G(1U)
+
+#define S_TXCLK_4_I_G 20
+#define V_TXCLK_4_I_G(x) ((x) << S_TXCLK_4_I_G)
+#define F_TXCLK_4_I_G V_TXCLK_4_I_G(1U)
+
+#define S_FF_RX_CLK_5_I_G 19
+#define V_FF_RX_CLK_5_I_G(x) ((x) << S_FF_RX_CLK_5_I_G)
+#define F_FF_RX_CLK_5_I_G V_FF_RX_CLK_5_I_G(1U)
+
+#define S_FF_TX_CLK_5_I_G 18
+#define V_FF_TX_CLK_5_I_G(x) ((x) << S_FF_TX_CLK_5_I_G)
+#define F_FF_TX_CLK_5_I_G V_FF_TX_CLK_5_I_G(1U)
+
+#define S_RXCLK_5_I_G 17
+#define V_RXCLK_5_I_G(x) ((x) << S_RXCLK_5_I_G)
+#define F_RXCLK_5_I_G V_RXCLK_5_I_G(1U)
+
+#define S_TXCLK_5_I_G 16
+#define V_TXCLK_5_I_G(x) ((x) << S_TXCLK_5_I_G)
+#define F_TXCLK_5_I_G V_TXCLK_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_0_I_G 15
+#define V_SD_RX_CLK_AN_0_I_G(x) ((x) << S_SD_RX_CLK_AN_0_I_G)
+#define F_SD_RX_CLK_AN_0_I_G V_SD_RX_CLK_AN_0_I_G(1U)
+
+#define S_SD_TX_CLK_AN_0_I_G 14
+#define V_SD_TX_CLK_AN_0_I_G(x) ((x) << S_SD_TX_CLK_AN_0_I_G)
+#define F_SD_TX_CLK_AN_0_I_G V_SD_TX_CLK_AN_0_I_G(1U)
+
+#define S_SD_RX_CLK_AN_1_I_G 13
+#define V_SD_RX_CLK_AN_1_I_G(x) ((x) << S_SD_RX_CLK_AN_1_I_G)
+#define F_SD_RX_CLK_AN_1_I_G V_SD_RX_CLK_AN_1_I_G(1U)
+
+#define S_SD_TX_CLK_AN_1_I_G 12
+#define V_SD_TX_CLK_AN_1_I_G(x) ((x) << S_SD_TX_CLK_AN_1_I_G)
+#define F_SD_TX_CLK_AN_1_I_G V_SD_TX_CLK_AN_1_I_G(1U)
+
+#define S_SD_RX_CLK_AN_2_I_G 11
+#define V_SD_RX_CLK_AN_2_I_G(x) ((x) << S_SD_RX_CLK_AN_2_I_G)
+#define F_SD_RX_CLK_AN_2_I_G V_SD_RX_CLK_AN_2_I_G(1U)
+
+#define S_SD_TX_CLK_AN_2_I_G 10
+#define V_SD_TX_CLK_AN_2_I_G(x) ((x) << S_SD_TX_CLK_AN_2_I_G)
+#define F_SD_TX_CLK_AN_2_I_G V_SD_TX_CLK_AN_2_I_G(1U)
+
+#define S_SD_RX_CLK_AN_3_I_G 9
+#define V_SD_RX_CLK_AN_3_I_G(x) ((x) << S_SD_RX_CLK_AN_3_I_G)
+#define F_SD_RX_CLK_AN_3_I_G V_SD_RX_CLK_AN_3_I_G(1U)
+
+#define S_SD_TX_CLK_AN_3_I_G 8
+#define V_SD_TX_CLK_AN_3_I_G(x) ((x) << S_SD_TX_CLK_AN_3_I_G)
+#define F_SD_TX_CLK_AN_3_I_G V_SD_TX_CLK_AN_3_I_G(1U)
+
+#define S_SD_RX_CLK_AN_4_I_G 7
+#define V_SD_RX_CLK_AN_4_I_G(x) ((x) << S_SD_RX_CLK_AN_4_I_G)
+#define F_SD_RX_CLK_AN_4_I_G V_SD_RX_CLK_AN_4_I_G(1U)
+
+#define S_SD_TX_CLK_AN_4_I_G 6
+#define V_SD_TX_CLK_AN_4_I_G(x) ((x) << S_SD_TX_CLK_AN_4_I_G)
+#define F_SD_TX_CLK_AN_4_I_G V_SD_TX_CLK_AN_4_I_G(1U)
+
+#define S_SD_RX_CLK_AN_5_I_G 5
+#define V_SD_RX_CLK_AN_5_I_G(x) ((x) << S_SD_RX_CLK_AN_5_I_G)
+#define F_SD_RX_CLK_AN_5_I_G V_SD_RX_CLK_AN_5_I_G(1U)
+
+#define S_SD_TX_CLK_AN_5_I_G 4
+#define V_SD_TX_CLK_AN_5_I_G(x) ((x) << S_SD_TX_CLK_AN_5_I_G)
+#define F_SD_TX_CLK_AN_5_I_G V_SD_TX_CLK_AN_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_6_I_G 3
+#define V_SD_RX_CLK_AN_6_I_G(x) ((x) << S_SD_RX_CLK_AN_6_I_G)
+#define F_SD_RX_CLK_AN_6_I_G V_SD_RX_CLK_AN_6_I_G(1U)
+
+#define S_SD_TX_CLK_AN_6_I_G 2
+#define V_SD_TX_CLK_AN_6_I_G(x) ((x) << S_SD_TX_CLK_AN_6_I_G)
+#define F_SD_TX_CLK_AN_6_I_G V_SD_TX_CLK_AN_6_I_G(1U)
+
+#define S_SD_RX_CLK_AN_7_I_G 1
+#define V_SD_RX_CLK_AN_7_I_G(x) ((x) << S_SD_RX_CLK_AN_7_I_G)
+#define F_SD_RX_CLK_AN_7_I_G V_SD_RX_CLK_AN_7_I_G(1U)
+
+#define S_SD_TX_CLK_AN_7_I_G 0
+#define V_SD_TX_CLK_AN_7_I_G(x) ((x) << S_SD_TX_CLK_AN_7_I_G)
+#define F_SD_TX_CLK_AN_7_I_G V_SD_TX_CLK_AN_7_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_2 0x38018
+
+#define S_SD_RX_CLK_0_G 31
+#define V_SD_RX_CLK_0_G(x) ((x) << S_SD_RX_CLK_0_G)
+#define F_SD_RX_CLK_0_G V_SD_RX_CLK_0_G(1U)
+
+#define S_SD_RX_CLK_1_G 30
+#define V_SD_RX_CLK_1_G(x) ((x) << S_SD_RX_CLK_1_G)
+#define F_SD_RX_CLK_1_G V_SD_RX_CLK_1_G(1U)
+
+#define S_SD_RX_CLK_2_G 29
+#define V_SD_RX_CLK_2_G(x) ((x) << S_SD_RX_CLK_2_G)
+#define F_SD_RX_CLK_2_G V_SD_RX_CLK_2_G(1U)
+
+#define S_SD_RX_CLK_3_G 28
+#define V_SD_RX_CLK_3_G(x) ((x) << S_SD_RX_CLK_3_G)
+#define F_SD_RX_CLK_3_G V_SD_RX_CLK_3_G(1U)
+
+#define S_SD_RX_CLK_4_G 27
+#define V_SD_RX_CLK_4_G(x) ((x) << S_SD_RX_CLK_4_G)
+#define F_SD_RX_CLK_4_G V_SD_RX_CLK_4_G(1U)
+
+#define S_SD_RX_CLK_5_G 26
+#define V_SD_RX_CLK_5_G(x) ((x) << S_SD_RX_CLK_5_G)
+#define F_SD_RX_CLK_5_G V_SD_RX_CLK_5_G(1U)
+
+#define S_SD_RX_CLK_6_G 25
+#define V_SD_RX_CLK_6_G(x) ((x) << S_SD_RX_CLK_6_G)
+#define F_SD_RX_CLK_6_G V_SD_RX_CLK_6_G(1U)
+
+#define S_SD_RX_CLK_7_G 24
+#define V_SD_RX_CLK_7_G(x) ((x) << S_SD_RX_CLK_7_G)
+#define F_SD_RX_CLK_7_G V_SD_RX_CLK_7_G(1U)
+
+#define S_SD_TX_CLK_0_G 23
+#define V_SD_TX_CLK_0_G(x) ((x) << S_SD_TX_CLK_0_G)
+#define F_SD_TX_CLK_0_G V_SD_TX_CLK_0_G(1U)
+
+#define S_SD_TX_CLK_1_G 22
+#define V_SD_TX_CLK_1_G(x) ((x) << S_SD_TX_CLK_1_G)
+#define F_SD_TX_CLK_1_G V_SD_TX_CLK_1_G(1U)
+
+#define S_SD_TX_CLK_2_G 21
+#define V_SD_TX_CLK_2_G(x) ((x) << S_SD_TX_CLK_2_G)
+#define F_SD_TX_CLK_2_G V_SD_TX_CLK_2_G(1U)
+
+#define S_SD_TX_CLK_3_G 20
+#define V_SD_TX_CLK_3_G(x) ((x) << S_SD_TX_CLK_3_G)
+#define F_SD_TX_CLK_3_G V_SD_TX_CLK_3_G(1U)
+
+#define S_SD_TX_CLK_4_G 19
+#define V_SD_TX_CLK_4_G(x) ((x) << S_SD_TX_CLK_4_G)
+#define F_SD_TX_CLK_4_G V_SD_TX_CLK_4_G(1U)
+
+#define S_SD_TX_CLK_5_G 18
+#define V_SD_TX_CLK_5_G(x) ((x) << S_SD_TX_CLK_5_G)
+#define F_SD_TX_CLK_5_G V_SD_TX_CLK_5_G(1U)
+
+#define S_SD_TX_CLK_6_G 17
+#define V_SD_TX_CLK_6_G(x) ((x) << S_SD_TX_CLK_6_G)
+#define F_SD_TX_CLK_6_G V_SD_TX_CLK_6_G(1U)
+
+#define S_SD_TX_CLK_7_G 16
+#define V_SD_TX_CLK_7_G(x) ((x) << S_SD_TX_CLK_7_G)
+#define F_SD_TX_CLK_7_G V_SD_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_AEC_0_G 15
+#define V_SD_RX_CLK_AEC_0_G(x) ((x) << S_SD_RX_CLK_AEC_0_G)
+#define F_SD_RX_CLK_AEC_0_G V_SD_RX_CLK_AEC_0_G(1U)
+
+#define S_SD_RX_CLK_AEC_1_G 14
+#define V_SD_RX_CLK_AEC_1_G(x) ((x) << S_SD_RX_CLK_AEC_1_G)
+#define F_SD_RX_CLK_AEC_1_G V_SD_RX_CLK_AEC_1_G(1U)
+
+#define S_SD_RX_CLK_AEC_2_G 13
+#define V_SD_RX_CLK_AEC_2_G(x) ((x) << S_SD_RX_CLK_AEC_2_G)
+#define F_SD_RX_CLK_AEC_2_G V_SD_RX_CLK_AEC_2_G(1U)
+
+#define S_SD_RX_CLK_AEC_3_G 12
+#define V_SD_RX_CLK_AEC_3_G(x) ((x) << S_SD_RX_CLK_AEC_3_G)
+#define F_SD_RX_CLK_AEC_3_G V_SD_RX_CLK_AEC_3_G(1U)
+
+#define S_SD_RX_CLK_AEC_4_G 11
+#define V_SD_RX_CLK_AEC_4_G(x) ((x) << S_SD_RX_CLK_AEC_4_G)
+#define F_SD_RX_CLK_AEC_4_G V_SD_RX_CLK_AEC_4_G(1U)
+
+#define S_SD_RX_CLK_AEC_5_G 10
+#define V_SD_RX_CLK_AEC_5_G(x) ((x) << S_SD_RX_CLK_AEC_5_G)
+#define F_SD_RX_CLK_AEC_5_G V_SD_RX_CLK_AEC_5_G(1U)
+
+#define S_SD_RX_CLK_AEC_6_G 9
+#define V_SD_RX_CLK_AEC_6_G(x) ((x) << S_SD_RX_CLK_AEC_6_G)
+#define F_SD_RX_CLK_AEC_6_G V_SD_RX_CLK_AEC_6_G(1U)
+
+#define S_SD_RX_CLK_AEC_7_G 8
+#define V_SD_RX_CLK_AEC_7_G(x) ((x) << S_SD_RX_CLK_AEC_7_G)
+#define F_SD_RX_CLK_AEC_7_G V_SD_RX_CLK_AEC_7_G(1U)
+
+#define S_SD_TX_CLK_AEC_0_G 7
+#define V_SD_TX_CLK_AEC_0_G(x) ((x) << S_SD_TX_CLK_AEC_0_G)
+#define F_SD_TX_CLK_AEC_0_G V_SD_TX_CLK_AEC_0_G(1U)
+
+#define S_SD_TX_CLK_AEC_1_G 6
+#define V_SD_TX_CLK_AEC_1_G(x) ((x) << S_SD_TX_CLK_AEC_1_G)
+#define F_SD_TX_CLK_AEC_1_G V_SD_TX_CLK_AEC_1_G(1U)
+
+#define S_SD_TX_CLK_AEC_2_G 5
+#define V_SD_TX_CLK_AEC_2_G(x) ((x) << S_SD_TX_CLK_AEC_2_G)
+#define F_SD_TX_CLK_AEC_2_G V_SD_TX_CLK_AEC_2_G(1U)
+
+#define S_SD_TX_CLK_AEC_3_G 4
+#define V_SD_TX_CLK_AEC_3_G(x) ((x) << S_SD_TX_CLK_AEC_3_G)
+#define F_SD_TX_CLK_AEC_3_G V_SD_TX_CLK_AEC_3_G(1U)
+
+#define S_SD_TX_CLK_AEC_4_G 3
+#define V_SD_TX_CLK_AEC_4_G(x) ((x) << S_SD_TX_CLK_AEC_4_G)
+#define F_SD_TX_CLK_AEC_4_G V_SD_TX_CLK_AEC_4_G(1U)
+
+#define S_SD_TX_CLK_AEC_5_G 2
+#define V_SD_TX_CLK_AEC_5_G(x) ((x) << S_SD_TX_CLK_AEC_5_G)
+#define F_SD_TX_CLK_AEC_5_G V_SD_TX_CLK_AEC_5_G(1U)
+
+#define S_SD_TX_CLK_AEC_6_G 1
+#define V_SD_TX_CLK_AEC_6_G(x) ((x) << S_SD_TX_CLK_AEC_6_G)
+#define F_SD_TX_CLK_AEC_6_G V_SD_TX_CLK_AEC_6_G(1U)
+
+#define S_SD_TX_CLK_AEC_7_G 0
+#define V_SD_TX_CLK_AEC_7_G(x) ((x) << S_SD_TX_CLK_AEC_7_G)
+#define F_SD_TX_CLK_AEC_7_G V_SD_TX_CLK_AEC_7_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_3 0x3801c
+
+#define S_PCS_RX_CLK_0_G 31
+#define V_PCS_RX_CLK_0_G(x) ((x) << S_PCS_RX_CLK_0_G)
+#define F_PCS_RX_CLK_0_G V_PCS_RX_CLK_0_G(1U)
+
+#define S_PCS_RX_CLK_1_G 30
+#define V_PCS_RX_CLK_1_G(x) ((x) << S_PCS_RX_CLK_1_G)
+#define F_PCS_RX_CLK_1_G V_PCS_RX_CLK_1_G(1U)
+
+#define S_PCS_RX_CLK_2_G 29
+#define V_PCS_RX_CLK_2_G(x) ((x) << S_PCS_RX_CLK_2_G)
+#define F_PCS_RX_CLK_2_G V_PCS_RX_CLK_2_G(1U)
+
+#define S_PCS_RX_CLK_3_G 28
+#define V_PCS_RX_CLK_3_G(x) ((x) << S_PCS_RX_CLK_3_G)
+#define F_PCS_RX_CLK_3_G V_PCS_RX_CLK_3_G(1U)
+
+#define S_PCS_RX_CLK_4_G 27
+#define V_PCS_RX_CLK_4_G(x) ((x) << S_PCS_RX_CLK_4_G)
+#define F_PCS_RX_CLK_4_G V_PCS_RX_CLK_4_G(1U)
+
+#define S_PCS_RX_CLK_5_G 26
+#define V_PCS_RX_CLK_5_G(x) ((x) << S_PCS_RX_CLK_5_G)
+#define F_PCS_RX_CLK_5_G V_PCS_RX_CLK_5_G(1U)
+
+#define S_PCS_RX_CLK_6_G 25
+#define V_PCS_RX_CLK_6_G(x) ((x) << S_PCS_RX_CLK_6_G)
+#define F_PCS_RX_CLK_6_G V_PCS_RX_CLK_6_G(1U)
+
+#define S_PCS_RX_CLK_7_G 24
+#define V_PCS_RX_CLK_7_G(x) ((x) << S_PCS_RX_CLK_7_G)
+#define F_PCS_RX_CLK_7_G V_PCS_RX_CLK_7_G(1U)
+
+#define S_PCS_TX_CLK_0_G 23
+#define V_PCS_TX_CLK_0_G(x) ((x) << S_PCS_TX_CLK_0_G)
+#define F_PCS_TX_CLK_0_G V_PCS_TX_CLK_0_G(1U)
+
+#define S_PCS_TX_CLK_1_G 22
+#define V_PCS_TX_CLK_1_G(x) ((x) << S_PCS_TX_CLK_1_G)
+#define F_PCS_TX_CLK_1_G V_PCS_TX_CLK_1_G(1U)
+
+#define S_PCS_TX_CLK_2_G 21
+#define V_PCS_TX_CLK_2_G(x) ((x) << S_PCS_TX_CLK_2_G)
+#define F_PCS_TX_CLK_2_G V_PCS_TX_CLK_2_G(1U)
+
+#define S_PCS_TX_CLK_3_G 20
+#define V_PCS_TX_CLK_3_G(x) ((x) << S_PCS_TX_CLK_3_G)
+#define F_PCS_TX_CLK_3_G V_PCS_TX_CLK_3_G(1U)
+
+#define S_PCS_TX_CLK_4_G 19
+#define V_PCS_TX_CLK_4_G(x) ((x) << S_PCS_TX_CLK_4_G)
+#define F_PCS_TX_CLK_4_G V_PCS_TX_CLK_4_G(1U)
+
+#define S_PCS_TX_CLK_5_G 18
+#define V_PCS_TX_CLK_5_G(x) ((x) << S_PCS_TX_CLK_5_G)
+#define F_PCS_TX_CLK_5_G V_PCS_TX_CLK_5_G(1U)
+
+#define S_PCS_TX_CLK_6_G 17
+#define V_PCS_TX_CLK_6_G(x) ((x) << S_PCS_TX_CLK_6_G)
+#define F_PCS_TX_CLK_6_G V_PCS_TX_CLK_6_G(1U)
+
+#define S_PCS_TX_CLK_7_G 16
+#define V_PCS_TX_CLK_7_G(x) ((x) << S_PCS_TX_CLK_7_G)
+#define F_PCS_TX_CLK_7_G V_PCS_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_EN_0 15
+#define V_SD_RX_CLK_EN_0(x) ((x) << S_SD_RX_CLK_EN_0)
+#define F_SD_RX_CLK_EN_0 V_SD_RX_CLK_EN_0(1U)
+
+#define S_SD_RX_CLK_EN_1 14
+#define V_SD_RX_CLK_EN_1(x) ((x) << S_SD_RX_CLK_EN_1)
+#define F_SD_RX_CLK_EN_1 V_SD_RX_CLK_EN_1(1U)
+
+#define S_SD_RX_CLK_EN_2 13
+#define V_SD_RX_CLK_EN_2(x) ((x) << S_SD_RX_CLK_EN_2)
+#define F_SD_RX_CLK_EN_2 V_SD_RX_CLK_EN_2(1U)
+
+#define S_SD_RX_CLK_EN_3 12
+#define V_SD_RX_CLK_EN_3(x) ((x) << S_SD_RX_CLK_EN_3)
+#define F_SD_RX_CLK_EN_3 V_SD_RX_CLK_EN_3(1U)
+
+#define S_SD_RX_CLK_EN_4 11
+#define V_SD_RX_CLK_EN_4(x) ((x) << S_SD_RX_CLK_EN_4)
+#define F_SD_RX_CLK_EN_4 V_SD_RX_CLK_EN_4(1U)
+
+#define S_SD_RX_CLK_EN_5 10
+#define V_SD_RX_CLK_EN_5(x) ((x) << S_SD_RX_CLK_EN_5)
+#define F_SD_RX_CLK_EN_5 V_SD_RX_CLK_EN_5(1U)
+
+#define S_SD_RX_CLK_EN_6 9
+#define V_SD_RX_CLK_EN_6(x) ((x) << S_SD_RX_CLK_EN_6)
+#define F_SD_RX_CLK_EN_6 V_SD_RX_CLK_EN_6(1U)
+
+#define S_SD_RX_CLK_EN_7 8
+#define V_SD_RX_CLK_EN_7(x) ((x) << S_SD_RX_CLK_EN_7)
+#define F_SD_RX_CLK_EN_7 V_SD_RX_CLK_EN_7(1U)
+
+#define S_SD_TX_CLK_EN_0 7
+#define V_SD_TX_CLK_EN_0(x) ((x) << S_SD_TX_CLK_EN_0)
+#define F_SD_TX_CLK_EN_0 V_SD_TX_CLK_EN_0(1U)
+
+#define S_SD_TX_CLK_EN_1 6
+#define V_SD_TX_CLK_EN_1(x) ((x) << S_SD_TX_CLK_EN_1)
+#define F_SD_TX_CLK_EN_1 V_SD_TX_CLK_EN_1(1U)
+
+#define S_SD_TX_CLK_EN_2 5
+#define V_SD_TX_CLK_EN_2(x) ((x) << S_SD_TX_CLK_EN_2)
+#define F_SD_TX_CLK_EN_2 V_SD_TX_CLK_EN_2(1U)
+
+#define S_SD_TX_CLK_EN_3 4
+#define V_SD_TX_CLK_EN_3(x) ((x) << S_SD_TX_CLK_EN_3)
+#define F_SD_TX_CLK_EN_3 V_SD_TX_CLK_EN_3(1U)
+
+#define S_SD_TX_CLK_EN_4 3
+#define V_SD_TX_CLK_EN_4(x) ((x) << S_SD_TX_CLK_EN_4)
+#define F_SD_TX_CLK_EN_4 V_SD_TX_CLK_EN_4(1U)
+
+#define S_SD_TX_CLK_EN_5 2
+#define V_SD_TX_CLK_EN_5(x) ((x) << S_SD_TX_CLK_EN_5)
+#define F_SD_TX_CLK_EN_5 V_SD_TX_CLK_EN_5(1U)
+
+#define S_SD_TX_CLK_EN_6 1
+#define V_SD_TX_CLK_EN_6(x) ((x) << S_SD_TX_CLK_EN_6)
+#define F_SD_TX_CLK_EN_6 V_SD_TX_CLK_EN_6(1U)
+
+#define S_SD_TX_CLK_EN_7 0
+#define V_SD_TX_CLK_EN_7(x) ((x) << S_SD_TX_CLK_EN_7)
+#define F_SD_TX_CLK_EN_7 V_SD_TX_CLK_EN_7(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_4 0x38020
+
+#define S_SGMII_TX_CLK_0_G 7
+#define V_SGMII_TX_CLK_0_G(x) ((x) << S_SGMII_TX_CLK_0_G)
+#define F_SGMII_TX_CLK_0_G V_SGMII_TX_CLK_0_G(1U)
+
+#define S_SGMII_TX_CLK_1_G 6
+#define V_SGMII_TX_CLK_1_G(x) ((x) << S_SGMII_TX_CLK_1_G)
+#define F_SGMII_TX_CLK_1_G V_SGMII_TX_CLK_1_G(1U)
+
+#define S_SGMII_TX_CLK_2_G 5
+#define V_SGMII_TX_CLK_2_G(x) ((x) << S_SGMII_TX_CLK_2_G)
+#define F_SGMII_TX_CLK_2_G V_SGMII_TX_CLK_2_G(1U)
+
+#define S_SGMII_TX_CLK_3_G 4
+#define V_SGMII_TX_CLK_3_G(x) ((x) << S_SGMII_TX_CLK_3_G)
+#define F_SGMII_TX_CLK_3_G V_SGMII_TX_CLK_3_G(1U)
+
+#define S_SGMII_RX_CLK_0_G 3
+#define V_SGMII_RX_CLK_0_G(x) ((x) << S_SGMII_RX_CLK_0_G)
+#define F_SGMII_RX_CLK_0_G V_SGMII_RX_CLK_0_G(1U)
+
+#define S_SGMII_RX_CLK_1_G 2
+#define V_SGMII_RX_CLK_1_G(x) ((x) << S_SGMII_RX_CLK_1_G)
+#define F_SGMII_RX_CLK_1_G V_SGMII_RX_CLK_1_G(1U)
+
+#define S_SGMII_RX_CLK_2_G 1
+#define V_SGMII_RX_CLK_2_G(x) ((x) << S_SGMII_RX_CLK_2_G)
+#define F_SGMII_RX_CLK_2_G V_SGMII_RX_CLK_2_G(1U)
+
+#define S_SGMII_RX_CLK_3_G 0
+#define V_SGMII_RX_CLK_3_G(x) ((x) << S_SGMII_RX_CLK_3_G)
+#define F_SGMII_RX_CLK_3_G V_SGMII_RX_CLK_3_G(1U)
+
+#define A_MAC_PCS_CONFIG_0 0x38024
+
+#define S_KP_MODE_IN 24
+#define M_KP_MODE_IN 0xffU
+#define V_KP_MODE_IN(x) ((x) << S_KP_MODE_IN)
+#define G_KP_MODE_IN(x) (((x) >> S_KP_MODE_IN) & M_KP_MODE_IN)
+
+#define S_FEC91_ENA_IN 16
+#define M_FEC91_ENA_IN 0xffU
+#define V_FEC91_ENA_IN(x) ((x) << S_FEC91_ENA_IN)
+#define G_FEC91_ENA_IN(x) (((x) >> S_FEC91_ENA_IN) & M_FEC91_ENA_IN)
+
+#define S_SD_8X 8
+#define M_SD_8X 0xffU
+#define V_SD_8X(x) ((x) << S_SD_8X)
+#define G_SD_8X(x) (((x) >> S_SD_8X) & M_SD_8X)
+
+#define S_SD_N2 0
+#define M_SD_N2 0xffU
+#define V_SD_N2(x) ((x) << S_SD_N2)
+#define G_SD_N2(x) (((x) >> S_SD_N2) & M_SD_N2)
+
+#define A_MAC_PCS_CONFIG_1 0x38028
+
+#define S_FAST_1LANE_MODE 24
+#define M_FAST_1LANE_MODE 0xffU
+#define V_FAST_1LANE_MODE(x) ((x) << S_FAST_1LANE_MODE)
+#define G_FAST_1LANE_MODE(x) (((x) >> S_FAST_1LANE_MODE) & M_FAST_1LANE_MODE)
+
+#define S_PACER_10G 16
+#define M_PACER_10G 0xffU
+#define V_PACER_10G(x) ((x) << S_PACER_10G)
+#define G_PACER_10G(x) (((x) >> S_PACER_10G) & M_PACER_10G)
+
+#define S_PCS400_ENA_IN 14
+#define M_PCS400_ENA_IN 0x3U
+#define V_PCS400_ENA_IN(x) ((x) << S_PCS400_ENA_IN)
+#define G_PCS400_ENA_IN(x) (((x) >> S_PCS400_ENA_IN) & M_PCS400_ENA_IN)
+
+#define S_MODE40_ENA_IN4 13
+#define V_MODE40_ENA_IN4(x) ((x) << S_MODE40_ENA_IN4)
+#define F_MODE40_ENA_IN4 V_MODE40_ENA_IN4(1U)
+
+#define S_MODE40_ENA_IN0 12
+#define V_MODE40_ENA_IN0(x) ((x) << S_MODE40_ENA_IN0)
+#define F_MODE40_ENA_IN0 V_MODE40_ENA_IN0(1U)
+
+#define S_PCS100_ENA_IN6 11
+#define V_PCS100_ENA_IN6(x) ((x) << S_PCS100_ENA_IN6)
+#define F_PCS100_ENA_IN6 V_PCS100_ENA_IN6(1U)
+
+#define S_PCS100_ENA_IN4 10
+#define V_PCS100_ENA_IN4(x) ((x) << S_PCS100_ENA_IN4)
+#define F_PCS100_ENA_IN4 V_PCS100_ENA_IN4(1U)
+
+#define S_PCS100_ENA_IN2 9
+#define V_PCS100_ENA_IN2(x) ((x) << S_PCS100_ENA_IN2)
+#define F_PCS100_ENA_IN2 V_PCS100_ENA_IN2(1U)
+
+#define S_PCS100_ENA_IN0 8
+#define V_PCS100_ENA_IN0(x) ((x) << S_PCS100_ENA_IN0)
+#define F_PCS100_ENA_IN0 V_PCS100_ENA_IN0(1U)
+
+#define S_RXLAUI_ENA_IN6 7
+#define V_RXLAUI_ENA_IN6(x) ((x) << S_RXLAUI_ENA_IN6)
+#define F_RXLAUI_ENA_IN6 V_RXLAUI_ENA_IN6(1U)
+
+#define S_RXLAUI_ENA_IN4 6
+#define V_RXLAUI_ENA_IN4(x) ((x) << S_RXLAUI_ENA_IN4)
+#define F_RXLAUI_ENA_IN4 V_RXLAUI_ENA_IN4(1U)
+
+#define S_RXLAUI_ENA_IN2 5
+#define V_RXLAUI_ENA_IN2(x) ((x) << S_RXLAUI_ENA_IN2)
+#define F_RXLAUI_ENA_IN2 V_RXLAUI_ENA_IN2(1U)
+
+#define S_RXLAUI_ENA_IN0 4
+#define V_RXLAUI_ENA_IN0(x) ((x) << S_RXLAUI_ENA_IN0)
+#define F_RXLAUI_ENA_IN0 V_RXLAUI_ENA_IN0(1U)
+
+#define S_FEC91_LANE_IN6 3
+#define V_FEC91_LANE_IN6(x) ((x) << S_FEC91_LANE_IN6)
+#define F_FEC91_LANE_IN6 V_FEC91_LANE_IN6(1U)
+
+#define S_FEC91_LANE_IN4 2
+#define V_FEC91_LANE_IN4(x) ((x) << S_FEC91_LANE_IN4)
+#define F_FEC91_LANE_IN4 V_FEC91_LANE_IN4(1U)
+
+#define S_FEC91_LANE_IN2 1
+#define V_FEC91_LANE_IN2(x) ((x) << S_FEC91_LANE_IN2)
+#define F_FEC91_LANE_IN2 V_FEC91_LANE_IN2(1U)
+
+#define S_FEC91_LANE_IN0 0
+#define V_FEC91_LANE_IN0(x) ((x) << S_FEC91_LANE_IN0)
+#define F_FEC91_LANE_IN0 V_FEC91_LANE_IN0(1U)
+
+#define A_MAC_PCS_CONFIG_2 0x3802c
+
+#define S_SGPCS_EN_3 29
+#define V_SGPCS_EN_3(x) ((x) << S_SGPCS_EN_3)
+#define F_SGPCS_EN_3 V_SGPCS_EN_3(1U)
+
+#define S_SGPCS_EN_2 28
+#define V_SGPCS_EN_2(x) ((x) << S_SGPCS_EN_2)
+#define F_SGPCS_EN_2 V_SGPCS_EN_2(1U)
+
+#define S_SGPCS_EN_1 27
+#define V_SGPCS_EN_1(x) ((x) << S_SGPCS_EN_1)
+#define F_SGPCS_EN_1 V_SGPCS_EN_1(1U)
+
+#define S_SGPCS_EN_0 26
+#define V_SGPCS_EN_0(x) ((x) << S_SGPCS_EN_0)
+#define F_SGPCS_EN_0 V_SGPCS_EN_0(1U)
+
+#define S_CFG_CLOCK_RATE 22
+#define M_CFG_CLOCK_RATE 0xfU
+#define V_CFG_CLOCK_RATE(x) ((x) << S_CFG_CLOCK_RATE)
+#define G_CFG_CLOCK_RATE(x) (((x) >> S_CFG_CLOCK_RATE) & M_CFG_CLOCK_RATE)
+
+#define S_FEC_ERR_ENA 14
+#define M_FEC_ERR_ENA 0xffU
+#define V_FEC_ERR_ENA(x) ((x) << S_FEC_ERR_ENA)
+#define G_FEC_ERR_ENA(x) (((x) >> S_FEC_ERR_ENA) & M_FEC_ERR_ENA)
+
+#define S_FEC_ENA 6
+#define M_FEC_ENA 0xffU
+#define V_FEC_ENA(x) ((x) << S_FEC_ENA)
+#define G_FEC_ENA(x) (((x) >> S_FEC_ENA) & M_FEC_ENA)
+
+#define S_PCS001_TX_AM_SF 3
+#define M_PCS001_TX_AM_SF 0x7U
+#define V_PCS001_TX_AM_SF(x) ((x) << S_PCS001_TX_AM_SF)
+#define G_PCS001_TX_AM_SF(x) (((x) >> S_PCS001_TX_AM_SF) & M_PCS001_TX_AM_SF)
+
+#define S_PCS000_TX_AM_SF 0
+#define M_PCS000_TX_AM_SF 0x7U
+#define V_PCS000_TX_AM_SF(x) ((x) << S_PCS000_TX_AM_SF)
+#define G_PCS000_TX_AM_SF(x) (((x) >> S_PCS000_TX_AM_SF) & M_PCS000_TX_AM_SF)
+
+#define A_MAC_PCS_STATUS_0 0x38030
+
+#define S_PCS000_ALIGN_LOCK 30
+#define M_PCS000_ALIGN_LOCK 0x3U
+#define V_PCS000_ALIGN_LOCK(x) ((x) << S_PCS000_ALIGN_LOCK)
+#define G_PCS000_ALIGN_LOCK(x) (((x) >> S_PCS000_ALIGN_LOCK) & M_PCS000_ALIGN_LOCK)
+
+#define S_PCS000_HI_SER 28
+#define M_PCS000_HI_SER 0x3U
+#define V_PCS000_HI_SER(x) ((x) << S_PCS000_HI_SER)
+#define G_PCS000_HI_SER(x) (((x) >> S_PCS000_HI_SER) & M_PCS000_HI_SER)
+
+#define S_BER_TIMER_DONE 20
+#define M_BER_TIMER_DONE 0xffU
+#define V_BER_TIMER_DONE(x) ((x) << S_BER_TIMER_DONE)
+#define G_BER_TIMER_DONE(x) (((x) >> S_BER_TIMER_DONE) & M_BER_TIMER_DONE)
+
+#define S_T7_AMPS_LOCK 4
+#define M_T7_AMPS_LOCK 0xffffU
+#define V_T7_AMPS_LOCK(x) ((x) << S_T7_AMPS_LOCK)
+#define G_T7_AMPS_LOCK(x) (((x) >> S_T7_AMPS_LOCK) & M_T7_AMPS_LOCK)
+
+#define S_T7_ALIGN_DONE 0
+#define M_T7_ALIGN_DONE 0xfU
+#define V_T7_ALIGN_DONE(x) ((x) << S_T7_ALIGN_DONE)
+#define G_T7_ALIGN_DONE(x) (((x) >> S_T7_ALIGN_DONE) & M_T7_ALIGN_DONE)
+
+#define A_MAC_PCS_STATUS_1 0x38034
+#define A_MAC_PCS_STATUS_2 0x38038
+
+#define S_RSFEC_ALIGNED 24
+#define M_RSFEC_ALIGNED 0xffU
+#define V_RSFEC_ALIGNED(x) ((x) << S_RSFEC_ALIGNED)
+#define G_RSFEC_ALIGNED(x) (((x) >> S_RSFEC_ALIGNED) & M_RSFEC_ALIGNED)
+
+#define S_T7_FEC_LOCKED 8
+#define M_T7_FEC_LOCKED 0xffffU
+#define V_T7_FEC_LOCKED(x) ((x) << S_T7_FEC_LOCKED)
+#define G_T7_FEC_LOCKED(x) (((x) >> S_T7_FEC_LOCKED) & M_T7_FEC_LOCKED)
+
+#define S_T7_BLOCK_LOCK 0
+#define M_T7_BLOCK_LOCK 0xffU
+#define V_T7_BLOCK_LOCK(x) ((x) << S_T7_BLOCK_LOCK)
+#define G_T7_BLOCK_LOCK(x) (((x) >> S_T7_BLOCK_LOCK) & M_T7_BLOCK_LOCK)
+
+#define A_MAC_PCS_STATUS_3 0x3803c
+
+#define S_FEC_NCERR 16
+#define M_FEC_NCERR 0xffffU
+#define V_FEC_NCERR(x) ((x) << S_FEC_NCERR)
+#define G_FEC_NCERR(x) (((x) >> S_FEC_NCERR) & M_FEC_NCERR)
+
+#define S_FEC_CERR 0
+#define M_FEC_CERR 0xffffU
+#define V_FEC_CERR(x) ((x) << S_FEC_CERR)
+#define G_FEC_CERR(x) (((x) >> S_FEC_CERR) & M_FEC_CERR)
+
+#define A_MAC_PCS_STATUS_4 0x38040
+
+#define S_MAC1_RES_SPEED 23
+#define M_MAC1_RES_SPEED 0xffU
+#define V_MAC1_RES_SPEED(x) ((x) << S_MAC1_RES_SPEED)
+#define G_MAC1_RES_SPEED(x) (((x) >> S_MAC1_RES_SPEED) & M_MAC1_RES_SPEED)
+
+#define S_MAC0_RES_SPEED 14
+#define M_MAC0_RES_SPEED 0xffU
+#define V_MAC0_RES_SPEED(x) ((x) << S_MAC0_RES_SPEED)
+#define G_MAC0_RES_SPEED(x) (((x) >> S_MAC0_RES_SPEED) & M_MAC0_RES_SPEED)
+
+#define S_PCS400_ENA_IN_REF 12
+#define M_PCS400_ENA_IN_REF 0x3U
+#define V_PCS400_ENA_IN_REF(x) ((x) << S_PCS400_ENA_IN_REF)
+#define G_PCS400_ENA_IN_REF(x) (((x) >> S_PCS400_ENA_IN_REF) & M_PCS400_ENA_IN_REF)
+
+#define S_PCS000_DEGRADE_SER 10
+#define M_PCS000_DEGRADE_SER 0x3U
+#define V_PCS000_DEGRADE_SER(x) ((x) << S_PCS000_DEGRADE_SER)
+#define G_PCS000_DEGRADE_SER(x) (((x) >> S_PCS000_DEGRADE_SER) & M_PCS000_DEGRADE_SER)
+
+#define S_P4X_SIGNAL_OK 8
+#define M_P4X_SIGNAL_OK 0x3U
+#define V_P4X_SIGNAL_OK(x) ((x) << S_P4X_SIGNAL_OK)
+#define G_P4X_SIGNAL_OK(x) (((x) >> S_P4X_SIGNAL_OK) & M_P4X_SIGNAL_OK)
+
+#define S_MODE200_IND_REF 7
+#define V_MODE200_IND_REF(x) ((x) << S_MODE200_IND_REF)
+#define F_MODE200_IND_REF V_MODE200_IND_REF(1U)
+
+#define S_MODE200_8X26_IND_REF 6
+#define V_MODE200_8X26_IND_REF(x) ((x) << S_MODE200_8X26_IND_REF)
+#define F_MODE200_8X26_IND_REF V_MODE200_8X26_IND_REF(1U)
+
+#define S_PCS001_RX_AM_SF 3
+#define M_PCS001_RX_AM_SF 0x7U
+#define V_PCS001_RX_AM_SF(x) ((x) << S_PCS001_RX_AM_SF)
+#define G_PCS001_RX_AM_SF(x) (((x) >> S_PCS001_RX_AM_SF) & M_PCS001_RX_AM_SF)
+
+#define S_PCS000_RX_AM_SF 0
+#define M_PCS000_RX_AM_SF 0x7U
+#define V_PCS000_RX_AM_SF(x) ((x) << S_PCS000_RX_AM_SF)
+#define G_PCS000_RX_AM_SF(x) (((x) >> S_PCS000_RX_AM_SF) & M_PCS000_RX_AM_SF)
+
+#define A_MAC_PCS_STATUS_5 0x38044
+
+#define S_MAC5_RES_SPEED 24
+#define M_MAC5_RES_SPEED 0xffU
+#define V_MAC5_RES_SPEED(x) ((x) << S_MAC5_RES_SPEED)
+#define G_MAC5_RES_SPEED(x) (((x) >> S_MAC5_RES_SPEED) & M_MAC5_RES_SPEED)
+
+#define S_MAC4_RES_SPEED 16
+#define M_MAC4_RES_SPEED 0xffU
+#define V_MAC4_RES_SPEED(x) ((x) << S_MAC4_RES_SPEED)
+#define G_MAC4_RES_SPEED(x) (((x) >> S_MAC4_RES_SPEED) & M_MAC4_RES_SPEED)
+
+#define S_MAC3_RES_SPEED 8
+#define M_MAC3_RES_SPEED 0xffU
+#define V_MAC3_RES_SPEED(x) ((x) << S_MAC3_RES_SPEED)
+#define G_MAC3_RES_SPEED(x) (((x) >> S_MAC3_RES_SPEED) & M_MAC3_RES_SPEED)
+
+#define S_MAC2_RES_SPEED 0
+#define M_MAC2_RES_SPEED 0xffU
+#define V_MAC2_RES_SPEED(x) ((x) << S_MAC2_RES_SPEED)
+#define G_MAC2_RES_SPEED(x) (((x) >> S_MAC2_RES_SPEED) & M_MAC2_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_6 0x38048
+
+#define S_MARKER_INS_CNT_100_00 16
+#define M_MARKER_INS_CNT_100_00 0x7fffU
+#define V_MARKER_INS_CNT_100_00(x) ((x) << S_MARKER_INS_CNT_100_00)
+#define G_MARKER_INS_CNT_100_00(x) (((x) >> S_MARKER_INS_CNT_100_00) & M_MARKER_INS_CNT_100_00)
+
+#define S_MAC7_RES_SPEED 8
+#define M_MAC7_RES_SPEED 0xffU
+#define V_MAC7_RES_SPEED(x) ((x) << S_MAC7_RES_SPEED)
+#define G_MAC7_RES_SPEED(x) (((x) >> S_MAC7_RES_SPEED) & M_MAC7_RES_SPEED)
+
+#define S_MAC6_RES_SPEED 0
+#define M_MAC6_RES_SPEED 0xffU
+#define V_MAC6_RES_SPEED(x) ((x) << S_MAC6_RES_SPEED)
+#define G_MAC6_RES_SPEED(x) (((x) >> S_MAC6_RES_SPEED) & M_MAC6_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_7 0x3804c
+
+#define S_PCS000_LINK_STATUS 30
+#define M_PCS000_LINK_STATUS 0x3U
+#define V_PCS000_LINK_STATUS(x) ((x) << S_PCS000_LINK_STATUS)
+#define G_PCS000_LINK_STATUS(x) (((x) >> S_PCS000_LINK_STATUS) & M_PCS000_LINK_STATUS)
+
+#define S_MARKER_INS_CNT_100_02 15
+#define M_MARKER_INS_CNT_100_02 0x7fffU
+#define V_MARKER_INS_CNT_100_02(x) ((x) << S_MARKER_INS_CNT_100_02)
+#define G_MARKER_INS_CNT_100_02(x) (((x) >> S_MARKER_INS_CNT_100_02) & M_MARKER_INS_CNT_100_02)
+
+#define S_MARKER_INS_CNT_100_01 0
+#define M_MARKER_INS_CNT_100_01 0x7fffU
+#define V_MARKER_INS_CNT_100_01(x) ((x) << S_MARKER_INS_CNT_100_01)
+#define G_MARKER_INS_CNT_100_01(x) (((x) >> S_MARKER_INS_CNT_100_01) & M_MARKER_INS_CNT_100_01)
+
+#define A_MAC_PCS_STATUS_8 0x38050
+
+#define S_MARKER_INS_CNT_25_1 15
+#define M_MARKER_INS_CNT_25_1 0xffffU
+#define V_MARKER_INS_CNT_25_1(x) ((x) << S_MARKER_INS_CNT_25_1)
+#define G_MARKER_INS_CNT_25_1(x) (((x) >> S_MARKER_INS_CNT_25_1) & M_MARKER_INS_CNT_25_1)
+
+#define S_MARKER_INS_CNT_100_03 0
+#define M_MARKER_INS_CNT_100_03 0x7fffU
+#define V_MARKER_INS_CNT_100_03(x) ((x) << S_MARKER_INS_CNT_100_03)
+#define G_MARKER_INS_CNT_100_03(x) (((x) >> S_MARKER_INS_CNT_100_03) & M_MARKER_INS_CNT_100_03)
+
+#define A_MAC_PCS_STATUS_9 0x38054
+
+#define S_MARKER_INS_CNT_25_5 16
+#define M_MARKER_INS_CNT_25_5 0xffffU
+#define V_MARKER_INS_CNT_25_5(x) ((x) << S_MARKER_INS_CNT_25_5)
+#define G_MARKER_INS_CNT_25_5(x) (((x) >> S_MARKER_INS_CNT_25_5) & M_MARKER_INS_CNT_25_5)
+
+#define S_MARKER_INS_CNT_25_3 0
+#define M_MARKER_INS_CNT_25_3 0xffffU
+#define V_MARKER_INS_CNT_25_3(x) ((x) << S_MARKER_INS_CNT_25_3)
+#define G_MARKER_INS_CNT_25_3(x) (((x) >> S_MARKER_INS_CNT_25_3) & M_MARKER_INS_CNT_25_3)
+
+#define A_MAC_PCS_STATUS_10 0x38058
+
+#define S_MARKER_INS_CNT_25_50_2 16
+#define M_MARKER_INS_CNT_25_50_2 0xffffU
+#define V_MARKER_INS_CNT_25_50_2(x) ((x) << S_MARKER_INS_CNT_25_50_2)
+#define G_MARKER_INS_CNT_25_50_2(x) (((x) >> S_MARKER_INS_CNT_25_50_2) & M_MARKER_INS_CNT_25_50_2)
+
+#define S_MARKER_INS_CNT_25_50_0 0
+#define M_MARKER_INS_CNT_25_50_0 0xffffU
+#define V_MARKER_INS_CNT_25_50_0(x) ((x) << S_MARKER_INS_CNT_25_50_0)
+#define G_MARKER_INS_CNT_25_50_0(x) (((x) >> S_MARKER_INS_CNT_25_50_0) & M_MARKER_INS_CNT_25_50_0)
+
+#define A_MAC_PCS_STATUS_11 0x3805c
+
+#define S_MARKER_INS_CNT_25_50_6 16
+#define M_MARKER_INS_CNT_25_50_6 0xffffU
+#define V_MARKER_INS_CNT_25_50_6(x) ((x) << S_MARKER_INS_CNT_25_50_6)
+#define G_MARKER_INS_CNT_25_50_6(x) (((x) >> S_MARKER_INS_CNT_25_50_6) & M_MARKER_INS_CNT_25_50_6)
+
+#define S_MARKER_INS_CNT_25_50_4 0
+#define M_MARKER_INS_CNT_25_50_4 0xffffU
+#define V_MARKER_INS_CNT_25_50_4(x) ((x) << S_MARKER_INS_CNT_25_50_4)
+#define G_MARKER_INS_CNT_25_50_4(x) (((x) >> S_MARKER_INS_CNT_25_50_4) & M_MARKER_INS_CNT_25_50_4)
+
+#define A_MAC_PCS_STATUS_12 0x38060
+
+#define S_T7_LINK_STATUS 24
+#define M_T7_LINK_STATUS 0xffU
+#define V_T7_LINK_STATUS(x) ((x) << S_T7_LINK_STATUS)
+#define G_T7_LINK_STATUS(x) (((x) >> S_T7_LINK_STATUS) & M_T7_LINK_STATUS)
+
+#define S_T7_HI_BER 16
+#define M_T7_HI_BER 0xffU
+#define V_T7_HI_BER(x) ((x) << S_T7_HI_BER)
+#define G_T7_HI_BER(x) (((x) >> S_T7_HI_BER) & M_T7_HI_BER)
+
+#define S_MARKER_INS_CNT_25_7 0
+#define M_MARKER_INS_CNT_25_7 0xffffU
+#define V_MARKER_INS_CNT_25_7(x) ((x) << S_MARKER_INS_CNT_25_7)
+#define G_MARKER_INS_CNT_25_7(x) (((x) >> S_MARKER_INS_CNT_25_7) & M_MARKER_INS_CNT_25_7)
+
+#define A_MAC_MAC200G400G_0_CONFIG_0 0x38064
+#define A_MAC_MAC200G400G_0_CONFIG_1 0x38068
+
+#define S_FF_TX_CRC_OVR 11
+#define V_FF_TX_CRC_OVR(x) ((x) << S_FF_TX_CRC_OVR)
+#define F_FF_TX_CRC_OVR V_FF_TX_CRC_OVR(1U)
+
+#define S_TX_SMHOLD 2
+#define V_TX_SMHOLD(x) ((x) << S_TX_SMHOLD)
+#define F_TX_SMHOLD V_TX_SMHOLD(1U)
+
+#define A_MAC_MAC200G400G_0_CONFIG_2 0x3806c
+#define A_MAC_MAC200G400G_0_CONFIG_3 0x38070
+#define A_MAC_MAC200G400G_0_CONFIG_4 0x38074
+
+#define S_FRC_DELTA 0
+#define M_FRC_DELTA 0xffffU
+#define V_FRC_DELTA(x) ((x) << S_FRC_DELTA)
+#define G_FRC_DELTA(x) (((x) >> S_FRC_DELTA) & M_FRC_DELTA)
+
+#define A_MAC_MAC200G400G_0_STATUS 0x38078
+
+#define S_T7_LOOP_ENA 4
+#define V_T7_LOOP_ENA(x) ((x) << S_T7_LOOP_ENA)
+#define F_T7_LOOP_ENA V_T7_LOOP_ENA(1U)
+
+#define S_T7_LOC_FAULT 3
+#define V_T7_LOC_FAULT(x) ((x) << S_T7_LOC_FAULT)
+#define F_T7_LOC_FAULT V_T7_LOC_FAULT(1U)
+
+#define S_FRM_DROP 2
+#define V_FRM_DROP(x) ((x) << S_FRM_DROP)
+#define F_FRM_DROP V_FRM_DROP(1U)
+
+#define S_FF_TX_CREDIT 1
+#define V_FF_TX_CREDIT(x) ((x) << S_FF_TX_CREDIT)
+#define F_FF_TX_CREDIT V_FF_TX_CREDIT(1U)
+
+#define A_MAC_MAC200G400G_1_CONFIG_0 0x3807c
+#define A_MAC_MAC200G400G_1_CONFIG_1 0x38080
+#define A_MAC_MAC200G400G_1_CONFIG_2 0x38084
+#define A_MAC_MAC200G400G_1_CONFIG_3 0x38088
+#define A_MAC_MAC200G400G_1_CONFIG_4 0x3808c
+#define A_MAC_MAC200G400G_1_STATUS 0x38090
+#define A_MAC_AN_CFG_0 0x38094
+
+#define S_T7_AN_DATA_CTL 24
+#define M_T7_AN_DATA_CTL 0xffU
+#define V_T7_AN_DATA_CTL(x) ((x) << S_T7_AN_DATA_CTL)
+#define G_T7_AN_DATA_CTL(x) (((x) >> S_T7_AN_DATA_CTL) & M_T7_AN_DATA_CTL)
+
+#define S_T7_AN_ENA 16
+#define M_T7_AN_ENA 0xffU
+#define V_T7_AN_ENA(x) ((x) << S_T7_AN_ENA)
+#define G_T7_AN_ENA(x) (((x) >> S_T7_AN_ENA) & M_T7_AN_ENA)
+
+#define A_MAC_AN_CFG_1 0x38098
+
+#define S_AN_DIS_TIMER_AN_7 7
+#define V_AN_DIS_TIMER_AN_7(x) ((x) << S_AN_DIS_TIMER_AN_7)
+#define F_AN_DIS_TIMER_AN_7 V_AN_DIS_TIMER_AN_7(1U)
+
+#define S_AN_DIS_TIMER_AN_6 6
+#define V_AN_DIS_TIMER_AN_6(x) ((x) << S_AN_DIS_TIMER_AN_6)
+#define F_AN_DIS_TIMER_AN_6 V_AN_DIS_TIMER_AN_6(1U)
+
+#define S_AN_DIS_TIMER_AN_5 5
+#define V_AN_DIS_TIMER_AN_5(x) ((x) << S_AN_DIS_TIMER_AN_5)
+#define F_AN_DIS_TIMER_AN_5 V_AN_DIS_TIMER_AN_5(1U)
+
+#define S_AN_DIS_TIMER_AN_4 4
+#define V_AN_DIS_TIMER_AN_4(x) ((x) << S_AN_DIS_TIMER_AN_4)
+#define F_AN_DIS_TIMER_AN_4 V_AN_DIS_TIMER_AN_4(1U)
+
+#define S_AN_DIS_TIMER_AN_3 3
+#define V_AN_DIS_TIMER_AN_3(x) ((x) << S_AN_DIS_TIMER_AN_3)
+#define F_AN_DIS_TIMER_AN_3 V_AN_DIS_TIMER_AN_3(1U)
+
+#define S_AN_DIS_TIMER_AN_2 2
+#define V_AN_DIS_TIMER_AN_2(x) ((x) << S_AN_DIS_TIMER_AN_2)
+#define F_AN_DIS_TIMER_AN_2 V_AN_DIS_TIMER_AN_2(1U)
+
+#define S_AN_DIS_TIMER_AN_1 1
+#define V_AN_DIS_TIMER_AN_1(x) ((x) << S_AN_DIS_TIMER_AN_1)
+#define F_AN_DIS_TIMER_AN_1 V_AN_DIS_TIMER_AN_1(1U)
+
+#define S_AN_DIS_TIMER_AN_0 0
+#define V_AN_DIS_TIMER_AN_0(x) ((x) << S_AN_DIS_TIMER_AN_0)
+#define F_AN_DIS_TIMER_AN_0 V_AN_DIS_TIMER_AN_0(1U)
+
+#define A_MAC_AN_SERDES25G_ENA 0x3809c
+
+#define S_AN_SD25_TX_ENA_7 15
+#define V_AN_SD25_TX_ENA_7(x) ((x) << S_AN_SD25_TX_ENA_7)
+#define F_AN_SD25_TX_ENA_7 V_AN_SD25_TX_ENA_7(1U)
+
+#define S_AN_SD25_TX_ENA_6 14
+#define V_AN_SD25_TX_ENA_6(x) ((x) << S_AN_SD25_TX_ENA_6)
+#define F_AN_SD25_TX_ENA_6 V_AN_SD25_TX_ENA_6(1U)
+
+#define S_AN_SD25_TX_ENA_5 13
+#define V_AN_SD25_TX_ENA_5(x) ((x) << S_AN_SD25_TX_ENA_5)
+#define F_AN_SD25_TX_ENA_5 V_AN_SD25_TX_ENA_5(1U)
+
+#define S_AN_SD25_TX_ENA_4 12
+#define V_AN_SD25_TX_ENA_4(x) ((x) << S_AN_SD25_TX_ENA_4)
+#define F_AN_SD25_TX_ENA_4 V_AN_SD25_TX_ENA_4(1U)
+
+#define S_AN_SD25_TX_ENA_3 11
+#define V_AN_SD25_TX_ENA_3(x) ((x) << S_AN_SD25_TX_ENA_3)
+#define F_AN_SD25_TX_ENA_3 V_AN_SD25_TX_ENA_3(1U)
+
+#define S_AN_SD25_TX_ENA_2 10
+#define V_AN_SD25_TX_ENA_2(x) ((x) << S_AN_SD25_TX_ENA_2)
+#define F_AN_SD25_TX_ENA_2 V_AN_SD25_TX_ENA_2(1U)
+
+#define S_AN_SD25_TX_ENA_1 9
+#define V_AN_SD25_TX_ENA_1(x) ((x) << S_AN_SD25_TX_ENA_1)
+#define F_AN_SD25_TX_ENA_1 V_AN_SD25_TX_ENA_1(1U)
+
+#define S_AN_SD25_TX_ENA_0 8
+#define V_AN_SD25_TX_ENA_0(x) ((x) << S_AN_SD25_TX_ENA_0)
+#define F_AN_SD25_TX_ENA_0 V_AN_SD25_TX_ENA_0(1U)
+
+#define S_AN_SD25_RX_ENA_7 7
+#define V_AN_SD25_RX_ENA_7(x) ((x) << S_AN_SD25_RX_ENA_7)
+#define F_AN_SD25_RX_ENA_7 V_AN_SD25_RX_ENA_7(1U)
+
+#define S_AN_SD25_RX_ENA_6 6
+#define V_AN_SD25_RX_ENA_6(x) ((x) << S_AN_SD25_RX_ENA_6)
+#define F_AN_SD25_RX_ENA_6 V_AN_SD25_RX_ENA_6(1U)
+
+#define S_AN_SD25_RX_ENA_5 5
+#define V_AN_SD25_RX_ENA_5(x) ((x) << S_AN_SD25_RX_ENA_5)
+#define F_AN_SD25_RX_ENA_5 V_AN_SD25_RX_ENA_5(1U)
+
+#define S_AN_SD25_RX_ENA_4 4
+#define V_AN_SD25_RX_ENA_4(x) ((x) << S_AN_SD25_RX_ENA_4)
+#define F_AN_SD25_RX_ENA_4 V_AN_SD25_RX_ENA_4(1U)
+
+#define S_AN_SD25_RX_ENA_3 3
+#define V_AN_SD25_RX_ENA_3(x) ((x) << S_AN_SD25_RX_ENA_3)
+#define F_AN_SD25_RX_ENA_3 V_AN_SD25_RX_ENA_3(1U)
+
+#define S_AN_SD25_RX_ENA_2 2
+#define V_AN_SD25_RX_ENA_2(x) ((x) << S_AN_SD25_RX_ENA_2)
+#define F_AN_SD25_RX_ENA_2 V_AN_SD25_RX_ENA_2(1U)
+
+#define S_AN_SD25_RX_ENA_1 1
+#define V_AN_SD25_RX_ENA_1(x) ((x) << S_AN_SD25_RX_ENA_1)
+#define F_AN_SD25_RX_ENA_1 V_AN_SD25_RX_ENA_1(1U)
+
+#define S_AN_SD25_RX_ENA_0 0
+#define V_AN_SD25_RX_ENA_0(x) ((x) << S_AN_SD25_RX_ENA_0)
+#define F_AN_SD25_RX_ENA_0 V_AN_SD25_RX_ENA_0(1U)
+
+#define A_MAC_PLL_CFG_0 0x380a0
+
+#define S_USE_RX_CDR_CLK_FOR_TX 7
+#define V_USE_RX_CDR_CLK_FOR_TX(x) ((x) << S_USE_RX_CDR_CLK_FOR_TX)
+#define F_USE_RX_CDR_CLK_FOR_TX V_USE_RX_CDR_CLK_FOR_TX(1U)
+
+#define S_HSSPLLSEL0 5
+#define M_HSSPLLSEL0 0x3U
+#define V_HSSPLLSEL0(x) ((x) << S_HSSPLLSEL0)
+#define G_HSSPLLSEL0(x) (((x) >> S_HSSPLLSEL0) & M_HSSPLLSEL0)
+
+#define S_HSSTXDIV2CLK_SEL0 3
+#define M_HSSTXDIV2CLK_SEL0 0x3U
+#define V_HSSTXDIV2CLK_SEL0(x) ((x) << S_HSSTXDIV2CLK_SEL0)
+#define G_HSSTXDIV2CLK_SEL0(x) (((x) >> S_HSSTXDIV2CLK_SEL0) & M_HSSTXDIV2CLK_SEL0)
+
+#define S_HSS_RESET0 2
+#define V_HSS_RESET0(x) ((x) << S_HSS_RESET0)
+#define F_HSS_RESET0 V_HSS_RESET0(1U)
+
+#define S_APB_RESET0 1
+#define V_APB_RESET0(x) ((x) << S_APB_RESET0)
+#define F_APB_RESET0 V_APB_RESET0(1U)
+
+#define S_HSSCLK32DIV2_RESET0 0
+#define V_HSSCLK32DIV2_RESET0(x) ((x) << S_HSSCLK32DIV2_RESET0)
+#define F_HSSCLK32DIV2_RESET0 V_HSSCLK32DIV2_RESET0(1U)
+
+#define A_MAC_PLL_CFG_1 0x380a4
+
+#define S_HSSPLLSEL1 5
+#define M_HSSPLLSEL1 0x3U
+#define V_HSSPLLSEL1(x) ((x) << S_HSSPLLSEL1)
+#define G_HSSPLLSEL1(x) (((x) >> S_HSSPLLSEL1) & M_HSSPLLSEL1)
+
+#define S_HSSTXDIV2CLK_SEL1 3
+#define M_HSSTXDIV2CLK_SEL1 0x3U
+#define V_HSSTXDIV2CLK_SEL1(x) ((x) << S_HSSTXDIV2CLK_SEL1)
+#define G_HSSTXDIV2CLK_SEL1(x) (((x) >> S_HSSTXDIV2CLK_SEL1) & M_HSSTXDIV2CLK_SEL1)
+
+#define S_HSS_RESET1 2
+#define V_HSS_RESET1(x) ((x) << S_HSS_RESET1)
+#define F_HSS_RESET1 V_HSS_RESET1(1U)
+
+#define S_APB_RESET1 1
+#define V_APB_RESET1(x) ((x) << S_APB_RESET1)
+#define F_APB_RESET1 V_APB_RESET1(1U)
+
+#define S_HSSCLK32DIV2_RESET1 0
+#define V_HSSCLK32DIV2_RESET1(x) ((x) << S_HSSCLK32DIV2_RESET1)
+#define F_HSSCLK32DIV2_RESET1 V_HSSCLK32DIV2_RESET1(1U)
+
+#define A_MAC_PLL_CFG_2 0x380a8
+
+#define S_HSSPLLSEL2 5
+#define M_HSSPLLSEL2 0x3U
+#define V_HSSPLLSEL2(x) ((x) << S_HSSPLLSEL2)
+#define G_HSSPLLSEL2(x) (((x) >> S_HSSPLLSEL2) & M_HSSPLLSEL2)
+
+#define S_HSSTXDIV2CLK_SEL2 3
+#define M_HSSTXDIV2CLK_SEL2 0x3U
+#define V_HSSTXDIV2CLK_SEL2(x) ((x) << S_HSSTXDIV2CLK_SEL2)
+#define G_HSSTXDIV2CLK_SEL2(x) (((x) >> S_HSSTXDIV2CLK_SEL2) & M_HSSTXDIV2CLK_SEL2)
+
+#define S_HSS_RESET2 2
+#define V_HSS_RESET2(x) ((x) << S_HSS_RESET2)
+#define F_HSS_RESET2 V_HSS_RESET2(1U)
+
+#define S_APB_RESET2 1
+#define V_APB_RESET2(x) ((x) << S_APB_RESET2)
+#define F_APB_RESET2 V_APB_RESET2(1U)
+
+#define S_HSSCLK32DIV2_RESET2 0
+#define V_HSSCLK32DIV2_RESET2(x) ((x) << S_HSSCLK32DIV2_RESET2)
+#define F_HSSCLK32DIV2_RESET2 V_HSSCLK32DIV2_RESET2(1U)
+
+#define A_MAC_PLL_CFG_3 0x380ac
+
+#define S_HSSPLLSEL3 5
+#define M_HSSPLLSEL3 0x3U
+#define V_HSSPLLSEL3(x) ((x) << S_HSSPLLSEL3)
+#define G_HSSPLLSEL3(x) (((x) >> S_HSSPLLSEL3) & M_HSSPLLSEL3)
+
+#define S_HSSTXDIV2CLK_SEL3 3
+#define M_HSSTXDIV2CLK_SEL3 0x3U
+#define V_HSSTXDIV2CLK_SEL3(x) ((x) << S_HSSTXDIV2CLK_SEL3)
+#define G_HSSTXDIV2CLK_SEL3(x) (((x) >> S_HSSTXDIV2CLK_SEL3) & M_HSSTXDIV2CLK_SEL3)
+
+#define S_HSS_RESET3 2
+#define V_HSS_RESET3(x) ((x) << S_HSS_RESET3)
+#define F_HSS_RESET3 V_HSS_RESET3(1U)
+
+#define S_APB_RESET3 1
+#define V_APB_RESET3(x) ((x) << S_APB_RESET3)
+#define F_APB_RESET3 V_APB_RESET3(1U)
+
+#define S_HSSCLK32DIV2_RESET3 0
+#define V_HSSCLK32DIV2_RESET3(x) ((x) << S_HSSCLK32DIV2_RESET3)
+#define F_HSSCLK32DIV2_RESET3 V_HSSCLK32DIV2_RESET3(1U)
+
+#define A_MAC_HSS_STATUS 0x380b0
+
+#define S_TX_LANE_PLL_SEL_3 30
+#define M_TX_LANE_PLL_SEL_3 0x3U
+#define V_TX_LANE_PLL_SEL_3(x) ((x) << S_TX_LANE_PLL_SEL_3)
+#define G_TX_LANE_PLL_SEL_3(x) (((x) >> S_TX_LANE_PLL_SEL_3) & M_TX_LANE_PLL_SEL_3)
+
+#define S_TX_LANE_PLL_SEL_2 28
+#define M_TX_LANE_PLL_SEL_2 0x3U
+#define V_TX_LANE_PLL_SEL_2(x) ((x) << S_TX_LANE_PLL_SEL_2)
+#define G_TX_LANE_PLL_SEL_2(x) (((x) >> S_TX_LANE_PLL_SEL_2) & M_TX_LANE_PLL_SEL_2)
+
+#define S_TX_LANE_PLL_SEL_1 26
+#define M_TX_LANE_PLL_SEL_1 0x3U
+#define V_TX_LANE_PLL_SEL_1(x) ((x) << S_TX_LANE_PLL_SEL_1)
+#define G_TX_LANE_PLL_SEL_1(x) (((x) >> S_TX_LANE_PLL_SEL_1) & M_TX_LANE_PLL_SEL_1)
+
+#define S_TX_LANE_PLL_SEL_0 24
+#define M_TX_LANE_PLL_SEL_0 0x3U
+#define V_TX_LANE_PLL_SEL_0(x) ((x) << S_TX_LANE_PLL_SEL_0)
+#define G_TX_LANE_PLL_SEL_0(x) (((x) >> S_TX_LANE_PLL_SEL_0) & M_TX_LANE_PLL_SEL_0)
+
+#define S_HSSPLLLOCKB_HSS3 7
+#define V_HSSPLLLOCKB_HSS3(x) ((x) << S_HSSPLLLOCKB_HSS3)
+#define F_HSSPLLLOCKB_HSS3 V_HSSPLLLOCKB_HSS3(1U)
+
+#define S_HSSPLLLOCKA_HSS3 6
+#define V_HSSPLLLOCKA_HSS3(x) ((x) << S_HSSPLLLOCKA_HSS3)
+#define F_HSSPLLLOCKA_HSS3 V_HSSPLLLOCKA_HSS3(1U)
+
+#define S_HSSPLLLOCKB_HSS2 5
+#define V_HSSPLLLOCKB_HSS2(x) ((x) << S_HSSPLLLOCKB_HSS2)
+#define F_HSSPLLLOCKB_HSS2 V_HSSPLLLOCKB_HSS2(1U)
+
+#define S_HSSPLLLOCKA_HSS2 4
+#define V_HSSPLLLOCKA_HSS2(x) ((x) << S_HSSPLLLOCKA_HSS2)
+#define F_HSSPLLLOCKA_HSS2 V_HSSPLLLOCKA_HSS2(1U)
+
+#define S_HSSPLLLOCKB_HSS1 3
+#define V_HSSPLLLOCKB_HSS1(x) ((x) << S_HSSPLLLOCKB_HSS1)
+#define F_HSSPLLLOCKB_HSS1 V_HSSPLLLOCKB_HSS1(1U)
+
+#define S_HSSPLLLOCKA_HSS1 2
+#define V_HSSPLLLOCKA_HSS1(x) ((x) << S_HSSPLLLOCKA_HSS1)
+#define F_HSSPLLLOCKA_HSS1 V_HSSPLLLOCKA_HSS1(1U)
+
+#define S_HSSPLLLOCKB_HSS0 1
+#define V_HSSPLLLOCKB_HSS0(x) ((x) << S_HSSPLLLOCKB_HSS0)
+#define F_HSSPLLLOCKB_HSS0 V_HSSPLLLOCKB_HSS0(1U)
+
+#define S_HSSPLLLOCKA_HSS0 0
+#define V_HSSPLLLOCKA_HSS0(x) ((x) << S_HSSPLLLOCKA_HSS0)
+#define F_HSSPLLLOCKA_HSS0 V_HSSPLLLOCKA_HSS0(1U)
+
+#define A_MAC_HSS_SIGDET_STATUS 0x380b4
+
+#define S_HSS3_SIGDET 6
+#define M_HSS3_SIGDET 0x3U
+#define V_HSS3_SIGDET(x) ((x) << S_HSS3_SIGDET)
+#define G_HSS3_SIGDET(x) (((x) >> S_HSS3_SIGDET) & M_HSS3_SIGDET)
+
+#define S_HSS2_SIGDET 4
+#define M_HSS2_SIGDET 0x3U
+#define V_HSS2_SIGDET(x) ((x) << S_HSS2_SIGDET)
+#define G_HSS2_SIGDET(x) (((x) >> S_HSS2_SIGDET) & M_HSS2_SIGDET)
+
+#define S_HSS1_SIGDET 2
+#define M_HSS1_SIGDET 0x3U
+#define V_HSS1_SIGDET(x) ((x) << S_HSS1_SIGDET)
+#define G_HSS1_SIGDET(x) (((x) >> S_HSS1_SIGDET) & M_HSS1_SIGDET)
+
+#define S_HSS0_SIGDET 0
+#define M_HSS0_SIGDET 0x3U
+#define V_HSS0_SIGDET(x) ((x) << S_HSS0_SIGDET)
+#define G_HSS0_SIGDET(x) (((x) >> S_HSS0_SIGDET) & M_HSS0_SIGDET)
+
+#define A_MAC_FPGA_CFG_0 0x380b8
+#define A_MAC_PMD_STATUS 0x380bc
+
+#define S_SIGNAL_DETECT 0
+#define M_SIGNAL_DETECT 0xffU
+#define V_SIGNAL_DETECT(x) ((x) << S_SIGNAL_DETECT)
+#define G_SIGNAL_DETECT(x) (((x) >> S_SIGNAL_DETECT) & M_SIGNAL_DETECT)
+
+#define A_MAC_PMD_AN_CONFIG0 0x380c0
+
+#define S_AN3_RATE_SELECT 25
+#define M_AN3_RATE_SELECT 0x1fU
+#define V_AN3_RATE_SELECT(x) ((x) << S_AN3_RATE_SELECT)
+#define G_AN3_RATE_SELECT(x) (((x) >> S_AN3_RATE_SELECT) & M_AN3_RATE_SELECT)
+
+#define S_AN3_STATUS 24
+#define V_AN3_STATUS(x) ((x) << S_AN3_STATUS)
+#define F_AN3_STATUS V_AN3_STATUS(1U)
+
+#define S_AN2_RATE_SELECT 17
+#define M_AN2_RATE_SELECT 0x1fU
+#define V_AN2_RATE_SELECT(x) ((x) << S_AN2_RATE_SELECT)
+#define G_AN2_RATE_SELECT(x) (((x) >> S_AN2_RATE_SELECT) & M_AN2_RATE_SELECT)
+
+#define S_AN2_STATUS 16
+#define V_AN2_STATUS(x) ((x) << S_AN2_STATUS)
+#define F_AN2_STATUS V_AN2_STATUS(1U)
+
+#define S_AN1_RATE_SELECT 9
+#define M_AN1_RATE_SELECT 0x1fU
+#define V_AN1_RATE_SELECT(x) ((x) << S_AN1_RATE_SELECT)
+#define G_AN1_RATE_SELECT(x) (((x) >> S_AN1_RATE_SELECT) & M_AN1_RATE_SELECT)
+
+#define S_AN1_STATUS 8
+#define V_AN1_STATUS(x) ((x) << S_AN1_STATUS)
+#define F_AN1_STATUS V_AN1_STATUS(1U)
+
+#define S_AN0_RATE_SELECT 1
+#define M_AN0_RATE_SELECT 0x1fU
+#define V_AN0_RATE_SELECT(x) ((x) << S_AN0_RATE_SELECT)
+#define G_AN0_RATE_SELECT(x) (((x) >> S_AN0_RATE_SELECT) & M_AN0_RATE_SELECT)
+
+#define S_AN0_STATUS 0
+#define V_AN0_STATUS(x) ((x) << S_AN0_STATUS)
+#define F_AN0_STATUS V_AN0_STATUS(1U)
+
+#define A_MAC_PMD_AN_CONFIG1 0x380c4
+
+#define S_AN7_RATE_SELECT 25
+#define M_AN7_RATE_SELECT 0x1fU
+#define V_AN7_RATE_SELECT(x) ((x) << S_AN7_RATE_SELECT)
+#define G_AN7_RATE_SELECT(x) (((x) >> S_AN7_RATE_SELECT) & M_AN7_RATE_SELECT)
+
+#define S_AN7_STATUS 24
+#define V_AN7_STATUS(x) ((x) << S_AN7_STATUS)
+#define F_AN7_STATUS V_AN7_STATUS(1U)
+
+#define S_AN6_RATE_SELECT 17
+#define M_AN6_RATE_SELECT 0x1fU
+#define V_AN6_RATE_SELECT(x) ((x) << S_AN6_RATE_SELECT)
+#define G_AN6_RATE_SELECT(x) (((x) >> S_AN6_RATE_SELECT) & M_AN6_RATE_SELECT)
+
+#define S_AN6_STATUS 16
+#define V_AN6_STATUS(x) ((x) << S_AN6_STATUS)
+#define F_AN6_STATUS V_AN6_STATUS(1U)
+
+#define S_AN5_RATE_SELECT 9
+#define M_AN5_RATE_SELECT 0x1fU
+#define V_AN5_RATE_SELECT(x) ((x) << S_AN5_RATE_SELECT)
+#define G_AN5_RATE_SELECT(x) (((x) >> S_AN5_RATE_SELECT) & M_AN5_RATE_SELECT)
+
+#define S_AN5_STATUS 8
+#define V_AN5_STATUS(x) ((x) << S_AN5_STATUS)
+#define F_AN5_STATUS V_AN5_STATUS(1U)
+
+#define S_AN4_RATE_SELECT 1
+#define M_AN4_RATE_SELECT 0x1fU
+#define V_AN4_RATE_SELECT(x) ((x) << S_AN4_RATE_SELECT)
+#define G_AN4_RATE_SELECT(x) (((x) >> S_AN4_RATE_SELECT) & M_AN4_RATE_SELECT)
+
+#define S_AN4_STATUS 0
+#define V_AN4_STATUS(x) ((x) << S_AN4_STATUS)
+#define F_AN4_STATUS V_AN4_STATUS(1U)
+
+#define A_MAC_INT_EN_CMN 0x380c8
+
+#define S_HSS3PLL1_LOCK_LOST_INT_EN 21
+#define V_HSS3PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_EN)
+#define F_HSS3PLL1_LOCK_LOST_INT_EN V_HSS3PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL1_LOCK_INT_EN 20
+#define V_HSS3PLL1_LOCK_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_INT_EN)
+#define F_HSS3PLL1_LOCK_INT_EN V_HSS3PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_EN 19
+#define V_HSS3PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_EN)
+#define F_HSS3PLL0_LOCK_LOST_INT_EN V_HSS3PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_INT_EN 18
+#define V_HSS3PLL0_LOCK_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_INT_EN)
+#define F_HSS3PLL0_LOCK_INT_EN V_HSS3PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_INT_EN 17
+#define V_HSS2PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_LOST_INT_EN)
+#define F_HSS2PLL1_LOCK_LOST_INT_EN V_HSS2PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_INT_EN 16
+#define V_HSS2PLL1_LOCK_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_INT_EN)
+#define F_HSS2PLL1_LOCK_INT_EN V_HSS2PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_EN 15
+#define V_HSS2PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_EN)
+#define F_HSS2PLL0_LOCK_LOST_INT_EN V_HSS2PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_INT_EN 14
+#define V_HSS2PLL0_LOCK_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_INT_EN)
+#define F_HSS2PLL0_LOCK_INT_EN V_HSS2PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_EN 13
+#define V_HSS1PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_EN)
+#define F_HSS1PLL1_LOCK_LOST_INT_EN V_HSS1PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_INT_EN 12
+#define V_HSS1PLL1_LOCK_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_INT_EN)
+#define F_HSS1PLL1_LOCK_INT_EN V_HSS1PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_EN 11
+#define V_HSS1PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_EN)
+#define F_HSS1PLL0_LOCK_LOST_INT_EN V_HSS1PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_INT_EN 10
+#define V_HSS1PLL0_LOCK_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_INT_EN)
+#define F_HSS1PLL0_LOCK_INT_EN V_HSS1PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_EN 9
+#define V_HSS0PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_EN)
+#define F_HSS0PLL1_LOCK_LOST_INT_EN V_HSS0PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_INT_EN 8
+#define V_HSS0PLL1_LOCK_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_INT_EN)
+#define F_HSS0PLL1_LOCK_INT_EN V_HSS0PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_EN 7
+#define V_HSS0PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_EN)
+#define F_HSS0PLL0_LOCK_LOST_INT_EN V_HSS0PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_INT_EN 6
+#define V_HSS0PLL0_LOCK_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_INT_EN)
+#define F_HSS0PLL0_LOCK_INT_EN V_HSS0PLL0_LOCK_INT_EN(1U)
+
+#define S_FLOCK_ASSERTED 5
+#define V_FLOCK_ASSERTED(x) ((x) << S_FLOCK_ASSERTED)
+#define F_FLOCK_ASSERTED V_FLOCK_ASSERTED(1U)
+
+#define S_FLOCK_LOST 4
+#define V_FLOCK_LOST(x) ((x) << S_FLOCK_LOST)
+#define F_FLOCK_LOST V_FLOCK_LOST(1U)
+
+#define S_PHASE_LOCK_ASSERTED 3
+#define V_PHASE_LOCK_ASSERTED(x) ((x) << S_PHASE_LOCK_ASSERTED)
+#define F_PHASE_LOCK_ASSERTED V_PHASE_LOCK_ASSERTED(1U)
+
+#define S_PHASE_LOCK_LOST 2
+#define V_PHASE_LOCK_LOST(x) ((x) << S_PHASE_LOCK_LOST)
+#define F_PHASE_LOCK_LOST V_PHASE_LOCK_LOST(1U)
+
+#define S_LOCK_ASSERTED 1
+#define V_LOCK_ASSERTED(x) ((x) << S_LOCK_ASSERTED)
+#define F_LOCK_ASSERTED V_LOCK_ASSERTED(1U)
+
+#define S_LOCK_LOST 0
+#define V_LOCK_LOST(x) ((x) << S_LOCK_LOST)
+#define F_LOCK_LOST V_LOCK_LOST(1U)
+
+#define A_MAC_INT_CAUSE_CMN 0x380cc
+
+#define S_HSS3PLL1_LOCK_LOST_INT_CAUSE 21
+#define V_HSS3PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_LOST_INT_CAUSE V_HSS3PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL1_LOCK_INT_CAUSE 20
+#define V_HSS3PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_INT_CAUSE V_HSS3PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_CAUSE 19
+#define V_HSS3PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_LOST_INT_CAUSE V_HSS3PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_INT_CAUSE 18
+#define V_HSS3PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_INT_CAUSE V_HSS3PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_CAUSE 17
+#define V_HSS2PLL1_LOCK_LOST_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_LOST_CAUSE)
+#define F_HSS2PLL1_LOCK_LOST_CAUSE V_HSS2PLL1_LOCK_LOST_CAUSE(1U)
+
+/*
+ * NOTE(review): every other bit of A_MAC_INT_CAUSE_CMN uses the
+ * *_LOCK_LOST_INT_CAUSE suffix (HSS3PLL1/HSS3PLL0/HSS2PLL0/HSS1PLL*/
+ * HSS0PLL*), and the enable register uses S_HSS2PLL1_LOCK_LOST_INT_EN;
+ * only this bit dropped the "INT".  Provide consistently-named aliases
+ * and keep the generated names above for backward compatibility.
+ */
+#define S_HSS2PLL1_LOCK_LOST_INT_CAUSE S_HSS2PLL1_LOCK_LOST_CAUSE
+#define V_HSS2PLL1_LOCK_LOST_INT_CAUSE(x) V_HSS2PLL1_LOCK_LOST_CAUSE(x)
+#define F_HSS2PLL1_LOCK_LOST_INT_CAUSE F_HSS2PLL1_LOCK_LOST_CAUSE
+
+#define S_HSS2PLL1_LOCK_INT_CAUSE 16
+#define V_HSS2PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_INT_CAUSE)
+#define F_HSS2PLL1_LOCK_INT_CAUSE V_HSS2PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_CAUSE 15
+#define V_HSS2PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_LOST_INT_CAUSE V_HSS2PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_INT_CAUSE 14
+#define V_HSS2PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_INT_CAUSE V_HSS2PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_CAUSE 13
+#define V_HSS1PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_LOST_INT_CAUSE V_HSS1PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_INT_CAUSE 12
+#define V_HSS1PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_INT_CAUSE V_HSS1PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_CAUSE 11
+#define V_HSS1PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_LOST_INT_CAUSE V_HSS1PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_INT_CAUSE 10
+#define V_HSS1PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_INT_CAUSE V_HSS1PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_CAUSE 9
+#define V_HSS0PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_LOST_INT_CAUSE V_HSS0PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_INT_CAUSE 8
+#define V_HSS0PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_INT_CAUSE V_HSS0PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_CAUSE 7
+#define V_HSS0PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_LOST_INT_CAUSE V_HSS0PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_INT_CAUSE 6
+#define V_HSS0PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_INT_CAUSE V_HSS0PLL0_LOCK_INT_CAUSE(1U)
+
+#define A_MAC_PERR_INT_EN_MTIP 0x380d0
+
+#define S_PERR_MAC0_TX 19
+#define V_PERR_MAC0_TX(x) ((x) << S_PERR_MAC0_TX)
+#define F_PERR_MAC0_TX V_PERR_MAC0_TX(1U)
+
+#define S_PERR_MAC1_TX 18
+#define V_PERR_MAC1_TX(x) ((x) << S_PERR_MAC1_TX)
+#define F_PERR_MAC1_TX V_PERR_MAC1_TX(1U)
+
+#define S_PERR_MAC2_TX 17
+#define V_PERR_MAC2_TX(x) ((x) << S_PERR_MAC2_TX)
+#define F_PERR_MAC2_TX V_PERR_MAC2_TX(1U)
+
+#define S_PERR_MAC3_TX 16
+#define V_PERR_MAC3_TX(x) ((x) << S_PERR_MAC3_TX)
+#define F_PERR_MAC3_TX V_PERR_MAC3_TX(1U)
+
+#define S_PERR_MAC4_TX 15
+#define V_PERR_MAC4_TX(x) ((x) << S_PERR_MAC4_TX)
+#define F_PERR_MAC4_TX V_PERR_MAC4_TX(1U)
+
+#define S_PERR_MAC5_TX 14
+#define V_PERR_MAC5_TX(x) ((x) << S_PERR_MAC5_TX)
+#define F_PERR_MAC5_TX V_PERR_MAC5_TX(1U)
+
+#define S_PERR_MAC0_RX 13
+#define V_PERR_MAC0_RX(x) ((x) << S_PERR_MAC0_RX)
+#define F_PERR_MAC0_RX V_PERR_MAC0_RX(1U)
+
+#define S_PERR_MAC1_RX 12
+#define V_PERR_MAC1_RX(x) ((x) << S_PERR_MAC1_RX)
+#define F_PERR_MAC1_RX V_PERR_MAC1_RX(1U)
+
+#define S_PERR_MAC2_RX 11
+#define V_PERR_MAC2_RX(x) ((x) << S_PERR_MAC2_RX)
+#define F_PERR_MAC2_RX V_PERR_MAC2_RX(1U)
+
+#define S_PERR_MAC3_RX 10
+#define V_PERR_MAC3_RX(x) ((x) << S_PERR_MAC3_RX)
+#define F_PERR_MAC3_RX V_PERR_MAC3_RX(1U)
+
+#define S_PERR_MAC4_RX 9
+#define V_PERR_MAC4_RX(x) ((x) << S_PERR_MAC4_RX)
+#define F_PERR_MAC4_RX V_PERR_MAC4_RX(1U)
+
+#define S_PERR_MAC5_RX 8
+#define V_PERR_MAC5_RX(x) ((x) << S_PERR_MAC5_RX)
+#define F_PERR_MAC5_RX V_PERR_MAC5_RX(1U)
+
+#define S_PERR_MAC_STAT2_RX 7
+#define V_PERR_MAC_STAT2_RX(x) ((x) << S_PERR_MAC_STAT2_RX)
+#define F_PERR_MAC_STAT2_RX V_PERR_MAC_STAT2_RX(1U)
+
+#define S_PERR_MAC_STAT3_RX 6
+#define V_PERR_MAC_STAT3_RX(x) ((x) << S_PERR_MAC_STAT3_RX)
+#define F_PERR_MAC_STAT3_RX V_PERR_MAC_STAT3_RX(1U)
+
+#define S_PERR_MAC_STAT4_RX 5
+#define V_PERR_MAC_STAT4_RX(x) ((x) << S_PERR_MAC_STAT4_RX)
+#define F_PERR_MAC_STAT4_RX V_PERR_MAC_STAT4_RX(1U)
+
+#define S_PERR_MAC_STAT5_RX 4
+#define V_PERR_MAC_STAT5_RX(x) ((x) << S_PERR_MAC_STAT5_RX)
+#define F_PERR_MAC_STAT5_RX V_PERR_MAC_STAT5_RX(1U)
+
+#define S_PERR_MAC_STAT2_TX 3
+#define V_PERR_MAC_STAT2_TX(x) ((x) << S_PERR_MAC_STAT2_TX)
+#define F_PERR_MAC_STAT2_TX V_PERR_MAC_STAT2_TX(1U)
+
+#define S_PERR_MAC_STAT3_TX 2
+#define V_PERR_MAC_STAT3_TX(x) ((x) << S_PERR_MAC_STAT3_TX)
+#define F_PERR_MAC_STAT3_TX V_PERR_MAC_STAT3_TX(1U)
+
+#define S_PERR_MAC_STAT4_TX 1
+#define V_PERR_MAC_STAT4_TX(x) ((x) << S_PERR_MAC_STAT4_TX)
+#define F_PERR_MAC_STAT4_TX V_PERR_MAC_STAT4_TX(1U)
+
+#define S_PERR_MAC_STAT5_TX 0
+#define V_PERR_MAC_STAT5_TX(x) ((x) << S_PERR_MAC_STAT5_TX)
+#define F_PERR_MAC_STAT5_TX V_PERR_MAC_STAT5_TX(1U)
+
+#define A_MAC_PERR_INT_CAUSE_MTIP 0x380d4
+
+#define S_PERR_MAC_STAT_RX 7
+#define V_PERR_MAC_STAT_RX(x) ((x) << S_PERR_MAC_STAT_RX)
+#define F_PERR_MAC_STAT_RX V_PERR_MAC_STAT_RX(1U)
+
+#define S_PERR_MAC_STAT_TX 3
+#define V_PERR_MAC_STAT_TX(x) ((x) << S_PERR_MAC_STAT_TX)
+#define F_PERR_MAC_STAT_TX V_PERR_MAC_STAT_TX(1U)
+
+#define S_PERR_MAC_STAT_CAP 2
+#define V_PERR_MAC_STAT_CAP(x) ((x) << S_PERR_MAC_STAT_CAP)
+#define F_PERR_MAC_STAT_CAP V_PERR_MAC_STAT_CAP(1U)
+
+#define A_MAC_PERR_ENABLE_MTIP 0x380d8
+#define A_MAC_PCS_1G_CONFIG_0 0x380dc
+
+#define S_SEQ_ENA_3 19
+#define V_SEQ_ENA_3(x) ((x) << S_SEQ_ENA_3)
+#define F_SEQ_ENA_3 V_SEQ_ENA_3(1U)
+
+#define S_SEQ_ENA_2 18
+#define V_SEQ_ENA_2(x) ((x) << S_SEQ_ENA_2)
+#define F_SEQ_ENA_2 V_SEQ_ENA_2(1U)
+
+#define S_SEQ_ENA_1 17
+#define V_SEQ_ENA_1(x) ((x) << S_SEQ_ENA_1)
+#define F_SEQ_ENA_1 V_SEQ_ENA_1(1U)
+
+#define S_SEQ_ENA_0 16
+#define V_SEQ_ENA_0(x) ((x) << S_SEQ_ENA_0)
+#define F_SEQ_ENA_0 V_SEQ_ENA_0(1U)
+
+#define S_TX_LANE_THRESH_3 12
+#define M_TX_LANE_THRESH_3 0xfU
+#define V_TX_LANE_THRESH_3(x) ((x) << S_TX_LANE_THRESH_3)
+#define G_TX_LANE_THRESH_3(x) (((x) >> S_TX_LANE_THRESH_3) & M_TX_LANE_THRESH_3)
+
+#define S_TX_LANE_THRESH_2 8
+#define M_TX_LANE_THRESH_2 0xfU
+#define V_TX_LANE_THRESH_2(x) ((x) << S_TX_LANE_THRESH_2)
+#define G_TX_LANE_THRESH_2(x) (((x) >> S_TX_LANE_THRESH_2) & M_TX_LANE_THRESH_2)
+
+#define S_TX_LANE_THRESH_1 4
+#define M_TX_LANE_THRESH_1 0xfU
+#define V_TX_LANE_THRESH_1(x) ((x) << S_TX_LANE_THRESH_1)
+#define G_TX_LANE_THRESH_1(x) (((x) >> S_TX_LANE_THRESH_1) & M_TX_LANE_THRESH_1)
+
+#define S_TX_LANE_THRESH_0 0
+#define M_TX_LANE_THRESH_0 0xfU
+#define V_TX_LANE_THRESH_0(x) ((x) << S_TX_LANE_THRESH_0)
+#define G_TX_LANE_THRESH_0(x) (((x) >> S_TX_LANE_THRESH_0) & M_TX_LANE_THRESH_0)
+
+#define A_MAC_PCS_1G_CONFIG_1 0x380e0
+
+#define S_TX_LANE_CKMULT_3 9
+#define M_TX_LANE_CKMULT_3 0x7U
+#define V_TX_LANE_CKMULT_3(x) ((x) << S_TX_LANE_CKMULT_3)
+#define G_TX_LANE_CKMULT_3(x) (((x) >> S_TX_LANE_CKMULT_3) & M_TX_LANE_CKMULT_3)
+
+#define S_TX_LANE_CKMULT_2 6
+#define M_TX_LANE_CKMULT_2 0x7U
+#define V_TX_LANE_CKMULT_2(x) ((x) << S_TX_LANE_CKMULT_2)
+#define G_TX_LANE_CKMULT_2(x) (((x) >> S_TX_LANE_CKMULT_2) & M_TX_LANE_CKMULT_2)
+
+#define S_TX_LANE_CKMULT_1 3
+#define M_TX_LANE_CKMULT_1 0x7U
+#define V_TX_LANE_CKMULT_1(x) ((x) << S_TX_LANE_CKMULT_1)
+#define G_TX_LANE_CKMULT_1(x) (((x) >> S_TX_LANE_CKMULT_1) & M_TX_LANE_CKMULT_1)
+
+#define S_TX_LANE_CKMULT_0 0
+#define M_TX_LANE_CKMULT_0 0x7U
+#define V_TX_LANE_CKMULT_0(x) ((x) << S_TX_LANE_CKMULT_0)
+#define G_TX_LANE_CKMULT_0(x) (((x) >> S_TX_LANE_CKMULT_0) & M_TX_LANE_CKMULT_0)
+
+#define A_MAC_PTP_TIMER_RD0_LO 0x380e4
+#define A_MAC_PTP_TIMER_RD0_HI 0x380e8
+#define A_MAC_PTP_TIMER_RD1_LO 0x380ec
+#define A_MAC_PTP_TIMER_RD1_HI 0x380f0
+#define A_MAC_PTP_TIMER_WR_LO 0x380f4
+#define A_MAC_PTP_TIMER_WR_HI 0x380f8
+#define A_MAC_PTP_TIMER_OFFSET_0 0x380fc
+#define A_MAC_PTP_TIMER_OFFSET_1 0x38100
+#define A_MAC_PTP_TIMER_OFFSET_2 0x38104
+#define A_MAC_PTP_SUM_LO 0x38108
+#define A_MAC_PTP_SUM_HI 0x3810c
+#define A_MAC_PTP_TIMER_INCR0 0x38110
+#define A_MAC_PTP_TIMER_INCR1 0x38114
+#define A_MAC_PTP_DRIFT_ADJUST_COUNT 0x38118
+#define A_MAC_PTP_OFFSET_ADJUST_FINE 0x3811c
+#define A_MAC_PTP_OFFSET_ADJUST_TOTAL 0x38120
+#define A_MAC_PTP_CFG 0x38124
+#define A_MAC_PTP_PPS 0x38128
+#define A_MAC_PTP_SINGLE_ALARM 0x3812c
+#define A_MAC_PTP_PERIODIC_ALARM 0x38130
+#define A_MAC_PTP_STATUS 0x38134
+#define A_MAC_STS_GPIO_SEL 0x38140
+
+#define S_STSOUTSEL 1
+#define V_STSOUTSEL(x) ((x) << S_STSOUTSEL)
+#define F_STSOUTSEL V_STSOUTSEL(1U)
+
+#define S_STSINSEL 0
+#define V_STSINSEL(x) ((x) << S_STSINSEL)
+#define F_STSINSEL V_STSINSEL(1U)
+
+#define A_MAC_CERR_INT_EN_MTIP 0x38150
+
+#define S_CERR_MAC0_TX 11
+#define V_CERR_MAC0_TX(x) ((x) << S_CERR_MAC0_TX)
+#define F_CERR_MAC0_TX V_CERR_MAC0_TX(1U)
+
+#define S_CERR_MAC1_TX 10
+#define V_CERR_MAC1_TX(x) ((x) << S_CERR_MAC1_TX)
+#define F_CERR_MAC1_TX V_CERR_MAC1_TX(1U)
+
+#define S_CERR_MAC2_TX 9
+#define V_CERR_MAC2_TX(x) ((x) << S_CERR_MAC2_TX)
+#define F_CERR_MAC2_TX V_CERR_MAC2_TX(1U)
+
+#define S_CERR_MAC3_TX 8
+#define V_CERR_MAC3_TX(x) ((x) << S_CERR_MAC3_TX)
+#define F_CERR_MAC3_TX V_CERR_MAC3_TX(1U)
+
+#define S_CERR_MAC4_TX 7
+#define V_CERR_MAC4_TX(x) ((x) << S_CERR_MAC4_TX)
+#define F_CERR_MAC4_TX V_CERR_MAC4_TX(1U)
+
+#define S_CERR_MAC5_TX 6
+#define V_CERR_MAC5_TX(x) ((x) << S_CERR_MAC5_TX)
+#define F_CERR_MAC5_TX V_CERR_MAC5_TX(1U)
+
+#define S_CERR_MAC0_RX 5
+#define V_CERR_MAC0_RX(x) ((x) << S_CERR_MAC0_RX)
+#define F_CERR_MAC0_RX V_CERR_MAC0_RX(1U)
+
+#define S_CERR_MAC1_RX 4
+#define V_CERR_MAC1_RX(x) ((x) << S_CERR_MAC1_RX)
+#define F_CERR_MAC1_RX V_CERR_MAC1_RX(1U)
+
+#define S_CERR_MAC2_RX 3
+#define V_CERR_MAC2_RX(x) ((x) << S_CERR_MAC2_RX)
+#define F_CERR_MAC2_RX V_CERR_MAC2_RX(1U)
+
+#define S_CERR_MAC3_RX 2
+#define V_CERR_MAC3_RX(x) ((x) << S_CERR_MAC3_RX)
+#define F_CERR_MAC3_RX V_CERR_MAC3_RX(1U)
+
+#define S_CERR_MAC4_RX 1
+#define V_CERR_MAC4_RX(x) ((x) << S_CERR_MAC4_RX)
+#define F_CERR_MAC4_RX V_CERR_MAC4_RX(1U)
+
+#define S_CERR_MAC5_RX 0
+#define V_CERR_MAC5_RX(x) ((x) << S_CERR_MAC5_RX)
+#define F_CERR_MAC5_RX V_CERR_MAC5_RX(1U)
+
+#define A_MAC_CERR_INT_CAUSE_MTIP 0x38154
+#define A_MAC_1G_PCS0_STATUS 0x38160
+
+#define S_1G_PCS0_LOOPBACK 12
+#define V_1G_PCS0_LOOPBACK(x) ((x) << S_1G_PCS0_LOOPBACK)
+#define F_1G_PCS0_LOOPBACK V_1G_PCS0_LOOPBACK(1U)
+
+#define S_1G_PCS0_LINK_STATUS 11
+#define V_1G_PCS0_LINK_STATUS(x) ((x) << S_1G_PCS0_LINK_STATUS)
+#define F_1G_PCS0_LINK_STATUS V_1G_PCS0_LINK_STATUS(1U)
+
+#define S_1G_PCS0_RX_SYNC 10
+#define V_1G_PCS0_RX_SYNC(x) ((x) << S_1G_PCS0_RX_SYNC)
+#define F_1G_PCS0_RX_SYNC V_1G_PCS0_RX_SYNC(1U)
+
+#define S_1G_PCS0_AN_DONE 9
+#define V_1G_PCS0_AN_DONE(x) ((x) << S_1G_PCS0_AN_DONE)
+#define F_1G_PCS0_AN_DONE V_1G_PCS0_AN_DONE(1U)
+
+#define S_1G_PCS0_PGRCVD 8
+#define V_1G_PCS0_PGRCVD(x) ((x) << S_1G_PCS0_PGRCVD)
+#define F_1G_PCS0_PGRCVD V_1G_PCS0_PGRCVD(1U)
+
+#define S_1G_PCS0_SPEED_SEL 6
+#define M_1G_PCS0_SPEED_SEL 0x3U
+#define V_1G_PCS0_SPEED_SEL(x) ((x) << S_1G_PCS0_SPEED_SEL)
+#define G_1G_PCS0_SPEED_SEL(x) (((x) >> S_1G_PCS0_SPEED_SEL) & M_1G_PCS0_SPEED_SEL)
+
+#define S_1G_PCS0_HALF_DUPLEX 5
+#define V_1G_PCS0_HALF_DUPLEX(x) ((x) << S_1G_PCS0_HALF_DUPLEX)
+#define F_1G_PCS0_HALF_DUPLEX V_1G_PCS0_HALF_DUPLEX(1U)
+
+#define S_1G_PCS0_TX_MODE_QUIET 4
+#define V_1G_PCS0_TX_MODE_QUIET(x) ((x) << S_1G_PCS0_TX_MODE_QUIET)
+#define F_1G_PCS0_TX_MODE_QUIET V_1G_PCS0_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_TX_LPI_ACTIVE 3
+#define V_1G_PCS0_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_TX_LPI_ACTIVE)
+#define F_1G_PCS0_TX_LPI_ACTIVE V_1G_PCS0_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_MODE_QUIET 2
+#define V_1G_PCS0_RX_MODE_QUIET(x) ((x) << S_1G_PCS0_RX_MODE_QUIET)
+#define F_1G_PCS0_RX_MODE_QUIET V_1G_PCS0_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_RX_LPI_ACTIVE 1
+#define V_1G_PCS0_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_RX_LPI_ACTIVE)
+#define F_1G_PCS0_RX_LPI_ACTIVE V_1G_PCS0_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_WAKE_ERR 0
+#define V_1G_PCS0_RX_WAKE_ERR(x) ((x) << S_1G_PCS0_RX_WAKE_ERR)
+#define F_1G_PCS0_RX_WAKE_ERR V_1G_PCS0_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS1_STATUS 0x38164
+
+#define S_1G_PCS1_LOOPBACK 12
+#define V_1G_PCS1_LOOPBACK(x) ((x) << S_1G_PCS1_LOOPBACK)
+#define F_1G_PCS1_LOOPBACK V_1G_PCS1_LOOPBACK(1U)
+
+#define S_1G_PCS1_LINK_STATUS 11
+#define V_1G_PCS1_LINK_STATUS(x) ((x) << S_1G_PCS1_LINK_STATUS)
+#define F_1G_PCS1_LINK_STATUS V_1G_PCS1_LINK_STATUS(1U)
+
+#define S_1G_PCS1_RX_SYNC 10
+#define V_1G_PCS1_RX_SYNC(x) ((x) << S_1G_PCS1_RX_SYNC)
+#define F_1G_PCS1_RX_SYNC V_1G_PCS1_RX_SYNC(1U)
+
+#define S_1G_PCS1_AN_DONE 9
+#define V_1G_PCS1_AN_DONE(x) ((x) << S_1G_PCS1_AN_DONE)
+#define F_1G_PCS1_AN_DONE V_1G_PCS1_AN_DONE(1U)
+
+#define S_1G_PCS1_PGRCVD 8
+#define V_1G_PCS1_PGRCVD(x) ((x) << S_1G_PCS1_PGRCVD)
+#define F_1G_PCS1_PGRCVD V_1G_PCS1_PGRCVD(1U)
+
+#define S_1G_PCS1_SPEED_SEL 6
+#define M_1G_PCS1_SPEED_SEL 0x3U
+#define V_1G_PCS1_SPEED_SEL(x) ((x) << S_1G_PCS1_SPEED_SEL)
+#define G_1G_PCS1_SPEED_SEL(x) (((x) >> S_1G_PCS1_SPEED_SEL) & M_1G_PCS1_SPEED_SEL)
+
+#define S_1G_PCS1_HALF_DUPLEX 5
+#define V_1G_PCS1_HALF_DUPLEX(x) ((x) << S_1G_PCS1_HALF_DUPLEX)
+#define F_1G_PCS1_HALF_DUPLEX V_1G_PCS1_HALF_DUPLEX(1U)
+
+#define S_1G_PCS1_TX_MODE_QUIET 4
+#define V_1G_PCS1_TX_MODE_QUIET(x) ((x) << S_1G_PCS1_TX_MODE_QUIET)
+#define F_1G_PCS1_TX_MODE_QUIET V_1G_PCS1_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_TX_LPI_ACTIVE 3
+#define V_1G_PCS1_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_TX_LPI_ACTIVE)
+#define F_1G_PCS1_TX_LPI_ACTIVE V_1G_PCS1_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_MODE_QUIET 2
+#define V_1G_PCS1_RX_MODE_QUIET(x) ((x) << S_1G_PCS1_RX_MODE_QUIET)
+#define F_1G_PCS1_RX_MODE_QUIET V_1G_PCS1_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_RX_LPI_ACTIVE 1
+#define V_1G_PCS1_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_RX_LPI_ACTIVE)
+#define F_1G_PCS1_RX_LPI_ACTIVE V_1G_PCS1_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_WAKE_ERR 0
+#define V_1G_PCS1_RX_WAKE_ERR(x) ((x) << S_1G_PCS1_RX_WAKE_ERR)
+#define F_1G_PCS1_RX_WAKE_ERR V_1G_PCS1_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS2_STATUS 0x38168
+
+#define S_1G_PCS2_LOOPBACK 12
+#define V_1G_PCS2_LOOPBACK(x) ((x) << S_1G_PCS2_LOOPBACK)
+#define F_1G_PCS2_LOOPBACK V_1G_PCS2_LOOPBACK(1U)
+
+#define S_1G_PCS2_LINK_STATUS 11
+#define V_1G_PCS2_LINK_STATUS(x) ((x) << S_1G_PCS2_LINK_STATUS)
+#define F_1G_PCS2_LINK_STATUS V_1G_PCS2_LINK_STATUS(1U)
+
+#define S_1G_PCS2_RX_SYNC 10
+#define V_1G_PCS2_RX_SYNC(x) ((x) << S_1G_PCS2_RX_SYNC)
+#define F_1G_PCS2_RX_SYNC V_1G_PCS2_RX_SYNC(1U)
+
+#define S_1G_PCS2_AN_DONE 9
+#define V_1G_PCS2_AN_DONE(x) ((x) << S_1G_PCS2_AN_DONE)
+#define F_1G_PCS2_AN_DONE V_1G_PCS2_AN_DONE(1U)
+
+#define S_1G_PCS2_PGRCVD 8
+#define V_1G_PCS2_PGRCVD(x) ((x) << S_1G_PCS2_PGRCVD)
+#define F_1G_PCS2_PGRCVD V_1G_PCS2_PGRCVD(1U)
+
+#define S_1G_PCS2_SPEED_SEL 6
+#define M_1G_PCS2_SPEED_SEL 0x3U
+#define V_1G_PCS2_SPEED_SEL(x) ((x) << S_1G_PCS2_SPEED_SEL)
+#define G_1G_PCS2_SPEED_SEL(x) (((x) >> S_1G_PCS2_SPEED_SEL) & M_1G_PCS2_SPEED_SEL)
+
+#define S_1G_PCS2_HALF_DUPLEX 5
+#define V_1G_PCS2_HALF_DUPLEX(x) ((x) << S_1G_PCS2_HALF_DUPLEX)
+#define F_1G_PCS2_HALF_DUPLEX V_1G_PCS2_HALF_DUPLEX(1U)
+
+#define S_1G_PCS2_TX_MODE_QUIET 4
+#define V_1G_PCS2_TX_MODE_QUIET(x) ((x) << S_1G_PCS2_TX_MODE_QUIET)
+#define F_1G_PCS2_TX_MODE_QUIET V_1G_PCS2_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_TX_LPI_ACTIVE 3
+#define V_1G_PCS2_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_TX_LPI_ACTIVE)
+#define F_1G_PCS2_TX_LPI_ACTIVE V_1G_PCS2_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_MODE_QUIET 2
+#define V_1G_PCS2_RX_MODE_QUIET(x) ((x) << S_1G_PCS2_RX_MODE_QUIET)
+#define F_1G_PCS2_RX_MODE_QUIET V_1G_PCS2_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_RX_LPI_ACTIVE 1
+#define V_1G_PCS2_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_RX_LPI_ACTIVE)
+#define F_1G_PCS2_RX_LPI_ACTIVE V_1G_PCS2_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_WAKE_ERR 0
+#define V_1G_PCS2_RX_WAKE_ERR(x) ((x) << S_1G_PCS2_RX_WAKE_ERR)
+#define F_1G_PCS2_RX_WAKE_ERR V_1G_PCS2_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS3_STATUS 0x3816c
+
+#define S_1G_PCS3_LOOPBACK 12
+#define V_1G_PCS3_LOOPBACK(x) ((x) << S_1G_PCS3_LOOPBACK)
+#define F_1G_PCS3_LOOPBACK V_1G_PCS3_LOOPBACK(1U)
+
+#define S_1G_PCS3_LINK_STATUS 11
+#define V_1G_PCS3_LINK_STATUS(x) ((x) << S_1G_PCS3_LINK_STATUS)
+#define F_1G_PCS3_LINK_STATUS V_1G_PCS3_LINK_STATUS(1U)
+
+#define S_1G_PCS3_RX_SYNC 10
+#define V_1G_PCS3_RX_SYNC(x) ((x) << S_1G_PCS3_RX_SYNC)
+#define F_1G_PCS3_RX_SYNC V_1G_PCS3_RX_SYNC(1U)
+
+#define S_1G_PCS3_AN_DONE 9
+#define V_1G_PCS3_AN_DONE(x) ((x) << S_1G_PCS3_AN_DONE)
+#define F_1G_PCS3_AN_DONE V_1G_PCS3_AN_DONE(1U)
+
+#define S_1G_PCS3_PGRCVD 8
+#define V_1G_PCS3_PGRCVD(x) ((x) << S_1G_PCS3_PGRCVD)
+#define F_1G_PCS3_PGRCVD V_1G_PCS3_PGRCVD(1U)
+
+#define S_1G_PCS3_SPEED_SEL 6
+#define M_1G_PCS3_SPEED_SEL 0x3U
+#define V_1G_PCS3_SPEED_SEL(x) ((x) << S_1G_PCS3_SPEED_SEL)
+#define G_1G_PCS3_SPEED_SEL(x) (((x) >> S_1G_PCS3_SPEED_SEL) & M_1G_PCS3_SPEED_SEL)
+
+#define S_1G_PCS3_HALF_DUPLEX 5
+#define V_1G_PCS3_HALF_DUPLEX(x) ((x) << S_1G_PCS3_HALF_DUPLEX)
+#define F_1G_PCS3_HALF_DUPLEX V_1G_PCS3_HALF_DUPLEX(1U)
+
+#define S_1G_PCS3_TX_MODE_QUIET 4
+#define V_1G_PCS3_TX_MODE_QUIET(x) ((x) << S_1G_PCS3_TX_MODE_QUIET)
+#define F_1G_PCS3_TX_MODE_QUIET V_1G_PCS3_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_TX_LPI_ACTIVE 3
+#define V_1G_PCS3_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_TX_LPI_ACTIVE)
+#define F_1G_PCS3_TX_LPI_ACTIVE V_1G_PCS3_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_MODE_QUIET 2
+#define V_1G_PCS3_RX_MODE_QUIET(x) ((x) << S_1G_PCS3_RX_MODE_QUIET)
+#define F_1G_PCS3_RX_MODE_QUIET V_1G_PCS3_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_RX_LPI_ACTIVE 1
+#define V_1G_PCS3_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_RX_LPI_ACTIVE)
+#define F_1G_PCS3_RX_LPI_ACTIVE V_1G_PCS3_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_WAKE_ERR 0
+#define V_1G_PCS3_RX_WAKE_ERR(x) ((x) << S_1G_PCS3_RX_WAKE_ERR)
+#define F_1G_PCS3_RX_WAKE_ERR V_1G_PCS3_RX_WAKE_ERR(1U)
+
+#define A_MAC_PCS_LPI_STATUS_0 0x38170
+
+#define S_TX_LPI_STATE 0
+#define M_TX_LPI_STATE 0xffffffU
+#define V_TX_LPI_STATE(x) ((x) << S_TX_LPI_STATE)
+#define G_TX_LPI_STATE(x) (((x) >> S_TX_LPI_STATE) & M_TX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_1 0x38174
+
+#define S_TX_LPI_MODE 0
+#define M_TX_LPI_MODE 0xffffU
+#define V_TX_LPI_MODE(x) ((x) << S_TX_LPI_MODE)
+#define G_TX_LPI_MODE(x) (((x) >> S_TX_LPI_MODE) & M_TX_LPI_MODE)
+
+#define A_MAC_PCS_LPI_STATUS_2 0x38178
+
+#define S_RX_LPI_MODE 24
+#define M_RX_LPI_MODE 0xffU
+#define V_RX_LPI_MODE(x) ((x) << S_RX_LPI_MODE)
+#define G_RX_LPI_MODE(x) (((x) >> S_RX_LPI_MODE) & M_RX_LPI_MODE)
+
+#define S_RX_LPI_STATE 0
+#define M_RX_LPI_STATE 0xffffffU
+#define V_RX_LPI_STATE(x) ((x) << S_RX_LPI_STATE)
+#define G_RX_LPI_STATE(x) (((x) >> S_RX_LPI_STATE) & M_RX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_3 0x3817c
+
+#define S_T7_RX_LPI_ACTIVE 0
+#define M_T7_RX_LPI_ACTIVE 0xffU
+#define V_T7_RX_LPI_ACTIVE(x) ((x) << S_T7_RX_LPI_ACTIVE)
+#define G_T7_RX_LPI_ACTIVE(x) (((x) >> S_T7_RX_LPI_ACTIVE) & M_T7_RX_LPI_ACTIVE)
+
+#define A_MAC_TX0_CLK_DIV 0x38180
+#define A_MAC_TX1_CLK_DIV 0x38184
+#define A_MAC_TX2_CLK_DIV 0x38188
+#define A_MAC_TX3_CLK_DIV 0x3818c
+#define A_MAC_TX4_CLK_DIV 0x38190
+#define A_MAC_TX5_CLK_DIV 0x38194
+#define A_MAC_TX6_CLK_DIV 0x38198
+#define A_MAC_TX7_CLK_DIV 0x3819c
+#define A_MAC_RX0_CLK_DIV 0x381a0
+#define A_MAC_RX1_CLK_DIV 0x381a4
+#define A_MAC_RX2_CLK_DIV 0x381a8
+#define A_MAC_RX3_CLK_DIV 0x381ac
+#define A_MAC_RX4_CLK_DIV 0x381b0
+#define A_MAC_RX5_CLK_DIV 0x381b4
+#define A_MAC_RX6_CLK_DIV 0x381b8
+#define A_MAC_RX7_CLK_DIV 0x381bc
+#define A_MAC_SYNC_E_CDR_LANE_SEL 0x381c0
+
+#define S_CML_MUX_SEL 11
+#define V_CML_MUX_SEL(x) ((x) << S_CML_MUX_SEL)
+#define F_CML_MUX_SEL V_CML_MUX_SEL(1U)
+
+#define S_CMOS_OUT_EN 10
+#define V_CMOS_OUT_EN(x) ((x) << S_CMOS_OUT_EN)
+#define F_CMOS_OUT_EN V_CMOS_OUT_EN(1U)
+
+#define S_CML_OUT_EN 9
+#define V_CML_OUT_EN(x) ((x) << S_CML_OUT_EN)
+#define F_CML_OUT_EN V_CML_OUT_EN(1U)
+
+#define S_LOC_FAULT_PORT_SEL 6
+#define M_LOC_FAULT_PORT_SEL 0x3U
+#define V_LOC_FAULT_PORT_SEL(x) ((x) << S_LOC_FAULT_PORT_SEL)
+#define G_LOC_FAULT_PORT_SEL(x) (((x) >> S_LOC_FAULT_PORT_SEL) & M_LOC_FAULT_PORT_SEL)
+
+#define S_TX_CDR_LANE_SEL 3
+#define M_TX_CDR_LANE_SEL 0x7U
+#define V_TX_CDR_LANE_SEL(x) ((x) << S_TX_CDR_LANE_SEL)
+#define G_TX_CDR_LANE_SEL(x) (((x) >> S_TX_CDR_LANE_SEL) & M_TX_CDR_LANE_SEL)
+
+#define S_RX_CDR_LANE_SEL 0
+#define M_RX_CDR_LANE_SEL 0x7U
+#define V_RX_CDR_LANE_SEL(x) ((x) << S_RX_CDR_LANE_SEL)
+#define G_RX_CDR_LANE_SEL(x) (((x) >> S_RX_CDR_LANE_SEL) & M_RX_CDR_LANE_SEL)
+
+#define A_MAC_DEBUG_PL_IF_1 0x381c4
+#define A_MAC_SIGNAL_DETECT_CTRL 0x381f0
+
+#define S_SIGNAL_DET_LN7 15
+#define V_SIGNAL_DET_LN7(x) ((x) << S_SIGNAL_DET_LN7)
+#define F_SIGNAL_DET_LN7 V_SIGNAL_DET_LN7(1U)
+
+#define S_SIGNAL_DET_LN6 14
+#define V_SIGNAL_DET_LN6(x) ((x) << S_SIGNAL_DET_LN6)
+#define F_SIGNAL_DET_LN6 V_SIGNAL_DET_LN6(1U)
+
+#define S_SIGNAL_DET_LN5 13
+#define V_SIGNAL_DET_LN5(x) ((x) << S_SIGNAL_DET_LN5)
+#define F_SIGNAL_DET_LN5 V_SIGNAL_DET_LN5(1U)
+
+#define S_SIGNAL_DET_LN4 12
+#define V_SIGNAL_DET_LN4(x) ((x) << S_SIGNAL_DET_LN4)
+#define F_SIGNAL_DET_LN4 V_SIGNAL_DET_LN4(1U)
+
+#define S_SIGNAL_DET_LN3 11
+#define V_SIGNAL_DET_LN3(x) ((x) << S_SIGNAL_DET_LN3)
+#define F_SIGNAL_DET_LN3 V_SIGNAL_DET_LN3(1U)
+
+#define S_SIGNAL_DET_LN2 10
+#define V_SIGNAL_DET_LN2(x) ((x) << S_SIGNAL_DET_LN2)
+#define F_SIGNAL_DET_LN2 V_SIGNAL_DET_LN2(1U)
+
+#define S_SIGNAL_DET_LN1 9
+#define V_SIGNAL_DET_LN1(x) ((x) << S_SIGNAL_DET_LN1)
+#define F_SIGNAL_DET_LN1 V_SIGNAL_DET_LN1(1U)
+
+#define S_SIGNAL_DET_LN0 8
+#define V_SIGNAL_DET_LN0(x) ((x) << S_SIGNAL_DET_LN0)
+#define F_SIGNAL_DET_LN0 V_SIGNAL_DET_LN0(1U)
+
+#define S_SIGDETCTRL_LN7 7
+#define V_SIGDETCTRL_LN7(x) ((x) << S_SIGDETCTRL_LN7)
+#define F_SIGDETCTRL_LN7 V_SIGDETCTRL_LN7(1U)
+
+#define S_SIGDETCTRL_LN6 6
+#define V_SIGDETCTRL_LN6(x) ((x) << S_SIGDETCTRL_LN6)
+#define F_SIGDETCTRL_LN6 V_SIGDETCTRL_LN6(1U)
+
+#define S_SIGDETCTRL_LN5 5
+#define V_SIGDETCTRL_LN5(x) ((x) << S_SIGDETCTRL_LN5)
+#define F_SIGDETCTRL_LN5 V_SIGDETCTRL_LN5(1U)
+
+#define S_SIGDETCTRL_LN4 4
+#define V_SIGDETCTRL_LN4(x) ((x) << S_SIGDETCTRL_LN4)
+#define F_SIGDETCTRL_LN4 V_SIGDETCTRL_LN4(1U)
+
+#define S_SIGDETCTRL_LN3 3
+#define V_SIGDETCTRL_LN3(x) ((x) << S_SIGDETCTRL_LN3)
+#define F_SIGDETCTRL_LN3 V_SIGDETCTRL_LN3(1U)
+
+#define S_SIGDETCTRL_LN2 2
+#define V_SIGDETCTRL_LN2(x) ((x) << S_SIGDETCTRL_LN2)
+#define F_SIGDETCTRL_LN2 V_SIGDETCTRL_LN2(1U)
+
+#define S_SIGDETCTRL_LN1 1
+#define V_SIGDETCTRL_LN1(x) ((x) << S_SIGDETCTRL_LN1)
+#define F_SIGDETCTRL_LN1 V_SIGDETCTRL_LN1(1U)
+
+#define S_SIGDETCTRL_LN0 0
+#define V_SIGDETCTRL_LN0(x) ((x) << S_SIGDETCTRL_LN0)
+#define F_SIGDETCTRL_LN0 V_SIGDETCTRL_LN0(1U)
+
+#define A_MAC_FPGA_STATUS_FRM_BOARD 0x381f4
+
+#define S_SFP3_RX_LOS 15
+#define V_SFP3_RX_LOS(x) ((x) << S_SFP3_RX_LOS)
+#define F_SFP3_RX_LOS V_SFP3_RX_LOS(1U)
+
+#define S_SFP3_TX_FAULT 14
+#define V_SFP3_TX_FAULT(x) ((x) << S_SFP3_TX_FAULT)
+#define F_SFP3_TX_FAULT V_SFP3_TX_FAULT(1U)
+
+#define S_SFP3_MOD_PRES 13
+#define V_SFP3_MOD_PRES(x) ((x) << S_SFP3_MOD_PRES)
+#define F_SFP3_MOD_PRES V_SFP3_MOD_PRES(1U)
+
+#define S_SFP2_RX_LOS 12
+#define V_SFP2_RX_LOS(x) ((x) << S_SFP2_RX_LOS)
+#define F_SFP2_RX_LOS V_SFP2_RX_LOS(1U)
+
+#define S_SFP2_TX_FAULT 11
+#define V_SFP2_TX_FAULT(x) ((x) << S_SFP2_TX_FAULT)
+#define F_SFP2_TX_FAULT V_SFP2_TX_FAULT(1U)
+
+#define S_SFP2_MOD_PRES 10
+#define V_SFP2_MOD_PRES(x) ((x) << S_SFP2_MOD_PRES)
+#define F_SFP2_MOD_PRES V_SFP2_MOD_PRES(1U)
+
+#define S_SFP1_RX_LOS 9
+#define V_SFP1_RX_LOS(x) ((x) << S_SFP1_RX_LOS)
+#define F_SFP1_RX_LOS V_SFP1_RX_LOS(1U)
+
+#define S_SFP1_TX_FAULT 8
+#define V_SFP1_TX_FAULT(x) ((x) << S_SFP1_TX_FAULT)
+#define F_SFP1_TX_FAULT V_SFP1_TX_FAULT(1U)
+
+#define S_SFP1_MOD_PRES 7
+#define V_SFP1_MOD_PRES(x) ((x) << S_SFP1_MOD_PRES)
+#define F_SFP1_MOD_PRES V_SFP1_MOD_PRES(1U)
+
+#define S_SFP0_RX_LOS 6
+#define V_SFP0_RX_LOS(x) ((x) << S_SFP0_RX_LOS)
+#define F_SFP0_RX_LOS V_SFP0_RX_LOS(1U)
+
+#define S_SFP0_TX_FAULT 5
+#define V_SFP0_TX_FAULT(x) ((x) << S_SFP0_TX_FAULT)
+#define F_SFP0_TX_FAULT V_SFP0_TX_FAULT(1U)
+
+#define S_SFP0_MOD_PRES 4
+#define V_SFP0_MOD_PRES(x) ((x) << S_SFP0_MOD_PRES)
+#define F_SFP0_MOD_PRES V_SFP0_MOD_PRES(1U)
+
+#define S_QSFP1_INT_L 3
+#define V_QSFP1_INT_L(x) ((x) << S_QSFP1_INT_L)
+#define F_QSFP1_INT_L V_QSFP1_INT_L(1U)
+
+#define S_QSFP1_MOD_PRES 2
+#define V_QSFP1_MOD_PRES(x) ((x) << S_QSFP1_MOD_PRES)
+#define F_QSFP1_MOD_PRES V_QSFP1_MOD_PRES(1U)
+
+#define S_QSFP0_INT_L 1
+#define V_QSFP0_INT_L(x) ((x) << S_QSFP0_INT_L)
+#define F_QSFP0_INT_L V_QSFP0_INT_L(1U)
+
+#define S_QSFP0_MOD_PRES 0
+#define V_QSFP0_MOD_PRES(x) ((x) << S_QSFP0_MOD_PRES)
+#define F_QSFP0_MOD_PRES V_QSFP0_MOD_PRES(1U)
+
+#define A_MAC_FPGA_CONTROL_TO_BOARD 0x381f8
+
+#define S_T7_1_LB_MODE 10
+#define M_T7_1_LB_MODE 0x3U
+#define V_T7_1_LB_MODE(x) ((x) << S_T7_1_LB_MODE)
+#define G_T7_1_LB_MODE(x) (((x) >> S_T7_1_LB_MODE) & M_T7_1_LB_MODE)
+
+#define S_SFP3_TX_DISABLE 9
+#define V_SFP3_TX_DISABLE(x) ((x) << S_SFP3_TX_DISABLE)
+#define F_SFP3_TX_DISABLE V_SFP3_TX_DISABLE(1U)
+
+#define S_SFP2_TX_DISABLE 8
+#define V_SFP2_TX_DISABLE(x) ((x) << S_SFP2_TX_DISABLE)
+#define F_SFP2_TX_DISABLE V_SFP2_TX_DISABLE(1U)
+
+#define S_SFP1_TX_DISABLE 7
+#define V_SFP1_TX_DISABLE(x) ((x) << S_SFP1_TX_DISABLE)
+#define F_SFP1_TX_DISABLE V_SFP1_TX_DISABLE(1U)
+
+#define S_SFP0_TX_DISABLE 6
+#define V_SFP0_TX_DISABLE(x) ((x) << S_SFP0_TX_DISABLE)
+#define F_SFP0_TX_DISABLE V_SFP0_TX_DISABLE(1U)
+
+#define S_QSFP1_LPMODE 5
+#define V_QSFP1_LPMODE(x) ((x) << S_QSFP1_LPMODE)
+#define F_QSFP1_LPMODE V_QSFP1_LPMODE(1U)
+
+#define S_QSFP1_MODSEL_L 4
+#define V_QSFP1_MODSEL_L(x) ((x) << S_QSFP1_MODSEL_L)
+#define F_QSFP1_MODSEL_L V_QSFP1_MODSEL_L(1U)
+
+#define S_QSFP1_RESET_L 3
+#define V_QSFP1_RESET_L(x) ((x) << S_QSFP1_RESET_L)
+#define F_QSFP1_RESET_L V_QSFP1_RESET_L(1U)
+
+#define S_QSFP0_LPMODE 2
+#define V_QSFP0_LPMODE(x) ((x) << S_QSFP0_LPMODE)
+#define F_QSFP0_LPMODE V_QSFP0_LPMODE(1U)
+
+#define S_QSFP0_MODSEL_L 1
+#define V_QSFP0_MODSEL_L(x) ((x) << S_QSFP0_MODSEL_L)
+#define F_QSFP0_MODSEL_L V_QSFP0_MODSEL_L(1U)
+
+#define S_QSFP0_RESET_L 0
+#define V_QSFP0_RESET_L(x) ((x) << S_QSFP0_RESET_L)
+#define F_QSFP0_RESET_L V_QSFP0_RESET_L(1U)
+
+#define A_MAC_FPGA_LINK_STATUS 0x381fc
+
+#define S_PORT3_FPGA_LINK_UP 3
+#define V_PORT3_FPGA_LINK_UP(x) ((x) << S_PORT3_FPGA_LINK_UP)
+#define F_PORT3_FPGA_LINK_UP V_PORT3_FPGA_LINK_UP(1U)
+
+#define S_PORT2_FPGA_LINK_UP 2
+#define V_PORT2_FPGA_LINK_UP(x) ((x) << S_PORT2_FPGA_LINK_UP)
+#define F_PORT2_FPGA_LINK_UP V_PORT2_FPGA_LINK_UP(1U)
+
+#define S_PORT1_FPGA_LINK_UP 1
+#define V_PORT1_FPGA_LINK_UP(x) ((x) << S_PORT1_FPGA_LINK_UP)
+#define F_PORT1_FPGA_LINK_UP V_PORT1_FPGA_LINK_UP(1U)
+
+#define S_PORT0_FPGA_LINK_UP 0
+#define V_PORT0_FPGA_LINK_UP(x) ((x) << S_PORT0_FPGA_LINK_UP)
+#define F_PORT0_FPGA_LINK_UP V_PORT0_FPGA_LINK_UP(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_REVISION 0x38200
+
+#define S_MTIP_REV_400G_0 0
+#define M_MTIP_REV_400G_0 0xffU
+#define V_MTIP_REV_400G_0(x) ((x) << S_MTIP_REV_400G_0)
+#define G_MTIP_REV_400G_0(x) (((x) >> S_MTIP_REV_400G_0) & M_MTIP_REV_400G_0)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_SCRATCH 0x38204
+#define A_MAC_MTIP_MAC400G_0_MTIP_COMMAND_CONFIG 0x38208
+
+#define S_INV_LOOP 31
+#define V_INV_LOOP(x) ((x) << S_INV_LOOP)
+#define F_INV_LOOP V_INV_LOOP(1U)
+
+#define S_TX_FLUSH_ENABLE_400G_0 22
+#define V_TX_FLUSH_ENABLE_400G_0(x) ((x) << S_TX_FLUSH_ENABLE_400G_0)
+#define F_TX_FLUSH_ENABLE_400G_0 V_TX_FLUSH_ENABLE_400G_0(1U)
+
+#define S_PHY_LOOPBACK_EN_400G 10
+#define V_PHY_LOOPBACK_EN_400G(x) ((x) << S_PHY_LOOPBACK_EN_400G)
+#define F_PHY_LOOPBACK_EN_400G V_PHY_LOOPBACK_EN_400G(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_0 0x3820c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_1 0x38210
+#define A_MAC_MTIP_MAC400G_0_MTIP_FRM_LENGTH 0x38214
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_SECTIONS 0x3821c
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_SECTIONS 0x38220
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_ALMOST_F_E 0x38224
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_ALMOST_F_E 0x38228
+#define A_MAC_MTIP_MAC400G_0_MTIP_HASHTABLE_LOAD 0x3822c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_STATUS 0x38240
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_IPG_LENGTH 0x38244
+
+#define S_T7_IPG 19
+#define M_T7_IPG 0x1fffU
+#define V_T7_IPG(x) ((x) << S_T7_IPG)
+#define G_T7_IPG(x) (((x) >> S_T7_IPG) & M_T7_IPG)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA 0x38254
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA 0x38258
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA 0x3825c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA 0x38260
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38264
+
+#define S_CL1_PAUSE_QUANTA_THRESH 16
+#define M_CL1_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL1_PAUSE_QUANTA_THRESH(x) ((x) << S_CL1_PAUSE_QUANTA_THRESH)
+#define G_CL1_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL1_PAUSE_QUANTA_THRESH) & M_CL1_PAUSE_QUANTA_THRESH)
+
+#define S_CL0_PAUSE_QUANTA_THRESH 0
+#define M_CL0_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL0_PAUSE_QUANTA_THRESH(x) ((x) << S_CL0_PAUSE_QUANTA_THRESH)
+#define G_CL0_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL0_PAUSE_QUANTA_THRESH) & M_CL0_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38268
+
+#define S_CL3_PAUSE_QUANTA_THRESH 16
+#define M_CL3_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL3_PAUSE_QUANTA_THRESH(x) ((x) << S_CL3_PAUSE_QUANTA_THRESH)
+#define G_CL3_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL3_PAUSE_QUANTA_THRESH) & M_CL3_PAUSE_QUANTA_THRESH)
+
+#define S_CL2_PAUSE_QUANTA_THRESH 0
+#define M_CL2_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL2_PAUSE_QUANTA_THRESH(x) ((x) << S_CL2_PAUSE_QUANTA_THRESH)
+#define G_CL2_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL2_PAUSE_QUANTA_THRESH) & M_CL2_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3826c
+
+#define S_CL5_PAUSE_QUANTA_THRESH 16
+#define M_CL5_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL5_PAUSE_QUANTA_THRESH(x) ((x) << S_CL5_PAUSE_QUANTA_THRESH)
+#define G_CL5_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL5_PAUSE_QUANTA_THRESH) & M_CL5_PAUSE_QUANTA_THRESH)
+
+#define S_CL4_PAUSE_QUANTA_THRESH 0
+#define M_CL4_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL4_PAUSE_QUANTA_THRESH(x) ((x) << S_CL4_PAUSE_QUANTA_THRESH)
+#define G_CL4_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL4_PAUSE_QUANTA_THRESH) & M_CL4_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38270
+
+#define S_CL7_PAUSE_QUANTA_THRESH 16
+#define M_CL7_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL7_PAUSE_QUANTA_THRESH(x) ((x) << S_CL7_PAUSE_QUANTA_THRESH)
+#define G_CL7_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL7_PAUSE_QUANTA_THRESH) & M_CL7_PAUSE_QUANTA_THRESH)
+
+#define S_CL6_PAUSE_QUANTA_THRESH 0
+#define M_CL6_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL6_PAUSE_QUANTA_THRESH(x) ((x) << S_CL6_PAUSE_QUANTA_THRESH)
+#define G_CL6_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL6_PAUSE_QUANTA_THRESH) & M_CL6_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_PAUSE_STATUS 0x38274
+
+#define S_RX_PAUSE_STATUS 0
+#define M_RX_PAUSE_STATUS 0xffU
+#define V_RX_PAUSE_STATUS(x) ((x) << S_RX_PAUSE_STATUS)
+#define G_RX_PAUSE_STATUS(x) (((x) >> S_RX_PAUSE_STATUS) & M_RX_PAUSE_STATUS)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_TS_TIMESTAMP 0x3827c
+#define A_MAC_MTIP_MAC400G_0_MTIP_XIF_MODE 0x38280
+#define A_MAC_MTIP_MAC400G_1_MTIP_REVISION 0x38300
+
+#define S_MTIP_REV_400G_1 0
+#define M_MTIP_REV_400G_1 0xffU
+#define V_MTIP_REV_400G_1(x) ((x) << S_MTIP_REV_400G_1)
+#define G_MTIP_REV_400G_1(x) (((x) >> S_MTIP_REV_400G_1) & M_MTIP_REV_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_SCRATCH 0x38304
+#define A_MAC_MTIP_MAC400G_1_MTIP_COMMAND_CONFIG 0x38308
+
+#define S_TX_FLUSH_ENABLE_400G_1 22
+#define V_TX_FLUSH_ENABLE_400G_1(x) ((x) << S_TX_FLUSH_ENABLE_400G_1)
+#define F_TX_FLUSH_ENABLE_400G_1 V_TX_FLUSH_ENABLE_400G_1(1U)
+
+#define S_PHY_LOOPBACK_EN_400G_1 10
+#define V_PHY_LOOPBACK_EN_400G_1(x) ((x) << S_PHY_LOOPBACK_EN_400G_1)
+#define F_PHY_LOOPBACK_EN_400G_1 V_PHY_LOOPBACK_EN_400G_1(1U)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_0 0x3830c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_1 0x38310
+#define A_MAC_MTIP_MAC400G_1_MTIP_FRM_LENGTH 0x38314
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_SECTIONS 0x3831c
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_SECTIONS 0x38320
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_ALMOST_F_E 0x38324
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_ALMOST_F_E 0x38328
+#define A_MAC_MTIP_MAC400G_1_MTIP_HASHTABLE_LOAD 0x3832c
+
+#define S_ENABLE_MCAST_RX_400G_1 8
+#define V_ENABLE_MCAST_RX_400G_1(x) ((x) << S_ENABLE_MCAST_RX_400G_1)
+#define F_ENABLE_MCAST_RX_400G_1 V_ENABLE_MCAST_RX_400G_1(1U)
+
+#define S_HASHTABLE_ADDR_400G_1 0
+#define M_HASHTABLE_ADDR_400G_1 0x3fU
+#define V_HASHTABLE_ADDR_400G_1(x) ((x) << S_HASHTABLE_ADDR_400G_1)
+#define G_HASHTABLE_ADDR_400G_1(x) (((x) >> S_HASHTABLE_ADDR_400G_1) & M_HASHTABLE_ADDR_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_STATUS 0x38340
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_IPG_LENGTH 0x38344
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA 0x38354
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA 0x38358
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA 0x3835c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA 0x38360
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38364
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38368
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3836c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38370
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_PAUSE_STATUS 0x38374
+#define A_MAC_MTIP_MAC400G_1_MTIP_TS_TIMESTAMP 0x3837c
+#define A_MAC_MTIP_MAC400G_1_MTIP_XIF_MODE 0x38380
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_1 0x38400
+
+#define S_T7_SPEED_SELECTION 2
+#define V_T7_SPEED_SELECTION(x) ((x) << S_T7_SPEED_SELECTION)
+#define F_T7_SPEED_SELECTION V_T7_SPEED_SELECTION(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_1 0x38404
+
+#define S_400G_RX_LINK_STATUS 2
+#define V_400G_RX_LINK_STATUS(x) ((x) << S_400G_RX_LINK_STATUS)
+#define F_400G_RX_LINK_STATUS V_400G_RX_LINK_STATUS(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID0 0x38408
+
+#define S_400G_DEVICE_ID0_0 0
+#define M_400G_DEVICE_ID0_0 0xffffU
+#define V_400G_DEVICE_ID0_0(x) ((x) << S_400G_DEVICE_ID0_0)
+#define G_400G_DEVICE_ID0_0(x) (((x) >> S_400G_DEVICE_ID0_0) & M_400G_DEVICE_ID0_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID1 0x3840c
+
+#define S_400G_DEVICE_ID1_0 0
+#define M_400G_DEVICE_ID1_0 0xffffU
+#define V_400G_DEVICE_ID1_0(x) ((x) << S_400G_DEVICE_ID1_0)
+#define G_400G_DEVICE_ID1_0(x) (((x) >> S_400G_DEVICE_ID1_0) & M_400G_DEVICE_ID1_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SPEED_ABILITY 0x38410
+
+#define S_400G_CAPABLE_0 9
+#define V_400G_CAPABLE_0(x) ((x) << S_400G_CAPABLE_0)
+#define F_400G_CAPABLE_0 V_400G_CAPABLE_0(1U)
+
+#define S_200G_CAPABLE_0 8
+#define V_200G_CAPABLE_0(x) ((x) << S_200G_CAPABLE_0)
+#define F_200G_CAPABLE_0 V_200G_CAPABLE_0(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG1 0x38414
+
+#define S_DEVICE_PACKAGE 3
+#define V_DEVICE_PACKAGE(x) ((x) << S_DEVICE_PACKAGE)
+#define F_DEVICE_PACKAGE V_DEVICE_PACKAGE(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG2 0x38418
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_2 0x3841c
+
+#define S_400G_PCS_TYPE_SELECTION_0 0
+#define M_400G_PCS_TYPE_SELECTION_0 0xfU
+#define V_400G_PCS_TYPE_SELECTION_0(x) ((x) << S_400G_PCS_TYPE_SELECTION_0)
+#define G_400G_PCS_TYPE_SELECTION_0(x) (((x) >> S_400G_PCS_TYPE_SELECTION_0) & M_400G_PCS_TYPE_SELECTION_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_2 0x38420
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_3 0x38424
+
+#define S_T7_DEVICE_PRESENT 2
+#define M_T7_DEVICE_PRESENT 0x3fffU
+#define V_T7_DEVICE_PRESENT(x) ((x) << S_T7_DEVICE_PRESENT)
+#define G_T7_DEVICE_PRESENT(x) (((x) >> S_T7_DEVICE_PRESENT) & M_T7_DEVICE_PRESENT)
+
+#define S_400GBASE_R 1
+#define V_400GBASE_R(x) ((x) << S_400GBASE_R)
+#define F_400GBASE_R V_400GBASE_R(1U)
+
+#define S_200GBASE_R 0
+#define V_200GBASE_R(x) ((x) << S_200GBASE_R)
+#define F_200GBASE_R V_200GBASE_R(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID0 0x38438
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID1 0x3843c
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_1 0x38480
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_2 0x38484
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_CONTROL 0x384a8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_ERR_CNT 0x384ac
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BER_HIGH_ORDER_CNT 0x384b0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x384b4
+
+#define S_HIGH_ORDER 15
+#define V_HIGH_ORDER(x) ((x) << S_HIGH_ORDER)
+#define F_HIGH_ORDER V_HIGH_ORDER(1U)
+
+#define S_ERROR_BLOCK_COUNTER 0
+#define M_ERROR_BLOCK_COUNTER 0x3fffU
+#define V_ERROR_BLOCK_COUNTER(x) ((x) << S_ERROR_BLOCK_COUNTER)
+#define G_ERROR_BLOCK_COUNTER(x) (((x) >> S_ERROR_BLOCK_COUNTER) & M_ERROR_BLOCK_COUNTER)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x384c8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x384cc
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x384d0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x384d4
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_0_MAPPING 0x384d8
+
+#define S_T7_LANE_0_MAPPING 0
+#define M_T7_LANE_0_MAPPING 0xfU
+#define V_T7_LANE_0_MAPPING(x) ((x) << S_T7_LANE_0_MAPPING)
+#define G_T7_LANE_0_MAPPING(x) (((x) >> S_T7_LANE_0_MAPPING) & M_T7_LANE_0_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_1_MAPPING 0x384dc
+
+#define S_T7_LANE_1_MAPPING 0
+#define M_T7_LANE_1_MAPPING 0xfU
+#define V_T7_LANE_1_MAPPING(x) ((x) << S_T7_LANE_1_MAPPING)
+#define G_T7_LANE_1_MAPPING(x) (((x) >> S_T7_LANE_1_MAPPING) & M_T7_LANE_1_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_2_MAPPING 0x384e0
+
+#define S_T7_LANE_2_MAPPING 0
+#define M_T7_LANE_2_MAPPING 0xfU
+#define V_T7_LANE_2_MAPPING(x) ((x) << S_T7_LANE_2_MAPPING)
+#define G_T7_LANE_2_MAPPING(x) (((x) >> S_T7_LANE_2_MAPPING) & M_T7_LANE_2_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_3_MAPPING 0x384e4
+
+#define S_T7_LANE_3_MAPPING 0
+#define M_T7_LANE_3_MAPPING 0xfU
+#define V_T7_LANE_3_MAPPING(x) ((x) << S_T7_LANE_3_MAPPING)
+#define G_T7_LANE_3_MAPPING(x) (((x) >> S_T7_LANE_3_MAPPING) & M_T7_LANE_3_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_4_MAPPING 0x384e8
+
+#define S_T7_LANE_4_MAPPING 0
+#define M_T7_LANE_4_MAPPING 0xfU
+#define V_T7_LANE_4_MAPPING(x) ((x) << S_T7_LANE_4_MAPPING)
+#define G_T7_LANE_4_MAPPING(x) (((x) >> S_T7_LANE_4_MAPPING) & M_T7_LANE_4_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_5_MAPPING 0x384ec
+
+#define S_T7_LANE_5_MAPPING 0
+#define M_T7_LANE_5_MAPPING 0xfU
+#define V_T7_LANE_5_MAPPING(x) ((x) << S_T7_LANE_5_MAPPING)
+#define G_T7_LANE_5_MAPPING(x) (((x) >> S_T7_LANE_5_MAPPING) & M_T7_LANE_5_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_6_MAPPING 0x384f0
+
+#define S_T7_LANE_6_MAPPING 0
+#define M_T7_LANE_6_MAPPING 0xfU
+#define V_T7_LANE_6_MAPPING(x) ((x) << S_T7_LANE_6_MAPPING)
+#define G_T7_LANE_6_MAPPING(x) (((x) >> S_T7_LANE_6_MAPPING) & M_T7_LANE_6_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_7_MAPPING 0x384f4
+
+#define S_T7_LANE_7_MAPPING 0
+#define M_T7_LANE_7_MAPPING 0xfU
+#define V_T7_LANE_7_MAPPING(x) ((x) << S_T7_LANE_7_MAPPING)
+#define G_T7_LANE_7_MAPPING(x) (((x) >> S_T7_LANE_7_MAPPING) & M_T7_LANE_7_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_8_MAPPING 0x384f8
+
+#define S_T7_LANE_8_MAPPING 0
+#define M_T7_LANE_8_MAPPING 0xfU
+#define V_T7_LANE_8_MAPPING(x) ((x) << S_T7_LANE_8_MAPPING)
+#define G_T7_LANE_8_MAPPING(x) (((x) >> S_T7_LANE_8_MAPPING) & M_T7_LANE_8_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_9_MAPPING 0x384fc
+
+#define S_T7_LANE_9_MAPPING 0
+#define M_T7_LANE_9_MAPPING 0xfU
+#define V_T7_LANE_9_MAPPING(x) ((x) << S_T7_LANE_9_MAPPING)
+#define G_T7_LANE_9_MAPPING(x) (((x) >> S_T7_LANE_9_MAPPING) & M_T7_LANE_9_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_10_MAPPING 0x38500
+
+#define S_T7_LANE_10_MAPPING 0
+#define M_T7_LANE_10_MAPPING 0xfU
+#define V_T7_LANE_10_MAPPING(x) ((x) << S_T7_LANE_10_MAPPING)
+#define G_T7_LANE_10_MAPPING(x) (((x) >> S_T7_LANE_10_MAPPING) & M_T7_LANE_10_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_11_MAPPING 0x38504
+
+#define S_T7_LANE_11_MAPPING 0
+#define M_T7_LANE_11_MAPPING 0xfU
+#define V_T7_LANE_11_MAPPING(x) ((x) << S_T7_LANE_11_MAPPING)
+#define G_T7_LANE_11_MAPPING(x) (((x) >> S_T7_LANE_11_MAPPING) & M_T7_LANE_11_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_12_MAPPING 0x38508
+
+#define S_T7_LANE_12_MAPPING 0
+#define M_T7_LANE_12_MAPPING 0xfU
+#define V_T7_LANE_12_MAPPING(x) ((x) << S_T7_LANE_12_MAPPING)
+#define G_T7_LANE_12_MAPPING(x) (((x) >> S_T7_LANE_12_MAPPING) & M_T7_LANE_12_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_13_MAPPING 0x3850c
+
+#define S_T7_LANE_13_MAPPING 0
+#define M_T7_LANE_13_MAPPING 0xfU
+#define V_T7_LANE_13_MAPPING(x) ((x) << S_T7_LANE_13_MAPPING)
+#define G_T7_LANE_13_MAPPING(x) (((x) >> S_T7_LANE_13_MAPPING) & M_T7_LANE_13_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_14_MAPPING 0x38510
+
+#define S_T7_LANE_14_MAPPING 0
+#define M_T7_LANE_14_MAPPING 0xfU
+#define V_T7_LANE_14_MAPPING(x) ((x) << S_T7_LANE_14_MAPPING)
+#define G_T7_LANE_14_MAPPING(x) (((x) >> S_T7_LANE_14_MAPPING) & M_T7_LANE_14_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_15_MAPPING 0x38514
+
+#define S_T7_LANE_15_MAPPING 0
+#define M_T7_LANE_15_MAPPING 0xfU
+#define V_T7_LANE_15_MAPPING(x) ((x) << S_T7_LANE_15_MAPPING)
+#define G_T7_LANE_15_MAPPING(x) (((x) >> S_T7_LANE_15_MAPPING) & M_T7_LANE_15_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SCRATCH 0x38600
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CORE_REVISION 0x38604
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CL_INTVL 0x38608
+
+#define S_T7_VL_INTVL 0
+#define M_T7_VL_INTVL 0xffffU
+#define V_T7_VL_INTVL(x) ((x) << S_T7_VL_INTVL)
+#define G_T7_VL_INTVL(x) (((x) >> S_T7_VL_INTVL) & M_T7_VL_INTVL)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_LANE_THRESH 0x3860c
+
+#define S_TX_LANE_THRESH 0
+#define M_TX_LANE_THRESH 0xfU
+#define V_TX_LANE_THRESH(x) ((x) << S_TX_LANE_THRESH)
+#define G_TX_LANE_THRESH(x) (((x) >> S_TX_LANE_THRESH) & M_TX_LANE_THRESH)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_CDMII_PACE 0x3861c
+
+#define S_TX_CDMII_PACE 0
+#define M_TX_CDMII_PACE 0xfU
+#define V_TX_CDMII_PACE(x) ((x) << S_TX_CDMII_PACE)
+#define G_TX_CDMII_PACE(x) (((x) >> S_TX_CDMII_PACE) & M_TX_CDMII_PACE)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_0 0x38620
+
+#define S_AM_0 0
+#define M_AM_0 0xffffU
+#define V_AM_0(x) ((x) << S_AM_0)
+#define G_AM_0(x) (((x) >> S_AM_0) & M_AM_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_1 0x38624
+
+#define S_AM_1 0
+#define M_AM_1 0xffffU
+#define V_AM_1(x) ((x) << S_AM_1)
+#define G_AM_1(x) (((x) >> S_AM_1) & M_AM_1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO0 0x38800
+
+#define S_DBGINFO0 0
+#define M_DBGINFO0 0xffffU
+#define V_DBGINFO0(x) ((x) << S_DBGINFO0)
+#define G_DBGINFO0(x) (((x) >> S_DBGINFO0) & M_DBGINFO0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO1 0x38804
+
+#define S_DBGINFO1 0
+#define M_DBGINFO1 0xffffU
+#define V_DBGINFO1(x) ((x) << S_DBGINFO1)
+#define G_DBGINFO1(x) (((x) >> S_DBGINFO1) & M_DBGINFO1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO2 0x38808
+
+#define S_DBGINFO2 0
+#define M_DBGINFO2 0xffffU
+#define V_DBGINFO2(x) ((x) << S_DBGINFO2)
+#define G_DBGINFO2(x) (((x) >> S_DBGINFO2) & M_DBGINFO2)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO3 0x3880c
+
+#define S_DBGINFO3 0
+#define M_DBGINFO3 0xffffU
+#define V_DBGINFO3(x) ((x) << S_DBGINFO3)
+#define G_DBGINFO3(x) (((x) >> S_DBGINFO3) & M_DBGINFO3)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_1 0x38900
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_1 0x38904
+
+#define S_400G_RX_LINK_STATUS_1 2
+#define V_400G_RX_LINK_STATUS_1(x) ((x) << S_400G_RX_LINK_STATUS_1)
+#define F_400G_RX_LINK_STATUS_1 V_400G_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID0 0x38908
+
+#define S_400G_DEVICE_ID0_1 0
+#define M_400G_DEVICE_ID0_1 0xffffU
+#define V_400G_DEVICE_ID0_1(x) ((x) << S_400G_DEVICE_ID0_1)
+#define G_400G_DEVICE_ID0_1(x) (((x) >> S_400G_DEVICE_ID0_1) & M_400G_DEVICE_ID0_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID1 0x3890c
+
+#define S_400G_DEVICE_ID1_1 0
+#define M_400G_DEVICE_ID1_1 0xffffU
+#define V_400G_DEVICE_ID1_1(x) ((x) << S_400G_DEVICE_ID1_1)
+#define G_400G_DEVICE_ID1_1(x) (((x) >> S_400G_DEVICE_ID1_1) & M_400G_DEVICE_ID1_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SPEED_ABILITY 0x38910
+
+#define S_400G_CAPABLE_1 9
+#define V_400G_CAPABLE_1(x) ((x) << S_400G_CAPABLE_1)
+#define F_400G_CAPABLE_1 V_400G_CAPABLE_1(1U)
+
+#define S_200G_CAPABLE_1 8
+#define V_200G_CAPABLE_1(x) ((x) << S_200G_CAPABLE_1)
+#define F_200G_CAPABLE_1 V_200G_CAPABLE_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG1 0x38914
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG2 0x38918
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_2 0x3891c
+
+#define S_400G_PCS_TYPE_SELECTION_1 0
+#define M_400G_PCS_TYPE_SELECTION_1 0xfU
+#define V_400G_PCS_TYPE_SELECTION_1(x) ((x) << S_400G_PCS_TYPE_SELECTION_1)
+#define G_400G_PCS_TYPE_SELECTION_1(x) (((x) >> S_400G_PCS_TYPE_SELECTION_1) & M_400G_PCS_TYPE_SELECTION_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_2 0x38920
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_3 0x38924
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID0 0x38938
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID1 0x3893c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_1 0x38980
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_2 0x38984
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_CONTROL 0x389a8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_ERR_CNT 0x389ac
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BER_HIGH_ORDER_CNT 0x389b0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x389b4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x389c8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x389cc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x389d0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x389d4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_0_MAPPING 0x389d8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_1_MAPPING 0x389dc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_2_MAPPING 0x389e0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_3_MAPPING 0x389e4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_4_MAPPING 0x389e8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_5_MAPPING 0x389ec
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_6_MAPPING 0x389f0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_7_MAPPING 0x389f4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_8_MAPPING 0x389f8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_9_MAPPING 0x389fc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_10_MAPPING 0x38a00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_11_MAPPING 0x38a04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_12_MAPPING 0x38a08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_13_MAPPING 0x38a0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_14_MAPPING 0x38a10
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_15_MAPPING 0x38a14
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SCRATCH 0x38b00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CORE_REVISION 0x38b04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CL_INTVL 0x38b08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_LANE_THRESH 0x38b0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_CDMII_PACE 0x38b1c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_0 0x38b20
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_1 0x38b24
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO0 0x38d00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO1 0x38d04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO2 0x38d08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO3 0x38d0c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_0 0x38e00
+
+#define S_TC_PAD_ALTER 10
+#define V_TC_PAD_ALTER(x) ((x) << S_TC_PAD_ALTER)
+#define F_TC_PAD_ALTER V_TC_PAD_ALTER(1U)
+
+#define S_TC_PAD_VALUE 9
+#define V_TC_PAD_VALUE(x) ((x) << S_TC_PAD_VALUE)
+#define F_TC_PAD_VALUE V_TC_PAD_VALUE(1U)
+
+#define S_KP_ENABLE 8
+#define V_KP_ENABLE(x) ((x) << S_KP_ENABLE)
+#define F_KP_ENABLE V_KP_ENABLE(1U)
+
+#define S_AM16_COPY_DIS 3
+#define V_AM16_COPY_DIS(x) ((x) << S_AM16_COPY_DIS)
+#define F_AM16_COPY_DIS V_AM16_COPY_DIS(1U)
+
+#define S_RS_FEC_DEGRADE_OPTION_ENA 2
+#define V_RS_FEC_DEGRADE_OPTION_ENA(x) ((x) << S_RS_FEC_DEGRADE_OPTION_ENA)
+#define F_RS_FEC_DEGRADE_OPTION_ENA V_RS_FEC_DEGRADE_OPTION_ENA(1U)
+
+#define A_MAC_MTIP_RS_FEC_STATUS_0_0 0x38e04
+
+#define S_FEC_STATUS_0_14 14
+#define V_FEC_STATUS_0_14(x) ((x) << S_FEC_STATUS_0_14)
+#define F_FEC_STATUS_0_14 V_FEC_STATUS_0_14(1U)
+
+#define S_FEC_STATUS_0_11 8
+#define M_FEC_STATUS_0_11 0xfU
+#define V_FEC_STATUS_0_11(x) ((x) << S_FEC_STATUS_0_11)
+#define G_FEC_STATUS_0_11(x) (((x) >> S_FEC_STATUS_0_11) & M_FEC_STATUS_0_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_0 V_RS_FEC_DEGRADE_SER_RECEIVED0_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_1 V_RS_FEC_DEGRADE_SER_RECEIVED0_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_2 V_RS_FEC_DEGRADE_SER_RECEIVED0_2(1U)
+
+#define S_FEC_STATUS_0_4 4
+#define V_FEC_STATUS_0_4(x) ((x) << S_FEC_STATUS_0_4)
+#define F_FEC_STATUS_0_4 V_FEC_STATUS_0_4(1U)
+
+#define S_FEC_STATUS_0_3 3
+#define V_FEC_STATUS_0_3(x) ((x) << S_FEC_STATUS_0_3)
+#define F_FEC_STATUS_0_3 V_FEC_STATUS_0_3(1U)
+
+#define S_FEC_STATUS_0_2 2
+#define V_FEC_STATUS_0_2(x) ((x) << S_FEC_STATUS_0_2)
+#define F_FEC_STATUS_0_2 V_FEC_STATUS_0_2(1U)
+
+#define S_FEC_STATUS_0_1 1
+#define V_FEC_STATUS_0_1(x) ((x) << S_FEC_STATUS_0_1)
+#define F_FEC_STATUS_0_1 V_FEC_STATUS_0_1(1U)
+
+#define S_FEC_STATUS_0_0 0
+#define V_FEC_STATUS_0_0(x) ((x) << S_FEC_STATUS_0_0)
+#define F_FEC_STATUS_0_0 V_FEC_STATUS_0_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_0 0x38e08
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_0 0x38e0c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_0 0x38e10
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_0 0x38e14
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_0 0x38e18
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_0 0x38e1c
+
+#define S_DEC_TRESH 0
+#define M_DEC_TRESH 0x3fU
+#define V_DEC_TRESH(x) ((x) << S_DEC_TRESH)
+#define G_DEC_TRESH(x) (((x) >> S_DEC_TRESH) & M_DEC_TRESH)
+
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_1 0x38e20
+#define A_MAC_MTIP_RS_FEC_STATUS_0_1 0x38e24
+
+#define S_FEC_STATUS_1_14 14
+#define V_FEC_STATUS_1_14(x) ((x) << S_FEC_STATUS_1_14)
+#define F_FEC_STATUS_1_14 V_FEC_STATUS_1_14(1U)
+
+#define S_FEC_STATUS_1_11 8
+#define M_FEC_STATUS_1_11 0xfU
+#define V_FEC_STATUS_1_11(x) ((x) << S_FEC_STATUS_1_11)
+#define G_FEC_STATUS_1_11(x) (((x) >> S_FEC_STATUS_1_11) & M_FEC_STATUS_1_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_0 V_RS_FEC_DEGRADE_SER_RECEIVED1_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_1 V_RS_FEC_DEGRADE_SER_RECEIVED1_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_2 V_RS_FEC_DEGRADE_SER_RECEIVED1_2(1U)
+
+#define S_FEC_STATUS_1_4 4
+#define V_FEC_STATUS_1_4(x) ((x) << S_FEC_STATUS_1_4)
+#define F_FEC_STATUS_1_4 V_FEC_STATUS_1_4(1U)
+
+#define S_FEC_STATUS_1_3 3
+#define V_FEC_STATUS_1_3(x) ((x) << S_FEC_STATUS_1_3)
+#define F_FEC_STATUS_1_3 V_FEC_STATUS_1_3(1U)
+
+#define S_FEC_STATUS_1_2 2
+#define V_FEC_STATUS_1_2(x) ((x) << S_FEC_STATUS_1_2)
+#define F_FEC_STATUS_1_2 V_FEC_STATUS_1_2(1U)
+
+#define S_FEC_STATUS_1_1 1
+#define V_FEC_STATUS_1_1(x) ((x) << S_FEC_STATUS_1_1)
+#define F_FEC_STATUS_1_1 V_FEC_STATUS_1_1(1U)
+
+#define S_FEC_STATUS_1_0 0
+#define V_FEC_STATUS_1_0(x) ((x) << S_FEC_STATUS_1_0)
+#define F_FEC_STATUS_1_0 V_FEC_STATUS_1_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_1 0x38e28
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_1 0x38e2c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_1 0x38e30
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_1 0x38e34
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_1 0x38e38
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_1 0x38e3c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_2 0x38e40
+#define A_MAC_MTIP_RS_FEC_STATUS_0_2 0x38e44
+
+#define S_FEC_STATUS_2_14 14
+#define V_FEC_STATUS_2_14(x) ((x) << S_FEC_STATUS_2_14)
+#define F_FEC_STATUS_2_14 V_FEC_STATUS_2_14(1U)
+
+#define S_FEC_STATUS_2_11 8
+#define M_FEC_STATUS_2_11 0xfU
+#define V_FEC_STATUS_2_11(x) ((x) << S_FEC_STATUS_2_11)
+#define G_FEC_STATUS_2_11(x) (((x) >> S_FEC_STATUS_2_11) & M_FEC_STATUS_2_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_0 V_RS_FEC_DEGRADE_SER_RECEIVED2_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_1 V_RS_FEC_DEGRADE_SER_RECEIVED2_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_2 V_RS_FEC_DEGRADE_SER_RECEIVED2_2(1U)
+
+#define S_FEC_STATUS_2_4 4
+#define V_FEC_STATUS_2_4(x) ((x) << S_FEC_STATUS_2_4)
+#define F_FEC_STATUS_2_4 V_FEC_STATUS_2_4(1U)
+
+#define S_FEC_STATUS_2_3 3
+#define V_FEC_STATUS_2_3(x) ((x) << S_FEC_STATUS_2_3)
+#define F_FEC_STATUS_2_3 V_FEC_STATUS_2_3(1U)
+
+#define S_FEC_STATUS_2_2 2
+#define V_FEC_STATUS_2_2(x) ((x) << S_FEC_STATUS_2_2)
+#define F_FEC_STATUS_2_2 V_FEC_STATUS_2_2(1U)
+
+#define S_FEC_STATUS_2_1 1
+#define V_FEC_STATUS_2_1(x) ((x) << S_FEC_STATUS_2_1)
+#define F_FEC_STATUS_2_1 V_FEC_STATUS_2_1(1U)
+
+#define S_FEC_STATUS_2_0 0
+#define V_FEC_STATUS_2_0(x) ((x) << S_FEC_STATUS_2_0)
+#define F_FEC_STATUS_2_0 V_FEC_STATUS_2_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_2 0x38e48
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_2 0x38e4c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_2 0x38e50
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_2 0x38e54
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_2 0x38e58
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_2 0x38e5c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_3 0x38e60
+#define A_MAC_MTIP_RS_FEC_STATUS_0_3 0x38e64
+
+#define S_FEC_STATUS_3_14 14
+#define V_FEC_STATUS_3_14(x) ((x) << S_FEC_STATUS_3_14)
+#define F_FEC_STATUS_3_14 V_FEC_STATUS_3_14(1U)
+
+#define S_FEC_STATUS_3_11 8
+#define M_FEC_STATUS_3_11 0xfU
+#define V_FEC_STATUS_3_11(x) ((x) << S_FEC_STATUS_3_11)
+#define G_FEC_STATUS_3_11(x) (((x) >> S_FEC_STATUS_3_11) & M_FEC_STATUS_3_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_0 V_RS_FEC_DEGRADE_SER_RECEIVED3_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_1 V_RS_FEC_DEGRADE_SER_RECEIVED3_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_2 V_RS_FEC_DEGRADE_SER_RECEIVED3_2(1U)
+
+#define S_FEC_STATUS_3_4 4
+#define V_FEC_STATUS_3_4(x) ((x) << S_FEC_STATUS_3_4)
+#define F_FEC_STATUS_3_4 V_FEC_STATUS_3_4(1U)
+
+#define S_FEC_STATUS_3_3 3
+#define V_FEC_STATUS_3_3(x) ((x) << S_FEC_STATUS_3_3)
+#define F_FEC_STATUS_3_3 V_FEC_STATUS_3_3(1U)
+
+#define S_FEC_STATUS_3_2 2
+#define V_FEC_STATUS_3_2(x) ((x) << S_FEC_STATUS_3_2)
+#define F_FEC_STATUS_3_2 V_FEC_STATUS_3_2(1U)
+
+#define S_FEC_STATUS_3_1 1
+#define V_FEC_STATUS_3_1(x) ((x) << S_FEC_STATUS_3_1)
+#define F_FEC_STATUS_3_1 V_FEC_STATUS_3_1(1U)
+
+#define S_FEC_STATUS_3_0 0
+#define V_FEC_STATUS_3_0(x) ((x) << S_FEC_STATUS_3_0)
+#define F_FEC_STATUS_3_0 V_FEC_STATUS_3_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_3 0x38e68
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_3 0x38e6c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_3 0x38e70
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_3 0x38e74
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_3 0x38e78
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_3 0x38e7c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_4 0x38e80
+#define A_MAC_MTIP_RS_FEC_STATUS_0_4 0x38e84
+
+#define S_FEC_STATUS_4_14 14
+#define V_FEC_STATUS_4_14(x) ((x) << S_FEC_STATUS_4_14)
+#define F_FEC_STATUS_4_14 V_FEC_STATUS_4_14(1U)
+
+#define S_FEC_STATUS_4_11 8
+#define M_FEC_STATUS_4_11 0xfU
+#define V_FEC_STATUS_4_11(x) ((x) << S_FEC_STATUS_4_11)
+#define G_FEC_STATUS_4_11(x) (((x) >> S_FEC_STATUS_4_11) & M_FEC_STATUS_4_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_0 V_RS_FEC_DEGRADE_SER_RECEIVED4_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_1 V_RS_FEC_DEGRADE_SER_RECEIVED4_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_2 V_RS_FEC_DEGRADE_SER_RECEIVED4_2(1U)
+
+#define S_FEC_STATUS_4_4 4
+#define V_FEC_STATUS_4_4(x) ((x) << S_FEC_STATUS_4_4)
+#define F_FEC_STATUS_4_4 V_FEC_STATUS_4_4(1U)
+
+#define S_FEC_STATUS_4_3 3
+#define V_FEC_STATUS_4_3(x) ((x) << S_FEC_STATUS_4_3)
+#define F_FEC_STATUS_4_3 V_FEC_STATUS_4_3(1U)
+
+#define S_FEC_STATUS_4_2 2
+#define V_FEC_STATUS_4_2(x) ((x) << S_FEC_STATUS_4_2)
+#define F_FEC_STATUS_4_2 V_FEC_STATUS_4_2(1U)
+
+#define S_FEC_STATUS_4_1 1
+#define V_FEC_STATUS_4_1(x) ((x) << S_FEC_STATUS_4_1)
+#define F_FEC_STATUS_4_1 V_FEC_STATUS_4_1(1U)
+
+#define S_FEC_STATUS_4_0 0
+#define V_FEC_STATUS_4_0(x) ((x) << S_FEC_STATUS_4_0)
+#define F_FEC_STATUS_4_0 V_FEC_STATUS_4_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_4 0x38e88
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_4 0x38e8c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_4 0x38e90
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_4 0x38e94
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_4 0x38e98
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_4 0x38e9c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_5 0x38ea0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_5 0x38ea4
+
+#define S_FEC_STATUS_5_14 14
+#define V_FEC_STATUS_5_14(x) ((x) << S_FEC_STATUS_5_14)
+#define F_FEC_STATUS_5_14 V_FEC_STATUS_5_14(1U)
+
+#define S_FEC_STATUS_5_11 8
+#define M_FEC_STATUS_5_11 0xfU
+#define V_FEC_STATUS_5_11(x) ((x) << S_FEC_STATUS_5_11)
+#define G_FEC_STATUS_5_11(x) (((x) >> S_FEC_STATUS_5_11) & M_FEC_STATUS_5_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_0 V_RS_FEC_DEGRADE_SER_RECEIVED5_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_1 V_RS_FEC_DEGRADE_SER_RECEIVED5_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_2 V_RS_FEC_DEGRADE_SER_RECEIVED5_2(1U)
+
+#define S_FEC_STATUS_5_4 4
+#define V_FEC_STATUS_5_4(x) ((x) << S_FEC_STATUS_5_4)
+#define F_FEC_STATUS_5_4 V_FEC_STATUS_5_4(1U)
+
+#define S_FEC_STATUS_5_3 3
+#define V_FEC_STATUS_5_3(x) ((x) << S_FEC_STATUS_5_3)
+#define F_FEC_STATUS_5_3 V_FEC_STATUS_5_3(1U)
+
+#define S_FEC_STATUS_5_2 2
+#define V_FEC_STATUS_5_2(x) ((x) << S_FEC_STATUS_5_2)
+#define F_FEC_STATUS_5_2 V_FEC_STATUS_5_2(1U)
+
+#define S_FEC_STATUS_5_1 1
+#define V_FEC_STATUS_5_1(x) ((x) << S_FEC_STATUS_5_1)
+#define F_FEC_STATUS_5_1 V_FEC_STATUS_5_1(1U)
+
+#define S_FEC_STATUS_5_0 0
+#define V_FEC_STATUS_5_0(x) ((x) << S_FEC_STATUS_5_0)
+#define F_FEC_STATUS_5_0 V_FEC_STATUS_5_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_5 0x38ea8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_5 0x38eac
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_5 0x38eb0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_5 0x38eb4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_5 0x38eb8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_5 0x38ebc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_6 0x38ec0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_6 0x38ec4
+
+#define S_FEC_STATUS_6_14 14
+#define V_FEC_STATUS_6_14(x) ((x) << S_FEC_STATUS_6_14)
+#define F_FEC_STATUS_6_14 V_FEC_STATUS_6_14(1U)
+
+#define S_FEC_STATUS_6_11 8
+#define M_FEC_STATUS_6_11 0xfU
+#define V_FEC_STATUS_6_11(x) ((x) << S_FEC_STATUS_6_11)
+#define G_FEC_STATUS_6_11(x) (((x) >> S_FEC_STATUS_6_11) & M_FEC_STATUS_6_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_0 V_RS_FEC_DEGRADE_SER_RECEIVED6_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_1 V_RS_FEC_DEGRADE_SER_RECEIVED6_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_2 V_RS_FEC_DEGRADE_SER_RECEIVED6_2(1U)
+
+#define S_FEC_STATUS_6_4 4
+#define V_FEC_STATUS_6_4(x) ((x) << S_FEC_STATUS_6_4)
+#define F_FEC_STATUS_6_4 V_FEC_STATUS_6_4(1U)
+
+#define S_FEC_STATUS_6_3 3
+#define V_FEC_STATUS_6_3(x) ((x) << S_FEC_STATUS_6_3)
+#define F_FEC_STATUS_6_3 V_FEC_STATUS_6_3(1U)
+
+#define S_FEC_STATUS_6_2 2
+#define V_FEC_STATUS_6_2(x) ((x) << S_FEC_STATUS_6_2)
+#define F_FEC_STATUS_6_2 V_FEC_STATUS_6_2(1U)
+
+#define S_FEC_STATUS_6_1 1
+#define V_FEC_STATUS_6_1(x) ((x) << S_FEC_STATUS_6_1)
+#define F_FEC_STATUS_6_1 V_FEC_STATUS_6_1(1U)
+
+#define S_FEC_STATUS_6_0 0
+#define V_FEC_STATUS_6_0(x) ((x) << S_FEC_STATUS_6_0)
+#define F_FEC_STATUS_6_0 V_FEC_STATUS_6_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_6 0x38ec8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_6 0x38ecc
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_6 0x38ed0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_6 0x38ed4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_6 0x38ed8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_6 0x38edc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_7 0x38ee0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_7 0x38ee4
+
+#define S_FEC_STATUS_7_14 14
+#define V_FEC_STATUS_7_14(x) ((x) << S_FEC_STATUS_7_14)
+#define F_FEC_STATUS_7_14 V_FEC_STATUS_7_14(1U)
+
+#define S_FEC_STATUS_7_11 8
+#define M_FEC_STATUS_7_11 0xfU
+#define V_FEC_STATUS_7_11(x) ((x) << S_FEC_STATUS_7_11)
+#define G_FEC_STATUS_7_11(x) (((x) >> S_FEC_STATUS_7_11) & M_FEC_STATUS_7_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_0 V_RS_FEC_DEGRADE_SER_RECEIVED7_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_1 V_RS_FEC_DEGRADE_SER_RECEIVED7_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_2 V_RS_FEC_DEGRADE_SER_RECEIVED7_2(1U)
+
+#define S_FEC_STATUS_7_4 4
+#define V_FEC_STATUS_7_4(x) ((x) << S_FEC_STATUS_7_4)
+#define F_FEC_STATUS_7_4 V_FEC_STATUS_7_4(1U)
+
+#define S_FEC_STATUS_7_3 3
+#define V_FEC_STATUS_7_3(x) ((x) << S_FEC_STATUS_7_3)
+#define F_FEC_STATUS_7_3 V_FEC_STATUS_7_3(1U)
+
+#define S_FEC_STATUS_7_2 2
+#define V_FEC_STATUS_7_2(x) ((x) << S_FEC_STATUS_7_2)
+#define F_FEC_STATUS_7_2 V_FEC_STATUS_7_2(1U)
+
+#define S_FEC_STATUS_7_1 1
+#define V_FEC_STATUS_7_1(x) ((x) << S_FEC_STATUS_7_1)
+#define F_FEC_STATUS_7_1 V_FEC_STATUS_7_1(1U)
+
+#define S_FEC_STATUS_7_0 0
+#define V_FEC_STATUS_7_0(x) ((x) << S_FEC_STATUS_7_0)
+#define F_FEC_STATUS_7_0 V_FEC_STATUS_7_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_7 0x38ee8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_7 0x38eec
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_7 0x38ef0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_7 0x38ef4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_7 0x38ef8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_7 0x38efc
+#define A_MAC_MTIP_RS_FEC_HISER_CW 0x38f00
+
+#define S_HISER_CW 0
+#define M_HISER_CW 0xffffU
+#define V_HISER_CW(x) ((x) << S_HISER_CW)
+#define G_HISER_CW(x) (((x) >> S_HISER_CW) & M_HISER_CW)
+
+#define A_MAC_MTIP_RS_FEC_HISER_THRESH 0x38f04
+
+#define S_HISER_THRESH 0
+#define M_HISER_THRESH 0xffffU
+#define V_HISER_THRESH(x) ((x) << S_HISER_THRESH)
+#define G_HISER_THRESH(x) (((x) >> S_HISER_THRESH) & M_HISER_THRESH)
+
+#define A_MAC_MTIP_RS_FEC_HISER_TIME 0x38f08
+
+#define S_HISER_TIME 0
+#define M_HISER_TIME 0xffffU
+#define V_HISER_TIME(x) ((x) << S_HISER_TIME)
+#define G_HISER_TIME(x) (((x) >> S_HISER_TIME) & M_HISER_TIME)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW 0x38f10
+
+#define S_DEGRADE_SET_CW 0
+#define M_DEGRADE_SET_CW 0xffffU
+#define V_DEGRADE_SET_CW(x) ((x) << S_DEGRADE_SET_CW)
+#define G_DEGRADE_SET_CW(x) (((x) >> S_DEGRADE_SET_CW) & M_DEGRADE_SET_CW)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW_HI 0x38f14
+
+#define S_DEGRADE_SET_CW_HI 0
+#define M_DEGRADE_SET_CW_HI 0xffffU
+#define V_DEGRADE_SET_CW_HI(x) ((x) << S_DEGRADE_SET_CW_HI)
+#define G_DEGRADE_SET_CW_HI(x) (((x) >> S_DEGRADE_SET_CW_HI) & M_DEGRADE_SET_CW_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH 0x38f18
+
+#define S_DEGRADE_SET_THRESH 0
+#define M_DEGRADE_SET_THRESH 0xffffU
+#define V_DEGRADE_SET_THRESH(x) ((x) << S_DEGRADE_SET_THRESH)
+#define G_DEGRADE_SET_THRESH(x) (((x) >> S_DEGRADE_SET_THRESH) & M_DEGRADE_SET_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH_HI 0x38f1c
+
+#define S_DEGRADE_SET_THRESH_HI 0
+#define M_DEGRADE_SET_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_THRESH_HI(x) ((x) << S_DEGRADE_SET_THRESH_HI)
+#define G_DEGRADE_SET_THRESH_HI(x) (((x) >> S_DEGRADE_SET_THRESH_HI) & M_DEGRADE_SET_THRESH_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR 0x38f20
+
+#define S_DEGRADE_SET_CLEAR 0
+#define M_DEGRADE_SET_CLEAR 0xffffU
+#define V_DEGRADE_SET_CLEAR(x) ((x) << S_DEGRADE_SET_CLEAR)
+#define G_DEGRADE_SET_CLEAR(x) (((x) >> S_DEGRADE_SET_CLEAR) & M_DEGRADE_SET_CLEAR)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_HI 0x38f24
+
+#define S_DEGRADE_SET_CLEAR_HI 0
+#define M_DEGRADE_SET_CLEAR_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_HI(x) ((x) << S_DEGRADE_SET_CLEAR_HI)
+#define G_DEGRADE_SET_CLEAR_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_HI) & M_DEGRADE_SET_CLEAR_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR_THRESH 0x38f28
+
+#define S_DEGRADE_SET_CLEAR_THRESH 0
+#define M_DEGRADE_SET_CLEAR_THRESH 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH)
+#define G_DEGRADE_SET_CLEAR_THRESH(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH) & M_DEGRADE_SET_CLEAR_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_THRESH_HI 0x38f2c
+
+#define S_DEGRADE_SET_CLEAR_THRESH_HI 0
+#define M_DEGRADE_SET_CLEAR_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH_HI(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH_HI)
+#define G_DEGRADE_SET_CLEAR_THRESH_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH_HI) & M_DEGRADE_SET_CLEAR_THRESH_HI)
+
+#define A_MAC_MTIP_RS_VL0_0 0x38f80
+#define A_MAC_MTIP_RS_VL0_1 0x38f84
+#define A_MAC_MTIP_RS_VL1_0 0x38f88
+#define A_MAC_MTIP_RS_VL1_1 0x38f8c
+#define A_MAC_MTIP_RS_VL2_0 0x38f90
+#define A_MAC_MTIP_RS_VL2_1 0x38f94
+#define A_MAC_MTIP_RS_VL3_0 0x38f98
+#define A_MAC_MTIP_RS_VL3_1 0x38f9c
+#define A_MAC_MTIP_RS_VL4_0 0x38fa0
+#define A_MAC_MTIP_RS_VL4_1 0x38fa4
+#define A_MAC_MTIP_RS_VL5_0 0x38fa8
+#define A_MAC_MTIP_RS_VL5_1 0x38fac
+#define A_MAC_MTIP_RS_VL6_0 0x38fb0
+#define A_MAC_MTIP_RS_VL6_1 0x38fb4
+#define A_MAC_MTIP_RS_VL7_0 0x38fb8
+#define A_MAC_MTIP_RS_VL7_1 0x38fbc
+#define A_MAC_MTIP_RS_VL8_0 0x38fc0
+#define A_MAC_MTIP_RS_VL8_1 0x38fc4
+#define A_MAC_MTIP_RS_VL9_0 0x38fc8
+#define A_MAC_MTIP_RS_VL9_1 0x38fcc
+#define A_MAC_MTIP_RS_VL10_0 0x38fd0
+#define A_MAC_MTIP_RS_VL10_1 0x38fd4
+#define A_MAC_MTIP_RS_VL11_0 0x38fd8
+#define A_MAC_MTIP_RS_VL11_1 0x38fdc
+#define A_MAC_MTIP_RS_VL12_0 0x38fe0
+#define A_MAC_MTIP_RS_VL12_1 0x38fe4
+#define A_MAC_MTIP_RS_VL13_0 0x38fe8
+#define A_MAC_MTIP_RS_VL13_1 0x38fec
+#define A_MAC_MTIP_RS_VL14_0 0x38ff0
+#define A_MAC_MTIP_RS_VL14_1 0x38ff4
+#define A_MAC_MTIP_RS_VL15_0 0x38ff8
+#define A_MAC_MTIP_RS_VL15_1 0x38ffc
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_LO 0x39000
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_HI 0x39004
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_LO 0x39008
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_HI 0x3900c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_LO 0x39010
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_HI 0x39014
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_LO 0x39018
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_HI 0x3901c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_LO 0x39020
+
+#define S_RS_FEC_SYMBLERR4_LO 0
+#define V_RS_FEC_SYMBLERR4_LO(x) ((x) << S_RS_FEC_SYMBLERR4_LO)
+#define F_RS_FEC_SYMBLERR4_LO V_RS_FEC_SYMBLERR4_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_HI 0x39024
+
+#define S_RS_FEC_SYMBLERR4_HI 0
+#define V_RS_FEC_SYMBLERR4_HI(x) ((x) << S_RS_FEC_SYMBLERR4_HI)
+#define F_RS_FEC_SYMBLERR4_HI V_RS_FEC_SYMBLERR4_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_LO 0x39028
+
+#define S_RS_FEC_SYMBLERR5_LO 0
+#define V_RS_FEC_SYMBLERR5_LO(x) ((x) << S_RS_FEC_SYMBLERR5_LO)
+#define F_RS_FEC_SYMBLERR5_LO V_RS_FEC_SYMBLERR5_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_HI 0x3902c
+
+#define S_RS_FEC_SYMBLERR5_HI 0
+#define V_RS_FEC_SYMBLERR5_HI(x) ((x) << S_RS_FEC_SYMBLERR5_HI)
+#define F_RS_FEC_SYMBLERR5_HI V_RS_FEC_SYMBLERR5_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_LO 0x39030
+
+#define S_RS_FEC_SYMBLERR6_LO 0
+#define V_RS_FEC_SYMBLERR6_LO(x) ((x) << S_RS_FEC_SYMBLERR6_LO)
+#define F_RS_FEC_SYMBLERR6_LO V_RS_FEC_SYMBLERR6_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_HI 0x39034
+
+#define S_RS_FEC_SYMBLERR6_HI 0
+#define V_RS_FEC_SYMBLERR6_HI(x) ((x) << S_RS_FEC_SYMBLERR6_HI)
+#define F_RS_FEC_SYMBLERR6_HI V_RS_FEC_SYMBLERR6_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_LO 0x39038
+
+#define S_RS_FEC_SYMBLERR7_LO 0
+#define V_RS_FEC_SYMBLERR7_LO(x) ((x) << S_RS_FEC_SYMBLERR7_LO)
+#define F_RS_FEC_SYMBLERR7_LO V_RS_FEC_SYMBLERR7_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_HI 0x3903c
+
+#define S_RS_FEC_SYMBLERR7_HI 0
+#define V_RS_FEC_SYMBLERR7_HI(x) ((x) << S_RS_FEC_SYMBLERR7_HI)
+#define F_RS_FEC_SYMBLERR7_HI V_RS_FEC_SYMBLERR7_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_LO 0x39040
+
+#define S_RS_FEC_SYMBLERR8_LO 0
+#define V_RS_FEC_SYMBLERR8_LO(x) ((x) << S_RS_FEC_SYMBLERR8_LO)
+#define F_RS_FEC_SYMBLERR8_LO V_RS_FEC_SYMBLERR8_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_HI 0x39044
+
+#define S_RS_FEC_SYMBLERR8_HI 0
+#define V_RS_FEC_SYMBLERR8_HI(x) ((x) << S_RS_FEC_SYMBLERR8_HI)
+#define F_RS_FEC_SYMBLERR8_HI V_RS_FEC_SYMBLERR8_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_LO 0x39048
+
+#define S_RS_FEC_SYMBLERR9_LO 0
+#define V_RS_FEC_SYMBLERR9_LO(x) ((x) << S_RS_FEC_SYMBLERR9_LO)
+#define F_RS_FEC_SYMBLERR9_LO V_RS_FEC_SYMBLERR9_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_HI 0x3904c
+
+#define S_RS_FEC_SYMBLERR9_HI 0
+#define V_RS_FEC_SYMBLERR9_HI(x) ((x) << S_RS_FEC_SYMBLERR9_HI)
+#define F_RS_FEC_SYMBLERR9_HI V_RS_FEC_SYMBLERR9_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_LO 0x39050
+
+#define S_RS_FEC_SYMBLERR10_LO 0
+#define V_RS_FEC_SYMBLERR10_LO(x) ((x) << S_RS_FEC_SYMBLERR10_LO)
+#define F_RS_FEC_SYMBLERR10_LO V_RS_FEC_SYMBLERR10_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_HI 0x39054
+
+#define S_RS_FEC_SYMBLERR10_HI 0
+#define V_RS_FEC_SYMBLERR10_HI(x) ((x) << S_RS_FEC_SYMBLERR10_HI)
+#define F_RS_FEC_SYMBLERR10_HI V_RS_FEC_SYMBLERR10_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_LO 0x39058
+
+#define S_RS_FEC_SYMBLERR11_LO 0
+#define V_RS_FEC_SYMBLERR11_LO(x) ((x) << S_RS_FEC_SYMBLERR11_LO)
+#define F_RS_FEC_SYMBLERR11_LO V_RS_FEC_SYMBLERR11_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_HI 0x3905c
+
+#define S_RS_FEC_SYMBLERR11_HI 0
+#define V_RS_FEC_SYMBLERR11_HI(x) ((x) << S_RS_FEC_SYMBLERR11_HI)
+#define F_RS_FEC_SYMBLERR11_HI V_RS_FEC_SYMBLERR11_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_LO 0x39060
+
+#define S_RS_FEC_SYMBLERR12_LO 0
+#define V_RS_FEC_SYMBLERR12_LO(x) ((x) << S_RS_FEC_SYMBLERR12_LO)
+#define F_RS_FEC_SYMBLERR12_LO V_RS_FEC_SYMBLERR12_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_HI 0x39064
+
+#define S_RS_FEC_SYMBLERR12_HI 0
+#define V_RS_FEC_SYMBLERR12_HI(x) ((x) << S_RS_FEC_SYMBLERR12_HI)
+#define F_RS_FEC_SYMBLERR12_HI V_RS_FEC_SYMBLERR12_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_LO 0x39068
+
+#define S_RS_FEC_SYMBLERR13_LO 0
+#define V_RS_FEC_SYMBLERR13_LO(x) ((x) << S_RS_FEC_SYMBLERR13_LO)
+#define F_RS_FEC_SYMBLERR13_LO V_RS_FEC_SYMBLERR13_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_HI 0x3906c
+
+#define S_RS_FEC_SYMBLERR13_HI 0
+#define V_RS_FEC_SYMBLERR13_HI(x) ((x) << S_RS_FEC_SYMBLERR13_HI)
+#define F_RS_FEC_SYMBLERR13_HI V_RS_FEC_SYMBLERR13_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_LO 0x39070
+
+#define S_RS_FEC_SYMBLERR14_LO 0
+#define V_RS_FEC_SYMBLERR14_LO(x) ((x) << S_RS_FEC_SYMBLERR14_LO)
+#define F_RS_FEC_SYMBLERR14_LO V_RS_FEC_SYMBLERR14_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_HI 0x39074
+
+#define S_RS_FEC_SYMBLERR14_HI 0
+#define V_RS_FEC_SYMBLERR14_HI(x) ((x) << S_RS_FEC_SYMBLERR14_HI)
+#define F_RS_FEC_SYMBLERR14_HI V_RS_FEC_SYMBLERR14_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_LO 0x39078
+
+#define S_RS_FEC_SYMBLERR15_LO 0
+#define V_RS_FEC_SYMBLERR15_LO(x) ((x) << S_RS_FEC_SYMBLERR15_LO)
+#define F_RS_FEC_SYMBLERR15_LO V_RS_FEC_SYMBLERR15_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_HI 0x3907c
+
+#define S_RS_FEC_SYMBLERR15_HI 0
+#define V_RS_FEC_SYMBLERR15_HI(x) ((x) << S_RS_FEC_SYMBLERR15_HI)
+#define F_RS_FEC_SYMBLERR15_HI V_RS_FEC_SYMBLERR15_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_CONTROL 0x39080
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_1 0x39084
+
+#define S_VENDOR_INFO_1_AMPS_LOCK 0
+#define V_VENDOR_INFO_1_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_1_AMPS_LOCK)
+#define F_VENDOR_INFO_1_AMPS_LOCK V_VENDOR_INFO_1_AMPS_LOCK(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_2 0x39088
+
+#define S_VENDOR_INFO_2_AMPS_LOCK 0
+#define M_VENDOR_INFO_2_AMPS_LOCK 0xffffU
+#define V_VENDOR_INFO_2_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_2_AMPS_LOCK)
+#define G_VENDOR_INFO_2_AMPS_LOCK(x) (((x) >> S_VENDOR_INFO_2_AMPS_LOCK) & M_VENDOR_INFO_2_AMPS_LOCK)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_REVISION 0x3908c
+#define A_MAC_MTIP_RS_FEC_VENDOR_ALIGN_STATUS 0x39090
+
+#define S_RS_FEC_VENDOR_ALIGN_STATUS 0
+#define M_RS_FEC_VENDOR_ALIGN_STATUS 0xffffU
+#define V_RS_FEC_VENDOR_ALIGN_STATUS(x) ((x) << S_RS_FEC_VENDOR_ALIGN_STATUS)
+#define G_RS_FEC_VENDOR_ALIGN_STATUS(x) (((x) >> S_RS_FEC_VENDOR_ALIGN_STATUS) & M_RS_FEC_VENDOR_ALIGN_STATUS)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_0 0x39100
+
+#define S_FEC74_FEC_ABILITY_0_B1 1
+#define V_FEC74_FEC_ABILITY_0_B1(x) ((x) << S_FEC74_FEC_ABILITY_0_B1)
+#define F_FEC74_FEC_ABILITY_0_B1 V_FEC74_FEC_ABILITY_0_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_0_B0 0
+#define V_FEC74_FEC_ABILITY_0_B0(x) ((x) << S_FEC74_FEC_ABILITY_0_B0)
+#define F_FEC74_FEC_ABILITY_0_B0 V_FEC74_FEC_ABILITY_0_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_0 0x39104
+
+#define S_FEC_ENABLE_ERROR_INDICATION 1
+#define V_FEC_ENABLE_ERROR_INDICATION(x) ((x) << S_FEC_ENABLE_ERROR_INDICATION)
+#define F_FEC_ENABLE_ERROR_INDICATION V_FEC_ENABLE_ERROR_INDICATION(1U)
+
+#define S_T7_FEC_ENABLE 0
+#define V_T7_FEC_ENABLE(x) ((x) << S_T7_FEC_ENABLE)
+#define F_T7_FEC_ENABLE V_T7_FEC_ENABLE(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_STATUS_0 0x39108
+
+#define S_FEC_LOCKED_1 1
+#define V_FEC_LOCKED_1(x) ((x) << S_FEC_LOCKED_1)
+#define F_FEC_LOCKED_1 V_FEC_LOCKED_1(1U)
+
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_0 0x3910c
+
+#define S_VL0_CCW_LO 0
+#define M_VL0_CCW_LO 0xffffU
+#define V_VL0_CCW_LO(x) ((x) << S_VL0_CCW_LO)
+#define G_VL0_CCW_LO(x) (((x) >> S_VL0_CCW_LO) & M_VL0_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_0 0x39110
+
+#define S_VL0_NCCW_LO 0
+#define M_VL0_NCCW_LO 0xffffU
+#define V_VL0_NCCW_LO(x) ((x) << S_VL0_NCCW_LO)
+#define G_VL0_NCCW_LO(x) (((x) >> S_VL0_NCCW_LO) & M_VL0_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_0 0x39114
+
+#define S_VL1_CCW_LO 0
+#define M_VL1_CCW_LO 0xffffU
+#define V_VL1_CCW_LO(x) ((x) << S_VL1_CCW_LO)
+#define G_VL1_CCW_LO(x) (((x) >> S_VL1_CCW_LO) & M_VL1_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_0 0x39118
+
+#define S_VL1_NCCW_LO 0
+#define M_VL1_NCCW_LO 0xffffU
+#define V_VL1_NCCW_LO(x) ((x) << S_VL1_NCCW_LO)
+#define G_VL1_NCCW_LO(x) (((x) >> S_VL1_NCCW_LO) & M_VL1_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_COUNTER_HI_0 0x3911c
+
+#define S_COUNTER_HI 0
+#define M_COUNTER_HI 0xffffU
+#define V_COUNTER_HI(x) ((x) << S_COUNTER_HI)
+#define G_COUNTER_HI(x) (((x) >> S_COUNTER_HI) & M_COUNTER_HI)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_1 0x39120
+
+#define S_FEC74_FEC_ABILITY_1_B1 1
+#define V_FEC74_FEC_ABILITY_1_B1(x) ((x) << S_FEC74_FEC_ABILITY_1_B1)
+#define F_FEC74_FEC_ABILITY_1_B1 V_FEC74_FEC_ABILITY_1_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_1_B0 0
+#define V_FEC74_FEC_ABILITY_1_B0(x) ((x) << S_FEC74_FEC_ABILITY_1_B0)
+#define F_FEC74_FEC_ABILITY_1_B0 V_FEC74_FEC_ABILITY_1_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_1 0x39124
+#define A_MAC_MTIP_FEC74_FEC_STATUS_1 0x39128
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_1 0x3912c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_1 0x39130
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_1 0x39134
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_1 0x39138
+#define A_MAC_MTIP_FEC74_COUNTER_HI_1 0x3913c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_2 0x39140
+
+#define S_FEC74_FEC_ABILITY_2_B1 1
+#define V_FEC74_FEC_ABILITY_2_B1(x) ((x) << S_FEC74_FEC_ABILITY_2_B1)
+#define F_FEC74_FEC_ABILITY_2_B1 V_FEC74_FEC_ABILITY_2_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_2_B0 0
+#define V_FEC74_FEC_ABILITY_2_B0(x) ((x) << S_FEC74_FEC_ABILITY_2_B0)
+#define F_FEC74_FEC_ABILITY_2_B0 V_FEC74_FEC_ABILITY_2_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_2 0x39144
+#define A_MAC_MTIP_FEC74_FEC_STATUS_2 0x39148
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_2 0x3914c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_2 0x39150
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_2 0x39154
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_2 0x39158
+#define A_MAC_MTIP_FEC74_COUNTER_HI_2 0x3915c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_3 0x39160
+
+#define S_FEC74_FEC_ABILITY_3_B1 1
+#define V_FEC74_FEC_ABILITY_3_B1(x) ((x) << S_FEC74_FEC_ABILITY_3_B1)
+#define F_FEC74_FEC_ABILITY_3_B1 V_FEC74_FEC_ABILITY_3_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_3_B0 0
+#define V_FEC74_FEC_ABILITY_3_B0(x) ((x) << S_FEC74_FEC_ABILITY_3_B0)
+#define F_FEC74_FEC_ABILITY_3_B0 V_FEC74_FEC_ABILITY_3_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_3 0x39164
+#define A_MAC_MTIP_FEC74_FEC_STATUS_3 0x39168
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_3 0x3916c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_3 0x39170
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_3 0x39174
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_3 0x39178
+#define A_MAC_MTIP_FEC74_COUNTER_HI_3 0x3917c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_4 0x39180
+
+#define S_FEC74_FEC_ABILITY_4_B1 1
+#define V_FEC74_FEC_ABILITY_4_B1(x) ((x) << S_FEC74_FEC_ABILITY_4_B1)
+#define F_FEC74_FEC_ABILITY_4_B1 V_FEC74_FEC_ABILITY_4_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_4_B0 0
+#define V_FEC74_FEC_ABILITY_4_B0(x) ((x) << S_FEC74_FEC_ABILITY_4_B0)
+#define F_FEC74_FEC_ABILITY_4_B0 V_FEC74_FEC_ABILITY_4_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_4 0x39184
+#define A_MAC_MTIP_FEC74_FEC_STATUS_4 0x39188
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_4 0x3918c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_4 0x39190
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_4 0x39194
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_4 0x39198
+#define A_MAC_MTIP_FEC74_COUNTER_HI_4 0x3919c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_5 0x391a0
+
+#define S_FEC74_FEC_ABILITY_5_B1 1
+#define V_FEC74_FEC_ABILITY_5_B1(x) ((x) << S_FEC74_FEC_ABILITY_5_B1)
+#define F_FEC74_FEC_ABILITY_5_B1 V_FEC74_FEC_ABILITY_5_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_5_B0 0
+#define V_FEC74_FEC_ABILITY_5_B0(x) ((x) << S_FEC74_FEC_ABILITY_5_B0)
+#define F_FEC74_FEC_ABILITY_5_B0 V_FEC74_FEC_ABILITY_5_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_5 0x391a4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_5 0x391a8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_5 0x391ac
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_5 0x391b0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_5 0x391b4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_5 0x391b8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_5 0x391bc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_6 0x391c0
+
+#define S_FEC74_FEC_ABILITY_6_B1 1
+#define V_FEC74_FEC_ABILITY_6_B1(x) ((x) << S_FEC74_FEC_ABILITY_6_B1)
+#define F_FEC74_FEC_ABILITY_6_B1 V_FEC74_FEC_ABILITY_6_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_6_B0 0
+#define V_FEC74_FEC_ABILITY_6_B0(x) ((x) << S_FEC74_FEC_ABILITY_6_B0)
+#define F_FEC74_FEC_ABILITY_6_B0 V_FEC74_FEC_ABILITY_6_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_6 0x391c4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_6 0x391c8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_6 0x391cc
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_6 0x391d0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_6 0x391d4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_6 0x391d8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_6 0x391dc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_7 0x391e0
+
+#define S_FEC74_FEC_ABILITY_7_B1 1
+#define V_FEC74_FEC_ABILITY_7_B1(x) ((x) << S_FEC74_FEC_ABILITY_7_B1)
+#define F_FEC74_FEC_ABILITY_7_B1 V_FEC74_FEC_ABILITY_7_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_7_B0 0
+#define V_FEC74_FEC_ABILITY_7_B0(x) ((x) << S_FEC74_FEC_ABILITY_7_B0)
+#define F_FEC74_FEC_ABILITY_7_B0 V_FEC74_FEC_ABILITY_7_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_7 0x391e4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_7 0x391e8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_7 0x391ec
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_7 0x391f0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_7 0x391f4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_7 0x391f8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_7 0x391fc
+#define A_MAC_BEAN0_CTL 0x39200
+#define A_MAC_BEAN0_STATUS 0x39204
+#define A_MAC_BEAN0_ABILITY_0 0x39208
+
+#define S_BEAN0_REM_FAULT 13
+#define V_BEAN0_REM_FAULT(x) ((x) << S_BEAN0_REM_FAULT)
+#define F_BEAN0_REM_FAULT V_BEAN0_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_ABILITY_1 0x3920c
+#define A_MAC_BEAN0_ABILITY_2 0x39210
+
+#define S_BEAN0_AB_2_15_12 12
+#define M_BEAN0_AB_2_15_12 0xfU
+#define V_BEAN0_AB_2_15_12(x) ((x) << S_BEAN0_AB_2_15_12)
+#define G_BEAN0_AB_2_15_12(x) (((x) >> S_BEAN0_AB_2_15_12) & M_BEAN0_AB_2_15_12)
+
+#define S_BEAN0_AB_2_11_0 0
+#define M_BEAN0_AB_2_11_0 0xfffU
+#define V_BEAN0_AB_2_11_0(x) ((x) << S_BEAN0_AB_2_11_0)
+#define G_BEAN0_AB_2_11_0(x) (((x) >> S_BEAN0_AB_2_11_0) & M_BEAN0_AB_2_11_0)
+
+#define A_MAC_BEAN0_REM_ABILITY_0 0x39214
+
+#define S_BEAN0_ABL_REM_FAULT 13
+#define V_BEAN0_ABL_REM_FAULT(x) ((x) << S_BEAN0_ABL_REM_FAULT)
+#define F_BEAN0_ABL_REM_FAULT V_BEAN0_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_REM_ABILITY_1 0x39218
+#define A_MAC_BEAN0_REM_ABILITY_2 0x3921c
+
+#define S_BEAN0_REM_AB_15_12 12
+#define M_BEAN0_REM_AB_15_12 0xfU
+#define V_BEAN0_REM_AB_15_12(x) ((x) << S_BEAN0_REM_AB_15_12)
+#define G_BEAN0_REM_AB_15_12(x) (((x) >> S_BEAN0_REM_AB_15_12) & M_BEAN0_REM_AB_15_12)
+
+#define S_BEAN0_REM_AB_11_0 0
+#define M_BEAN0_REM_AB_11_0 0xfffU
+#define V_BEAN0_REM_AB_11_0(x) ((x) << S_BEAN0_REM_AB_11_0)
+#define G_BEAN0_REM_AB_11_0(x) (((x) >> S_BEAN0_REM_AB_11_0) & M_BEAN0_REM_AB_11_0)
+
+#define A_MAC_BEAN0_MS_COUNT 0x39220
+#define A_MAC_BEAN0_XNP_0 0x39224
+#define A_MAC_BEAN0_XNP_1 0x39228
+#define A_MAC_BEAN0_XNP_2 0x3922c
+#define A_MAC_LP_BEAN0_XNP_0 0x39230
+#define A_MAC_LP_BEAN0_XNP_1 0x39234
+#define A_MAC_LP_BEAN0_XNP_2 0x39238
+#define A_MAC_BEAN0_ETH_STATUS 0x3923c
+
+#define S_5GKR 15
+#define V_5GKR(x) ((x) << S_5GKR)
+#define F_5GKR V_5GKR(1U)
+
+#define S_2P5GKX 14
+#define V_2P5GKX(x) ((x) << S_2P5GKX)
+#define F_2P5GKX V_2P5GKX(1U)
+
+#define S_25G_KR 13
+#define V_25G_KR(x) ((x) << S_25G_KR)
+#define F_25G_KR V_25G_KR(1U)
+
+#define S_25G_KR_S 12
+#define V_25G_KR_S(x) ((x) << S_25G_KR_S)
+#define F_25G_KR_S V_25G_KR_S(1U)
+
+#define S_RS_FEC 7
+#define V_RS_FEC(x) ((x) << S_RS_FEC)
+#define F_RS_FEC V_RS_FEC(1U)
+
+#define S_FC_FEC 4
+#define V_FC_FEC(x) ((x) << S_FC_FEC)
+#define F_FC_FEC V_FC_FEC(1U)
+
+#define A_MAC_BEAN0_ETH_STATUS_2 0x39240
+
+#define S_RS_FEC_NEGOTIATED 6
+#define V_RS_FEC_NEGOTIATED(x) ((x) << S_RS_FEC_NEGOTIATED)
+#define F_RS_FEC_NEGOTIATED V_RS_FEC_NEGOTIATED(1U)
+
+#define S_400GKR4CR4 5
+#define V_400GKR4CR4(x) ((x) << S_400GKR4CR4)
+#define F_400GKR4CR4 V_400GKR4CR4(1U)
+
+#define S_200GKR2CR2 4
+#define V_200GKR2CR2(x) ((x) << S_200GKR2CR2)
+#define F_200GKR2CR2 V_200GKR2CR2(1U)
+
+#define S_100GKR1CR1 3
+#define V_100GKR1CR1(x) ((x) << S_100GKR1CR1)
+#define F_100GKR1CR1 V_100GKR1CR1(1U)
+
+#define S_200GKR4CR4 2
+#define V_200GKR4CR4(x) ((x) << S_200GKR4CR4)
+#define F_200GKR4CR4 V_200GKR4CR4(1U)
+
+#define S_100GKR2CR2 1
+#define V_100GKR2CR2(x) ((x) << S_100GKR2CR2)
+#define F_100GKR2CR2 V_100GKR2CR2(1U)
+
+#define S_50GKRCR 0
+#define V_50GKRCR(x) ((x) << S_50GKRCR)
+#define F_50GKRCR V_50GKRCR(1U)
+
+#define A_MAC_BEAN1_CTL 0x39300
+#define A_MAC_BEAN1_STATUS 0x39304
+#define A_MAC_BEAN1_ABILITY_0 0x39308
+
+#define S_BEAN1_REM_FAULT 13
+#define V_BEAN1_REM_FAULT(x) ((x) << S_BEAN1_REM_FAULT)
+#define F_BEAN1_REM_FAULT V_BEAN1_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_ABILITY_1 0x3930c
+#define A_MAC_BEAN1_ABILITY_2 0x39310
+
+#define S_BEAN1_AB_2_15_12 12
+#define M_BEAN1_AB_2_15_12 0xfU
+#define V_BEAN1_AB_2_15_12(x) ((x) << S_BEAN1_AB_2_15_12)
+#define G_BEAN1_AB_2_15_12(x) (((x) >> S_BEAN1_AB_2_15_12) & M_BEAN1_AB_2_15_12)
+
+#define S_BEAN1_AB_2_11_0 0
+#define M_BEAN1_AB_2_11_0 0xfffU
+#define V_BEAN1_AB_2_11_0(x) ((x) << S_BEAN1_AB_2_11_0)
+#define G_BEAN1_AB_2_11_0(x) (((x) >> S_BEAN1_AB_2_11_0) & M_BEAN1_AB_2_11_0)
+
+#define A_MAC_BEAN1_REM_ABILITY_0 0x39314
+
+#define S_BEAN1_ABL_REM_FAULT 13
+#define V_BEAN1_ABL_REM_FAULT(x) ((x) << S_BEAN1_ABL_REM_FAULT)
+#define F_BEAN1_ABL_REM_FAULT V_BEAN1_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_REM_ABILITY_1 0x39318
+#define A_MAC_BEAN1_REM_ABILITY_2 0x3931c
+
+#define S_BEAN1_REM_AB_15_12 12
+#define M_BEAN1_REM_AB_15_12 0xfU
+#define V_BEAN1_REM_AB_15_12(x) ((x) << S_BEAN1_REM_AB_15_12)
+#define G_BEAN1_REM_AB_15_12(x) (((x) >> S_BEAN1_REM_AB_15_12) & M_BEAN1_REM_AB_15_12)
+
+#define S_BEAN1_REM_AB_11_0 0
+#define M_BEAN1_REM_AB_11_0 0xfffU
+#define V_BEAN1_REM_AB_11_0(x) ((x) << S_BEAN1_REM_AB_11_0)
+#define G_BEAN1_REM_AB_11_0(x) (((x) >> S_BEAN1_REM_AB_11_0) & M_BEAN1_REM_AB_11_0)
+
+#define A_MAC_BEAN1_MS_COUNT 0x39320
+#define A_MAC_BEAN1_XNP_0 0x39324
+#define A_MAC_BEAN1_XNP_1 0x39328
+#define A_MAC_BEAN1_XNP_2 0x3932c
+#define A_MAC_LP_BEAN1_XNP_0 0x39330
+#define A_MAC_LP_BEAN1_XNP_1 0x39334
+#define A_MAC_LP_BEAN1_XNP_2 0x39338
+#define A_MAC_BEAN1_ETH_STATUS 0x3933c
+#define A_MAC_BEAN1_ETH_STATUS_2 0x39340
+#define A_MAC_BEAN2_CTL 0x39400
+#define A_MAC_BEAN2_STATUS 0x39404
+#define A_MAC_BEAN2_ABILITY_0 0x39408
+
+#define S_BEAN2_REM_FAULT 13
+#define V_BEAN2_REM_FAULT(x) ((x) << S_BEAN2_REM_FAULT)
+#define F_BEAN2_REM_FAULT V_BEAN2_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_ABILITY_1 0x3940c
+#define A_MAC_BEAN2_ABILITY_2 0x39410
+
+#define S_BEAN2_AB_2_15_12 12
+#define M_BEAN2_AB_2_15_12 0xfU
+#define V_BEAN2_AB_2_15_12(x) ((x) << S_BEAN2_AB_2_15_12)
+#define G_BEAN2_AB_2_15_12(x) (((x) >> S_BEAN2_AB_2_15_12) & M_BEAN2_AB_2_15_12)
+
+#define S_BEAN2_AB_2_11_0 0
+#define M_BEAN2_AB_2_11_0 0xfffU
+#define V_BEAN2_AB_2_11_0(x) ((x) << S_BEAN2_AB_2_11_0)
+#define G_BEAN2_AB_2_11_0(x) (((x) >> S_BEAN2_AB_2_11_0) & M_BEAN2_AB_2_11_0)
+
+#define A_MAC_BEAN2_REM_ABILITY_0 0x39414
+
+#define S_BEAN2_ABL_REM_FAULT 13
+#define V_BEAN2_ABL_REM_FAULT(x) ((x) << S_BEAN2_ABL_REM_FAULT)
+#define F_BEAN2_ABL_REM_FAULT V_BEAN2_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_REM_ABILITY_1 0x39418
+#define A_MAC_BEAN2_REM_ABILITY_2 0x3941c
+
+#define S_BEAN2_REM_AB_15_12 12
+#define M_BEAN2_REM_AB_15_12 0xfU
+#define V_BEAN2_REM_AB_15_12(x) ((x) << S_BEAN2_REM_AB_15_12)
+#define G_BEAN2_REM_AB_15_12(x) (((x) >> S_BEAN2_REM_AB_15_12) & M_BEAN2_REM_AB_15_12)
+
+#define S_BEAN2_REM_AB_11_0 0
+#define M_BEAN2_REM_AB_11_0 0xfffU
+#define V_BEAN2_REM_AB_11_0(x) ((x) << S_BEAN2_REM_AB_11_0)
+#define G_BEAN2_REM_AB_11_0(x) (((x) >> S_BEAN2_REM_AB_11_0) & M_BEAN2_REM_AB_11_0)
+
+#define A_MAC_BEAN2_MS_COUNT 0x39420
+#define A_MAC_BEAN2_XNP_0 0x39424
+#define A_MAC_BEAN2_XNP_1 0x39428
+#define A_MAC_BEAN2_XNP_2 0x3942c
+#define A_MAC_LP_BEAN2_XNP_0 0x39430
+#define A_MAC_LP_BEAN2_XNP_1 0x39434
+#define A_MAC_LP_BEAN2_XNP_2 0x39438
+#define A_MAC_BEAN2_ETH_STATUS 0x3943c
+#define A_MAC_BEAN2_ETH_STATUS_2 0x39440
+#define A_MAC_BEAN3_CTL 0x39500
+#define A_MAC_BEAN3_STATUS 0x39504
+#define A_MAC_BEAN3_ABILITY_0 0x39508
+
+#define S_BEAN3_REM_FAULT 13
+#define V_BEAN3_REM_FAULT(x) ((x) << S_BEAN3_REM_FAULT)
+#define F_BEAN3_REM_FAULT V_BEAN3_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_ABILITY_1 0x3950c
+#define A_MAC_BEAN3_ABILITY_2 0x39510
+
+#define S_BEAN3_AB_2_15_12 12
+#define M_BEAN3_AB_2_15_12 0xfU
+#define V_BEAN3_AB_2_15_12(x) ((x) << S_BEAN3_AB_2_15_12)
+#define G_BEAN3_AB_2_15_12(x) (((x) >> S_BEAN3_AB_2_15_12) & M_BEAN3_AB_2_15_12)
+
+#define S_BEAN3_AB_2_11_0 0
+#define M_BEAN3_AB_2_11_0 0xfffU
+#define V_BEAN3_AB_2_11_0(x) ((x) << S_BEAN3_AB_2_11_0)
+#define G_BEAN3_AB_2_11_0(x) (((x) >> S_BEAN3_AB_2_11_0) & M_BEAN3_AB_2_11_0)
+
+#define A_MAC_BEAN3_REM_ABILITY_0 0x39514
+
+#define S_BEAN3_ABL_REM_FAULT 13
+#define V_BEAN3_ABL_REM_FAULT(x) ((x) << S_BEAN3_ABL_REM_FAULT)
+#define F_BEAN3_ABL_REM_FAULT V_BEAN3_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_REM_ABILITY_1 0x39518
+#define A_MAC_BEAN3_REM_ABILITY_2 0x3951c
+
+#define S_BEAN3_REM_AB_15_12 12
+#define M_BEAN3_REM_AB_15_12 0xfU
+#define V_BEAN3_REM_AB_15_12(x) ((x) << S_BEAN3_REM_AB_15_12)
+#define G_BEAN3_REM_AB_15_12(x) (((x) >> S_BEAN3_REM_AB_15_12) & M_BEAN3_REM_AB_15_12)
+
+#define S_BEAN3_REM_AB_11_0 0
+#define M_BEAN3_REM_AB_11_0 0xfffU
+#define V_BEAN3_REM_AB_11_0(x) ((x) << S_BEAN3_REM_AB_11_0)
+#define G_BEAN3_REM_AB_11_0(x) (((x) >> S_BEAN3_REM_AB_11_0) & M_BEAN3_REM_AB_11_0)
+
+#define A_MAC_BEAN3_MS_COUNT 0x39520
+#define A_MAC_BEAN3_XNP_0 0x39524
+#define A_MAC_BEAN3_XNP_1 0x39528
+#define A_MAC_BEAN3_XNP_2 0x3952c
+#define A_MAC_LP_BEAN3_XNP_0 0x39530
+#define A_MAC_LP_BEAN3_XNP_1 0x39534
+#define A_MAC_LP_BEAN3_XNP_2 0x39538
+#define A_MAC_BEAN3_ETH_STATUS 0x3953c
+#define A_MAC_BEAN3_ETH_STATUS_2 0x39540
+#define A_MAC_BEAN4_CTL 0x39600
+#define A_MAC_BEAN4_STATUS 0x39604
+#define A_MAC_BEAN4_ABILITY_0 0x39608
+
+#define S_BEAN4_REM_FAULT 13
+#define V_BEAN4_REM_FAULT(x) ((x) << S_BEAN4_REM_FAULT)
+#define F_BEAN4_REM_FAULT V_BEAN4_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_ABILITY_1 0x3960c
+#define A_MAC_BEAN4_ABILITY_2 0x39610
+
+#define S_BEAN4_AB_2_15_12 12
+#define M_BEAN4_AB_2_15_12 0xfU
+#define V_BEAN4_AB_2_15_12(x) ((x) << S_BEAN4_AB_2_15_12)
+#define G_BEAN4_AB_2_15_12(x) (((x) >> S_BEAN4_AB_2_15_12) & M_BEAN4_AB_2_15_12)
+
+#define S_BEAN4_AB_2_11_0 0
+#define M_BEAN4_AB_2_11_0 0xfffU
+#define V_BEAN4_AB_2_11_0(x) ((x) << S_BEAN4_AB_2_11_0)
+#define G_BEAN4_AB_2_11_0(x) (((x) >> S_BEAN4_AB_2_11_0) & M_BEAN4_AB_2_11_0)
+
+#define A_MAC_BEAN4_REM_ABILITY_0 0x39614
+
+#define S_BEAN4_ABL_REM_FAULT 13
+#define V_BEAN4_ABL_REM_FAULT(x) ((x) << S_BEAN4_ABL_REM_FAULT)
+#define F_BEAN4_ABL_REM_FAULT V_BEAN4_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_REM_ABILITY_1 0x39618
+#define A_MAC_BEAN4_REM_ABILITY_2 0x3961c
+
+#define S_BEAN4_REM_AB_15_12 12
+#define M_BEAN4_REM_AB_15_12 0xfU
+#define V_BEAN4_REM_AB_15_12(x) ((x) << S_BEAN4_REM_AB_15_12)
+#define G_BEAN4_REM_AB_15_12(x) (((x) >> S_BEAN4_REM_AB_15_12) & M_BEAN4_REM_AB_15_12)
+
+#define S_BEAN4_REM_AB_11_0 0
+#define M_BEAN4_REM_AB_11_0 0xfffU
+#define V_BEAN4_REM_AB_11_0(x) ((x) << S_BEAN4_REM_AB_11_0)
+#define G_BEAN4_REM_AB_11_0(x) (((x) >> S_BEAN4_REM_AB_11_0) & M_BEAN4_REM_AB_11_0)
+
+#define A_MAC_BEAN4_MS_COUNT 0x39620
+#define A_MAC_BEAN4_XNP_0 0x39624
+#define A_MAC_BEAN4_XNP_1 0x39628
+#define A_MAC_BEAN4_XNP_2 0x3962c
+#define A_MAC_LP_BEAN4_XNP_0 0x39630
+#define A_MAC_LP_BEAN4_XNP_1 0x39634
+#define A_MAC_LP_BEAN4_XNP_2 0x39638
+#define A_MAC_BEAN4_ETH_STATUS 0x3963c
+#define A_MAC_BEAN4_ETH_STATUS_2 0x39640
+#define A_MAC_BEAN5_CTL 0x39700
+#define A_MAC_BEAN5_STATUS 0x39704
+#define A_MAC_BEAN5_ABILITY_0 0x39708
+
+#define S_BEAN5_REM_FAULT 13
+#define V_BEAN5_REM_FAULT(x) ((x) << S_BEAN5_REM_FAULT)
+#define F_BEAN5_REM_FAULT V_BEAN5_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_ABILITY_1 0x3970c
+#define A_MAC_BEAN5_ABILITY_2 0x39710
+
+#define S_BEAN5_AB_2_15_12 12
+#define M_BEAN5_AB_2_15_12 0xfU
+#define V_BEAN5_AB_2_15_12(x) ((x) << S_BEAN5_AB_2_15_12)
+#define G_BEAN5_AB_2_15_12(x) (((x) >> S_BEAN5_AB_2_15_12) & M_BEAN5_AB_2_15_12)
+
+#define S_BEAN5_AB_2_11_0 0
+#define M_BEAN5_AB_2_11_0 0xfffU
+#define V_BEAN5_AB_2_11_0(x) ((x) << S_BEAN5_AB_2_11_0)
+#define G_BEAN5_AB_2_11_0(x) (((x) >> S_BEAN5_AB_2_11_0) & M_BEAN5_AB_2_11_0)
+
+#define A_MAC_BEAN5_REM_ABILITY_0 0x39714
+
+#define S_BEAN5_ABL_REM_FAULT 13
+#define V_BEAN5_ABL_REM_FAULT(x) ((x) << S_BEAN5_ABL_REM_FAULT)
+#define F_BEAN5_ABL_REM_FAULT V_BEAN5_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_REM_ABILITY_1 0x39718
+#define A_MAC_BEAN5_REM_ABILITY_2 0x3971c
+
+#define S_BEAN5_REM_AB_15_12 12
+#define M_BEAN5_REM_AB_15_12 0xfU
+#define V_BEAN5_REM_AB_15_12(x) ((x) << S_BEAN5_REM_AB_15_12)
+#define G_BEAN5_REM_AB_15_12(x) (((x) >> S_BEAN5_REM_AB_15_12) & M_BEAN5_REM_AB_15_12)
+
+#define S_BEAN5_REM_AB_11_0 0
+#define M_BEAN5_REM_AB_11_0 0xfffU
+#define V_BEAN5_REM_AB_11_0(x) ((x) << S_BEAN5_REM_AB_11_0)
+#define G_BEAN5_REM_AB_11_0(x) (((x) >> S_BEAN5_REM_AB_11_0) & M_BEAN5_REM_AB_11_0)
+
+#define A_MAC_BEAN5_MS_COUNT 0x39720
+#define A_MAC_BEAN5_XNP_0 0x39724
+#define A_MAC_BEAN5_XNP_1 0x39728
+#define A_MAC_BEAN5_XNP_2 0x3972c
+#define A_MAC_LP_BEAN5_XNP_0 0x39730
+#define A_MAC_LP_BEAN5_XNP_1 0x39734
+#define A_MAC_LP_BEAN5_XNP_2 0x39738
+#define A_MAC_BEAN5_ETH_STATUS 0x3973c
+#define A_MAC_BEAN5_ETH_STATUS_2 0x39740
+#define A_MAC_BEAN6_CTL 0x39800
+#define A_MAC_BEAN6_STATUS 0x39804
+#define A_MAC_BEAN6_ABILITY_0 0x39808
+
+#define S_BEAN6_REM_FAULT 13
+#define V_BEAN6_REM_FAULT(x) ((x) << S_BEAN6_REM_FAULT)
+#define F_BEAN6_REM_FAULT V_BEAN6_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_ABILITY_1 0x3980c
+#define A_MAC_BEAN6_ABILITY_2 0x39810
+
+#define S_BEAN6_AB_2_15_12 12
+#define M_BEAN6_AB_2_15_12 0xfU
+#define V_BEAN6_AB_2_15_12(x) ((x) << S_BEAN6_AB_2_15_12)
+#define G_BEAN6_AB_2_15_12(x) (((x) >> S_BEAN6_AB_2_15_12) & M_BEAN6_AB_2_15_12)
+
+#define S_BEAN6_AB_2_11_0 0
+#define M_BEAN6_AB_2_11_0 0xfffU
+#define V_BEAN6_AB_2_11_0(x) ((x) << S_BEAN6_AB_2_11_0)
+#define G_BEAN6_AB_2_11_0(x) (((x) >> S_BEAN6_AB_2_11_0) & M_BEAN6_AB_2_11_0)
+
+#define A_MAC_BEAN6_REM_ABILITY_0 0x39814
+
+#define S_BEAN6_ABL_REM_FAULT 13
+#define V_BEAN6_ABL_REM_FAULT(x) ((x) << S_BEAN6_ABL_REM_FAULT)
+#define F_BEAN6_ABL_REM_FAULT V_BEAN6_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_REM_ABILITY_1 0x39818
+#define A_MAC_BEAN6_REM_ABILITY_2 0x3981c
+
+#define S_BEAN6_REM_AB_15_12 12
+#define M_BEAN6_REM_AB_15_12 0xfU
+#define V_BEAN6_REM_AB_15_12(x) ((x) << S_BEAN6_REM_AB_15_12)
+#define G_BEAN6_REM_AB_15_12(x) (((x) >> S_BEAN6_REM_AB_15_12) & M_BEAN6_REM_AB_15_12)
+
+#define S_BEAN6_REM_AB_11_0 0
+#define M_BEAN6_REM_AB_11_0 0xfffU
+#define V_BEAN6_REM_AB_11_0(x) ((x) << S_BEAN6_REM_AB_11_0)
+#define G_BEAN6_REM_AB_11_0(x) (((x) >> S_BEAN6_REM_AB_11_0) & M_BEAN6_REM_AB_11_0)
+
+#define A_MAC_BEAN6_MS_COUNT 0x39820
+#define A_MAC_BEAN6_XNP_0 0x39824
+#define A_MAC_BEAN6_XNP_1 0x39828
+#define A_MAC_BEAN6_XNP_2 0x3982c
+#define A_MAC_LP_BEAN6_XNP_0 0x39830
+#define A_MAC_LP_BEAN6_XNP_1 0x39834
+#define A_MAC_LP_BEAN6_XNP_2 0x39838
+#define A_MAC_BEAN6_ETH_STATUS 0x3983c
+#define A_MAC_BEAN6_ETH_STATUS_2 0x39840
+#define A_MAC_BEAN7_CTL 0x39900
+#define A_MAC_BEAN7_STATUS 0x39904
+#define A_MAC_BEAN7_ABILITY_0 0x39908
+
+#define S_BEAN7_REM_FAULT 13
+#define V_BEAN7_REM_FAULT(x) ((x) << S_BEAN7_REM_FAULT)
+#define F_BEAN7_REM_FAULT V_BEAN7_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_ABILITY_1 0x3990c
+#define A_MAC_BEAN7_ABILITY_2 0x39910
+
+#define S_BEAN7_AB_2_15_12 12
+#define M_BEAN7_AB_2_15_12 0xfU
+#define V_BEAN7_AB_2_15_12(x) ((x) << S_BEAN7_AB_2_15_12)
+#define G_BEAN7_AB_2_15_12(x) (((x) >> S_BEAN7_AB_2_15_12) & M_BEAN7_AB_2_15_12)
+
+#define S_BEAN7_AB_2_11_0 0
+#define M_BEAN7_AB_2_11_0 0xfffU
+#define V_BEAN7_AB_2_11_0(x) ((x) << S_BEAN7_AB_2_11_0)
+#define G_BEAN7_AB_2_11_0(x) (((x) >> S_BEAN7_AB_2_11_0) & M_BEAN7_AB_2_11_0)
+
+#define A_MAC_BEAN7_REM_ABILITY_0 0x39914
+
+#define S_BEAN7_ABL_REM_FAULT 13
+#define V_BEAN7_ABL_REM_FAULT(x) ((x) << S_BEAN7_ABL_REM_FAULT)
+#define F_BEAN7_ABL_REM_FAULT V_BEAN7_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_REM_ABILITY_1 0x39918
+#define A_MAC_BEAN7_REM_ABILITY_2 0x3991c
+
+#define S_BEAN7_REM_AB_15_12 12
+#define M_BEAN7_REM_AB_15_12 0xfU
+#define V_BEAN7_REM_AB_15_12(x) ((x) << S_BEAN7_REM_AB_15_12)
+#define G_BEAN7_REM_AB_15_12(x) (((x) >> S_BEAN7_REM_AB_15_12) & M_BEAN7_REM_AB_15_12)
+
+#define S_BEAN7_REM_AB_11_0 0
+#define M_BEAN7_REM_AB_11_0 0xfffU
+#define V_BEAN7_REM_AB_11_0(x) ((x) << S_BEAN7_REM_AB_11_0)
+#define G_BEAN7_REM_AB_11_0(x) (((x) >> S_BEAN7_REM_AB_11_0) & M_BEAN7_REM_AB_11_0)
+
+#define A_MAC_BEAN7_MS_COUNT 0x39920
+#define A_MAC_BEAN7_XNP_0 0x39924
+#define A_MAC_BEAN7_XNP_1 0x39928
+#define A_MAC_BEAN7_XNP_2 0x3992c
+#define A_MAC_LP_BEAN7_XNP_0 0x39930
+#define A_MAC_LP_BEAN7_XNP_1 0x39934
+#define A_MAC_LP_BEAN7_XNP_2 0x39938
+#define A_MAC_BEAN7_ETH_STATUS 0x3993c
+#define A_MAC_BEAN7_ETH_STATUS_2 0x39940
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI 0x39a00
+#define A_MAC_MTIP_ETHERSTATS_STATN_STATUS 0x39a04
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONFIG 0x39a08
+
+#define S_T7_RESET 31
+#define V_T7_RESET(x) ((x) << S_T7_RESET)
+#define F_T7_RESET V_T7_RESET(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONTROL 0x39a0c
+
+#define S_CMD_CLEAR_TX 31
+#define V_CMD_CLEAR_TX(x) ((x) << S_CMD_CLEAR_TX)
+#define F_CMD_CLEAR_TX V_CMD_CLEAR_TX(1U)
+
+#define S_CMD_CLEAR_RX 30
+#define V_CMD_CLEAR_RX(x) ((x) << S_CMD_CLEAR_RX)
+#define F_CMD_CLEAR_RX V_CMD_CLEAR_RX(1U)
+
+#define S_CLEAR_PRE 29
+#define V_CLEAR_PRE(x) ((x) << S_CLEAR_PRE)
+#define F_CLEAR_PRE V_CLEAR_PRE(1U)
+
+#define S_CMD_CAPTURE_TX 28
+#define V_CMD_CAPTURE_TX(x) ((x) << S_CMD_CAPTURE_TX)
+#define F_CMD_CAPTURE_TX V_CMD_CAPTURE_TX(1U)
+
+#define S_CMD_CAPTURE_RX 27
+#define V_CMD_CAPTURE_RX(x) ((x) << S_CMD_CAPTURE_RX)
+#define F_CMD_CAPTURE_RX V_CMD_CAPTURE_RX(1U)
+
+#define S_PORTMASK 0
+#define M_PORTMASK 0xffU
+#define V_PORTMASK(x) ((x) << S_PORTMASK)
+#define G_PORTMASK(x) (((x) >> S_PORTMASK) & M_PORTMASK)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_LO 0x39a10
+
+#define S_STATN_CLEARVALUE_LO 0
+#define V_STATN_CLEARVALUE_LO(x) ((x) << S_STATN_CLEARVALUE_LO)
+#define F_STATN_CLEARVALUE_LO V_STATN_CLEARVALUE_LO(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_HI 0x39a14
+
+#define S_STATN_CLEARVALUE_HI 0
+#define V_STATN_CLEARVALUE_HI(x) ((x) << S_STATN_CLEARVALUE_HI)
+#define F_STATN_CLEARVALUE_HI V_STATN_CLEARVALUE_HI(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI_1 0x39a1c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_0 0x39a20
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_1 0x39a24
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_2 0x39a28
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_3 0x39a2c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_4 0x39a30
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_5 0x39a34
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_6 0x39a38
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_7 0x39a3c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_8 0x39a40
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_9 0x39a44
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_10 0x39a48
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_11 0x39a4c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_12 0x39a50
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_13 0x39a54
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_14 0x39a58
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_15 0x39a5c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_16 0x39a60
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_17 0x39a64
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_18 0x39a68
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_19 0x39a6c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_20 0x39a70
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_21 0x39a74
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_22 0x39a78
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_23 0x39a7c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_24 0x39a80
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_25 0x39a84
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_26 0x39a88
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_27 0x39a8c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_28 0x39a90
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_29 0x39a94
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_30 0x39a98
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_31 0x39a9c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_32 0x39aa0
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_33 0x39aa4
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_34 0x39aa8
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS 0x39b00
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSRECEIVEDOK 0x39b04
+#define A_MAC_MTIP_ETHERSTATS0_AALIGNMENTERRORS 0x39b08
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESRECEIVED 0x39b0c
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMETOOLONGERRORS 0x39b10
+#define A_MAC_MTIP_ETHERSTATS0_AINRANGELENGTHERRORS 0x39b14
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESRECEIVEDOK 0x39b18
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMECHECKSEQUENCEERRORS 0x39b1c
+#define A_MAC_MTIP_ETHERSTATS0_VLANRECEIVEDOK 0x39b20
+#define A_MAC_MTIP_ETHERSTATS0_IFINERRORS_RX 0x39b24
+#define A_MAC_MTIP_ETHERSTATS0_IFINUCASTPKTS_RX 0x39b28
+#define A_MAC_MTIP_ETHERSTATS0_IFINMULTICASTPKTS_RX 0x39b2c
+#define A_MAC_MTIP_ETHERSTATS0_IFINBROADCASTPKTS_RX 0x39b30
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSDROPEVENTS_RX 0x39b34
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_RX 0x39b38
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSUNDERSIZEPKTS_RX 0x39b3c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_RX 0x39b40
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_RX 0x39b44
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_RX 0x39b48
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_RX 0x39b4c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39b50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39b54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39b58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOVERSIZEPKTS_RX 0x39b5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSJABBERS_RX 0x39b60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSFRAGMENTS_RX 0x39b64
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39b68
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39b6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39b70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39b74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39b78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39b7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39b80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39b84
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESRECEIVED_RX 0x39b88
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS 0x39b8c
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSRECEIVEDOK 0x39b90
+#define A_MAC_MTIP_ETHERSTATS1_AALIGNMENTERRORS 0x39b94
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESRECEIVED 0x39b98
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMETOOLONGERRORS 0x39b9c
+#define A_MAC_MTIP_ETHERSTATS1_AINRANGELENGTHERRORS 0x39ba0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESRECEIVEDOK 0x39ba4
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMECHECKSEQUENCEERRORS 0x39ba8
+#define A_MAC_MTIP_ETHERSTATS1_VLANRECEIVEDOK 0x39bac
+#define A_MAC_MTIP_ETHERSTATS1_IFINERRORS_RX 0x39bb0
+#define A_MAC_MTIP_ETHERSTATS1_IFINUCASTPKTS_RX 0x39bb4
+#define A_MAC_MTIP_ETHERSTATS1_IFINMULTICASTPKTS_RX 0x39bb8
+#define A_MAC_MTIP_ETHERSTATS1_IFINBROADCASTPKTS_RX 0x39bbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSDROPEVENTS_RX 0x39bc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_RX 0x39bc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSUNDERSIZEPKTS_RX 0x39bc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_RX 0x39bcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_RX 0x39bd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_RX 0x39bd4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_RX 0x39bd8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39bdc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39be0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39be4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOVERSIZEPKTS_RX 0x39be8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSJABBERS_RX 0x39bec
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSFRAGMENTS_RX 0x39bf0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39bf4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39bf8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39bfc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c00
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c04
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c08
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c0c
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c10
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESRECEIVED_RX 0x39c14
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS 0x39c18
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSRECEIVEDOK 0x39c1c
+#define A_MAC_MTIP_ETHERSTATS2_AALIGNMENTERRORS 0x39c20
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESRECEIVED 0x39c24
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMETOOLONGERRORS 0x39c28
+#define A_MAC_MTIP_ETHERSTATS2_AINRANGELENGTHERRORS 0x39c2c
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESRECEIVEDOK 0x39c30
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMECHECKSEQUENCEERRORS 0x39c34
+#define A_MAC_MTIP_ETHERSTATS2_VLANRECEIVEDOK 0x39c38
+#define A_MAC_MTIP_ETHERSTATS2_IFINERRORS_RX 0x39c3c
+#define A_MAC_MTIP_ETHERSTATS2_IFINUCASTPKTS_RX 0x39c40
+#define A_MAC_MTIP_ETHERSTATS2_IFINMULTICASTPKTS_RX 0x39c44
+#define A_MAC_MTIP_ETHERSTATS2_IFINBROADCASTPKTS_RX 0x39c48
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSDROPEVENTS_RX 0x39c4c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_RX 0x39c50
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSUNDERSIZEPKTS_RX 0x39c54
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_RX 0x39c58
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_RX 0x39c5c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_RX 0x39c60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_RX 0x39c64
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39c68
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39c6c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39c70
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOVERSIZEPKTS_RX 0x39c74
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSJABBERS_RX 0x39c78
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSFRAGMENTS_RX 0x39c7c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39c80
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39c84
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39c88
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c8c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c90
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c94
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c98
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c9c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESRECEIVED_RX 0x39ca0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS 0x39ca4
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSRECEIVEDOK 0x39ca8
+#define A_MAC_MTIP_ETHERSTATS3_AALIGNMENTERRORS 0x39cac
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESRECEIVED 0x39cb0
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMETOOLONGERRORS 0x39cb4
+#define A_MAC_MTIP_ETHERSTATS3_AINRANGELENGTHERRORS 0x39cb8
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESRECEIVEDOK 0x39cbc
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMECHECKSEQUENCEERRORS 0x39cc0
+#define A_MAC_MTIP_ETHERSTATS3_VLANRECEIVEDOK 0x39cc4
+#define A_MAC_MTIP_ETHERSTATS3_IFINERRORS_RX 0x39cc8
+#define A_MAC_MTIP_ETHERSTATS3_IFINUCASTPKTS_RX 0x39ccc
+#define A_MAC_MTIP_ETHERSTATS3_IFINMULTICASTPKTS_RX 0x39cd0
+#define A_MAC_MTIP_ETHERSTATS3_IFINBROADCASTPKTS_RX 0x39cd4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSDROPEVENTS_RX 0x39cd8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_RX 0x39cdc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSUNDERSIZEPKTS_RX 0x39ce0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_RX 0x39ce4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_RX 0x39ce8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_RX 0x39cec
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_RX 0x39cf0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39cf4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39cf8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39cfc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOVERSIZEPKTS_RX 0x39d00
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSJABBERS_RX 0x39d04
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSFRAGMENTS_RX 0x39d08
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39d0c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39d10
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39d14
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39d18
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39d1c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39d20
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39d24
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39d28
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESRECEIVED_RX 0x39d2c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS_TX 0x39d30
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSTRANSMITTEDOK_TX 0x39d34
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39d38
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESTRANSMITTEDOK_TX 0x39d3c
+#define A_MAC_MTIP_ETHERSTATS0_VLANTRANSMITTEDOK_TX 0x39d40
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTERRORS_TX 0x39d44
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTUCASTPKTS_TX 0x39d48
+#define A_MAC_MTIP_ETHERSTATS0IFOUTMULTICASTPKTS_TX 0x39d4c
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTBROADCASTPKTS_TX 0x39d50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_TX 0x39d54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_TX 0x39d58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_TX 0x39d5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_TX 0x39d60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39d64
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39d68
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39d6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39d70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39d74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39d78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39d7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39d80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39d84
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39d88
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39d8c
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESTRANSMITTED_TX 0x39d90
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_TX 0x39d94
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS_TX 0x39d98
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSTRANSMITTEDOK_TX 0x39d9c
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39da0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESTRANSMITTEDOK_TX 0x39da4
+#define A_MAC_MTIP_ETHERSTATS1_VLANTRANSMITTEDOK_TX 0x39da8
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTERRORS_TX 0x39dac
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTUCASTPKTS_TX 0x39db0
+#define A_MAC_MTIP_ETHERSTATS1IFOUTMULTICASTPKTS_TX 0x39db4
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTBROADCASTPKTS_TX 0x39db8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_TX 0x39dbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_TX 0x39dc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_TX 0x39dc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_TX 0x39dc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39dcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39dd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39dd4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39dd8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39ddc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39de0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39de4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39de8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39dec
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39df0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39df4
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESTRANSMITTED_TX 0x39df8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_TX 0x39dfc
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS_TX 0x39e00
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSTRANSMITTEDOK_TX 0x39e04
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e08
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESTRANSMITTEDOK_TX 0x39e0c
+#define A_MAC_MTIP_ETHERSTATS2_VLANTRANSMITTEDOK_TX 0x39e10
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTERRORS_TX 0x39e14
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTUCASTPKTS_TX 0x39e18
+#define A_MAC_MTIP_ETHERSTATS2IFOUTMULTICASTPKTS_TX 0x39e1c
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTBROADCASTPKTS_TX 0x39e20
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_TX 0x39e24
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e28
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e2c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e30
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e34
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39e38
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39e3c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39e40
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39e44
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39e48
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39e4c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39e50
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39e54
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39e58
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39e5c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESTRANSMITTED_TX 0x39e60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_TX 0x39e64
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS_TX 0x39e68
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSTRANSMITTEDOK_TX 0x39e6c
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e70
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESTRANSMITTEDOK_TX 0x39e74
+#define A_MAC_MTIP_ETHERSTATS3_VLANTRANSMITTEDOK_TX 0x39e78
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTERRORS_TX 0x39e7c
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTUCASTPKTS_TX 0x39e80
+#define A_MAC_MTIP_ETHERSTATS3IFOUTMULTICASTPKTS_TX 0x39e84
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTBROADCASTPKTS_TX 0x39e88
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_TX 0x39e8c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e90
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e94
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e98
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e9c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39ea0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39ea4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39ea8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39eac
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39eb0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39eb4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39eb8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39ebc
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39ec0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39ec4
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESTRANSMITTED_TX 0x39ec8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_TX 0x39ecc
+#define A_MAC_IOS_CTRL 0x3a000
+
+#define S_SUB_BLOCK_SEL 28
+#define M_SUB_BLOCK_SEL 0x7U
+#define V_SUB_BLOCK_SEL(x) ((x) << S_SUB_BLOCK_SEL)
+#define G_SUB_BLOCK_SEL(x) (((x) >> S_SUB_BLOCK_SEL) & M_SUB_BLOCK_SEL)
+
+#define S_QUAD_BROADCAST_EN 24
+#define V_QUAD_BROADCAST_EN(x) ((x) << S_QUAD_BROADCAST_EN)
+#define F_QUAD_BROADCAST_EN V_QUAD_BROADCAST_EN(1U)
+
+#define S_AUTO_INCR 20
+#define V_AUTO_INCR(x) ((x) << S_AUTO_INCR)
+#define F_AUTO_INCR V_AUTO_INCR(1U)
+
+#define S_T7_2_ADDR 0
+#define M_T7_2_ADDR 0x7ffffU
+#define V_T7_2_ADDR(x) ((x) << S_T7_2_ADDR)
+#define G_T7_2_ADDR(x) (((x) >> S_T7_2_ADDR) & M_T7_2_ADDR)
+
+#define A_MAC_IOS_DATA 0x3a004
+#define A_MAC_IOS_BGR_RST 0x3a050
+
+#define S_BGR_RSTN 0
+#define V_BGR_RSTN(x) ((x) << S_BGR_RSTN)
+#define F_BGR_RSTN V_BGR_RSTN(1U)
+
+#define A_MAC_IOS_BGR_CFG 0x3a054
+
+#define S_SOC_REFCLK_EN 0
+#define V_SOC_REFCLK_EN(x) ((x) << S_SOC_REFCLK_EN)
+#define F_SOC_REFCLK_EN V_SOC_REFCLK_EN(1U)
+
+#define A_MAC_IOS_QUAD0_CFG 0x3a058
+
+#define S_QUAD0_CH3_RSTN 5
+#define V_QUAD0_CH3_RSTN(x) ((x) << S_QUAD0_CH3_RSTN)
+#define F_QUAD0_CH3_RSTN V_QUAD0_CH3_RSTN(1U)
+
+#define S_QUAD0_CH2_RSTN 4
+#define V_QUAD0_CH2_RSTN(x) ((x) << S_QUAD0_CH2_RSTN)
+#define F_QUAD0_CH2_RSTN V_QUAD0_CH2_RSTN(1U)
+
+#define S_QUAD0_CH1_RSTN 3
+#define V_QUAD0_CH1_RSTN(x) ((x) << S_QUAD0_CH1_RSTN)
+#define F_QUAD0_CH1_RSTN V_QUAD0_CH1_RSTN(1U)
+
+#define S_QUAD0_CH0_RSTN 2
+#define V_QUAD0_CH0_RSTN(x) ((x) << S_QUAD0_CH0_RSTN)
+#define F_QUAD0_CH0_RSTN V_QUAD0_CH0_RSTN(1U)
+
+#define S_QUAD0_RSTN 1
+#define V_QUAD0_RSTN(x) ((x) << S_QUAD0_RSTN)
+#define F_QUAD0_RSTN V_QUAD0_RSTN(1U)
+
+#define S_PLL0_RSTN 0
+#define V_PLL0_RSTN(x) ((x) << S_PLL0_RSTN)
+#define F_PLL0_RSTN V_PLL0_RSTN(1U)
+
+#define A_MAC_IOS_QUAD1_CFG 0x3a05c
+
+#define S_QUAD1_CH3_RSTN 5
+#define V_QUAD1_CH3_RSTN(x) ((x) << S_QUAD1_CH3_RSTN)
+#define F_QUAD1_CH3_RSTN V_QUAD1_CH3_RSTN(1U)
+
+#define S_QUAD1_CH2_RSTN 4
+#define V_QUAD1_CH2_RSTN(x) ((x) << S_QUAD1_CH2_RSTN)
+#define F_QUAD1_CH2_RSTN V_QUAD1_CH2_RSTN(1U)
+
+#define S_QUAD1_CH1_RSTN 3
+#define V_QUAD1_CH1_RSTN(x) ((x) << S_QUAD1_CH1_RSTN)
+#define F_QUAD1_CH1_RSTN V_QUAD1_CH1_RSTN(1U)
+
+#define S_QUAD1_CH0_RSTN 2
+#define V_QUAD1_CH0_RSTN(x) ((x) << S_QUAD1_CH0_RSTN)
+#define F_QUAD1_CH0_RSTN V_QUAD1_CH0_RSTN(1U)
+
+#define S_QUAD1_RSTN 1
+#define V_QUAD1_RSTN(x) ((x) << S_QUAD1_RSTN)
+#define F_QUAD1_RSTN V_QUAD1_RSTN(1U)
+
+#define S_PLL1_RSTN 0
+#define V_PLL1_RSTN(x) ((x) << S_PLL1_RSTN)
+#define F_PLL1_RSTN V_PLL1_RSTN(1U)
+
+#define A_MAC_IOS_SCRATCHPAD0 0x3a060
+#define A_MAC_IOS_SCRATCHPAD1 0x3a064
+#define A_MAC_IOS_SCRATCHPAD2 0x3a068
+#define A_MAC_IOS_SCRATCHPAD3 0x3a06c
+
+#define S_DATA0 1
+#define M_DATA0 0x7fffffffU
+#define V_DATA0(x) ((x) << S_DATA0)
+#define G_DATA0(x) (((x) >> S_DATA0) & M_DATA0)
+
+#define S_I2C_MODE 0
+#define V_I2C_MODE(x) ((x) << S_I2C_MODE)
+#define F_I2C_MODE V_I2C_MODE(1U)
+
+#define A_MAC_IOS_BGR_DBG_COUNTER 0x3a070
+#define A_MAC_IOS_QUAD0_DBG_COUNTER 0x3a074
+#define A_MAC_IOS_PLL0_DBG_COUNTER 0x3a078
+#define A_MAC_IOS_QUAD1_DBG_COUNTER 0x3a07c
+#define A_MAC_IOS_PLL1_DBG_COUNTER 0x3a080
+#define A_MAC_IOS_DBG_CLK_CFG 0x3a084
+
+#define S_DBG_CLK_MUX_GPIO 3
+#define V_DBG_CLK_MUX_GPIO(x) ((x) << S_DBG_CLK_MUX_GPIO)
+#define F_DBG_CLK_MUX_GPIO V_DBG_CLK_MUX_GPIO(1U)
+
+#define S_DBG_CLK_MUX_SEL 0
+#define M_DBG_CLK_MUX_SEL 0x7U
+#define V_DBG_CLK_MUX_SEL(x) ((x) << S_DBG_CLK_MUX_SEL)
+#define G_DBG_CLK_MUX_SEL(x) (((x) >> S_DBG_CLK_MUX_SEL) & M_DBG_CLK_MUX_SEL)
+
+#define A_MAC_IOS_INTR_EN_QUAD0 0x3a090
+
+#define S_Q0_MAILBOX_INT_ASSERT 24
+#define V_Q0_MAILBOX_INT_ASSERT(x) ((x) << S_Q0_MAILBOX_INT_ASSERT)
+#define F_Q0_MAILBOX_INT_ASSERT V_Q0_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q0_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_3_ASSERT)
+#define F_Q0_TRAINING_FAILURE_3_ASSERT V_Q0_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q0_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_2_ASSERT)
+#define F_Q0_TRAINING_FAILURE_2_ASSERT V_Q0_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q0_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_1_ASSERT)
+#define F_Q0_TRAINING_FAILURE_1_ASSERT V_Q0_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q0_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_0_ASSERT)
+#define F_Q0_TRAINING_FAILURE_0_ASSERT V_Q0_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q0_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_3_ASSERT V_Q0_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q0_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_2_ASSERT V_Q0_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q0_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_1_ASSERT V_Q0_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q0_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_0_ASSERT V_Q0_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_3_ASSERT 15
+#define V_Q0_AN_TX_INT_3_ASSERT(x) ((x) << S_Q0_AN_TX_INT_3_ASSERT)
+#define F_Q0_AN_TX_INT_3_ASSERT V_Q0_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_2_ASSERT 14
+#define V_Q0_AN_TX_INT_2_ASSERT(x) ((x) << S_Q0_AN_TX_INT_2_ASSERT)
+#define F_Q0_AN_TX_INT_2_ASSERT V_Q0_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_1_ASSERT 13
+#define V_Q0_AN_TX_INT_1_ASSERT(x) ((x) << S_Q0_AN_TX_INT_1_ASSERT)
+#define F_Q0_AN_TX_INT_1_ASSERT V_Q0_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_0_ASSERT 12
+#define V_Q0_AN_TX_INT_0_ASSERT(x) ((x) << S_Q0_AN_TX_INT_0_ASSERT)
+#define F_Q0_AN_TX_INT_0_ASSERT V_Q0_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q0_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_3_ASSERT)
+#define F_Q0_SIGNAL_DETECT_3_ASSERT V_Q0_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q0_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_2_ASSERT)
+#define F_Q0_SIGNAL_DETECT_2_ASSERT V_Q0_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q0_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_1_ASSERT)
+#define F_Q0_SIGNAL_DETECT_1_ASSERT V_Q0_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q0_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_0_ASSERT)
+#define F_Q0_SIGNAL_DETECT_0_ASSERT V_Q0_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_3_ASSERT 7
+#define V_Q0_CDR_LOL_3_ASSERT(x) ((x) << S_Q0_CDR_LOL_3_ASSERT)
+#define F_Q0_CDR_LOL_3_ASSERT V_Q0_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_2_ASSERT 6
+#define V_Q0_CDR_LOL_2_ASSERT(x) ((x) << S_Q0_CDR_LOL_2_ASSERT)
+#define F_Q0_CDR_LOL_2_ASSERT V_Q0_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_1_ASSERT 5
+#define V_Q0_CDR_LOL_1_ASSERT(x) ((x) << S_Q0_CDR_LOL_1_ASSERT)
+#define F_Q0_CDR_LOL_1_ASSERT V_Q0_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_0_ASSERT 4
+#define V_Q0_CDR_LOL_0_ASSERT(x) ((x) << S_Q0_CDR_LOL_0_ASSERT)
+#define F_Q0_CDR_LOL_0_ASSERT V_Q0_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q0_LOS_3_ASSERT 3
+#define V_Q0_LOS_3_ASSERT(x) ((x) << S_Q0_LOS_3_ASSERT)
+#define F_Q0_LOS_3_ASSERT V_Q0_LOS_3_ASSERT(1U)
+
+#define S_Q0_LOS_2_ASSERT 2
+#define V_Q0_LOS_2_ASSERT(x) ((x) << S_Q0_LOS_2_ASSERT)
+#define F_Q0_LOS_2_ASSERT V_Q0_LOS_2_ASSERT(1U)
+
+#define S_Q0_LOS_1_ASSERT 1
+#define V_Q0_LOS_1_ASSERT(x) ((x) << S_Q0_LOS_1_ASSERT)
+#define F_Q0_LOS_1_ASSERT V_Q0_LOS_1_ASSERT(1U)
+
+#define S_Q0_LOS_0_ASSERT 0
+#define V_Q0_LOS_0_ASSERT(x) ((x) << S_Q0_LOS_0_ASSERT)
+#define F_Q0_LOS_0_ASSERT V_Q0_LOS_0_ASSERT(1U)
+
+#define A_MAC_IOS_INTR_CAUSE_QUAD0 0x3a094
+#define A_MAC_IOS_INTR_EN_QUAD1 0x3a098
+
+#define S_Q1_MAILBOX_INT_ASSERT 24
+#define V_Q1_MAILBOX_INT_ASSERT(x) ((x) << S_Q1_MAILBOX_INT_ASSERT)
+#define F_Q1_MAILBOX_INT_ASSERT V_Q1_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q1_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_3_ASSERT)
+#define F_Q1_TRAINING_FAILURE_3_ASSERT V_Q1_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q1_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_2_ASSERT)
+#define F_Q1_TRAINING_FAILURE_2_ASSERT V_Q1_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q1_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_1_ASSERT)
+#define F_Q1_TRAINING_FAILURE_1_ASSERT V_Q1_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q1_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_0_ASSERT)
+#define F_Q1_TRAINING_FAILURE_0_ASSERT V_Q1_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q1_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_3_ASSERT V_Q1_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q1_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_2_ASSERT V_Q1_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q1_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_1_ASSERT V_Q1_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q1_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_0_ASSERT V_Q1_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_3_ASSERT 15
+#define V_Q1_AN_TX_INT_3_ASSERT(x) ((x) << S_Q1_AN_TX_INT_3_ASSERT)
+#define F_Q1_AN_TX_INT_3_ASSERT V_Q1_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_2_ASSERT 14
+#define V_Q1_AN_TX_INT_2_ASSERT(x) ((x) << S_Q1_AN_TX_INT_2_ASSERT)
+#define F_Q1_AN_TX_INT_2_ASSERT V_Q1_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_1_ASSERT 13
+#define V_Q1_AN_TX_INT_1_ASSERT(x) ((x) << S_Q1_AN_TX_INT_1_ASSERT)
+#define F_Q1_AN_TX_INT_1_ASSERT V_Q1_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_0_ASSERT 12
+#define V_Q1_AN_TX_INT_0_ASSERT(x) ((x) << S_Q1_AN_TX_INT_0_ASSERT)
+#define F_Q1_AN_TX_INT_0_ASSERT V_Q1_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q1_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_3_ASSERT)
+#define F_Q1_SIGNAL_DETECT_3_ASSERT V_Q1_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q1_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_2_ASSERT)
+#define F_Q1_SIGNAL_DETECT_2_ASSERT V_Q1_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q1_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_1_ASSERT)
+#define F_Q1_SIGNAL_DETECT_1_ASSERT V_Q1_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q1_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_0_ASSERT)
+#define F_Q1_SIGNAL_DETECT_0_ASSERT V_Q1_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_3_ASSERT 7
+#define V_Q1_CDR_LOL_3_ASSERT(x) ((x) << S_Q1_CDR_LOL_3_ASSERT)
+#define F_Q1_CDR_LOL_3_ASSERT V_Q1_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_2_ASSERT 6
+#define V_Q1_CDR_LOL_2_ASSERT(x) ((x) << S_Q1_CDR_LOL_2_ASSERT)
+#define F_Q1_CDR_LOL_2_ASSERT V_Q1_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_1_ASSERT 5
+#define V_Q1_CDR_LOL_1_ASSERT(x) ((x) << S_Q1_CDR_LOL_1_ASSERT)
+#define F_Q1_CDR_LOL_1_ASSERT V_Q1_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_0_ASSERT 4
+#define V_Q1_CDR_LOL_0_ASSERT(x) ((x) << S_Q1_CDR_LOL_0_ASSERT)
+#define F_Q1_CDR_LOL_0_ASSERT V_Q1_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q1_LOS_3_ASSERT 3
+#define V_Q1_LOS_3_ASSERT(x) ((x) << S_Q1_LOS_3_ASSERT)
+#define F_Q1_LOS_3_ASSERT V_Q1_LOS_3_ASSERT(1U)
+
+#define S_Q1_LOS_2_ASSERT 2
+#define V_Q1_LOS_2_ASSERT(x) ((x) << S_Q1_LOS_2_ASSERT)
+#define F_Q1_LOS_2_ASSERT V_Q1_LOS_2_ASSERT(1U)
+
+#define S_Q1_LOS_1_ASSERT 1
+#define V_Q1_LOS_1_ASSERT(x) ((x) << S_Q1_LOS_1_ASSERT)
+#define F_Q1_LOS_1_ASSERT V_Q1_LOS_1_ASSERT(1U)
+
+#define S_Q1_LOS_0_ASSERT 0
+#define V_Q1_LOS_0_ASSERT(x) ((x) << S_Q1_LOS_0_ASSERT)
+#define F_Q1_LOS_0_ASSERT V_Q1_LOS_0_ASSERT(1U)
+
+#define A_MAC_IOS_INTR_CAUSE_QUAD1 0x3a09c
+#define A_MAC_MTIP_PCS_1G_0_CONTROL 0x3e000
+
+#define S_SPEED_SEL_1 13
+#define V_SPEED_SEL_1(x) ((x) << S_SPEED_SEL_1)
+#define F_SPEED_SEL_1 V_SPEED_SEL_1(1U)
+
+#define S_AUTO_NEG_ENA 12
+#define V_AUTO_NEG_ENA(x) ((x) << S_AUTO_NEG_ENA)
+#define F_AUTO_NEG_ENA V_AUTO_NEG_ENA(1U)
+
+#define S_T7_POWER_DOWN 11
+#define V_T7_POWER_DOWN(x) ((x) << S_T7_POWER_DOWN)
+#define F_T7_POWER_DOWN V_T7_POWER_DOWN(1U)
+
+#define S_RESTART_AUTO_NEG 9
+#define V_RESTART_AUTO_NEG(x) ((x) << S_RESTART_AUTO_NEG)
+#define F_RESTART_AUTO_NEG V_RESTART_AUTO_NEG(1U)
+
+#define S_SPEED_SEL_0 6
+#define V_SPEED_SEL_0(x) ((x) << S_SPEED_SEL_0)
+#define F_SPEED_SEL_0 V_SPEED_SEL_0(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_STATUS 0x3e004
+
+#define S_100BASE_T4 15
+#define V_100BASE_T4(x) ((x) << S_100BASE_T4)
+#define F_100BASE_T4 V_100BASE_T4(1U)
+
+#define S_100BASE_X_FULL_DUPLEX 14
+#define V_100BASE_X_FULL_DUPLEX(x) ((x) << S_100BASE_X_FULL_DUPLEX)
+#define F_100BASE_X_FULL_DUPLEX V_100BASE_X_FULL_DUPLEX(1U)
+
+#define S_100BASE_X_HALF_DUPLEX 13
+#define V_100BASE_X_HALF_DUPLEX(x) ((x) << S_100BASE_X_HALF_DUPLEX)
+#define F_100BASE_X_HALF_DUPLEX V_100BASE_X_HALF_DUPLEX(1U)
+
+#define S_10MBPS_FULL_DUPLEX 12
+#define V_10MBPS_FULL_DUPLEX(x) ((x) << S_10MBPS_FULL_DUPLEX)
+#define F_10MBPS_FULL_DUPLEX V_10MBPS_FULL_DUPLEX(1U)
+
+#define S_10MBPS_HALF_DUPLEX 11
+#define V_10MBPS_HALF_DUPLEX(x) ((x) << S_10MBPS_HALF_DUPLEX)
+#define F_10MBPS_HALF_DUPLEX V_10MBPS_HALF_DUPLEX(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX1 10
+#define V_100BASE_T2_HALF_DUPLEX1(x) ((x) << S_100BASE_T2_HALF_DUPLEX1)
+#define F_100BASE_T2_HALF_DUPLEX1 V_100BASE_T2_HALF_DUPLEX1(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX0 9
+#define V_100BASE_T2_HALF_DUPLEX0(x) ((x) << S_100BASE_T2_HALF_DUPLEX0)
+#define F_100BASE_T2_HALF_DUPLEX0 V_100BASE_T2_HALF_DUPLEX0(1U)
+
+#define S_T7_EXTENDED_STATUS 8
+#define V_T7_EXTENDED_STATUS(x) ((x) << S_T7_EXTENDED_STATUS)
+#define F_T7_EXTENDED_STATUS V_T7_EXTENDED_STATUS(1U)
+
+#define S_AUTO_NEG_COMPLETE 5
+#define V_AUTO_NEG_COMPLETE(x) ((x) << S_AUTO_NEG_COMPLETE)
+#define F_AUTO_NEG_COMPLETE V_AUTO_NEG_COMPLETE(1U)
+
+#define S_T7_REMOTE_FAULT 4
+#define V_T7_REMOTE_FAULT(x) ((x) << S_T7_REMOTE_FAULT)
+#define F_T7_REMOTE_FAULT V_T7_REMOTE_FAULT(1U)
+
+#define S_AUTO_NEG_ABILITY 3
+#define V_AUTO_NEG_ABILITY(x) ((x) << S_AUTO_NEG_ABILITY)
+#define F_AUTO_NEG_ABILITY V_AUTO_NEG_ABILITY(1U)
+
+#define S_JABBER_DETECT 1
+#define V_JABBER_DETECT(x) ((x) << S_JABBER_DETECT)
+#define F_JABBER_DETECT V_JABBER_DETECT(1U)
+
+#define S_EXTENDED_CAPABILITY 0
+#define V_EXTENDED_CAPABILITY(x) ((x) << S_EXTENDED_CAPABILITY)
+#define F_EXTENDED_CAPABILITY V_EXTENDED_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_0 0x3e008
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_1 0x3e00c
+#define A_MAC_MTIP_PCS_1G_0_DEV_ABILITY 0x3e010
+
+#define S_EEE_CLOCK_STOP_ENABLE 8
+#define V_EEE_CLOCK_STOP_ENABLE(x) ((x) << S_EEE_CLOCK_STOP_ENABLE)
+#define F_EEE_CLOCK_STOP_ENABLE V_EEE_CLOCK_STOP_ENABLE(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PARTNER_ABILITY 0x3e014
+
+#define S_COPPER_LINK_STATUS 15
+#define V_COPPER_LINK_STATUS(x) ((x) << S_COPPER_LINK_STATUS)
+#define F_COPPER_LINK_STATUS V_COPPER_LINK_STATUS(1U)
+
+#define S_COPPER_DUPLEX_STATUS 12
+#define V_COPPER_DUPLEX_STATUS(x) ((x) << S_COPPER_DUPLEX_STATUS)
+#define F_COPPER_DUPLEX_STATUS V_COPPER_DUPLEX_STATUS(1U)
+
+#define S_COPPER_SPEED 10
+#define M_COPPER_SPEED 0x3U
+#define V_COPPER_SPEED(x) ((x) << S_COPPER_SPEED)
+#define G_COPPER_SPEED(x) (((x) >> S_COPPER_SPEED) & M_COPPER_SPEED)
+
+#define S_EEE_CAPABILITY 9
+#define V_EEE_CAPABILITY(x) ((x) << S_EEE_CAPABILITY)
+#define F_EEE_CAPABILITY V_EEE_CAPABILITY(1U)
+
+#define S_EEE_CLOCK_STOP_CAPABILITY 8
+#define V_EEE_CLOCK_STOP_CAPABILITY(x) ((x) << S_EEE_CLOCK_STOP_CAPABILITY)
+#define F_EEE_CLOCK_STOP_CAPABILITY V_EEE_CLOCK_STOP_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_AN_EXPANSION 0x3e018
+#define A_MAC_MTIP_PCS_1G_0_NP_TX 0x3e01c
+#define A_MAC_MTIP_PCS_1G_0_LP_NP_RX 0x3e020
+
+#define S_T7_DATA 0
+#define M_T7_DATA 0x7ffU
+#define V_T7_DATA(x) ((x) << S_T7_DATA)
+#define G_T7_DATA(x) (((x) >> S_T7_DATA) & M_T7_DATA)
+
+#define A_MAC_MTIP_PCS_1G_0_EXTENDED_STATUS 0x3e03c
+#define A_MAC_MTIP_PCS_1G_0_SCRATCH 0x3e040
+#define A_MAC_MTIP_PCS_1G_0_REV 0x3e044
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_0 0x3e048
+
+#define S_LINK_TIMER_VAL 0
+#define M_LINK_TIMER_VAL 0xffffU
+#define V_LINK_TIMER_VAL(x) ((x) << S_LINK_TIMER_VAL)
+#define G_LINK_TIMER_VAL(x) (((x) >> S_LINK_TIMER_VAL) & M_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_1 0x3e04c
+
+#define S_T7_LINK_TIMER_VAL 0
+#define M_T7_LINK_TIMER_VAL 0x1fU
+#define V_T7_LINK_TIMER_VAL(x) ((x) << S_T7_LINK_TIMER_VAL)
+#define G_T7_LINK_TIMER_VAL(x) (((x) >> S_T7_LINK_TIMER_VAL) & M_T7_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_IF_MODE 0x3e050
+#define A_MAC_MTIP_PCS_1G_0_DEC_ERR_CNT 0x3e054
+#define A_MAC_MTIP_PCS_1G_0_VENDOR_CONTROL 0x3e058
+
+#define S_SGPCS_ENA_ST 15
+#define V_SGPCS_ENA_ST(x) ((x) << S_SGPCS_ENA_ST)
+#define F_SGPCS_ENA_ST V_SGPCS_ENA_ST(1U)
+
+#define S_T7_CFG_CLOCK_RATE 4
+#define M_T7_CFG_CLOCK_RATE 0xfU
+#define V_T7_CFG_CLOCK_RATE(x) ((x) << S_T7_CFG_CLOCK_RATE)
+#define G_T7_CFG_CLOCK_RATE(x) (((x) >> S_T7_CFG_CLOCK_RATE) & M_T7_CFG_CLOCK_RATE)
+
+#define S_SGPCS_ENA_R 0
+#define V_SGPCS_ENA_R(x) ((x) << S_SGPCS_ENA_R)
+#define F_SGPCS_ENA_R V_SGPCS_ENA_R(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_SD_BIT_SLIP 0x3e05c
+
+#define S_SD_BIT_SLIP 0
+#define M_SD_BIT_SLIP 0xfU
+#define V_SD_BIT_SLIP(x) ((x) << S_SD_BIT_SLIP)
+#define G_SD_BIT_SLIP(x) (((x) >> S_SD_BIT_SLIP) & M_SD_BIT_SLIP)
+
+#define A_MAC_MTIP_PCS_1G_1_CONTROL 0x3e100
+#define A_MAC_MTIP_PCS_1G_1_STATUS 0x3e104
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_0 0x3e108
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_1 0x3e10c
+#define A_MAC_MTIP_PCS_1G_1_DEV_ABILITY 0x3e110
+#define A_MAC_MTIP_PCS_1G_1_PARTNER_ABILITY 0x3e114
+#define A_MAC_MTIP_PCS_1G_1_AN_EXPANSION 0x3e118
+#define A_MAC_MTIP_PCS_1G_1_NP_TX 0x3e11c
+#define A_MAC_MTIP_PCS_1G_1_LP_NP_RX 0x3e120
+#define A_MAC_MTIP_PCS_1G_1_EXTENDED_STATUS 0x3e13c
+#define A_MAC_MTIP_PCS_1G_1_SCRATCH 0x3e140
+#define A_MAC_MTIP_PCS_1G_1_REV 0x3e144
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_0 0x3e148
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_1 0x3e14c
+#define A_MAC_MTIP_PCS_1G_1_IF_MODE 0x3e150
+#define A_MAC_MTIP_PCS_1G_1_DEC_ERR_CNT 0x3e154
+#define A_MAC_MTIP_PCS_1G_1_VENDOR_CONTROL 0x3e158
+#define A_MAC_MTIP_PCS_1G_1_SD_BIT_SLIP 0x3e15c
+#define A_MAC_MTIP_PCS_1G_2_CONTROL 0x3e200
+#define A_MAC_MTIP_PCS_1G_2_STATUS 0x3e204
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_0 0x3e208
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_1 0x3e20c
+#define A_MAC_MTIP_PCS_1G_2_DEV_ABILITY 0x3e210
+#define A_MAC_MTIP_PCS_1G_2_PARTNER_ABILITY 0x3e214
+#define A_MAC_MTIP_PCS_1G_2_AN_EXPANSION 0x3e218
+#define A_MAC_MTIP_PCS_1G_2_NP_TX 0x3e21c
+#define A_MAC_MTIP_PCS_1G_2_LP_NP_RX 0x3e220
+#define A_MAC_MTIP_PCS_1G_2_EXTENDED_STATUS 0x3e23c
+#define A_MAC_MTIP_PCS_1G_2_SCRATCH 0x3e240
+#define A_MAC_MTIP_PCS_1G_2_REV 0x3e244
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_0 0x3e248
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_1 0x3e24c
+#define A_MAC_MTIP_PCS_1G_2_IF_MODE 0x3e250
+#define A_MAC_MTIP_PCS_1G_2_DEC_ERR_CNT 0x3e254
+#define A_MAC_MTIP_PCS_1G_2_VENDOR_CONTROL 0x3e258
+#define A_MAC_MTIP_PCS_1G_2_SD_BIT_SLIP 0x3e25c
+#define A_MAC_MTIP_PCS_1G_3_CONTROL 0x3e300
+#define A_MAC_MTIP_PCS_1G_3_STATUS 0x3e304
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_0 0x3e308
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_1 0x3e30c
+#define A_MAC_MTIP_PCS_1G_3_DEV_ABILITY 0x3e310
+#define A_MAC_MTIP_PCS_1G_3_PARTNER_ABILITY 0x3e314
+#define A_MAC_MTIP_PCS_1G_3_AN_EXPANSION 0x3e318
+#define A_MAC_MTIP_PCS_1G_3_NP_TX 0x3e31c
+#define A_MAC_MTIP_PCS_1G_3_LP_NP_RX 0x3e320
+#define A_MAC_MTIP_PCS_1G_3_EXTENDED_STATUS 0x3e33c
+#define A_MAC_MTIP_PCS_1G_3_SCRATCH 0x3e340
+#define A_MAC_MTIP_PCS_1G_3_REV 0x3e344
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_0 0x3e348
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_1 0x3e34c
+#define A_MAC_MTIP_PCS_1G_3_IF_MODE 0x3e350
+#define A_MAC_MTIP_PCS_1G_3_DEC_ERR_CNT 0x3e354
+#define A_MAC_MTIP_PCS_1G_3_VENDOR_CONTROL 0x3e358
+#define A_MAC_MTIP_PCS_1G_3_SD_BIT_SLIP 0x3e35c
+#define A_MAC_DPLL_CTRL_0 0x3f000
+
+#define S_LOCAL_FAULT_OVRD 18
+#define V_LOCAL_FAULT_OVRD(x) ((x) << S_LOCAL_FAULT_OVRD)
+#define F_LOCAL_FAULT_OVRD V_LOCAL_FAULT_OVRD(1U)
+
+#define S_LOCAL_FAULT_HOLD_EN 17
+#define V_LOCAL_FAULT_HOLD_EN(x) ((x) << S_LOCAL_FAULT_HOLD_EN)
+#define F_LOCAL_FAULT_HOLD_EN V_LOCAL_FAULT_HOLD_EN(1U)
+
+#define S_DPLL_RST 16
+#define V_DPLL_RST(x) ((x) << S_DPLL_RST)
+#define F_DPLL_RST V_DPLL_RST(1U)
+
+#define S_CNTOFFSET 0
+#define M_CNTOFFSET 0xffffU
+#define V_CNTOFFSET(x) ((x) << S_CNTOFFSET)
+#define G_CNTOFFSET(x) (((x) >> S_CNTOFFSET) & M_CNTOFFSET)
+
+#define A_MAC_DPLL_CTRL_1 0x3f004
+
+#define S_DELAYK 0
+#define M_DELAYK 0xffffffU
+#define V_DELAYK(x) ((x) << S_DELAYK)
+#define G_DELAYK(x) (((x) >> S_DELAYK) & M_DELAYK)
+
+#define A_MAC_DPLL_CTRL_2 0x3f008
+
+#define S_DIVFFB 16
+#define M_DIVFFB 0xffffU
+#define V_DIVFFB(x) ((x) << S_DIVFFB)
+#define G_DIVFFB(x) (((x) >> S_DIVFFB) & M_DIVFFB)
+
+#define S_DIVFIN 0
+#define M_DIVFIN 0xffffU
+#define V_DIVFIN(x) ((x) << S_DIVFIN)
+#define G_DIVFIN(x) (((x) >> S_DIVFIN) & M_DIVFIN)
+
+#define A_MAC_DPLL_CTRL_3 0x3f00c
+
+#define S_ISHIFT_HOLD 28
+#define M_ISHIFT_HOLD 0xfU
+#define V_ISHIFT_HOLD(x) ((x) << S_ISHIFT_HOLD)
+#define G_ISHIFT_HOLD(x) (((x) >> S_ISHIFT_HOLD) & M_ISHIFT_HOLD)
+
+#define S_ISHIFT 24
+#define M_ISHIFT 0xfU
+#define V_ISHIFT(x) ((x) << S_ISHIFT)
+#define G_ISHIFT(x) (((x) >> S_ISHIFT) & M_ISHIFT)
+
+#define S_INT_PRESET 12
+#define M_INT_PRESET 0xfffU
+#define V_INT_PRESET(x) ((x) << S_INT_PRESET)
+#define G_INT_PRESET(x) (((x) >> S_INT_PRESET) & M_INT_PRESET)
+
+#define S_FMI 4
+#define M_FMI 0xffU
+#define V_FMI(x) ((x) << S_FMI)
+#define G_FMI(x) (((x) >> S_FMI) & M_FMI)
+
+#define S_DPLL_PROGRAM 3
+#define V_DPLL_PROGRAM(x) ((x) << S_DPLL_PROGRAM)
+#define F_DPLL_PROGRAM V_DPLL_PROGRAM(1U)
+
+#define S_PRESET_EN 2
+#define V_PRESET_EN(x) ((x) << S_PRESET_EN)
+#define F_PRESET_EN V_PRESET_EN(1U)
+
+#define S_ONTARGETOV 1
+#define V_ONTARGETOV(x) ((x) << S_ONTARGETOV)
+#define F_ONTARGETOV V_ONTARGETOV(1U)
+
+#define S_FDONLY 0
+#define V_FDONLY(x) ((x) << S_FDONLY)
+#define F_FDONLY V_FDONLY(1U)
+
+#define A_MAC_DPLL_CTRL_4 0x3f010
+
+#define S_FKI 24
+#define M_FKI 0x1fU
+#define V_FKI(x) ((x) << S_FKI)
+#define G_FKI(x) (((x) >> S_FKI) & M_FKI)
+
+#define S_FRAC_PRESET 0
+#define M_FRAC_PRESET 0xffffffU
+#define V_FRAC_PRESET(x) ((x) << S_FRAC_PRESET)
+#define G_FRAC_PRESET(x) (((x) >> S_FRAC_PRESET) & M_FRAC_PRESET)
+
+#define A_MAC_DPLL_CTRL_5 0x3f014
+
+#define S_PH_STEP_CNT_HOLD 24
+#define M_PH_STEP_CNT_HOLD 0x1fU
+#define V_PH_STEP_CNT_HOLD(x) ((x) << S_PH_STEP_CNT_HOLD)
+#define G_PH_STEP_CNT_HOLD(x) (((x) >> S_PH_STEP_CNT_HOLD) & M_PH_STEP_CNT_HOLD)
+
+#define S_CFG_RESET 23
+#define V_CFG_RESET(x) ((x) << S_CFG_RESET)
+#define F_CFG_RESET V_CFG_RESET(1U)
+
+#define S_PH_STEP_CNT 16
+#define M_PH_STEP_CNT 0x1fU
+#define V_PH_STEP_CNT(x) ((x) << S_PH_STEP_CNT)
+#define G_PH_STEP_CNT(x) (((x) >> S_PH_STEP_CNT) & M_PH_STEP_CNT)
+
+#define S_OTDLY 0
+#define M_OTDLY 0xffffU
+#define V_OTDLY(x) ((x) << S_OTDLY)
+#define G_OTDLY(x) (((x) >> S_OTDLY) & M_OTDLY)
+
+#define A_MAC_DPLL_CTRL_6 0x3f018
+
+#define S_TARGETCNT 16
+#define M_TARGETCNT 0xffffU
+#define V_TARGETCNT(x) ((x) << S_TARGETCNT)
+#define G_TARGETCNT(x) (((x) >> S_TARGETCNT) & M_TARGETCNT)
+
+#define S_PKP 8
+#define M_PKP 0x1fU
+#define V_PKP(x) ((x) << S_PKP)
+#define G_PKP(x) (((x) >> S_PKP) & M_PKP)
+
+#define S_PMP 0
+#define M_PMP 0xffU
+#define V_PMP(x) ((x) << S_PMP)
+#define G_PMP(x) (((x) >> S_PMP) & M_PMP)
+
+#define A_MAC_DPLL_CTRL_7 0x3f01c
+#define A_MAC_DPLL_STATUS_0 0x3f020
+
+#define S_FRAC 0
+#define M_FRAC 0xffffffU
+#define V_FRAC(x) ((x) << S_FRAC)
+#define G_FRAC(x) (((x) >> S_FRAC) & M_FRAC)
+
+#define A_MAC_DPLL_STATUS_1 0x3f024
+
+#define S_FRAC_PD_OUT 0
+#define M_FRAC_PD_OUT 0xffffffU
+#define V_FRAC_PD_OUT(x) ((x) << S_FRAC_PD_OUT)
+#define G_FRAC_PD_OUT(x) (((x) >> S_FRAC_PD_OUT) & M_FRAC_PD_OUT)
+
+#define A_MAC_DPLL_STATUS_2 0x3f028
+
+#define S_INT 12
+#define M_INT 0xfffU
+#define V_INT(x) ((x) << S_INT)
+#define G_INT(x) (((x) >> S_INT) & M_INT)
+
+#define S_INT_PD_OUT 0
+#define M_INT_PD_OUT 0xfffU
+#define V_INT_PD_OUT(x) ((x) << S_INT_PD_OUT)
+#define G_INT_PD_OUT(x) (((x) >> S_INT_PD_OUT) & M_INT_PD_OUT)
+
+#define A_MAC_FRAC_N_PLL_CTRL_0 0x3f02c
+
+#define S_FRAC_N_DSKEWCALCNT 29
+#define M_FRAC_N_DSKEWCALCNT 0x7U
+#define V_FRAC_N_DSKEWCALCNT(x) ((x) << S_FRAC_N_DSKEWCALCNT)
+#define G_FRAC_N_DSKEWCALCNT(x) (((x) >> S_FRAC_N_DSKEWCALCNT) & M_FRAC_N_DSKEWCALCNT)
+
+#define S_PLLEN 28
+#define V_PLLEN(x) ((x) << S_PLLEN)
+#define F_PLLEN V_PLLEN(1U)
+
+#define S_T7_BYPASS 24
+#define M_T7_BYPASS 0xfU
+#define V_T7_BYPASS(x) ((x) << S_T7_BYPASS)
+#define G_T7_BYPASS(x) (((x) >> S_T7_BYPASS) & M_T7_BYPASS)
+
+#define S_POSTDIV3A 21
+#define M_POSTDIV3A 0x7U
+#define V_POSTDIV3A(x) ((x) << S_POSTDIV3A)
+#define G_POSTDIV3A(x) (((x) >> S_POSTDIV3A) & M_POSTDIV3A)
+
+#define S_POSTDIV3B 18
+#define M_POSTDIV3B 0x7U
+#define V_POSTDIV3B(x) ((x) << S_POSTDIV3B)
+#define G_POSTDIV3B(x) (((x) >> S_POSTDIV3B) & M_POSTDIV3B)
+
+#define S_POSTDIV2A 15
+#define M_POSTDIV2A 0x7U
+#define V_POSTDIV2A(x) ((x) << S_POSTDIV2A)
+#define G_POSTDIV2A(x) (((x) >> S_POSTDIV2A) & M_POSTDIV2A)
+
+#define S_POSTDIV2B 12
+#define M_POSTDIV2B 0x7U
+#define V_POSTDIV2B(x) ((x) << S_POSTDIV2B)
+#define G_POSTDIV2B(x) (((x) >> S_POSTDIV2B) & M_POSTDIV2B)
+
+#define S_POSTDIV1A 9
+#define M_POSTDIV1A 0x7U
+#define V_POSTDIV1A(x) ((x) << S_POSTDIV1A)
+#define G_POSTDIV1A(x) (((x) >> S_POSTDIV1A) & M_POSTDIV1A)
+
+#define S_POSTDIV1B 6
+#define M_POSTDIV1B 0x7U
+#define V_POSTDIV1B(x) ((x) << S_POSTDIV1B)
+#define G_POSTDIV1B(x) (((x) >> S_POSTDIV1B) & M_POSTDIV1B)
+
+#define S_POSTDIV0A 3
+#define M_POSTDIV0A 0x7U
+#define V_POSTDIV0A(x) ((x) << S_POSTDIV0A)
+#define G_POSTDIV0A(x) (((x) >> S_POSTDIV0A) & M_POSTDIV0A)
+
+#define S_POSTDIV0B 0
+#define M_POSTDIV0B 0x7U
+#define V_POSTDIV0B(x) ((x) << S_POSTDIV0B)
+#define G_POSTDIV0B(x) (((x) >> S_POSTDIV0B) & M_POSTDIV0B)
+
+#define A_MAC_FRAC_N_PLL_CTRL_1 0x3f030
+
+#define S_FRAC_N_FRAC_N_FOUTEN 28
+#define M_FRAC_N_FRAC_N_FOUTEN 0xfU
+#define V_FRAC_N_FRAC_N_FOUTEN(x) ((x) << S_FRAC_N_FRAC_N_FOUTEN)
+#define G_FRAC_N_FRAC_N_FOUTEN(x) (((x) >> S_FRAC_N_FRAC_N_FOUTEN) & M_FRAC_N_FRAC_N_FOUTEN)
+
+#define S_FRAC_N_DSKEWCALIN 16
+#define M_FRAC_N_DSKEWCALIN 0xfffU
+#define V_FRAC_N_DSKEWCALIN(x) ((x) << S_FRAC_N_DSKEWCALIN)
+#define G_FRAC_N_DSKEWCALIN(x) (((x) >> S_FRAC_N_DSKEWCALIN) & M_FRAC_N_DSKEWCALIN)
+
+#define S_FRAC_N_REFDIV 10
+#define M_FRAC_N_REFDIV 0x3fU
+#define V_FRAC_N_REFDIV(x) ((x) << S_FRAC_N_REFDIV)
+#define G_FRAC_N_REFDIV(x) (((x) >> S_FRAC_N_REFDIV) & M_FRAC_N_REFDIV)
+
+#define S_FRAC_N_DSMEN 9
+#define V_FRAC_N_DSMEN(x) ((x) << S_FRAC_N_DSMEN)
+#define F_FRAC_N_DSMEN V_FRAC_N_DSMEN(1U)
+
+#define S_FRAC_N_PLLEN 8
+#define V_FRAC_N_PLLEN(x) ((x) << S_FRAC_N_PLLEN)
+#define F_FRAC_N_PLLEN V_FRAC_N_PLLEN(1U)
+
+#define S_FRAC_N_DACEN 7
+#define V_FRAC_N_DACEN(x) ((x) << S_FRAC_N_DACEN)
+#define F_FRAC_N_DACEN V_FRAC_N_DACEN(1U)
+
+#define S_FRAC_N_POSTDIV0PRE 6
+#define V_FRAC_N_POSTDIV0PRE(x) ((x) << S_FRAC_N_POSTDIV0PRE)
+#define F_FRAC_N_POSTDIV0PRE V_FRAC_N_POSTDIV0PRE(1U)
+
+#define S_FRAC_N_DSKEWCALBYP 5
+#define V_FRAC_N_DSKEWCALBYP(x) ((x) << S_FRAC_N_DSKEWCALBYP)
+#define F_FRAC_N_DSKEWCALBYP V_FRAC_N_DSKEWCALBYP(1U)
+
+#define S_FRAC_N_DSKEWFASTCAL 4
+#define V_FRAC_N_DSKEWFASTCAL(x) ((x) << S_FRAC_N_DSKEWFASTCAL)
+#define F_FRAC_N_DSKEWFASTCAL V_FRAC_N_DSKEWFASTCAL(1U)
+
+#define S_FRAC_N_DSKEWCALEN 3
+#define V_FRAC_N_DSKEWCALEN(x) ((x) << S_FRAC_N_DSKEWCALEN)
+#define F_FRAC_N_DSKEWCALEN V_FRAC_N_DSKEWCALEN(1U)
+
+#define S_FRAC_N_FREFCMLEN 2
+#define V_FRAC_N_FREFCMLEN(x) ((x) << S_FRAC_N_FREFCMLEN)
+#define F_FRAC_N_FREFCMLEN V_FRAC_N_FREFCMLEN(1U)
+
+#define A_MAC_FRAC_N_PLL_STATUS_0 0x3f034
+
+#define S_DSKEWCALLOCK 12
+#define V_DSKEWCALLOCK(x) ((x) << S_DSKEWCALLOCK)
+#define F_DSKEWCALLOCK V_DSKEWCALLOCK(1U)
+
+#define S_DSKEWCALOUT 0
+#define M_DSKEWCALOUT 0xfffU
+#define V_DSKEWCALOUT(x) ((x) << S_DSKEWCALOUT)
+#define G_DSKEWCALOUT(x) (((x) >> S_DSKEWCALOUT) & M_DSKEWCALOUT)
+
+#define A_MAC_MTIP_PCS_STATUS_0 0x3f100
+
+#define S_XLGMII7_TX_TSU 22
+#define M_XLGMII7_TX_TSU 0x3U
+#define V_XLGMII7_TX_TSU(x) ((x) << S_XLGMII7_TX_TSU)
+#define G_XLGMII7_TX_TSU(x) (((x) >> S_XLGMII7_TX_TSU) & M_XLGMII7_TX_TSU)
+
+#define S_XLGMII6_TX_TSU 20
+#define M_XLGMII6_TX_TSU 0x3U
+#define V_XLGMII6_TX_TSU(x) ((x) << S_XLGMII6_TX_TSU)
+#define G_XLGMII6_TX_TSU(x) (((x) >> S_XLGMII6_TX_TSU) & M_XLGMII6_TX_TSU)
+
+#define S_XLGMII5_TX_TSU 18
+#define M_XLGMII5_TX_TSU 0x3U
+#define V_XLGMII5_TX_TSU(x) ((x) << S_XLGMII5_TX_TSU)
+#define G_XLGMII5_TX_TSU(x) (((x) >> S_XLGMII5_TX_TSU) & M_XLGMII5_TX_TSU)
+
+#define S_XLGMII4_TX_TSU 16
+#define M_XLGMII4_TX_TSU 0x3U
+#define V_XLGMII4_TX_TSU(x) ((x) << S_XLGMII4_TX_TSU)
+#define G_XLGMII4_TX_TSU(x) (((x) >> S_XLGMII4_TX_TSU) & M_XLGMII4_TX_TSU)
+
+#define S_XLGMII3_TX_TSU 14
+#define M_XLGMII3_TX_TSU 0x3U
+#define V_XLGMII3_TX_TSU(x) ((x) << S_XLGMII3_TX_TSU)
+#define G_XLGMII3_TX_TSU(x) (((x) >> S_XLGMII3_TX_TSU) & M_XLGMII3_TX_TSU)
+
+#define S_XLGMII2_TX_TSU 12
+#define M_XLGMII2_TX_TSU 0x3U
+#define V_XLGMII2_TX_TSU(x) ((x) << S_XLGMII2_TX_TSU)
+#define G_XLGMII2_TX_TSU(x) (((x) >> S_XLGMII2_TX_TSU) & M_XLGMII2_TX_TSU)
+
+#define S_XLGMII1_TX_TSU 10
+#define M_XLGMII1_TX_TSU 0x3U
+#define V_XLGMII1_TX_TSU(x) ((x) << S_XLGMII1_TX_TSU)
+#define G_XLGMII1_TX_TSU(x) (((x) >> S_XLGMII1_TX_TSU) & M_XLGMII1_TX_TSU)
+
+#define S_XLGMII0_TX_TSU 8
+#define M_XLGMII0_TX_TSU 0x3U
+#define V_XLGMII0_TX_TSU(x) ((x) << S_XLGMII0_TX_TSU)
+#define G_XLGMII0_TX_TSU(x) (((x) >> S_XLGMII0_TX_TSU) & M_XLGMII0_TX_TSU)
+
+#define S_CGMII3_TX_TSU 6
+#define M_CGMII3_TX_TSU 0x3U
+#define V_CGMII3_TX_TSU(x) ((x) << S_CGMII3_TX_TSU)
+#define G_CGMII3_TX_TSU(x) (((x) >> S_CGMII3_TX_TSU) & M_CGMII3_TX_TSU)
+
+#define S_CGMII2_TX_TSU 4
+#define M_CGMII2_TX_TSU 0x3U
+#define V_CGMII2_TX_TSU(x) ((x) << S_CGMII2_TX_TSU)
+#define G_CGMII2_TX_TSU(x) (((x) >> S_CGMII2_TX_TSU) & M_CGMII2_TX_TSU)
+
+#define S_CGMII1_TX_TSU 2
+#define M_CGMII1_TX_TSU 0x3U
+#define V_CGMII1_TX_TSU(x) ((x) << S_CGMII1_TX_TSU)
+#define G_CGMII1_TX_TSU(x) (((x) >> S_CGMII1_TX_TSU) & M_CGMII1_TX_TSU)
+
+#define S_CGMII0_TX_TSU 0
+#define M_CGMII0_TX_TSU 0x3U
+#define V_CGMII0_TX_TSU(x) ((x) << S_CGMII0_TX_TSU)
+#define G_CGMII0_TX_TSU(x) (((x) >> S_CGMII0_TX_TSU) & M_CGMII0_TX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_1 0x3f104
+
+#define S_CDMII1_RX_TSU 26
+#define M_CDMII1_RX_TSU 0x3U
+#define V_CDMII1_RX_TSU(x) ((x) << S_CDMII1_RX_TSU)
+#define G_CDMII1_RX_TSU(x) (((x) >> S_CDMII1_RX_TSU) & M_CDMII1_RX_TSU)
+
+#define S_CDMII0_RX_TSU 24
+#define M_CDMII0_RX_TSU 0x3U
+#define V_CDMII0_RX_TSU(x) ((x) << S_CDMII0_RX_TSU)
+#define G_CDMII0_RX_TSU(x) (((x) >> S_CDMII0_RX_TSU) & M_CDMII0_RX_TSU)
+
+#define S_XLGMII7_RX_TSU 22
+#define M_XLGMII7_RX_TSU 0x3U
+#define V_XLGMII7_RX_TSU(x) ((x) << S_XLGMII7_RX_TSU)
+#define G_XLGMII7_RX_TSU(x) (((x) >> S_XLGMII7_RX_TSU) & M_XLGMII7_RX_TSU)
+
+#define S_XLGMII6_RX_TSU 20
+#define M_XLGMII6_RX_TSU 0x3U
+#define V_XLGMII6_RX_TSU(x) ((x) << S_XLGMII6_RX_TSU)
+#define G_XLGMII6_RX_TSU(x) (((x) >> S_XLGMII6_RX_TSU) & M_XLGMII6_RX_TSU)
+
+#define S_XLGMII5_RX_TSU 18
+#define M_XLGMII5_RX_TSU 0x3U
+#define V_XLGMII5_RX_TSU(x) ((x) << S_XLGMII5_RX_TSU)
+#define G_XLGMII5_RX_TSU(x) (((x) >> S_XLGMII5_RX_TSU) & M_XLGMII5_RX_TSU)
+
+#define S_XLGMII4_RX_TSU 16
+#define M_XLGMII4_RX_TSU 0x3U
+#define V_XLGMII4_RX_TSU(x) ((x) << S_XLGMII4_RX_TSU)
+#define G_XLGMII4_RX_TSU(x) (((x) >> S_XLGMII4_RX_TSU) & M_XLGMII4_RX_TSU)
+
+#define S_XLGMII3_RX_TSU 14
+#define M_XLGMII3_RX_TSU 0x3U
+#define V_XLGMII3_RX_TSU(x) ((x) << S_XLGMII3_RX_TSU)
+#define G_XLGMII3_RX_TSU(x) (((x) >> S_XLGMII3_RX_TSU) & M_XLGMII3_RX_TSU)
+
+#define S_XLGMII2_RX_TSU 12
+#define M_XLGMII2_RX_TSU 0x3U
+#define V_XLGMII2_RX_TSU(x) ((x) << S_XLGMII2_RX_TSU)
+#define G_XLGMII2_RX_TSU(x) (((x) >> S_XLGMII2_RX_TSU) & M_XLGMII2_RX_TSU)
+
+#define S_XLGMII1_RX_TSU 10
+#define M_XLGMII1_RX_TSU 0x3U
+#define V_XLGMII1_RX_TSU(x) ((x) << S_XLGMII1_RX_TSU)
+#define G_XLGMII1_RX_TSU(x) (((x) >> S_XLGMII1_RX_TSU) & M_XLGMII1_RX_TSU)
+
+#define S_XLGMII0_RX_TSU 8
+#define M_XLGMII0_RX_TSU 0x3U
+#define V_XLGMII0_RX_TSU(x) ((x) << S_XLGMII0_RX_TSU)
+#define G_XLGMII0_RX_TSU(x) (((x) >> S_XLGMII0_RX_TSU) & M_XLGMII0_RX_TSU)
+
+#define S_CGMII3_RX_TSU 6
+#define M_CGMII3_RX_TSU 0x3U
+#define V_CGMII3_RX_TSU(x) ((x) << S_CGMII3_RX_TSU)
+#define G_CGMII3_RX_TSU(x) (((x) >> S_CGMII3_RX_TSU) & M_CGMII3_RX_TSU)
+
+#define S_CGMII2_RX_TSU 4
+#define M_CGMII2_RX_TSU 0x3U
+#define V_CGMII2_RX_TSU(x) ((x) << S_CGMII2_RX_TSU)
+#define G_CGMII2_RX_TSU(x) (((x) >> S_CGMII2_RX_TSU) & M_CGMII2_RX_TSU)
+
+#define S_CGMII1_RX_TSU 2
+#define M_CGMII1_RX_TSU 0x3U
+#define V_CGMII1_RX_TSU(x) ((x) << S_CGMII1_RX_TSU)
+#define G_CGMII1_RX_TSU(x) (((x) >> S_CGMII1_RX_TSU) & M_CGMII1_RX_TSU)
+
+#define S_CGMII0_RX_TSU 0
+#define M_CGMII0_RX_TSU 0x3U
+#define V_CGMII0_RX_TSU(x) ((x) << S_CGMII0_RX_TSU)
+#define G_CGMII0_RX_TSU(x) (((x) >> S_CGMII0_RX_TSU) & M_CGMII0_RX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_2 0x3f108
+
+#define S_SD_BIT_SLIP_0 0
+#define M_SD_BIT_SLIP_0 0x3fffffffU
+#define V_SD_BIT_SLIP_0(x) ((x) << S_SD_BIT_SLIP_0)
+#define G_SD_BIT_SLIP_0(x) (((x) >> S_SD_BIT_SLIP_0) & M_SD_BIT_SLIP_0)
+
+#define A_MAC_MTIP_PCS_STATUS_3 0x3f10c
+
+#define S_SD_BIT_SLIP_1 0
+#define M_SD_BIT_SLIP_1 0x3ffffU
+#define V_SD_BIT_SLIP_1(x) ((x) << S_SD_BIT_SLIP_1)
+#define G_SD_BIT_SLIP_1(x) (((x) >> S_SD_BIT_SLIP_1) & M_SD_BIT_SLIP_1)
+
+#define A_MAC_MTIP_PCS_STATUS_4 0x3f110
+
+#define S_TSU_RX_SD 0
+#define M_TSU_RX_SD 0xffffU
+#define V_TSU_RX_SD(x) ((x) << S_TSU_RX_SD)
+#define G_TSU_RX_SD(x) (((x) >> S_TSU_RX_SD) & M_TSU_RX_SD)
+
+#define A_MAC_MTIP_PCS_STATUS_5 0x3f114
+
+#define S_RSFEC_XSTATS_STRB 0
+#define M_RSFEC_XSTATS_STRB 0xffffffU
+#define V_RSFEC_XSTATS_STRB(x) ((x) << S_RSFEC_XSTATS_STRB)
+#define G_RSFEC_XSTATS_STRB(x) (((x) >> S_RSFEC_XSTATS_STRB) & M_RSFEC_XSTATS_STRB)
+
+#define A_MAC_MTIP_PCS_STATUS_6 0x3f118
+#define A_MAC_MTIP_PCS_STATUS_7 0x3f11c
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_0 0x3f120
+
+#define S_TSV_XON_STB_2 24
+#define M_TSV_XON_STB_2 0xffU
+#define V_TSV_XON_STB_2(x) ((x) << S_TSV_XON_STB_2)
+#define G_TSV_XON_STB_2(x) (((x) >> S_TSV_XON_STB_2) & M_TSV_XON_STB_2)
+
+#define S_TSV_XOFF_STB_2 16
+#define M_TSV_XOFF_STB_2 0xffU
+#define V_TSV_XOFF_STB_2(x) ((x) << S_TSV_XOFF_STB_2)
+#define G_TSV_XOFF_STB_2(x) (((x) >> S_TSV_XOFF_STB_2) & M_TSV_XOFF_STB_2)
+
+#define S_RSV_XON_STB_2 8
+#define M_RSV_XON_STB_2 0xffU
+#define V_RSV_XON_STB_2(x) ((x) << S_RSV_XON_STB_2)
+#define G_RSV_XON_STB_2(x) (((x) >> S_RSV_XON_STB_2) & M_RSV_XON_STB_2)
+
+#define S_RSV_XOFF_STB_2 0
+#define M_RSV_XOFF_STB_2 0xffU
+#define V_RSV_XOFF_STB_2(x) ((x) << S_RSV_XOFF_STB_2)
+#define G_RSV_XOFF_STB_2(x) (((x) >> S_RSV_XOFF_STB_2) & M_RSV_XOFF_STB_2)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_1 0x3f124
+
+#define S_TSV_XON_STB_3 24
+#define M_TSV_XON_STB_3 0xffU
+#define V_TSV_XON_STB_3(x) ((x) << S_TSV_XON_STB_3)
+#define G_TSV_XON_STB_3(x) (((x) >> S_TSV_XON_STB_3) & M_TSV_XON_STB_3)
+
+#define S_TSV_XOFF_STB_3 16
+#define M_TSV_XOFF_STB_3 0xffU
+#define V_TSV_XOFF_STB_3(x) ((x) << S_TSV_XOFF_STB_3)
+#define G_TSV_XOFF_STB_3(x) (((x) >> S_TSV_XOFF_STB_3) & M_TSV_XOFF_STB_3)
+
+#define S_RSV_XON_STB_3 8
+#define M_RSV_XON_STB_3 0xffU
+#define V_RSV_XON_STB_3(x) ((x) << S_RSV_XON_STB_3)
+#define G_RSV_XON_STB_3(x) (((x) >> S_RSV_XON_STB_3) & M_RSV_XON_STB_3)
+
+#define S_RSV_XOFF_STB_3 0
+#define M_RSV_XOFF_STB_3 0xffU
+#define V_RSV_XOFF_STB_3(x) ((x) << S_RSV_XOFF_STB_3)
+#define G_RSV_XOFF_STB_3(x) (((x) >> S_RSV_XOFF_STB_3) & M_RSV_XOFF_STB_3)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_2 0x3f128
+
+#define S_TSV_XON_STB_4 24
+#define M_TSV_XON_STB_4 0xffU
+#define V_TSV_XON_STB_4(x) ((x) << S_TSV_XON_STB_4)
+#define G_TSV_XON_STB_4(x) (((x) >> S_TSV_XON_STB_4) & M_TSV_XON_STB_4)
+
+#define S_TSV_XOFF_STB_4 16
+#define M_TSV_XOFF_STB_4 0xffU
+#define V_TSV_XOFF_STB_4(x) ((x) << S_TSV_XOFF_STB_4)
+#define G_TSV_XOFF_STB_4(x) (((x) >> S_TSV_XOFF_STB_4) & M_TSV_XOFF_STB_4)
+
+#define S_RSV_XON_STB_4 8
+#define M_RSV_XON_STB_4 0xffU
+#define V_RSV_XON_STB_4(x) ((x) << S_RSV_XON_STB_4)
+#define G_RSV_XON_STB_4(x) (((x) >> S_RSV_XON_STB_4) & M_RSV_XON_STB_4)
+
+#define S_RSV_XOFF_STB_4 0
+#define M_RSV_XOFF_STB_4 0xffU
+#define V_RSV_XOFF_STB_4(x) ((x) << S_RSV_XOFF_STB_4)
+#define G_RSV_XOFF_STB_4(x) (((x) >> S_RSV_XOFF_STB_4) & M_RSV_XOFF_STB_4)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_3 0x3f12c
+
+#define S_TSV_XON_STB_5 24
+#define M_TSV_XON_STB_5 0xffU
+#define V_TSV_XON_STB_5(x) ((x) << S_TSV_XON_STB_5)
+#define G_TSV_XON_STB_5(x) (((x) >> S_TSV_XON_STB_5) & M_TSV_XON_STB_5)
+
+#define S_TSV_XOFF_STB_5 16
+#define M_TSV_XOFF_STB_5 0xffU
+#define V_TSV_XOFF_STB_5(x) ((x) << S_TSV_XOFF_STB_5)
+#define G_TSV_XOFF_STB_5(x) (((x) >> S_TSV_XOFF_STB_5) & M_TSV_XOFF_STB_5)
+
+#define S_RSV_XON_STB_5 8
+#define M_RSV_XON_STB_5 0xffU
+#define V_RSV_XON_STB_5(x) ((x) << S_RSV_XON_STB_5)
+#define G_RSV_XON_STB_5(x) (((x) >> S_RSV_XON_STB_5) & M_RSV_XON_STB_5)
+
+#define S_RSV_XOFF_STB_5 0
+#define M_RSV_XOFF_STB_5 0xffU
+#define V_RSV_XOFF_STB_5(x) ((x) << S_RSV_XOFF_STB_5)
+#define G_RSV_XOFF_STB_5(x) (((x) >> S_RSV_XOFF_STB_5) & M_RSV_XOFF_STB_5)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_4 0x3f130
+
+#define S_TX_SFD_O_5 19
+#define V_TX_SFD_O_5(x) ((x) << S_TX_SFD_O_5)
+#define F_TX_SFD_O_5 V_TX_SFD_O_5(1U)
+
+#define S_TX_SFD_O_4 18
+#define V_TX_SFD_O_4(x) ((x) << S_TX_SFD_O_4)
+#define F_TX_SFD_O_4 V_TX_SFD_O_4(1U)
+
+#define S_TX_SFD_O_3 17
+#define V_TX_SFD_O_3(x) ((x) << S_TX_SFD_O_3)
+#define F_TX_SFD_O_3 V_TX_SFD_O_3(1U)
+
+#define S_TX_SFD_O_2 16
+#define V_TX_SFD_O_2(x) ((x) << S_TX_SFD_O_2)
+#define F_TX_SFD_O_2 V_TX_SFD_O_2(1U)
+
+#define S_RX_SFD_O_5 15
+#define V_RX_SFD_O_5(x) ((x) << S_RX_SFD_O_5)
+#define F_RX_SFD_O_5 V_RX_SFD_O_5(1U)
+
+#define S_RX_SFD_O_4 14
+#define V_RX_SFD_O_4(x) ((x) << S_RX_SFD_O_4)
+#define F_RX_SFD_O_4 V_RX_SFD_O_4(1U)
+
+#define S_RX_SFD_O_3 13
+#define V_RX_SFD_O_3(x) ((x) << S_RX_SFD_O_3)
+#define F_RX_SFD_O_3 V_RX_SFD_O_3(1U)
+
+#define S_RX_SFD_O_2 12
+#define V_RX_SFD_O_2(x) ((x) << S_RX_SFD_O_2)
+#define F_RX_SFD_O_2 V_RX_SFD_O_2(1U)
+
+#define S_RX_SFD_SHIFT_O_5 11
+#define V_RX_SFD_SHIFT_O_5(x) ((x) << S_RX_SFD_SHIFT_O_5)
+#define F_RX_SFD_SHIFT_O_5 V_RX_SFD_SHIFT_O_5(1U)
+
+#define S_RX_SFD_SHIFT_O_4 10
+#define V_RX_SFD_SHIFT_O_4(x) ((x) << S_RX_SFD_SHIFT_O_4)
+#define F_RX_SFD_SHIFT_O_4 V_RX_SFD_SHIFT_O_4(1U)
+
+#define S_RX_SFD_SHIFT_O_3 9
+#define V_RX_SFD_SHIFT_O_3(x) ((x) << S_RX_SFD_SHIFT_O_3)
+#define F_RX_SFD_SHIFT_O_3 V_RX_SFD_SHIFT_O_3(1U)
+
+#define S_RX_SFD_SHIFT_O_2 8
+#define V_RX_SFD_SHIFT_O_2(x) ((x) << S_RX_SFD_SHIFT_O_2)
+#define F_RX_SFD_SHIFT_O_2 V_RX_SFD_SHIFT_O_2(1U)
+
+#define S_TX_SFD_SHIFT_O_5 7
+#define V_TX_SFD_SHIFT_O_5(x) ((x) << S_TX_SFD_SHIFT_O_5)
+#define F_TX_SFD_SHIFT_O_5 V_TX_SFD_SHIFT_O_5(1U)
+
+#define S_TX_SFD_SHIFT_O_4 6
+#define V_TX_SFD_SHIFT_O_4(x) ((x) << S_TX_SFD_SHIFT_O_4)
+#define F_TX_SFD_SHIFT_O_4 V_TX_SFD_SHIFT_O_4(1U)
+
+#define S_TX_SFD_SHIFT_O_3 5
+#define V_TX_SFD_SHIFT_O_3(x) ((x) << S_TX_SFD_SHIFT_O_3)
+#define F_TX_SFD_SHIFT_O_3 V_TX_SFD_SHIFT_O_3(1U)
+
+#define S_TX_SFD_SHIFT_O_2 4
+#define V_TX_SFD_SHIFT_O_2(x) ((x) << S_TX_SFD_SHIFT_O_2)
+#define F_TX_SFD_SHIFT_O_2 V_TX_SFD_SHIFT_O_2(1U)
+
+#define S_TS_SFD_ENA_5 3
+#define V_TS_SFD_ENA_5(x) ((x) << S_TS_SFD_ENA_5)
+#define F_TS_SFD_ENA_5 V_TS_SFD_ENA_5(1U)
+
+#define S_TS_SFD_ENA_4 2
+#define V_TS_SFD_ENA_4(x) ((x) << S_TS_SFD_ENA_4)
+#define F_TS_SFD_ENA_4 V_TS_SFD_ENA_4(1U)
+
+#define S_TS_SFD_ENA_3 1
+#define V_TS_SFD_ENA_3(x) ((x) << S_TS_SFD_ENA_3)
+#define F_TS_SFD_ENA_3 V_TS_SFD_ENA_3(1U)
+
+#define S_TS_SFD_ENA_2 0
+#define V_TS_SFD_ENA_2(x) ((x) << S_TS_SFD_ENA_2)
+#define F_TS_SFD_ENA_2 V_TS_SFD_ENA_2(1U)
+
+#define A_MAC_STS_CONFIG 0x3f200
+
+#define S_STS_ENA 30
+#define V_STS_ENA(x) ((x) << S_STS_ENA)
+#define F_STS_ENA V_STS_ENA(1U)
+
+#define S_N_PPS_ENA 29
+#define V_N_PPS_ENA(x) ((x) << S_N_PPS_ENA)
+#define F_N_PPS_ENA V_N_PPS_ENA(1U)
+
+#define S_STS_RESET 28
+#define V_STS_RESET(x) ((x) << S_STS_RESET)
+#define F_STS_RESET V_STS_RESET(1U)
+
+#define S_DEBOUNCE_CNT 0
+#define M_DEBOUNCE_CNT 0xfffffffU
+#define V_DEBOUNCE_CNT(x) ((x) << S_DEBOUNCE_CNT)
+#define G_DEBOUNCE_CNT(x) (((x) >> S_DEBOUNCE_CNT) & M_DEBOUNCE_CNT)
+
+#define A_MAC_STS_COUNTER 0x3f204
+#define A_MAC_STS_COUNT_1 0x3f208
+#define A_MAC_STS_COUNT_2 0x3f20c
+#define A_MAC_STS_N_PPS_COUNT_HI 0x3f210
+#define A_MAC_STS_N_PPS_COUNT_LO 0x3f214
+#define A_MAC_STS_N_PPS_COUNTER 0x3f218
+#define A_MAC_BGR_PQ0_FIRMWARE_COMMON_0 0x4030
+
+#define S_MAC_BGR_BGR_REG_APB_SEL 0
+#define V_MAC_BGR_BGR_REG_APB_SEL(x) ((x) << S_MAC_BGR_BGR_REG_APB_SEL)
+#define F_MAC_BGR_BGR_REG_APB_SEL V_MAC_BGR_BGR_REG_APB_SEL(1U)
+
+#define A_MAC_BGR_TOP_DIG_CTRL1_REG_LSB 0x4430
+
+#define S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS 15
+#define V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(x) ((x) << S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS)
+#define F_MAC_BGR_BGR_REFCLK_CTRL_BYPASS V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(1U)
+
+#define S_MAC_BGR_BGR_COREREFCLK_SEL 14
+#define V_MAC_BGR_BGR_COREREFCLK_SEL(x) ((x) << S_MAC_BGR_BGR_COREREFCLK_SEL)
+#define F_MAC_BGR_BGR_COREREFCLK_SEL V_MAC_BGR_BGR_COREREFCLK_SEL(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_DIV 8
+#define M_MAC_BGR_BGR_TEST_CLK_DIV 0x7U
+#define V_MAC_BGR_BGR_TEST_CLK_DIV(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_DIV)
+#define G_MAC_BGR_BGR_TEST_CLK_DIV(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_DIV) & M_MAC_BGR_BGR_TEST_CLK_DIV)
+
+#define S_MAC_BGR_BGR_TEST_CLK_EN 7
+#define V_MAC_BGR_BGR_TEST_CLK_EN(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_EN)
+#define F_MAC_BGR_BGR_TEST_CLK_EN V_MAC_BGR_BGR_TEST_CLK_EN(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_BGRSEL 5
+#define M_MAC_BGR_BGR_TEST_CLK_BGRSEL 0x3U
+#define V_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+#define G_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_BGRSEL) & M_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+
+#define S_MAC_BGR_BGR_TEST_CLK_SEL 0
+#define M_MAC_BGR_BGR_TEST_CLK_SEL 0x1fU
+#define V_MAC_BGR_BGR_TEST_CLK_SEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_SEL)
+#define G_MAC_BGR_BGR_TEST_CLK_SEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_SEL) & M_MAC_BGR_BGR_TEST_CLK_SEL)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_0 0x6000
+
+#define S_MAC_BGR_BGR_REG_PRG_EN 0
+#define V_MAC_BGR_BGR_REG_PRG_EN(x) ((x) << S_MAC_BGR_BGR_REG_PRG_EN)
+#define F_MAC_BGR_BGR_REG_PRG_EN V_MAC_BGR_BGR_REG_PRG_EN(1U)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_BGR_BGR_REG_GPO 0
+#define V_MAC_BGR_BGR_REG_GPO(x) ((x) << S_MAC_BGR_BGR_REG_GPO)
+#define F_MAC_BGR_BGR_REG_GPO V_MAC_BGR_BGR_REG_GPO(1U)
+
+#define A_MAC_BGR_MGMT_SPINE_MACRO_PMA_0 0x40000
+
+#define S_MAC_BGR_CUREFCLKSEL1 0
+#define M_MAC_BGR_CUREFCLKSEL1 0x3U
+#define V_MAC_BGR_CUREFCLKSEL1(x) ((x) << S_MAC_BGR_CUREFCLKSEL1)
+#define G_MAC_BGR_CUREFCLKSEL1(x) (((x) >> S_MAC_BGR_CUREFCLKSEL1) & M_MAC_BGR_CUREFCLKSEL1)
+
+#define A_MAC_BGR_REFCLK_CONTROL_1 0x40004
+
+#define S_MAC_BGR_IM_CUREFCLKLR_EN 0
+#define V_MAC_BGR_IM_CUREFCLKLR_EN(x) ((x) << S_MAC_BGR_IM_CUREFCLKLR_EN)
+#define F_MAC_BGR_IM_CUREFCLKLR_EN V_MAC_BGR_IM_CUREFCLKLR_EN(1U)
+
+#define A_MAC_BGR_REFCLK_CONTROL_2 0x40080
+
+#define S_MAC_BGR_IM_REF_EN 0
+#define V_MAC_BGR_IM_REF_EN(x) ((x) << S_MAC_BGR_IM_REF_EN)
+#define F_MAC_BGR_IM_REF_EN V_MAC_BGR_IM_REF_EN(1U)
+
+#define A_MAC_PLL0_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL0_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL0_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL2_LOCK_STATUS)
+#define F_MAC_PLL0_PLL2_LOCK_STATUS V_MAC_PLL0_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL0_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL1_LOCK_STATUS)
+#define F_MAC_PLL0_PLL1_LOCK_STATUS V_MAC_PLL0_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL0_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL0_LOCK_STATUS)
+#define F_MAC_PLL0_PLL0_LOCK_STATUS V_MAC_PLL0_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL0_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL0_PLL_PRG_EN 0
+#define M_MAC_PLL0_PLL_PRG_EN 0xfU
+#define V_MAC_PLL0_PLL_PRG_EN(x) ((x) << S_MAC_PLL0_PLL_PRG_EN)
+#define G_MAC_PLL0_PLL_PRG_EN(x) (((x) >> S_MAC_PLL0_PLL_PRG_EN) & M_MAC_PLL0_PLL_PRG_EN)
+
+#define A_MAC_PLL0_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL0_PMA_MACRO_SELECT 0
+#define M_MAC_PLL0_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL0_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL0_PMA_MACRO_SELECT)
+#define G_MAC_PLL0_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL0_PMA_MACRO_SELECT) & M_MAC_PLL0_PMA_MACRO_SELECT)
+
+#define A_MAC_PLL1_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL1_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL1_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL2_LOCK_STATUS)
+#define F_MAC_PLL1_PLL2_LOCK_STATUS V_MAC_PLL1_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL1_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL1_LOCK_STATUS)
+#define F_MAC_PLL1_PLL1_LOCK_STATUS V_MAC_PLL1_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL1_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL0_LOCK_STATUS)
+#define F_MAC_PLL1_PLL0_LOCK_STATUS V_MAC_PLL1_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL1_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL1_PLL_PRG_EN 0
+#define M_MAC_PLL1_PLL_PRG_EN 0xfU
+#define V_MAC_PLL1_PLL_PRG_EN(x) ((x) << S_MAC_PLL1_PLL_PRG_EN)
+#define G_MAC_PLL1_PLL_PRG_EN(x) (((x) >> S_MAC_PLL1_PLL_PRG_EN) & M_MAC_PLL1_PLL_PRG_EN)
+
+#define A_MAC_PLL1_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL1_PMA_MACRO_SELECT 0
+#define M_MAC_PLL1_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL1_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL1_PMA_MACRO_SELECT)
+#define G_MAC_PLL1_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL1_PMA_MACRO_SELECT) & M_MAC_PLL1_PMA_MACRO_SELECT)
+
+/* registers for module CRYPTO_0 */
+#define CRYPTO_0_BASE_ADDR 0x44000
+
+#define A_TLS_TX_CH_CONFIG 0x44000
+
+#define S_SMALL_LEN_THRESH 16
+#define M_SMALL_LEN_THRESH 0xffffU
+#define V_SMALL_LEN_THRESH(x) ((x) << S_SMALL_LEN_THRESH)
+#define G_SMALL_LEN_THRESH(x) (((x) >> S_SMALL_LEN_THRESH) & M_SMALL_LEN_THRESH)
+
+#define S_CIPH0_CTL_SEL 12
+#define M_CIPH0_CTL_SEL 0x7U
+#define V_CIPH0_CTL_SEL(x) ((x) << S_CIPH0_CTL_SEL)
+#define G_CIPH0_CTL_SEL(x) (((x) >> S_CIPH0_CTL_SEL) & M_CIPH0_CTL_SEL)
+
+#define S_CIPHN_CTL_SEL 9
+#define M_CIPHN_CTL_SEL 0x7U
+#define V_CIPHN_CTL_SEL(x) ((x) << S_CIPHN_CTL_SEL)
+#define G_CIPHN_CTL_SEL(x) (((x) >> S_CIPHN_CTL_SEL) & M_CIPHN_CTL_SEL)
+
+#define S_MAC_CTL_SEL 6
+#define M_MAC_CTL_SEL 0x7U
+#define V_MAC_CTL_SEL(x) ((x) << S_MAC_CTL_SEL)
+#define G_MAC_CTL_SEL(x) (((x) >> S_MAC_CTL_SEL) & M_MAC_CTL_SEL)
+
+#define S_CIPH0_XOR_SEL 5
+#define V_CIPH0_XOR_SEL(x) ((x) << S_CIPH0_XOR_SEL)
+#define F_CIPH0_XOR_SEL V_CIPH0_XOR_SEL(1U)
+
+#define S_CIPHN_XOR_SEL 4
+#define V_CIPHN_XOR_SEL(x) ((x) << S_CIPHN_XOR_SEL)
+#define F_CIPHN_XOR_SEL V_CIPHN_XOR_SEL(1U)
+
+#define S_MAC_XOR_SEL 3
+#define V_MAC_XOR_SEL(x) ((x) << S_MAC_XOR_SEL)
+#define F_MAC_XOR_SEL V_MAC_XOR_SEL(1U)
+
+#define S_CIPH0_DP_SEL 2
+#define V_CIPH0_DP_SEL(x) ((x) << S_CIPH0_DP_SEL)
+#define F_CIPH0_DP_SEL V_CIPH0_DP_SEL(1U)
+
+#define S_CIPHN_DP_SEL 1
+#define V_CIPHN_DP_SEL(x) ((x) << S_CIPHN_DP_SEL)
+#define F_CIPHN_DP_SEL V_CIPHN_DP_SEL(1U)
+
+#define S_MAC_DP_SEL 0
+#define V_MAC_DP_SEL(x) ((x) << S_MAC_DP_SEL)
+#define F_MAC_DP_SEL V_MAC_DP_SEL(1U)
+
+#define A_TLS_TX_CH_PERR_INJECT 0x44004
+#define A_TLS_TX_CH_INT_ENABLE 0x44008
+
+#define S_KEYLENERR 3
+#define V_KEYLENERR(x) ((x) << S_KEYLENERR)
+#define F_KEYLENERR V_KEYLENERR(1U)
+
+#define S_INTF1_PERR 2
+#define V_INTF1_PERR(x) ((x) << S_INTF1_PERR)
+#define F_INTF1_PERR V_INTF1_PERR(1U)
+
+#define S_INTF0_PERR 1
+#define V_INTF0_PERR(x) ((x) << S_INTF0_PERR)
+#define F_INTF0_PERR V_INTF0_PERR(1U)
+
+#define A_TLS_TX_CH_INT_CAUSE 0x4400c
+
+#define S_KEX_CERR 4
+#define V_KEX_CERR(x) ((x) << S_KEX_CERR)
+#define F_KEX_CERR V_KEX_CERR(1U)
+
+#define A_TLS_TX_CH_PERR_ENABLE 0x44010
+#define A_TLS_TX_CH_DEBUG_FLAGS 0x44014
+#define A_TLS_TX_CH_HMACCTRL_CFG 0x44020
+#define A_TLS_TX_CH_ERR_RSP_HDR 0x44024
+#define A_TLS_TX_CH_HANG_TIMEOUT 0x44028
+
+#define S_T7_TIMEOUT 0
+#define M_T7_TIMEOUT 0xffU
+#define V_T7_TIMEOUT(x) ((x) << S_T7_TIMEOUT)
+#define G_T7_TIMEOUT(x) (((x) >> S_T7_TIMEOUT) & M_T7_TIMEOUT)
+
+#define A_TLS_TX_CH_DBG_STEP_CTRL 0x44030
+
+#define S_DBG_STEP_CTRL 1
+#define V_DBG_STEP_CTRL(x) ((x) << S_DBG_STEP_CTRL)
+#define F_DBG_STEP_CTRL V_DBG_STEP_CTRL(1U)
+
+#define S_DBG_STEP_EN 0
+#define V_DBG_STEP_EN(x) ((x) << S_DBG_STEP_EN)
+#define F_DBG_STEP_EN V_DBG_STEP_EN(1U)
+
+#define A_TLS_TX_DBG_SELL_DATA 0x44714
+#define A_TLS_TX_DBG_SELH_DATA 0x44718
+#define A_TLS_TX_DBG_SEL_CTRL 0x44730
+#define A_TLS_TX_GLOBAL_CONFIG 0x447c0
+
+#define S_QUIC_EN 2
+#define V_QUIC_EN(x) ((x) << S_QUIC_EN)
+#define F_QUIC_EN V_QUIC_EN(1U)
+
+#define S_IPSEC_IDX_UPD_EN 1
+#define V_IPSEC_IDX_UPD_EN(x) ((x) << S_IPSEC_IDX_UPD_EN)
+#define F_IPSEC_IDX_UPD_EN V_IPSEC_IDX_UPD_EN(1U)
+
+#define S_IPSEC_IDX_CTL 0
+#define V_IPSEC_IDX_CTL(x) ((x) << S_IPSEC_IDX_CTL)
+#define F_IPSEC_IDX_CTL V_IPSEC_IDX_CTL(1U)
+
+#define A_TLS_TX_CGEN 0x447f0
+
+#define S_CHCGEN 0
+#define M_CHCGEN 0x3fU
+#define V_CHCGEN(x) ((x) << S_CHCGEN)
+#define G_CHCGEN(x) (((x) >> S_CHCGEN) & M_CHCGEN)
+
+#define A_TLS_TX_IND_ADDR 0x447f8
+
+#define S_T7_3_ADDR 0
+#define M_T7_3_ADDR 0xfffU
+#define V_T7_3_ADDR(x) ((x) << S_T7_3_ADDR)
+#define G_T7_3_ADDR(x) (((x) >> S_T7_3_ADDR) & M_T7_3_ADDR)
+
+#define A_TLS_TX_IND_DATA 0x447fc
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_LO 0x0
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_HI 0x1
+#define A_TLS_TX_CH_IND_ING_PKT_CNT 0x2
+#define A_TLS_TX_CH_IND_DISPATCH_PKT_CNT 0x4
+#define A_TLS_TX_CH_IND_ERROR_CNTS0 0x5
+#define A_TLS_TX_CH_IND_DEC_ERROR_CNTS 0x7
+#define A_TLS_TX_CH_IND_DBG_SPP_CFG 0x1f
+
+#define S_DIS_IF_ERR 11
+#define V_DIS_IF_ERR(x) ((x) << S_DIS_IF_ERR)
+#define F_DIS_IF_ERR V_DIS_IF_ERR(1U)
+
+#define S_DIS_ERR_MSG 10
+#define V_DIS_ERR_MSG(x) ((x) << S_DIS_ERR_MSG)
+#define F_DIS_ERR_MSG V_DIS_ERR_MSG(1U)
+
+#define S_DIS_BP_SEQF 9
+#define V_DIS_BP_SEQF(x) ((x) << S_DIS_BP_SEQF)
+#define F_DIS_BP_SEQF V_DIS_BP_SEQF(1U)
+
+#define S_DIS_BP_LENF 8
+#define V_DIS_BP_LENF(x) ((x) << S_DIS_BP_LENF)
+#define F_DIS_BP_LENF V_DIS_BP_LENF(1U)
+
+#define S_DIS_KEX_ERR 6
+#define V_DIS_KEX_ERR(x) ((x) << S_DIS_KEX_ERR)
+#define F_DIS_KEX_ERR V_DIS_KEX_ERR(1U)
+
+#define S_CLR_STS 5
+#define V_CLR_STS(x) ((x) << S_CLR_STS)
+#define F_CLR_STS V_CLR_STS(1U)
+
+#define S_TGL_CNT 4
+#define V_TGL_CNT(x) ((x) << S_TGL_CNT)
+#define F_TGL_CNT V_TGL_CNT(1U)
+
+#define S_ENB_PAZ 3
+#define V_ENB_PAZ(x) ((x) << S_ENB_PAZ)
+#define F_ENB_PAZ V_ENB_PAZ(1U)
+
+#define S_DIS_NOP 2
+#define V_DIS_NOP(x) ((x) << S_DIS_NOP)
+#define F_DIS_NOP V_DIS_NOP(1U)
+
+#define S_DIS_CPL_ERR 1
+#define V_DIS_CPL_ERR(x) ((x) << S_DIS_CPL_ERR)
+#define F_DIS_CPL_ERR V_DIS_CPL_ERR(1U)
+
+#define S_DIS_OFF_ERR 0
+#define V_DIS_OFF_ERR(x) ((x) << S_DIS_OFF_ERR)
+#define F_DIS_OFF_ERR V_DIS_OFF_ERR(1U)
+
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID0 0x20
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID1 0x21
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID2 0x22
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID3 0x23
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID4 0x24
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID5 0x25
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID6 0x26
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID7 0x27
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W0 0x28
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W1 0x29
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W2 0x2a
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W3 0x2b
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W0 0x2c
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W1 0x2d
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W2 0x2e
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W3 0x2f
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_ERR 0x30
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_BP 0x31
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_M 0x32
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_L 0x33
+#define A_TLS_TX_CH_IND_DBG_PKT_STAT 0x3f
+
+/* registers for module CRYPTO_1 */
+#define CRYPTO_1_BASE_ADDR 0x45000
+
+/* registers for module CRYPTO_KEY */
+#define CRYPTO_KEY_BASE_ADDR 0x46000
+
+#define A_CRYPTO_KEY_CONFIG 0x46000
+
+#define S_ESNWIN 1
+#define M_ESNWIN 0x7U
+#define V_ESNWIN(x) ((x) << S_ESNWIN)
+#define G_ESNWIN(x) (((x) >> S_ESNWIN) & M_ESNWIN)
+
+#define S_INGKEY96 0
+#define V_INGKEY96(x) ((x) << S_INGKEY96)
+#define F_INGKEY96 V_INGKEY96(1U)
+
+#define A_CRYPTO_KEY_RST 0x46004
+
+#define S_CORE1RST 1
+#define V_CORE1RST(x) ((x) << S_CORE1RST)
+#define F_CORE1RST V_CORE1RST(1U)
+
+#define S_CORE0RST 0
+#define V_CORE0RST(x) ((x) << S_CORE0RST)
+#define F_CORE0RST V_CORE0RST(1U)
+
+#define A_CRYPTO_KEY_INT_ENABLE 0x46008
+
+#define S_MA_FIFO_PERR 22
+#define V_MA_FIFO_PERR(x) ((x) << S_MA_FIFO_PERR)
+#define F_MA_FIFO_PERR V_MA_FIFO_PERR(1U)
+
+#define S_MA_RSP_PERR 21
+#define V_MA_RSP_PERR(x) ((x) << S_MA_RSP_PERR)
+#define F_MA_RSP_PERR V_MA_RSP_PERR(1U)
+
+#define S_ING_CACHE_DATA_PERR 19
+#define V_ING_CACHE_DATA_PERR(x) ((x) << S_ING_CACHE_DATA_PERR)
+#define F_ING_CACHE_DATA_PERR V_ING_CACHE_DATA_PERR(1U)
+
+#define S_ING_CACHE_TAG_PERR 18
+#define V_ING_CACHE_TAG_PERR(x) ((x) << S_ING_CACHE_TAG_PERR)
+#define F_ING_CACHE_TAG_PERR V_ING_CACHE_TAG_PERR(1U)
+
+#define S_LKP_KEY_REQ_PERR 17
+#define V_LKP_KEY_REQ_PERR(x) ((x) << S_LKP_KEY_REQ_PERR)
+#define F_LKP_KEY_REQ_PERR V_LKP_KEY_REQ_PERR(1U)
+
+#define S_LKP_CLIP_TCAM_PERR 16
+#define V_LKP_CLIP_TCAM_PERR(x) ((x) << S_LKP_CLIP_TCAM_PERR)
+#define F_LKP_CLIP_TCAM_PERR V_LKP_CLIP_TCAM_PERR(1U)
+
+#define S_LKP_MAIN_TCAM_PERR 15
+#define V_LKP_MAIN_TCAM_PERR(x) ((x) << S_LKP_MAIN_TCAM_PERR)
+#define F_LKP_MAIN_TCAM_PERR V_LKP_MAIN_TCAM_PERR(1U)
+
+#define S_EGR_KEY_REQ_PERR 14
+#define V_EGR_KEY_REQ_PERR(x) ((x) << S_EGR_KEY_REQ_PERR)
+#define F_EGR_KEY_REQ_PERR V_EGR_KEY_REQ_PERR(1U)
+
+#define S_EGR_CACHE_DATA_PERR 13
+#define V_EGR_CACHE_DATA_PERR(x) ((x) << S_EGR_CACHE_DATA_PERR)
+#define F_EGR_CACHE_DATA_PERR V_EGR_CACHE_DATA_PERR(1U)
+
+#define S_EGR_CACHE_TAG_PERR 12
+#define V_EGR_CACHE_TAG_PERR(x) ((x) << S_EGR_CACHE_TAG_PERR)
+#define F_EGR_CACHE_TAG_PERR V_EGR_CACHE_TAG_PERR(1U)
+
+#define S_CIM_PERR 11
+#define V_CIM_PERR(x) ((x) << S_CIM_PERR)
+#define F_CIM_PERR V_CIM_PERR(1U)
+
+#define S_MA_INV_RSP_TAG 10
+#define V_MA_INV_RSP_TAG(x) ((x) << S_MA_INV_RSP_TAG)
+#define F_MA_INV_RSP_TAG V_MA_INV_RSP_TAG(1U)
+
+#define S_ING_KEY_RANGE_ERR 9
+#define V_ING_KEY_RANGE_ERR(x) ((x) << S_ING_KEY_RANGE_ERR)
+#define F_ING_KEY_RANGE_ERR V_ING_KEY_RANGE_ERR(1U)
+
+#define S_ING_MFIFO_OVFL 8
+#define V_ING_MFIFO_OVFL(x) ((x) << S_ING_MFIFO_OVFL)
+#define F_ING_MFIFO_OVFL V_ING_MFIFO_OVFL(1U)
+
+#define S_LKP_REQ_OVFL 7
+#define V_LKP_REQ_OVFL(x) ((x) << S_LKP_REQ_OVFL)
+#define F_LKP_REQ_OVFL V_LKP_REQ_OVFL(1U)
+
+#define S_EOK_WAIT_ERR 6
+#define V_EOK_WAIT_ERR(x) ((x) << S_EOK_WAIT_ERR)
+#define F_EOK_WAIT_ERR V_EOK_WAIT_ERR(1U)
+
+#define S_EGR_KEY_RANGE_ERR 5
+#define V_EGR_KEY_RANGE_ERR(x) ((x) << S_EGR_KEY_RANGE_ERR)
+#define F_EGR_KEY_RANGE_ERR V_EGR_KEY_RANGE_ERR(1U)
+
+#define S_EGR_MFIFO_OVFL 4
+#define V_EGR_MFIFO_OVFL(x) ((x) << S_EGR_MFIFO_OVFL)
+#define F_EGR_MFIFO_OVFL V_EGR_MFIFO_OVFL(1U)
+
+#define S_SEQ_WRAP_HP_OVFL 3
+#define V_SEQ_WRAP_HP_OVFL(x) ((x) << S_SEQ_WRAP_HP_OVFL)
+#define F_SEQ_WRAP_HP_OVFL V_SEQ_WRAP_HP_OVFL(1U)
+
+#define S_SEQ_WRAP_LP_OVFL 2
+#define V_SEQ_WRAP_LP_OVFL(x) ((x) << S_SEQ_WRAP_LP_OVFL)
+#define F_SEQ_WRAP_LP_OVFL V_SEQ_WRAP_LP_OVFL(1U)
+
+#define S_EGR_SEQ_WRAP_HP 1
+#define V_EGR_SEQ_WRAP_HP(x) ((x) << S_EGR_SEQ_WRAP_HP)
+#define F_EGR_SEQ_WRAP_HP V_EGR_SEQ_WRAP_HP(1U)
+
+#define S_EGR_SEQ_WRAP_LP 0
+#define V_EGR_SEQ_WRAP_LP(x) ((x) << S_EGR_SEQ_WRAP_LP)
+#define F_EGR_SEQ_WRAP_LP V_EGR_SEQ_WRAP_LP(1U)
+
+#define A_CRYPTO_KEY_INT_CAUSE 0x4600c
+#define A_CRYPTO_KEY_PERR_ENABLE 0x46010
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_LP_KEY_ID 0x46018
+
+#define S_KEY_VALID 31
+#define V_KEY_VALID(x) ((x) << S_KEY_VALID)
+#define F_KEY_VALID V_KEY_VALID(1U)
+
+#define S_KEY_ID 0
+#define M_KEY_ID 0x7fffffffU
+#define V_KEY_ID(x) ((x) << S_KEY_ID)
+#define G_KEY_ID(x) (((x) >> S_KEY_ID) & M_KEY_ID)
+
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_HP_KEY_ID 0x4601c
+#define A_CRYPTO_KEY_TCAM_DATA0 0x46020
+#define A_CRYPTO_KEY_TCAM_DATA1 0x46024
+#define A_CRYPTO_KEY_TCAM_DATA2 0x46028
+#define A_CRYPTO_KEY_TCAM_DATA3 0x4602c
+#define A_CRYPTO_KEY_TCAM_CTL 0x46030
+
+#define S_SRCHMHIT 21
+#define V_SRCHMHIT(x) ((x) << S_SRCHMHIT)
+#define F_SRCHMHIT V_SRCHMHIT(1U)
+
+#define S_T7_BUSY 20
+#define V_T7_BUSY(x) ((x) << S_T7_BUSY)
+#define F_T7_BUSY V_T7_BUSY(1U)
+
+#define S_SRCHHIT 19
+#define V_SRCHHIT(x) ((x) << S_SRCHHIT)
+#define F_SRCHHIT V_SRCHHIT(1U)
+
+#define S_IPVERSION 18
+#define V_IPVERSION(x) ((x) << S_IPVERSION)
+#define F_IPVERSION V_IPVERSION(1U)
+
+#define S_BITSEL 17
+#define V_BITSEL(x) ((x) << S_BITSEL)
+#define F_BITSEL V_BITSEL(1U)
+
+#define S_TCAMSEL 16
+#define V_TCAMSEL(x) ((x) << S_TCAMSEL)
+#define F_TCAMSEL V_TCAMSEL(1U)
+
+#define S_CMDTYPE 14
+#define M_CMDTYPE 0x3U
+#define V_CMDTYPE(x) ((x) << S_CMDTYPE)
+#define G_CMDTYPE(x) (((x) >> S_CMDTYPE) & M_CMDTYPE)
+
+#define S_TCAMINDEX 0
+#define M_TCAMINDEX 0x3fffU
+#define V_TCAMINDEX(x) ((x) << S_TCAMINDEX)
+#define G_TCAMINDEX(x) (((x) >> S_TCAMINDEX) & M_TCAMINDEX)
+
+#define A_CRYPTO_KEY_TCAM_CONFIG 0x46034
+
+#define S_T7_CLTCAMDEEPSLEEP_STAT 3
+#define V_T7_CLTCAMDEEPSLEEP_STAT(x) ((x) << S_T7_CLTCAMDEEPSLEEP_STAT)
+#define F_T7_CLTCAMDEEPSLEEP_STAT V_T7_CLTCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_TCAMDEEPSLEEP_STAT 2
+#define V_T7_TCAMDEEPSLEEP_STAT(x) ((x) << S_T7_TCAMDEEPSLEEP_STAT)
+#define F_T7_TCAMDEEPSLEEP_STAT V_T7_TCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_CLTCAMDEEPSLEEP 1
+#define V_T7_CLTCAMDEEPSLEEP(x) ((x) << S_T7_CLTCAMDEEPSLEEP)
+#define F_T7_CLTCAMDEEPSLEEP V_T7_CLTCAMDEEPSLEEP(1U)
+
+#define S_T7_TCAMDEEPSLEEP 0
+#define V_T7_TCAMDEEPSLEEP(x) ((x) << S_T7_TCAMDEEPSLEEP)
+#define F_T7_TCAMDEEPSLEEP V_T7_TCAMDEEPSLEEP(1U)
+
+#define A_CRYPTO_KEY_TX_CMM_CONFIG 0x46040
+#define A_CRYPTO_KEY_TX_TNL_BASE 0x46044
+#define A_CRYPTO_KEY_TX_TRN_BASE 0x46048
+#define A_CRYPTO_KEY_TX_MAX_KEYS 0x4604c
+
+#define S_TNL_MAX 16
+#define M_TNL_MAX 0xffffU
+#define V_TNL_MAX(x) ((x) << S_TNL_MAX)
+#define G_TNL_MAX(x) (((x) >> S_TNL_MAX) & M_TNL_MAX)
+
+#define S_TRN_MAX 0
+#define M_TRN_MAX 0xffffU
+#define V_TRN_MAX(x) ((x) << S_TRN_MAX)
+#define G_TRN_MAX(x) (((x) >> S_TRN_MAX) & M_TRN_MAX)
+
+#define A_CRYPTO_KEY_TX_SEQ_STAT 0x46050
+
+#define S_ESN 24
+#define V_ESN(x) ((x) << S_ESN)
+#define F_ESN V_ESN(1U)
+
+#define S_SEQHI 20
+#define M_SEQHI 0xfU
+#define V_SEQHI(x) ((x) << S_SEQHI)
+#define G_SEQHI(x) (((x) >> S_SEQHI) & M_SEQHI)
+
+#define S_KEYID 0
+#define M_KEYID 0xfffffU
+#define V_KEYID(x) ((x) << S_KEYID)
+#define G_KEYID(x) (((x) >> S_KEYID) & M_KEYID)
+
+#define A_CRYPTO_KEY_RX_CMM_CONFIG 0x46060
+#define A_CRYPTO_KEY_RX_BASE 0x46064
+#define A_CRYPTO_KEY_RX_MAX_KEYS 0x46068
+
+#define S_MAXKEYS 0
+#define M_MAXKEYS 0xffffU
+#define V_MAXKEYS(x) ((x) << S_MAXKEYS)
+#define G_MAXKEYS(x) (((x) >> S_MAXKEYS) & M_MAXKEYS)
+
+#define A_CRYPTO_KEY_CRYPTO_REVISION 0x4606c
+#define A_CRYPTO_KEY_RX_SEQ_STAT 0x46070
+#define A_CRYPTO_KEY_TCAM_BIST_CTRL 0x46074
+#define A_CRYPTO_KEY_TCAM_BIST_CB_PASS 0x46078
+#define A_CRYPTO_KEY_TCAM_BIST_CB_BUSY 0x4607c
+#define A_CRYPTO_KEY_DBG_SEL_CTRL 0x46080
+
+#define S_SEL_OVR_EN 16
+#define V_SEL_OVR_EN(x) ((x) << S_SEL_OVR_EN)
+#define F_SEL_OVR_EN V_SEL_OVR_EN(1U)
+
+#define S_T7_1_SELH 8
+#define M_T7_1_SELH 0xffU
+#define V_T7_1_SELH(x) ((x) << S_T7_1_SELH)
+#define G_T7_1_SELH(x) (((x) >> S_T7_1_SELH) & M_T7_1_SELH)
+
+#define S_T7_1_SELL 0
+#define M_T7_1_SELL 0xffU
+#define V_T7_1_SELL(x) ((x) << S_T7_1_SELL)
+#define G_T7_1_SELL(x) (((x) >> S_T7_1_SELL) & M_T7_1_SELL)
+
+#define A_CRYPTO_KEY_DBG_SELL_DATA 0x46084
+#define A_CRYPTO_KEY_DBG_SELH_DATA 0x46088
+
+/* registers for module ARM */
+#define ARM_BASE_ADDR 0x47000
+
+#define A_ARM_CPU_POR_RST 0x47000
+
+#define S_CPUPORRSTN3 3
+#define V_CPUPORRSTN3(x) ((x) << S_CPUPORRSTN3)
+#define F_CPUPORRSTN3 V_CPUPORRSTN3(1U)
+
+#define S_CPUPORRSTN2 2
+#define V_CPUPORRSTN2(x) ((x) << S_CPUPORRSTN2)
+#define F_CPUPORRSTN2 V_CPUPORRSTN2(1U)
+
+#define S_CPUPORRSTN1 1
+#define V_CPUPORRSTN1(x) ((x) << S_CPUPORRSTN1)
+#define F_CPUPORRSTN1 V_CPUPORRSTN1(1U)
+
+#define S_CPUPORRSTN0 0
+#define V_CPUPORRSTN0(x) ((x) << S_CPUPORRSTN0)
+#define F_CPUPORRSTN0 V_CPUPORRSTN0(1U)
+
+#define A_ARM_CPU_CORE_RST 0x47004
+
+#define S_CPUCORERSTN3 3
+#define V_CPUCORERSTN3(x) ((x) << S_CPUCORERSTN3)
+#define F_CPUCORERSTN3 V_CPUCORERSTN3(1U)
+
+#define S_CPUCORERSTN2 2
+#define V_CPUCORERSTN2(x) ((x) << S_CPUCORERSTN2)
+#define F_CPUCORERSTN2 V_CPUCORERSTN2(1U)
+
+#define S_CPUCORERSTN1 1
+#define V_CPUCORERSTN1(x) ((x) << S_CPUCORERSTN1)
+#define F_CPUCORERSTN1 V_CPUCORERSTN1(1U)
+
+#define S_CPUCORERSTN0 0
+#define V_CPUCORERSTN0(x) ((x) << S_CPUCORERSTN0)
+#define F_CPUCORERSTN0 V_CPUCORERSTN0(1U)
+
+#define A_ARM_CPU_WARM_RST_REQ 0x47008
+
+#define S_CPUWARMRSTREQ3 3
+#define V_CPUWARMRSTREQ3(x) ((x) << S_CPUWARMRSTREQ3)
+#define F_CPUWARMRSTREQ3 V_CPUWARMRSTREQ3(1U)
+
+#define S_CPUWARMRSTREQ2 2
+#define V_CPUWARMRSTREQ2(x) ((x) << S_CPUWARMRSTREQ2)
+#define F_CPUWARMRSTREQ2 V_CPUWARMRSTREQ2(1U)
+
+#define S_CPUWARMRSTREQ1 1
+#define V_CPUWARMRSTREQ1(x) ((x) << S_CPUWARMRSTREQ1)
+#define F_CPUWARMRSTREQ1 V_CPUWARMRSTREQ1(1U)
+
+#define S_CPUWARMRSTREQ0 0
+#define V_CPUWARMRSTREQ0(x) ((x) << S_CPUWARMRSTREQ0)
+#define F_CPUWARMRSTREQ0 V_CPUWARMRSTREQ0(1U)
+
+#define A_ARM_CPU_L2_RST 0x4700c
+
+#define S_CPUL2RSTN 0
+#define V_CPUL2RSTN(x) ((x) << S_CPUL2RSTN)
+#define F_CPUL2RSTN V_CPUL2RSTN(1U)
+
+#define A_ARM_CPU_L2_RST_DIS 0x47010
+
+#define S_CPUL2RSTDISABLE 0
+#define V_CPUL2RSTDISABLE(x) ((x) << S_CPUL2RSTDISABLE)
+#define F_CPUL2RSTDISABLE V_CPUL2RSTDISABLE(1U)
+
+#define A_ARM_CPU_PRESET_DBG 0x47014
+
+#define S_CPUPRESETDBGN 0
+#define V_CPUPRESETDBGN(x) ((x) << S_CPUPRESETDBGN)
+#define F_CPUPRESETDBGN V_CPUPRESETDBGN(1U)
+
+#define A_ARM_PL_DMA_AW_OFFSET 0x47018
+
+#define S_PL_DMA_AW_OFFSET 0
+#define M_PL_DMA_AW_OFFSET 0x3fffffffU
+#define V_PL_DMA_AW_OFFSET(x) ((x) << S_PL_DMA_AW_OFFSET)
+#define G_PL_DMA_AW_OFFSET(x) (((x) >> S_PL_DMA_AW_OFFSET) & M_PL_DMA_AW_OFFSET)
+
+#define A_ARM_PL_DMA_AR_OFFSET 0x4701c
+
+#define S_PL_DMA_AR_OFFSET 0
+#define M_PL_DMA_AR_OFFSET 0x3fffffffU
+#define V_PL_DMA_AR_OFFSET(x) ((x) << S_PL_DMA_AR_OFFSET)
+#define G_PL_DMA_AR_OFFSET(x) (((x) >> S_PL_DMA_AR_OFFSET) & M_PL_DMA_AR_OFFSET)
+
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR0 0x47020
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR1 0x47024
+
+#define S_CPURESETVECBA1 0
+#define M_CPURESETVECBA1 0x3ffU
+#define V_CPURESETVECBA1(x) ((x) << S_CPURESETVECBA1)
+#define G_CPURESETVECBA1(x) (((x) >> S_CPURESETVECBA1) & M_CPURESETVECBA1)
+
+#define A_ARM_CPU_PMU_EVENT 0x47028
+
+#define S_CPUPMUEVENT 0
+#define M_CPUPMUEVENT 0x1ffffffU
+#define V_CPUPMUEVENT(x) ((x) << S_CPUPMUEVENT)
+#define G_CPUPMUEVENT(x) (((x) >> S_CPUPMUEVENT) & M_CPUPMUEVENT)
+
+#define A_ARM_DMA_RST 0x4702c
+
+#define S_DMA_PL_RST_N 0
+#define V_DMA_PL_RST_N(x) ((x) << S_DMA_PL_RST_N)
+#define F_DMA_PL_RST_N V_DMA_PL_RST_N(1U)
+
+#define A_ARM_PLM_RID_CFG 0x4703c
+#define A_ARM_PLM_EROM_CFG 0x47040
+#define A_ARM_PL_ARM_HDR_CFG 0x4704c
+#define A_ARM_RC_INT_STATUS 0x4705c
+
+#define S_RC_INT_STATUS_REG 0
+#define M_RC_INT_STATUS_REG 0x3fU
+#define V_RC_INT_STATUS_REG(x) ((x) << S_RC_INT_STATUS_REG)
+#define G_RC_INT_STATUS_REG(x) (((x) >> S_RC_INT_STATUS_REG) & M_RC_INT_STATUS_REG)
+
+#define A_ARM_CPU_DBG_PWR_UP_REQ 0x47060
+
+#define S_CPUDBGPWRUPREQ3 3
+#define V_CPUDBGPWRUPREQ3(x) ((x) << S_CPUDBGPWRUPREQ3)
+#define F_CPUDBGPWRUPREQ3 V_CPUDBGPWRUPREQ3(1U)
+
+#define S_CPUDBGPWRUPREQ2 2
+#define V_CPUDBGPWRUPREQ2(x) ((x) << S_CPUDBGPWRUPREQ2)
+#define F_CPUDBGPWRUPREQ2 V_CPUDBGPWRUPREQ2(1U)
+
+#define S_CPUDBGPWRUPREQ1 1
+#define V_CPUDBGPWRUPREQ1(x) ((x) << S_CPUDBGPWRUPREQ1)
+#define F_CPUDBGPWRUPREQ1 V_CPUDBGPWRUPREQ1(1U)
+
+#define S_CPUDBGPWRUPREQ0 0
+#define V_CPUDBGPWRUPREQ0(x) ((x) << S_CPUDBGPWRUPREQ0)
+#define F_CPUDBGPWRUPREQ0 V_CPUDBGPWRUPREQ0(1U)
+
+#define A_ARM_CPU_STANDBY_WFE_WFI 0x47064
+
+#define S_CPUSTANDBYWFIL2 8
+#define V_CPUSTANDBYWFIL2(x) ((x) << S_CPUSTANDBYWFIL2)
+#define F_CPUSTANDBYWFIL2 V_CPUSTANDBYWFIL2(1U)
+
+#define S_CPUSTANDBYWFI3 7
+#define V_CPUSTANDBYWFI3(x) ((x) << S_CPUSTANDBYWFI3)
+#define F_CPUSTANDBYWFI3 V_CPUSTANDBYWFI3(1U)
+
+#define S_CPUSTANDBYWFI2 6
+#define V_CPUSTANDBYWFI2(x) ((x) << S_CPUSTANDBYWFI2)
+#define F_CPUSTANDBYWFI2 V_CPUSTANDBYWFI2(1U)
+
+#define S_CPUSTANDBYWFI1 5
+#define V_CPUSTANDBYWFI1(x) ((x) << S_CPUSTANDBYWFI1)
+#define F_CPUSTANDBYWFI1 V_CPUSTANDBYWFI1(1U)
+
+#define S_CPUSTANDBYWFI0 4
+#define V_CPUSTANDBYWFI0(x) ((x) << S_CPUSTANDBYWFI0)
+#define F_CPUSTANDBYWFI0 V_CPUSTANDBYWFI0(1U)
+
+#define S_CPUSTANDBYWFE3 3
+#define V_CPUSTANDBYWFE3(x) ((x) << S_CPUSTANDBYWFE3)
+#define F_CPUSTANDBYWFE3 V_CPUSTANDBYWFE3(1U)
+
+#define S_CPUSTANDBYWFE2 2
+#define V_CPUSTANDBYWFE2(x) ((x) << S_CPUSTANDBYWFE2)
+#define F_CPUSTANDBYWFE2 V_CPUSTANDBYWFE2(1U)
+
+#define S_CPUSTANDBYWFE1 1
+#define V_CPUSTANDBYWFE1(x) ((x) << S_CPUSTANDBYWFE1)
+#define F_CPUSTANDBYWFE1 V_CPUSTANDBYWFE1(1U)
+
+#define S_CPUSTANDBYWFE0 0
+#define V_CPUSTANDBYWFE0(x) ((x) << S_CPUSTANDBYWFE0)
+#define F_CPUSTANDBYWFE0 V_CPUSTANDBYWFE0(1U)
+
+#define A_ARM_CPU_SMPEN 0x47068
+
+#define S_CPUSMPEN3 3
+#define V_CPUSMPEN3(x) ((x) << S_CPUSMPEN3)
+#define F_CPUSMPEN3 V_CPUSMPEN3(1U)
+
+#define S_CPUSMPEN2 2
+#define V_CPUSMPEN2(x) ((x) << S_CPUSMPEN2)
+#define F_CPUSMPEN2 V_CPUSMPEN2(1U)
+
+#define S_CPUSMPEN1 1
+#define V_CPUSMPEN1(x) ((x) << S_CPUSMPEN1)
+#define F_CPUSMPEN1 V_CPUSMPEN1(1U)
+
+#define S_CPUSMPEN0 0
+#define V_CPUSMPEN0(x) ((x) << S_CPUSMPEN0)
+#define F_CPUSMPEN0 V_CPUSMPEN0(1U)
+
+#define A_ARM_CPU_QACTIVE 0x4706c
+
+#define S_CPUQACTIVE3 3
+#define V_CPUQACTIVE3(x) ((x) << S_CPUQACTIVE3)
+#define F_CPUQACTIVE3 V_CPUQACTIVE3(1U)
+
+#define S_CPUQACTIVE2 2
+#define V_CPUQACTIVE2(x) ((x) << S_CPUQACTIVE2)
+#define F_CPUQACTIVE2 V_CPUQACTIVE2(1U)
+
+#define S_CPUQACTIVE1 1
+#define V_CPUQACTIVE1(x) ((x) << S_CPUQACTIVE1)
+#define F_CPUQACTIVE1 V_CPUQACTIVE1(1U)
+
+#define S_CPUQACTIVE0 0
+#define V_CPUQACTIVE0(x) ((x) << S_CPUQACTIVE0)
+#define F_CPUQACTIVE0 V_CPUQACTIVE0(1U)
+
+#define A_ARM_CPU_QREQ 0x47070
+
+#define S_CPUL2FLUSHREQ 5
+#define V_CPUL2FLUSHREQ(x) ((x) << S_CPUL2FLUSHREQ)
+#define F_CPUL2FLUSHREQ V_CPUL2FLUSHREQ(1U)
+
+#define S_CPUL2QREQN 4
+#define V_CPUL2QREQN(x) ((x) << S_CPUL2QREQN)
+#define F_CPUL2QREQN V_CPUL2QREQN(1U)
+
+#define S_CPUQREQ3N 3
+#define V_CPUQREQ3N(x) ((x) << S_CPUQREQ3N)
+#define F_CPUQREQ3N V_CPUQREQ3N(1U)
+
+#define S_CPUQREQ2N 2
+#define V_CPUQREQ2N(x) ((x) << S_CPUQREQ2N)
+#define F_CPUQREQ2N V_CPUQREQ2N(1U)
+
+#define S_CPUQREQ1N 1
+#define V_CPUQREQ1N(x) ((x) << S_CPUQREQ1N)
+#define F_CPUQREQ1N V_CPUQREQ1N(1U)
+
+#define S_CPUQREQ0N 0
+#define V_CPUQREQ0N(x) ((x) << S_CPUQREQ0N)
+#define F_CPUQREQ0N V_CPUQREQ0N(1U)
+
+#define A_ARM_CPU_QREQ_STATUS 0x47074
+
+#define S_CPUL2FLUSHDONE 10
+#define V_CPUL2FLUSHDONE(x) ((x) << S_CPUL2FLUSHDONE)
+#define F_CPUL2FLUSHDONE V_CPUL2FLUSHDONE(1U)
+
+#define S_CPUL2QDENY 9
+#define V_CPUL2QDENY(x) ((x) << S_CPUL2QDENY)
+#define F_CPUL2QDENY V_CPUL2QDENY(1U)
+
+#define S_CPUL2QACCEPTN 8
+#define V_CPUL2QACCEPTN(x) ((x) << S_CPUL2QACCEPTN)
+#define F_CPUL2QACCEPTN V_CPUL2QACCEPTN(1U)
+
+#define S_CPUQDENY3 7
+#define V_CPUQDENY3(x) ((x) << S_CPUQDENY3)
+#define F_CPUQDENY3 V_CPUQDENY3(1U)
+
+#define S_CPUQDENY2 6
+#define V_CPUQDENY2(x) ((x) << S_CPUQDENY2)
+#define F_CPUQDENY2 V_CPUQDENY2(1U)
+
+#define S_CPUQDENY1 5
+#define V_CPUQDENY1(x) ((x) << S_CPUQDENY1)
+#define F_CPUQDENY1 V_CPUQDENY1(1U)
+
+#define S_CPUQDENY0 4
+#define V_CPUQDENY0(x) ((x) << S_CPUQDENY0)
+#define F_CPUQDENY0 V_CPUQDENY0(1U)
+
+#define S_CPUQACCEPT3N 3
+#define V_CPUQACCEPT3N(x) ((x) << S_CPUQACCEPT3N)
+#define F_CPUQACCEPT3N V_CPUQACCEPT3N(1U)
+
+#define S_CPUQACCEPT2N 2
+#define V_CPUQACCEPT2N(x) ((x) << S_CPUQACCEPT2N)
+#define F_CPUQACCEPT2N V_CPUQACCEPT2N(1U)
+
+#define S_CPUQACCEPT1N 1
+#define V_CPUQACCEPT1N(x) ((x) << S_CPUQACCEPT1N)
+#define F_CPUQACCEPT1N V_CPUQACCEPT1N(1U)
+
+#define S_CPUQACCEPT0N 0
+#define V_CPUQACCEPT0N(x) ((x) << S_CPUQACCEPT0N)
+#define F_CPUQACCEPT0N V_CPUQACCEPT0N(1U)
+
+#define A_ARM_CPU_DBG_EN 0x47078
+
+#define S_CPUDBGL1RSTDISABLE 28
+#define V_CPUDBGL1RSTDISABLE(x) ((x) << S_CPUDBGL1RSTDISABLE)
+#define F_CPUDBGL1RSTDISABLE V_CPUDBGL1RSTDISABLE(1U)
+
+#define S_CPUDBGRSTREQ3 27
+#define V_CPUDBGRSTREQ3(x) ((x) << S_CPUDBGRSTREQ3)
+#define F_CPUDBGRSTREQ3 V_CPUDBGRSTREQ3(1U)
+
+#define S_CPUDBGRSTREQ2 26
+#define V_CPUDBGRSTREQ2(x) ((x) << S_CPUDBGRSTREQ2)
+#define F_CPUDBGRSTREQ2 V_CPUDBGRSTREQ2(1U)
+
+#define S_CPUDBGRSTREQ1 25
+#define V_CPUDBGRSTREQ1(x) ((x) << S_CPUDBGRSTREQ1)
+#define F_CPUDBGRSTREQ1 V_CPUDBGRSTREQ1(1U)
+
+#define S_CPUDBGRSTREQ0 24
+#define V_CPUDBGRSTREQ0(x) ((x) << S_CPUDBGRSTREQ0)
+#define F_CPUDBGRSTREQ0 V_CPUDBGRSTREQ0(1U)
+
+#define S_CPUDBGPWRDUP3 23
+#define V_CPUDBGPWRDUP3(x) ((x) << S_CPUDBGPWRDUP3)
+#define F_CPUDBGPWRDUP3 V_CPUDBGPWRDUP3(1U)
+
+#define S_CPUDBGPWRDUP2 22
+#define V_CPUDBGPWRDUP2(x) ((x) << S_CPUDBGPWRDUP2)
+#define F_CPUDBGPWRDUP2 V_CPUDBGPWRDUP2(1U)
+
+#define S_CPUDBGPWRDUP1 21
+#define V_CPUDBGPWRDUP1(x) ((x) << S_CPUDBGPWRDUP1)
+#define F_CPUDBGPWRDUP1 V_CPUDBGPWRDUP1(1U)
+
+#define S_CPUDBGPWRDUP0 20
+#define V_CPUDBGPWRDUP0(x) ((x) << S_CPUDBGPWRDUP0)
+#define F_CPUDBGPWRDUP0 V_CPUDBGPWRDUP0(1U)
+
+#define S_CPUEXTDBGREQ3 19
+#define V_CPUEXTDBGREQ3(x) ((x) << S_CPUEXTDBGREQ3)
+#define F_CPUEXTDBGREQ3 V_CPUEXTDBGREQ3(1U)
+
+#define S_CPUEXTDBGREQ2 18
+#define V_CPUEXTDBGREQ2(x) ((x) << S_CPUEXTDBGREQ2)
+#define F_CPUEXTDBGREQ2 V_CPUEXTDBGREQ2(1U)
+
+#define S_CPUEXTDBGREQ1 17
+#define V_CPUEXTDBGREQ1(x) ((x) << S_CPUEXTDBGREQ1)
+#define F_CPUEXTDBGREQ1 V_CPUEXTDBGREQ1(1U)
+
+#define S_CPUEXTDBGREQ0 16
+#define V_CPUEXTDBGREQ0(x) ((x) << S_CPUEXTDBGREQ0)
+#define F_CPUEXTDBGREQ0 V_CPUEXTDBGREQ0(1U)
+
+#define S_CPUSPNIDEN3 15
+#define V_CPUSPNIDEN3(x) ((x) << S_CPUSPNIDEN3)
+#define F_CPUSPNIDEN3 V_CPUSPNIDEN3(1U)
+
+#define S_CPUSPNIDEN2 14
+#define V_CPUSPNIDEN2(x) ((x) << S_CPUSPNIDEN2)
+#define F_CPUSPNIDEN2 V_CPUSPNIDEN2(1U)
+
+#define S_CPUSPNIDEN1 13
+#define V_CPUSPNIDEN1(x) ((x) << S_CPUSPNIDEN1)
+#define F_CPUSPNIDEN1 V_CPUSPNIDEN1(1U)
+
+#define S_CPUSPNIDEN0 12
+#define V_CPUSPNIDEN0(x) ((x) << S_CPUSPNIDEN0)
+#define F_CPUSPNIDEN0 V_CPUSPNIDEN0(1U)
+
+#define S_CPUSPDBGEN3 11
+#define V_CPUSPDBGEN3(x) ((x) << S_CPUSPDBGEN3)
+#define F_CPUSPDBGEN3 V_CPUSPDBGEN3(1U)
+
+#define S_CPUSPDBGEN2 10
+#define V_CPUSPDBGEN2(x) ((x) << S_CPUSPDBGEN2)
+#define F_CPUSPDBGEN2 V_CPUSPDBGEN2(1U)
+
+#define S_CPUSPDBGEN1 9
+#define V_CPUSPDBGEN1(x) ((x) << S_CPUSPDBGEN1)
+#define F_CPUSPDBGEN1 V_CPUSPDBGEN1(1U)
+
+#define S_CPUSPDBGEN0 8
+#define V_CPUSPDBGEN0(x) ((x) << S_CPUSPDBGEN0)
+#define F_CPUSPDBGEN0 V_CPUSPDBGEN0(1U)
+
+#define S_CPUNIDEN3 7
+#define V_CPUNIDEN3(x) ((x) << S_CPUNIDEN3)
+#define F_CPUNIDEN3 V_CPUNIDEN3(1U)
+
+#define S_CPUNIDEN2 6
+#define V_CPUNIDEN2(x) ((x) << S_CPUNIDEN2)
+#define F_CPUNIDEN2 V_CPUNIDEN2(1U)
+
+#define S_CPUNIDEN1 5
+#define V_CPUNIDEN1(x) ((x) << S_CPUNIDEN1)
+#define F_CPUNIDEN1 V_CPUNIDEN1(1U)
+
+#define S_CPUNIDEN0 4
+#define V_CPUNIDEN0(x) ((x) << S_CPUNIDEN0)
+#define F_CPUNIDEN0 V_CPUNIDEN0(1U)
+
+#define S_CPUDBGEN3 3
+#define V_CPUDBGEN3(x) ((x) << S_CPUDBGEN3)
+#define F_CPUDBGEN3 V_CPUDBGEN3(1U)
+
+#define S_CPUDBGEN2 2
+#define V_CPUDBGEN2(x) ((x) << S_CPUDBGEN2)
+#define F_CPUDBGEN2 V_CPUDBGEN2(1U)
+
+#define S_CPUDBGEN1 1
+#define V_CPUDBGEN1(x) ((x) << S_CPUDBGEN1)
+#define F_CPUDBGEN1 V_CPUDBGEN1(1U)
+
+#define S_CPUDBGEN0 0
+#define V_CPUDBGEN0(x) ((x) << S_CPUDBGEN0)
+#define F_CPUDBGEN0 V_CPUDBGEN0(1U)
+
+#define A_ARM_CPU_DBG_ACK 0x4707c
+
+#define S_CPUDBGNOPWRDWN3 11
+#define V_CPUDBGNOPWRDWN3(x) ((x) << S_CPUDBGNOPWRDWN3)
+#define F_CPUDBGNOPWRDWN3 V_CPUDBGNOPWRDWN3(1U)
+
+#define S_CPUDBGNOPWRDWN2 10
+#define V_CPUDBGNOPWRDWN2(x) ((x) << S_CPUDBGNOPWRDWN2)
+#define F_CPUDBGNOPWRDWN2 V_CPUDBGNOPWRDWN2(1U)
+
+#define S_CPUDBGNOPWRDWN1 9
+#define V_CPUDBGNOPWRDWN1(x) ((x) << S_CPUDBGNOPWRDWN1)
+#define F_CPUDBGNOPWRDWN1 V_CPUDBGNOPWRDWN1(1U)
+
+#define S_CPUDBGNOPWRDWN0 8
+#define V_CPUDBGNOPWRDWN0(x) ((x) << S_CPUDBGNOPWRDWN0)
+#define F_CPUDBGNOPWRDWN0 V_CPUDBGNOPWRDWN0(1U)
+
+#define S_CPUDGNRSTREQ3 7
+#define V_CPUDGNRSTREQ3(x) ((x) << S_CPUDGNRSTREQ3)
+#define F_CPUDGNRSTREQ3 V_CPUDGNRSTREQ3(1U)
+
+#define S_CPUDGNRSTREQ2 6
+#define V_CPUDGNRSTREQ2(x) ((x) << S_CPUDGNRSTREQ2)
+#define F_CPUDGNRSTREQ2 V_CPUDGNRSTREQ2(1U)
+
+#define S_CPUDGNRSTREQ1 5
+#define V_CPUDGNRSTREQ1(x) ((x) << S_CPUDGNRSTREQ1)
+#define F_CPUDGNRSTREQ1 V_CPUDGNRSTREQ1(1U)
+
+#define S_CPUDGNRSTREQ0 4
+#define V_CPUDGNRSTREQ0(x) ((x) << S_CPUDGNRSTREQ0)
+#define F_CPUDGNRSTREQ0 V_CPUDGNRSTREQ0(1U)
+
+#define S_CPUDBGACK3 3
+#define V_CPUDBGACK3(x) ((x) << S_CPUDBGACK3)
+#define F_CPUDBGACK3 V_CPUDBGACK3(1U)
+
+#define S_CPUDBGACK2 2
+#define V_CPUDBGACK2(x) ((x) << S_CPUDBGACK2)
+#define F_CPUDBGACK2 V_CPUDBGACK2(1U)
+
+#define S_CPUDBGACK1 1
+#define V_CPUDBGACK1(x) ((x) << S_CPUDBGACK1)
+#define F_CPUDBGACK1 V_CPUDBGACK1(1U)
+
+#define S_CPUDBGACK0 0
+#define V_CPUDBGACK0(x) ((x) << S_CPUDBGACK0)
+#define F_CPUDBGACK0 V_CPUDBGACK0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_REQ 0x47080
+
+#define S_CPUPMUSNAPSHOTREQ3 3
+#define V_CPUPMUSNAPSHOTREQ3(x) ((x) << S_CPUPMUSNAPSHOTREQ3)
+#define F_CPUPMUSNAPSHOTREQ3 V_CPUPMUSNAPSHOTREQ3(1U)
+
+#define S_CPUPMUSNAPSHOTREQ2 2
+#define V_CPUPMUSNAPSHOTREQ2(x) ((x) << S_CPUPMUSNAPSHOTREQ2)
+#define F_CPUPMUSNAPSHOTREQ2 V_CPUPMUSNAPSHOTREQ2(1U)
+
+#define S_CPUPMUSNAPSHOTREQ1 1
+#define V_CPUPMUSNAPSHOTREQ1(x) ((x) << S_CPUPMUSNAPSHOTREQ1)
+#define F_CPUPMUSNAPSHOTREQ1 V_CPUPMUSNAPSHOTREQ1(1U)
+
+#define S_CPUPMUSNAPSHOTREQ0 0
+#define V_CPUPMUSNAPSHOTREQ0(x) ((x) << S_CPUPMUSNAPSHOTREQ0)
+#define F_CPUPMUSNAPSHOTREQ0 V_CPUPMUSNAPSHOTREQ0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_ACK 0x47084
+
+#define S_CPUPMUSNAPSHOTACK3 3
+#define V_CPUPMUSNAPSHOTACK3(x) ((x) << S_CPUPMUSNAPSHOTACK3)
+#define F_CPUPMUSNAPSHOTACK3 V_CPUPMUSNAPSHOTACK3(1U)
+
+#define S_CPUPMUSNAPSHOTACK2 2
+#define V_CPUPMUSNAPSHOTACK2(x) ((x) << S_CPUPMUSNAPSHOTACK2)
+#define F_CPUPMUSNAPSHOTACK2 V_CPUPMUSNAPSHOTACK2(1U)
+
+#define S_CPUPMUSNAPSHOTACK1 1
+#define V_CPUPMUSNAPSHOTACK1(x) ((x) << S_CPUPMUSNAPSHOTACK1)
+#define F_CPUPMUSNAPSHOTACK1 V_CPUPMUSNAPSHOTACK1(1U)
+
+#define S_CPUPMUSNAPSHOTACK0 0
+#define V_CPUPMUSNAPSHOTACK0(x) ((x) << S_CPUPMUSNAPSHOTACK0)
+#define F_CPUPMUSNAPSHOTACK0 V_CPUPMUSNAPSHOTACK0(1U)
+
+#define A_ARM_EMMC_CTRL 0x47088
+
+#define S_EMMC_DATA_P2 24
+#define M_EMMC_DATA_P2 0xffU
+#define V_EMMC_DATA_P2(x) ((x) << S_EMMC_DATA_P2)
+#define G_EMMC_DATA_P2(x) (((x) >> S_EMMC_DATA_P2) & M_EMMC_DATA_P2)
+
+#define S_EMMC_DATA_P1 16
+#define M_EMMC_DATA_P1 0xffU
+#define V_EMMC_DATA_P1(x) ((x) << S_EMMC_DATA_P1)
+#define G_EMMC_DATA_P1(x) (((x) >> S_EMMC_DATA_P1) & M_EMMC_DATA_P1)
+
+#define S_EMMC_CMD_P2 15
+#define V_EMMC_CMD_P2(x) ((x) << S_EMMC_CMD_P2)
+#define F_EMMC_CMD_P2 V_EMMC_CMD_P2(1U)
+
+#define S_EMMC_CMD_P1 14
+#define V_EMMC_CMD_P1(x) ((x) << S_EMMC_CMD_P1)
+#define F_EMMC_CMD_P1 V_EMMC_CMD_P1(1U)
+
+#define S_EMMC_RST_P2 13
+#define V_EMMC_RST_P2(x) ((x) << S_EMMC_RST_P2)
+#define F_EMMC_RST_P2 V_EMMC_RST_P2(1U)
+
+#define S_EMMC_RST_P1 12
+#define V_EMMC_RST_P1(x) ((x) << S_EMMC_RST_P1)
+#define F_EMMC_RST_P1 V_EMMC_RST_P1(1U)
+
+#define S_EMMC_GP_IN_P2 10
+#define M_EMMC_GP_IN_P2 0x3U
+#define V_EMMC_GP_IN_P2(x) ((x) << S_EMMC_GP_IN_P2)
+#define G_EMMC_GP_IN_P2(x) (((x) >> S_EMMC_GP_IN_P2) & M_EMMC_GP_IN_P2)
+
+#define S_EMMC_GP_IN_P1 8
+#define M_EMMC_GP_IN_P1 0x3U
+#define V_EMMC_GP_IN_P1(x) ((x) << S_EMMC_GP_IN_P1)
+#define G_EMMC_GP_IN_P1(x) (((x) >> S_EMMC_GP_IN_P1) & M_EMMC_GP_IN_P1)
+
+#define S_EMMC_CLK_SEL 0
+#define M_EMMC_CLK_SEL 0xffU
+#define V_EMMC_CLK_SEL(x) ((x) << S_EMMC_CLK_SEL)
+#define G_EMMC_CLK_SEL(x) (((x) >> S_EMMC_CLK_SEL) & M_EMMC_CLK_SEL)
+
+#define A_ARM_CPU_CFG_END_VINI_TE 0x4708c
+
+#define S_CPUSYSBARDISABLE 23
+#define V_CPUSYSBARDISABLE(x) ((x) << S_CPUSYSBARDISABLE)
+#define F_CPUSYSBARDISABLE V_CPUSYSBARDISABLE(1U)
+
+#define S_CPUBROADCACHEMAIN 22
+#define V_CPUBROADCACHEMAIN(x) ((x) << S_CPUBROADCACHEMAIN)
+#define F_CPUBROADCACHEMAIN V_CPUBROADCACHEMAIN(1U)
+
+#define S_CPUBROADOUTER 21
+#define V_CPUBROADOUTER(x) ((x) << S_CPUBROADOUTER)
+#define F_CPUBROADOUTER V_CPUBROADOUTER(1U)
+
+#define S_CPUBROADINNER 20
+#define V_CPUBROADINNER(x) ((x) << S_CPUBROADINNER)
+#define F_CPUBROADINNER V_CPUBROADINNER(1U)
+
+#define S_CPUCRYPTODISABLE3 19
+#define V_CPUCRYPTODISABLE3(x) ((x) << S_CPUCRYPTODISABLE3)
+#define F_CPUCRYPTODISABLE3 V_CPUCRYPTODISABLE3(1U)
+
+#define S_CPUCRYPTODISABLE2 18
+#define V_CPUCRYPTODISABLE2(x) ((x) << S_CPUCRYPTODISABLE2)
+#define F_CPUCRYPTODISABLE2 V_CPUCRYPTODISABLE2(1U)
+
+#define S_CPUCRYPTODISABLE1 17
+#define V_CPUCRYPTODISABLE1(x) ((x) << S_CPUCRYPTODISABLE1)
+#define F_CPUCRYPTODISABLE1 V_CPUCRYPTODISABLE1(1U)
+
+#define S_CPUCRYPTODISABLE0 16
+#define V_CPUCRYPTODISABLE0(x) ((x) << S_CPUCRYPTODISABLE0)
+#define F_CPUCRYPTODISABLE0 V_CPUCRYPTODISABLE0(1U)
+
+#define S_CPUAA64NAA323 15
+#define V_CPUAA64NAA323(x) ((x) << S_CPUAA64NAA323)
+#define F_CPUAA64NAA323 V_CPUAA64NAA323(1U)
+
+#define S_CPUAA64NAA322 14
+#define V_CPUAA64NAA322(x) ((x) << S_CPUAA64NAA322)
+#define F_CPUAA64NAA322 V_CPUAA64NAA322(1U)
+
+#define S_CPUAA64NAA321 13
+#define V_CPUAA64NAA321(x) ((x) << S_CPUAA64NAA321)
+#define F_CPUAA64NAA321 V_CPUAA64NAA321(1U)
+
+#define S_CPUAA64NAA320 12
+#define V_CPUAA64NAA320(x) ((x) << S_CPUAA64NAA320)
+#define F_CPUAA64NAA320 V_CPUAA64NAA320(1U)
+
+#define S_CPUCFGTE3 11
+#define V_CPUCFGTE3(x) ((x) << S_CPUCFGTE3)
+#define F_CPUCFGTE3 V_CPUCFGTE3(1U)
+
+#define S_CPUCFGTE2 10
+#define V_CPUCFGTE2(x) ((x) << S_CPUCFGTE2)
+#define F_CPUCFGTE2 V_CPUCFGTE2(1U)
+
+#define S_CPUCFGTE1 9
+#define V_CPUCFGTE1(x) ((x) << S_CPUCFGTE1)
+#define F_CPUCFGTE1 V_CPUCFGTE1(1U)
+
+#define S_CPUCFGTE0 8
+#define V_CPUCFGTE0(x) ((x) << S_CPUCFGTE0)
+#define F_CPUCFGTE0 V_CPUCFGTE0(1U)
+
+#define S_CPUVINIHI3 7
+#define V_CPUVINIHI3(x) ((x) << S_CPUVINIHI3)
+#define F_CPUVINIHI3 V_CPUVINIHI3(1U)
+
+#define S_CPUVINIHI2 6
+#define V_CPUVINIHI2(x) ((x) << S_CPUVINIHI2)
+#define F_CPUVINIHI2 V_CPUVINIHI2(1U)
+
+#define S_CPUVINIHI1 5
+#define V_CPUVINIHI1(x) ((x) << S_CPUVINIHI1)
+#define F_CPUVINIHI1 V_CPUVINIHI1(1U)
+
+#define S_CPUVINIHI0 4
+#define V_CPUVINIHI0(x) ((x) << S_CPUVINIHI0)
+#define F_CPUVINIHI0 V_CPUVINIHI0(1U)
+
+#define S_CPUCFGEND3 3
+#define V_CPUCFGEND3(x) ((x) << S_CPUCFGEND3)
+#define F_CPUCFGEND3 V_CPUCFGEND3(1U)
+
+#define S_CPUCFGEND2 2
+#define V_CPUCFGEND2(x) ((x) << S_CPUCFGEND2)
+#define F_CPUCFGEND2 V_CPUCFGEND2(1U)
+
+#define S_CPUCFGEND1 1
+#define V_CPUCFGEND1(x) ((x) << S_CPUCFGEND1)
+#define F_CPUCFGEND1 V_CPUCFGEND1(1U)
+
+#define S_CPUCFGEND0 0
+#define V_CPUCFGEND0(x) ((x) << S_CPUCFGEND0)
+#define F_CPUCFGEND0 V_CPUCFGEND0(1U)
+
+#define A_ARM_CPU_CP15_SDISABLE 0x47090
+
+#define S_CPUCP15SDISABLE3 3
+#define V_CPUCP15SDISABLE3(x) ((x) << S_CPUCP15SDISABLE3)
+#define F_CPUCP15SDISABLE3 V_CPUCP15SDISABLE3(1U)
+
+#define S_CPUCP15SDISABLE2 2
+#define V_CPUCP15SDISABLE2(x) ((x) << S_CPUCP15SDISABLE2)
+#define F_CPUCP15SDISABLE2 V_CPUCP15SDISABLE2(1U)
+
+#define S_CPUCP15SDISABLE1 1
+#define V_CPUCP15SDISABLE1(x) ((x) << S_CPUCP15SDISABLE1)
+#define F_CPUCP15SDISABLE1 V_CPUCP15SDISABLE1(1U)
+
+#define S_CPUCP15SDISABLE0 0
+#define V_CPUCP15SDISABLE0(x) ((x) << S_CPUCP15SDISABLE0)
+#define F_CPUCP15SDISABLE0 V_CPUCP15SDISABLE0(1U)
+
+#define A_ARM_CPU_CLUSTER_ID_AFF 0x47094
+
+#define S_CPUCLUSTERIDAFF2 8
+#define M_CPUCLUSTERIDAFF2 0xffU
+#define V_CPUCLUSTERIDAFF2(x) ((x) << S_CPUCLUSTERIDAFF2)
+#define G_CPUCLUSTERIDAFF2(x) (((x) >> S_CPUCLUSTERIDAFF2) & M_CPUCLUSTERIDAFF2)
+
+#define S_CPUCLUSTERIDAFF1 0
+#define M_CPUCLUSTERIDAFF1 0xffU
+#define V_CPUCLUSTERIDAFF1(x) ((x) << S_CPUCLUSTERIDAFF1)
+#define G_CPUCLUSTERIDAFF1(x) (((x) >> S_CPUCLUSTERIDAFF1) & M_CPUCLUSTERIDAFF1)
+
+#define A_ARM_CPU_CLK_CFG 0x47098
+
+#define S_CPUACINACTIVEM 1
+#define V_CPUACINACTIVEM(x) ((x) << S_CPUACINACTIVEM)
+#define F_CPUACINACTIVEM V_CPUACINACTIVEM(1U)
+
+#define S_CPUACLKENM 0
+#define V_CPUACLKENM(x) ((x) << S_CPUACLKENM)
+#define F_CPUACLKENM V_CPUACLKENM(1U)
+
+#define A_ARM_NVME_DB_EMU_INT_CAUSE 0x4709c
+
+#define S_INVALID_BRESP 3
+#define V_INVALID_BRESP(x) ((x) << S_INVALID_BRESP)
+#define F_INVALID_BRESP V_INVALID_BRESP(1U)
+
+#define S_DATA_LEN_OF 2
+#define V_DATA_LEN_OF(x) ((x) << S_DATA_LEN_OF)
+#define F_DATA_LEN_OF V_DATA_LEN_OF(1U)
+
+#define S_INVALID_EMU_ADDR 1
+#define V_INVALID_EMU_ADDR(x) ((x) << S_INVALID_EMU_ADDR)
+#define F_INVALID_EMU_ADDR V_INVALID_EMU_ADDR(1U)
+
+#define S_INVALID_AXI_ADDR_CFG 0
+#define V_INVALID_AXI_ADDR_CFG(x) ((x) << S_INVALID_AXI_ADDR_CFG)
+#define F_INVALID_AXI_ADDR_CFG V_INVALID_AXI_ADDR_CFG(1U)
+
+#define A_ARM_CS_RST 0x470c0
+
+#define S_ATCLKEN 9
+#define V_ATCLKEN(x) ((x) << S_ATCLKEN)
+#define F_ATCLKEN V_ATCLKEN(1U)
+
+#define S_CXAPBICRSTN 8
+#define V_CXAPBICRSTN(x) ((x) << S_CXAPBICRSTN)
+#define F_CXAPBICRSTN V_CXAPBICRSTN(1U)
+
+#define S_CSDBGEN 7
+#define V_CSDBGEN(x) ((x) << S_CSDBGEN)
+#define F_CSDBGEN V_CSDBGEN(1U)
+
+#define S_JTAGNPOTRST 6
+#define V_JTAGNPOTRST(x) ((x) << S_JTAGNPOTRST)
+#define F_JTAGNPOTRST V_JTAGNPOTRST(1U)
+
+#define S_JTAGNTRST 5
+#define V_JTAGNTRST(x) ((x) << S_JTAGNTRST)
+#define F_JTAGNTRST V_JTAGNTRST(1U)
+
+#define S_PADDR31S0 4
+#define V_PADDR31S0(x) ((x) << S_PADDR31S0)
+#define F_PADDR31S0 V_PADDR31S0(1U)
+
+#define S_CTICLKEN 3
+#define V_CTICLKEN(x) ((x) << S_CTICLKEN)
+#define F_CTICLKEN V_CTICLKEN(1U)
+
+#define S_PCLKENDBG 2
+#define V_PCLKENDBG(x) ((x) << S_PCLKENDBG)
+#define F_PCLKENDBG V_PCLKENDBG(1U)
+
+#define S_CPU_NIDEN 1
+#define V_CPU_NIDEN(x) ((x) << S_CPU_NIDEN)
+#define F_CPU_NIDEN V_CPU_NIDEN(1U)
+
+#define S_CPU_DBGEN 0
+#define V_CPU_DBGEN(x) ((x) << S_CPU_DBGEN)
+#define F_CPU_DBGEN V_CPU_DBGEN(1U)
+
+#define A_ARM_CS_ADDRL 0x470c4
+#define A_ARM_CS_ADDRH 0x470c8
+#define A_ARM_CS_DFT_CONTROL 0x470cc
+
+#define S_DFTMBISTADDR 5
+#define M_DFTMBISTADDR 0x7ffU
+#define V_DFTMBISTADDR(x) ((x) << S_DFTMBISTADDR)
+#define G_DFTMBISTADDR(x) (((x) >> S_DFTMBISTADDR) & M_DFTMBISTADDR)
+
+#define S_DFTMTESTON 3
+#define V_DFTMTESTON(x) ((x) << S_DFTMTESTON)
+#define F_DFTMTESTON V_DFTMTESTON(1U)
+
+#define S_DFTMBISTCE 2
+#define V_DFTMBISTCE(x) ((x) << S_DFTMBISTCE)
+#define F_DFTMBISTCE V_DFTMBISTCE(1U)
+
+#define S_DFTMBITWR 1
+#define V_DFTMBITWR(x) ((x) << S_DFTMBITWR)
+#define F_DFTMBITWR V_DFTMBITWR(1U)
+
+#define S_DFTSE 0
+#define V_DFTSE(x) ((x) << S_DFTSE)
+#define F_DFTSE V_DFTSE(1U)
+
+#define A_ARM_CS_DFT_IN 0x470d0
+#define A_ARM_CS_DFT_OUT 0x470d4
+#define A_ARM_CPU_EVENT_I 0x47100
+
+#define S_CPUEVENTI 0
+#define V_CPUEVENTI(x) ((x) << S_CPUEVENTI)
+#define F_CPUEVENTI V_CPUEVENTI(1U)
+
+#define A_ARM_CPU_EVENT_O 0x47104
+
+#define S_CPUEVENTO 0
+#define V_CPUEVENTO(x) ((x) << S_CPUEVENTO)
+#define F_CPUEVENTO V_CPUEVENTO(1U)
+
+#define A_ARM_CPU_CLR_EXMON_REQ 0x47108
+
+#define S_CPUCLREXMONREQ 0
+#define V_CPUCLREXMONREQ(x) ((x) << S_CPUCLREXMONREQ)
+#define F_CPUCLREXMONREQ V_CPUCLREXMONREQ(1U)
+
+#define A_ARM_CPU_CLR_EXMON_ACK 0x4710c
+
+#define S_CPUCLREXMONACK 0
+#define V_CPUCLREXMONACK(x) ((x) << S_CPUCLREXMONACK)
+#define F_CPUCLREXMONACK V_CPUCLREXMONACK(1U)
+
+#define A_ARM_UART_MSTR_RXD 0x47110
+#define A_ARM_UART_MSTR_RXC 0x47114
+
+#define S_UART_MSTR_RXC 0
+#define V_UART_MSTR_RXC(x) ((x) << S_UART_MSTR_RXC)
+#define F_UART_MSTR_RXC V_UART_MSTR_RXC(1U)
+
+#define A_ARM_UART_MSTR_TXD 0x47118
+#define A_ARM_UART_MSTR_TXC 0x4711c
+
+#define S_T7_INT 1
+#define V_T7_INT(x) ((x) << S_T7_INT)
+#define F_T7_INT V_T7_INT(1U)
+
+#define S_UART_MSTC_TXC 0
+#define V_UART_MSTC_TXC(x) ((x) << S_UART_MSTC_TXC)
+#define F_UART_MSTC_TXC V_UART_MSTC_TXC(1U)
+
+#define A_ARM_UART_SLV_SEL 0x47120
+
+#define S_UART_SLV_SEL 0
+#define V_UART_SLV_SEL(x) ((x) << S_UART_SLV_SEL)
+#define F_UART_SLV_SEL V_UART_SLV_SEL(1U)
+
+#define A_ARM_CPU_PERIPH_BASE 0x47124
+#define A_ARM_PERR_INT_ENB2 0x47128
+#define A_ARM_PERR_ENABLE2 0x4712c
+#define A_ARM_UART_CONFIG 0x47130
+#define A_ARM_UART_STAT 0x47134
+
+#define S_RSV1 6
+#define M_RSV1 0x3ffffffU
+#define V_RSV1(x) ((x) << S_RSV1)
+#define G_RSV1(x) (((x) >> S_RSV1) & M_RSV1)
+
+#define S_RXFRMERR 5
+#define V_RXFRMERR(x) ((x) << S_RXFRMERR)
+#define F_RXFRMERR V_RXFRMERR(1U)
+
+#define S_RXPARERR 4
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RXOVRN 3
+#define V_RXOVRN(x) ((x) << S_RXOVRN)
+#define F_RXOVRN V_RXOVRN(1U)
+
+#define S_CTL_RXRDY 2
+#define V_CTL_RXRDY(x) ((x) << S_CTL_RXRDY)
+#define F_CTL_RXRDY V_CTL_RXRDY(1U)
+
+#define S_TXOVRN 1
+#define V_TXOVRN(x) ((x) << S_TXOVRN)
+#define F_TXOVRN V_TXOVRN(1U)
+
+#define S_CTL_TXRDY 0
+#define V_CTL_TXRDY(x) ((x) << S_CTL_TXRDY)
+#define F_CTL_TXRDY V_CTL_TXRDY(1U)
+
+#define A_ARM_UART_TX_DATA 0x47138
+
+#define S_TX_DATA 0
+#define M_TX_DATA 0xffU
+#define V_TX_DATA(x) ((x) << S_TX_DATA)
+#define G_TX_DATA(x) (((x) >> S_TX_DATA) & M_TX_DATA)
+
+#define A_ARM_UART_RX_DATA 0x4713c
+
+#define S_RX_DATA 0
+#define M_RX_DATA 0xffU
+#define V_RX_DATA(x) ((x) << S_RX_DATA)
+#define G_RX_DATA(x) (((x) >> S_RX_DATA) & M_RX_DATA)
+
+#define A_ARM_UART_DBG0 0x47140
+#define A_ARM_UART_DBG1 0x47144
+#define A_ARM_UART_DBG2 0x47148
+#define A_ARM_UART_DBG3 0x4714c
+#define A_ARM_ARM_CPU_PC0 0x47150
+#define A_ARM_ARM_CPU_PC1 0x47154
+#define A_ARM_ARM_UART_INT_CAUSE 0x47158
+
+#define S_RX_FIFO_NOT_EMPTY 1
+#define V_RX_FIFO_NOT_EMPTY(x) ((x) << S_RX_FIFO_NOT_EMPTY)
+#define F_RX_FIFO_NOT_EMPTY V_RX_FIFO_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_EMPTY 0
+#define V_TX_FIFO_EMPTY(x) ((x) << S_TX_FIFO_EMPTY)
+#define F_TX_FIFO_EMPTY V_TX_FIFO_EMPTY(1U)
+
+#define A_ARM_ARM_UART_INT_EN 0x4715c
+
+#define S_RX_FIFO_INT_NOT_EMPTY 1
+#define V_RX_FIFO_INT_NOT_EMPTY(x) ((x) << S_RX_FIFO_INT_NOT_EMPTY)
+#define F_RX_FIFO_INT_NOT_EMPTY V_RX_FIFO_INT_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_INT_EMPTY 0
+#define V_TX_FIFO_INT_EMPTY(x) ((x) << S_TX_FIFO_INT_EMPTY)
+#define F_TX_FIFO_INT_EMPTY V_TX_FIFO_INT_EMPTY(1U)
+
+#define A_ARM_ARM_UART_GPIO_SEL 0x47160
+
+#define S_PC_SEL 1
+#define M_PC_SEL 0x7U
+#define V_PC_SEL(x) ((x) << S_PC_SEL)
+#define G_PC_SEL(x) (((x) >> S_PC_SEL) & M_PC_SEL)
+
+#define S_UART_GPIO_SEL 0
+#define V_UART_GPIO_SEL(x) ((x) << S_UART_GPIO_SEL)
+#define F_UART_GPIO_SEL V_UART_GPIO_SEL(1U)
+
+#define A_ARM_ARM_SCRATCH_PAD0 0x47164
+#define A_ARM_ARM_SCRATCH_PAD1 0x47168
+#define A_ARM_ARM_SCRATCH_PAD2 0x4716c
+#define A_ARM_PERR_INT_CAUSE0 0x47170
+
+#define S_INIC_WRDATA_FIFO_PERR 31
+#define V_INIC_WRDATA_FIFO_PERR(x) ((x) << S_INIC_WRDATA_FIFO_PERR)
+#define F_INIC_WRDATA_FIFO_PERR V_INIC_WRDATA_FIFO_PERR(1U)
+
+#define S_INIC_RDATA_FIFO_PERR 30
+#define V_INIC_RDATA_FIFO_PERR(x) ((x) << S_INIC_RDATA_FIFO_PERR)
+#define F_INIC_RDATA_FIFO_PERR V_INIC_RDATA_FIFO_PERR(1U)
+
+#define S_MSI_MEM_PERR 29
+#define V_MSI_MEM_PERR(x) ((x) << S_MSI_MEM_PERR)
+#define F_MSI_MEM_PERR V_MSI_MEM_PERR(1U)
+
+#define S_ARM_DB_SRAM_PERR 27
+#define M_ARM_DB_SRAM_PERR 0x3U
+#define V_ARM_DB_SRAM_PERR(x) ((x) << S_ARM_DB_SRAM_PERR)
+#define G_ARM_DB_SRAM_PERR(x) (((x) >> S_ARM_DB_SRAM_PERR) & M_ARM_DB_SRAM_PERR)
+
+#define S_EMMC_FIFOPARINT 26
+#define V_EMMC_FIFOPARINT(x) ((x) << S_EMMC_FIFOPARINT)
+#define F_EMMC_FIFOPARINT V_EMMC_FIFOPARINT(1U)
+
+#define S_ICB_RAM_PERR 25
+#define V_ICB_RAM_PERR(x) ((x) << S_ICB_RAM_PERR)
+#define F_ICB_RAM_PERR V_ICB_RAM_PERR(1U)
+
+#define S_MESS2AXI4_WRFIFO_PERR 24
+#define V_MESS2AXI4_WRFIFO_PERR(x) ((x) << S_MESS2AXI4_WRFIFO_PERR)
+#define F_MESS2AXI4_WRFIFO_PERR V_MESS2AXI4_WRFIFO_PERR(1U)
+
+#define S_RC_WFIFO_OUTPERR 23
+#define V_RC_WFIFO_OUTPERR(x) ((x) << S_RC_WFIFO_OUTPERR)
+#define F_RC_WFIFO_OUTPERR V_RC_WFIFO_OUTPERR(1U)
+
+#define S_RC_SRAM_PERR 21
+#define M_RC_SRAM_PERR 0x3U
+#define V_RC_SRAM_PERR(x) ((x) << S_RC_SRAM_PERR)
+#define G_RC_SRAM_PERR(x) (((x) >> S_RC_SRAM_PERR) & M_RC_SRAM_PERR)
+
+#define S_MSI_FIFO_PAR_ERR 20
+#define V_MSI_FIFO_PAR_ERR(x) ((x) << S_MSI_FIFO_PAR_ERR)
+#define F_MSI_FIFO_PAR_ERR V_MSI_FIFO_PAR_ERR(1U)
+
+#define S_INIC2MA_INTFPERR 19
+#define V_INIC2MA_INTFPERR(x) ((x) << S_INIC2MA_INTFPERR)
+#define F_INIC2MA_INTFPERR V_INIC2MA_INTFPERR(1U)
+
+#define S_RDATAFIFO0_PERR 18
+#define V_RDATAFIFO0_PERR(x) ((x) << S_RDATAFIFO0_PERR)
+#define F_RDATAFIFO0_PERR V_RDATAFIFO0_PERR(1U)
+
+#define S_RDATAFIFO1_PERR 17
+#define V_RDATAFIFO1_PERR(x) ((x) << S_RDATAFIFO1_PERR)
+#define F_RDATAFIFO1_PERR V_RDATAFIFO1_PERR(1U)
+
+#define S_WRDATAFIFO0_PERR 16
+#define V_WRDATAFIFO0_PERR(x) ((x) << S_WRDATAFIFO0_PERR)
+#define F_WRDATAFIFO0_PERR V_WRDATAFIFO0_PERR(1U)
+
+#define S_WRDATAFIFO1_PERR 15
+#define V_WRDATAFIFO1_PERR(x) ((x) << S_WRDATAFIFO1_PERR)
+#define F_WRDATAFIFO1_PERR V_WRDATAFIFO1_PERR(1U)
+
+#define S_WR512DATAFIFO0_PERR 14
+#define V_WR512DATAFIFO0_PERR(x) ((x) << S_WR512DATAFIFO0_PERR)
+#define F_WR512DATAFIFO0_PERR V_WR512DATAFIFO0_PERR(1U)
+
+#define S_WR512DATAFIFO1_PERR 13
+#define V_WR512DATAFIFO1_PERR(x) ((x) << S_WR512DATAFIFO1_PERR)
+#define F_WR512DATAFIFO1_PERR V_WR512DATAFIFO1_PERR(1U)
+
+#define S_ROBUFF_PARERR3 12
+#define V_ROBUFF_PARERR3(x) ((x) << S_ROBUFF_PARERR3)
+#define F_ROBUFF_PARERR3 V_ROBUFF_PARERR3(1U)
+
+#define S_ROBUFF_PARERR2 11
+#define V_ROBUFF_PARERR2(x) ((x) << S_ROBUFF_PARERR2)
+#define F_ROBUFF_PARERR2 V_ROBUFF_PARERR2(1U)
+
+#define S_ROBUFF_PARERR1 10
+#define V_ROBUFF_PARERR1(x) ((x) << S_ROBUFF_PARERR1)
+#define F_ROBUFF_PARERR1 V_ROBUFF_PARERR1(1U)
+
+#define S_ROBUFF_PARERR0 9
+#define V_ROBUFF_PARERR0(x) ((x) << S_ROBUFF_PARERR0)
+#define F_ROBUFF_PARERR0 V_ROBUFF_PARERR0(1U)
+
+#define S_MA2AXI_REQDATAPARERR 8
+#define V_MA2AXI_REQDATAPARERR(x) ((x) << S_MA2AXI_REQDATAPARERR)
+#define F_MA2AXI_REQDATAPARERR V_MA2AXI_REQDATAPARERR(1U)
+
+#define S_MA2AXI_REQCTLPARERR 7
+#define V_MA2AXI_REQCTLPARERR(x) ((x) << S_MA2AXI_REQCTLPARERR)
+#define F_MA2AXI_REQCTLPARERR V_MA2AXI_REQCTLPARERR(1U)
+
+#define S_MA_RSPPERR 6
+#define V_MA_RSPPERR(x) ((x) << S_MA_RSPPERR)
+#define F_MA_RSPPERR V_MA_RSPPERR(1U)
+
+#define S_PCIE2MA_REQCTLPARERR 5
+#define V_PCIE2MA_REQCTLPARERR(x) ((x) << S_PCIE2MA_REQCTLPARERR)
+#define F_PCIE2MA_REQCTLPARERR V_PCIE2MA_REQCTLPARERR(1U)
+
+#define S_PCIE2MA_REQDATAPARERR 4
+#define V_PCIE2MA_REQDATAPARERR(x) ((x) << S_PCIE2MA_REQDATAPARERR)
+#define F_PCIE2MA_REQDATAPARERR V_PCIE2MA_REQDATAPARERR(1U)
+
+#define S_INIC2MA_REQCTLPARERR 3
+#define V_INIC2MA_REQCTLPARERR(x) ((x) << S_INIC2MA_REQCTLPARERR)
+#define F_INIC2MA_REQCTLPARERR V_INIC2MA_REQCTLPARERR(1U)
+
+#define S_INIC2MA_REQDATAPARERR 2
+#define V_INIC2MA_REQDATAPARERR(x) ((x) << S_INIC2MA_REQDATAPARERR)
+#define F_INIC2MA_REQDATAPARERR V_INIC2MA_REQDATAPARERR(1U)
+
+#define S_MA_RSPUE 1
+#define V_MA_RSPUE(x) ((x) << S_MA_RSPUE)
+#define F_MA_RSPUE V_MA_RSPUE(1U)
+
+#define S_APB2PL_RSPDATAPERR 0
+#define V_APB2PL_RSPDATAPERR(x) ((x) << S_APB2PL_RSPDATAPERR)
+#define F_APB2PL_RSPDATAPERR V_APB2PL_RSPDATAPERR(1U)
+
+#define A_ARM_PERR_INT_ENB0 0x47174
+#define A_ARM_SCRATCH_PAD3 0x47178
+
+#define S_ECO_43187 31
+#define V_ECO_43187(x) ((x) << S_ECO_43187)
+#define F_ECO_43187 V_ECO_43187(1U)
+
+#define S_TIMER_SEL 28
+#define M_TIMER_SEL 0x7U
+#define V_TIMER_SEL(x) ((x) << S_TIMER_SEL)
+#define G_TIMER_SEL(x) (((x) >> S_TIMER_SEL) & M_TIMER_SEL)
+
+#define S_TIMER 4
+#define M_TIMER 0xffffffU
+#define V_TIMER(x) ((x) << S_TIMER)
+#define G_TIMER(x) (((x) >> S_TIMER) & M_TIMER)
+
+#define S_T7_1_INT 0
+#define M_T7_1_INT 0x3U
+#define V_T7_1_INT(x) ((x) << S_T7_1_INT)
+#define G_T7_1_INT(x) (((x) >> S_T7_1_INT) & M_T7_1_INT)
+
+#define A_ARM_PERR_INT_CAUSE2 0x4717c
+
+#define S_INIC_WSTRB_FIFO_PERR 31
+#define V_INIC_WSTRB_FIFO_PERR(x) ((x) << S_INIC_WSTRB_FIFO_PERR)
+#define F_INIC_WSTRB_FIFO_PERR V_INIC_WSTRB_FIFO_PERR(1U)
+
+#define S_INIC_BID_FIFO_PERR 30
+#define V_INIC_BID_FIFO_PERR(x) ((x) << S_INIC_BID_FIFO_PERR)
+#define F_INIC_BID_FIFO_PERR V_INIC_BID_FIFO_PERR(1U)
+
+#define S_CC_SRAM_PKA_PERR 29
+#define V_CC_SRAM_PKA_PERR(x) ((x) << S_CC_SRAM_PKA_PERR)
+#define F_CC_SRAM_PKA_PERR V_CC_SRAM_PKA_PERR(1U)
+
+#define S_CC_SRAM_SEC_PERR 28
+#define V_CC_SRAM_SEC_PERR(x) ((x) << S_CC_SRAM_SEC_PERR)
+#define F_CC_SRAM_SEC_PERR V_CC_SRAM_SEC_PERR(1U)
+
+#define S_MESS2AXI4_PARERR 27
+#define V_MESS2AXI4_PARERR(x) ((x) << S_MESS2AXI4_PARERR)
+#define F_MESS2AXI4_PARERR V_MESS2AXI4_PARERR(1U)
+
+#define S_CCI2INIC_INTF_PARERR 26
+#define V_CCI2INIC_INTF_PARERR(x) ((x) << S_CCI2INIC_INTF_PARERR)
+#define F_CCI2INIC_INTF_PARERR V_CCI2INIC_INTF_PARERR(1U)
+
+#define A_ARM_MA2AXI_AW_ATTR 0x47180
+
+#define S_AWLOCKR1 29
+#define V_AWLOCKR1(x) ((x) << S_AWLOCKR1)
+#define F_AWLOCKR1 V_AWLOCKR1(1U)
+
+#define S_AWCACHER1 25
+#define M_AWCACHER1 0xfU
+#define V_AWCACHER1(x) ((x) << S_AWCACHER1)
+#define G_AWCACHER1(x) (((x) >> S_AWCACHER1) & M_AWCACHER1)
+
+#define S_AWPROTR1 21
+#define M_AWPROTR1 0xfU
+#define V_AWPROTR1(x) ((x) << S_AWPROTR1)
+#define G_AWPROTR1(x) (((x) >> S_AWPROTR1) & M_AWPROTR1)
+
+#define S_AWSNOOPR1 18
+#define M_AWSNOOPR1 0x7U
+#define V_AWSNOOPR1(x) ((x) << S_AWSNOOPR1)
+#define G_AWSNOOPR1(x) (((x) >> S_AWSNOOPR1) & M_AWSNOOPR1)
+
+#define S_AWDOMAINR1 16
+#define M_AWDOMAINR1 0x3U
+#define V_AWDOMAINR1(x) ((x) << S_AWDOMAINR1)
+#define G_AWDOMAINR1(x) (((x) >> S_AWDOMAINR1) & M_AWDOMAINR1)
+
+#define S_AWLOCKR0 13
+#define V_AWLOCKR0(x) ((x) << S_AWLOCKR0)
+#define F_AWLOCKR0 V_AWLOCKR0(1U)
+
+#define S_AWCACHER0 9
+#define M_AWCACHER0 0xfU
+#define V_AWCACHER0(x) ((x) << S_AWCACHER0)
+#define G_AWCACHER0(x) (((x) >> S_AWCACHER0) & M_AWCACHER0)
+
+#define S_AWPROTR0 5
+#define M_AWPROTR0 0xfU
+#define V_AWPROTR0(x) ((x) << S_AWPROTR0)
+#define G_AWPROTR0(x) (((x) >> S_AWPROTR0) & M_AWPROTR0)
+
+#define S_AWSNOOPR0 2
+#define M_AWSNOOPR0 0x7U
+#define V_AWSNOOPR0(x) ((x) << S_AWSNOOPR0)
+#define G_AWSNOOPR0(x) (((x) >> S_AWSNOOPR0) & M_AWSNOOPR0)
+
+#define S_AWDOMAINR0 0
+#define M_AWDOMAINR0 0x3U
+#define V_AWDOMAINR0(x) ((x) << S_AWDOMAINR0)
+#define G_AWDOMAINR0(x) (((x) >> S_AWDOMAINR0) & M_AWDOMAINR0)
+
+#define A_ARM_MA2AXI_AR_ATTR 0x47184
+
+#define S_ARLOCKR1 29
+#define V_ARLOCKR1(x) ((x) << S_ARLOCKR1)
+#define F_ARLOCKR1 V_ARLOCKR1(1U)
+
+#define S_ARCACHER1 25
+#define M_ARCACHER1 0xfU
+#define V_ARCACHER1(x) ((x) << S_ARCACHER1)
+#define G_ARCACHER1(x) (((x) >> S_ARCACHER1) & M_ARCACHER1)
+
+#define S_ARPROTR1 21
+#define M_ARPROTR1 0xfU
+#define V_ARPROTR1(x) ((x) << S_ARPROTR1)
+#define G_ARPROTR1(x) (((x) >> S_ARPROTR1) & M_ARPROTR1)
+
+#define S_ARSNOOPR1 18
+#define M_ARSNOOPR1 0x7U
+#define V_ARSNOOPR1(x) ((x) << S_ARSNOOPR1)
+#define G_ARSNOOPR1(x) (((x) >> S_ARSNOOPR1) & M_ARSNOOPR1)
+
+#define S_ARDOMAINR1 16
+#define M_ARDOMAINR1 0x3U
+#define V_ARDOMAINR1(x) ((x) << S_ARDOMAINR1)
+#define G_ARDOMAINR1(x) (((x) >> S_ARDOMAINR1) & M_ARDOMAINR1)
+
+#define S_ARLOCKR0 13
+#define V_ARLOCKR0(x) ((x) << S_ARLOCKR0)
+#define F_ARLOCKR0 V_ARLOCKR0(1U)
+
+#define S_ARCACHER0 9
+#define M_ARCACHER0 0xfU
+#define V_ARCACHER0(x) ((x) << S_ARCACHER0)
+#define G_ARCACHER0(x) (((x) >> S_ARCACHER0) & M_ARCACHER0)
+
+#define S_ARPROTR0 5
+#define M_ARPROTR0 0xfU
+#define V_ARPROTR0(x) ((x) << S_ARPROTR0)
+#define G_ARPROTR0(x) (((x) >> S_ARPROTR0) & M_ARPROTR0)
+
+#define S_ARSNOOPR0 2
+#define M_ARSNOOPR0 0x7U
+#define V_ARSNOOPR0(x) ((x) << S_ARSNOOPR0)
+#define G_ARSNOOPR0(x) (((x) >> S_ARSNOOPR0) & M_ARSNOOPR0)
+
+#define S_ARDOMAINR0 0
+#define M_ARDOMAINR0 0x3U
+#define V_ARDOMAINR0(x) ((x) << S_ARDOMAINR0)
+#define G_ARDOMAINR0(x) (((x) >> S_ARDOMAINR0) & M_ARDOMAINR0)
+
+#define A_ARM_MA2AXI_SNOOP_RGN 0x47188
+
+#define S_SNOOP_END 16
+#define M_SNOOP_END 0xffffU
+#define V_SNOOP_END(x) ((x) << S_SNOOP_END)
+#define G_SNOOP_END(x) (((x) >> S_SNOOP_END) & M_SNOOP_END)
+
+#define S_SNOOP_START 0
+#define M_SNOOP_START 0xffffU
+#define V_SNOOP_START(x) ((x) << S_SNOOP_START)
+#define G_SNOOP_START(x) (((x) >> S_SNOOP_START) & M_SNOOP_START)
+
+#define A_ARM_PERIPHERAL_INT_CAUSE 0x4718c
+
+#define S_TIMER_INT 5
+#define V_TIMER_INT(x) ((x) << S_TIMER_INT)
+#define F_TIMER_INT V_TIMER_INT(1U)
+
+#define S_NVME_INT 4
+#define V_NVME_INT(x) ((x) << S_NVME_INT)
+#define F_NVME_INT V_NVME_INT(1U)
+
+#define S_EMMC_WAKEUP_INT 3
+#define V_EMMC_WAKEUP_INT(x) ((x) << S_EMMC_WAKEUP_INT)
+#define F_EMMC_WAKEUP_INT V_EMMC_WAKEUP_INT(1U)
+
+#define S_EMMC_INT 2
+#define V_EMMC_INT(x) ((x) << S_EMMC_INT)
+#define F_EMMC_INT V_EMMC_INT(1U)
+
+#define S_USB_MC_INT 1
+#define V_USB_MC_INT(x) ((x) << S_USB_MC_INT)
+#define F_USB_MC_INT V_USB_MC_INT(1U)
+
+#define S_USB_DMA_INT 0
+#define V_USB_DMA_INT(x) ((x) << S_USB_DMA_INT)
+#define F_USB_DMA_INT V_USB_DMA_INT(1U)
+
+#define A_ARM_SCRATCH_PAD4 0x47190
+
+#define S_PAD4 15
+#define M_PAD4 0x1ffffU
+#define V_PAD4(x) ((x) << S_PAD4)
+#define G_PAD4(x) (((x) >> S_PAD4) & M_PAD4)
+
+#define S_ARM_DB_CNT 0
+#define M_ARM_DB_CNT 0x7fffU
+#define V_ARM_DB_CNT(x) ((x) << S_ARM_DB_CNT)
+#define G_ARM_DB_CNT(x) (((x) >> S_ARM_DB_CNT) & M_ARM_DB_CNT)
+
+#define A_ARM_SCRATCH_PAD5 0x47194
+#define A_ARM_SCRATCH_PAD6 0x47198
+#define A_ARM_SCRATCH_PAD7 0x4719c
+#define A_ARM_NVME_DB_EMU_INDEX 0x471a0
+#define A_ARM_NVME_DB_EMU_REGION_CTL 0x471a4
+
+#define S_WINDOW_EN 4
+#define V_WINDOW_EN(x) ((x) << S_WINDOW_EN)
+#define F_WINDOW_EN V_WINDOW_EN(1U)
+
+#define S_RGN2_INT_EN 3
+#define V_RGN2_INT_EN(x) ((x) << S_RGN2_INT_EN)
+#define F_RGN2_INT_EN V_RGN2_INT_EN(1U)
+
+#define S_RGN1_INT_EN 2
+#define V_RGN1_INT_EN(x) ((x) << S_RGN1_INT_EN)
+#define F_RGN1_INT_EN V_RGN1_INT_EN(1U)
+
+#define S_QUEUE_EN 1
+#define V_QUEUE_EN(x) ((x) << S_QUEUE_EN)
+#define F_QUEUE_EN V_QUEUE_EN(1U)
+
+#define S_RGN0_INT_EN 0
+#define V_RGN0_INT_EN(x) ((x) << S_RGN0_INT_EN)
+#define F_RGN0_INT_EN V_RGN0_INT_EN(1U)
+
+#define A_ARM_NVME_DB_EMU_DEVICE_CTL 0x471a8
+
+#define S_DEVICE_SIZE 8
+#define M_DEVICE_SIZE 0xfU
+#define V_DEVICE_SIZE(x) ((x) << S_DEVICE_SIZE)
+#define G_DEVICE_SIZE(x) (((x) >> S_DEVICE_SIZE) & M_DEVICE_SIZE)
+
+#define S_RGN1_SIZE 4
+#define M_RGN1_SIZE 0xfU
+#define V_RGN1_SIZE(x) ((x) << S_RGN1_SIZE)
+#define G_RGN1_SIZE(x) (((x) >> S_RGN1_SIZE) & M_RGN1_SIZE)
+
+#define S_RGN0_SIZE 0
+#define M_RGN0_SIZE 0xfU
+#define V_RGN0_SIZE(x) ((x) << S_RGN0_SIZE)
+#define G_RGN0_SIZE(x) (((x) >> S_RGN0_SIZE) & M_RGN0_SIZE)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_START_ADDR 0x471b0
+
+#define S_T7_4_ADDR 0
+#define M_T7_4_ADDR 0xfffffffU
+#define V_T7_4_ADDR(x) ((x) << S_T7_4_ADDR)
+#define G_T7_4_ADDR(x) (((x) >> S_T7_4_ADDR) & M_T7_4_ADDR)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_END_ADDR 0x471b4
+#define A_ARM_NVME_DB_EMU_QBASE_ADDR 0x471b8
+#define A_ARM_NVME_DB_EMU_QUEUE_CID 0x471bc
+
+#define S_T7_CID 0
+#define M_T7_CID 0x1ffffU
+#define V_T7_CID(x) ((x) << S_T7_CID)
+#define G_T7_CID(x) (((x) >> S_T7_CID) & M_T7_CID)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL 0x471c0
+
+#define S_INT_EN 27
+#define V_INT_EN(x) ((x) << S_INT_EN)
+#define F_INT_EN V_INT_EN(1U)
+
+#define S_THRESHOLD 10
+#define M_THRESHOLD 0x1ffffU
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+#define G_THRESHOLD(x) (((x) >> S_THRESHOLD) & M_THRESHOLD)
+
+#define S_T7_1_SIZE 0
+#define M_T7_1_SIZE 0x3ffU
+#define V_T7_1_SIZE(x) ((x) << S_T7_1_SIZE)
+#define G_T7_1_SIZE(x) (((x) >> S_T7_1_SIZE) & M_T7_1_SIZE)
+
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_L 0x471c4
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_H 0x471c8
+#define A_ARM_NVME_DB_EMU_MSIX_OFFSET 0x471cc
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_L 0x471d0
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_H 0x471d4
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_OFFSET 0x471d8
+#define A_ARM_CERR_INT_CAUSE0 0x471dc
+
+#define S_WRDATA_FIFO0_CERR 31
+#define V_WRDATA_FIFO0_CERR(x) ((x) << S_WRDATA_FIFO0_CERR)
+#define F_WRDATA_FIFO0_CERR V_WRDATA_FIFO0_CERR(1U)
+
+#define S_WRDATA_FIFO1_CERR 30
+#define V_WRDATA_FIFO1_CERR(x) ((x) << S_WRDATA_FIFO1_CERR)
+#define F_WRDATA_FIFO1_CERR V_WRDATA_FIFO1_CERR(1U)
+
+#define S_WR512DATAFIFO0_CERR 29
+#define V_WR512DATAFIFO0_CERR(x) ((x) << S_WR512DATAFIFO0_CERR)
+#define F_WR512DATAFIFO0_CERR V_WR512DATAFIFO0_CERR(1U)
+
+#define S_WR512DATAFIFO1_CERR 28
+#define V_WR512DATAFIFO1_CERR(x) ((x) << S_WR512DATAFIFO1_CERR)
+#define F_WR512DATAFIFO1_CERR V_WR512DATAFIFO1_CERR(1U)
+
+#define S_RDATAFIFO0_CERR 27
+#define V_RDATAFIFO0_CERR(x) ((x) << S_RDATAFIFO0_CERR)
+#define F_RDATAFIFO0_CERR V_RDATAFIFO0_CERR(1U)
+
+#define S_RDATAFIFO1_CERR 26
+#define V_RDATAFIFO1_CERR(x) ((x) << S_RDATAFIFO1_CERR)
+#define F_RDATAFIFO1_CERR V_RDATAFIFO1_CERR(1U)
+
+#define S_ROBUFF_CORERR0 25
+#define V_ROBUFF_CORERR0(x) ((x) << S_ROBUFF_CORERR0)
+#define F_ROBUFF_CORERR0 V_ROBUFF_CORERR0(1U)
+
+#define S_ROBUFF_CORERR1 24
+#define V_ROBUFF_CORERR1(x) ((x) << S_ROBUFF_CORERR1)
+#define F_ROBUFF_CORERR1 V_ROBUFF_CORERR1(1U)
+
+#define S_ROBUFF_CORERR2 23
+#define V_ROBUFF_CORERR2(x) ((x) << S_ROBUFF_CORERR2)
+#define F_ROBUFF_CORERR2 V_ROBUFF_CORERR2(1U)
+
+#define S_ROBUFF_CORERR3 22
+#define V_ROBUFF_CORERR3(x) ((x) << S_ROBUFF_CORERR3)
+#define F_ROBUFF_CORERR3 V_ROBUFF_CORERR3(1U)
+
+#define S_MA2AXI_RSPDATACORERR 21
+#define V_MA2AXI_RSPDATACORERR(x) ((x) << S_MA2AXI_RSPDATACORERR)
+#define F_MA2AXI_RSPDATACORERR V_MA2AXI_RSPDATACORERR(1U)
+
+#define S_RC_SRAM_CERR 19
+#define M_RC_SRAM_CERR 0x3U
+#define V_RC_SRAM_CERR(x) ((x) << S_RC_SRAM_CERR)
+#define G_RC_SRAM_CERR(x) (((x) >> S_RC_SRAM_CERR) & M_RC_SRAM_CERR)
+
+#define S_RC_WFIFO_OUTCERR 18
+#define V_RC_WFIFO_OUTCERR(x) ((x) << S_RC_WFIFO_OUTCERR)
+#define F_RC_WFIFO_OUTCERR V_RC_WFIFO_OUTCERR(1U)
+
+#define S_RC_RSPFIFO_CERR 17
+#define V_RC_RSPFIFO_CERR(x) ((x) << S_RC_RSPFIFO_CERR)
+#define F_RC_RSPFIFO_CERR V_RC_RSPFIFO_CERR(1U)
+
+#define S_MSI_MEM_CERR 16
+#define V_MSI_MEM_CERR(x) ((x) << S_MSI_MEM_CERR)
+#define F_MSI_MEM_CERR V_MSI_MEM_CERR(1U)
+
+#define S_INIC_WRDATA_FIFO_CERR 15
+#define V_INIC_WRDATA_FIFO_CERR(x) ((x) << S_INIC_WRDATA_FIFO_CERR)
+#define F_INIC_WRDATA_FIFO_CERR V_INIC_WRDATA_FIFO_CERR(1U)
+
+#define S_INIC_RDATAFIFO_CERR 14
+#define V_INIC_RDATAFIFO_CERR(x) ((x) << S_INIC_RDATAFIFO_CERR)
+#define F_INIC_RDATAFIFO_CERR V_INIC_RDATAFIFO_CERR(1U)
+
+#define S_ARM_DB_SRAM_CERR 12
+#define M_ARM_DB_SRAM_CERR 0x3U
+#define V_ARM_DB_SRAM_CERR(x) ((x) << S_ARM_DB_SRAM_CERR)
+#define G_ARM_DB_SRAM_CERR(x) (((x) >> S_ARM_DB_SRAM_CERR) & M_ARM_DB_SRAM_CERR)
+
+#define S_ICB_RAM_CERR 11
+#define V_ICB_RAM_CERR(x) ((x) << S_ICB_RAM_CERR)
+#define F_ICB_RAM_CERR V_ICB_RAM_CERR(1U)
+
+#define S_CC_SRAM_PKA_CERR 10
+#define V_CC_SRAM_PKA_CERR(x) ((x) << S_CC_SRAM_PKA_CERR)
+#define F_CC_SRAM_PKA_CERR V_CC_SRAM_PKA_CERR(1U)
+
+#define S_CC_SRAM_SEC_CERR 9
+#define V_CC_SRAM_SEC_CERR(x) ((x) << S_CC_SRAM_SEC_CERR)
+#define F_CC_SRAM_SEC_CERR V_CC_SRAM_SEC_CERR(1U)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL_2 0x471e0
+
+#define S_INTERRUPT_CLEAR 0
+#define V_INTERRUPT_CLEAR(x) ((x) << S_INTERRUPT_CLEAR)
+#define F_INTERRUPT_CLEAR V_INTERRUPT_CLEAR(1U)
+
+#define A_ARM_PERIPHERAL_INT_ENB 0x471e4
+#define A_ARM_CERR_INT_ENB0 0x471e8
+#define A_ARM_CPU_DBG_ROM_ADDR0 0x47200
+
+#define S_CPUDBGROMADDR0 0
+#define M_CPUDBGROMADDR0 0xfffffU
+#define V_CPUDBGROMADDR0(x) ((x) << S_CPUDBGROMADDR0)
+#define G_CPUDBGROMADDR0(x) (((x) >> S_CPUDBGROMADDR0) & M_CPUDBGROMADDR0)
+
+#define A_ARM_CPU_DBG_ROM_ADDR1 0x47204
+
+#define S_CPUDBGROMADDR1 0
+#define M_CPUDBGROMADDR1 0x3ffU
+#define V_CPUDBGROMADDR1(x) ((x) << S_CPUDBGROMADDR1)
+#define G_CPUDBGROMADDR1(x) (((x) >> S_CPUDBGROMADDR1) & M_CPUDBGROMADDR1)
+
+#define A_ARM_CPU_DBG_ROM_ADDR_VALID 0x47208
+
+#define S_CPUDBGROMADDRVALID 0
+#define V_CPUDBGROMADDRVALID(x) ((x) << S_CPUDBGROMADDRVALID)
+#define F_CPUDBGROMADDRVALID V_CPUDBGROMADDRVALID(1U)
+
+#define A_ARM_PERR_ENABLE0 0x4720c
+#define A_ARM_SRAM2_WRITE_DATA3 0x47210
+#define A_ARM_SRAM2_READ_DATA3 0x4721c
+#define A_ARM_CPU_DFT_CFG 0x47220
+
+#define S_CPUMBISTREQ 11
+#define V_CPUMBISTREQ(x) ((x) << S_CPUMBISTREQ)
+#define F_CPUMBISTREQ V_CPUMBISTREQ(1U)
+
+#define S_CPUMBISTRSTN 10
+#define V_CPUMBISTRSTN(x) ((x) << S_CPUMBISTRSTN)
+#define F_CPUMBISTRSTN V_CPUMBISTRSTN(1U)
+
+#define S_CPUDFTDFTSE 9
+#define V_CPUDFTDFTSE(x) ((x) << S_CPUDFTDFTSE)
+#define F_CPUDFTDFTSE V_CPUDFTDFTSE(1U)
+
+#define S_CPUDFTRSTDISABLE 8
+#define V_CPUDFTRSTDISABLE(x) ((x) << S_CPUDFTRSTDISABLE)
+#define F_CPUDFTRSTDISABLE V_CPUDFTRSTDISABLE(1U)
+
+#define S_CPUDFTRAMDISABLE 7
+#define V_CPUDFTRAMDISABLE(x) ((x) << S_CPUDFTRAMDISABLE)
+#define F_CPUDFTRAMDISABLE V_CPUDFTRAMDISABLE(1U)
+
+#define S_CPUDFTMCPDISABLE 6
+#define V_CPUDFTMCPDISABLE(x) ((x) << S_CPUDFTMCPDISABLE)
+#define F_CPUDFTMCPDISABLE V_CPUDFTMCPDISABLE(1U)
+
+#define S_CPUDFTL2CLKDISABLE 5
+#define V_CPUDFTL2CLKDISABLE(x) ((x) << S_CPUDFTL2CLKDISABLE)
+#define F_CPUDFTL2CLKDISABLE V_CPUDFTL2CLKDISABLE(1U)
+
+#define S_CPUDFTCLKDISABLE3 4
+#define V_CPUDFTCLKDISABLE3(x) ((x) << S_CPUDFTCLKDISABLE3)
+#define F_CPUDFTCLKDISABLE3 V_CPUDFTCLKDISABLE3(1U)
+
+#define S_CPUDFTCLKDISABLE2 3
+#define V_CPUDFTCLKDISABLE2(x) ((x) << S_CPUDFTCLKDISABLE2)
+#define F_CPUDFTCLKDISABLE2 V_CPUDFTCLKDISABLE2(1U)
+
+#define S_CPUDFTCLKDISABLE1 2
+#define V_CPUDFTCLKDISABLE1(x) ((x) << S_CPUDFTCLKDISABLE1)
+#define F_CPUDFTCLKDISABLE1 V_CPUDFTCLKDISABLE1(1U)
+
+#define S_CPUDFTCLKDISABLE0 1
+#define V_CPUDFTCLKDISABLE0(x) ((x) << S_CPUDFTCLKDISABLE0)
+#define F_CPUDFTCLKDISABLE0 V_CPUDFTCLKDISABLE0(1U)
+
+#define S_CPUDFTCLKBYPASS 0
+#define V_CPUDFTCLKBYPASS(x) ((x) << S_CPUDFTCLKBYPASS)
+#define F_CPUDFTCLKBYPASS V_CPUDFTCLKBYPASS(1U)
+
+#define A_ARM_APB_CFG 0x47224
+
+#define S_APB_CFG 0
+#define M_APB_CFG 0x3ffffU
+#define V_APB_CFG(x) ((x) << S_APB_CFG)
+#define G_APB_CFG(x) (((x) >> S_APB_CFG) & M_APB_CFG)
+
+#define A_ARM_EMMC_BUFS 0x47228
+
+#define S_EMMC_BUFS_OEN 2
+#define M_EMMC_BUFS_OEN 0x3U
+#define V_EMMC_BUFS_OEN(x) ((x) << S_EMMC_BUFS_OEN)
+#define G_EMMC_BUFS_OEN(x) (((x) >> S_EMMC_BUFS_OEN) & M_EMMC_BUFS_OEN)
+
+#define S_EMMC_BUFS_I 0
+#define M_EMMC_BUFS_I 0x3U
+#define V_EMMC_BUFS_I(x) ((x) << S_EMMC_BUFS_I)
+#define G_EMMC_BUFS_I(x) (((x) >> S_EMMC_BUFS_I) & M_EMMC_BUFS_I)
+
+#define A_ARM_SWP_EN 0x4722c
+#define A_ARM_ADB_PWR_DWN_REQ_N 0x47230
+
+#define S_ADBPWRDWNREQN 0
+#define V_ADBPWRDWNREQN(x) ((x) << S_ADBPWRDWNREQN)
+#define F_ADBPWRDWNREQN V_ADBPWRDWNREQN(1U)
+
+#define A_ARM_GIC_USER 0x47238
+
+#define S_CPU_GIC_USER 0
+#define M_CPU_GIC_USER 0x7fU
+#define V_CPU_GIC_USER(x) ((x) << S_CPU_GIC_USER)
+#define G_CPU_GIC_USER(x) (((x) >> S_CPU_GIC_USER) & M_CPU_GIC_USER)
+
+#define A_ARM_DBPROC_SRAM_TH_ADDR 0x47240
+
+#define S_DBPROC_TH_ADDR 0
+#define M_DBPROC_TH_ADDR 0x1ffU
+#define V_DBPROC_TH_ADDR(x) ((x) << S_DBPROC_TH_ADDR)
+#define G_DBPROC_TH_ADDR(x) (((x) >> S_DBPROC_TH_ADDR) & M_DBPROC_TH_ADDR)
+
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA0 0x47244
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA1 0x47248
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA2 0x4724c
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA3 0x47250
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA0 0x47254
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA1 0x47258
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA2 0x4725c
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA3 0x47260
+#define A_ARM_SWP_EN_2 0x47264
+
+#define S_SWP_EN_2 0
+#define M_SWP_EN_2 0x3U
+#define V_SWP_EN_2(x) ((x) << S_SWP_EN_2)
+#define G_SWP_EN_2(x) (((x) >> S_SWP_EN_2) & M_SWP_EN_2)
+
+#define A_ARM_GIC_ERR 0x47268
+
+#define S_ECC_FATAL 1
+#define V_ECC_FATAL(x) ((x) << S_ECC_FATAL)
+#define F_ECC_FATAL V_ECC_FATAL(1U)
+
+#define S_AXIM_ERR 0
+#define V_AXIM_ERR(x) ((x) << S_AXIM_ERR)
+#define F_AXIM_ERR V_AXIM_ERR(1U)
+
+#define A_ARM_CPU_STAT 0x4726c
+
+#define S_CPU_L2_QACTIVE 12
+#define V_CPU_L2_QACTIVE(x) ((x) << S_CPU_L2_QACTIVE)
+#define F_CPU_L2_QACTIVE V_CPU_L2_QACTIVE(1U)
+
+#define S_WAKEUPM_O_ADB 11
+#define V_WAKEUPM_O_ADB(x) ((x) << S_WAKEUPM_O_ADB)
+#define F_WAKEUPM_O_ADB V_WAKEUPM_O_ADB(1U)
+
+#define S_PWRQACTIVEM_ADB 10
+#define V_PWRQACTIVEM_ADB(x) ((x) << S_PWRQACTIVEM_ADB)
+#define F_PWRQACTIVEM_ADB V_PWRQACTIVEM_ADB(1U)
+
+#define S_CLKQACTIVEM_ADB 9
+#define V_CLKQACTIVEM_ADB(x) ((x) << S_CLKQACTIVEM_ADB)
+#define F_CLKQACTIVEM_ADB V_CLKQACTIVEM_ADB(1U)
+
+#define S_CLKQDENYM_ADB 8
+#define V_CLKQDENYM_ADB(x) ((x) << S_CLKQDENYM_ADB)
+#define F_CLKQDENYM_ADB V_CLKQDENYM_ADB(1U)
+
+#define S_CLKQACCEPTNM_ADB 7
+#define V_CLKQACCEPTNM_ADB(x) ((x) << S_CLKQACCEPTNM_ADB)
+#define F_CLKQACCEPTNM_ADB V_CLKQACCEPTNM_ADB(1U)
+
+#define S_WAKEUPS_O_ADB 6
+#define V_WAKEUPS_O_ADB(x) ((x) << S_WAKEUPS_O_ADB)
+#define F_WAKEUPS_O_ADB V_WAKEUPS_O_ADB(1U)
+
+#define S_PWRQACTIVES_ADB 5
+#define V_PWRQACTIVES_ADB(x) ((x) << S_PWRQACTIVES_ADB)
+#define F_PWRQACTIVES_ADB V_PWRQACTIVES_ADB(1U)
+
+#define S_CLKQACTIVES_ADB 4
+#define V_CLKQACTIVES_ADB(x) ((x) << S_CLKQACTIVES_ADB)
+#define F_CLKQACTIVES_ADB V_CLKQACTIVES_ADB(1U)
+
+#define S_CLKQDENYS_ADB 3
+#define V_CLKQDENYS_ADB(x) ((x) << S_CLKQDENYS_ADB)
+#define F_CLKQDENYS_ADB V_CLKQDENYS_ADB(1U)
+
+#define S_CLKQACCEPTNS_ADB 2
+#define V_CLKQACCEPTNS_ADB(x) ((x) << S_CLKQACCEPTNS_ADB)
+#define F_CLKQACCEPTNS_ADB V_CLKQACCEPTNS_ADB(1U)
+
+#define S_PWRQDENYS_ADB 1
+#define V_PWRQDENYS_ADB(x) ((x) << S_PWRQDENYS_ADB)
+#define F_PWRQDENYS_ADB V_PWRQDENYS_ADB(1U)
+
+#define S_PWRQACCEPTNS_ADB 0
+#define V_PWRQACCEPTNS_ADB(x) ((x) << S_PWRQACCEPTNS_ADB)
+#define F_PWRQACCEPTNS_ADB V_PWRQACCEPTNS_ADB(1U)
+
+#define A_ARM_DEBUG_INT_WRITE_DATA 0x47270
+
+#define S_DEBUG_INT_WRITE_DATA 0
+#define M_DEBUG_INT_WRITE_DATA 0xfffU
+#define V_DEBUG_INT_WRITE_DATA(x) ((x) << S_DEBUG_INT_WRITE_DATA)
+#define G_DEBUG_INT_WRITE_DATA(x) (((x) >> S_DEBUG_INT_WRITE_DATA) & M_DEBUG_INT_WRITE_DATA)
+
+#define A_ARM_DEBUG_INT_STAT 0x47274
+
+#define S_DEBUG_INT_STATUS_REG 0
+#define M_DEBUG_INT_STATUS_REG 0xfffU
+#define V_DEBUG_INT_STATUS_REG(x) ((x) << S_DEBUG_INT_STATUS_REG)
+#define G_DEBUG_INT_STATUS_REG(x) (((x) >> S_DEBUG_INT_STATUS_REG) & M_DEBUG_INT_STATUS_REG)
+
+#define A_ARM_DEBUG_STAT 0x47278
+
+#define S_ARM_DEBUG_STAT 0
+#define M_ARM_DEBUG_STAT 0x3fffU
+#define V_ARM_DEBUG_STAT(x) ((x) << S_ARM_DEBUG_STAT)
+#define G_ARM_DEBUG_STAT(x) (((x) >> S_ARM_DEBUG_STAT) & M_ARM_DEBUG_STAT)
+
+#define A_ARM_SIZE_STAT 0x4727c
+
+#define S_ARM_SIZE_STAT 0
+#define M_ARM_SIZE_STAT 0x3fffffffU
+#define V_ARM_SIZE_STAT(x) ((x) << S_ARM_SIZE_STAT)
+#define G_ARM_SIZE_STAT(x) (((x) >> S_ARM_SIZE_STAT) & M_ARM_SIZE_STAT)
+
+#define A_ARM_CCI_CFG0 0x47280
+
+#define S_CCIBROADCASTCACHEMAINT 28
+#define M_CCIBROADCASTCACHEMAINT 0x7U
+#define V_CCIBROADCASTCACHEMAINT(x) ((x) << S_CCIBROADCASTCACHEMAINT)
+#define G_CCIBROADCASTCACHEMAINT(x) (((x) >> S_CCIBROADCASTCACHEMAINT) & M_CCIBROADCASTCACHEMAINT)
+
+#define S_CCISTRIPINGGRANULE 25
+#define M_CCISTRIPINGGRANULE 0x7U
+#define V_CCISTRIPINGGRANULE(x) ((x) << S_CCISTRIPINGGRANULE)
+#define G_CCISTRIPINGGRANULE(x) (((x) >> S_CCISTRIPINGGRANULE) & M_CCISTRIPINGGRANULE)
+
+#define S_CCIPERIPHBASE 0
+#define M_CCIPERIPHBASE 0x1ffffffU
+#define V_CCIPERIPHBASE(x) ((x) << S_CCIPERIPHBASE)
+#define G_CCIPERIPHBASE(x) (((x) >> S_CCIPERIPHBASE) & M_CCIPERIPHBASE)
+
+#define A_ARM_CCI_CFG1 0x47284
+
+#define S_CCIDFTRSTDISABLE 18
+#define V_CCIDFTRSTDISABLE(x) ((x) << S_CCIDFTRSTDISABLE)
+#define F_CCIDFTRSTDISABLE V_CCIDFTRSTDISABLE(1U)
+
+#define S_CCISPNIDEN 17
+#define V_CCISPNIDEN(x) ((x) << S_CCISPNIDEN)
+#define F_CCISPNIDEN V_CCISPNIDEN(1U)
+
+#define S_CCINIDEN 16
+#define V_CCINIDEN(x) ((x) << S_CCINIDEN)
+#define F_CCINIDEN V_CCINIDEN(1U)
+
+#define S_CCIACCHANNELN 11
+#define M_CCIACCHANNELN 0x1fU
+#define V_CCIACCHANNELN(x) ((x) << S_CCIACCHANNELN)
+#define G_CCIACCHANNELN(x) (((x) >> S_CCIACCHANNELN) & M_CCIACCHANNELN)
+
+#define S_CCIQOSOVERRIDE 6
+#define M_CCIQOSOVERRIDE 0x1fU
+#define V_CCIQOSOVERRIDE(x) ((x) << S_CCIQOSOVERRIDE)
+#define G_CCIQOSOVERRIDE(x) (((x) >> S_CCIQOSOVERRIDE) & M_CCIQOSOVERRIDE)
+
+#define S_CCIBUFFERABLEOVERRIDE 3
+#define M_CCIBUFFERABLEOVERRIDE 0x7U
+#define V_CCIBUFFERABLEOVERRIDE(x) ((x) << S_CCIBUFFERABLEOVERRIDE)
+#define G_CCIBUFFERABLEOVERRIDE(x) (((x) >> S_CCIBUFFERABLEOVERRIDE) & M_CCIBUFFERABLEOVERRIDE)
+
+#define S_CCIBARRIERTERMINATE 0
+#define M_CCIBARRIERTERMINATE 0x7U
+#define V_CCIBARRIERTERMINATE(x) ((x) << S_CCIBARRIERTERMINATE)
+#define G_CCIBARRIERTERMINATE(x) (((x) >> S_CCIBARRIERTERMINATE) & M_CCIBARRIERTERMINATE)
+
+#define A_ARM_CCI_CFG2 0x47288
+
+#define S_CCIADDRMAP15 30
+#define M_CCIADDRMAP15 0x3U
+#define V_CCIADDRMAP15(x) ((x) << S_CCIADDRMAP15)
+#define G_CCIADDRMAP15(x) (((x) >> S_CCIADDRMAP15) & M_CCIADDRMAP15)
+
+#define S_CCIADDRMAP14 28
+#define M_CCIADDRMAP14 0x3U
+#define V_CCIADDRMAP14(x) ((x) << S_CCIADDRMAP14)
+#define G_CCIADDRMAP14(x) (((x) >> S_CCIADDRMAP14) & M_CCIADDRMAP14)
+
+#define S_CCIADDRMAP13 26
+#define M_CCIADDRMAP13 0x3U
+#define V_CCIADDRMAP13(x) ((x) << S_CCIADDRMAP13)
+#define G_CCIADDRMAP13(x) (((x) >> S_CCIADDRMAP13) & M_CCIADDRMAP13)
+
+#define S_CCIADDRMAP12 24
+#define M_CCIADDRMAP12 0x3U
+#define V_CCIADDRMAP12(x) ((x) << S_CCIADDRMAP12)
+#define G_CCIADDRMAP12(x) (((x) >> S_CCIADDRMAP12) & M_CCIADDRMAP12)
+
+#define S_CCIADDRMAP11 22
+#define M_CCIADDRMAP11 0x3U
+#define V_CCIADDRMAP11(x) ((x) << S_CCIADDRMAP11)
+#define G_CCIADDRMAP11(x) (((x) >> S_CCIADDRMAP11) & M_CCIADDRMAP11)
+
+#define S_CCIADDRMAP10 20
+#define M_CCIADDRMAP10 0x3U
+#define V_CCIADDRMAP10(x) ((x) << S_CCIADDRMAP10)
+#define G_CCIADDRMAP10(x) (((x) >> S_CCIADDRMAP10) & M_CCIADDRMAP10)
+
+#define S_CCIADDRMAP9 18
+#define M_CCIADDRMAP9 0x3U
+#define V_CCIADDRMAP9(x) ((x) << S_CCIADDRMAP9)
+#define G_CCIADDRMAP9(x) (((x) >> S_CCIADDRMAP9) & M_CCIADDRMAP9)
+
+#define S_CCIADDRMAP8 16
+#define M_CCIADDRMAP8 0x3U
+#define V_CCIADDRMAP8(x) ((x) << S_CCIADDRMAP8)
+#define G_CCIADDRMAP8(x) (((x) >> S_CCIADDRMAP8) & M_CCIADDRMAP8)
+
+#define S_CCIADDRMAP7 14
+#define M_CCIADDRMAP7 0x3U
+#define V_CCIADDRMAP7(x) ((x) << S_CCIADDRMAP7)
+#define G_CCIADDRMAP7(x) (((x) >> S_CCIADDRMAP7) & M_CCIADDRMAP7)
+
+#define S_CCIADDRMAP6 12
+#define M_CCIADDRMAP6 0x3U
+#define V_CCIADDRMAP6(x) ((x) << S_CCIADDRMAP6)
+#define G_CCIADDRMAP6(x) (((x) >> S_CCIADDRMAP6) & M_CCIADDRMAP6)
+
+#define S_CCIADDRMAP5 10
+#define M_CCIADDRMAP5 0x3U
+#define V_CCIADDRMAP5(x) ((x) << S_CCIADDRMAP5)
+#define G_CCIADDRMAP5(x) (((x) >> S_CCIADDRMAP5) & M_CCIADDRMAP5)
+
+#define S_CCIADDRMAP4 8
+#define M_CCIADDRMAP4 0x3U
+#define V_CCIADDRMAP4(x) ((x) << S_CCIADDRMAP4)
+#define G_CCIADDRMAP4(x) (((x) >> S_CCIADDRMAP4) & M_CCIADDRMAP4)
+
+#define S_CCIADDRMAP3 6
+#define M_CCIADDRMAP3 0x3U
+#define V_CCIADDRMAP3(x) ((x) << S_CCIADDRMAP3)
+#define G_CCIADDRMAP3(x) (((x) >> S_CCIADDRMAP3) & M_CCIADDRMAP3)
+
+#define S_CCIADDRMAP2 4
+#define M_CCIADDRMAP2 0x3U
+#define V_CCIADDRMAP2(x) ((x) << S_CCIADDRMAP2)
+#define G_CCIADDRMAP2(x) (((x) >> S_CCIADDRMAP2) & M_CCIADDRMAP2)
+
+#define S_CCIADDRMAP1 2
+#define M_CCIADDRMAP1 0x3U
+#define V_CCIADDRMAP1(x) ((x) << S_CCIADDRMAP1)
+#define G_CCIADDRMAP1(x) (((x) >> S_CCIADDRMAP1) & M_CCIADDRMAP1)
+
+#define S_CCIADDRMAP0 0
+#define M_CCIADDRMAP0 0x3U
+#define V_CCIADDRMAP0(x) ((x) << S_CCIADDRMAP0)
+#define G_CCIADDRMAP0(x) (((x) >> S_CCIADDRMAP0) & M_CCIADDRMAP0)
+
+#define A_ARM_CCI_STATUS 0x4728c
+
+#define S_CCICACTIVE 6
+#define V_CCICACTIVE(x) ((x) << S_CCICACTIVE)
+#define F_CCICACTIVE V_CCICACTIVE(1U)
+
+#define S_CCICSYSACK 5
+#define V_CCICSYSACK(x) ((x) << S_CCICSYSACK)
+#define F_CCICSYSACK V_CCICSYSACK(1U)
+
+#define S_CCINEVNTCNTOVERFLOW 0
+#define M_CCINEVNTCNTOVERFLOW 0x1fU
+#define V_CCINEVNTCNTOVERFLOW(x) ((x) << S_CCINEVNTCNTOVERFLOW)
+#define G_CCINEVNTCNTOVERFLOW(x) (((x) >> S_CCINEVNTCNTOVERFLOW) & M_CCINEVNTCNTOVERFLOW)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_CFG 0x47290
+
+#define S_CCIVWREADYVN3M 20
+#define V_CCIVWREADYVN3M(x) ((x) << S_CCIVWREADYVN3M)
+#define F_CCIVWREADYVN3M V_CCIVWREADYVN3M(1U)
+
+#define S_CCIVAWREADYVN3M 19
+#define V_CCIVAWREADYVN3M(x) ((x) << S_CCIVAWREADYVN3M)
+#define F_CCIVAWREADYVN3M V_CCIVAWREADYVN3M(1U)
+
+#define S_CCIVARREADYVN3M 18
+#define V_CCIVARREADYVN3M(x) ((x) << S_CCIVARREADYVN3M)
+#define F_CCIVARREADYVN3M V_CCIVARREADYVN3M(1U)
+
+#define S_CCIVWREADYVN2M 17
+#define V_CCIVWREADYVN2M(x) ((x) << S_CCIVWREADYVN2M)
+#define F_CCIVWREADYVN2M V_CCIVWREADYVN2M(1U)
+
+#define S_CCIVAWREADYVN2M 16
+#define V_CCIVAWREADYVN2M(x) ((x) << S_CCIVAWREADYVN2M)
+#define F_CCIVAWREADYVN2M V_CCIVAWREADYVN2M(1U)
+
+#define S_CCIVARREADYVN2M 15
+#define V_CCIVARREADYVN2M(x) ((x) << S_CCIVARREADYVN2M)
+#define F_CCIVARREADYVN2M V_CCIVARREADYVN2M(1U)
+
+#define S_CCIVWREADYVN1M 14
+#define V_CCIVWREADYVN1M(x) ((x) << S_CCIVWREADYVN1M)
+#define F_CCIVWREADYVN1M V_CCIVWREADYVN1M(1U)
+
+#define S_CCIVAWREADYVN1M 13
+#define V_CCIVAWREADYVN1M(x) ((x) << S_CCIVAWREADYVN1M)
+#define F_CCIVAWREADYVN1M V_CCIVAWREADYVN1M(1U)
+
+#define S_CCIVARREADYVN1M 12
+#define V_CCIVARREADYVN1M(x) ((x) << S_CCIVARREADYVN1M)
+#define F_CCIVARREADYVN1M V_CCIVARREADYVN1M(1U)
+
+#define S_CCIVWREADYVN0M 11
+#define V_CCIVWREADYVN0M(x) ((x) << S_CCIVWREADYVN0M)
+#define F_CCIVWREADYVN0M V_CCIVWREADYVN0M(1U)
+
+#define S_CCIVAWREADYVN0M 10
+#define V_CCIVAWREADYVN0M(x) ((x) << S_CCIVAWREADYVN0M)
+#define F_CCIVAWREADYVN0M V_CCIVAWREADYVN0M(1U)
+
+#define S_CCIVARREADYVN0M 9
+#define V_CCIVARREADYVN0M(x) ((x) << S_CCIVARREADYVN0M)
+#define F_CCIVARREADYVN0M V_CCIVARREADYVN0M(1U)
+
+#define S_CCIQVNPREALLOCWM 5
+#define M_CCIQVNPREALLOCWM 0xfU
+#define V_CCIQVNPREALLOCWM(x) ((x) << S_CCIQVNPREALLOCWM)
+#define G_CCIQVNPREALLOCWM(x) (((x) >> S_CCIQVNPREALLOCWM) & M_CCIQVNPREALLOCWM)
+
+#define S_CCIQVNPREALLOCRM 1
+#define M_CCIQVNPREALLOCRM 0xfU
+#define V_CCIQVNPREALLOCRM(x) ((x) << S_CCIQVNPREALLOCRM)
+#define G_CCIQVNPREALLOCRM(x) (((x) >> S_CCIQVNPREALLOCRM) & M_CCIQVNPREALLOCRM)
+
+#define S_CCIQVNENABLEM 0
+#define V_CCIQVNENABLEM(x) ((x) << S_CCIQVNENABLEM)
+#define F_CCIQVNENABLEM V_CCIQVNENABLEM(1U)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_STATUS 0x47294
+
+#define S_CCIVWVALIDN3M 31
+#define V_CCIVWVALIDN3M(x) ((x) << S_CCIVWVALIDN3M)
+#define F_CCIVWVALIDN3M V_CCIVWVALIDN3M(1U)
+
+#define S_CCIVAWVALIDN3M 30
+#define V_CCIVAWVALIDN3M(x) ((x) << S_CCIVAWVALIDN3M)
+#define F_CCIVAWVALIDN3M V_CCIVAWVALIDN3M(1U)
+
+#define S_CCIVAWQOSN3M 29
+#define V_CCIVAWQOSN3M(x) ((x) << S_CCIVAWQOSN3M)
+#define F_CCIVAWQOSN3M V_CCIVAWQOSN3M(1U)
+
+#define S_CCIVARVALIDN3M 28
+#define V_CCIVARVALIDN3M(x) ((x) << S_CCIVARVALIDN3M)
+#define F_CCIVARVALIDN3M V_CCIVARVALIDN3M(1U)
+
+#define S_CCIVARQOSN3M 24
+#define M_CCIVARQOSN3M 0xfU
+#define V_CCIVARQOSN3M(x) ((x) << S_CCIVARQOSN3M)
+#define G_CCIVARQOSN3M(x) (((x) >> S_CCIVARQOSN3M) & M_CCIVARQOSN3M)
+
+#define S_CCIVWVALIDN2M 23
+#define V_CCIVWVALIDN2M(x) ((x) << S_CCIVWVALIDN2M)
+#define F_CCIVWVALIDN2M V_CCIVWVALIDN2M(1U)
+
+#define S_CCIVAWVALIDN2M 22
+#define V_CCIVAWVALIDN2M(x) ((x) << S_CCIVAWVALIDN2M)
+#define F_CCIVAWVALIDN2M V_CCIVAWVALIDN2M(1U)
+
+#define S_CCIVAWQOSN2M 21
+#define V_CCIVAWQOSN2M(x) ((x) << S_CCIVAWQOSN2M)
+#define F_CCIVAWQOSN2M V_CCIVAWQOSN2M(1U)
+
+#define S_CCIVARVALIDN2M 20
+#define V_CCIVARVALIDN2M(x) ((x) << S_CCIVARVALIDN2M)
+#define F_CCIVARVALIDN2M V_CCIVARVALIDN2M(1U)
+
+#define S_CCIVARQOSN2M 16
+#define M_CCIVARQOSN2M 0xfU
+#define V_CCIVARQOSN2M(x) ((x) << S_CCIVARQOSN2M)
+#define G_CCIVARQOSN2M(x) (((x) >> S_CCIVARQOSN2M) & M_CCIVARQOSN2M)
+
+#define S_CCIVWVALIDN1M 15
+#define V_CCIVWVALIDN1M(x) ((x) << S_CCIVWVALIDN1M)
+#define F_CCIVWVALIDN1M V_CCIVWVALIDN1M(1U)
+
+#define S_CCIVAWVALIDN1M 14
+#define V_CCIVAWVALIDN1M(x) ((x) << S_CCIVAWVALIDN1M)
+#define F_CCIVAWVALIDN1M V_CCIVAWVALIDN1M(1U)
+
+#define S_CCIVAWQOSN1M 13
+#define V_CCIVAWQOSN1M(x) ((x) << S_CCIVAWQOSN1M)
+#define F_CCIVAWQOSN1M V_CCIVAWQOSN1M(1U)
+
+#define S_CCIVARVALIDN1M 12
+#define V_CCIVARVALIDN1M(x) ((x) << S_CCIVARVALIDN1M)
+#define F_CCIVARVALIDN1M V_CCIVARVALIDN1M(1U)
+
+#define S_CCIVARQOSN1M 8
+#define M_CCIVARQOSN1M 0xfU
+#define V_CCIVARQOSN1M(x) ((x) << S_CCIVARQOSN1M)
+#define G_CCIVARQOSN1M(x) (((x) >> S_CCIVARQOSN1M) & M_CCIVARQOSN1M)
+
+#define S_CCIVWVALIDN0M 7
+#define V_CCIVWVALIDN0M(x) ((x) << S_CCIVWVALIDN0M)
+#define F_CCIVWVALIDN0M V_CCIVWVALIDN0M(1U)
+
+#define S_CCIVAWVALIDN0M 6
+#define V_CCIVAWVALIDN0M(x) ((x) << S_CCIVAWVALIDN0M)
+#define F_CCIVAWVALIDN0M V_CCIVAWVALIDN0M(1U)
+
+#define S_CCIVAWQOSN0M 5
+#define V_CCIVAWQOSN0M(x) ((x) << S_CCIVAWQOSN0M)
+#define F_CCIVAWQOSN0M V_CCIVAWQOSN0M(1U)
+
+#define S_CCIVARVALIDN0M 4
+#define V_CCIVARVALIDN0M(x) ((x) << S_CCIVARVALIDN0M)
+#define F_CCIVARVALIDN0M V_CCIVARVALIDN0M(1U)
+
+#define S_CCIVARQOSN0M 0
+#define M_CCIVARQOSN0M 0xfU
+#define V_CCIVARQOSN0M(x) ((x) << S_CCIVARQOSN0M)
+#define G_CCIVARQOSN0M(x) (((x) >> S_CCIVARQOSN0M) & M_CCIVARQOSN0M)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_CFG 0x472d0
+
+#define S_CCIQVNVNETS 0
+#define M_CCIQVNVNETS 0x3U
+#define V_CCIQVNVNETS(x) ((x) << S_CCIQVNVNETS)
+#define G_CCIQVNVNETS(x) (((x) >> S_CCIQVNVNETS) & M_CCIQVNVNETS)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_STATUS 0x472d4
+
+#define S_CCIEVNTAWQOS 4
+#define M_CCIEVNTAWQOS 0xfU
+#define V_CCIEVNTAWQOS(x) ((x) << S_CCIEVNTAWQOS)
+#define G_CCIEVNTAWQOS(x) (((x) >> S_CCIEVNTAWQOS) & M_CCIEVNTAWQOS)
+
+#define S_CCIEVNTARQOS 0
+#define M_CCIEVNTARQOS 0xfU
+#define V_CCIEVNTARQOS(x) ((x) << S_CCIEVNTARQOS)
+#define G_CCIEVNTARQOS(x) (((x) >> S_CCIEVNTARQOS) & M_CCIEVNTARQOS)
+
+#define A_ARM_CCI_EVNTBUS 0x47300
+#define A_ARM_CCI_RST_N 0x47318
+
+#define S_CCIRSTN 0
+#define V_CCIRSTN(x) ((x) << S_CCIRSTN)
+#define F_CCIRSTN V_CCIRSTN(1U)
+
+#define A_ARM_CCI_CSYREQ 0x4731c
+
+#define S_CCICSYSREQ 0
+#define V_CCICSYSREQ(x) ((x) << S_CCICSYSREQ)
+#define F_CCICSYSREQ V_CCICSYSREQ(1U)
+
+#define A_ARM_CCI_TR_DEBUGS0 0x47320
+
+#define S_CCIS0RCNT 24
+#define M_CCIS0RCNT 0xffU
+#define V_CCIS0RCNT(x) ((x) << S_CCIS0RCNT)
+#define G_CCIS0RCNT(x) (((x) >> S_CCIS0RCNT) & M_CCIS0RCNT)
+
+#define S_CCIS0ARCNT 16
+#define M_CCIS0ARCNT 0xffU
+#define V_CCIS0ARCNT(x) ((x) << S_CCIS0ARCNT)
+#define G_CCIS0ARCNT(x) (((x) >> S_CCIS0ARCNT) & M_CCIS0ARCNT)
+
+#define S_CCIS0WCNT 8
+#define M_CCIS0WCNT 0xffU
+#define V_CCIS0WCNT(x) ((x) << S_CCIS0WCNT)
+#define G_CCIS0WCNT(x) (((x) >> S_CCIS0WCNT) & M_CCIS0WCNT)
+
+#define S_CCIS0AWCNT 0
+#define M_CCIS0AWCNT 0xffU
+#define V_CCIS0AWCNT(x) ((x) << S_CCIS0AWCNT)
+#define G_CCIS0AWCNT(x) (((x) >> S_CCIS0AWCNT) & M_CCIS0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS1 0x47324
+
+#define S_CCIS1RCNT 24
+#define M_CCIS1RCNT 0xffU
+#define V_CCIS1RCNT(x) ((x) << S_CCIS1RCNT)
+#define G_CCIS1RCNT(x) (((x) >> S_CCIS1RCNT) & M_CCIS1RCNT)
+
+#define S_CCIS1ARCNT 16
+#define M_CCIS1ARCNT 0xffU
+#define V_CCIS1ARCNT(x) ((x) << S_CCIS1ARCNT)
+#define G_CCIS1ARCNT(x) (((x) >> S_CCIS1ARCNT) & M_CCIS1ARCNT)
+
+#define S_CCIS1WCNT 8
+#define M_CCIS1WCNT 0xffU
+#define V_CCIS1WCNT(x) ((x) << S_CCIS1WCNT)
+#define G_CCIS1WCNT(x) (((x) >> S_CCIS1WCNT) & M_CCIS1WCNT)
+
+#define S_CCIS1AWCNT 0
+#define M_CCIS1AWCNT 0xffU
+#define V_CCIS1AWCNT(x) ((x) << S_CCIS1AWCNT)
+#define G_CCIS1AWCNT(x) (((x) >> S_CCIS1AWCNT) & M_CCIS1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS2 0x47328
+
+#define S_CCIS2RCNT 24
+#define M_CCIS2RCNT 0xffU
+#define V_CCIS2RCNT(x) ((x) << S_CCIS2RCNT)
+#define G_CCIS2RCNT(x) (((x) >> S_CCIS2RCNT) & M_CCIS2RCNT)
+
+#define S_CCIS2ARCNT 16
+#define M_CCIS2ARCNT 0xffU
+#define V_CCIS2ARCNT(x) ((x) << S_CCIS2ARCNT)
+#define G_CCIS2ARCNT(x) (((x) >> S_CCIS2ARCNT) & M_CCIS2ARCNT)
+
+#define S_CCIS2WCNT 8
+#define M_CCIS2WCNT 0xffU
+#define V_CCIS2WCNT(x) ((x) << S_CCIS2WCNT)
+#define G_CCIS2WCNT(x) (((x) >> S_CCIS2WCNT) & M_CCIS2WCNT)
+
+#define S_CCIS2AWCNT 0
+#define M_CCIS2AWCNT 0xffU
+#define V_CCIS2AWCNT(x) ((x) << S_CCIS2AWCNT)
+#define G_CCIS2AWCNT(x) (((x) >> S_CCIS2AWCNT) & M_CCIS2AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS3 0x4732c
+
+#define S_CCIS3RCNT 24
+#define M_CCIS3RCNT 0xffU
+#define V_CCIS3RCNT(x) ((x) << S_CCIS3RCNT)
+#define G_CCIS3RCNT(x) (((x) >> S_CCIS3RCNT) & M_CCIS3RCNT)
+
+#define S_CCIS3ARCNT 16
+#define M_CCIS3ARCNT 0xffU
+#define V_CCIS3ARCNT(x) ((x) << S_CCIS3ARCNT)
+#define G_CCIS3ARCNT(x) (((x) >> S_CCIS3ARCNT) & M_CCIS3ARCNT)
+
+#define S_CCIS3WCNT 8
+#define M_CCIS3WCNT 0xffU
+#define V_CCIS3WCNT(x) ((x) << S_CCIS3WCNT)
+#define G_CCIS3WCNT(x) (((x) >> S_CCIS3WCNT) & M_CCIS3WCNT)
+
+#define S_CCIS3AWCNT 0
+#define M_CCIS3AWCNT 0xffU
+#define V_CCIS3AWCNT(x) ((x) << S_CCIS3AWCNT)
+#define G_CCIS3AWCNT(x) (((x) >> S_CCIS3AWCNT) & M_CCIS3AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS4 0x47330
+
+#define S_CCIS4RCNT 24
+#define M_CCIS4RCNT 0xffU
+#define V_CCIS4RCNT(x) ((x) << S_CCIS4RCNT)
+#define G_CCIS4RCNT(x) (((x) >> S_CCIS4RCNT) & M_CCIS4RCNT)
+
+#define S_CCIS4ARCNT 16
+#define M_CCIS4ARCNT 0xffU
+#define V_CCIS4ARCNT(x) ((x) << S_CCIS4ARCNT)
+#define G_CCIS4ARCNT(x) (((x) >> S_CCIS4ARCNT) & M_CCIS4ARCNT)
+
+#define S_CCIS4WCNT 8
+#define M_CCIS4WCNT 0xffU
+#define V_CCIS4WCNT(x) ((x) << S_CCIS4WCNT)
+#define G_CCIS4WCNT(x) (((x) >> S_CCIS4WCNT) & M_CCIS4WCNT)
+
+#define S_CCIS4AWCNT 0
+#define M_CCIS4AWCNT 0xffU
+#define V_CCIS4AWCNT(x) ((x) << S_CCIS4AWCNT)
+#define G_CCIS4AWCNT(x) (((x) >> S_CCIS4AWCNT) & M_CCIS4AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS34 0x47334
+
+#define S_CCIS4RSPCNT 24
+#define M_CCIS4RSPCNT 0xffU
+#define V_CCIS4RSPCNT(x) ((x) << S_CCIS4RSPCNT)
+#define G_CCIS4RSPCNT(x) (((x) >> S_CCIS4RSPCNT) & M_CCIS4RSPCNT)
+
+#define S_CCIS4ACCNT 16
+#define M_CCIS4ACCNT 0xffU
+#define V_CCIS4ACCNT(x) ((x) << S_CCIS4ACCNT)
+#define G_CCIS4ACCNT(x) (((x) >> S_CCIS4ACCNT) & M_CCIS4ACCNT)
+
+#define S_CCIS3RSPCNT 8
+#define M_CCIS3RSPCNT 0xffU
+#define V_CCIS3RSPCNT(x) ((x) << S_CCIS3RSPCNT)
+#define G_CCIS3RSPCNT(x) (((x) >> S_CCIS3RSPCNT) & M_CCIS3RSPCNT)
+
+#define S_CCIS3ACCNT 0
+#define M_CCIS3ACCNT 0xffU
+#define V_CCIS3ACCNT(x) ((x) << S_CCIS3ACCNT)
+#define G_CCIS3ACCNT(x) (((x) >> S_CCIS3ACCNT) & M_CCIS3ACCNT)
+
+#define A_ARM_CCI_TR_DEBUGM0 0x47338
+
+#define S_CCIM0RCNT 24
+#define M_CCIM0RCNT 0xffU
+#define V_CCIM0RCNT(x) ((x) << S_CCIM0RCNT)
+#define G_CCIM0RCNT(x) (((x) >> S_CCIM0RCNT) & M_CCIM0RCNT)
+
+#define S_CCIM0ARCNT 16
+#define M_CCIM0ARCNT 0xffU
+#define V_CCIM0ARCNT(x) ((x) << S_CCIM0ARCNT)
+#define G_CCIM0ARCNT(x) (((x) >> S_CCIM0ARCNT) & M_CCIM0ARCNT)
+
+#define S_CCIM0WCNT 8
+#define M_CCIM0WCNT 0xffU
+#define V_CCIM0WCNT(x) ((x) << S_CCIM0WCNT)
+#define G_CCIM0WCNT(x) (((x) >> S_CCIM0WCNT) & M_CCIM0WCNT)
+
+#define S_CCIM0AWCNT 0
+#define M_CCIM0AWCNT 0xffU
+#define V_CCIM0AWCNT(x) ((x) << S_CCIM0AWCNT)
+#define G_CCIM0AWCNT(x) (((x) >> S_CCIM0AWCNT) & M_CCIM0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM1 0x4733c
+
+#define S_CCIM1RCNT 24
+#define M_CCIM1RCNT 0xffU
+#define V_CCIM1RCNT(x) ((x) << S_CCIM1RCNT)
+#define G_CCIM1RCNT(x) (((x) >> S_CCIM1RCNT) & M_CCIM1RCNT)
+
+#define S_CCIM1ARCNT 16
+#define M_CCIM1ARCNT 0xffU
+#define V_CCIM1ARCNT(x) ((x) << S_CCIM1ARCNT)
+#define G_CCIM1ARCNT(x) (((x) >> S_CCIM1ARCNT) & M_CCIM1ARCNT)
+
+#define S_CCIM1WCNT 8
+#define M_CCIM1WCNT 0xffU
+#define V_CCIM1WCNT(x) ((x) << S_CCIM1WCNT)
+#define G_CCIM1WCNT(x) (((x) >> S_CCIM1WCNT) & M_CCIM1WCNT)
+
+#define S_CCIM1AWCNT 0
+#define M_CCIM1AWCNT 0xffU
+#define V_CCIM1AWCNT(x) ((x) << S_CCIM1AWCNT)
+#define G_CCIM1AWCNT(x) (((x) >> S_CCIM1AWCNT) & M_CCIM1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM2 0x47340
+
+#define S_CCIM2RCNT 24
+#define M_CCIM2RCNT 0xffU
+#define V_CCIM2RCNT(x) ((x) << S_CCIM2RCNT)
+#define G_CCIM2RCNT(x) (((x) >> S_CCIM2RCNT) & M_CCIM2RCNT)
+
+#define S_CCIM2ARCNT 16
+#define M_CCIM2ARCNT 0xffU
+#define V_CCIM2ARCNT(x) ((x) << S_CCIM2ARCNT)
+#define G_CCIM2ARCNT(x) (((x) >> S_CCIM2ARCNT) & M_CCIM2ARCNT)
+
+#define S_CCIM2WCNT 8
+#define M_CCIM2WCNT 0xffU
+#define V_CCIM2WCNT(x) ((x) << S_CCIM2WCNT)
+#define G_CCIM2WCNT(x) (((x) >> S_CCIM2WCNT) & M_CCIM2WCNT)
+
+#define S_CCIM2AWCNT 0
+#define M_CCIM2AWCNT 0xffU
+#define V_CCIM2AWCNT(x) ((x) << S_CCIM2AWCNT)
+#define G_CCIM2AWCNT(x) (((x) >> S_CCIM2AWCNT) & M_CCIM2AWCNT)
+
+#define A_ARM_MA_TR_DEBUG 0x47344
+
+#define S_MA1_RD_CNT 24
+#define M_MA1_RD_CNT 0xffU
+#define V_MA1_RD_CNT(x) ((x) << S_MA1_RD_CNT)
+#define G_MA1_RD_CNT(x) (((x) >> S_MA1_RD_CNT) & M_MA1_RD_CNT)
+
+#define S_MA1_WR_CNT 16
+#define M_MA1_WR_CNT 0xffU
+#define V_MA1_WR_CNT(x) ((x) << S_MA1_WR_CNT)
+#define G_MA1_WR_CNT(x) (((x) >> S_MA1_WR_CNT) & M_MA1_WR_CNT)
+
+#define S_MA0_RD_CNT 8
+#define M_MA0_RD_CNT 0xffU
+#define V_MA0_RD_CNT(x) ((x) << S_MA0_RD_CNT)
+#define G_MA0_RD_CNT(x) (((x) >> S_MA0_RD_CNT) & M_MA0_RD_CNT)
+
+#define S_MA0_WR_CNT 0
+#define M_MA0_WR_CNT 0xffU
+#define V_MA0_WR_CNT(x) ((x) << S_MA0_WR_CNT)
+#define G_MA0_WR_CNT(x) (((x) >> S_MA0_WR_CNT) & M_MA0_WR_CNT)
+
+#define A_ARM_GP_INT 0x47348
+
+#define S_GP_INT 0
+#define M_GP_INT 0xffU
+#define V_GP_INT(x) ((x) << S_GP_INT)
+#define G_GP_INT(x) (((x) >> S_GP_INT) & M_GP_INT)
+
+#define A_ARM_DMA_CFG0 0x47350
+#define A_ARM_DMA_CFG1 0x47354
+
+#define S_DMABOOTPERIPHNS 16
+#define M_DMABOOTPERIPHNS 0x3ffU
+#define V_DMABOOTPERIPHNS(x) ((x) << S_DMABOOTPERIPHNS)
+#define G_DMABOOTPERIPHNS(x) (((x) >> S_DMABOOTPERIPHNS) & M_DMABOOTPERIPHNS)
+
+#define S_DMABOOTIRQNS 4
+#define M_DMABOOTIRQNS 0x3ffU
+#define V_DMABOOTIRQNS(x) ((x) << S_DMABOOTIRQNS)
+#define G_DMABOOTIRQNS(x) (((x) >> S_DMABOOTIRQNS) & M_DMABOOTIRQNS)
+
+#define S_DMABOOTMANAGERNS 1
+#define V_DMABOOTMANAGERNS(x) ((x) << S_DMABOOTMANAGERNS)
+#define F_DMABOOTMANAGERNS V_DMABOOTMANAGERNS(1U)
+
+#define S_DMABOOTFROMPC 0
+#define V_DMABOOTFROMPC(x) ((x) << S_DMABOOTFROMPC)
+#define F_DMABOOTFROMPC V_DMABOOTFROMPC(1U)
+
+#define A_ARM_ARM_CFG0 0x47380
+
+#define S_MESSAGEBYPASS_DATA 2
+#define V_MESSAGEBYPASS_DATA(x) ((x) << S_MESSAGEBYPASS_DATA)
+#define F_MESSAGEBYPASS_DATA V_MESSAGEBYPASS_DATA(1U)
+
+#define S_MESSAGEBYPASS 1
+#define V_MESSAGEBYPASS(x) ((x) << S_MESSAGEBYPASS)
+#define F_MESSAGEBYPASS V_MESSAGEBYPASS(1U)
+
+#define S_PCIEBYPASS 0
+#define V_PCIEBYPASS(x) ((x) << S_PCIEBYPASS)
+#define F_PCIEBYPASS V_PCIEBYPASS(1U)
+
+#define A_ARM_ARM_CFG1 0x47384
+#define A_ARM_ARM_CFG2 0x47390
+#define A_ARM_PCIE_MA_ADDR_REGION0 0x47400
+
+#define S_ADDRREG0 0
+#define M_ADDRREG0 0xfffffffU
+#define V_ADDRREG0(x) ((x) << S_ADDRREG0)
+#define G_ADDRREG0(x) (((x) >> S_ADDRREG0) & M_ADDRREG0)
+
+#define A_ARM_PCIE_MA_ADDR_REGION1 0x47404
+
+#define S_ADDRREG1 0
+#define M_ADDRREG1 0xfffffffU
+#define V_ADDRREG1(x) ((x) << S_ADDRREG1)
+#define G_ADDRREG1(x) (((x) >> S_ADDRREG1) & M_ADDRREG1)
+
+#define A_ARM_PCIE_MA_ADDR_REGION2 0x47408
+
+#define S_ADDRREG2 0
+#define M_ADDRREG2 0xfffffffU
+#define V_ADDRREG2(x) ((x) << S_ADDRREG2)
+#define G_ADDRREG2(x) (((x) >> S_ADDRREG2) & M_ADDRREG2)
+
+#define A_ARM_PCIE_MA_ADDR_REGION3 0x4740c
+
+#define S_ADDRREG3 0
+#define M_ADDRREG3 0xfffffffU
+#define V_ADDRREG3(x) ((x) << S_ADDRREG3)
+#define G_ADDRREG3(x) (((x) >> S_ADDRREG3) & M_ADDRREG3)
+
+#define A_ARM_PCIE_MA_ADDR_REGION4 0x47410
+
+#define S_ADDRREG4 0
+#define M_ADDRREG4 0xfffffffU
+#define V_ADDRREG4(x) ((x) << S_ADDRREG4)
+#define G_ADDRREG4(x) (((x) >> S_ADDRREG4) & M_ADDRREG4)
+
+#define A_ARM_PCIE_MA_ADDR_REGION5 0x47414
+
+#define S_ADDRREG5 0
+#define M_ADDRREG5 0xfffffffU
+#define V_ADDRREG5(x) ((x) << S_ADDRREG5)
+#define G_ADDRREG5(x) (((x) >> S_ADDRREG5) & M_ADDRREG5)
+
+#define A_ARM_PCIE_MA_ADDR_REGION6 0x47418
+
+#define S_ADDRREG6 0
+#define M_ADDRREG6 0xfffffffU
+#define V_ADDRREG6(x) ((x) << S_ADDRREG6)
+#define G_ADDRREG6(x) (((x) >> S_ADDRREG6) & M_ADDRREG6)
+
+#define A_ARM_PCIE_MA_ADDR_REGION7 0x4741c
+
+#define S_ADDRREG7 0
+#define M_ADDRREG7 0xfffffffU
+#define V_ADDRREG7(x) ((x) << S_ADDRREG7)
+#define G_ADDRREG7(x) (((x) >> S_ADDRREG7) & M_ADDRREG7)
+
+#define A_ARM_INTERRUPT_GEN 0x47420
+
+#define S_INT_GEN 0
+#define M_INT_GEN 0x3U
+#define V_INT_GEN(x) ((x) << S_INT_GEN)
+#define G_INT_GEN(x) (((x) >> S_INT_GEN) & M_INT_GEN)
+
+#define A_ARM_INTERRUPT_CLEAR 0x47424
+
+#define S_INT_CLEAR 0
+#define M_INT_CLEAR 0x3U
+#define V_INT_CLEAR(x) ((x) << S_INT_CLEAR)
+#define G_INT_CLEAR(x) (((x) >> S_INT_CLEAR) & M_INT_CLEAR)
+
+#define A_ARM_DEBUG_STATUS_0 0x47428
+#define A_ARM_DBPROC_CONTROL 0x4742c
+
+#define S_NO_OF_INTERRUPTS 0
+#define M_NO_OF_INTERRUPTS 0x3U
+#define V_NO_OF_INTERRUPTS(x) ((x) << S_NO_OF_INTERRUPTS)
+#define G_NO_OF_INTERRUPTS(x) (((x) >> S_NO_OF_INTERRUPTS) & M_NO_OF_INTERRUPTS)
+
+#define A_ARM_PERR_INT_CAUSE1 0x47430
+
+#define S_ARWFIFO0_PERR 31
+#define V_ARWFIFO0_PERR(x) ((x) << S_ARWFIFO0_PERR)
+#define F_ARWFIFO0_PERR V_ARWFIFO0_PERR(1U)
+
+#define S_ARWFIFO1_PERR 30
+#define V_ARWFIFO1_PERR(x) ((x) << S_ARWFIFO1_PERR)
+#define F_ARWFIFO1_PERR V_ARWFIFO1_PERR(1U)
+
+#define S_ARWIDFIFO0_PERR 29
+#define V_ARWIDFIFO0_PERR(x) ((x) << S_ARWIDFIFO0_PERR)
+#define F_ARWIDFIFO0_PERR V_ARWIDFIFO0_PERR(1U)
+
+#define S_ARWIDFIFO1_PERR 28
+#define V_ARWIDFIFO1_PERR(x) ((x) << S_ARWIDFIFO1_PERR)
+#define F_ARWIDFIFO1_PERR V_ARWIDFIFO1_PERR(1U)
+
+#define S_ARIDFIFO0_PERR 27
+#define V_ARIDFIFO0_PERR(x) ((x) << S_ARIDFIFO0_PERR)
+#define F_ARIDFIFO0_PERR V_ARIDFIFO0_PERR(1U)
+
+#define S_ARIDFIFO1_PERR 26
+#define V_ARIDFIFO1_PERR(x) ((x) << S_ARIDFIFO1_PERR)
+#define F_ARIDFIFO1_PERR V_ARIDFIFO1_PERR(1U)
+
+#define S_RRSPADDR_FIFO0_PERR 25
+#define V_RRSPADDR_FIFO0_PERR(x) ((x) << S_RRSPADDR_FIFO0_PERR)
+#define F_RRSPADDR_FIFO0_PERR V_RRSPADDR_FIFO0_PERR(1U)
+
+#define S_RRSPADDR_FIFO1_PERR 24
+#define V_RRSPADDR_FIFO1_PERR(x) ((x) << S_RRSPADDR_FIFO1_PERR)
+#define F_RRSPADDR_FIFO1_PERR V_RRSPADDR_FIFO1_PERR(1U)
+
+#define S_WRSTRB_FIFO0_PERR 23
+#define V_WRSTRB_FIFO0_PERR(x) ((x) << S_WRSTRB_FIFO0_PERR)
+#define F_WRSTRB_FIFO0_PERR V_WRSTRB_FIFO0_PERR(1U)
+
+#define S_WRSTRB_FIFO1_PERR 22
+#define V_WRSTRB_FIFO1_PERR(x) ((x) << S_WRSTRB_FIFO1_PERR)
+#define F_WRSTRB_FIFO1_PERR V_WRSTRB_FIFO1_PERR(1U)
+
+#define S_MA2AXI_RSPDATAPARERR 21
+#define V_MA2AXI_RSPDATAPARERR(x) ((x) << S_MA2AXI_RSPDATAPARERR)
+#define F_MA2AXI_RSPDATAPARERR V_MA2AXI_RSPDATAPARERR(1U)
+
+#define S_MA2AXI_DATA_PAR_ERR 20
+#define V_MA2AXI_DATA_PAR_ERR(x) ((x) << S_MA2AXI_DATA_PAR_ERR)
+#define F_MA2AXI_DATA_PAR_ERR V_MA2AXI_DATA_PAR_ERR(1U)
+
+#define S_MA2AXI_WR_ORD_FIFO_PARERR 19
+#define V_MA2AXI_WR_ORD_FIFO_PARERR(x) ((x) << S_MA2AXI_WR_ORD_FIFO_PARERR)
+#define F_MA2AXI_WR_ORD_FIFO_PARERR V_MA2AXI_WR_ORD_FIFO_PARERR(1U)
+
+#define S_NVME_DB_EMU_TRACKER_FIFO_PERR 18
+#define V_NVME_DB_EMU_TRACKER_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_TRACKER_FIFO_PERR)
+#define F_NVME_DB_EMU_TRACKER_FIFO_PERR V_NVME_DB_EMU_TRACKER_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR 17
+#define V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR)
+#define F_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR 16
+#define V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR)
+#define F_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO0_PERR 15
+#define V_NVME_DB_EMU_ID_FIFO0_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO0_PERR)
+#define F_NVME_DB_EMU_ID_FIFO0_PERR V_NVME_DB_EMU_ID_FIFO0_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO1_PERR 14
+#define V_NVME_DB_EMU_ID_FIFO1_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO1_PERR)
+#define F_NVME_DB_EMU_ID_FIFO1_PERR V_NVME_DB_EMU_ID_FIFO1_PERR(1U)
+
+#define S_RC_ARWFIFO_PERR 13
+#define V_RC_ARWFIFO_PERR(x) ((x) << S_RC_ARWFIFO_PERR)
+#define F_RC_ARWFIFO_PERR V_RC_ARWFIFO_PERR(1U)
+
+#define S_RC_ARIDBURSTADDRFIFO_PERR 12
+#define V_RC_ARIDBURSTADDRFIFO_PERR(x) ((x) << S_RC_ARIDBURSTADDRFIFO_PERR)
+#define F_RC_ARIDBURSTADDRFIFO_PERR V_RC_ARIDBURSTADDRFIFO_PERR(1U)
+
+#define S_RC_CFG_FIFO_PERR 11
+#define V_RC_CFG_FIFO_PERR(x) ((x) << S_RC_CFG_FIFO_PERR)
+#define F_RC_CFG_FIFO_PERR V_RC_CFG_FIFO_PERR(1U)
+
+#define S_RC_RSPFIFO_PERR 10
+#define V_RC_RSPFIFO_PERR(x) ((x) << S_RC_RSPFIFO_PERR)
+#define F_RC_RSPFIFO_PERR V_RC_RSPFIFO_PERR(1U)
+
+#define S_INIC_ARIDFIFO_PERR 9
+#define V_INIC_ARIDFIFO_PERR(x) ((x) << S_INIC_ARIDFIFO_PERR)
+#define F_INIC_ARIDFIFO_PERR V_INIC_ARIDFIFO_PERR(1U)
+
+#define S_INIC_ARWFIFO_PERR 8
+#define V_INIC_ARWFIFO_PERR(x) ((x) << S_INIC_ARWFIFO_PERR)
+#define F_INIC_ARWFIFO_PERR V_INIC_ARWFIFO_PERR(1U)
+
+#define S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR 7
+#define V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR 6
+#define V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR 5
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR 4
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(1U)
+
+#define S_ARM_MA_512B_ARB_FIFO_PERR 3
+#define V_ARM_MA_512B_ARB_FIFO_PERR(x) ((x) << S_ARM_MA_512B_ARB_FIFO_PERR)
+#define F_ARM_MA_512B_ARB_FIFO_PERR V_ARM_MA_512B_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_MA_ARB_FIFO_PERR 2
+#define V_PCIE_INIC_MA_ARB_FIFO_PERR(x) ((x) << S_PCIE_INIC_MA_ARB_FIFO_PERR)
+#define F_PCIE_INIC_MA_ARB_FIFO_PERR V_PCIE_INIC_MA_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_ARB_RSPPERR 1
+#define V_PCIE_INIC_ARB_RSPPERR(x) ((x) << S_PCIE_INIC_ARB_RSPPERR)
+#define F_PCIE_INIC_ARB_RSPPERR V_PCIE_INIC_ARB_RSPPERR(1U)
+
+#define S_ITE_CACHE_PERR 0
+#define V_ITE_CACHE_PERR(x) ((x) << S_ITE_CACHE_PERR)
+#define F_ITE_CACHE_PERR V_ITE_CACHE_PERR(1U)
+
+#define A_ARM_PERR_INT_ENB1 0x47434
+#define A_ARM_PERR_ENABLE1 0x47438
+#define A_ARM_DEBUG_STATUS_1 0x4743c
+#define A_ARM_PCIE_MA_ADDR_REGION_DST 0x47440
+
+#define S_ADDRREGDST 0
+#define M_ADDRREGDST 0x1ffU
+#define V_ADDRREGDST(x) ((x) << S_ADDRREGDST)
+#define G_ADDRREGDST(x) (((x) >> S_ADDRREGDST) & M_ADDRREGDST)
+
+#define A_ARM_ERR_INT_CAUSE0 0x47444
+
+#define S_STRB0_ERROR 31
+#define V_STRB0_ERROR(x) ((x) << S_STRB0_ERROR)
+#define F_STRB0_ERROR V_STRB0_ERROR(1U)
+
+#define S_STRB1_ERROR 30
+#define V_STRB1_ERROR(x) ((x) << S_STRB1_ERROR)
+#define F_STRB1_ERROR V_STRB1_ERROR(1U)
+
+#define S_PCIE_INIC_MA_ARB_INV_RSP_TAG 29
+#define V_PCIE_INIC_MA_ARB_INV_RSP_TAG(x) ((x) << S_PCIE_INIC_MA_ARB_INV_RSP_TAG)
+#define F_PCIE_INIC_MA_ARB_INV_RSP_TAG V_PCIE_INIC_MA_ARB_INV_RSP_TAG(1U)
+
+#define S_ERROR0_NOCMD_DATA 28
+#define V_ERROR0_NOCMD_DATA(x) ((x) << S_ERROR0_NOCMD_DATA)
+#define F_ERROR0_NOCMD_DATA V_ERROR0_NOCMD_DATA(1U)
+
+#define S_ERROR1_NOCMD_DATA 27
+#define V_ERROR1_NOCMD_DATA(x) ((x) << S_ERROR1_NOCMD_DATA)
+#define F_ERROR1_NOCMD_DATA V_ERROR1_NOCMD_DATA(1U)
+
+#define S_INIC_STRB_ERROR 26
+#define V_INIC_STRB_ERROR(x) ((x) << S_INIC_STRB_ERROR)
+#define F_INIC_STRB_ERROR V_INIC_STRB_ERROR(1U)
+
+#define A_ARM_ERR_INT_ENB0 0x47448
+#define A_ARM_DEBUG_INDEX 0x47450
+#define A_ARM_DEBUG_DATA_HIGH 0x47454
+#define A_ARM_DEBUG_DATA_LOW 0x47458
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA0 0x47500
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA1 0x47504
+
+#define S_BASEADDRESS 0
+#define M_BASEADDRESS 0x3U
+#define V_BASEADDRESS(x) ((x) << S_BASEADDRESS)
+#define G_BASEADDRESS(x) (((x) >> S_BASEADDRESS) & M_BASEADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG0 0x47508
+
+#define S_WATERMARK 16
+#define M_WATERMARK 0x3ffU
+#define V_WATERMARK(x) ((x) << S_WATERMARK)
+#define G_WATERMARK(x) (((x) >> S_WATERMARK) & M_WATERMARK)
+
+#define S_SIZEMAX 0
+#define M_SIZEMAX 0x3ffU
+#define V_SIZEMAX(x) ((x) << S_SIZEMAX)
+#define G_SIZEMAX(x) (((x) >> S_SIZEMAX) & M_SIZEMAX)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG1 0x4750c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG2 0x47510
+
+#define S_CPUREADADDRESS 0
+#define M_CPUREADADDRESS 0x3ffU
+#define V_CPUREADADDRESS(x) ((x) << S_CPUREADADDRESS)
+#define G_CPUREADADDRESS(x) (((x) >> S_CPUREADADDRESS) & M_CPUREADADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG3 0x47514
+
+#define S_CPUREADADDRESSVLD 0
+#define V_CPUREADADDRESSVLD(x) ((x) << S_CPUREADADDRESSVLD)
+#define F_CPUREADADDRESSVLD V_CPUREADADDRESSVLD(1U)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 0x47518
+#define A_ARM_APB2MSI_INTERRUPT_0_STATUS 0x47600
+#define A_ARM_APB2MSI_INTERRUPT_1_STATUS 0x47604
+#define A_ARM_APB2MSI_INTERRUPT_2_STATUS 0x47608
+#define A_ARM_APB2MSI_INTERRUPT_3_STATUS 0x4760c
+#define A_ARM_APB2MSI_INTERRUPT_0_ENABLE 0x47610
+#define A_ARM_APB2MSI_INTERRUPT_1_ENABLE 0x47614
+#define A_ARM_APB2MSI_INTERRUPT_2_ENABLE 0x47618
+#define A_ARM_APB2MSI_INTERRUPT_3_ENABLE 0x4761c
+#define A_ARM_APB2MSI_INTERRUPT_PRIORITY_LEVEL 0x47620
+
+#define S_ARM_APB2MSI_INT_PRIORITY_LEVEL 0
+#define M_ARM_APB2MSI_INT_PRIORITY_LEVEL 0x7U
+#define V_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) ((x) << S_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+#define G_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) (((x) >> S_ARM_APB2MSI_INT_PRIORITY_LEVEL) & M_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+
+#define A_ARM_APB2MSI_MEM_READ_ADDR 0x47624
+
+#define S_ARM_APB2MSI_MEM_READ_ADDR 0
+#define M_ARM_APB2MSI_MEM_READ_ADDR 0x7fU
+#define V_ARM_APB2MSI_MEM_READ_ADDR(x) ((x) << S_ARM_APB2MSI_MEM_READ_ADDR)
+#define G_ARM_APB2MSI_MEM_READ_ADDR(x) (((x) >> S_ARM_APB2MSI_MEM_READ_ADDR) & M_ARM_APB2MSI_MEM_READ_ADDR)
+
+#define A_ARM_MSI_MEMORY_DATA 0x47628
+#define A_ARM_MSI_MEMORY_ADDR 0x4762c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG5 0x47630
+
+#define S_CONFIGDONE 0
+#define V_CONFIGDONE(x) ((x) << S_CONFIGDONE)
+#define F_CONFIGDONE V_CONFIGDONE(1U)
+
+#define A_ARM_AXI2MA_TIMERCNT 0x47640
+#define A_ARM_AXI2MA_TRTYPE 0x47644
+
+#define S_ARMMA2AXI1ARTRTYPE 3
+#define V_ARMMA2AXI1ARTRTYPE(x) ((x) << S_ARMMA2AXI1ARTRTYPE)
+#define F_ARMMA2AXI1ARTRTYPE V_ARMMA2AXI1ARTRTYPE(1U)
+
+#define S_ARMMA2AXI1AWTRTYPE 2
+#define V_ARMMA2AXI1AWTRTYPE(x) ((x) << S_ARMMA2AXI1AWTRTYPE)
+#define F_ARMMA2AXI1AWTRTYPE V_ARMMA2AXI1AWTRTYPE(1U)
+
+#define S_ARMMA2AXI0ARTRTYPE 1
+#define V_ARMMA2AXI0ARTRTYPE(x) ((x) << S_ARMMA2AXI0ARTRTYPE)
+#define F_ARMMA2AXI0ARTRTYPE V_ARMMA2AXI0ARTRTYPE(1U)
+
+#define S_ARMMA2AXI0AWTRTYPE 0
+#define V_ARMMA2AXI0AWTRTYPE(x) ((x) << S_ARMMA2AXI0AWTRTYPE)
+#define F_ARMMA2AXI0AWTRTYPE V_ARMMA2AXI0AWTRTYPE(1U)
+
+#define A_ARM_AXI2PCIE_VENDOR 0x47660
+
+#define S_T7_VENDORID 4
+#define M_T7_VENDORID 0xffffU
+#define V_T7_VENDORID(x) ((x) << S_T7_VENDORID)
+#define G_T7_VENDORID(x) (((x) >> S_T7_VENDORID) & M_T7_VENDORID)
+
+#define S_OBFFCODE 0
+#define M_OBFFCODE 0xfU
+#define V_OBFFCODE(x) ((x) << S_OBFFCODE)
+#define G_OBFFCODE(x) (((x) >> S_OBFFCODE) & M_OBFFCODE)
+
+#define A_ARM_AXI2PCIE_VENMSGHDR_DW3 0x47664
+#define A_ARM_CLUSTER_SEL 0x47668
+
+#define S_ARM_CLUSTER_SEL 0
+#define V_ARM_CLUSTER_SEL(x) ((x) << S_ARM_CLUSTER_SEL)
+#define F_ARM_CLUSTER_SEL V_ARM_CLUSTER_SEL(1U)
+
+#define A_ARM_PWRREQ_PERMIT_ADB 0x4766c
+
+#define S_PWRQ_PERMIT_DENY_SAR 1
+#define V_PWRQ_PERMIT_DENY_SAR(x) ((x) << S_PWRQ_PERMIT_DENY_SAR)
+#define F_PWRQ_PERMIT_DENY_SAR V_PWRQ_PERMIT_DENY_SAR(1U)
+
+#define S_PWRQREQNS_ADB 0
+#define V_PWRQREQNS_ADB(x) ((x) << S_PWRQREQNS_ADB)
+#define F_PWRQREQNS_ADB V_PWRQREQNS_ADB(1U)
+
+#define A_ARM_CLK_REQ_ADB 0x47670
+
+#define S_CLKQREQNS_ADB 0
+#define V_CLKQREQNS_ADB(x) ((x) << S_CLKQREQNS_ADB)
+#define F_CLKQREQNS_ADB V_CLKQREQNS_ADB(1U)
+
+#define A_ARM_WAKEUPM 0x47674
+
+#define S_DFTRSTDISABLEM_ADB 2
+#define V_DFTRSTDISABLEM_ADB(x) ((x) << S_DFTRSTDISABLEM_ADB)
+#define F_DFTRSTDISABLEM_ADB V_DFTRSTDISABLEM_ADB(1U)
+
+#define S_DFTRSTDISABLES_ADB 1
+#define V_DFTRSTDISABLES_ADB(x) ((x) << S_DFTRSTDISABLES_ADB)
+#define F_DFTRSTDISABLES_ADB V_DFTRSTDISABLES_ADB(1U)
+
+#define S_WAKEUPM_I_ADB 0
+#define V_WAKEUPM_I_ADB(x) ((x) << S_WAKEUPM_I_ADB)
+#define F_WAKEUPM_I_ADB V_WAKEUPM_I_ADB(1U)
+
+#define A_ARM_CC_APB_FILTERING 0x47678
+
+#define S_CC_DFTSCANMODE 11
+#define V_CC_DFTSCANMODE(x) ((x) << S_CC_DFTSCANMODE)
+#define F_CC_DFTSCANMODE V_CC_DFTSCANMODE(1U)
+
+#define S_CC_OTP_FILTERING_DISABLE 10
+#define V_CC_OTP_FILTERING_DISABLE(x) ((x) << S_CC_OTP_FILTERING_DISABLE)
+#define F_CC_OTP_FILTERING_DISABLE V_CC_OTP_FILTERING_DISABLE(1U)
+
+#define S_CC_APB_FILTERING 0
+#define M_CC_APB_FILTERING 0x3ffU
+#define V_CC_APB_FILTERING(x) ((x) << S_CC_APB_FILTERING)
+#define G_CC_APB_FILTERING(x) (((x) >> S_CC_APB_FILTERING) & M_CC_APB_FILTERING)
+
+#define A_ARM_DCU_EN0 0x4767c
+#define A_ARM_DCU_EN1 0x47680
+#define A_ARM_DCU_EN2 0x47684
+#define A_ARM_DCU_EN3 0x47688
+#define A_ARM_DCU_LOCK0 0x4768c
+#define A_ARM_DCU_LOCK1 0x47690
+#define A_ARM_DCU_LOCK2 0x47694
+#define A_ARM_DCU_LOCK3 0x47698
+#define A_ARM_GPPC 0x4769c
+
+#define S_CC_SEC_DEBUG_RESET 24
+#define V_CC_SEC_DEBUG_RESET(x) ((x) << S_CC_SEC_DEBUG_RESET)
+#define F_CC_SEC_DEBUG_RESET V_CC_SEC_DEBUG_RESET(1U)
+
+#define S_CC_DFTSE 23
+#define V_CC_DFTSE(x) ((x) << S_CC_DFTSE)
+#define F_CC_DFTSE V_CC_DFTSE(1U)
+
+#define S_CC_DFTCGEN 22
+#define V_CC_DFTCGEN(x) ((x) << S_CC_DFTCGEN)
+#define F_CC_DFTCGEN V_CC_DFTCGEN(1U)
+
+#define S_CC_DFTRAMHOLD 21
+#define V_CC_DFTRAMHOLD(x) ((x) << S_CC_DFTRAMHOLD)
+#define F_CC_DFTRAMHOLD V_CC_DFTRAMHOLD(1U)
+
+#define S_CC_LOCK_BITS 12
+#define M_CC_LOCK_BITS 0x1ffU
+#define V_CC_LOCK_BITS(x) ((x) << S_CC_LOCK_BITS)
+#define G_CC_LOCK_BITS(x) (((x) >> S_CC_LOCK_BITS) & M_CC_LOCK_BITS)
+
+#define S_CC_LCS_IS_VALID 11
+#define V_CC_LCS_IS_VALID(x) ((x) << S_CC_LCS_IS_VALID)
+#define F_CC_LCS_IS_VALID V_CC_LCS_IS_VALID(1U)
+
+#define S_CC_LCS 8
+#define M_CC_LCS 0x7U
+#define V_CC_LCS(x) ((x) << S_CC_LCS)
+#define G_CC_LCS(x) (((x) >> S_CC_LCS) & M_CC_LCS)
+
+#define S_CC_GPPC 0
+#define M_CC_GPPC 0xffU
+#define V_CC_GPPC(x) ((x) << S_CC_GPPC)
+#define G_CC_GPPC(x) (((x) >> S_CC_GPPC) & M_CC_GPPC)
+
+#define A_ARM_EMMC 0x47700
+
+#define S_EMMC_CARD_CLK_EN 31
+#define V_EMMC_CARD_CLK_EN(x) ((x) << S_EMMC_CARD_CLK_EN)
+#define F_EMMC_CARD_CLK_EN V_EMMC_CARD_CLK_EN(1U)
+
+#define S_EMMC_LED_CONTROL 30
+#define V_EMMC_LED_CONTROL(x) ((x) << S_EMMC_LED_CONTROL)
+#define F_EMMC_LED_CONTROL V_EMMC_LED_CONTROL(1U)
+
+#define S_EMMC_UHS1_SWVOLT_EN 29
+#define V_EMMC_UHS1_SWVOLT_EN(x) ((x) << S_EMMC_UHS1_SWVOLT_EN)
+#define F_EMMC_UHS1_SWVOLT_EN V_EMMC_UHS1_SWVOLT_EN(1U)
+
+#define S_EMMC_UHS1_DRV_STH 27
+#define M_EMMC_UHS1_DRV_STH 0x3U
+#define V_EMMC_UHS1_DRV_STH(x) ((x) << S_EMMC_UHS1_DRV_STH)
+#define G_EMMC_UHS1_DRV_STH(x) (((x) >> S_EMMC_UHS1_DRV_STH) & M_EMMC_UHS1_DRV_STH)
+
+#define S_EMMC_SD_VDD1_ON 26
+#define V_EMMC_SD_VDD1_ON(x) ((x) << S_EMMC_SD_VDD1_ON)
+#define F_EMMC_SD_VDD1_ON V_EMMC_SD_VDD1_ON(1U)
+
+#define S_EMMC_SD_VDD1_SEL 23
+#define M_EMMC_SD_VDD1_SEL 0x7U
+#define V_EMMC_SD_VDD1_SEL(x) ((x) << S_EMMC_SD_VDD1_SEL)
+#define G_EMMC_SD_VDD1_SEL(x) (((x) >> S_EMMC_SD_VDD1_SEL) & M_EMMC_SD_VDD1_SEL)
+
+#define S_EMMC_INTCLK_EN 22
+#define V_EMMC_INTCLK_EN(x) ((x) << S_EMMC_INTCLK_EN)
+#define F_EMMC_INTCLK_EN V_EMMC_INTCLK_EN(1U)
+
+#define S_EMMC_CARD_CLK_FREQ_SEL 12
+#define M_EMMC_CARD_CLK_FREQ_SEL 0x3ffU
+#define V_EMMC_CARD_CLK_FREQ_SEL(x) ((x) << S_EMMC_CARD_CLK_FREQ_SEL)
+#define G_EMMC_CARD_CLK_FREQ_SEL(x) (((x) >> S_EMMC_CARD_CLK_FREQ_SEL) & M_EMMC_CARD_CLK_FREQ_SEL)
+
+#define S_EMMC_CARD_CLK_GEN_SEL 11
+#define V_EMMC_CARD_CLK_GEN_SEL(x) ((x) << S_EMMC_CARD_CLK_GEN_SEL)
+#define F_EMMC_CARD_CLK_GEN_SEL V_EMMC_CARD_CLK_GEN_SEL(1U)
+
+#define S_EMMC_CLK2CARD_ON 10
+#define V_EMMC_CLK2CARD_ON(x) ((x) << S_EMMC_CLK2CARD_ON)
+#define F_EMMC_CLK2CARD_ON V_EMMC_CLK2CARD_ON(1U)
+
+#define S_EMMC_CARD_CLK_STABLE 9
+#define V_EMMC_CARD_CLK_STABLE(x) ((x) << S_EMMC_CARD_CLK_STABLE)
+#define F_EMMC_CARD_CLK_STABLE V_EMMC_CARD_CLK_STABLE(1U)
+
+#define S_EMMC_INT_BCLK_STABLE 8
+#define V_EMMC_INT_BCLK_STABLE(x) ((x) << S_EMMC_INT_BCLK_STABLE)
+#define F_EMMC_INT_BCLK_STABLE V_EMMC_INT_BCLK_STABLE(1U)
+
+#define S_EMMC_INT_ACLK_STABLE 7
+#define V_EMMC_INT_ACLK_STABLE(x) ((x) << S_EMMC_INT_ACLK_STABLE)
+#define F_EMMC_INT_ACLK_STABLE V_EMMC_INT_ACLK_STABLE(1U)
+
+#define S_EMMC_INT_TMCLK_STABLE 6
+#define V_EMMC_INT_TMCLK_STABLE(x) ((x) << S_EMMC_INT_TMCLK_STABLE)
+#define F_EMMC_INT_TMCLK_STABLE V_EMMC_INT_TMCLK_STABLE(1U)
+
+#define S_EMMC_HOST_REG_VOL_STABLE 5
+#define V_EMMC_HOST_REG_VOL_STABLE(x) ((x) << S_EMMC_HOST_REG_VOL_STABLE)
+#define F_EMMC_HOST_REG_VOL_STABLE V_EMMC_HOST_REG_VOL_STABLE(1U)
+
+#define S_EMMC_CARD_DETECT_N 4
+#define V_EMMC_CARD_DETECT_N(x) ((x) << S_EMMC_CARD_DETECT_N)
+#define F_EMMC_CARD_DETECT_N V_EMMC_CARD_DETECT_N(1U)
+
+#define S_EMMC_CARD_WRITE_PROT 3
+#define V_EMMC_CARD_WRITE_PROT(x) ((x) << S_EMMC_CARD_WRITE_PROT)
+#define F_EMMC_CARD_WRITE_PROT V_EMMC_CARD_WRITE_PROT(1U)
+
+#define S_EMMC_GP_IN 2
+#define V_EMMC_GP_IN(x) ((x) << S_EMMC_GP_IN)
+#define F_EMMC_GP_IN V_EMMC_GP_IN(1U)
+
+#define S_EMMC_TEST_SCAN_MODE 1
+#define V_EMMC_TEST_SCAN_MODE(x) ((x) << S_EMMC_TEST_SCAN_MODE)
+#define F_EMMC_TEST_SCAN_MODE V_EMMC_TEST_SCAN_MODE(1U)
+
+#define S_EMMC_FIFOINJDATAERR 0
+#define V_EMMC_FIFOINJDATAERR(x) ((x) << S_EMMC_FIFOINJDATAERR)
+#define F_EMMC_FIFOINJDATAERR V_EMMC_FIFOINJDATAERR(1U)
+
+/* ARM block miscellaneous registers: wakeups/clock-request, atomics, TCAM access. */
+#define A_ARM_WAKEUPS 0x47704
+
+#define S_WAKEUPS_I_ADB 0
+#define V_WAKEUPS_I_ADB(x) ((x) << S_WAKEUPS_I_ADB)
+#define F_WAKEUPS_I_ADB V_WAKEUPS_I_ADB(1U)
+
+#define A_ARM_CLKREQNM_ADB 0x47708
+
+/*
+ * NOTE(review): field name "CLKQREQNM" does not match register name
+ * "CLKREQNM" — presumably a generator typo; kept as-is because these
+ * generated names must match their users elsewhere in the driver.
+ */
+#define S_CLKQREQNM_ADB 0
+#define V_CLKQREQNM_ADB(x) ((x) << S_CLKQREQNM_ADB)
+#define F_CLKQREQNM_ADB V_CLKQREQNM_ADB(1U)
+
+#define A_ARM_ATOMICDATA0_0 0x4770c
+#define A_ARM_ATOMICDATA1_0 0x47710
+#define A_ARM_NVME_DB_EMU_INT_ENABLE 0x47740
+#define A_ARM_TCAM_WRITE_DATA 0x47744
+
+#define S_TCAM_WRITE_DATA 0
+#define M_TCAM_WRITE_DATA 0x3fffffffU
+#define V_TCAM_WRITE_DATA(x) ((x) << S_TCAM_WRITE_DATA)
+#define G_TCAM_WRITE_DATA(x) (((x) >> S_TCAM_WRITE_DATA) & M_TCAM_WRITE_DATA)
+
+#define A_ARM_TCAM_WRITE_ADDR 0x47748
+
+#define S_TCAM_WRITE_ADDR 0
+#define M_TCAM_WRITE_ADDR 0x1ffU
+#define V_TCAM_WRITE_ADDR(x) ((x) << S_TCAM_WRITE_ADDR)
+#define G_TCAM_WRITE_ADDR(x) (((x) >> S_TCAM_WRITE_ADDR) & M_TCAM_WRITE_ADDR)
+
+#define A_ARM_TCAM_READ_ADDR 0x4774c
+
+#define S_TCAM_READ_ADDR 0
+#define M_TCAM_READ_ADDR 0x1ffU
+#define V_TCAM_READ_ADDR(x) ((x) << S_TCAM_READ_ADDR)
+#define G_TCAM_READ_ADDR(x) (((x) >> S_TCAM_READ_ADDR) & M_TCAM_READ_ADDR)
+
+/* TCAM control: busy/pass/start/reset handshake bits plus write/enable. */
+#define A_ARM_TCAM_CTL 0x47750
+
+#define S_TCAMCBBUSY 6
+#define V_TCAMCBBUSY(x) ((x) << S_TCAMCBBUSY)
+#define F_TCAMCBBUSY V_TCAMCBBUSY(1U)
+
+#define S_TCAMCBPASS 5
+#define V_TCAMCBPASS(x) ((x) << S_TCAMCBPASS)
+#define F_TCAMCBPASS V_TCAMCBPASS(1U)
+
+#define S_TCAMCBSTART 4
+#define V_TCAMCBSTART(x) ((x) << S_TCAMCBSTART)
+#define F_TCAMCBSTART V_TCAMCBSTART(1U)
+
+#define S_TCAMRSTCB 3
+#define V_TCAMRSTCB(x) ((x) << S_TCAMRSTCB)
+#define F_TCAMRSTCB V_TCAMRSTCB(1U)
+
+#define S_TCAM_REQBITPOS 2
+#define V_TCAM_REQBITPOS(x) ((x) << S_TCAM_REQBITPOS)
+#define F_TCAM_REQBITPOS V_TCAM_REQBITPOS(1U)
+
+#define S_TCAM_WRITE 1
+#define V_TCAM_WRITE(x) ((x) << S_TCAM_WRITE)
+#define F_TCAM_WRITE V_TCAM_WRITE(1U)
+
+#define S_TCAM_ENABLE 0
+#define V_TCAM_ENABLE(x) ((x) << S_TCAM_ENABLE)
+#define F_TCAM_ENABLE V_TCAM_ENABLE(1U)
+
+#define A_ARM_TCAM_READ_DATA 0x4775c
+
+#define S_TCAM_READ_DATA 0
+#define M_TCAM_READ_DATA 0x3fffffffU
+#define V_TCAM_READ_DATA(x) ((x) << S_TCAM_READ_DATA)
+#define G_TCAM_READ_DATA(x) (((x) >> S_TCAM_READ_DATA) & M_TCAM_READ_DATA)
+
+/* SRAM1 access window: data/addr registers plus write/enable control. */
+#define A_ARM_SRAM1_WRITE_DATA 0x47760
+
+#define S_SRAM1_WRITE_DATA 0
+#define M_SRAM1_WRITE_DATA 0x7fffffU
+#define V_SRAM1_WRITE_DATA(x) ((x) << S_SRAM1_WRITE_DATA)
+#define G_SRAM1_WRITE_DATA(x) (((x) >> S_SRAM1_WRITE_DATA) & M_SRAM1_WRITE_DATA)
+
+#define A_ARM_SRAM1_WRITE_ADDR 0x47764
+
+#define S_SRAM1_WRITE_ADDR 0
+#define M_SRAM1_WRITE_ADDR 0x1ffU
+#define V_SRAM1_WRITE_ADDR(x) ((x) << S_SRAM1_WRITE_ADDR)
+#define G_SRAM1_WRITE_ADDR(x) (((x) >> S_SRAM1_WRITE_ADDR) & M_SRAM1_WRITE_ADDR)
+
+#define A_ARM_SRAM1_READ_ADDR 0x47768
+
+#define S_SRAM1_READ_ADDR 0
+#define M_SRAM1_READ_ADDR 0x1ffU
+#define V_SRAM1_READ_ADDR(x) ((x) << S_SRAM1_READ_ADDR)
+#define G_SRAM1_READ_ADDR(x) (((x) >> S_SRAM1_READ_ADDR) & M_SRAM1_READ_ADDR)
+
+#define A_ARM_SRAM1_CTL 0x4776c
+
+#define S_SRAM1_WRITE 1
+#define V_SRAM1_WRITE(x) ((x) << S_SRAM1_WRITE)
+#define F_SRAM1_WRITE V_SRAM1_WRITE(1U)
+
+#define S_SRAM1_ENABLE 0
+#define V_SRAM1_ENABLE(x) ((x) << S_SRAM1_ENABLE)
+#define F_SRAM1_ENABLE V_SRAM1_ENABLE(1U)
+
+#define A_ARM_SRAM1_READ_DATA 0x47770
+
+#define S_SRAM1_READ_DATA 0
+#define M_SRAM1_READ_DATA 0x7fffffU
+#define V_SRAM1_READ_DATA(x) ((x) << S_SRAM1_READ_DATA)
+#define G_SRAM1_READ_DATA(x) (((x) >> S_SRAM1_READ_DATA) & M_SRAM1_READ_DATA)
+
+/* SRAM2 access window: wider data path (three 32-bit data registers). */
+#define A_ARM_SRAM2_WRITE_DATA0 0x47774
+#define A_ARM_SRAM2_WRITE_DATA1 0x47778
+#define A_ARM_SRAM2_WRITE_DATA2 0x4777c
+#define A_ARM_SRAM2_WRITE_ADDR 0x47780
+
+#define S_SRAM2_WRITE_ADDR 0
+#define M_SRAM2_WRITE_ADDR 0x1fffU
+#define V_SRAM2_WRITE_ADDR(x) ((x) << S_SRAM2_WRITE_ADDR)
+#define G_SRAM2_WRITE_ADDR(x) (((x) >> S_SRAM2_WRITE_ADDR) & M_SRAM2_WRITE_ADDR)
+
+#define A_ARM_SRAM2_READ_ADDR 0x47784
+
+#define S_SRAM2_READ_ADDR 0
+#define M_SRAM2_READ_ADDR 0x1fffU
+#define V_SRAM2_READ_ADDR(x) ((x) << S_SRAM2_READ_ADDR)
+#define G_SRAM2_READ_ADDR(x) (((x) >> S_SRAM2_READ_ADDR) & M_SRAM2_READ_ADDR)
+
+#define A_ARM_SRAM2_CTL 0x47788
+
+#define S_SRAM2_WRITE 1
+#define V_SRAM2_WRITE(x) ((x) << S_SRAM2_WRITE)
+#define F_SRAM2_WRITE V_SRAM2_WRITE(1U)
+
+#define S_SRAM2_ENABLE 0
+#define V_SRAM2_ENABLE(x) ((x) << S_SRAM2_ENABLE)
+#define F_SRAM2_ENABLE V_SRAM2_ENABLE(1U)
+
+#define A_ARM_SRAM2_READ_DATA0 0x4778c
+#define A_ARM_SRAM2_READ_DATA1 0x47790
+#define A_ARM_SRAM2_READ_DATA2 0x47794
+#define A_ARM_DBPROC_SRAM_CTL 0x47798
+
+#define S_DBPROC_RD_EN 0
+#define V_DBPROC_RD_EN(x) ((x) << S_DBPROC_RD_EN)
+#define F_DBPROC_RD_EN V_DBPROC_RD_EN(1U)
+
+#define A_ARM_DBPROC_SRAM_READ_ADDR 0x4779c
+
+#define S_DBPROC_RD_ADDR 0
+#define M_DBPROC_RD_ADDR 0x1ffU
+#define V_DBPROC_RD_ADDR(x) ((x) << S_DBPROC_RD_ADDR)
+#define G_DBPROC_RD_ADDR(x) (((x) >> S_DBPROC_RD_ADDR) & M_DBPROC_RD_ADDR)
+
+#define A_ARM_DBPROC_SRAM_READ_DATA0 0x477a0
+#define A_ARM_DBPROC_SRAM_READ_DATA1 0x477a4
+#define A_ARM_DBPROC_SRAM_READ_DATA2 0x477a8
+#define A_ARM_DBPROC_SRAM_READ_DATA3 0x477ac
+#define A_ARM_ATOMICDATA0_1 0x477b0
+#define A_ARM_ATOMICDATA1_1 0x477b4
+#define A_ARM_SPIDEN 0x477b8
+
+#define S_SPIDEN 0
+#define V_SPIDEN(x) ((x) << S_SPIDEN)
+#define F_SPIDEN V_SPIDEN(1U)
+
+#define A_ARM_RC_INT_WRITE_DATA 0x477bc
+
+#define S_RC_INT_STATUS_WRITE_DATA 0
+#define M_RC_INT_STATUS_WRITE_DATA 0x3fU
+#define V_RC_INT_STATUS_WRITE_DATA(x) ((x) << S_RC_INT_STATUS_WRITE_DATA)
+#define G_RC_INT_STATUS_WRITE_DATA(x) (((x) >> S_RC_INT_STATUS_WRITE_DATA) & M_RC_INT_STATUS_WRITE_DATA)
+
+/* DFT / memory-BIST control group (presumably manufacturing test only). */
+#define A_ARM_DFT_MBI 0x477c4
+
+#define S_MBISTREQ 3
+#define V_MBISTREQ(x) ((x) << S_MBISTREQ)
+#define F_MBISTREQ V_MBISTREQ(1U)
+
+#define S_MBISTRESETN 2
+#define V_MBISTRESETN(x) ((x) << S_MBISTRESETN)
+#define F_MBISTRESETN V_MBISTRESETN(1U)
+
+#define S_DFTRAMHOLD 1
+#define V_DFTRAMHOLD(x) ((x) << S_DFTRAMHOLD)
+#define F_DFTRAMHOLD V_DFTRAMHOLD(1U)
+
+#define S_DFTCGEN 0
+#define V_DFTCGEN(x) ((x) << S_DFTCGEN)
+#define F_DFTCGEN V_DFTCGEN(1U)
+
+#define A_ARM_DBPROC_SRAM_TH_CTL 0x477c8
+
+#define S_DBPROC_TH_WR_EN 1
+#define V_DBPROC_TH_WR_EN(x) ((x) << S_DBPROC_TH_WR_EN)
+#define F_DBPROC_TH_WR_EN V_DBPROC_TH_WR_EN(1U)
+
+#define S_DBPROC_TH_RD_EN 0
+#define V_DBPROC_TH_RD_EN(x) ((x) << S_DBPROC_TH_RD_EN)
+#define F_DBPROC_TH_RD_EN V_DBPROC_TH_RD_EN(1U)
+
+#define A_ARM_MBISTACK 0x477d4
+
+#define S_MBISTACK 0
+#define V_MBISTACK(x) ((x) << S_MBISTACK)
+#define F_MBISTACK V_MBISTACK(1U)
+
+#define A_ARM_MBISTADDR 0x477d8
+
+#define S_MBISTADDR 0
+#define M_MBISTADDR 0xfffU
+#define V_MBISTADDR(x) ((x) << S_MBISTADDR)
+#define G_MBISTADDR(x) (((x) >> S_MBISTADDR) & M_MBISTADDR)
+
+#define A_ARM_MBISTREADEN 0x477dc
+
+#define S_MBISTREADEN 0
+#define V_MBISTREADEN(x) ((x) << S_MBISTREADEN)
+#define F_MBISTREADEN V_MBISTREADEN(1U)
+
+#define A_ARM_MBISTWRITEEN 0x477e0
+
+#define S_MBISTWRITEEN 0
+#define V_MBISTWRITEEN(x) ((x) << S_MBISTWRITEEN)
+#define F_MBISTWRITEEN V_MBISTWRITEEN(1U)
+
+#define A_ARM_MBISTARRAY 0x477e4
+
+#define S_MBISTARRAY 0
+#define M_MBISTARRAY 0x3U
+#define V_MBISTARRAY(x) ((x) << S_MBISTARRAY)
+#define G_MBISTARRAY(x) (((x) >> S_MBISTARRAY) & M_MBISTARRAY)
+
+#define A_ARM_MBISTCFG 0x477e8
+
+#define S_MBISTCFG 0
+#define V_MBISTCFG(x) ((x) << S_MBISTCFG)
+#define F_MBISTCFG V_MBISTCFG(1U)
+
+#define A_ARM_MBISTINDATA0 0x477ec
+#define A_ARM_MBISTINDATA1 0x477f0
+#define A_ARM_MBISTOUTDATA1 0x477f4
+#define A_ARM_MBISTOUTDATA0 0x477f8
+#define A_ARM_NVME_DB_EMU_EN 0x477fc
+
+#define S_NVME_DB_EN 0
+#define V_NVME_DB_EN(x) ((x) << S_NVME_DB_EN)
+#define F_NVME_DB_EN V_NVME_DB_EN(1U)
+/* registers for module MC_T70 */
+#define MC_T70_BASE_ADDR 0x48000
+
+/* Indirect access window into the memory controller (address + data pair). */
+#define A_MC_IND_ADDR 0x48000
+
+#define S_T7_AUTOINCR 30
+#define M_T7_AUTOINCR 0x3U
+#define V_T7_AUTOINCR(x) ((x) << S_T7_AUTOINCR)
+#define G_T7_AUTOINCR(x) (((x) >> S_T7_AUTOINCR) & M_T7_AUTOINCR)
+
+#define S_IND_ADDR_ADDR 0
+#define M_IND_ADDR_ADDR 0x1ffffffU
+#define V_IND_ADDR_ADDR(x) ((x) << S_IND_ADDR_ADDR)
+#define G_IND_ADDR_ADDR(x) (((x) >> S_IND_ADDR_ADDR) & M_IND_ADDR_ADDR)
+
+#define A_MC_IND_DATA 0x48004
+#define A_MC_DBG_CTL 0x48018
+#define A_MC_DBG_DATA 0x4801c
+#define A_T7_MC_P_DDRPHY_RST_CTRL 0x49300
+#define A_T7_MC_P_PERFORMANCE_CTRL 0x49304
+#define A_T7_MC_P_ECC_CTRL 0x49308
+
+#define S_BISTECCHBWCTL 7
+#define M_BISTECCHBWCTL 0x3U
+#define V_BISTECCHBWCTL(x) ((x) << S_BISTECCHBWCTL)
+#define G_BISTECCHBWCTL(x) (((x) >> S_BISTECCHBWCTL) & M_BISTECCHBWCTL)
+
+#define S_BISTTESTMODE 6
+#define V_BISTTESTMODE(x) ((x) << S_BISTTESTMODE)
+#define F_BISTTESTMODE V_BISTTESTMODE(1U)
+
+#define S_RMW_CTL_CFG 4
+#define M_RMW_CTL_CFG 0x3U
+#define V_RMW_CTL_CFG(x) ((x) << S_RMW_CTL_CFG)
+#define G_RMW_CTL_CFG(x) (((x) >> S_RMW_CTL_CFG) & M_RMW_CTL_CFG)
+
+/* DDR controller interrupt enables; DCH0/DCH1 are per-data-channel copies. */
+#define A_MC_P_DDRCTL_INT_ENABLE 0x4930c
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_DCH1_ENABLE 4
+#define V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_DCH1_ENABLE V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE 3
+#define V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_DCH1_ENABLE V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+/* DDR controller interrupt cause bits; bits 25..16 are DCH1, 15..6 DCH0, 5..0 HIF errors. */
+#define A_MC_P_DDRCTL_INT_CAUSE 0x49310
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 25
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH1_CAUSE 24
+#define V_WR_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH1_CAUSE V_WR_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE 23
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 22
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE 21
+#define V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH1_CAUSE 20
+#define V_SWCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH1_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH1_CAUSE V_SWCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH1_CAUSE 19
+#define V_DUCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH1_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH1_CAUSE V_DUCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH1_CAUSE 18
+#define V_LCCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH1_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH1_CAUSE V_LCCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH1_CAUSE 17
+#define V_CTRLUPD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH1_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH1_CAUSE V_CTRLUPD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH1_CAUSE 16
+#define V_RFM_ALERT_INTR_DCH1_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH1_CAUSE)
+#define F_RFM_ALERT_INTR_DCH1_CAUSE V_RFM_ALERT_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 15
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH0_CAUSE 14
+#define V_WR_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH0_CAUSE V_WR_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE 13
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 12
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE 11
+#define V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH0_CAUSE 10
+#define V_SWCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH0_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH0_CAUSE V_SWCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH0_CAUSE 9
+#define V_DUCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH0_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH0_CAUSE V_DUCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH0_CAUSE 8
+#define V_LCCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH0_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH0_CAUSE V_LCCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH0_CAUSE 7
+#define V_CTRLUPD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH0_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH0_CAUSE V_CTRLUPD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH0_CAUSE 6
+#define V_RFM_ALERT_INTR_DCH0_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH0_CAUSE)
+#define F_RFM_ALERT_INTR_DCH0_CAUSE V_RFM_ALERT_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE 4
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE 3
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+/* Parity-error interrupt enable bits; the CAUSE register below mirrors this layout. */
+#define A_T7_MC_P_PAR_ENABLE 0x49314
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_ENABLE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_ENABLE V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_ENABLE V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_ENABLE V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_ENABLE V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_ENABLE V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_ENABLE 8
+#define V_WDATARAM_PARERR_DCH1_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH1_ENABLE)
+#define F_WDATARAM_PARERR_DCH1_ENABLE V_WDATARAM_PARERR_DCH1_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_ENABLE 7
+#define V_WDATARAM_PARERR_DCH0_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH0_ENABLE)
+#define F_WDATARAM_PARERR_DCH0_ENABLE V_WDATARAM_PARERR_DCH0_ENABLE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_ENABLE 6
+#define V_BIST_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ADDR_FIFO_PARERR_ENABLE V_BIST_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_ENABLE V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_ENABLE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_ENABLE V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(1U)
+
+/* Parity-error cause bits; same bit layout as A_T7_MC_P_PAR_ENABLE. */
+#define A_T7_MC_P_PAR_CAUSE 0x49318
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_CAUSE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_CAUSE V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_CAUSE V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_CAUSE V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_CAUSE V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_CAUSE V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_CAUSE 8
+#define V_WDATARAM_PARERR_DCH1_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH1_CAUSE)
+#define F_WDATARAM_PARERR_DCH1_CAUSE V_WDATARAM_PARERR_DCH1_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_CAUSE 7
+#define V_WDATARAM_PARERR_DCH0_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH0_CAUSE)
+#define F_WDATARAM_PARERR_DCH0_CAUSE V_WDATARAM_PARERR_DCH0_CAUSE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_CAUSE 6
+#define V_BIST_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ADDR_FIFO_PARERR_CAUSE V_BIST_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_CAUSE V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_CAUSE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_CAUSE V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(1U)
+
+/* Top-level MC_P interrupt enable: PHY, controller, and ECC CE/UE summaries. */
+#define A_T7_MC_P_INT_ENABLE 0x4931c
+
+#define S_DDRPHY_INT_ENABLE 4
+#define V_DDRPHY_INT_ENABLE(x) ((x) << S_DDRPHY_INT_ENABLE)
+#define F_DDRPHY_INT_ENABLE V_DDRPHY_INT_ENABLE(1U)
+
+#define S_DDRCTL_INT_ENABLE 3
+#define V_DDRCTL_INT_ENABLE(x) ((x) << S_DDRCTL_INT_ENABLE)
+#define F_DDRCTL_INT_ENABLE V_DDRCTL_INT_ENABLE(1U)
+
+#define S_T7_ECC_CE_INT_ENABLE 2
+#define V_T7_ECC_CE_INT_ENABLE(x) ((x) << S_T7_ECC_CE_INT_ENABLE)
+#define F_T7_ECC_CE_INT_ENABLE V_T7_ECC_CE_INT_ENABLE(1U)
+
+#define S_T7_ECC_UE_INT_ENABLE 1
+#define V_T7_ECC_UE_INT_ENABLE(x) ((x) << S_T7_ECC_UE_INT_ENABLE)
+#define F_T7_ECC_UE_INT_ENABLE V_T7_ECC_UE_INT_ENABLE(1U)
+
+#define A_T7_MC_P_INT_CAUSE 0x49320
+
+#define S_DDRPHY_INT_CAUSE 4
+#define V_DDRPHY_INT_CAUSE(x) ((x) << S_DDRPHY_INT_CAUSE)
+#define F_DDRPHY_INT_CAUSE V_DDRPHY_INT_CAUSE(1U)
+
+#define S_DDRCTL_INT_CAUSE 3
+#define V_DDRCTL_INT_CAUSE(x) ((x) << S_DDRCTL_INT_CAUSE)
+#define F_DDRCTL_INT_CAUSE V_DDRCTL_INT_CAUSE(1U)
+
+#define S_T7_ECC_CE_INT_CAUSE 2
+#define V_T7_ECC_CE_INT_CAUSE(x) ((x) << S_T7_ECC_CE_INT_CAUSE)
+#define F_T7_ECC_CE_INT_CAUSE V_T7_ECC_CE_INT_CAUSE(1U)
+
+#define S_T7_ECC_UE_INT_CAUSE 1
+#define V_T7_ECC_UE_INT_CAUSE(x) ((x) << S_T7_ECC_UE_INT_CAUSE)
+#define F_T7_ECC_UE_INT_CAUSE V_T7_ECC_UE_INT_CAUSE(1U)
+
+#define A_MC_P_ECC_UE_INT_ENABLE 0x49324
+
+#define S_BIST_RSP_SRAM_UERR_ENABLE 0
+#define V_BIST_RSP_SRAM_UERR_ENABLE(x) ((x) << S_BIST_RSP_SRAM_UERR_ENABLE)
+#define F_BIST_RSP_SRAM_UERR_ENABLE V_BIST_RSP_SRAM_UERR_ENABLE(1U)
+
+#define A_MC_P_ECC_UE_INT_CAUSE 0x49328
+
+#define S_BIST_RSP_SRAM_UERR_CAUSE 0
+#define V_BIST_RSP_SRAM_UERR_CAUSE(x) ((x) << S_BIST_RSP_SRAM_UERR_CAUSE)
+#define F_BIST_RSP_SRAM_UERR_CAUSE V_BIST_RSP_SRAM_UERR_CAUSE(1U)
+
+#define A_T7_MC_P_ECC_STATUS 0x4932c
+#define A_T7_MC_P_PHY_CTRL 0x49330
+#define A_T7_MC_P_STATIC_CFG_STATUS 0x49334
+
+#define S_DFIFREQRATIO 27
+#define V_DFIFREQRATIO(x) ((x) << S_DFIFREQRATIO)
+#define F_DFIFREQRATIO V_DFIFREQRATIO(1U)
+
+#define S_STATIC_DDR5_HBW_CHANNEL 3
+#define V_STATIC_DDR5_HBW_CHANNEL(x) ((x) << S_STATIC_DDR5_HBW_CHANNEL)
+#define F_STATIC_DDR5_HBW_CHANNEL V_STATIC_DDR5_HBW_CHANNEL(1U)
+
+#define S_STATIC_DDR5_HBW 2
+#define V_STATIC_DDR5_HBW(x) ((x) << S_STATIC_DDR5_HBW)
+#define F_STATIC_DDR5_HBW V_STATIC_DDR5_HBW(1U)
+
+#define S_T7_STATIC_WIDTH 1
+#define V_T7_STATIC_WIDTH(x) ((x) << S_T7_STATIC_WIDTH)
+#define F_T7_STATIC_WIDTH V_T7_STATIC_WIDTH(1U)
+
+#define A_T7_MC_P_CORE_PCTL_STAT 0x49338
+#define A_T7_MC_P_DEBUG_CNT 0x4933c
+#define A_T7_MC_CE_ERR_DATA_RDATA 0x49340
+#define A_T7_MC_UE_ERR_DATA_RDATA 0x49380
+#define A_T7_MC_CE_ADDR 0x493c0
+#define A_T7_MC_UE_ADDR 0x493c4
+#define A_T7_MC_P_DEEP_SLEEP 0x493c8
+#define A_T7_MC_P_FPGA_BONUS 0x493cc
+#define A_T7_MC_P_DEBUG_CFG 0x493d0
+#define A_T7_MC_P_DEBUG_RPT 0x493d4
+#define A_T7_MC_P_PHY_ADR_CK_EN 0x493d8
+#define A_MC_P_WDATARAM_INIT 0x493dc
+
+#define S_ENABLE_DCH1 1
+#define V_ENABLE_DCH1(x) ((x) << S_ENABLE_DCH1)
+#define F_ENABLE_DCH1 V_ENABLE_DCH1(1U)
+
+#define S_ENABLE_DCH0 0
+#define V_ENABLE_DCH0(x) ((x) << S_ENABLE_DCH0)
+#define F_ENABLE_DCH0 V_ENABLE_DCH0(1U)
+
+#define A_T7_MC_CE_ERR_ECC_DATA0 0x493e0
+#define A_T7_MC_CE_ERR_ECC_DATA1 0x493e4
+#define A_T7_MC_UE_ERR_ECC_DATA0 0x493e8
+#define A_T7_MC_UE_ERR_ECC_DATA1 0x493ec
+#define A_T7_MC_P_RMW_PRIO 0x493f0
+
+/* Memory BIST command/address/pattern/result registers. */
+#define A_T7_MC_P_BIST_CMD 0x49400
+
+#define S_FIFO_ERROR_FLAG 30
+#define V_FIFO_ERROR_FLAG(x) ((x) << S_FIFO_ERROR_FLAG)
+#define F_FIFO_ERROR_FLAG V_FIFO_ERROR_FLAG(1U)
+
+#define A_T7_MC_P_BIST_CMD_ADDR 0x49404
+
+#define S_T7_VALUE 0
+#define M_T7_VALUE 0x1fffffffU
+#define V_T7_VALUE(x) ((x) << S_T7_VALUE)
+#define G_T7_VALUE(x) (((x) >> S_T7_VALUE) & M_T7_VALUE)
+
+#define A_MC_P_BIST_NUM_BURST 0x49408
+#define A_T7_MC_P_BIST_DATA_PATTERN 0x4940c
+
+#define S_DATA_TYPE 0
+#define M_DATA_TYPE 0xfU
+#define V_DATA_TYPE(x) ((x) << S_DATA_TYPE)
+#define G_DATA_TYPE(x) (((x) >> S_DATA_TYPE) & M_DATA_TYPE)
+
+#define A_T7_MC_P_BIST_CRC_SEED 0x49410
+#define A_T7_MC_P_BIST_NUM_ERR 0x49460
+#define A_MC_P_BIST_ERR_ADDR 0x49464
+
+#define S_ERROR_ADDR 0
+#define M_ERROR_ADDR 0x3fffffffU
+#define V_ERROR_ADDR(x) ((x) << S_ERROR_ADDR)
+#define G_ERROR_ADDR(x) (((x) >> S_ERROR_ADDR) & M_ERROR_ADDR)
+
+#define A_MC_P_BIST_USER_RWEDATA 0x49468
+/*
+ * MC_REGB_DDRC_CH0 registers. NOTE(review): these offsets (0x103xx/0x106xx)
+ * jump back below the MC_T70 window (0x48000+), so they are presumably
+ * controller-internal offsets reached via the A_MC_IND_ADDR/A_MC_IND_DATA
+ * indirect window rather than direct bar offsets — confirm against the
+ * driver's MC access routines.
+ */
+#define A_MC_REGB_DDRC_CH0_SCHED0 0x10380
+
+#define S_OPT_VPRW_SCH 31
+#define V_OPT_VPRW_SCH(x) ((x) << S_OPT_VPRW_SCH)
+#define F_OPT_VPRW_SCH V_OPT_VPRW_SCH(1U)
+
+#define S_DIS_SPECULATIVE_ACT 30
+#define V_DIS_SPECULATIVE_ACT(x) ((x) << S_DIS_SPECULATIVE_ACT)
+#define F_DIS_SPECULATIVE_ACT V_DIS_SPECULATIVE_ACT(1U)
+
+#define S_OPT_ACT_LAT 27
+#define V_OPT_ACT_LAT(x) ((x) << S_OPT_ACT_LAT)
+#define F_OPT_ACT_LAT V_OPT_ACT_LAT(1U)
+
+#define S_LPR_NUM_ENTRIES 8
+#define M_LPR_NUM_ENTRIES 0x3fU
+#define V_LPR_NUM_ENTRIES(x) ((x) << S_LPR_NUM_ENTRIES)
+#define G_LPR_NUM_ENTRIES(x) (((x) >> S_LPR_NUM_ENTRIES) & M_LPR_NUM_ENTRIES)
+
+#define S_AUTOPRE_RMW 7
+#define V_AUTOPRE_RMW(x) ((x) << S_AUTOPRE_RMW)
+#define F_AUTOPRE_RMW V_AUTOPRE_RMW(1U)
+
+#define S_DIS_OPT_NTT_BY_PRE 6
+#define V_DIS_OPT_NTT_BY_PRE(x) ((x) << S_DIS_OPT_NTT_BY_PRE)
+#define F_DIS_OPT_NTT_BY_PRE V_DIS_OPT_NTT_BY_PRE(1U)
+
+#define S_DIS_OPT_NTT_BY_ACT 5
+#define V_DIS_OPT_NTT_BY_ACT(x) ((x) << S_DIS_OPT_NTT_BY_ACT)
+#define F_DIS_OPT_NTT_BY_ACT V_DIS_OPT_NTT_BY_ACT(1U)
+
+#define S_OPT_WRCAM_FILL_LEVEL 4
+#define V_OPT_WRCAM_FILL_LEVEL(x) ((x) << S_OPT_WRCAM_FILL_LEVEL)
+#define F_OPT_WRCAM_FILL_LEVEL V_OPT_WRCAM_FILL_LEVEL(1U)
+
+#define S_PAGECLOSE 2
+#define V_PAGECLOSE(x) ((x) << S_PAGECLOSE)
+#define F_PAGECLOSE V_PAGECLOSE(1U)
+
+#define S_PREFER_WRITE 1
+#define V_PREFER_WRITE(x) ((x) << S_PREFER_WRITE)
+#define F_PREFER_WRITE V_PREFER_WRITE(1U)
+
+/* ECC configuration/status for DDRC channel 0. */
+#define A_MC_REGB_DDRC_CH0_ECCCFG0 0x10600
+
+#define S_DIS_SCRUB 23
+#define V_DIS_SCRUB(x) ((x) << S_DIS_SCRUB)
+#define F_DIS_SCRUB V_DIS_SCRUB(1U)
+
+#define S_ECC_TYPE 4
+#define M_ECC_TYPE 0x3U
+#define V_ECC_TYPE(x) ((x) << S_ECC_TYPE)
+#define G_ECC_TYPE(x) (((x) >> S_ECC_TYPE) & M_ECC_TYPE)
+
+#define S_TEST_MODE 3
+#define V_TEST_MODE(x) ((x) << S_TEST_MODE)
+#define F_TEST_MODE V_TEST_MODE(1U)
+
+#define S_ECC_MODE 0
+#define M_ECC_MODE 0x7U
+#define V_ECC_MODE(x) ((x) << S_ECC_MODE)
+#define G_ECC_MODE(x) (((x) >> S_ECC_MODE) & M_ECC_MODE)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG1 0x10604
+
+#define S_DATA_POISON_BIT 1
+#define V_DATA_POISON_BIT(x) ((x) << S_DATA_POISON_BIT)
+#define F_DATA_POISON_BIT V_DATA_POISON_BIT(1U)
+
+#define S_DATA_POISON_EN 0
+#define V_DATA_POISON_EN(x) ((x) << S_DATA_POISON_EN)
+#define F_DATA_POISON_EN V_DATA_POISON_EN(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCSTAT 0x10608
+
+#define S_ECC_UNCORRECTED_ERR 16
+#define M_ECC_UNCORRECTED_ERR 0xffU
+#define V_ECC_UNCORRECTED_ERR(x) ((x) << S_ECC_UNCORRECTED_ERR)
+#define G_ECC_UNCORRECTED_ERR(x) (((x) >> S_ECC_UNCORRECTED_ERR) & M_ECC_UNCORRECTED_ERR)
+
+#define S_ECC_CORRECTED_ERR 8
+#define M_ECC_CORRECTED_ERR 0xffU
+#define V_ECC_CORRECTED_ERR(x) ((x) << S_ECC_CORRECTED_ERR)
+#define G_ECC_CORRECTED_ERR(x) (((x) >> S_ECC_CORRECTED_ERR) & M_ECC_CORRECTED_ERR)
+
+#define S_ECC_CORRECTED_BIT_NUM 0
+#define M_ECC_CORRECTED_BIT_NUM 0x7fU
+#define V_ECC_CORRECTED_BIT_NUM(x) ((x) << S_ECC_CORRECTED_BIT_NUM)
+#define G_ECC_CORRECTED_BIT_NUM(x) (((x) >> S_ECC_CORRECTED_BIT_NUM) & M_ECC_CORRECTED_BIT_NUM)
+
+/* ECC interrupt control: force/enable bits plus count and sticky-status clears. */
+#define A_MC_REGB_DDRC_CH0_ECCCTL 0x1060c
+
+#define S_ECC_UNCORRECTED_ERR_INTR_FORCE 17
+#define V_ECC_UNCORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_FORCE)
+#define F_ECC_UNCORRECTED_ERR_INTR_FORCE V_ECC_UNCORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_FORCE 16
+#define V_ECC_CORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_CORRECTED_ERR_INTR_FORCE)
+#define F_ECC_CORRECTED_ERR_INTR_FORCE V_ECC_CORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_UNCORRECTED_ERR_INTR_EN 9
+#define V_ECC_UNCORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_EN)
+#define F_ECC_UNCORRECTED_ERR_INTR_EN V_ECC_UNCORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_EN 8
+#define V_ECC_CORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_CORRECTED_ERR_INTR_EN)
+#define F_ECC_CORRECTED_ERR_INTR_EN V_ECC_CORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_UNCORR_ERR_CNT_CLR 3
+#define V_ECC_UNCORR_ERR_CNT_CLR(x) ((x) << S_ECC_UNCORR_ERR_CNT_CLR)
+#define F_ECC_UNCORR_ERR_CNT_CLR V_ECC_UNCORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_CORR_ERR_CNT_CLR 2
+#define V_ECC_CORR_ERR_CNT_CLR(x) ((x) << S_ECC_CORR_ERR_CNT_CLR)
+#define F_ECC_CORR_ERR_CNT_CLR V_ECC_CORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_UNCORRECTED_ERR_CLR 1
+#define V_ECC_UNCORRECTED_ERR_CLR(x) ((x) << S_ECC_UNCORRECTED_ERR_CLR)
+#define F_ECC_UNCORRECTED_ERR_CLR V_ECC_UNCORRECTED_ERR_CLR(1U)
+
+#define S_ECC_CORRECTED_ERR_CLR 0
+#define V_ECC_CORRECTED_ERR_CLR(x) ((x) << S_ECC_CORRECTED_ERR_CLR)
+#define F_ECC_CORRECTED_ERR_CLR V_ECC_CORRECTED_ERR_CLR(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCERRCNT 0x10610
+
+#define S_ECC_UNCORR_ERR_CNT 16
+#define M_ECC_UNCORR_ERR_CNT 0xffffU
+#define V_ECC_UNCORR_ERR_CNT(x) ((x) << S_ECC_UNCORR_ERR_CNT)
+#define G_ECC_UNCORR_ERR_CNT(x) (((x) >> S_ECC_UNCORR_ERR_CNT) & M_ECC_UNCORR_ERR_CNT)
+
+#define S_ECC_CORR_ERR_CNT 0
+#define M_ECC_CORR_ERR_CNT 0xffffU
+#define V_ECC_CORR_ERR_CNT(x) ((x) << S_ECC_CORR_ERR_CNT)
+#define G_ECC_CORR_ERR_CNT(x) (((x) >> S_ECC_CORR_ERR_CNT) & M_ECC_CORR_ERR_CNT)
+
+/* Correctable-error capture: DRAM rank/row/bank-group/bank/column plus syndromes. */
+#define A_MC_REGB_DDRC_CH0_ECCCADDR0 0x10614
+
+#define S_ECC_CORR_RANK 24
+#define V_ECC_CORR_RANK(x) ((x) << S_ECC_CORR_RANK)
+#define F_ECC_CORR_RANK V_ECC_CORR_RANK(1U)
+
+#define S_ECC_CORR_ROW 0
+#define M_ECC_CORR_ROW 0x3ffffU
+#define V_ECC_CORR_ROW(x) ((x) << S_ECC_CORR_ROW)
+#define G_ECC_CORR_ROW(x) (((x) >> S_ECC_CORR_ROW) & M_ECC_CORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCCADDR1 0x10618
+
+#define S_ECC_CORR_BG 24
+#define M_ECC_CORR_BG 0x7U
+#define V_ECC_CORR_BG(x) ((x) << S_ECC_CORR_BG)
+#define G_ECC_CORR_BG(x) (((x) >> S_ECC_CORR_BG) & M_ECC_CORR_BG)
+
+#define S_ECC_CORR_BANK 16
+#define M_ECC_CORR_BANK 0x3U
+#define V_ECC_CORR_BANK(x) ((x) << S_ECC_CORR_BANK)
+#define G_ECC_CORR_BANK(x) (((x) >> S_ECC_CORR_BANK) & M_ECC_CORR_BANK)
+
+#define S_ECC_CORR_COL 0
+#define M_ECC_CORR_COL 0x7ffU
+#define V_ECC_CORR_COL(x) ((x) << S_ECC_CORR_COL)
+#define G_ECC_CORR_COL(x) (((x) >> S_ECC_CORR_COL) & M_ECC_CORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCCSYN0 0x1061c
+#define A_MC_REGB_DDRC_CH0_ECCCSYN1 0x10620
+#define A_MC_REGB_DDRC_CH0_ECCCSYN2 0x10624
+
+#define S_CB_CORR_SYNDROME 16
+#define M_CB_CORR_SYNDROME 0xffU
+#define V_CB_CORR_SYNDROME(x) ((x) << S_CB_CORR_SYNDROME)
+#define G_CB_CORR_SYNDROME(x) (((x) >> S_CB_CORR_SYNDROME) & M_CB_CORR_SYNDROME)
+
+#define S_ECC_CORR_SYNDROMES_71_64 0
+#define M_ECC_CORR_SYNDROMES_71_64 0xffU
+#define V_ECC_CORR_SYNDROMES_71_64(x) ((x) << S_ECC_CORR_SYNDROMES_71_64)
+#define G_ECC_CORR_SYNDROMES_71_64(x) (((x) >> S_ECC_CORR_SYNDROMES_71_64) & M_ECC_CORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK0 0x10628
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK1 0x1062c
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK2 0x10630
+
+#define S_ECC_CORR_BIT_MASK_71_64 0
+#define M_ECC_CORR_BIT_MASK_71_64 0xffU
+#define V_ECC_CORR_BIT_MASK_71_64(x) ((x) << S_ECC_CORR_BIT_MASK_71_64)
+#define G_ECC_CORR_BIT_MASK_71_64(x) (((x) >> S_ECC_CORR_BIT_MASK_71_64) & M_ECC_CORR_BIT_MASK_71_64)
+
+/* Uncorrectable-error capture: same layout as the correctable-error registers. */
+#define A_MC_REGB_DDRC_CH0_ECCUADDR0 0x10634
+
+#define S_ECC_UNCORR_RANK 24
+#define V_ECC_UNCORR_RANK(x) ((x) << S_ECC_UNCORR_RANK)
+#define F_ECC_UNCORR_RANK V_ECC_UNCORR_RANK(1U)
+
+#define S_ECC_UNCORR_ROW 0
+#define M_ECC_UNCORR_ROW 0x3ffffU
+#define V_ECC_UNCORR_ROW(x) ((x) << S_ECC_UNCORR_ROW)
+#define G_ECC_UNCORR_ROW(x) (((x) >> S_ECC_UNCORR_ROW) & M_ECC_UNCORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR1 0x10638
+
+#define S_ECC_UNCORR_BG 24
+#define M_ECC_UNCORR_BG 0x7U
+#define V_ECC_UNCORR_BG(x) ((x) << S_ECC_UNCORR_BG)
+#define G_ECC_UNCORR_BG(x) (((x) >> S_ECC_UNCORR_BG) & M_ECC_UNCORR_BG)
+
+#define S_ECC_UNCORR_BANK 16
+#define M_ECC_UNCORR_BANK 0x3U
+#define V_ECC_UNCORR_BANK(x) ((x) << S_ECC_UNCORR_BANK)
+#define G_ECC_UNCORR_BANK(x) (((x) >> S_ECC_UNCORR_BANK) & M_ECC_UNCORR_BANK)
+
+#define S_ECC_UNCORR_COL 0
+#define M_ECC_UNCORR_COL 0x7ffU
+#define V_ECC_UNCORR_COL(x) ((x) << S_ECC_UNCORR_COL)
+#define G_ECC_UNCORR_COL(x) (((x) >> S_ECC_UNCORR_COL) & M_ECC_UNCORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCUSYN0 0x1063c
+#define A_MC_REGB_DDRC_CH0_ECCUSYN1 0x10640
+#define A_MC_REGB_DDRC_CH0_ECCUSYN2 0x10644
+
+#define S_CB_UNCORR_SYNDROME 16
+#define M_CB_UNCORR_SYNDROME 0xffU
+#define V_CB_UNCORR_SYNDROME(x) ((x) << S_CB_UNCORR_SYNDROME)
+#define G_CB_UNCORR_SYNDROME(x) (((x) >> S_CB_UNCORR_SYNDROME) & M_CB_UNCORR_SYNDROME)
+
+#define S_ECC_UNCORR_SYNDROMES_71_64 0
+#define M_ECC_UNCORR_SYNDROMES_71_64 0xffU
+#define V_ECC_UNCORR_SYNDROMES_71_64(x) ((x) << S_ECC_UNCORR_SYNDROMES_71_64)
+#define G_ECC_UNCORR_SYNDROMES_71_64(x) (((x) >> S_ECC_UNCORR_SYNDROMES_71_64) & M_ECC_UNCORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR0 0x10648
+
+#define S_ECC_POISON_RANK 24
+#define V_ECC_POISON_RANK(x) ((x) << S_ECC_POISON_RANK)
+#define F_ECC_POISON_RANK V_ECC_POISON_RANK(1U)
+
+#define S_ECC_POISON_COL 0
+#define M_ECC_POISON_COL 0xfffU
+#define V_ECC_POISON_COL(x) ((x) << S_ECC_POISON_COL)
+#define G_ECC_POISON_COL(x) (((x) >> S_ECC_POISON_COL) & M_ECC_POISON_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR1 0x1064c
+
+#define S_ECC_POISON_BG 28
+#define M_ECC_POISON_BG 0x7U
+#define V_ECC_POISON_BG(x) ((x) << S_ECC_POISON_BG)
+#define G_ECC_POISON_BG(x) (((x) >> S_ECC_POISON_BG) & M_ECC_POISON_BG)
+
+#define S_ECC_POISON_BANK 24
+#define M_ECC_POISON_BANK 0x3U
+#define V_ECC_POISON_BANK(x) ((x) << S_ECC_POISON_BANK)
+#define G_ECC_POISON_BANK(x) (((x) >> S_ECC_POISON_BANK) & M_ECC_POISON_BANK)
+
+#define S_ECC_POISON_ROW 0
+#define M_ECC_POISON_ROW 0x3ffffU
+#define V_ECC_POISON_ROW(x) ((x) << S_ECC_POISON_ROW)
+#define G_ECC_POISON_ROW(x) (((x) >> S_ECC_POISON_ROW) & M_ECC_POISON_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT0 0x10658
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT1 0x1065c
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT2 0x10660
+
+#define S_ECC_POISON_DATA_71_64 0
+#define M_ECC_POISON_DATA_71_64 0xffU
+#define V_ECC_POISON_DATA_71_64(x) ((x) << S_ECC_POISON_DATA_71_64)
+#define G_ECC_POISON_DATA_71_64(x) (((x) >> S_ECC_POISON_DATA_71_64) & M_ECC_POISON_DATA_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG2 0x10668
+
+#define S_FLIP_BIT_POS1 24
+#define M_FLIP_BIT_POS1 0x7fU
+#define V_FLIP_BIT_POS1(x) ((x) << S_FLIP_BIT_POS1)
+#define G_FLIP_BIT_POS1(x) (((x) >> S_FLIP_BIT_POS1) & M_FLIP_BIT_POS1)
+
+#define S_FLIP_BIT_POS0 16
+#define M_FLIP_BIT_POS0 0x7fU
+#define V_FLIP_BIT_POS0(x) ((x) << S_FLIP_BIT_POS0)
+#define G_FLIP_BIT_POS0(x) (((x) >> S_FLIP_BIT_POS0) & M_FLIP_BIT_POS0)
+
+#define A_MC_REGB_DDRC_CH1_ECCCTL 0x1160c
+#define A_MC_REGB_DDRC_CH1_ECCERRCNT 0x11610
+#define A_MC_REGB_DDRC_CH1_ECCCADDR0 0x11614
+#define A_MC_REGB_DDRC_CH1_ECCCADDR1 0x11618
+#define A_MC_REGB_DDRC_CH1_ECCCSYN0 0x1161c
+#define A_MC_REGB_DDRC_CH1_ECCCSYN1 0x11620
+#define A_MC_REGB_DDRC_CH1_ECCCSYN2 0x11624
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK0 0x11628
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK1 0x1162c
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK2 0x11630
+#define A_MC_REGB_DDRC_CH1_ECCUADDR0 0x11634
+#define A_MC_REGB_DDRC_CH1_ECCUADDR1 0x11638
+#define A_MC_REGB_DDRC_CH1_ECCUSYN0 0x1163c
+#define A_MC_REGB_DDRC_CH1_ECCUSYN1 0x11640
+#define A_MC_REGB_DDRC_CH1_ECCUSYN2 0x11644
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTENABLE 0x20100
+
+#define S_PHYSTICKYUNLOCKEN 15
+#define V_PHYSTICKYUNLOCKEN(x) ((x) << S_PHYSTICKYUNLOCKEN)
+#define F_PHYSTICKYUNLOCKEN V_PHYSTICKYUNLOCKEN(1U)
+
+#define S_PHYBSIEN 14
+#define V_PHYBSIEN(x) ((x) << S_PHYBSIEN)
+#define F_PHYBSIEN V_PHYBSIEN(1U)
+
+#define S_PHYANIBRCVERREN 13
+#define V_PHYANIBRCVERREN(x) ((x) << S_PHYANIBRCVERREN)
+#define F_PHYANIBRCVERREN V_PHYANIBRCVERREN(1U)
+
+#define S_PHYD5ACSM1PARITYEN 12
+#define V_PHYD5ACSM1PARITYEN(x) ((x) << S_PHYD5ACSM1PARITYEN)
+#define F_PHYD5ACSM1PARITYEN V_PHYD5ACSM1PARITYEN(1U)
+
+#define S_PHYD5ACSM0PARITYEN 11
+#define V_PHYD5ACSM0PARITYEN(x) ((x) << S_PHYD5ACSM0PARITYEN)
+#define F_PHYD5ACSM0PARITYEN V_PHYD5ACSM0PARITYEN(1U)
+
+#define S_PHYRXFIFOCHECKEN 10
+#define V_PHYRXFIFOCHECKEN(x) ((x) << S_PHYRXFIFOCHECKEN)
+#define F_PHYRXFIFOCHECKEN V_PHYRXFIFOCHECKEN(1U)
+
+#define S_PHYTXPPTEN 9
+#define V_PHYTXPPTEN(x) ((x) << S_PHYTXPPTEN)
+#define F_PHYTXPPTEN V_PHYTXPPTEN(1U)
+
+#define S_PHYECCEN 8
+#define V_PHYECCEN(x) ((x) << S_PHYECCEN)
+#define F_PHYECCEN V_PHYECCEN(1U)
+
+#define S_PHYFWRESERVEDEN 3
+#define M_PHYFWRESERVEDEN 0x1fU
+#define V_PHYFWRESERVEDEN(x) ((x) << S_PHYFWRESERVEDEN)
+#define G_PHYFWRESERVEDEN(x) (((x) >> S_PHYFWRESERVEDEN) & M_PHYFWRESERVEDEN)
+
+#define S_PHYTRNGFAILEN 2
+#define V_PHYTRNGFAILEN(x) ((x) << S_PHYTRNGFAILEN)
+#define F_PHYTRNGFAILEN V_PHYTRNGFAILEN(1U)
+
+#define S_PHYINITCMPLTEN 1
+#define V_PHYINITCMPLTEN(x) ((x) << S_PHYINITCMPLTEN)
+#define F_PHYINITCMPLTEN V_PHYINITCMPLTEN(1U)
+
+#define S_PHYTRNGCMPLTEN 0
+#define V_PHYTRNGCMPLTEN(x) ((x) << S_PHYTRNGCMPLTEN)
+#define F_PHYTRNGCMPLTEN V_PHYTRNGCMPLTEN(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTFWCONTROL 0x20101
+
+#define S_PHYFWRESERVEDFW 3
+#define M_PHYFWRESERVEDFW 0x1fU
+#define V_PHYFWRESERVEDFW(x) ((x) << S_PHYFWRESERVEDFW)
+#define G_PHYFWRESERVEDFW(x) (((x) >> S_PHYFWRESERVEDFW) & M_PHYFWRESERVEDFW)
+
+#define S_PHYTRNGFAILFW 2
+#define V_PHYTRNGFAILFW(x) ((x) << S_PHYTRNGFAILFW)
+#define F_PHYTRNGFAILFW V_PHYTRNGFAILFW(1U)
+
+#define S_PHYINITCMPLTFW 1
+#define V_PHYINITCMPLTFW(x) ((x) << S_PHYINITCMPLTFW)
+#define F_PHYINITCMPLTFW V_PHYINITCMPLTFW(1U)
+
+#define S_PHYTRNGCMPLTFW 0
+#define V_PHYTRNGCMPLTFW(x) ((x) << S_PHYTRNGCMPLTFW)
+#define F_PHYTRNGCMPLTFW V_PHYTRNGCMPLTFW(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTMASK 0x20102
+
+#define S_PHYSTICKYUNLOCKMSK 15
+#define V_PHYSTICKYUNLOCKMSK(x) ((x) << S_PHYSTICKYUNLOCKMSK)
+#define F_PHYSTICKYUNLOCKMSK V_PHYSTICKYUNLOCKMSK(1U)
+
+#define S_PHYBSIMSK 14
+#define V_PHYBSIMSK(x) ((x) << S_PHYBSIMSK)
+#define F_PHYBSIMSK V_PHYBSIMSK(1U)
+
+#define S_PHYANIBRCVERRMSK 13
+#define V_PHYANIBRCVERRMSK(x) ((x) << S_PHYANIBRCVERRMSK)
+#define F_PHYANIBRCVERRMSK V_PHYANIBRCVERRMSK(1U)
+
+#define S_PHYD5ACSM1PARITYMSK 12
+#define V_PHYD5ACSM1PARITYMSK(x) ((x) << S_PHYD5ACSM1PARITYMSK)
+#define F_PHYD5ACSM1PARITYMSK V_PHYD5ACSM1PARITYMSK(1U)
+
+#define S_PHYD5ACSM0PARITYMSK 11
+#define V_PHYD5ACSM0PARITYMSK(x) ((x) << S_PHYD5ACSM0PARITYMSK)
+#define F_PHYD5ACSM0PARITYMSK V_PHYD5ACSM0PARITYMSK(1U)
+
+#define S_PHYRXFIFOCHECKMSK 10
+#define V_PHYRXFIFOCHECKMSK(x) ((x) << S_PHYRXFIFOCHECKMSK)
+#define F_PHYRXFIFOCHECKMSK V_PHYRXFIFOCHECKMSK(1U)
+
+#define S_PHYTXPPTMSK 9
+#define V_PHYTXPPTMSK(x) ((x) << S_PHYTXPPTMSK)
+#define F_PHYTXPPTMSK V_PHYTXPPTMSK(1U)
+
+#define S_PHYECCMSK 8
+#define V_PHYECCMSK(x) ((x) << S_PHYECCMSK)
+#define F_PHYECCMSK V_PHYECCMSK(1U)
+
+#define S_PHYFWRESERVEDMSK 3
+#define M_PHYFWRESERVEDMSK 0x1fU
+#define V_PHYFWRESERVEDMSK(x) ((x) << S_PHYFWRESERVEDMSK)
+#define G_PHYFWRESERVEDMSK(x) (((x) >> S_PHYFWRESERVEDMSK) & M_PHYFWRESERVEDMSK)
+
+#define S_PHYTRNGFAILMSK 2
+#define V_PHYTRNGFAILMSK(x) ((x) << S_PHYTRNGFAILMSK)
+#define F_PHYTRNGFAILMSK V_PHYTRNGFAILMSK(1U)
+
+#define S_PHYINITCMPLTMSK 1
+#define V_PHYINITCMPLTMSK(x) ((x) << S_PHYINITCMPLTMSK)
+#define F_PHYINITCMPLTMSK V_PHYINITCMPLTMSK(1U)
+
+#define S_PHYTRNGCMPLTMSK 0
+#define V_PHYTRNGCMPLTMSK(x) ((x) << S_PHYTRNGCMPLTMSK)
+#define F_PHYTRNGCMPLTMSK V_PHYTRNGCMPLTMSK(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTCLEAR 0x20103
+
+#define S_PHYSTICKYUNLOCKCLR 15
+#define V_PHYSTICKYUNLOCKCLR(x) ((x) << S_PHYSTICKYUNLOCKCLR)
+#define F_PHYSTICKYUNLOCKCLR V_PHYSTICKYUNLOCKCLR(1U)
+
+#define S_PHYBSICLR 14
+#define V_PHYBSICLR(x) ((x) << S_PHYBSICLR)
+#define F_PHYBSICLR V_PHYBSICLR(1U)
+
+#define S_PHYANIBRCVERRCLR 13
+#define V_PHYANIBRCVERRCLR(x) ((x) << S_PHYANIBRCVERRCLR)
+#define F_PHYANIBRCVERRCLR V_PHYANIBRCVERRCLR(1U)
+
+#define S_PHYD5ACSM1PARITYCLR 12
+#define V_PHYD5ACSM1PARITYCLR(x) ((x) << S_PHYD5ACSM1PARITYCLR)
+#define F_PHYD5ACSM1PARITYCLR V_PHYD5ACSM1PARITYCLR(1U)
+
+#define S_PHYD5ACSM0PARITYCLR 11
+#define V_PHYD5ACSM0PARITYCLR(x) ((x) << S_PHYD5ACSM0PARITYCLR)
+#define F_PHYD5ACSM0PARITYCLR V_PHYD5ACSM0PARITYCLR(1U)
+
+#define S_PHYRXFIFOCHECKCLR 10
+#define V_PHYRXFIFOCHECKCLR(x) ((x) << S_PHYRXFIFOCHECKCLR)
+#define F_PHYRXFIFOCHECKCLR V_PHYRXFIFOCHECKCLR(1U)
+
+#define S_PHYTXPPTCLR 9
+#define V_PHYTXPPTCLR(x) ((x) << S_PHYTXPPTCLR)
+#define F_PHYTXPPTCLR V_PHYTXPPTCLR(1U)
+
+#define S_PHYECCCLR 8
+#define V_PHYECCCLR(x) ((x) << S_PHYECCCLR)
+#define F_PHYECCCLR V_PHYECCCLR(1U)
+
+#define S_PHYFWRESERVEDCLR 3
+#define M_PHYFWRESERVEDCLR 0x1fU
+#define V_PHYFWRESERVEDCLR(x) ((x) << S_PHYFWRESERVEDCLR)
+#define G_PHYFWRESERVEDCLR(x) (((x) >> S_PHYFWRESERVEDCLR) & M_PHYFWRESERVEDCLR)
+
+#define S_PHYTRNGFAILCLR 2
+#define V_PHYTRNGFAILCLR(x) ((x) << S_PHYTRNGFAILCLR)
+#define F_PHYTRNGFAILCLR V_PHYTRNGFAILCLR(1U)
+
+#define S_PHYINITCMPLTCLR 1
+#define V_PHYINITCMPLTCLR(x) ((x) << S_PHYINITCMPLTCLR)
+#define F_PHYINITCMPLTCLR V_PHYINITCMPLTCLR(1U)
+
+#define S_PHYTRNGCMPLTCLR 0
+#define V_PHYTRNGCMPLTCLR(x) ((x) << S_PHYTRNGCMPLTCLR)
+#define F_PHYTRNGCMPLTCLR V_PHYTRNGCMPLTCLR(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTSTATUS 0x20104
+
+#define S_PHYSTICKYUNLOCKERR 15
+#define V_PHYSTICKYUNLOCKERR(x) ((x) << S_PHYSTICKYUNLOCKERR)
+#define F_PHYSTICKYUNLOCKERR V_PHYSTICKYUNLOCKERR(1U)
+
+#define S_PHYBSIINT 14
+#define V_PHYBSIINT(x) ((x) << S_PHYBSIINT)
+#define F_PHYBSIINT V_PHYBSIINT(1U)
+
+#define S_PHYANIBRCVERR 13
+#define V_PHYANIBRCVERR(x) ((x) << S_PHYANIBRCVERR)
+#define F_PHYANIBRCVERR V_PHYANIBRCVERR(1U)
+
+#define S_PHYD5ACSM1PARITYERR 12
+#define V_PHYD5ACSM1PARITYERR(x) ((x) << S_PHYD5ACSM1PARITYERR)
+#define F_PHYD5ACSM1PARITYERR V_PHYD5ACSM1PARITYERR(1U)
+
+#define S_PHYD5ACSM0PARITYERR 11
+#define V_PHYD5ACSM0PARITYERR(x) ((x) << S_PHYD5ACSM0PARITYERR)
+#define F_PHYD5ACSM0PARITYERR V_PHYD5ACSM0PARITYERR(1U)
+
+#define S_PHYRXFIFOCHECKERR 10
+#define V_PHYRXFIFOCHECKERR(x) ((x) << S_PHYRXFIFOCHECKERR)
+#define F_PHYRXFIFOCHECKERR V_PHYRXFIFOCHECKERR(1U)
+
+#define S_PHYRXTXPPTERR 9
+#define V_PHYRXTXPPTERR(x) ((x) << S_PHYRXTXPPTERR)
+#define F_PHYRXTXPPTERR V_PHYRXTXPPTERR(1U)
+
+#define S_PHYECCERR 8
+#define V_PHYECCERR(x) ((x) << S_PHYECCERR)
+#define F_PHYECCERR V_PHYECCERR(1U)
+
+#define S_PHYFWRESERVED 3
+#define M_PHYFWRESERVED 0x1fU
+#define V_PHYFWRESERVED(x) ((x) << S_PHYFWRESERVED)
+#define G_PHYFWRESERVED(x) (((x) >> S_PHYFWRESERVED) & M_PHYFWRESERVED)
+
+#define S_PHYTRNGFAIL 2
+#define V_PHYTRNGFAIL(x) ((x) << S_PHYTRNGFAIL)
+#define F_PHYTRNGFAIL V_PHYTRNGFAIL(1U)
+
+#define S_PHYINITCMPLT 1
+#define V_PHYINITCMPLT(x) ((x) << S_PHYINITCMPLT)
+#define F_PHYINITCMPLT V_PHYINITCMPLT(1U)
+
+#define S_PHYTRNGCMPLT 0
+#define V_PHYTRNGCMPLT(x) ((x) << S_PHYTRNGCMPLT)
+#define F_PHYTRNGCMPLT V_PHYTRNGCMPLT(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTOVERRIDE 0x20107
+
+#define S_PHYINTERRUPTOVERRIDE 0
+#define M_PHYINTERRUPTOVERRIDE 0xffffU
+#define V_PHYINTERRUPTOVERRIDE(x) ((x) << S_PHYINTERRUPTOVERRIDE)
+#define G_PHYINTERRUPTOVERRIDE(x) (((x) >> S_PHYINTERRUPTOVERRIDE) & M_PHYINTERRUPTOVERRIDE)
+
+/* registers for module MC_T71 */
+#define MC_T71_BASE_ADDR 0x58000
+
+/* registers for module GCACHE */
+#define GCACHE_BASE_ADDR 0x51400
+
+#define A_GCACHE_MODE_SEL0 0x51400
+
+#define S_GC_MA_RSP 16
+#define V_GC_MA_RSP(x) ((x) << S_GC_MA_RSP)
+#define F_GC_MA_RSP V_GC_MA_RSP(1U)
+
+#define A_GCACHE_MEMZONE0_REGION1 0x51404
+
+#define S_REGION_EN1 18
+#define V_REGION_EN1(x) ((x) << S_REGION_EN1)
+#define F_REGION_EN1 V_REGION_EN1(1U)
+
+#define S_EDC_REGION1 17
+#define V_EDC_REGION1(x) ((x) << S_EDC_REGION1)
+#define F_EDC_REGION1 V_EDC_REGION1(1U)
+
+#define S_CACHE_REGION1 16
+#define V_CACHE_REGION1(x) ((x) << S_CACHE_REGION1)
+#define F_CACHE_REGION1 V_CACHE_REGION1(1U)
+
+#define S_END1 0
+#define M_END1 0xffffU
+#define V_END1(x) ((x) << S_END1)
+#define G_END1(x) (((x) >> S_END1) & M_END1)
+
+#define A_GCACHE_MEMZONE0_REGION2 0x51408
+
+#define S_REGION_EN2 18
+#define V_REGION_EN2(x) ((x) << S_REGION_EN2)
+#define F_REGION_EN2 V_REGION_EN2(1U)
+
+#define S_EDC_REGION2 17
+#define V_EDC_REGION2(x) ((x) << S_EDC_REGION2)
+#define F_EDC_REGION2 V_EDC_REGION2(1U)
+
+#define S_CACHE_REGION2 16
+#define V_CACHE_REGION2(x) ((x) << S_CACHE_REGION2)
+#define F_CACHE_REGION2 V_CACHE_REGION2(1U)
+
+#define S_END2 0
+#define M_END2 0xffffU
+#define V_END2(x) ((x) << S_END2)
+#define G_END2(x) (((x) >> S_END2) & M_END2)
+
+#define A_GCACHE_MEMZONE0_REGION3 0x5140c
+
+#define S_REGION_EN3 18
+#define V_REGION_EN3(x) ((x) << S_REGION_EN3)
+#define F_REGION_EN3 V_REGION_EN3(1U)
+
+#define S_EDC_REGION3 17
+#define V_EDC_REGION3(x) ((x) << S_EDC_REGION3)
+#define F_EDC_REGION3 V_EDC_REGION3(1U)
+
+#define S_CACHE_REGION3 16
+#define V_CACHE_REGION3(x) ((x) << S_CACHE_REGION3)
+#define F_CACHE_REGION3 V_CACHE_REGION3(1U)
+
+#define S_END3 0
+#define M_END3 0xffffU
+#define V_END3(x) ((x) << S_END3)
+#define G_END3(x) (((x) >> S_END3) & M_END3)
+
+#define A_GCACHE_MEMZONE0_REGION4 0x51410
+
+#define S_REGION_EN4 18
+#define V_REGION_EN4(x) ((x) << S_REGION_EN4)
+#define F_REGION_EN4 V_REGION_EN4(1U)
+
+#define S_EDC_REGION4 17
+#define V_EDC_REGION4(x) ((x) << S_EDC_REGION4)
+#define F_EDC_REGION4 V_EDC_REGION4(1U)
+
+#define S_CACHE_REGION4 16
+#define V_CACHE_REGION4(x) ((x) << S_CACHE_REGION4)
+#define F_CACHE_REGION4 V_CACHE_REGION4(1U)
+
+#define S_END4 0
+#define M_END4 0xffffU
+#define V_END4(x) ((x) << S_END4)
+#define G_END4(x) (((x) >> S_END4) & M_END4)
+
+#define A_GCACHE_MEMZONE0_REGION5 0x51414
+
+#define S_REGION_EN5 18
+#define V_REGION_EN5(x) ((x) << S_REGION_EN5)
+#define F_REGION_EN5 V_REGION_EN5(1U)
+
+#define S_EDC_REGION5 17
+#define V_EDC_REGION5(x) ((x) << S_EDC_REGION5)
+#define F_EDC_REGION5 V_EDC_REGION5(1U)
+
+#define S_CACHE_REGION5 16
+#define V_CACHE_REGION5(x) ((x) << S_CACHE_REGION5)
+#define F_CACHE_REGION5 V_CACHE_REGION5(1U)
+
+#define S_END5 0
+#define M_END5 0xffffU
+#define V_END5(x) ((x) << S_END5)
+#define G_END5(x) (((x) >> S_END5) & M_END5)
+
+#define A_GCACHE_MEMZONE0_REGION6 0x51418
+
+#define S_REGION_EN6 18
+#define V_REGION_EN6(x) ((x) << S_REGION_EN6)
+#define F_REGION_EN6 V_REGION_EN6(1U)
+
+#define S_EDC_REGION6 17
+#define V_EDC_REGION6(x) ((x) << S_EDC_REGION6)
+#define F_EDC_REGION6 V_EDC_REGION6(1U)
+
+#define S_CACHE_REGION6 16
+#define V_CACHE_REGION6(x) ((x) << S_CACHE_REGION6)
+#define F_CACHE_REGION6 V_CACHE_REGION6(1U)
+
+#define S_END6 0
+#define M_END6 0xffffU
+#define V_END6(x) ((x) << S_END6)
+#define G_END6(x) (((x) >> S_END6) & M_END6)
+
+#define A_GCACHE_MEMZONE0_REGION7 0x5141c
+
+#define S_REGION_EN7 18
+#define V_REGION_EN7(x) ((x) << S_REGION_EN7)
+#define F_REGION_EN7 V_REGION_EN7(1U)
+
+#define S_EDC_REGION7 17
+#define V_EDC_REGION7(x) ((x) << S_EDC_REGION7)
+#define F_EDC_REGION7 V_EDC_REGION7(1U)
+
+#define S_CACHE_REGION7 16
+#define V_CACHE_REGION7(x) ((x) << S_CACHE_REGION7)
+#define F_CACHE_REGION7 V_CACHE_REGION7(1U)
+
+#define S_END7 0
+#define M_END7 0xffffU
+#define V_END7(x) ((x) << S_END7)
+#define G_END7(x) (((x) >> S_END7) & M_END7)
+
+#define A_GCACHE_MEMZONE0_REGION8 0x51420
+
+#define S_REGION_EN8 18
+#define V_REGION_EN8(x) ((x) << S_REGION_EN8)
+#define F_REGION_EN8 V_REGION_EN8(1U)
+
+#define S_EDC_REGION8 17
+#define V_EDC_REGION8(x) ((x) << S_EDC_REGION8)
+#define F_EDC_REGION8 V_EDC_REGION8(1U)
+
+#define S_CACHE_REGION8 16
+#define V_CACHE_REGION8(x) ((x) << S_CACHE_REGION8)
+#define F_CACHE_REGION8 V_CACHE_REGION8(1U)
+
+#define S_END8 0
+#define M_END8 0xffffU
+#define V_END8(x) ((x) << S_END8)
+#define G_END8(x) (((x) >> S_END8) & M_END8)
+
+#define A_GCACHE_REG0_BASE_MSB 0x51424
+#define A_GCACHE_MEMZONE0_REGION1_MSB 0x51428
+
+#define S_START1 0
+#define M_START1 0xffffU
+#define V_START1(x) ((x) << S_START1)
+#define G_START1(x) (((x) >> S_START1) & M_START1)
+
+#define A_GCACHE_MEMZONE0_REGION2_MSB 0x5142c
+
+#define S_START2 0
+#define M_START2 0xffffU
+#define V_START2(x) ((x) << S_START2)
+#define G_START2(x) (((x) >> S_START2) & M_START2)
+
+#define A_GCACHE_MEMZONE0_REGION3_MSB 0x51430
+
+#define S_START3 0
+#define M_START3 0xffffU
+#define V_START3(x) ((x) << S_START3)
+#define G_START3(x) (((x) >> S_START3) & M_START3)
+
+#define A_GCACHE_MEMZONE0_REGION4_MSB 0x51434
+
+#define S_START4 0
+#define M_START4 0xffffU
+#define V_START4(x) ((x) << S_START4)
+#define G_START4(x) (((x) >> S_START4) & M_START4)
+
+#define A_GCACHE_MEMZONE0_REGION5_MSB 0x51438
+
+#define S_START5 0
+#define M_START5 0xffffU
+#define V_START5(x) ((x) << S_START5)
+#define G_START5(x) (((x) >> S_START5) & M_START5)
+
+#define A_GCACHE_MEMZONE0_REGION6_MSB 0x5143c
+
+#define S_START6 0
+#define M_START6 0xffffU
+#define V_START6(x) ((x) << S_START6)
+#define G_START6(x) (((x) >> S_START6) & M_START6)
+
+#define A_GCACHE_MEMZONE0_REGION7_MSB 0x51440
+
+#define S_START7 0
+#define M_START7 0xffffU
+#define V_START7(x) ((x) << S_START7)
+#define G_START7(x) (((x) >> S_START7) & M_START7)
+
+#define A_GCACHE_MEMZONE0_REGION8_MSB 0x51444
+
+#define S_START8 0
+#define M_START8 0xffffU
+#define V_START8(x) ((x) << S_START8)
+#define G_START8(x) (((x) >> S_START8) & M_START8)
+
+#define A_GCACHE_MODE_SEL1 0x51448
+#define A_GCACHE_MEMZONE1_REGION1 0x5144c
+#define A_GCACHE_MEMZONE1_REGION2 0x51450
+#define A_GCACHE_MEMZONE1_REGION3 0x51454
+#define A_GCACHE_MEMZONE1_REGION4 0x51458
+#define A_GCACHE_MEMZONE1_REGION5 0x5145c
+#define A_GCACHE_MEMZONE1_REGION6 0x51460
+#define A_GCACHE_MEMZONE1_REGION7 0x51464
+#define A_GCACHE_MEMZONE1_REGION8 0x51468
+#define A_GCACHE_MEMZONE1_REGION1_MSB 0x5146c
+#define A_GCACHE_MEMZONE1_REGION2_MSB 0x51470
+#define A_GCACHE_MEMZONE1_REGION3_MSB 0x51474
+#define A_GCACHE_MEMZONE1_REGION4_MSB 0x51478
+#define A_GCACHE_MEMZONE1_REGION5_MSB 0x5147c
+#define A_GCACHE_MEMZONE1_REGION6_MSB 0x51480
+#define A_GCACHE_MEMZONE1_REGION7_MSB 0x51484
+#define A_GCACHE_MEMZONE1_REGION8_MSB 0x51488
+#define A_GCACHE_HMA_MC1_EN 0x5148c
+
+#define S_MC1_EN 1
+#define V_MC1_EN(x) ((x) << S_MC1_EN)
+#define F_MC1_EN V_MC1_EN(1U)
+
+#define S_HMA_EN 0
+#define V_HMA_EN(x) ((x) << S_HMA_EN)
+#define F_HMA_EN V_HMA_EN(1U)
+
+#define A_GCACHE_P_BIST_CMD 0x51490
+#define A_GCACHE_P_BIST_CMD_ADDR 0x51494
+#define A_GCACHE_P_BIST_CMD_LEN 0x51498
+#define A_GCACHE_P_BIST_DATA_PATTERN 0x5149c
+#define A_GCACHE_P_BIST_USER_WDATA0 0x514a0
+#define A_GCACHE_P_BIST_USER_WDATA1 0x514a4
+#define A_GCACHE_P_BIST_USER_WDATA2 0x514a8
+#define A_GCACHE_P_BIST_NUM_ERR 0x514ac
+#define A_GCACHE_P_BIST_ERR_FIRST_ADDR 0x514b0
+#define A_GCACHE_P_BIST_STATUS_RDATA 0x514b4
+#define A_GCACHE_P_BIST_CRC_SEED 0x514fc
+#define A_GCACHE_CACHE_SIZE 0x51500
+
+#define S_HMA_2MB 1
+#define V_HMA_2MB(x) ((x) << S_HMA_2MB)
+#define F_HMA_2MB V_HMA_2MB(1U)
+
+#define S_MC0_2MB 0
+#define V_MC0_2MB(x) ((x) << S_MC0_2MB)
+#define F_MC0_2MB V_MC0_2MB(1U)
+
+#define A_GCACHE_HINT_MAPPING 0x51504
+
+#define S_CLIENT_HINT_EN 16
+#define M_CLIENT_HINT_EN 0x7fffU
+#define V_CLIENT_HINT_EN(x) ((x) << S_CLIENT_HINT_EN)
+#define G_CLIENT_HINT_EN(x) (((x) >> S_CLIENT_HINT_EN) & M_CLIENT_HINT_EN)
+
+#define S_HINT_ADDR_SPLIT_EN 8
+#define V_HINT_ADDR_SPLIT_EN(x) ((x) << S_HINT_ADDR_SPLIT_EN)
+#define F_HINT_ADDR_SPLIT_EN V_HINT_ADDR_SPLIT_EN(1U)
+
+#define S_TP_HINT_HMA_MC 2
+#define V_TP_HINT_HMA_MC(x) ((x) << S_TP_HINT_HMA_MC)
+#define F_TP_HINT_HMA_MC V_TP_HINT_HMA_MC(1U)
+
+#define S_CIM_HINT_HMA_MC 1
+#define V_CIM_HINT_HMA_MC(x) ((x) << S_CIM_HINT_HMA_MC)
+#define F_CIM_HINT_HMA_MC V_CIM_HINT_HMA_MC(1U)
+
+#define S_LE_HINT_HMA_MC 0
+#define V_LE_HINT_HMA_MC(x) ((x) << S_LE_HINT_HMA_MC)
+#define F_LE_HINT_HMA_MC V_LE_HINT_HMA_MC(1U)
+
+#define A_GCACHE_PERF_EN 0x51508
+
+#define S_PERF_CLEAR_GC1 3
+#define V_PERF_CLEAR_GC1(x) ((x) << S_PERF_CLEAR_GC1)
+#define F_PERF_CLEAR_GC1 V_PERF_CLEAR_GC1(1U)
+
+#define S_PERF_CLEAR_GC0 2
+#define V_PERF_CLEAR_GC0(x) ((x) << S_PERF_CLEAR_GC0)
+#define F_PERF_CLEAR_GC0 V_PERF_CLEAR_GC0(1U)
+
+#define S_PERF_EN_GC1 1
+#define V_PERF_EN_GC1(x) ((x) << S_PERF_EN_GC1)
+#define F_PERF_EN_GC1 V_PERF_EN_GC1(1U)
+
+#define S_PERF_EN_GC0 0
+#define V_PERF_EN_GC0(x) ((x) << S_PERF_EN_GC0)
+#define F_PERF_EN_GC0 V_PERF_EN_GC0(1U)
+
+#define A_GCACHE_PERF_GC0_RD_HIT 0x5150c
+#define A_GCACHE_PERF_GC1_RD_HIT 0x51510
+#define A_GCACHE_PERF_GC0_WR_HIT 0x51514
+#define A_GCACHE_PERF_GC1_WR_HIT 0x51518
+#define A_GCACHE_PERF_GC0_RD_MISS 0x5151c
+#define A_GCACHE_PERF_GC1_RD_MISS 0x51520
+#define A_GCACHE_PERF_GC0_WR_MISS 0x51524
+#define A_GCACHE_PERF_GC1_WR_MISS 0x51528
+#define A_GCACHE_PERF_GC0_RD_REQ 0x5152c
+#define A_GCACHE_PERF_GC1_RD_REQ 0x51530
+#define A_GCACHE_PERF_GC0_WR_REQ 0x51534
+#define A_GCACHE_PERF_GC1_WR_REQ 0x51538
+#define A_GCACHE_PAR_CAUSE 0x5153c
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_CAUSE V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_CAUSE V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_PAR_CAUSE 19
+#define V_GC1_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC1_RSP_PERR_PAR_CAUSE)
+#define F_GC1_RSP_PERR_PAR_CAUSE V_GC1_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_PAR_CAUSE 18
+#define V_GC0_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC0_RSP_PERR_PAR_CAUSE)
+#define F_GC0_RSP_PERR_PAR_CAUSE V_GC0_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_PAR_CAUSE 17
+#define V_GC1_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_UERR_PAR_CAUSE)
+#define F_GC1_LRU_UERR_PAR_CAUSE V_GC1_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_PAR_CAUSE 16
+#define V_GC0_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_UERR_PAR_CAUSE)
+#define F_GC0_LRU_UERR_PAR_CAUSE V_GC0_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_PAR_CAUSE 15
+#define V_GC1_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_UERR_PAR_CAUSE)
+#define F_GC1_TAG_UERR_PAR_CAUSE V_GC1_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_PAR_CAUSE 14
+#define V_GC0_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_UERR_PAR_CAUSE)
+#define F_GC0_TAG_UERR_PAR_CAUSE V_GC0_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_PAR_CAUSE 13
+#define V_GC1_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_CERR_PAR_CAUSE)
+#define F_GC1_LRU_CERR_PAR_CAUSE V_GC1_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_PAR_CAUSE 12
+#define V_GC0_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_CERR_PAR_CAUSE)
+#define F_GC0_LRU_CERR_PAR_CAUSE V_GC0_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_PAR_CAUSE 11
+#define V_GC1_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_CERR_PAR_CAUSE)
+#define F_GC1_TAG_CERR_PAR_CAUSE V_GC1_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_PAR_CAUSE 10
+#define V_GC0_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_CERR_PAR_CAUSE)
+#define F_GC0_TAG_CERR_PAR_CAUSE V_GC0_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_CE_PAR_CAUSE 9
+#define V_GC1_CE_PAR_CAUSE(x) ((x) << S_GC1_CE_PAR_CAUSE)
+#define F_GC1_CE_PAR_CAUSE V_GC1_CE_PAR_CAUSE(1U)
+
+#define S_GC0_CE_PAR_CAUSE 8
+#define V_GC0_CE_PAR_CAUSE(x) ((x) << S_GC0_CE_PAR_CAUSE)
+#define F_GC0_CE_PAR_CAUSE V_GC0_CE_PAR_CAUSE(1U)
+
+#define S_GC1_UE_PAR_CAUSE 7
+#define V_GC1_UE_PAR_CAUSE(x) ((x) << S_GC1_UE_PAR_CAUSE)
+#define F_GC1_UE_PAR_CAUSE V_GC1_UE_PAR_CAUSE(1U)
+
+#define S_GC0_UE_PAR_CAUSE 6
+#define V_GC0_UE_PAR_CAUSE(x) ((x) << S_GC0_UE_PAR_CAUSE)
+#define F_GC0_UE_PAR_CAUSE V_GC0_UE_PAR_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_CAUSE 5
+#define V_GC1_CMD_PAR_CAUSE(x) ((x) << S_GC1_CMD_PAR_CAUSE)
+#define F_GC1_CMD_PAR_CAUSE V_GC1_CMD_PAR_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_CAUSE 4
+#define V_GC1_DATA_PAR_CAUSE(x) ((x) << S_GC1_DATA_PAR_CAUSE)
+#define F_GC1_DATA_PAR_CAUSE V_GC1_DATA_PAR_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_CAUSE 3
+#define V_GC0_CMD_PAR_CAUSE(x) ((x) << S_GC0_CMD_PAR_CAUSE)
+#define F_GC0_CMD_PAR_CAUSE V_GC0_CMD_PAR_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_CAUSE 2
+#define V_GC0_DATA_PAR_CAUSE(x) ((x) << S_GC0_DATA_PAR_CAUSE)
+#define F_GC0_DATA_PAR_CAUSE V_GC0_DATA_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_PAR_CAUSE 1
+#define V_ILLADDRACCESS1_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS1_PAR_CAUSE)
+#define F_ILLADDRACCESS1_PAR_CAUSE V_ILLADDRACCESS1_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_PAR_CAUSE 0
+#define V_ILLADDRACCESS0_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS0_PAR_CAUSE)
+#define F_ILLADDRACCESS0_PAR_CAUSE V_ILLADDRACCESS0_PAR_CAUSE(1U)
+
+#define A_GCACHE_PAR_ENABLE 0x51540
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_ENABLE V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_ENABLE V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_PAR_ENABLE 19
+#define V_GC1_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC1_RSP_PERR_PAR_ENABLE)
+#define F_GC1_RSP_PERR_PAR_ENABLE V_GC1_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_PAR_ENABLE 18
+#define V_GC0_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC0_RSP_PERR_PAR_ENABLE)
+#define F_GC0_RSP_PERR_PAR_ENABLE V_GC0_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_PAR_ENABLE 17
+#define V_GC1_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_UERR_PAR_ENABLE)
+#define F_GC1_LRU_UERR_PAR_ENABLE V_GC1_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_PAR_ENABLE 16
+#define V_GC0_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_UERR_PAR_ENABLE)
+#define F_GC0_LRU_UERR_PAR_ENABLE V_GC0_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_PAR_ENABLE 15
+#define V_GC1_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_UERR_PAR_ENABLE)
+#define F_GC1_TAG_UERR_PAR_ENABLE V_GC1_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_PAR_ENABLE 14
+#define V_GC0_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_UERR_PAR_ENABLE)
+#define F_GC0_TAG_UERR_PAR_ENABLE V_GC0_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_PAR_ENABLE 13
+#define V_GC1_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_CERR_PAR_ENABLE)
+#define F_GC1_LRU_CERR_PAR_ENABLE V_GC1_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_PAR_ENABLE 12
+#define V_GC0_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_CERR_PAR_ENABLE)
+#define F_GC0_LRU_CERR_PAR_ENABLE V_GC0_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_PAR_ENABLE 11
+#define V_GC1_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_CERR_PAR_ENABLE)
+#define F_GC1_TAG_CERR_PAR_ENABLE V_GC1_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_PAR_ENABLE 10
+#define V_GC0_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_CERR_PAR_ENABLE)
+#define F_GC0_TAG_CERR_PAR_ENABLE V_GC0_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_CE_PAR_ENABLE 9
+#define V_GC1_CE_PAR_ENABLE(x) ((x) << S_GC1_CE_PAR_ENABLE)
+#define F_GC1_CE_PAR_ENABLE V_GC1_CE_PAR_ENABLE(1U)
+
+#define S_GC0_CE_PAR_ENABLE 8
+#define V_GC0_CE_PAR_ENABLE(x) ((x) << S_GC0_CE_PAR_ENABLE)
+#define F_GC0_CE_PAR_ENABLE V_GC0_CE_PAR_ENABLE(1U)
+
+#define S_GC1_UE_PAR_ENABLE 7
+#define V_GC1_UE_PAR_ENABLE(x) ((x) << S_GC1_UE_PAR_ENABLE)
+#define F_GC1_UE_PAR_ENABLE V_GC1_UE_PAR_ENABLE(1U)
+
+#define S_GC0_UE_PAR_ENABLE 6
+#define V_GC0_UE_PAR_ENABLE(x) ((x) << S_GC0_UE_PAR_ENABLE)
+#define F_GC0_UE_PAR_ENABLE V_GC0_UE_PAR_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_ENABLE 5
+#define V_GC1_CMD_PAR_ENABLE(x) ((x) << S_GC1_CMD_PAR_ENABLE)
+#define F_GC1_CMD_PAR_ENABLE V_GC1_CMD_PAR_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_ENABLE 4
+#define V_GC1_DATA_PAR_ENABLE(x) ((x) << S_GC1_DATA_PAR_ENABLE)
+#define F_GC1_DATA_PAR_ENABLE V_GC1_DATA_PAR_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_ENABLE 3
+#define V_GC0_CMD_PAR_ENABLE(x) ((x) << S_GC0_CMD_PAR_ENABLE)
+#define F_GC0_CMD_PAR_ENABLE V_GC0_CMD_PAR_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_ENABLE 2
+#define V_GC0_DATA_PAR_ENABLE(x) ((x) << S_GC0_DATA_PAR_ENABLE)
+#define F_GC0_DATA_PAR_ENABLE V_GC0_DATA_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_PAR_ENABLE 1
+#define V_ILLADDRACCESS1_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS1_PAR_ENABLE)
+#define F_ILLADDRACCESS1_PAR_ENABLE V_ILLADDRACCESS1_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_PAR_ENABLE 0
+#define V_ILLADDRACCESS0_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS0_PAR_ENABLE)
+#define F_ILLADDRACCESS0_PAR_ENABLE V_ILLADDRACCESS0_PAR_ENABLE(1U)
+
+#define A_GCACHE_INT_ENABLE 0x51544
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_ENABLE V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_ENABLE V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_INT_ENABLE 19
+#define V_GC1_RSP_PERR_INT_ENABLE(x) ((x) << S_GC1_RSP_PERR_INT_ENABLE)
+#define F_GC1_RSP_PERR_INT_ENABLE V_GC1_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_INT_ENABLE 18
+#define V_GC0_RSP_PERR_INT_ENABLE(x) ((x) << S_GC0_RSP_PERR_INT_ENABLE)
+#define F_GC0_RSP_PERR_INT_ENABLE V_GC0_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_INT_ENABLE 17
+#define V_GC1_LRU_UERR_INT_ENABLE(x) ((x) << S_GC1_LRU_UERR_INT_ENABLE)
+#define F_GC1_LRU_UERR_INT_ENABLE V_GC1_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_INT_ENABLE 16
+#define V_GC0_LRU_UERR_INT_ENABLE(x) ((x) << S_GC0_LRU_UERR_INT_ENABLE)
+#define F_GC0_LRU_UERR_INT_ENABLE V_GC0_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_INT_ENABLE 15
+#define V_GC1_TAG_UERR_INT_ENABLE(x) ((x) << S_GC1_TAG_UERR_INT_ENABLE)
+#define F_GC1_TAG_UERR_INT_ENABLE V_GC1_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_INT_ENABLE 14
+#define V_GC0_TAG_UERR_INT_ENABLE(x) ((x) << S_GC0_TAG_UERR_INT_ENABLE)
+#define F_GC0_TAG_UERR_INT_ENABLE V_GC0_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_INT_ENABLE 13
+#define V_GC1_LRU_CERR_INT_ENABLE(x) ((x) << S_GC1_LRU_CERR_INT_ENABLE)
+#define F_GC1_LRU_CERR_INT_ENABLE V_GC1_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_INT_ENABLE 12
+#define V_GC0_LRU_CERR_INT_ENABLE(x) ((x) << S_GC0_LRU_CERR_INT_ENABLE)
+#define F_GC0_LRU_CERR_INT_ENABLE V_GC0_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_INT_ENABLE 11
+#define V_GC1_TAG_CERR_INT_ENABLE(x) ((x) << S_GC1_TAG_CERR_INT_ENABLE)
+#define F_GC1_TAG_CERR_INT_ENABLE V_GC1_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_INT_ENABLE 10
+#define V_GC0_TAG_CERR_INT_ENABLE(x) ((x) << S_GC0_TAG_CERR_INT_ENABLE)
+#define F_GC0_TAG_CERR_INT_ENABLE V_GC0_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC1_CE_INT_ENABLE 9
+#define V_GC1_CE_INT_ENABLE(x) ((x) << S_GC1_CE_INT_ENABLE)
+#define F_GC1_CE_INT_ENABLE V_GC1_CE_INT_ENABLE(1U)
+
+#define S_GC0_CE_INT_ENABLE 8
+#define V_GC0_CE_INT_ENABLE(x) ((x) << S_GC0_CE_INT_ENABLE)
+#define F_GC0_CE_INT_ENABLE V_GC0_CE_INT_ENABLE(1U)
+
+#define S_GC1_UE_INT_ENABLE 7
+#define V_GC1_UE_INT_ENABLE(x) ((x) << S_GC1_UE_INT_ENABLE)
+#define F_GC1_UE_INT_ENABLE V_GC1_UE_INT_ENABLE(1U)
+
+#define S_GC0_UE_INT_ENABLE 6
+#define V_GC0_UE_INT_ENABLE(x) ((x) << S_GC0_UE_INT_ENABLE)
+#define F_GC0_UE_INT_ENABLE V_GC0_UE_INT_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_INT_ENABLE 5
+#define V_GC1_CMD_PAR_INT_ENABLE(x) ((x) << S_GC1_CMD_PAR_INT_ENABLE)
+#define F_GC1_CMD_PAR_INT_ENABLE V_GC1_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_INT_ENABLE 4
+#define V_GC1_DATA_PAR_INT_ENABLE(x) ((x) << S_GC1_DATA_PAR_INT_ENABLE)
+#define F_GC1_DATA_PAR_INT_ENABLE V_GC1_DATA_PAR_INT_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_INT_ENABLE 3
+#define V_GC0_CMD_PAR_INT_ENABLE(x) ((x) << S_GC0_CMD_PAR_INT_ENABLE)
+#define F_GC0_CMD_PAR_INT_ENABLE V_GC0_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_INT_ENABLE 2
+#define V_GC0_DATA_PAR_INT_ENABLE(x) ((x) << S_GC0_DATA_PAR_INT_ENABLE)
+#define F_GC0_DATA_PAR_INT_ENABLE V_GC0_DATA_PAR_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_INT_ENABLE 1
+#define V_ILLADDRACCESS1_INT_ENABLE(x) ((x) << S_ILLADDRACCESS1_INT_ENABLE)
+#define F_ILLADDRACCESS1_INT_ENABLE V_ILLADDRACCESS1_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_INT_ENABLE 0
+#define V_ILLADDRACCESS0_INT_ENABLE(x) ((x) << S_ILLADDRACCESS0_INT_ENABLE)
+#define F_ILLADDRACCESS0_INT_ENABLE V_ILLADDRACCESS0_INT_ENABLE(1U)
+
+#define A_GCACHE_INT_CAUSE 0x51548
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_CAUSE V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_CAUSE V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_INT_CAUSE 19
+#define V_GC1_RSP_PERR_INT_CAUSE(x) ((x) << S_GC1_RSP_PERR_INT_CAUSE)
+#define F_GC1_RSP_PERR_INT_CAUSE V_GC1_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_INT_CAUSE 18
+#define V_GC0_RSP_PERR_INT_CAUSE(x) ((x) << S_GC0_RSP_PERR_INT_CAUSE)
+#define F_GC0_RSP_PERR_INT_CAUSE V_GC0_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_INT_CAUSE 17
+#define V_GC1_LRU_UERR_INT_CAUSE(x) ((x) << S_GC1_LRU_UERR_INT_CAUSE)
+#define F_GC1_LRU_UERR_INT_CAUSE V_GC1_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_INT_CAUSE 16
+#define V_GC0_LRU_UERR_INT_CAUSE(x) ((x) << S_GC0_LRU_UERR_INT_CAUSE)
+#define F_GC0_LRU_UERR_INT_CAUSE V_GC0_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_INT_CAUSE 15
+#define V_GC1_TAG_UERR_INT_CAUSE(x) ((x) << S_GC1_TAG_UERR_INT_CAUSE)
+#define F_GC1_TAG_UERR_INT_CAUSE V_GC1_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_INT_CAUSE 14
+#define V_GC0_TAG_UERR_INT_CAUSE(x) ((x) << S_GC0_TAG_UERR_INT_CAUSE)
+#define F_GC0_TAG_UERR_INT_CAUSE V_GC0_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_INT_CAUSE 13
+#define V_GC1_LRU_CERR_INT_CAUSE(x) ((x) << S_GC1_LRU_CERR_INT_CAUSE)
+#define F_GC1_LRU_CERR_INT_CAUSE V_GC1_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_INT_CAUSE 12
+#define V_GC0_LRU_CERR_INT_CAUSE(x) ((x) << S_GC0_LRU_CERR_INT_CAUSE)
+#define F_GC0_LRU_CERR_INT_CAUSE V_GC0_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_INT_CAUSE 11
+#define V_GC1_TAG_CERR_INT_CAUSE(x) ((x) << S_GC1_TAG_CERR_INT_CAUSE)
+#define F_GC1_TAG_CERR_INT_CAUSE V_GC1_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_INT_CAUSE 10
+#define V_GC0_TAG_CERR_INT_CAUSE(x) ((x) << S_GC0_TAG_CERR_INT_CAUSE)
+#define F_GC0_TAG_CERR_INT_CAUSE V_GC0_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC1_CE_INT_CAUSE 9
+#define V_GC1_CE_INT_CAUSE(x) ((x) << S_GC1_CE_INT_CAUSE)
+#define F_GC1_CE_INT_CAUSE V_GC1_CE_INT_CAUSE(1U)
+
+#define S_GC0_CE_INT_CAUSE 8
+#define V_GC0_CE_INT_CAUSE(x) ((x) << S_GC0_CE_INT_CAUSE)
+#define F_GC0_CE_INT_CAUSE V_GC0_CE_INT_CAUSE(1U)
+
+#define S_GC1_UE_INT_CAUSE 7
+#define V_GC1_UE_INT_CAUSE(x) ((x) << S_GC1_UE_INT_CAUSE)
+#define F_GC1_UE_INT_CAUSE V_GC1_UE_INT_CAUSE(1U)
+
+#define S_GC0_UE_INT_CAUSE 6
+#define V_GC0_UE_INT_CAUSE(x) ((x) << S_GC0_UE_INT_CAUSE)
+#define F_GC0_UE_INT_CAUSE V_GC0_UE_INT_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_INT_CAUSE 5
+#define V_GC1_CMD_PAR_INT_CAUSE(x) ((x) << S_GC1_CMD_PAR_INT_CAUSE)
+#define F_GC1_CMD_PAR_INT_CAUSE V_GC1_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_INT_CAUSE 4
+#define V_GC1_DATA_PAR_INT_CAUSE(x) ((x) << S_GC1_DATA_PAR_INT_CAUSE)
+#define F_GC1_DATA_PAR_INT_CAUSE V_GC1_DATA_PAR_INT_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_INT_CAUSE 3
+#define V_GC0_CMD_PAR_INT_CAUSE(x) ((x) << S_GC0_CMD_PAR_INT_CAUSE)
+#define F_GC0_CMD_PAR_INT_CAUSE V_GC0_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_INT_CAUSE 2
+#define V_GC0_DATA_PAR_INT_CAUSE(x) ((x) << S_GC0_DATA_PAR_INT_CAUSE)
+#define F_GC0_DATA_PAR_INT_CAUSE V_GC0_DATA_PAR_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_INT_CAUSE 1
+#define V_ILLADDRACCESS1_INT_CAUSE(x) ((x) << S_ILLADDRACCESS1_INT_CAUSE)
+#define F_ILLADDRACCESS1_INT_CAUSE V_ILLADDRACCESS1_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_INT_CAUSE 0
+#define V_ILLADDRACCESS0_INT_CAUSE(x) ((x) << S_ILLADDRACCESS0_INT_CAUSE)
+#define F_ILLADDRACCESS0_INT_CAUSE V_ILLADDRACCESS0_INT_CAUSE(1U)
+
+#define A_GCACHE_DBG_SEL_CTRL 0x51550
+
+#define S_DBG_SEL_CTRLSEL_OVR_EN 31
+#define V_DBG_SEL_CTRLSEL_OVR_EN(x) ((x) << S_DBG_SEL_CTRLSEL_OVR_EN)
+#define F_DBG_SEL_CTRLSEL_OVR_EN V_DBG_SEL_CTRLSEL_OVR_EN(1U)
+
+#define S_T7_DEBUG_HI 16
+#define V_T7_DEBUG_HI(x) ((x) << S_T7_DEBUG_HI)
+#define F_T7_DEBUG_HI V_T7_DEBUG_HI(1U)
+
+#define S_DBG_SEL_CTRLSELH 8
+#define M_DBG_SEL_CTRLSELH 0xffU
+#define V_DBG_SEL_CTRLSELH(x) ((x) << S_DBG_SEL_CTRLSELH)
+#define G_DBG_SEL_CTRLSELH(x) (((x) >> S_DBG_SEL_CTRLSELH) & M_DBG_SEL_CTRLSELH)
+
+#define S_DBG_SEL_CTRLSELL 0
+#define M_DBG_SEL_CTRLSELL 0xffU
+#define V_DBG_SEL_CTRLSELL(x) ((x) << S_DBG_SEL_CTRLSELL)
+#define G_DBG_SEL_CTRLSELL(x) (((x) >> S_DBG_SEL_CTRLSELL) & M_DBG_SEL_CTRLSELL)
+
+#define A_GCACHE_LOCAL_DEBUG_RPT 0x51554
+#define A_GCACHE_DBG_ILL_ACC 0x5155c
+#define A_GCACHE_DBG_ILL_ADDR0 0x51560
+#define A_GCACHE_DBG_ILL_ADDR1 0x51564
+#define A_GCACHE_GC0_DBG_ADDR_0_32 0x51568
+#define A_GCACHE_GC0_DBG_ADDR_32_32 0x5156c
+#define A_GCACHE_GC0_DBG_ADDR_64_32 0x51570
+#define A_GCACHE_GC0_DBG_ADDR_96_32 0x51574
+#define A_GCACHE_GC0_DBG_ADDR_0_64 0x51578
+#define A_GCACHE_GC0_DBG_ADDR_64_64 0x5157c
+#define A_GCACHE_GC0_DBG_ADDR_0_96 0x51580
+#define A_GCACHE_GC0_DBG_ADDR_32_96 0x51584
+#define A_GCACHE_GC1_DBG_ADDR_0_32 0x5158c
+#define A_GCACHE_GC1_DBG_ADDR_32_32 0x51590
+#define A_GCACHE_GC1_DBG_ADDR_64_32 0x51594
+#define A_GCACHE_GC1_DBG_ADDR_96_32 0x51598
+#define A_GCACHE_GC1_DBG_ADDR_0_64 0x5159c
+#define A_GCACHE_GC1_DBG_ADDR_64_64 0x515a0
+#define A_GCACHE_GC1_DBG_ADDR_0_96 0x515a4
+#define A_GCACHE_GC1_DBG_ADDR_32_96 0x515a8
+#define A_GCACHE_GC0_DBG_ADDR_32_64 0x515ac
+#define A_GCACHE_GC1_DBG_ADDR_32_64 0x515b0
+#define A_GCACHE_PERF_GC0_EVICT 0x515b4
+#define A_GCACHE_PERF_GC1_EVICT 0x515b8
+#define A_GCACHE_PERF_GC0_CE_COUNT 0x515bc
+#define A_GCACHE_PERF_GC1_CE_COUNT 0x515c0
+#define A_GCACHE_PERF_GC0_UE_COUNT 0x515c4
+#define A_GCACHE_PERF_GC1_UE_COUNT 0x515c8
+#define A_GCACHE_DBG_CTL 0x515f0
+#define A_GCACHE_DBG_DATA 0x515f4
diff --git a/sys/dev/cxgbe/common/t4_regs_values.h b/sys/dev/cxgbe/common/t4_regs_values.h
index 830828097802..6485fa50bd08 100644
--- a/sys/dev/cxgbe/common/t4_regs_values.h
+++ b/sys/dev/cxgbe/common/t4_regs_values.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -269,6 +268,7 @@
#define X_WINDOW_SHIFT 10
#define X_PCIEOFST_SHIFT 10
+#define X_T7_MEMOFST_SHIFT 4
/*
* TP definitions.
@@ -284,6 +284,10 @@
#define S_FT_FIRST S_FCOE
#define S_FT_LAST S_FRAGMENTATION
+#define S_T7_FT_FIRST S_IPSECIDX
+#define S_T7_FT_LAST S_TCPFLAGS
+
+#define W_FT_IPSECIDX 12
#define W_FT_FCOE 1
#define W_FT_PORT 3
#define W_FT_VNIC_ID 17
@@ -294,17 +298,9 @@
#define W_FT_MACMATCH 9
#define W_FT_MPSHITTYPE 3
#define W_FT_FRAGMENTATION 1
-
-#define M_FT_FCOE ((1ULL << W_FT_FCOE) - 1)
-#define M_FT_PORT ((1ULL << W_FT_PORT) - 1)
-#define M_FT_VNIC_ID ((1ULL << W_FT_VNIC_ID) - 1)
-#define M_FT_VLAN ((1ULL << W_FT_VLAN) - 1)
-#define M_FT_TOS ((1ULL << W_FT_TOS) - 1)
-#define M_FT_PROTOCOL ((1ULL << W_FT_PROTOCOL) - 1)
-#define M_FT_ETHERTYPE ((1ULL << W_FT_ETHERTYPE) - 1)
-#define M_FT_MACMATCH ((1ULL << W_FT_MACMATCH) - 1)
-#define M_FT_MPSHITTYPE ((1ULL << W_FT_MPSHITTYPE) - 1)
-#define M_FT_FRAGMENTATION ((1ULL << W_FT_FRAGMENTATION) - 1)
+#define W_FT_ROCE 1
+#define W_FT_SYNONLY 1
+#define W_FT_TCPFLAGS 12
/*
* Some of the Compressed Filter Tuple fields have internal structure. These
@@ -327,6 +323,6 @@
#define S_FT_VNID_ID_VLD 16
#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
-#define F_FT_VNID_ID_VLD(x) V_FT_VNID_ID_VLD(1U)
+#define F_FT_VNID_ID_VLD V_FT_VNID_ID_VLD(1U)
#endif /* __T4_REGS_VALUES_H__ */
diff --git a/sys/dev/cxgbe/common/t4_tcb.h b/sys/dev/cxgbe/common/t4_tcb.h
index f9631ba58418..8bff15f04e7a 100644
--- a/sys/dev/cxgbe/common/t4_tcb.h
+++ b/sys/dev/cxgbe/common/t4_tcb.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -340,10 +339,9 @@
/* 1023:1020 */
#define W_TCB_ULP_EXT 31
-#define S_TCP_ULP_EXT 28
+#define S_TCB_ULP_EXT 28
#define M_TCB_ULP_EXT 0xfULL
-#define V_TCB_ULP_EXT(x) ((x) << S_TCP_ULP_EXT)
-
+#define V_TCB_ULP_EXT(x) ((x) << S_TCB_ULP_EXT)
/* 840:832 */
#define W_TCB_IRS_ULP 26
@@ -495,31 +493,31 @@
#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-/* 855:832 */
+/* 855:832 */
#define W_TCB_RX_TLS_BUF_OFFSET 26
#define S_TCB_RX_TLS_BUF_OFFSET 0
#define M_TCB_RX_TLS_BUF_OFFSET 0xffffffULL
#define V_TCB_RX_TLS_BUF_OFFSET(x) ((x) << S_TCB_RX_TLS_BUF_OFFSET)
-/* 876:856 */
+/* 879:856 */
#define W_TCB_RX_TLS_BUF_LEN 26
#define S_TCB_RX_TLS_BUF_LEN 24
#define M_TCB_RX_TLS_BUF_LEN 0xffffffULL
#define V_TCB_RX_TLS_BUF_LEN(x) ((__u64)(x) << S_TCB_RX_TLS_BUF_LEN)
-/* 895:880 */
-#define W_TCB_RX_TLS_FLAGS 26
-#define S_TCB_RX_TLS_FLAGS 48
+/* 895:880 */
+#define W_TCB_RX_TLS_FLAGS 27
+#define S_TCB_RX_TLS_FLAGS 16
#define M_TCB_RX_TLS_FLAGS 0xffffULL
#define V_TCB_RX_TLS_FLAGS(x) ((__u64)(x) << S_TCB_RX_TLS_FLAGS)
-/* 959:896 */
-#define W_TCB_TLS_SEQ 28
-#define S_TCB_TLS_SEQ 0
-#define M_TCB_TLS_SEQ 0xffffffffffffffffULL
-#define V_TCB_TLS_SEQ(x) ((__u64)(x) << S_TCB_TLS_SEQ)
+/* 959:896 */
+#define W_TCB_RX_TLS_SEQ 28
+#define S_TCB_RX_TLS_SEQ 0
+#define M_TCB_RX_TLS_SEQ 0xffffffffffffffffULL
+#define V_TCB_RX_TLS_SEQ(x) ((__u64)(x) << S_TCB_RX_TLS_SEQ)
-/* 991:960 */
+/* 991:960 */
#define W_TCB_RX_TLS_BUF_TAG 30
#define S_TCB_RX_TLS_BUF_TAG 0
#define M_TCB_RX_TLS_BUF_TAG 0xffffffffULL
@@ -531,17 +529,113 @@
#define M_TCB_RX_TLS_KEY_TAG 0xffffffffULL
#define V_TCB_RX_TLS_KEY_TAG(x) ((x) << S_TCB_RX_TLS_KEY_TAG)
+#define S_TF_TLS_ENABLE 0
+#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+
+#define S_TF_TLS_ACTIVE 1
+#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+
+#define S_TF_TLS_CONTROL 2
+#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+
#define S_TF_TLS_KEY_SIZE 7
#define V_TF_TLS_KEY_SIZE(x) ((x) << S_TF_TLS_KEY_SIZE)
-#define S_TF_TLS_CONTROL 2
-#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+/* 853:832 */
+#define W_TCB_TPT_OFFSET 26
+#define S_TCB_TPT_OFFSET 0
+#define M_TCB_TPT_OFFSET 0x3fffffULL
+#define V_TCB_TPT_OFFSET(x) ((x) << S_TCB_TPT_OFFSET)
+
+/* 863:854 */
+#define W_TCB_T10_CONFIG 26
+#define S_TCB_T10_CONFIG 22
+#define M_TCB_T10_CONFIG 0x3ffULL
+#define V_TCB_T10_CONFIG(x) ((x) << S_TCB_T10_CONFIG)
+
+/* 871:864 */
+#define W_TCB_PDU_HLEN 27
+#define S_TCB_PDU_HLEN 0
+#define M_TCB_PDU_HLEN 0xffULL
+#define V_TCB_PDU_HLEN(x) ((x) << S_TCB_PDU_HLEN)
+
+/* 879:872 */
+#define W_TCB_PDU_PDO 27
+#define S_TCB_PDU_PDO 8
+#define M_TCB_PDU_PDO 0xffULL
+#define V_TCB_PDU_PDO(x) ((x) << S_TCB_PDU_PDO)
-#define S_TF_TLS_ACTIVE 1
-#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+/* 895:880 */
+#define W_TCB_N_CQ_IDX_RQ 27
+#define S_TCB_N_CQ_IDX_RQ 16
+#define M_TCB_N_CQ_IDX_RQ 0xffffULL
+#define V_TCB_N_CQ_IDX_RQ(x) ((x) << S_TCB_N_CQ_IDX_RQ)
+
+/* 900:896 */
+#define W_TCB_NVMT_PDA 28
+#define S_TCB_NVMT_PDA 0
+#define M_TCB_NVMT_PDA 0x1fULL
+#define V_TCB_NVMT_PDA(x) ((x) << S_TCB_NVMT_PDA)
+
+/* 911:901 */
+#define W_TCB_RSVD 28
+#define S_TCB_RSVD 5
+#define M_TCB_RSVD 0x7ffULL
+#define V_TCB_RSVD(x) ((x) << S_TCB_RSVD)
-#define S_TF_TLS_ENABLE 0
-#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+/* 927:912 */
+#define W_TCB_N_PD_ID 28
+#define S_TCB_N_PD_ID 16
+#define M_TCB_N_PD_ID 0xffffULL
+#define V_TCB_N_PD_ID(x) ((x) << S_TCB_N_PD_ID)
+
+/* 929:928 */
+#define W_TCB_CMP_IMM_SZ 29
+#define S_TCB_CMP_IMM_SZ 0
+#define M_TCB_CMP_IMM_SZ 0x3ULL
+#define V_TCB_CMP_IMM_SZ(x) ((x) << S_TCB_CMP_IMM_SZ)
+
+/* 931:930 */
+#define W_TCB_PDU_DGST_FLAGS 29
+#define S_TCB_PDU_DGST_FLAGS 2
+#define M_TCB_PDU_DGST_FLAGS 0x3ULL
+#define V_TCB_PDU_DGST_FLAGS(x) ((x) << S_TCB_PDU_DGST_FLAGS)
+
+/* 959:932 */
+#define W_TCB_RSVD1 29
+#define S_TCB_RSVD1 4
+#define M_TCB_RSVD1 0xfffffffULL
+#define V_TCB_RSVD1(x) ((x) << S_TCB_RSVD1)
+
+/* 985:960 */
+#define W_TCB_N_RQ_START 30
+#define S_TCB_N_RQ_START 0
+#define M_TCB_N_RQ_START 0x3ffffffULL
+#define V_TCB_N_RQ_START(x) ((x) << S_TCB_N_RQ_START)
+
+/* 998:986 */
+#define W_TCB_N_RQ_MSN 30
+#define S_TCB_N_RQ_MSN 26
+#define M_TCB_N_RQ_MSN 0x1fffULL
+#define V_TCB_N_RQ_MSN(x) ((__u64)(x) << S_TCB_N_RQ_MSN)
+
+/* 1002:999 */
+#define W_TCB_N_RQ_MAX_OFFSET 31
+#define S_TCB_N_RQ_MAX_OFFSET 7
+#define M_TCB_N_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_N_RQ_MAX_OFFSET(x) ((x) << S_TCB_N_RQ_MAX_OFFSET)
+
+/* 1015:1003 */
+#define W_TCB_N_RQ_WRITE_PTR 31
+#define S_TCB_N_RQ_WRITE_PTR 11
+#define M_TCB_N_RQ_WRITE_PTR 0x1fffULL
+#define V_TCB_N_RQ_WRITE_PTR(x) ((x) << S_TCB_N_RQ_WRITE_PTR)
+
+/* 1023:1016 */
+#define W_TCB_N_PDU_TYPE 31
+#define S_TCB_N_PDU_TYPE 24
+#define M_TCB_N_PDU_TYPE 0xffULL
+#define V_TCB_N_PDU_TYPE(x) ((x) << S_TCB_N_PDU_TYPE)
#define S_TF_MIGRATING 0
#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
@@ -549,15 +643,24 @@
#define S_TF_NON_OFFLOAD 1
#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+#define S_TF_FILTER 1
+#define V_TF_FILTER(x) ((x) << S_TF_FILTER)
+
#define S_TF_LOCK_TID 2
#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
#define S_TF_KEEPALIVE 3
#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+#define S_TF_DROP_ENCAPS_HDR 3
+#define V_TF_DROP_ENCAPS_HDR(x) ((x) << S_TF_DROP_ENCAPS_HDR)
+
#define S_TF_DACK 4
#define V_TF_DACK(x) ((x) << S_TF_DACK)
+#define S_TF_COUNT_HITS 4
+#define V_TF_COUNT_HITS(x) ((x) << S_TF_COUNT_HITS)
+
#define S_TF_DACK_MSS 5
#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
@@ -567,6 +670,9 @@
#define S_TF_NAGLE 7
#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+#define S_TF_REMOVE_VLAN 7
+#define V_TF_REMOVE_VLAN(x) ((x) << S_TF_REMOVE_VLAN)
+
#define S_TF_SSWS_DISABLED 8
#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
@@ -576,15 +682,24 @@
#define S_TF_RX_FLOW_CONTROL_DISABLE 10
#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+#define S_TF_NAT_SEQ_CHECK 10
+#define V_TF_NAT_SEQ_CHECK(x) ((x) << S_TF_NAT_SEQ_CHECK)
+
#define S_TF_RX_CHANNEL 11
#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
#define S_TF_TX_CHANNEL0 12
#define V_TF_TX_CHANNEL0(x) ((x) << S_TF_TX_CHANNEL0)
+#define S_TF_LPBK_TX_CHANNEL0 12
+#define V_TF_LPBK_TX_CHANNEL0(x) ((x) << S_TF_LPBK_TX_CHANNEL0)
+
#define S_TF_TX_CHANNEL1 13
#define V_TF_TX_CHANNEL1(x) ((x) << S_TF_TX_CHANNEL1)
+#define S_TF_LPBK_TX_CHANNEL1 13
+#define V_TF_LPBK_TX_CHANNEL1(x) ((x) << S_TF_LPBK_TX_CHANNEL1)
+
#define S_TF_TX_QUIESCE 14
#define V_TF_TX_QUIESCE(x) ((x) << S_TF_TX_QUIESCE)
@@ -607,6 +722,10 @@
#define M_TF_TX_QUEUE 0x7ULL
#define V_TF_TX_QUEUE(x) ((x) << S_TF_TX_QUEUE)
+#define S_TF_NAT_MODE 18
+#define M_TF_NAT_MODE 0x7ULL
+#define V_TF_NAT_MODE(x) ((x) << S_TF_NAT_MODE)
+
#define S_TF_TURBO 21
#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
@@ -652,8 +771,8 @@
#define S_TF_RCV_COALESCE_HEARTBEAT 32
#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((__u64)(x) << S_TF_RCV_COALESCE_HEARTBEAT)
-#define S_TF_INIT 33
-#define V_TF_INIT(x) ((__u64)(x) << S_TF_INIT)
+#define S_TF_RSS_FW 33
+#define V_TF_RSS_FW(x) ((__u64)(x) << S_TF_RSS_FW)
#define S_TF_ACTIVE_OPEN 34
#define V_TF_ACTIVE_OPEN(x) ((__u64)(x) << S_TF_ACTIVE_OPEN)
@@ -712,12 +831,21 @@
#define S_TF_RECV_SCALE 52
#define V_TF_RECV_SCALE(x) ((__u64)(x) << S_TF_RECV_SCALE)
+#define S_TF_NAT_FLAG_CHECK 52
+#define V_TF_NAT_FLAG_CHECK(x) ((__u64)(x) << S_TF_NAT_FLAG_CHECK)
+
#define S_TF_RECV_TSTMP 53
#define V_TF_RECV_TSTMP(x) ((__u64)(x) << S_TF_RECV_TSTMP)
+#define S_TF_LPBK_TX_LPBK 53
+#define V_TF_LPBK_TX_LPBK(x) ((__u64)(x) << S_TF_LPBK_TX_LPBK)
+
#define S_TF_RECV_SACK 54
#define V_TF_RECV_SACK(x) ((__u64)(x) << S_TF_RECV_SACK)
+#define S_TF_SWAP_MAC_ADDR 54
+#define V_TF_SWAP_MAC_ADDR(x) ((__u64)(x) << S_TF_SWAP_MAC_ADDR)
+
#define S_TF_PEND_CTL0 55
#define V_TF_PEND_CTL0(x) ((__u64)(x) << S_TF_PEND_CTL0)
@@ -751,6 +879,9 @@
#define S_TF_CCTRL_RFR 62
#define V_TF_CCTRL_RFR(x) ((__u64)(x) << S_TF_CCTRL_RFR)
+#define S_TF_INSERT_VLAN 62
+#define V_TF_INSERT_VLAN(x) ((__u64)(x) << S_TF_INSERT_VLAN)
+
#define S_TF_CORE_BYPASS 63
#define V_TF_CORE_BYPASS(x) ((__u64)(x) << S_TF_CORE_BYPASS)
@@ -772,6 +903,9 @@
#define S_TF_DDP_RX2TX 21
#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+#define S_TF_DDP_INDICATE_FLL 22
+#define V_TF_DDP_INDICATE_FLL(x) ((x) << S_TF_DDP_INDICATE_FLL)
+
#define S_TF_DDP_BUF0_VALID 24
#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c
index 2c83b10b13d6..80e31b1159fd 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.c
+++ b/sys/dev/cxgbe/crypto/t4_crypto.c
@@ -208,6 +208,7 @@ struct ccr_softc {
counter_u64_t stats_pad_error;
counter_u64_t stats_sglist_error;
counter_u64_t stats_process_error;
+ counter_u64_t stats_pointer_error;
counter_u64_t stats_sw_fallback;
struct sysctl_ctx_list ctx;
@@ -458,8 +459,9 @@ ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DATAMODIFY(0) |
- V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
+ V_T7_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
V_ULP_TXPKT_DEST(0) |
+ (is_t7(sc->adapter) ? V_ULP_TXPKT_CMDMORE(1) : 0) |
V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
crwr->ulptx.len = htobe32(
((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
@@ -545,7 +547,7 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
@@ -705,7 +707,7 @@ ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1006,7 +1008,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1293,7 +1295,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1645,7 +1647,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1883,6 +1885,9 @@ ccr_sysctls(struct ccr_softc *sc)
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
CTLFLAG_RD, &sc->stats_process_error,
"Requests failed during queueing");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pointer_error",
+ CTLFLAG_RD, &sc->stats_pointer_error,
+ "Requests with a misaligned request pointer");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
CTLFLAG_RD, &sc->stats_sw_fallback,
"Requests processed by falling back to software");
@@ -1932,13 +1937,15 @@ ccr_init_port(struct ccr_softc *sc, int port)
"Too many ports to fit in port_mask");
/*
- * Completions for crypto requests on port 1 can sometimes
+ * Completions for crypto requests on port 1 on T6 can sometimes
* return a stale cookie value due to a firmware bug. Disable
* requests on port 1 by default on affected firmware.
*/
- if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
- port == 0)
- sc->port_mask |= 1u << port;
+ if (port != 0 && is_t6(sc->adapter) &&
+ sc->adapter->params.fw_vers < FW_VERSION32(1, 25, 4, 0))
+ return;
+
+ sc->port_mask |= 1u << port;
}
static int
@@ -1988,6 +1995,7 @@ ccr_attach(device_t dev)
sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
sc->stats_process_error = counter_u64_alloc(M_WAITOK);
+ sc->stats_pointer_error = counter_u64_alloc(M_WAITOK);
sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
ccr_sysctls(sc);
@@ -2034,6 +2042,7 @@ ccr_detach(device_t dev)
counter_u64_free(sc->stats_pad_error);
counter_u64_free(sc->stats_sglist_error);
counter_u64_free(sc->stats_process_error);
+ counter_u64_free(sc->stats_pointer_error);
counter_u64_free(sc->stats_sw_fallback);
for_each_port(sc->adapter, i) {
ccr_free_port(sc, i);
@@ -2531,6 +2540,16 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
+ /*
+ * Request pointers with the low bit set in the pointer can't
+ * be stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)crp & CPL_FW6_COOKIE_MASK) != 0) {
+ counter_u64_add(sc->stats_pointer_error, 1);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+
mtx_lock(&s->lock);
error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
@@ -2637,6 +2656,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
out:
mtx_unlock(&s->lock);
+out_unlocked:
if (error) {
crp->crp_etype = error;
crypto_done(crp);
@@ -2646,7 +2666,7 @@ out:
}
static int
-do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
+fw6_pld_ccr(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct ccr_softc *sc;
@@ -2661,7 +2681,7 @@ do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
else
cpl = (const void *)(rss + 1);
- crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
+ crp = (struct cryptop *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
s = crypto_get_driver_session(crp->crp_session);
status = be64toh(cpl->data[0]);
if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
@@ -2715,10 +2735,12 @@ ccr_modevent(module_t mod, int cmd, void *arg)
switch (cmd) {
case MOD_LOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, fw6_pld_ccr,
+ CPL_FW6_COOKIE_CCR);
return (0);
case MOD_UNLOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, NULL);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL,
+ CPL_FW6_COOKIE_CCR);
return (0);
default:
return (EOPNOTSUPP);
@@ -2745,7 +2767,9 @@ static driver_t ccr_driver = {
sizeof(struct ccr_softc)
};
-DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, chnex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, t6nex, ccr_driver, NULL, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
+MODULE_DEPEND(ccr, chnex, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.h b/sys/dev/cxgbe/crypto/t4_crypto.h
index 452e48d20dfd..71c9ec3903ef 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.h
+++ b/sys/dev/cxgbe/crypto/t4_crypto.h
@@ -139,6 +139,7 @@ struct phys_sge_pairs {
#define SCMD_PROTO_VERSION_TLS_1_2 0
#define SCMD_PROTO_VERSION_TLS_1_1 1
#define SCMD_PROTO_VERSION_GENERIC 4
+#define SCMD_PROTO_VERSION_TLS_1_3 8
#define SCMD_CIPH_MODE_NOP 0
#define SCMD_CIPH_MODE_AES_CBC 1
diff --git a/sys/dev/cxgbe/crypto/t4_keyctx.c b/sys/dev/cxgbe/crypto/t4_keyctx.c
index 50e339ac2e05..b85e50fd6cb1 100644
--- a/sys/dev/cxgbe/crypto/t4_keyctx.c
+++ b/sys/dev/cxgbe/crypto/t4_keyctx.c
@@ -437,10 +437,16 @@ t4_tls_key_info_size(const struct ktls_session *tls)
int
t4_tls_proto_ver(const struct ktls_session *tls)
{
- if (tls->params.tls_vminor == TLS_MINOR_VER_ONE)
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_ONE:
return (SCMD_PROTO_VERSION_TLS_1_1);
- else
+ case TLS_MINOR_VER_TWO:
return (SCMD_PROTO_VERSION_TLS_1_2);
+ case TLS_MINOR_VER_THREE:
+ return (SCMD_PROTO_VERSION_TLS_1_3);
+ default:
+ __assert_unreachable();
+ }
}
int
@@ -492,6 +498,17 @@ t4_tls_hmac_ctrl(const struct ktls_session *tls)
}
static int
+tls_seqnum_ctrl(const struct ktls_session *tls)
+{
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_THREE:
+ return (0);
+ default:
+ return (3);
+ }
+}
+
+static int
tls_cipher_key_size(const struct ktls_session *tls)
{
switch (tls->params.cipher_key_len) {
@@ -557,7 +574,7 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
kctx->u.rxhdr.authmode_to_rxvalid =
V_TLS_KEYCTX_TX_WR_AUTHMODE(t4_tls_auth_mode(tls)) |
- V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
+ V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(tls_seqnum_ctrl(tls)) |
V_TLS_KEYCTX_TX_WR_RXVALID(1);
kctx->u.rxhdr.ivpresent_to_rxmk_size =
@@ -607,7 +624,8 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
_Static_assert(offsetof(struct tx_keyctx_hdr, txsalt) ==
offsetof(struct rx_keyctx_hdr, rxsalt),
"salt offset mismatch");
- memcpy(kctx->u.txhdr.txsalt, tls->params.iv, SALT_SIZE);
+ memcpy(kctx->u.txhdr.txsalt, tls->params.iv,
+ tls->params.iv_len);
t4_init_gmac_hash(tls->params.cipher_key,
tls->params.cipher_key_len, hash);
} else {
@@ -665,6 +683,10 @@ t4_write_tlskey_wr(const struct ktls_session *tls, int direction, int tid,
kwr->reneg_to_write_rx = V_KEY_GET_LOC(direction == KTLS_TX ?
KEY_WRITE_TX : KEY_WRITE_RX);
+ /* We don't need to use V_T7_ULP_MEMIO_DATA_LEN in this routine. */
+ _Static_assert(V_T7_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5) ==
+ V_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5), "datalen mismatch");
+
/* master command */
kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
diff --git a/sys/dev/cxgbe/crypto/t6_kern_tls.c b/sys/dev/cxgbe/crypto/t6_kern_tls.c
index 04bb6c944050..454b2e264a0e 100644
--- a/sys/dev/cxgbe/crypto/t6_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t6_kern_tls.c
@@ -2003,7 +2003,7 @@ t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
if (tlsp->l2te)
t4_l2t_release(tlsp->l2te);
tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
- vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
+ vlan_tag, tlsp->vi->pi->hw_port, eh->ether_dhost);
if (tlsp->l2te == NULL)
CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
if (ndesc != 0) {
diff --git a/sys/dev/cxgbe/crypto/t7_kern_tls.c b/sys/dev/cxgbe/crypto/t7_kern_tls.c
new file mode 100644
index 000000000000..217459126361
--- /dev/null
+++ b/sys/dev/cxgbe/crypto/t7_kern_tls.c
@@ -0,0 +1,2196 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Chelsio Communications
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_kern_tls.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/ktr.h>
+#include <sys/ktls.h>
+#include <sys/sglist.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockbuf.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp_var.h>
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_tcb.h"
+#include "t4_l2t.h"
+#include "t4_clip.h"
+#include "t4_mp_ring.h"
+#include "crypto/t4_crypto.h"
+
+#if defined(INET) || defined(INET6)
+
+#define TLS_HEADER_LENGTH 5
+
+struct tls_scmd {
+ __be32 seqno_numivs;
+ __be32 ivgen_hdrlen;
+};
+
+struct tlspcb {
+ struct m_snd_tag com;
+ struct vi_info *vi; /* virtual interface */
+ struct adapter *sc;
+ struct sge_txq *txq;
+
+ int tx_key_addr;
+ bool inline_key;
+ bool tls13;
+ unsigned char enc_mode;
+
+ struct tls_scmd scmd0;
+ struct tls_scmd scmd0_partial;
+ struct tls_scmd scmd0_short;
+
+ unsigned int tx_key_info_size;
+
+ uint16_t prev_mss;
+
+ /* Fields used for GCM records using GHASH state. */
+ uint16_t ghash_offset;
+ uint64_t ghash_tls_seqno;
+ char ghash[AES_GMAC_HASH_LEN];
+ bool ghash_valid;
+ bool ghash_pending;
+ bool ghash_lcb;
+ bool queue_mbufs;
+ uint8_t rx_chid;
+ uint16_t rx_qid;
+ struct mbufq pending_mbufs;
+
+ /*
+ * Only used outside of setup and teardown when using inline
+ * keys or for partial GCM mode.
+ */
+ struct tls_keyctx keyctx;
+};
+
+static void t7_tls_tag_free(struct m_snd_tag *mst);
+static int ktls_setup_keys(struct tlspcb *tlsp,
+ const struct ktls_session *tls, struct sge_txq *txq);
+
+static void *zero_buffer;
+static vm_paddr_t zero_buffer_pa;
+
+static const struct if_snd_tag_sw t7_tls_tag_sw = {
+ .snd_tag_free = t7_tls_tag_free,
+ .type = IF_SND_TAG_TYPE_TLS
+};
+
+static inline struct tlspcb *
+mst_to_tls(struct m_snd_tag *t)
+{
+ return (__containerof(t, struct tlspcb, com));
+}
+
+static struct tlspcb *
+alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
+{
+ struct port_info *pi = vi->pi;
+ struct adapter *sc = pi->adapter;
+ struct tlspcb *tlsp;
+
+ tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
+ if (tlsp == NULL)
+ return (NULL);
+
+ m_snd_tag_init(&tlsp->com, ifp, &t7_tls_tag_sw);
+ tlsp->vi = vi;
+ tlsp->sc = sc;
+ tlsp->tx_key_addr = -1;
+ tlsp->ghash_offset = -1;
+ tlsp->rx_chid = pi->rx_chan;
+ tlsp->rx_qid = sc->sge.rxq[pi->vi->first_rxq].iq.abs_id;
+ mbufq_init(&tlsp->pending_mbufs, INT_MAX);
+
+ return (tlsp);
+}
+
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ const struct ktls_session *tls;
+ struct tlspcb *tlsp;
+ struct adapter *sc;
+ struct vi_info *vi;
+ struct inpcb *inp;
+ struct sge_txq *txq;
+ int error, iv_size, keyid, mac_first;
+
+ tls = params->tls.tls;
+
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE)
+ return (EPROTONOSUPPORT);
+
+ /* Sanity check values in *tls. */
+ switch (tls->params.cipher_algorithm) {
+ case CRYPTO_AES_CBC:
+ /* XXX: Explicitly ignore any provided IV. */
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+ switch (tls->params.auth_algorithm) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ iv_size = AES_BLOCK_LEN;
+ mac_first = 1;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ /*
+ * The IV size for TLS 1.2 is the explicit IV in the
+ * record header. For TLS 1.3 it is the size of the
+ * sequence number.
+ */
+ iv_size = 8;
+ mac_first = 0;
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+
+ vi = if_getsoftc(ifp);
+ sc = vi->adapter;
+
+ tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);
+
+ /*
+ * Pointers with the low bit set in the pointer can't
+ * be stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)tlsp & CPL_FW6_COOKIE_MASK) != 0) {
+ error = EINVAL;
+ goto failed;
+ }
+
+ tlsp->tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
+
+ if (sc->tlst.inline_keys)
+ keyid = -1;
+ else
+ keyid = t4_alloc_tls_keyid(sc);
+ if (keyid < 0) {
+ CTR(KTR_CXGBE, "%s: %p using immediate key ctx", __func__,
+ tlsp);
+ tlsp->inline_key = true;
+ } else {
+ tlsp->tx_key_addr = keyid;
+ CTR(KTR_CXGBE, "%s: %p allocated TX key addr %#x", __func__,
+ tlsp, tlsp->tx_key_addr);
+ }
+
+ inp = params->tls.inp;
+ INP_RLOCK(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ INP_RUNLOCK(inp);
+ error = ECONNRESET;
+ goto failed;
+ }
+
+ txq = &sc->sge.txq[vi->first_txq];
+ if (inp->inp_flowtype != M_HASHTYPE_NONE)
+ txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
+ vi->rsrv_noflowq);
+ tlsp->txq = txq;
+ INP_RUNLOCK(inp);
+
+ error = ktls_setup_keys(tlsp, tls, txq);
+ if (error)
+ goto failed;
+
+ tlsp->enc_mode = t4_tls_cipher_mode(tls);
+ tlsp->tx_key_info_size = t4_tls_key_info_size(tls);
+
+ /* The SCMD fields used when encrypting a full TLS record. */
+ if (tlsp->tls13)
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0);
+ else
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(3);
+ tlsp->scmd0.seqno_numivs |=
+ V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(iv_size / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0.seqno_numivs = htobe32(tlsp->scmd0.seqno_numivs);
+
+ tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0);
+ if (tlsp->inline_key)
+ tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * (no trailer and possibly a truncated payload).
+ */
+ tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
+ V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
+ else
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(tlsp->enc_mode);
+ tlsp->scmd0_short.seqno_numivs =
+ htobe32(tlsp->scmd0_short.seqno_numivs);
+
+ tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1);
+ if (tlsp->inline_key)
+ tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * using a partial GHASH.
+ */
+ tlsp->scmd0_partial.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0_partial.seqno_numivs =
+ htobe32(tlsp->scmd0_partial.seqno_numivs);
+
+ tlsp->scmd0_partial.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1) |
+ V_SCMD_KEY_CTX_INLINE(1);
+
+ TXQ_LOCK(txq);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ txq->kern_tls_gcm++;
+ else
+ txq->kern_tls_cbc++;
+ TXQ_UNLOCK(txq);
+ *pt = &tlsp->com;
+ return (0);
+
+failed:
+ m_snd_tag_rele(&tlsp->com);
+ return (error);
+}
+
+static int
+ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
+ struct sge_txq *txq)
+{
+ struct tls_key_req *kwr;
+ struct tls_keyctx *kctx;
+ void *items[1];
+ struct mbuf *m;
+ int error;
+
+ /*
+ * Store the salt and keys in the key context. For
+ * connections with an inline key, this key context is passed
+ * as immediate data in each work request. For connections
+ * storing the key in DDR, a work request is used to store a
+ * copy of the key context in DDR.
+ */
+ t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
+ if (tlsp->inline_key)
+ return (0);
+
+ /* Populate key work request. */
+ m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
+ if (m == NULL) {
+ CTR(KTR_CXGBE, "%s: %p failed to alloc WR mbuf", __func__,
+ tlsp);
+ return (ENOMEM);
+ }
+ m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
+ m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
+ kwr = mtod(m, void *);
+ memset(kwr, 0, TLS_KEY_WR_SZ);
+
+ t4_write_tlskey_wr(tls, KTLS_TX, 0, 0, tlsp->tx_key_addr, kwr);
+ kctx = (struct tls_keyctx *)(kwr + 1);
+ memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));
+
+ /*
+ * Place the key work request in the transmit queue. It
+ * should be sent to the NIC before any TLS packets using this
+ * session.
+ */
+ items[0] = m;
+ error = mp_ring_enqueue(txq->r, items, 1, 1);
+ if (error)
+ m_free(m);
+ else
+ CTR(KTR_CXGBE, "%s: %p sent key WR", __func__, tlsp);
+ return (error);
+}
+
+static u_int
+ktls_base_wr_size(struct tlspcb *tlsp, bool inline_key)
+{
+ u_int wr_len;
+
+ wr_len = sizeof(struct fw_ulptx_wr); // 16
+ wr_len += sizeof(struct ulp_txpkt); // 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
+ if (inline_key)
+ wr_len += tlsp->tx_key_info_size;
+ else {
+ wr_len += sizeof(struct ulptx_sc_memrd);// 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ }
+ /* SplitMode CPL_RX_PHYS_DSGL here if needed. */
+ /* CPL_TX_*_LSO here if needed. */
+ wr_len += sizeof(struct cpl_tx_pkt_core);// 16
+ return (wr_len);
+}
+
+static u_int
+ktls_sgl_size(u_int nsegs)
+{
+ u_int wr_len;
+
+ /* First segment is part of ulptx_sgl. */
+ nsegs--;
+
+ wr_len = sizeof(struct ulptx_sgl);
+ wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
+ return (wr_len);
+}
+
+/*
+ * A request that doesn't need to generate the TLS trailer is a short
+ * record. For these requests, part of the TLS record payload is
+ * encrypted without invoking the MAC.
+ *
+ * Returns true if this record should be sent as a short record. In
+ * either case, the remaining outputs describe the how much of the
+ * TLS record to send as input to the crypto block and the amount of
+ * crypto output to trim via SplitMode:
+ *
+ * *header_len - Number of bytes of TLS header to pass as immediate
+ * data
+ *
+ * *offset - Start offset of TLS record payload to pass as DSGL data
+ *
+ * *plen - Length of TLS record payload to pass as DSGL data
+ *
+ * *leading_waste - amount of non-packet-header bytes to drop at the
+ * start of the crypto output
+ *
+ * *trailing_waste - amount of crypto output to drop from the end
+ */
+static bool
+ktls_is_short_record(struct tlspcb *tlsp, struct mbuf *m_tls, u_int tlen,
+ u_int rlen, u_int *header_len, u_int *offset, u_int *plen,
+ u_int *leading_waste, u_int *trailing_waste, bool send_partial_ghash,
+ bool request_ghash)
+{
+ u_int new_tlen, trailer_len;
+
+ MPASS(tlen > m_tls->m_epg_hdrlen);
+
+ /*
+ * For TLS 1.3 treat the inner record type stored as the first
+ * byte of the trailer as part of the payload rather than part
+ * of the trailer.
+ */
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+
+ /*
+ * Default to sending the full record as input to the crypto
+ * engine and relying on SplitMode to drop any waste.
+ */
+ *header_len = m_tls->m_epg_hdrlen;
+ *offset = 0;
+ *plen = rlen - (m_tls->m_epg_hdrlen + trailer_len);
+ *leading_waste = mtod(m_tls, vm_offset_t);
+ *trailing_waste = rlen - tlen;
+ if (!tlsp->sc->tlst.short_records)
+ return (false);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC) {
+ /*
+ * For AES-CBC we have to send input from the start of
+ * the TLS record payload that is a multiple of the
+ * block size. new_tlen rounds up tlen to the end of
+ * the containing AES block. If this last block
+ * overlaps with the trailer, send the full record to
+ * generate the MAC.
+ */
+ new_tlen = TLS_HEADER_LENGTH +
+ roundup2(tlen - TLS_HEADER_LENGTH, AES_BLOCK_LEN);
+ if (rlen - new_tlen < trailer_len)
+ return (false);
+
+ *trailing_waste = new_tlen - tlen;
+ *plen = new_tlen - m_tls->m_epg_hdrlen;
+ } else {
+ if (rlen - tlen < trailer_len ||
+ (rlen - tlen == trailer_len && request_ghash)) {
+ /*
+ * For AES-GCM we have to send the full record
+ * if the end overlaps with the trailer and a
+ * partial GHASH isn't being sent.
+ */
+ if (!send_partial_ghash)
+ return (false);
+
+ /*
+ * Will need to treat any excess trailer bytes as
+ * trailing waste. *trailing_waste is already
+ * correct.
+ */
+ } else {
+ /*
+ * We can use AES-CTR or AES-GCM in partial GHASH
+ * mode to encrypt a partial PDU.
+ *
+ * The last block can be partially encrypted
+ * without any trailing waste.
+ */
+ *trailing_waste = 0;
+ *plen = tlen - m_tls->m_epg_hdrlen;
+ }
+
+ /*
+ * If this request starts at the first byte of the
+ * payload (so the previous request sent the full TLS
+ * header as a tunnel packet) and a partial GHASH is
+ * being requested, the full TLS header must be sent
+ * as input for the GHASH.
+ */
+ if (mtod(m_tls, vm_offset_t) == m_tls->m_epg_hdrlen &&
+ request_ghash)
+ return (true);
+
+ /*
+ * In addition, we can minimize leading waste by
+ * starting encryption at the start of the closest AES
+ * block.
+ */
+ if (mtod(m_tls, vm_offset_t) >= m_tls->m_epg_hdrlen) {
+ *header_len = 0;
+ *offset = mtod(m_tls, vm_offset_t) -
+ m_tls->m_epg_hdrlen;
+ if (*offset >= *plen)
+ *offset = *plen;
+ else
+ *offset = rounddown2(*offset, AES_BLOCK_LEN);
+
+ /*
+ * If the request is just bytes from the trailer,
+ * trim the offset to the end of the payload.
+ */
+ *offset = min(*offset, *plen);
+ *plen -= *offset;
+ *leading_waste -= (m_tls->m_epg_hdrlen + *offset);
+ }
+ }
+ return (true);
+}
+
+/* Size of the AES-GCM TLS AAD for a given connection. */
+static int
+ktls_gcm_aad_len(struct tlspcb *tlsp)
+{
+ return (tlsp->tls13 ? sizeof(struct tls_aead_data_13) :
+ sizeof(struct tls_aead_data));
+}
+
+static int
+ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
+ int *nsegsp)
+{
+ const struct tls_record_layer *hdr;
+ u_int header_len, imm_len, offset, plen, rlen, tlen, wr_len;
+ u_int leading_waste, trailing_waste;
+ bool inline_key, last_ghash_frag, request_ghash, send_partial_ghash;
+ bool short_record;
+
+ M_ASSERTEXTPG(m_tls);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+ wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_core) +
+ roundup2(m->m_len + m_tls->m_len, 16);
+ if (wr_len > SGE_MAX_WR_LEN) {
+ CTR(KTR_CXGBE,
+ "%s: %p TLS header-only packet too long (len %d)",
+ __func__, tlsp, m->m_len + m_tls->m_len);
+ }
+
+ /* This should always be the last TLS record in a chain. */
+ MPASS(m_tls->m_next == NULL);
+ *nsegsp = 0;
+ return (wr_len);
+ }
+
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+ /*
+ * See if this request might make use of GHASH state. This
+ * errs on the side of over-budgeting the WR size.
+ */
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * Might use partial GHASH if this doesn't
+ * send the full record.
+ */
+ if (tlen < rlen) {
+ if (tlen < (rlen - trailer_len))
+ send_partial_ghash = true;
+ request_ghash = true;
+ }
+ } else {
+ send_partial_ghash = true;
+ if (tlen < rlen)
+ request_ghash = true;
+ if (tlen >= (rlen - trailer_len))
+ last_ghash_frag = true;
+ }
+ }
+
+ /*
+ * Assume not sending partial GHASH for this call to get the
+ * larger size.
+ */
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ false, request_ghash);
+
+ inline_key = send_partial_ghash || tlsp->inline_key;
+
+ /* Calculate the size of the work request. */
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash)
+ wr_len += AES_GMAC_HASH_LEN;
+
+ if (leading_waste != 0 || trailing_waste != 0) {
+ /*
+ * Partial records might require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ /* Budget for an LSO header even if we don't use it. */
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ /*
+ * Headers (including the TLS header) are always sent as
+ * immediate data. Short records include a raw AES IV as
+ * immediate data. TLS 1.3 non-short records include a
+ * placeholder for the sequence number as immediate data.
+ * Short records using a partial hash may also need to send
+ * TLS AAD. If a partial hash might be sent, assume a short
+ * record to get the larger size.
+ */
+ imm_len = m->m_len + header_len;
+ if (short_record || send_partial_ghash) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+
+ /*
+ * TLS record payload via DSGL. For partial GCM mode we
+ * might need an extra SG entry for a placeholder.
+ */
+ *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
+ plen);
+ wr_len += ktls_sgl_size(*nsegsp + (last_ghash_frag ? 1 : 0));
+
+ if (request_ghash) {
+ /* AES-GCM records might return a partial hash. */
+ wr_len += sizeof(struct ulp_txpkt);
+ wr_len += sizeof(struct ulptx_idata);
+ wr_len += sizeof(struct cpl_tx_tls_ack);
+ wr_len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ wr_len = roundup2(wr_len, 16);
+ return (wr_len);
+}
+
+/* Queue the next pending packet. */
+static void
+ktls_queue_next_packet(struct tlspcb *tlsp, bool enqueue_only)
+{
+#ifdef KTR
+ struct ether_header *eh;
+ struct tcphdr *tcp;
+ tcp_seq tcp_seqno;
+#endif
+ struct mbuf *m;
+ void *items[1];
+ int rc;
+
+ TXQ_LOCK_ASSERT_OWNED(tlsp->txq);
+ KASSERT(tlsp->queue_mbufs, ("%s: mbufs not being queued for %p",
+ __func__, tlsp));
+ for (;;) {
+ m = mbufq_dequeue(&tlsp->pending_mbufs);
+ if (m == NULL) {
+ tlsp->queue_mbufs = false;
+ return;
+ }
+
+#ifdef KTR
+ eh = mtod(m, struct ether_header *);
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+#endif
+#endif
+
+ items[0] = m;
+ if (enqueue_only)
+ rc = mp_ring_enqueue_only(tlsp->txq->r, items, 1);
+ else {
+ TXQ_UNLOCK(tlsp->txq);
+ rc = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ TXQ_LOCK(tlsp->txq);
+ }
+ if (__predict_true(rc == 0))
+ return;
+
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u dropped", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+ m_freem(m);
+ }
+}
+
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ struct tlspcb *tlsp;
+ struct ether_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ void *items[1];
+ int error, nsegs;
+ u_int wr_len, tot_len;
+ uint16_t eh_type;
+
+ /*
+ * Locate headers in initial mbuf.
+ *
+ * XXX: This assumes all of the headers are in the initial mbuf.
+ * Could perhaps use m_advance() like parse_pkt() if that turns
+ * out to not be true.
+ */
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short", __func__, tlsp);
+ return (EINVAL);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ m->m_pkthdr.l2hlen = sizeof(*evh);
+ } else
+ m->m_pkthdr.l2hlen = sizeof(*eh);
+
+ switch (eh_type) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(eh + 1);
+ if (ip->ip_p != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p mbuf not IPPROTO_TCP", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = ip->ip_hl * 4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(eh + 1);
+ if (ip6->ip6_nxt != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p, mbuf not IPPROTO_TCP (%u)",
+ __func__, tlsp, ip6->ip6_nxt);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
+ break;
+ default:
+ CTR(KTR_CXGBE, "%s: %p mbuf not ETHERTYPE_IP{,V6}", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ sizeof(*tcp)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short (2)", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
+ m->m_pkthdr.l4hlen = tcp->th_off * 4;
+
+ /* Bail if there is TCP payload before the TLS record. */
+ if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ m->m_pkthdr.l4hlen) {
+ CTR(KTR_CXGBE,
+ "%s: %p header mbuf bad length (%d + %d + %d != %d)",
+ __func__, tlsp, m->m_pkthdr.l2hlen, m->m_pkthdr.l3hlen,
+ m->m_pkthdr.l4hlen, m->m_len);
+ return (EINVAL);
+ }
+
+ /* Assume all headers are in 'm' for now. */
+ MPASS(m->m_next != NULL);
+ MPASS(m->m_next->m_flags & M_EXTPG);
+
+ tot_len = 0;
+
+ /*
+ * Each of the remaining mbufs in the chain should reference a
+ * TLS record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p wr_len %d nsegs %d", __func__, tlsp,
+ wr_len, nsegs);
+#endif
+ if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
+ return (EFBIG);
+ tot_len += roundup2(wr_len, EQ_ESIZE);
+
+ /*
+ * Store 'nsegs' for the first TLS record in the
+ * header mbuf's metadata.
+ */
+ if (m_tls == m->m_next)
+ set_mbuf_nsegs(m, nsegs);
+ }
+
+ MPASS(tot_len != 0);
+ set_mbuf_len16(m, tot_len / 16);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ /* Defer packets beyond what has been sent so far. */
+ TXQ_LOCK(tlsp->txq);
+ if (tlsp->queue_mbufs) {
+ error = mbufq_enqueue(&tlsp->pending_mbufs, m);
+ if (error == 0) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p len16 %d nsegs %d TCP seq %u deferred",
+ __func__, tlsp, mbuf_len16(m),
+ mbuf_nsegs(m), ntohl(tcp->th_seq));
+#endif
+ }
+ TXQ_UNLOCK(tlsp->txq);
+ return (error);
+ }
+ tlsp->queue_mbufs = true;
+ TXQ_UNLOCK(tlsp->txq);
+ }
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p len16 %d nsegs %d", __func__, tlsp,
+ mbuf_len16(m), mbuf_nsegs(m));
+#endif
+ items[0] = m;
+ error = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ if (__predict_false(error != 0)) {
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ TXQ_LOCK(tlsp->txq);
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+ }
+ }
+ return (error);
+}
+
+static inline bool
+needs_vlan_insertion(struct mbuf *m)
+{
+
+ M_ASSERTPKTHDR(m);
+
+ return (m->m_flags & M_VLANTAG);
+}
+
+static inline uint64_t
+pkt_ctrl1(struct sge_txq *txq, struct mbuf *m, uint16_t eh_type)
+{
+ uint64_t ctrl1;
+
+ /* Checksums are always offloaded */
+ if (eh_type == ETHERTYPE_IP) {
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ } else {
+ MPASS(m->m_pkthdr.l3hlen == sizeof(struct ip6_hdr));
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ }
+ txq->txcsum++;
+
+ /* VLAN tag insertion */
+ if (needs_vlan_insertion(m)) {
+ ctrl1 |= F_TXPKT_VLAN_VLD |
+ V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+ txq->vlan_insertion++;
+ }
+
+ return (ctrl1);
+}
+
+static inline void *
+write_lso_cpl(void *cpl, struct mbuf *m0, uint16_t mss, uint16_t eh_type,
+ int total_len)
+{
+ struct cpl_tx_pkt_lso_core *lso;
+ uint32_t ctrl;
+
+ KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
+ m0->m_pkthdr.l4hlen > 0,
+ ("%s: mbuf %p needs TSO but missing header lengths",
+ __func__, m0));
+
+ ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
+ V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
+ V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
+ if (eh_type == ETHERTYPE_IPV6)
+ ctrl |= F_LSO_IPV6;
+
+ lso = cpl;
+ lso->lso_ctrl = htobe32(ctrl);
+ lso->ipid_ofst = htobe16(0);
+ lso->mss = htobe16(mss);
+ lso->seqno_offset = htobe32(0);
+ lso->len = htobe32(total_len);
+
+ return (lso + 1);
+}
+
+static inline void *
+write_tx_tls_ack(void *dst, u_int rx_chid, u_int hash_len, bool ghash_lcb)
+{
+ struct cpl_tx_tls_ack *cpl;
+ uint32_t flags;
+
+ flags = ghash_lcb ? F_CPL_TX_TLS_ACK_LCB : F_CPL_TX_TLS_ACK_PHASH;
+ cpl = dst;
+ cpl->op_to_Rsvd2 = htobe32(V_CPL_TX_TLS_ACK_OPCODE(CPL_TX_TLS_ACK) |
+ V_T7_CPL_TX_TLS_ACK_RXCHID(rx_chid) | F_CPL_TX_TLS_ACK_ULPTXLPBK |
+ flags);
+
+ /* 32 == AckEncCpl, 16 == LCB */
+ cpl->PldLen = htobe32(V_CPL_TX_TLS_ACK_PLDLEN(32 + 16 + hash_len));
+ cpl->Rsvd3 = 0;
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_fw6_pld(void *dst, u_int rx_chid, u_int rx_qid, u_int hash_len,
+ uint64_t cookie)
+{
+ struct rss_header *rss;
+ struct cpl_fw6_pld *cpl;
+
+ rss = dst;
+ memset(rss, 0, sizeof(*rss));
+ rss->opcode = CPL_FW6_PLD;
+ rss->qid = htobe16(rx_qid);
+ rss->channel = rx_chid;
+
+ cpl = (void *)(rss + 1);
+ memset(cpl, 0, sizeof(*cpl));
+ cpl->opcode = CPL_FW6_PLD;
+ cpl->len = htobe16(hash_len);
+ cpl->data[1] = htobe64(cookie);
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_split_mode_rx_phys(void *dst, struct mbuf *m, struct mbuf *m_tls,
+ u_int crypto_hdr_len, u_int leading_waste, u_int trailing_waste)
+{
+ struct cpl_t7_rx_phys_dsgl *cpl;
+ uint16_t *len;
+ uint8_t numsge;
+
+ /* Forward first (3) and third (1) segments. */
+ numsge = 0xa;
+
+ cpl = dst;
+ cpl->ot.opcode = CPL_RX_PHYS_DSGL;
+ cpl->PhysAddrFields_lo_to_NumSGE =
+ htobe32(F_CPL_T7_RX_PHYS_DSGL_SPLITMODE |
+ V_CPL_T7_RX_PHYS_DSGL_NUMSGE(numsge));
+
+ len = (uint16_t *)(cpl->RSSCopy);
+
+ /*
+ * First segment always contains packet headers as well as
+ * transmit-related CPLs.
+ */
+ len[0] = htobe16(crypto_hdr_len);
+
+ /*
+ * Second segment is "gap" of data to drop at the front of the
+ * TLS record.
+ */
+ len[1] = htobe16(leading_waste);
+
+ /* Third segment is how much of the TLS record to send. */
+ len[2] = htobe16(m_tls->m_len);
+
+ /* Fourth segment is how much data to drop at the end. */
+ len[3] = htobe16(trailing_waste);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: forward %u skip %u forward %u skip %u",
+ __func__, be16toh(len[0]), be16toh(len[1]), be16toh(len[2]),
+ be16toh(len[3]));
+#endif
+ return (cpl + 1);
+}
+
+/*
+ * If the SGL ends on an address that is not 16 byte aligned, this function will
+ * add a 0 filled flit at the end.
+ */
+static void *
+write_gl_to_buf(struct sglist *gl, caddr_t to)
+{
+ struct sglist_seg *seg;
+ __be64 *flitp;
+ struct ulptx_sgl *usgl;
+ int i, nflits, nsegs;
+
+ KASSERT(((uintptr_t)to & 0xf) == 0,
+ ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
+
+ nsegs = gl->sg_nseg;
+ MPASS(nsegs > 0);
+
+ /*
+ * 2 flits for the ulptx_sgl header (cmd_nsge/len0 + addr0), then
+ * 3 flits for every additional pair of segments (len/len + two
+ * addresses), rounded up for an odd trailing segment.
+ */
+ nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
+ flitp = (__be64 *)to;
+ seg = &gl->sg_segs[0];
+ usgl = (void *)flitp;
+
+ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nsegs));
+ usgl->len0 = htobe32(seg->ss_len);
+ usgl->addr0 = htobe64(seg->ss_paddr);
+ seg++;
+
+ /* Remaining segments are stored two per sge[] entry. */
+ for (i = 0; i < nsegs - 1; i++, seg++) {
+ usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
+ usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
+ }
+ if (i & 1)
+ usgl->sge[i / 2].len[1] = htobe32(0);
+ flitp += nflits;
+
+ /* Pad with a zero flit so the caller resumes 16-byte aligned. */
+ if (nflits & 1) {
+ MPASS(((uintptr_t)flitp) & 0xf);
+ *flitp++ = 0;
+ }
+
+ MPASS((((uintptr_t)flitp) & 0xf) == 0);
+ return (flitp);
+}
+
+/*
+ * Copy 'len' bytes into the tx descriptor ring at *to, wrapping around
+ * to the start of the ring if the copy would run past the last
+ * descriptor.  *to is advanced past the copied bytes (and reset to the
+ * ring base when it lands exactly on the end).
+ */
+static inline void
+copy_to_txd(struct sge_eq *eq, const char *from, caddr_t *to, int len)
+{
+
+ MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
+ MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
+
+ if (__predict_true((uintptr_t)(*to) + len <=
+ (uintptr_t)&eq->desc[eq->sidx])) {
+ bcopy(from, *to, len);
+ (*to) += len;
+ if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
+ (*to) = (caddr_t)eq->desc;
+ } else {
+ /* Split the copy at the end of the ring. */
+ int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
+
+ bcopy(from, *to, portion);
+ from += portion;
+ portion = len - portion; /* remaining */
+ bcopy(from, (void *)eq->desc, portion);
+ (*to) = (caddr_t)eq->desc + portion;
+ }
+}
+
+/*
+ * Write a plain (unencrypted) FW_ETH_TX_PKT_WR work request that sends
+ * the packet headers from 'm' followed by 'len' bytes of payload from
+ * 'src', all as immediate data.  The IP total length and the TCP
+ * sequence number are rewritten to match the payload being sent.  Used
+ * for header-only and trailer-only TLS sends that need no crypto.
+ * Returns the number of descriptors consumed.
+ */
+static int
+ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
+ const void *src, u_int len, u_int available, tcp_seq tcp_seqno, u_int pidx,
+ uint16_t eh_type, bool last_wr)
+{
+ struct tx_sdesc *txsd;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ uint32_t ctrl;
+ int len16, ndesc, pktlen;
+ struct ether_header *eh;
+ struct ip *ip, newip;
+ struct ip6_hdr *ip6, newip6;
+ struct tcphdr *tcp, newtcp;
+ caddr_t out;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+
+ wr = dst;
+ pktlen = m->m_len + len;
+ ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
+ len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
+ ndesc = tx_len16_to_desc(len16);
+ MPASS(ndesc <= available);
+
+ /* Firmware work request header */
+ /* TODO: Handle VF work request. */
+ wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
+
+ ctrl = V_FW_WR_LEN16(len16);
+ wr->equiq_to_len16 = htobe32(ctrl);
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = txq->cpl_ctrl0;
+ cpl->pack = 0;
+ cpl->len = htobe16(pktlen);
+
+ out = (void *)(cpl + 1);
+
+ /* Copy over Ethernet header. */
+ eh = mtod(m, struct ether_header *);
+ copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
+
+ /* Fixup length in IP header and copy out. */
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip = *ip;
+ newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
+ copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
+ /* Copy any IPv4 options unchanged. */
+ if (m->m_pkthdr.l3hlen > sizeof(*ip))
+ copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
+ m->m_pkthdr.l3hlen - sizeof(*ip));
+ } else {
+ ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip6 = *ip6;
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
+ copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
+ MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
+ }
+ cpl->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+
+ /* Set sequence number in TCP header. */
+ tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ newtcp = *tcp;
+ newtcp.th_seq = htonl(tcp_seqno);
+ copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
+
+ /* Copy rest of TCP header. */
+ copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
+ (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
+
+ /* Copy the payload data. */
+ copy_to_txd(&txq->eq, src, &out, len);
+ txq->imm_wrs++;
+
+ txq->txpkt_wrs++;
+
+ /* Only the last WR of a chain owns (and later frees) the mbuf. */
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+/*
+ * Write a FW_ULPTX_WR work request that encrypts and transmits the
+ * portion of a single TLS record described by m_tls.  The request
+ * carries a ULP_TX_PKT containing a CPL_TX_SEC_PDU, the key context
+ * (inline or read from card memory), an optional SplitMode
+ * CPL_RX_PHYS_DSGL to trim unwanted crypto output, an optional LSO
+ * CPL, a CPL_TX_PKT_XT with the packet headers as immediate data, and
+ * an SGL for the record payload.  For AES-GCM it may additionally
+ * request a partial GHASH via a second ULP_TX_PKT.  Returns the number
+ * of descriptors consumed.
+ */
+static int
+ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
+ void *dst, struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls,
+ u_int available, tcp_seq tcp_seqno, u_int pidx, uint16_t eh_type,
+ uint16_t mss)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tx_sdesc *txsd;
+ struct fw_ulptx_wr *wr;
+ struct ulp_txpkt *txpkt;
+ struct ulptx_sc_memrd *memrd;
+ struct ulptx_idata *idata;
+ struct cpl_tx_sec_pdu *sec_pdu;
+ struct cpl_tx_pkt_core *tx_pkt;
+ const struct tls_record_layer *hdr;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *newtcp;
+ char *iv, *out;
+ u_int aad_start, aad_stop;
+ u_int auth_start, auth_stop, auth_insert;
+ u_int cipher_start, cipher_stop, iv_offset;
+ u_int header_len, offset, plen, rlen, tlen;
+ u_int imm_len, ndesc, nsegs, txpkt_lens[2], wr_len;
+ u_int cpl_len, crypto_hdr_len, post_key_context_len;
+ u_int leading_waste, trailing_waste;
+ u_short ip_len;
+ bool inline_key, ghash_lcb, last_ghash_frag, last_wr, need_lso;
+ bool request_ghash, send_partial_ghash, short_record, split_mode;
+ bool using_scratch;
+
+ MPASS(tlsp->txq == txq);
+ M_ASSERTEXTPG(m_tls);
+
+ /* Final work request for this mbuf chain? */
+ last_wr = (m_tls->m_next == NULL);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p header-only TLS record %u", __func__,
+ tlsp, (u_int)m_tls->m_epg_seqno);
+#endif
+ /* This should always be the last TLS record in a chain. */
+ MPASS(last_wr);
+
+ txq->kern_tls_header++;
+
+ return (ktls_write_tunnel_packet(txq, dst, m,
+ (char *)m_tls->m_epg_hdr + mtod(m_tls, vm_offset_t),
+ m_tls->m_len, available, tcp_seqno, pidx, eh_type,
+ last_wr));
+ }
+
+ /* Locate the TLS header. */
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: offset %lu len %u TCP seq %u TLS record %u",
+ __func__, mtod(m_tls, vm_offset_t), m_tls->m_len, tcp_seqno,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ /* Should this request make use of GHASH state? */
+ ghash_lcb = false;
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * If this is the very first TLS record or
+ * if this is a newer TLS record, request a partial
+ * hash, but not if we are going to send the whole
+ * thing.
+ */
+ if ((tlsp->ghash_tls_seqno == 0 ||
+ tlsp->ghash_tls_seqno < m_tls->m_epg_seqno) &&
+ tlen < rlen) {
+ /*
+ * If we are only missing part or all
+ * of the trailer, send a normal full
+ * record but request the hash.
+ * Otherwise, use partial GHASH mode.
+ */
+ if (tlen >= (rlen - trailer_len))
+ ghash_lcb = true;
+ else
+ send_partial_ghash = true;
+ request_ghash = true;
+ tlsp->ghash_tls_seqno = m_tls->m_epg_seqno;
+ }
+ } else if (tlsp->ghash_tls_seqno == m_tls->m_epg_seqno &&
+ tlsp->ghash_valid) {
+ /*
+ * Compute the offset of the first AES block as
+ * is done in ktls_is_short_record.
+ */
+ if (rlen - tlen < trailer_len)
+ plen = rlen - (m_tls->m_epg_hdrlen +
+ trailer_len);
+ else
+ plen = tlen - m_tls->m_epg_hdrlen;
+ offset = mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen;
+ if (offset >= plen)
+ offset = plen;
+ else
+ offset = rounddown2(offset, AES_BLOCK_LEN);
+ if (tlsp->ghash_offset == offset) {
+ if (offset == plen) {
+ /*
+ * Send a partial trailer as a
+ * tunnelled packet as
+ * immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p trailer-only TLS record %u",
+ __func__, tlsp,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ txq->kern_tls_trailer++;
+
+ offset = mtod(m_tls, vm_offset_t) -
+ (m_tls->m_epg_hdrlen + plen);
+ KASSERT(offset <= AES_GMAC_HASH_LEN,
+ ("offset outside of trailer"));
+ return (ktls_write_tunnel_packet(txq,
+ dst, m, tlsp->ghash + offset,
+ m_tls->m_len, available, tcp_seqno,
+ pidx, eh_type, last_wr));
+ }
+
+ /*
+ * If this request sends the end of
+ * the payload, it is the last
+ * fragment.
+ */
+ if (tlen >= (rlen - trailer_len)) {
+ last_ghash_frag = true;
+ ghash_lcb = true;
+ }
+
+ /*
+ * Only use partial GCM mode (rather
+ * than an AES-CTR short record) if
+ * there is input auth data to pass to
+ * the GHASH. That is true so long as
+ * there is at least one full block of
+ * payload data, or if the remaining
+ * payload data is the final partial
+ * block.
+ */
+ if (plen - offset >= GMAC_BLOCK_LEN ||
+ last_ghash_frag) {
+ send_partial_ghash = true;
+
+ /*
+ * If not sending the complete
+ * end of the record, this is
+ * a middle request so needs
+ * to request an updated
+ * partial hash.
+ */
+ if (tlen < rlen)
+ request_ghash = true;
+ }
+ }
+ }
+ }
+
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ send_partial_ghash, request_ghash);
+
+ if (short_record) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p short TLS record %u hdr %u offs %u plen %u",
+ __func__, tlsp, (u_int)m_tls->m_epg_seqno, header_len,
+ offset, plen);
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ CTR(KTR_CXGBE, "%s: %p sending initial GHASH",
+ __func__, tlsp);
+ else
+ CTR(KTR_CXGBE, "%s: %p sending partial GHASH for offset %u%s",
+ __func__, tlsp, tlsp->ghash_offset,
+ last_ghash_frag ? ", last_frag" : "");
+ }
+#endif
+ KASSERT(send_partial_ghash || !request_ghash,
+ ("requesting but not sending partial hash for short record"));
+ } else {
+ KASSERT(!send_partial_ghash,
+ ("sending partial hash with full record"));
+ }
+
+ if (tlen < rlen && m_tls->m_next == NULL &&
+ (tcp->th_flags & TH_FIN) != 0) {
+ txq->kern_tls_fin_short++;
+#ifdef INVARIANTS
+ panic("%s: FIN on short TLS record", __func__);
+#endif
+ }
+
+ /*
+ * Use cached value for first record in chain if not using
+ * partial GCM mode. ktls_parse_pkt() calculates nsegs based
+ * on send_partial_ghash being false.
+ */
+ if (m->m_next == m_tls && !send_partial_ghash)
+ nsegs = mbuf_nsegs(m);
+ else
+ nsegs = sglist_count_mbuf_epg(m_tls,
+ m_tls->m_epg_hdrlen + offset, plen);
+
+ /* Determine if we need an LSO header. */
+ need_lso = (m_tls->m_len > mss);
+
+ /* Calculate the size of the TLS work request. */
+ inline_key = send_partial_ghash || tlsp->inline_key;
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash) {
+ /* Inline key context includes partial hash in OPAD. */
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ /*
+ * SplitMode is required if there is any thing we need to trim
+ * from the crypto output, either at the front or end of the
+ * record. Note that short records might not need trimming.
+ */
+ split_mode = leading_waste != 0 || trailing_waste != 0;
+ if (split_mode) {
+ /*
+ * Partial records require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ if (need_lso)
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ imm_len = m->m_len + header_len;
+ if (short_record) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+ wr_len += ktls_sgl_size(nsegs + (last_ghash_frag ? 1 : 0));
+ wr_len = roundup2(wr_len, 16);
+ txpkt_lens[0] = wr_len - sizeof(*wr);
+
+ if (request_ghash) {
+ /*
+ * Requesting the hash entails a second ULP_TX_PKT
+ * containing CPL_TX_TLS_ACK, CPL_FW6_PLD, and space
+ * for the hash.
+ */
+ txpkt_lens[1] = sizeof(struct ulp_txpkt);
+ txpkt_lens[1] += sizeof(struct ulptx_idata);
+ txpkt_lens[1] += sizeof(struct cpl_tx_tls_ack);
+ txpkt_lens[1] += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ txpkt_lens[1] += AES_GMAC_HASH_LEN;
+ wr_len += txpkt_lens[1];
+ } else
+ txpkt_lens[1] = 0;
+
+ ndesc = howmany(wr_len, EQ_ESIZE);
+ MPASS(ndesc <= available);
+
+ /*
+ * Use the per-txq scratch pad if near the end of the ring to
+ * simplify handling of wrap-around.
+ */
+ using_scratch = (eq->sidx - pidx < ndesc);
+ if (using_scratch)
+ wr = (void *)txq->ss;
+ else
+ wr = dst;
+
+ /* FW_ULPTX_WR */
+ wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
+ wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
+ V_FW_WR_LEN16(wr_len / 16));
+ wr->cookie = 0;
+
+ /* ULP_TXPKT */
+ txpkt = (void *)(wr + 1);
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_CMDMORE(request_ghash ? 1 : 0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[0], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = sizeof(struct cpl_tx_sec_pdu);
+
+ /*
+ * After the key context comes CPL_RX_PHYS_DSGL, CPL_TX_*, and
+ * immediate data containing headers. When using an inline
+ * key, these are counted as part of this ULPTX_IDATA. When
+ * reading the key from memory, these are part of a separate
+ * ULPTX_IDATA.
+ */
+ cpl_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ cpl_len += sizeof(struct cpl_tx_pkt_lso_core);
+ if (split_mode)
+ cpl_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ post_key_context_len = cpl_len + imm_len;
+
+ if (inline_key) {
+ idata->len += tlsp->tx_key_info_size + post_key_context_len;
+ if (send_partial_ghash) {
+ /* Partial GHASH in key context. */
+ idata->len += AES_GMAC_HASH_LEN;
+ }
+ }
+ idata->len = htobe32(idata->len);
+
+ /* CPL_TX_SEC_PDU */
+ sec_pdu = (void *)(idata + 1);
+
+ /*
+ * Packet headers are passed through unchanged by the crypto
+ * engine by marking them as header data in SCMD0.
+ */
+ crypto_hdr_len = m->m_len;
+
+ if (send_partial_ghash) {
+ /*
+ * For short records using a partial hash, the TLS
+ * header is counted as header data in SCMD0. TLS AAD
+ * is next (if AAD is present) followed by the AES-CTR
+ * IV. Last is the cipher region for the payload.
+ */
+ if (header_len != 0) {
+ aad_start = 1;
+ aad_stop = ktls_gcm_aad_len(tlsp);
+ } else {
+ aad_start = 0;
+ aad_stop = 0;
+ }
+ iv_offset = aad_stop + 1;
+ cipher_start = iv_offset + AES_BLOCK_LEN;
+ cipher_stop = 0;
+ if (last_ghash_frag) {
+ auth_start = cipher_start;
+ auth_stop = AES_GMAC_HASH_LEN;
+ auth_insert = auth_stop;
+ } else if (plen < GMAC_BLOCK_LEN) {
+ /*
+ * A request that sends part of the first AES
+ * block will only have AAD.
+ */
+ KASSERT(header_len != 0,
+ ("%s: partial GHASH with no auth", __func__));
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ auth_start = cipher_start;
+ auth_stop = plen % GMAC_BLOCK_LEN;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32(aad_stop + AES_BLOCK_LEN + plen +
+ (last_ghash_frag ? AES_GMAC_HASH_LEN : 0));
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_partial.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = tlsp->scmd0_partial.ivgen_hdrlen;
+ if (last_ghash_frag)
+ sec_pdu->ivgen_hdrlen |= V_SCMD_LAST_FRAG(1);
+ else
+ sec_pdu->ivgen_hdrlen |= V_SCMD_MORE_FRAGS(1);
+ sec_pdu->ivgen_hdrlen = htobe32(sec_pdu->ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_partial_ghash++;
+ } else if (short_record) {
+ /*
+ * For short records without a partial hash, the TLS
+ * header is counted as header data in SCMD0 and the
+ * IV is next, followed by a cipher region for the
+ * payload.
+ */
+ aad_start = 0;
+ aad_stop = 0;
+ iv_offset = 1;
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ cipher_start = AES_BLOCK_LEN + 1;
+ cipher_stop = 0;
+
+ sec_pdu->pldlen = htobe32(AES_BLOCK_LEN + plen);
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(
+ tlsp->scmd0_short.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_short++;
+ } else {
+ /*
+ * AAD is TLS header. IV is after AAD for TLS < 1.3.
+ * For TLS 1.3, a placeholder for the TLS sequence
+ * number is provided as an IV before the AAD. The
+ * cipher region starts after the AAD and IV. See
+ * comments in ccr_authenc() and ccr_gmac() in
+ * t4_crypto.c regarding cipher and auth start/stop
+ * values.
+ */
+ if (tlsp->tls13) {
+ iv_offset = 1;
+ aad_start = 1 + sizeof(uint64_t);
+ aad_stop = sizeof(uint64_t) + TLS_HEADER_LENGTH;
+ cipher_start = aad_stop + 1;
+ } else {
+ aad_start = 1;
+ aad_stop = TLS_HEADER_LENGTH;
+ iv_offset = TLS_HEADER_LENGTH + 1;
+ cipher_start = m_tls->m_epg_hdrlen + 1;
+ }
+ /*
+ * NOTE(review): both arms of this conditional currently
+ * assign identical values; kept as-is pending
+ * confirmation of whether non-GCM modes need different
+ * auth settings here.
+ */
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ cipher_stop = 0;
+ auth_start = cipher_start;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ cipher_stop = 0;
+ auth_start = cipher_start;
+ auth_stop = 0;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32((tlsp->tls13 ? sizeof(uint64_t) : 0) +
+ m_tls->m_epg_hdrlen + plen);
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ if (split_mode)
+ txq->kern_tls_partial++;
+ else
+ txq->kern_tls_full++;
+ }
+ sec_pdu->op_ivinsrtofst = htobe32(
+ V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
+ V_CPL_TX_SEC_PDU_CPLLEN(cpl_len / 8) |
+ V_CPL_TX_SEC_PDU_PLACEHOLDER(send_partial_ghash ? 1 : 0) |
+ V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
+ sec_pdu->aadstart_cipherstop_hi = htobe32(
+ V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
+ V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
+ V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
+ sec_pdu->cipherstop_lo_authinsert = htobe32(
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
+ V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
+ V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
+ V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
+
+ if (send_partial_ghash && last_ghash_frag) {
+ uint64_t aad_len, cipher_len;
+
+ aad_len = ktls_gcm_aad_len(tlsp);
+ cipher_len = rlen - (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ sec_pdu->scmd1 = htobe64(aad_len << 44 | cipher_len);
+ } else
+ sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
+
+ /* Key context */
+ out = (void *)(sec_pdu + 1);
+ if (inline_key) {
+ memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
+ if (send_partial_ghash) {
+ struct tls_keyctx *keyctx = (void *)out;
+
+ keyctx->u.txhdr.ctxlen++;
+ keyctx->u.txhdr.dualck_to_txvalid &= ~htobe16(
+ V_KEY_CONTEXT_MK_SIZE(M_KEY_CONTEXT_MK_SIZE));
+ keyctx->u.txhdr.dualck_to_txvalid |= htobe16(
+ F_KEY_CONTEXT_OPAD_PRESENT |
+ V_KEY_CONTEXT_MK_SIZE(0));
+ }
+ out += tlsp->tx_key_info_size;
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ else
+ memcpy(out, tlsp->ghash, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+ }
+ } else {
+ /* ULPTX_SC_MEMRD to read key context. */
+ memrd = (void *)out;
+ memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
+ V_ULP_TX_SC_MORE(1) |
+ V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
+ memrd->addr = htobe32(tlsp->tx_key_addr >> 5);
+
+ /* ULPTX_IDATA for CPL_TX_* and headers. */
+ idata = (void *)(memrd + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(post_key_context_len);
+
+ out = (void *)(idata + 1);
+ }
+
+ /* CPL_RX_PHYS_DSGL */
+ if (split_mode) {
+ crypto_hdr_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ crypto_hdr_len += sizeof(struct cpl_tx_pkt_lso_core);
+ crypto_hdr_len += m->m_len;
+ out = write_split_mode_rx_phys(out, m, m_tls, crypto_hdr_len,
+ leading_waste, trailing_waste);
+ }
+
+ /* CPL_TX_PKT_LSO */
+ if (need_lso) {
+ out = write_lso_cpl(out, m, mss, eh_type, m->m_len +
+ m_tls->m_len);
+ txq->tso_wrs++;
+ }
+
+ /* CPL_TX_PKT_XT */
+ tx_pkt = (void *)out;
+ tx_pkt->ctrl0 = txq->cpl_ctrl0;
+ tx_pkt->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+ tx_pkt->pack = 0;
+ tx_pkt->len = htobe16(m->m_len + m_tls->m_len);
+
+ /* Copy the packet headers. */
+ out = (void *)(tx_pkt + 1);
+ memcpy(out, mtod(m, char *), m->m_len);
+
+ /* Modify the packet length in the IP header. */
+ ip_len = m->m_len + m_tls->m_len - m->m_pkthdr.l2hlen;
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip->ip_len, ip_len);
+ } else {
+ ip6 = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip6->ip6_plen, ip_len - sizeof(*ip6));
+ }
+
+ /* Modify sequence number and flags in TCP header. */
+ newtcp = (void *)(out + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ be32enc(&newtcp->th_seq, tcp_seqno);
+ if (!last_wr)
+ newtcp->th_flags = tcp->th_flags & ~(TH_PUSH | TH_FIN);
+ out += m->m_len;
+
+ /*
+ * Insert placeholder for sequence number as IV for TLS 1.3
+ * non-short records.
+ */
+ if (tlsp->tls13 && !short_record) {
+ memset(out, 0, sizeof(uint64_t));
+ out += sizeof(uint64_t);
+ }
+
+ /* Populate the TLS header */
+ memcpy(out, m_tls->m_epg_hdr, header_len);
+ out += header_len;
+
+ /* TLS AAD for short records using a partial hash. */
+ if (send_partial_ghash && header_len != 0) {
+ if (tlsp->tls13) {
+ struct tls_aead_data_13 ad;
+
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = hdr->tls_length;
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ } else {
+ struct tls_aead_data ad;
+ uint16_t cipher_len;
+
+ cipher_len = rlen -
+ (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ ad.seq = htobe64(m_tls->m_epg_seqno);
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = htons(cipher_len);
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ }
+ }
+
+ /* AES IV for a short record. */
+ if (short_record) {
+ iv = out;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
+ if (tlsp->tls13) {
+ uint64_t value;
+
+ value = be64dec(tlsp->keyctx.u.txhdr.txsalt +
+ 4);
+ value ^= m_tls->m_epg_seqno;
+ be64enc(iv + 4, value);
+ } else
+ memcpy(iv + 4, hdr + 1, 8);
+ if (send_partial_ghash)
+ be32enc(iv + 12, 1 + offset / AES_BLOCK_LEN);
+ else
+ be32enc(iv + 12, 2 + offset / AES_BLOCK_LEN);
+ } else
+ memcpy(iv, hdr + 1, AES_BLOCK_LEN);
+ out += AES_BLOCK_LEN;
+ }
+
+ if (imm_len % 16 != 0) {
+ if (imm_len % 8 != 0) {
+ /* Zero pad to an 8-byte boundary. */
+ memset(out, 0, 8 - (imm_len % 8));
+ out += 8 - (imm_len % 8);
+ }
+
+ /*
+ * Insert a ULP_TX_SC_NOOP if needed so the SGL is
+ * 16-byte aligned.
+ */
+ if (imm_len % 16 <= 8) {
+ idata = (void *)out;
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(0);
+ out = (void *)(idata + 1);
+ }
+ }
+
+ /* SGL for record payload */
+ sglist_reset(txq->gl);
+ if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
+ plen) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist", __func__);
+#endif
+ }
+ if (last_ghash_frag) {
+ if (sglist_append_phys(txq->gl, zero_buffer_pa,
+ AES_GMAC_HASH_LEN) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist (2)", __func__);
+#endif
+ }
+ }
+ out = write_gl_to_buf(txq->gl, out);
+
+ if (request_ghash) {
+ /* ULP_TXPKT */
+ txpkt = (void *)out;
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[1], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(0));
+ idata->len = sizeof(struct cpl_tx_tls_ack);
+ idata->len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ idata->len += AES_GMAC_HASH_LEN;
+ idata->len = htobe32(idata->len);
+ out = (void *)(idata + 1);
+
+ /* CPL_TX_TLS_ACK */
+ out = write_tx_tls_ack(out, tlsp->rx_chid, AES_GMAC_HASH_LEN,
+ ghash_lcb);
+
+ /* CPL_FW6_PLD */
+ out = write_fw6_pld(out, tlsp->rx_chid, tlsp->rx_qid,
+ AES_GMAC_HASH_LEN, (uintptr_t)tlsp | CPL_FW6_COOKIE_KTLS);
+
+ /* Space for partial hash. */
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+
+ tlsp->ghash_pending = true;
+ tlsp->ghash_valid = false;
+ tlsp->ghash_lcb = ghash_lcb;
+ if (last_ghash_frag)
+ tlsp->ghash_offset = offset + plen;
+ else
+ tlsp->ghash_offset = rounddown2(offset + plen,
+ GMAC_BLOCK_LEN);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p requesting GHASH for offset %u",
+ __func__, tlsp, tlsp->ghash_offset);
+#endif
+ m_snd_tag_ref(&tlsp->com);
+
+ txq->kern_tls_ghash_requested++;
+ }
+
+ /* Copy the WR built in the scratch pad into the real ring. */
+ if (using_scratch) {
+ out = dst;
+ copy_to_txd(eq, txq->ss, &out, wr_len);
+ }
+
+ txq->kern_tls_records++;
+ txq->kern_tls_octets += m_tls->m_len;
+ if (split_mode) {
+ txq->kern_tls_splitmode++;
+ txq->kern_tls_waste += leading_waste + trailing_waste;
+ }
+ if (need_lso)
+ txq->kern_tls_lso++;
+
+ /* Only the last WR of a chain owns (and later frees) the mbuf. */
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+/*
+ * Top-level NIC TLS transmit entry point: write one TLS work request
+ * per M_EXTPG record mbuf in the chain hanging off 'm'.  The TCP
+ * sequence number is advanced by each record's length.  Returns the
+ * total number of descriptors consumed.
+ */
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tlspcb *tlsp;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ struct ether_header *eh;
+ tcp_seq tcp_seqno;
+ u_int ndesc, pidx, totdesc;
+ uint16_t eh_type, mss;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ totdesc = 0;
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ }
+
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ pidx = eq->pidx;
+
+ /* Determine MSS: TSO metadata, cached value, or derived from MTU. */
+ if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+ mss = m->m_pkthdr.tso_segsz;
+ tlsp->prev_mss = mss;
+ } else if (tlsp->prev_mss != 0)
+ mss = tlsp->prev_mss;
+ else
+ mss = if_getmtu(tlsp->vi->ifp) -
+ (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
+
+ /* Fetch the starting TCP sequence number for this chain. */
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__, m->m_pkthdr.len,
+ tcp_seqno);
+#endif
+ KASSERT(!tlsp->ghash_pending, ("%s: GHASH pending for send", __func__));
+
+ /*
+ * Iterate over each TLS record constructing a work request
+ * for that record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
+ available - totdesc, tcp_seqno, pidx, eh_type, mss);
+ totdesc += ndesc;
+ IDXINCR(pidx, ndesc, eq->sidx);
+ dst = &eq->desc[pidx];
+
+ tcp_seqno += m_tls->m_len;
+ }
+
+ /*
+ * Queue another packet if this was a GCM request that didn't
+ * request a GHASH response.
+ */
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM && !tlsp->ghash_pending)
+ ktls_queue_next_packet(tlsp, true);
+
+ MPASS(totdesc <= available);
+ return (totdesc);
+}
+
+/*
+ * Release a kTLS send tag: return the key-context memory on the card
+ * (if one was allocated) and free the software state.  No packets may
+ * still be pending when this runs.
+ */
+static void
+t7_tls_tag_free(struct m_snd_tag *mst)
+{
+ struct adapter *sc;
+ struct tlspcb *tlsp;
+
+ tlsp = mst_to_tls(mst);
+ sc = tlsp->sc;
+
+ CTR2(KTR_CXGBE, "%s: %p", __func__, tlsp);
+
+ if (tlsp->tx_key_addr >= 0)
+ t4_free_tls_keyid(sc, tlsp->tx_key_addr);
+
+ KASSERT(mbufq_len(&tlsp->pending_mbufs) == 0,
+ ("%s: pending mbufs", __func__));
+
+ zfree(tlsp, M_CXGBE);
+}
+
+/*
+ * Handle a CPL_FW6_PLD reply carrying a previously requested partial
+ * GHASH.  The tlspcb pointer travels in the CPL cookie.  Copies the
+ * hash into the tlspcb (from the LCB words or the trailing payload,
+ * depending on how it was requested), marks it valid, and queues the
+ * next pending packet.  Drops the send-tag reference taken when the
+ * hash was requested.
+ */
+static int
+ktls_fw6_pld(struct sge_iq *iq, const struct rss_header *rss,
+ struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ struct tlspcb *tlsp;
+ const void *ghash;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+
+ tlsp = (struct tlspcb *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
+ KASSERT(cpl->data[0] == 0, ("%s: error status returned", __func__));
+
+ TXQ_LOCK(tlsp->txq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p received GHASH for offset %u%s", __func__, tlsp,
+ tlsp->ghash_offset, tlsp->ghash_lcb ? " in LCB" : "");
+#endif
+ if (tlsp->ghash_lcb)
+ ghash = &cpl->data[2];
+ else
+ ghash = cpl + 1;
+ memcpy(tlsp->ghash, ghash, AES_GMAC_HASH_LEN);
+ tlsp->ghash_valid = true;
+ tlsp->ghash_pending = false;
+ tlsp->txq->kern_tls_ghash_received++;
+
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+
+ m_snd_tag_rele(&tlsp->com);
+ m_freem(m);
+ return (0);
+}
+
+/*
+ * Module load: allocate the aligned zero buffer used as the source of
+ * the final GHASH fragment and register the CPL_FW6_PLD handler for
+ * kTLS GHASH replies.
+ */
+void
+t7_ktls_modload(void)
+{
+ zero_buffer = malloc_aligned(AES_GMAC_HASH_LEN, AES_GMAC_HASH_LEN,
+ M_CXGBE, M_ZERO | M_WAITOK);
+ zero_buffer_pa = vtophys(zero_buffer);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, ktls_fw6_pld,
+ CPL_FW6_COOKIE_KTLS);
+}
+
+/*
+ * Module unload: free the zero buffer and deregister the CPL_FW6_PLD
+ * handler registered at load time.
+ */
+void
+t7_ktls_modunload(void)
+{
+ free(zero_buffer, M_CXGBE);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL, CPL_FW6_COOKIE_KTLS);
+}
+
+#else
+
+/* Stub when kernel TLS support is not compiled in: refuse the tag. */
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ return (ENXIO);
+}
+
+/* Stub when kernel TLS support is not compiled in: reject the mbuf. */
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ return (EINVAL);
+}
+
+/*
+ * Stub when kernel TLS support is not compiled in; unreachable since
+ * t7_ktls_parse_pkt() never accepts a packet in this configuration.
+ */
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ panic("can't happen");
+}
+
+/* No-op load/unload hooks when kernel TLS support is not compiled in. */
+void
+t7_ktls_modload(void)
+{
+}
+
+void
+t7_ktls_modunload(void)
+{
+}
+
+#endif
diff --git a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
index b8e6eeba0280..2cd24c635325 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
@@ -32,19 +32,6 @@
#include "cudbg.h"
#include "cudbg_lib_common.h"
-enum {
- SF_ATTEMPTS = 10, /* max retries for SF operations */
-
- /* flash command opcodes */
- SF_PROG_PAGE = 2, /* program page */
- SF_WR_DISABLE = 4, /* disable writes */
- SF_RD_STATUS = 5, /* read status register */
- SF_WR_ENABLE = 6, /* enable writes */
- SF_RD_DATA_FAST = 0xb, /* read flash */
- SF_RD_ID = 0x9f, /* read ID */
- SF_ERASE_SECTOR = 0xd8, /* erase sector */
-};
-
int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size);
int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
u32 start_address);
@@ -56,10 +43,12 @@ update_skip_size(struct cudbg_flash_sec_info *sec_info, u32 size)
}
static
-void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
- int sector_nu, int avail)
+void set_sector_availability(struct adapter *adap,
+ struct cudbg_flash_sec_info *sec_info, int sector_nu, int avail)
{
- sector_nu -= CUDBG_START_SEC;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, NULL);
+
+ sector_nu -= start / SF_SEC_SIZE;;
if (avail)
set_dbg_bitmap(sec_info->sec_bitmap, sector_nu);
else
@@ -68,13 +57,17 @@ void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
/* This function will return empty sector available for filling */
static int
-find_empty_sec(struct cudbg_flash_sec_info *sec_info)
+find_empty_sec(struct adapter *adap, struct cudbg_flash_sec_info *sec_info)
{
int i, index, bit;
-
- for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR; i++) {
- index = (i - CUDBG_START_SEC) / 8;
- bit = (i - CUDBG_START_SEC) % 8;
+ unsigned int len = 0;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &len);
+
+ start /= SF_SEC_SIZE; /* addr -> sector */
+ len /= SF_SEC_SIZE;
+ for (i = start; i < start + len; i++) {
+ index = (i - start) / 8;
+ bit = (i - start) % 8;
if (!(sec_info->sec_bitmap[index] & (1 << bit)))
return i;
}
@@ -102,7 +95,7 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff,
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_hdr = sec_info->sec_data + sec_hdr_start_addr;
flash_hdr = (struct cudbg_flash_hdr *)(sec_hdr);
@@ -166,11 +159,13 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
u32 space_left;
int rc = 0;
int sec;
+ unsigned int cudbg_max_size = 0;
+ t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &cudbg_max_size);
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_data_size = sec_hdr_start_addr;
cudbg_init->print("\tWriting %u bytes to flash\n", cur_entity_size);
@@ -191,12 +186,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
flash_hdr = (struct cudbg_flash_hdr *)(sec_info->sec_data +
sec_hdr_start_addr);
- if (flash_hdr->data_len > CUDBG_FLASH_SIZE) {
+ if (flash_hdr->data_len > cudbg_max_size) {
rc = CUDBG_STATUS_FLASH_FULL;
goto out;
}
- space_left = CUDBG_FLASH_SIZE - flash_hdr->data_len;
+ space_left = cudbg_max_size - flash_hdr->data_len;
if (cur_entity_size > space_left) {
rc = CUDBG_STATUS_FLASH_FULL;
@@ -204,10 +199,11 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
}
while (cur_entity_size > 0) {
- sec = find_empty_sec(sec_info);
+ sec = find_empty_sec(adap, sec_info);
if (sec_info->par_sec) {
sec_data_offset = sec_info->par_sec_offset;
- set_sector_availability(sec_info, sec_info->par_sec, 0);
+ set_sector_availability(adap, sec_info,
+ sec_info->par_sec, 0);
sec_info->par_sec = 0;
sec_info->par_sec_offset = 0;
@@ -230,13 +226,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
(void *)((char *)dbg_buff->data + start_offset),
tmp_size);
- rc = write_flash(adap, sec, sec_info->sec_data,
- CUDBG_SF_SECTOR_SIZE);
+ rc = write_flash(adap, sec, sec_info->sec_data, SF_SEC_SIZE);
if (rc)
goto out;
cur_entity_size -= tmp_size;
- set_sector_availability(sec_info, sec, 1);
+ set_sector_availability(adap, sec_info, sec, 1);
start_offset += tmp_size;
}
out:
@@ -247,19 +242,14 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size)
{
unsigned int addr;
unsigned int i, n;
- unsigned int sf_sec_size;
int rc = 0;
u8 *ptr = (u8 *)data;
- sf_sec_size = adap->params.sf_size/adap->params.sf_nsec;
-
- addr = start_sec * CUDBG_SF_SECTOR_SIZE;
- i = DIV_ROUND_UP(size,/* # of sectors spanned */
- sf_sec_size);
+ addr = start_sec * SF_SEC_SIZE;
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE);
- rc = t4_flash_erase_sectors(adap, start_sec,
- start_sec + i - 1);
+ rc = t4_flash_erase_sectors(adap, start_sec, start_sec + i - 1);
/*
* If size == 0 then we're simply erasing the FLASH sectors associated
* with the on-adapter OptionROM Configuration File.
@@ -337,6 +327,9 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
u32 data_offset = 0;
u32 i, j;
int rc;
+ unsigned int cudbg_len = 0;
+ int cudbg_start_sec = t4_flash_loc_start(adap, FLASH_LOC_CUDBG,
+ &cudbg_len) / SF_SEC_SIZE;
rc = t4_get_flash_params(adap);
if (rc) {
@@ -348,7 +341,7 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
if (!data_flag) {
/* fill header */
@@ -357,14 +350,14 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
* have older filled sector also
*/
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
- rc = read_flash(adap, CUDBG_START_SEC, &flash_hdr,
+ rc = read_flash(adap, cudbg_start_sec, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
if (flash_hdr.signature == CUDBG_FL_SIGNATURE) {
sec_info->max_timestamp = flash_hdr.timestamp;
} else {
- rc = read_flash(adap, CUDBG_START_SEC + 1,
+ rc = read_flash(adap, cudbg_start_sec + 1,
&flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
@@ -383,8 +376,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding max sequence number because max sequenced
* sector has updated header
*/
- for (i = CUDBG_START_SEC; i <
- CUDBG_SF_MAX_SECTOR; i++) {
+ for (i = cudbg_start_sec; i < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; i++) {
memset(&flash_hdr, 0,
sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, i, &flash_hdr,
@@ -423,7 +416,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding sector sequence sorted */
for (i = 1; i <= sec_info->max_seq_no; i++) {
- for (j = CUDBG_START_SEC; j < CUDBG_SF_MAX_SECTOR; j++) {
+ for (j = cudbg_start_sec; j < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; j++) {
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, j, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
@@ -434,10 +428,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
sec_info->max_timestamp ==
flash_hdr.timestamp &&
flash_hdr.sec_seq_no == i) {
- if (size + total_hdr_size >
- CUDBG_SF_SECTOR_SIZE)
- tmp_size = CUDBG_SF_SECTOR_SIZE -
- total_hdr_size;
+ if (size + total_hdr_size > SF_SEC_SIZE)
+ tmp_size = SF_SEC_SIZE - total_hdr_size;
else
tmp_size = size;
@@ -468,7 +460,7 @@ int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
unsigned int addr, i, n;
int rc;
u32 *ptr = (u32 *)data;
- addr = start_sec * CUDBG_SF_SECTOR_SIZE + start_address;
+ addr = start_sec * SF_SEC_SIZE + start_address;
size = size / 4;
for (i = 0; i < size; i += SF_PAGE_SIZE) {
if ((size - i) < SF_PAGE_SIZE)
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.c b/sys/dev/cxgbe/cudbg/cudbg_lib.c
index a36c53f68223..f0273349263a 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib.c
@@ -155,23 +155,25 @@ static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
u32 flash_data_offset;
u32 data_hdr_size;
int rc = -1;
+ unsigned int cudbg_len;
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
+ t4_flash_loc_start(cudbg_init->adap, FLASH_LOC_CUDBG, &cudbg_len);
- flash_data_offset = (FLASH_CUDBG_NSECS *
+ flash_data_offset = ((cudbg_len / SF_SEC_SIZE) *
(sizeof(struct cudbg_flash_hdr) +
data_hdr_size)) +
(cur_entity_data_offset - data_hdr_size);
- if (flash_data_offset > CUDBG_FLASH_SIZE) {
+ if (flash_data_offset > cudbg_len) {
update_skip_size(sec_info, cur_entity_size);
if (cudbg_init->verbose)
cudbg_init->print("Large entity skipping...\n");
return rc;
}
- remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
+ remain_flash_size = cudbg_len - flash_data_offset;
if (cur_entity_size > remain_flash_size) {
update_skip_size(sec_info, cur_entity_size);
@@ -1292,6 +1294,7 @@ static int collect_macstats(struct cudbg_init *pdbg_init,
mac_stats_buff->port_count = n;
for (i = 0; i < mac_stats_buff->port_count; i++)
+ /* Incorrect, should use hport instead of i */
t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
rc = write_compression_hdr(&scratch_buff, dbg_buff);
@@ -1967,7 +1970,7 @@ static int collect_fw_devlog(struct cudbg_init *pdbg_init,
u32 offset;
int rc = 0, i;
- rc = t4_init_devlog_params(padap, 1);
+ rc = t4_init_devlog_ncores_params(padap, 1);
if (rc < 0) {
pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
index 86390eb4399d..b6a85f436db0 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
@@ -59,11 +59,6 @@
#include "common/t4_hw.h"
#endif
-#define CUDBG_SF_MAX_SECTOR (FLASH_CUDBG_START_SEC + FLASH_CUDBG_NSECS)
-#define CUDBG_SF_SECTOR_SIZE SF_SEC_SIZE
-#define CUDBG_START_SEC FLASH_CUDBG_START_SEC
-#define CUDBG_FLASH_SIZE FLASH_CUDBG_MAX_SIZE
-
#define CUDBG_EXT_DATA_BIT 0
#define CUDBG_EXT_DATA_VALID (1 << CUDBG_EXT_DATA_BIT)
@@ -121,7 +116,7 @@ struct cudbg_flash_sec_info {
u32 hdr_data_len; /* Total data */
u32 skip_size; /* Total size of large entities. */
u64 max_timestamp;
- char sec_data[CUDBG_SF_SECTOR_SIZE];
+ char sec_data[SF_SEC_SIZE];
u8 sec_bitmap[8];
};
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index d805642541d3..9cdfd0fb9652 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -976,42 +976,6 @@ icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
return (0);
}
-/*
- * Request/response structure used to find out the adapter offloading a socket.
- */
-struct find_ofld_adapter_rr {
- struct socket *so;
- struct adapter *sc; /* result */
-};
-
-static void
-find_offload_adapter(struct adapter *sc, void *arg)
-{
- struct find_ofld_adapter_rr *fa = arg;
- struct socket *so = fa->so;
- struct tom_data *td = sc->tom_softc;
- struct tcpcb *tp;
- struct inpcb *inp;
-
- /* Non-TCP were filtered out earlier. */
- MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
-
- if (fa->sc != NULL)
- return; /* Found already. */
-
- if (td == NULL)
- return; /* TOE not enabled on this adapter. */
-
- inp = sotoinpcb(so);
- INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) == 0) {
- tp = intotcpcb(inp);
- if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
- fa->sc = sc; /* Found. */
- }
- INP_WUNLOCK(inp);
-}
-
static bool
is_memfree(struct adapter *sc)
{
@@ -1025,48 +989,6 @@ is_memfree(struct adapter *sc)
return (true);
}
-/* XXXNP: move this to t4_tom. */
-static void
-send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
-{
- struct wrqe *wr;
- struct fw_flowc_wr *flowc;
- const u_int nparams = 1;
- u_int flowclen;
- struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
-
- flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
-
- wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
- if (wr == NULL) {
- /* XXX */
- panic("%s: allocation failure.", __func__);
- }
- flowc = wrtod(wr);
- memset(flowc, 0, wr->wr_len);
-
- flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
- V_FW_FLOWC_WR_NPARAMS(nparams));
- flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
- V_FW_WR_FLOWID(toep->tid));
-
- flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
- flowc->mnemval[0].val = htobe32(maxlen);
-
- KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
- ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
- txsd->tx_credits = howmany(flowclen, 16);
- txsd->plen = 0;
- KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
- ("%s: not enough credits (%d)", __func__, toep->tx_credits));
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
-}
-
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
@@ -1095,7 +1017,6 @@ int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct inpcb *inp;
@@ -1139,15 +1060,11 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
fdrop(fp, curthread);
ICL_CONN_UNLOCK(ic);
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ icc->sc = find_offload_adapter(so);
+ if (icc->sc == NULL) {
error = EINVAL;
goto out;
}
- icc->sc = fa.sc;
max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length;
max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length;
@@ -1205,7 +1122,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
toep->params.ulp_mode = ULP_MODE_ISCSI;
toep->ulpcb = icc;
- send_iscsi_flowc_wr(icc->sc, toep,
+ send_txdataplen_max_flowc_wr(icc->sc, toep,
roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg));
set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
INP_WUNLOCK(inp);
@@ -1778,7 +1695,6 @@ cxgbei_limits(struct adapter *sc, void *arg)
static int
cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
{
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct adapter *sc;
@@ -1801,17 +1717,13 @@ cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
return (EINVAL);
}
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ sc = find_offload_adapter(so);
+ if (sc == NULL) {
fdrop(fp, curthread);
return (ENXIO);
}
fdrop(fp, curthread);
- sc = fa.sc;
error = begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims");
if (error != 0)
return (error);
diff --git a/sys/dev/cxgbe/firmware/t4fw_interface.h b/sys/dev/cxgbe/firmware/t4fw_interface.h
index 2794bae9474b..5874f0343b03 100644
--- a/sys/dev/cxgbe/firmware/t4fw_interface.h
+++ b/sys/dev/cxgbe/firmware/t4fw_interface.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2012-2017 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012-2017, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,8 +66,8 @@ enum fw_retval {
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
- FW_SCSI_UNDER_FLOW_ERR = 139, /* */
- FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
FW_SCSI_DDP_ERR = 141, /* DDP error*/
FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
FW_SCSI_IO_BLOCK = 143, /* IO is going to be blocked due to resource failure */
@@ -85,7 +84,7 @@ enum fw_memtype {
FW_MEMTYPE_FLASH = 0x4,
FW_MEMTYPE_INTERNAL = 0x5,
FW_MEMTYPE_EXTMEM1 = 0x6,
- FW_MEMTYPE_HMA = 0x7,
+ FW_MEMTYPE_HMA = 0x7,
};
/******************************************************************************
@@ -106,10 +105,14 @@ enum fw_wr_opcodes {
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_OFLD_TX_DATA_V2_WR = 0x0f,
FW_CMD_WR = 0x10,
FW_ETH_TX_PKT_VM_WR = 0x11,
FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_RI_RES_WR = 0x0c,
+ FW_QP_RES_WR = FW_RI_RES_WR,
+ /* iwarp wr used from rdma kernel and user space */
+ FW_V2_NVMET_TX_DATA_WR = 0x13,
FW_RI_RDMA_WRITE_WR = 0x14,
FW_RI_SEND_WR = 0x15,
FW_RI_RDMA_READ_WR = 0x16,
@@ -118,6 +121,15 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_RDMA_WRITE_CMPL_WR = 0x21,
+ /* rocev2 wr used from rdma kernel and user space */
+ FW_RI_V2_RDMA_WRITE_WR = 0x22,
+ FW_RI_V2_SEND_WR = 0x23,
+ FW_RI_V2_RDMA_READ_WR = 0x24,
+ FW_RI_V2_BIND_MW_WR = 0x25,
+ FW_RI_V2_FR_NSMR_WR = 0x26,
+ FW_RI_V2_ATOMIC_WR = 0x27,
+ FW_NVMET_V2_FR_NSMR_WR = 0x28,
+ FW_RI_V2_INV_LSTAG_WR = 0x1e,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_RI_SEND_IMMEDIATE_WR = 0x15,
FW_RI_ATOMIC_WR = 0x16,
@@ -138,10 +150,11 @@ enum fw_wr_opcodes {
FW_POFCOE_TCB_WR = 0x42,
FW_POFCOE_ULPTX_WR = 0x43,
FW_ISCSI_TX_DATA_WR = 0x45,
- FW_PTP_TX_PKT_WR = 0x46,
+ FW_PTP_TX_PKT_WR = 0x46,
FW_TLSTX_DATA_WR = 0x68,
FW_TLS_TUNNEL_OFLD_WR = 0x69,
FW_CRYPTO_LOOKASIDE_WR = 0x6d,
+ FW_CRYPTO_UPDATE_SA_WR = 0x6e,
FW_COISCSI_TGT_WR = 0x70,
FW_COISCSI_TGT_CONN_WR = 0x71,
FW_COISCSI_TGT_XMIT_WR = 0x72,
@@ -149,7 +162,8 @@ enum fw_wr_opcodes {
FW_ISNS_WR = 0x75,
FW_ISNS_XMIT_WR = 0x76,
FW_FILTER2_WR = 0x77,
- FW_LASTC2E_WR = 0x80
+ /* FW_LASTC2E_WR = 0x80 */
+ FW_LASTC2E_WR = 0xB0
};
/*
@@ -308,7 +322,7 @@ enum fw_filter_wr_cookie {
enum fw_filter_wr_nat_mode {
FW_FILTER_WR_NATMODE_NONE = 0,
- FW_FILTER_WR_NATMODE_DIP ,
+ FW_FILTER_WR_NATMODE_DIP,
FW_FILTER_WR_NATMODE_DIPDP,
FW_FILTER_WR_NATMODE_DIPDPSIP,
FW_FILTER_WR_NATMODE_DIPDPSP,
@@ -387,7 +401,7 @@ struct fw_filter2_wr {
__u8 newlip[16];
__u8 newfip[16];
__be32 natseqcheck;
- __be32 r9;
+ __be32 rocev2_qpn;
__be64 r10;
__be64 r11;
__be64 r12;
@@ -675,6 +689,19 @@ struct fw_filter2_wr {
#define G_FW_FILTER_WR_MATCHTYPEM(x) \
(((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
+#define S_FW_FILTER2_WR_ROCEV2 31
+#define M_FW_FILTER2_WR_ROCEV2 0x1
+#define V_FW_FILTER2_WR_ROCEV2(x) ((x) << S_FW_FILTER2_WR_ROCEV2)
+#define G_FW_FILTER2_WR_ROCEV2(x) \
+ (((x) >> S_FW_FILTER2_WR_ROCEV2) & M_FW_FILTER2_WR_ROCEV2)
+#define F_FW_FILTER2_WR_ROCEV2 V_FW_FILTER2_WR_ROCEV2(1U)
+
+#define S_FW_FILTER2_WR_QPN 0
+#define M_FW_FILTER2_WR_QPN 0xffffff
+#define V_FW_FILTER2_WR_QPN(x) ((x) << S_FW_FILTER2_WR_QPN)
+#define G_FW_FILTER2_WR_QPN(x) \
+ (((x) >> S_FW_FILTER2_WR_QPN) & M_FW_FILTER2_WR_QPN)
+
struct fw_ulptx_wr {
__be32 op_to_compl;
__be32 flowid_len16;
@@ -1034,7 +1061,10 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_SND_SCALE = 13,
FW_FLOWC_MNEM_RCV_SCALE = 14,
FW_FLOWC_MNEM_ULP_MODE = 15,
- FW_FLOWC_MNEM_MAX = 16,
+ FW_FLOWC_MNEM_EQID = 16,
+ FW_FLOWC_MNEM_CONG_ALG = 17,
+ FW_FLOWC_MNEM_TXDATAPLEN_MIN = 18,
+ FW_FLOWC_MNEM_MAX = 19,
};
struct fw_flowc_mnemval {
@@ -1153,6 +1183,55 @@ struct fw_ofld_tx_data_wr {
#define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \
(((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
+struct fw_ofld_tx_data_v2_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+};
+
+#define S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 31
+#define M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define G_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define F_FW_OFLD_TX_DATA_V2_WR_LSODISABLE \
+ V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 30
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 29
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_FLAGS 0
+#define M_FW_OFLD_TX_DATA_V2_WR_FLAGS 0xfffffff
+#define V_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+#define G_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_FLAGS) & M_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+
struct fw_cmd_wr {
__be32 op_dma;
__be32 len16_pkd;
@@ -1218,8 +1297,15 @@ enum fw_ri_wr_opcode {
FW_RI_FAST_REGISTER = 0xd,
FW_RI_LOCAL_INV = 0xe,
#endif
+ /* Chelsio specific */
FW_RI_SGE_EC_CR_RETURN = 0xf,
FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT,
+ FW_RI_SEND_IMMEDIATE = FW_RI_RDMA_INIT,
+
+ FW_RI_ROCEV2_SEND = 0x0,
+ FW_RI_ROCEV2_WRITE = 0x0,
+ FW_RI_ROCEV2_SEND_WITH_INV = 0x5,
+ FW_RI_ROCEV2_SEND_IMMEDIATE = 0xa,
};
enum fw_ri_wr_flags {
@@ -1229,7 +1315,8 @@ enum fw_ri_wr_flags {
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
FW_RI_RDMA_READ_INVALIDATE = 0x20,
- FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40,
+ //FW_RI_REPLAYED_WR_FLAG = 0x80,
};
enum fw_ri_mpa_attrs {
@@ -1522,18 +1609,302 @@ struct fw_ri_cqe {
#define G_FW_RI_CQE_TYPE(x) \
(((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE)
-enum fw_ri_res_type {
+enum fw_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
FW_RI_RES_TYPE_SRQ,
+ FW_QP_RES_TYPE_SQ = FW_RI_RES_TYPE_SQ,
+ FW_QP_RES_TYPE_CQ = FW_RI_RES_TYPE_CQ,
};
-enum fw_ri_res_op {
+enum fw_res_op {
FW_RI_RES_OP_WRITE,
FW_RI_RES_OP_RESET,
+ FW_QP_RES_OP_WRITE = FW_RI_RES_OP_WRITE,
+ FW_QP_RES_OP_RESET = FW_RI_RES_OP_RESET,
+};
+
+enum fw_qp_transport_type {
+ FW_QP_TRANSPORT_TYPE_IWARP,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_UD,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_RC,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_INI,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_TGT,
+ FW_QP_TRANSPORT_TYPE_NVMET,
+ FW_QP_TRANSPORT_TYPE_TOE,
+ FW_QP_TRANSPORT_TYPE_ISCSI,
+};
+
+struct fw_qp_res {
+ union fw_qp_restype {
+ struct fw_qp_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_qp_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_qp_res_wr {
+ __be32 op_to_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_qp_res res[0];
+#endif
};
+#define S_FW_QP_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_QP_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_QP_RES_WR_TRANSPORT_TYPE)
+#define G_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_QP_RES_WR_TRANSPORT_TYPE) & M_FW_QP_RES_WR_TRANSPORT_TYPE)
+
+#define S_FW_QP_RES_WR_VFN 8
+#define M_FW_QP_RES_WR_VFN 0xff
+#define V_FW_QP_RES_WR_VFN(x) ((x) << S_FW_QP_RES_WR_VFN)
+#define G_FW_QP_RES_WR_VFN(x) \
+ (((x) >> S_FW_QP_RES_WR_VFN) & M_FW_QP_RES_WR_VFN)
+
+#define S_FW_QP_RES_WR_NRES 0
+#define M_FW_QP_RES_WR_NRES 0xff
+#define V_FW_QP_RES_WR_NRES(x) ((x) << S_FW_QP_RES_WR_NRES)
+#define G_FW_QP_RES_WR_NRES(x) \
+ (((x) >> S_FW_QP_RES_WR_NRES) & M_FW_QP_RES_WR_NRES)
+
+#define S_FW_QP_RES_WR_FETCHSZM 26
+#define M_FW_QP_RES_WR_FETCHSZM 0x1
+#define V_FW_QP_RES_WR_FETCHSZM(x) ((x) << S_FW_QP_RES_WR_FETCHSZM)
+#define G_FW_QP_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHSZM) & M_FW_QP_RES_WR_FETCHSZM)
+#define F_FW_QP_RES_WR_FETCHSZM V_FW_QP_RES_WR_FETCHSZM(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGNS 25
+#define M_FW_QP_RES_WR_STATUSPGNS 0x1
+#define V_FW_QP_RES_WR_STATUSPGNS(x) ((x) << S_FW_QP_RES_WR_STATUSPGNS)
+#define G_FW_QP_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGNS) & M_FW_QP_RES_WR_STATUSPGNS)
+#define F_FW_QP_RES_WR_STATUSPGNS V_FW_QP_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGRO 24
+#define M_FW_QP_RES_WR_STATUSPGRO 0x1
+#define V_FW_QP_RES_WR_STATUSPGRO(x) ((x) << S_FW_QP_RES_WR_STATUSPGRO)
+#define G_FW_QP_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGRO) & M_FW_QP_RES_WR_STATUSPGRO)
+#define F_FW_QP_RES_WR_STATUSPGRO V_FW_QP_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_QP_RES_WR_FETCHNS 23
+#define M_FW_QP_RES_WR_FETCHNS 0x1
+#define V_FW_QP_RES_WR_FETCHNS(x) ((x) << S_FW_QP_RES_WR_FETCHNS)
+#define G_FW_QP_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHNS) & M_FW_QP_RES_WR_FETCHNS)
+#define F_FW_QP_RES_WR_FETCHNS V_FW_QP_RES_WR_FETCHNS(1U)
+
+#define S_FW_QP_RES_WR_FETCHRO 22
+#define M_FW_QP_RES_WR_FETCHRO 0x1
+#define V_FW_QP_RES_WR_FETCHRO(x) ((x) << S_FW_QP_RES_WR_FETCHRO)
+#define G_FW_QP_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHRO) & M_FW_QP_RES_WR_FETCHRO)
+#define F_FW_QP_RES_WR_FETCHRO V_FW_QP_RES_WR_FETCHRO(1U)
+
+#define S_FW_QP_RES_WR_HOSTFCMODE 20
+#define M_FW_QP_RES_WR_HOSTFCMODE 0x3
+#define V_FW_QP_RES_WR_HOSTFCMODE(x) ((x) << S_FW_QP_RES_WR_HOSTFCMODE)
+#define G_FW_QP_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_HOSTFCMODE) & M_FW_QP_RES_WR_HOSTFCMODE)
+
+#define S_FW_QP_RES_WR_CPRIO 19
+#define M_FW_QP_RES_WR_CPRIO 0x1
+#define V_FW_QP_RES_WR_CPRIO(x) ((x) << S_FW_QP_RES_WR_CPRIO)
+#define G_FW_QP_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_CPRIO) & M_FW_QP_RES_WR_CPRIO)
+#define F_FW_QP_RES_WR_CPRIO V_FW_QP_RES_WR_CPRIO(1U)
+
+#define S_FW_QP_RES_WR_ONCHIP 18
+#define M_FW_QP_RES_WR_ONCHIP 0x1
+#define V_FW_QP_RES_WR_ONCHIP(x) ((x) << S_FW_QP_RES_WR_ONCHIP)
+#define G_FW_QP_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_QP_RES_WR_ONCHIP) & M_FW_QP_RES_WR_ONCHIP)
+#define F_FW_QP_RES_WR_ONCHIP V_FW_QP_RES_WR_ONCHIP(1U)
+
+#define S_FW_QP_RES_WR_PCIECHN 16
+#define M_FW_QP_RES_WR_PCIECHN 0x3
+#define V_FW_QP_RES_WR_PCIECHN(x) ((x) << S_FW_QP_RES_WR_PCIECHN)
+#define G_FW_QP_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_QP_RES_WR_PCIECHN) & M_FW_QP_RES_WR_PCIECHN)
+
+#define S_FW_QP_RES_WR_IQID 0
+#define M_FW_QP_RES_WR_IQID 0xffff
+#define V_FW_QP_RES_WR_IQID(x) ((x) << S_FW_QP_RES_WR_IQID)
+#define G_FW_QP_RES_WR_IQID(x) \
+ (((x) >> S_FW_QP_RES_WR_IQID) & M_FW_QP_RES_WR_IQID)
+
+#define S_FW_QP_RES_WR_DCAEN 31
+#define M_FW_QP_RES_WR_DCAEN 0x1
+#define V_FW_QP_RES_WR_DCAEN(x) ((x) << S_FW_QP_RES_WR_DCAEN)
+#define G_FW_QP_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_DCAEN) & M_FW_QP_RES_WR_DCAEN)
+#define F_FW_QP_RES_WR_DCAEN V_FW_QP_RES_WR_DCAEN(1U)
+
+#define S_FW_QP_RES_WR_DCACPU 26
+#define M_FW_QP_RES_WR_DCACPU 0x1f
+#define V_FW_QP_RES_WR_DCACPU(x) ((x) << S_FW_QP_RES_WR_DCACPU)
+#define G_FW_QP_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_DCACPU) & M_FW_QP_RES_WR_DCACPU)
+
+#define S_FW_QP_RES_WR_FBMIN 23
+#define M_FW_QP_RES_WR_FBMIN 0x7
+#define V_FW_QP_RES_WR_FBMIN(x) ((x) << S_FW_QP_RES_WR_FBMIN)
+#define G_FW_QP_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMIN) & M_FW_QP_RES_WR_FBMIN)
+
+#define S_FW_QP_RES_WR_FBMAX 20
+#define M_FW_QP_RES_WR_FBMAX 0x7
+#define V_FW_QP_RES_WR_FBMAX(x) ((x) << S_FW_QP_RES_WR_FBMAX)
+#define G_FW_QP_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMAX) & M_FW_QP_RES_WR_FBMAX)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESHO 19
+#define M_FW_QP_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_QP_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESHO)
+#define G_FW_QP_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESHO) & M_FW_QP_RES_WR_CIDXFTHRESHO)
+#define F_FW_QP_RES_WR_CIDXFTHRESHO V_FW_QP_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESH 16
+#define M_FW_QP_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_QP_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESH)
+#define G_FW_QP_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESH) & M_FW_QP_RES_WR_CIDXFTHRESH)
+
+#define S_FW_QP_RES_WR_EQSIZE 0
+#define M_FW_QP_RES_WR_EQSIZE 0xffff
+#define V_FW_QP_RES_WR_EQSIZE(x) ((x) << S_FW_QP_RES_WR_EQSIZE)
+#define G_FW_QP_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_EQSIZE) & M_FW_QP_RES_WR_EQSIZE)
+
+#define S_FW_QP_RES_WR_IQANDST 15
+#define M_FW_QP_RES_WR_IQANDST 0x1
+#define V_FW_QP_RES_WR_IQANDST(x) ((x) << S_FW_QP_RES_WR_IQANDST)
+#define G_FW_QP_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDST) & M_FW_QP_RES_WR_IQANDST)
+#define F_FW_QP_RES_WR_IQANDST V_FW_QP_RES_WR_IQANDST(1U)
+
+#define S_FW_QP_RES_WR_IQANUS 14
+#define M_FW_QP_RES_WR_IQANUS 0x1
+#define V_FW_QP_RES_WR_IQANUS(x) ((x) << S_FW_QP_RES_WR_IQANUS)
+#define G_FW_QP_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUS) & M_FW_QP_RES_WR_IQANUS)
+#define F_FW_QP_RES_WR_IQANUS V_FW_QP_RES_WR_IQANUS(1U)
+
+#define S_FW_QP_RES_WR_IQANUD 12
+#define M_FW_QP_RES_WR_IQANUD 0x3
+#define V_FW_QP_RES_WR_IQANUD(x) ((x) << S_FW_QP_RES_WR_IQANUD)
+#define G_FW_QP_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUD) & M_FW_QP_RES_WR_IQANUD)
+
+#define S_FW_QP_RES_WR_IQANDSTINDEX 0
+#define M_FW_QP_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_QP_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_QP_RES_WR_IQANDSTINDEX)
+#define G_FW_QP_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDSTINDEX) & M_FW_QP_RES_WR_IQANDSTINDEX)
+
+#define S_FW_QP_RES_WR_IQDROPRSS 15
+#define M_FW_QP_RES_WR_IQDROPRSS 0x1
+#define V_FW_QP_RES_WR_IQDROPRSS(x) ((x) << S_FW_QP_RES_WR_IQDROPRSS)
+#define G_FW_QP_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDROPRSS) & M_FW_QP_RES_WR_IQDROPRSS)
+#define F_FW_QP_RES_WR_IQDROPRSS V_FW_QP_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_QP_RES_WR_IQGTSMODE 14
+#define M_FW_QP_RES_WR_IQGTSMODE 0x1
+#define V_FW_QP_RES_WR_IQGTSMODE(x) ((x) << S_FW_QP_RES_WR_IQGTSMODE)
+#define G_FW_QP_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQGTSMODE) & M_FW_QP_RES_WR_IQGTSMODE)
+#define F_FW_QP_RES_WR_IQGTSMODE V_FW_QP_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_QP_RES_WR_IQPCIECH 12
+#define M_FW_QP_RES_WR_IQPCIECH 0x3
+#define V_FW_QP_RES_WR_IQPCIECH(x) ((x) << S_FW_QP_RES_WR_IQPCIECH)
+#define G_FW_QP_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQPCIECH) & M_FW_QP_RES_WR_IQPCIECH)
+
+#define S_FW_QP_RES_WR_IQDCAEN 11
+#define M_FW_QP_RES_WR_IQDCAEN 0x1
+#define V_FW_QP_RES_WR_IQDCAEN(x) ((x) << S_FW_QP_RES_WR_IQDCAEN)
+#define G_FW_QP_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCAEN) & M_FW_QP_RES_WR_IQDCAEN)
+#define F_FW_QP_RES_WR_IQDCAEN V_FW_QP_RES_WR_IQDCAEN(1U)
+
+#define S_FW_QP_RES_WR_IQDCACPU 6
+#define M_FW_QP_RES_WR_IQDCACPU 0x1f
+#define V_FW_QP_RES_WR_IQDCACPU(x) ((x) << S_FW_QP_RES_WR_IQDCACPU)
+#define G_FW_QP_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCACPU) & M_FW_QP_RES_WR_IQDCACPU)
+
+#define S_FW_QP_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_QP_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_QP_RES_WR_IQINTCNTTHRESH)
+#define G_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQINTCNTTHRESH) & M_FW_QP_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_QP_RES_WR_IQO 3
+#define M_FW_QP_RES_WR_IQO 0x1
+#define V_FW_QP_RES_WR_IQO(x) ((x) << S_FW_QP_RES_WR_IQO)
+#define G_FW_QP_RES_WR_IQO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQO) & M_FW_QP_RES_WR_IQO)
+#define F_FW_QP_RES_WR_IQO V_FW_QP_RES_WR_IQO(1U)
+
+#define S_FW_QP_RES_WR_IQCPRIO 2
+#define M_FW_QP_RES_WR_IQCPRIO 0x1
+#define V_FW_QP_RES_WR_IQCPRIO(x) ((x) << S_FW_QP_RES_WR_IQCPRIO)
+#define G_FW_QP_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQCPRIO) & M_FW_QP_RES_WR_IQCPRIO)
+#define F_FW_QP_RES_WR_IQCPRIO V_FW_QP_RES_WR_IQCPRIO(1U)
+
+#define S_FW_QP_RES_WR_IQESIZE 0
+#define M_FW_QP_RES_WR_IQESIZE 0x3
+#define V_FW_QP_RES_WR_IQESIZE(x) ((x) << S_FW_QP_RES_WR_IQESIZE)
+#define G_FW_QP_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQESIZE) & M_FW_QP_RES_WR_IQESIZE)
+
+#define S_FW_QP_RES_WR_IQNS 31
+#define M_FW_QP_RES_WR_IQNS 0x1
+#define V_FW_QP_RES_WR_IQNS(x) ((x) << S_FW_QP_RES_WR_IQNS)
+#define G_FW_QP_RES_WR_IQNS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQNS) & M_FW_QP_RES_WR_IQNS)
+#define F_FW_QP_RES_WR_IQNS V_FW_QP_RES_WR_IQNS(1U)
+
+#define S_FW_QP_RES_WR_IQRO 30
+#define M_FW_QP_RES_WR_IQRO 0x1
+#define V_FW_QP_RES_WR_IQRO(x) ((x) << S_FW_QP_RES_WR_IQRO)
+#define G_FW_QP_RES_WR_IQRO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQRO) & M_FW_QP_RES_WR_IQRO)
+#define F_FW_QP_RES_WR_IQRO V_FW_QP_RES_WR_IQRO(1U)
+
+
struct fw_ri_res {
union fw_ri_restype {
struct fw_ri_res_sqrq {
@@ -1586,6 +1957,13 @@ struct fw_ri_res_wr {
#endif
};
+#define S_FW_RI_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_RI_RES_WR_TRANSPORT_TYPE)
+#define G_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_RES_WR_TRANSPORT_TYPE) & M_FW_RI_RES_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_RES_WR_VFN 8
#define M_FW_RI_RES_WR_VFN 0xff
#define V_FW_RI_RES_WR_VFN(x) ((x) << S_FW_RI_RES_WR_VFN)
@@ -2092,8 +2470,18 @@ enum fw_ri_init_rqeqid_srq {
FW_RI_INIT_RQEQID_SRQ = 1 << 31,
};
+enum fw_nvmet_ulpsubmode {
+ FW_NVMET_ULPSUBMODE_HCRC = 0x1<<0,
+ FW_NVMET_ULPSUBMODE_DCRC = 0x1<<1,
+ FW_NVMET_ULPSUBMODE_ING_DIR = 0x1<<2,
+ FW_NVMET_ULPSUBMODE_SRQ_ENABLE = 0x1<<3,
+ FW_NVMET_ULPSUBMODE_PER_PDU_CMP = 0x1<<4,
+ FW_NVMET_ULPSUBMODE_PI_ENABLE = 0x1<<5,
+ FW_NVMET_ULPSUBMODE_USER_MODE = 0x1<<6,
+};
+
struct fw_ri_wr {
- __be32 op_compl;
+ __be32 op_compl; /* op_to_transport_type */
__be32 flowid_len16;
__u64 cookie;
union fw_ri {
@@ -2123,6 +2511,55 @@ struct fw_ri_wr {
struct fw_ri_send_wr send;
} u;
} init;
+ struct fw_ri_rocev2_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 rocev2_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 psn_pkd;
+ __be32 epsn_pkd;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be32 q_key;
+ __u8 pkthdrsize;
+ __u8 r;
+ __be16 p_key;
+ //struct cpl_tx_tnl_lso tnl_lso;
+ __u8 tnl_lso[48]; /* cpl_tx_tnl_lso + cpl_tx_pkt_xt */
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_immd pkthdr[0];
+#endif
+ } rocev2_init;
+ struct fw_ri_nvmet_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 nvmt_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 r4[4];
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __u8 ulpsubmode;
+ __u8 nvmt_pda_cmp_imm_sz;
+ __be16 r7;
+ __be32 tpt_offset_t10_config;
+ __be32 r8[2];
+ } nvmet_init;
struct fw_ri_fini {
__u8 type;
__u8 r3[7];
@@ -2137,6 +2574,12 @@ struct fw_ri_wr {
} u;
};
+#define S_FW_RI_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_WR_TRANSPORT_TYPE(x) ((x) << S_FW_RI_WR_TRANSPORT_TYPE)
+#define G_FW_RI_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_WR_TRANSPORT_TYPE) & M_FW_RI_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_WR_MPAREQBIT 7
#define M_FW_RI_WR_MPAREQBIT 0x1
#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
@@ -2157,6 +2600,414 @@ struct fw_ri_wr {
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#define S_FW_RI_WR_PSN 0
+#define M_FW_RI_WR_PSN 0xffffff
+#define V_FW_RI_WR_PSN(x) ((x) << S_FW_RI_WR_PSN)
+#define G_FW_RI_WR_PSN(x) (((x) >> S_FW_RI_WR_PSN) & M_FW_RI_WR_PSN)
+
+#define S_FW_RI_WR_EPSN 0
+#define M_FW_RI_WR_EPSN 0xffffff
+#define V_FW_RI_WR_EPSN(x) ((x) << S_FW_RI_WR_EPSN)
+#define G_FW_RI_WR_EPSN(x) (((x) >> S_FW_RI_WR_EPSN) & M_FW_RI_WR_EPSN)
+
+#define S_FW_RI_WR_NVMT_PDA 3
+#define M_FW_RI_WR_NVMT_PDA 0x1f
+#define V_FW_RI_WR_NVMT_PDA(x) ((x) << S_FW_RI_WR_NVMT_PDA)
+#define G_FW_RI_WR_NVMT_PDA(x) \
+ (((x) >> S_FW_RI_WR_NVMT_PDA) & M_FW_RI_WR_NVMT_PDA)
+
+#define S_FW_RI_WR_CMP_IMM_SZ 1
+#define M_FW_RI_WR_CMP_IMM_SZ 0x3
+#define V_FW_RI_WR_CMP_IMM_SZ(x) ((x) << S_FW_RI_WR_CMP_IMM_SZ)
+#define G_FW_RI_WR_CMP_IMM_SZ(x) \
+ (((x) >> S_FW_RI_WR_CMP_IMM_SZ) & M_FW_RI_WR_CMP_IMM_SZ)
+
+#define S_FW_RI_WR_TPT_OFFSET 10
+#define M_FW_RI_WR_TPT_OFFSET 0x3fffff
+#define V_FW_RI_WR_TPT_OFFSET(x) ((x) << S_FW_RI_WR_TPT_OFFSET)
+#define G_FW_RI_WR_TPT_OFFSET(x) \
+ (((x) >> S_FW_RI_WR_TPT_OFFSET) & M_FW_RI_WR_TPT_OFFSET)
+
+#define S_FW_RI_WR_T10_CONFIG 0
+#define M_FW_RI_WR_T10_CONFIG 0x3ff
+#define V_FW_RI_WR_T10_CONFIG(x) ((x) << S_FW_RI_WR_T10_CONFIG)
+#define G_FW_RI_WR_T10_CONFIG(x) \
+ (((x) >> S_FW_RI_WR_T10_CONFIG) & M_FW_RI_WR_T10_CONFIG)
+
+
+/******************************************************************************
+ * R o C E V 2 W O R K R E Q U E S T s
+ **************************************/
+enum fw_rocev2_wr_opcode {
+ /* RC */
+ FW_ROCEV2_RC_SEND_FIRST = 0x00,
+ FW_ROCEV2_RC_SEND_MIDDLE = 0x01,
+ FW_ROCEV2_RC_SEND_LAST = 0x02,
+ FW_ROCEV2_RC_SEND_LAST_WITH_IMMD = 0x03,
+ FW_ROCEV2_RC_SEND_ONLY = 0x04,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_IMMD = 0x05,
+ FW_ROCEV2_RC_RDMA_WRITE_FIRST = 0x06,
+ FW_ROCEV2_RC_RDMA_WRITE_MIDDLE = 0x07,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST = 0x08,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST_WITH_IMMD = 0x09,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY = 0x0a,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY_WITH_IMMD = 0x0b,
+ FW_ROCEV2_RC_RDMA_READ_REQ = 0x0c,
+ FW_ROCEV2_RC_RDMA_READ_RESP_FIRST = 0x0d,
+ FW_ROCEV2_RC_RDMA_READ_RESP_MIDDLE = 0x0e,
+ FW_ROCEV2_RC_RDMA_READ_RESP_LAST = 0x0f,
+ FW_ROCEV2_RC_RDMA_READ_RESP_ONLY = 0x10,
+ FW_ROCEV2_RC_ACK = 0x11,
+ FW_ROCEV2_RC_ATOMIC_ACK = 0x12,
+ FW_ROCEV2_RC_CMP_SWAP = 0x13,
+ FW_ROCEV2_RC_FETCH_ADD = 0x14,
+ FW_ROCEV2_RC_SEND_LAST_WITH_INV = 0x16,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_INV = 0x17,
+
+ /* XRC */
+ FW_ROCEV2_XRC_SEND_FIRST = 0xa0,
+ FW_ROCEV2_XRC_SEND_MIDDLE = 0xa1,
+ FW_ROCEV2_XRC_SEND_LAST = 0xa2,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_IMMD = 0xa3,
+ FW_ROCEV2_XRC_SEND_ONLY = 0xa4,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_IMMD = 0xa5,
+ FW_ROCEV2_XRC_RDMA_WRITE_FIRST = 0xa6,
+ FW_ROCEV2_XRC_RDMA_WRITE_MIDDLE = 0xa7,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST = 0xa8,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST_WITH_IMMD = 0xa9,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY = 0xaa,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY_WITH_IMMD = 0xab,
+ FW_ROCEV2_XRC_RDMA_READ_REQ = 0xac,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_FIRST = 0xad,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_MIDDLE = 0xae,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_LAST = 0xaf,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_ONLY = 0xb0,
+ FW_ROCEV2_XRC_ACK = 0xb1,
+ FW_ROCEV2_XRC_ATOMIC_ACK = 0xb2,
+ FW_ROCEV2_XRC_CMP_SWAP = 0xb3,
+ FW_ROCEV2_XRC_FETCH_ADD = 0xb4,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_INV = 0xb6,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_INV = 0xb7,
+};
+
+#if 0
+enum fw_rocev2_cqe_err {
+ /* TODO */
+};
+#endif
+
+struct fw_ri_v2_rdma_write_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be32 r4[2];
+ __be32 r5;
+ __be32 immd_data;
+ __be64 to_sink;
+ __be32 stag_sink;
+ __be32 plen;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_RDMA_WRITE_WR_PSN 0
+#define M_FW_RI_V2_RDMA_WRITE_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_WRITE_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_WRITE_WR_PSN)
+#define G_FW_RI_V2_RDMA_WRITE_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_WRITE_WR_PSN) & M_FW_RI_V2_RDMA_WRITE_WR_PSN)
+
+struct fw_ri_v2_send_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 sendop_psn;
+ __u8 immdlen;
+ __u8 r3[3];
+ __be32 r4;
+ /* CPL_TX_TNL_LSO, CPL_TX_PKT_XT and Eth/IP/UDP/BTH
+ * headers in UD QP case, align size to 16B */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_SEND_WR_SENDOP 24
+#define M_FW_RI_V2_SEND_WR_SENDOP 0xff
+#define V_FW_RI_V2_SEND_WR_SENDOP(x) ((x) << S_FW_RI_V2_SEND_WR_SENDOP)
+#define G_FW_RI_V2_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_SENDOP) & M_FW_RI_V2_SEND_WR_SENDOP)
+
+#define S_FW_RI_V2_SEND_WR_PSN 0
+#define M_FW_RI_V2_SEND_WR_PSN 0xffffff
+#define V_FW_RI_V2_SEND_WR_PSN(x) ((x) << S_FW_RI_V2_SEND_WR_PSN)
+#define G_FW_RI_V2_SEND_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_PSN) & M_FW_RI_V2_SEND_WR_PSN)
+
+struct fw_ri_v2_rdma_read_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be64 to_src;
+ __be32 stag_src;
+ __be32 plen;
+ struct fw_ri_isgl isgl_sink; /* RRQ, max 4 nsge in rocev2, 1 in iwarp */
+};
+
+#define S_FW_RI_V2_RDMA_READ_WR_PSN 0
+#define M_FW_RI_V2_RDMA_READ_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_READ_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_READ_WR_PSN)
+#define G_FW_RI_V2_RDMA_READ_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_READ_WR_PSN) & M_FW_RI_V2_RDMA_READ_WR_PSN)
+
+struct fw_ri_v2_atomic_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 atomicop_psn;
+};
+
+#define S_FW_RI_V2_ATOMIC_WR_ATOMICOP 28
+#define M_FW_RI_V2_ATOMIC_WR_ATOMICOP 0xf
+#define V_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ ((x) << S_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+#define G_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_ATOMICOP) & M_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+
+#define S_FW_RI_V2_ATOMIC_WR_PSN 0
+#define M_FW_RI_V2_ATOMIC_WR_PSN 0xffffff
+#define V_FW_RI_V2_ATOMIC_WR_PSN(x) ((x) << S_FW_RI_V2_ATOMIC_WR_PSN)
+#define G_FW_RI_V2_ATOMIC_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_PSN) & M_FW_RI_V2_ATOMIC_WR_PSN)
+
+struct fw_ri_v2_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r5;
+ __be32 r6[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+
+#define S_FW_RI_V2_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_V2_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_QPBINDE) & M_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_V2_BIND_MW_WR_QPBINDE V_FW_RI_V2_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_NS 5
+#define M_FW_RI_V2_BIND_MW_WR_NS 0x1
+#define V_FW_RI_V2_BIND_MW_WR_NS(x) ((x) << S_FW_RI_V2_BIND_MW_WR_NS)
+#define G_FW_RI_V2_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_NS) & M_FW_RI_V2_BIND_MW_WR_NS)
+#define F_FW_RI_V2_BIND_MW_WR_NS V_FW_RI_V2_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_V2_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_V2_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_V2_BIND_MW_WR_DCACPU)
+#define G_FW_RI_V2_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_DCACPU) & M_FW_RI_V2_BIND_MW_WR_DCACPU)
+
+struct fw_ri_v2_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r3;
+ __be32 r4[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_V2_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_V2_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_QPBINDE) & M_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_V2_FR_NSMR_WR_QPBINDE V_FW_RI_V2_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_NS 5
+#define M_FW_RI_V2_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_NS)
+#define G_FW_RI_V2_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_NS) & M_FW_RI_V2_FR_NSMR_WR_NS)
+#define F_FW_RI_V2_FR_NSMR_WR_NS V_FW_RI_V2_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_V2_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_V2_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_V2_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_DCACPU) & M_FW_RI_V2_FR_NSMR_WR_DCACPU)
+
+/******************************************************************************
+ * N V M E - T C P W O R K R E Q U E S T s
+ *****************************************************************************/
+
+struct fw_nvmet_v2_fr_nsmr_wr {
+ __be32 op_to_wrid;
+ __be32 flowid_len16;
+ __be32 r3;
+ __be32 r4;
+ __be32 mem_write_addr32;
+ __u8 r5;
+ __u8 imm_data_len32;
+ union {
+ __be16 dsgl_data_len32;
+ __be16 reset_mem_len32;
+ };
+ __be64 r6;
+};
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 23
+#define M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define G_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define F_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL \
+ V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 22
+#define M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define G_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define F_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM \
+ V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_WRID 0
+#define M_FW_NVMET_V2_FR_NSMR_WR_WRID 0xffff
+#define V_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_WRID)
+#define G_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_WRID) & M_FW_NVMET_V2_FR_NSMR_WR_WRID)
+
+struct fw_v2_nvmet_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 flags_hi_to_flags_lo;
+ /* optional immdlen data (fw_tx_pi_hdr, iso cpl, nvmet header etc) */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_dsgl dsgl_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 10
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 0x3fffff
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 9
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 8
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 7
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 6
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0x3f
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+
+
/******************************************************************************
* F O i S C S I W O R K R E Q U E S T s
*********************************************/
@@ -3827,17 +4678,17 @@ struct fw_pi_error {
(((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE)
struct fw_tlstx_data_wr {
- __be32 op_to_immdlen;
- __be32 flowid_len16;
- __be32 plen;
- __be32 lsodisable_to_flags;
- __be32 r5;
- __be32 ctxloc_to_exp;
- __be16 mfs;
- __be16 adjustedplen_pkd;
- __be16 expinplenmax_pkd;
- __u8 pdusinplenmax_pkd;
- __u8 r10;
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+ __be32 r5;
+ __be32 ctxloc_to_exp;
+ __be16 mfs;
+ __be16 adjustedplen_pkd;
+ __be16 expinplenmax_pkd;
+ __u8 pdusinplenmax_pkd;
+ __u8 r10;
};
#define S_FW_TLSTX_DATA_WR_OPCODE 24
@@ -4092,6 +4943,265 @@ struct fw_tls_tunnel_ofld_wr {
__be32 r4;
};
+struct fw_crypto_update_sa_wr {
+ __u8 opcode;
+ __u8 saop_to_txrx;
+ __u8 vfn;
+ __u8 r1;
+ __u8 r2[3];
+ __u8 len16;
+ __be64 cookie;
+ __be16 r3;
+ __be16 ipsecidx;
+ __be32 SPI;
+ __be64 dip_hi;
+ __be64 dip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ union fw_crypto_update_sa_sa {
+ struct egress_sa {
+ __be32 valid_SPI_hi;
+ __be32 SPI_lo_eSeqNum_hi;
+ __be32 eSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_keyID;
+ } egress;
+ struct ingress_sa {
+ __be32 valid_to_iSeqNum_hi;
+ __be32 iSeqNum_mi;
+ __be32 iSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_IPVer;
+ } ingress;
+ } sa;
+ union fw_crypto_update_sa_key {
+ struct _aes128 {
+ __u8 key128[16];
+ __u8 H128[16];
+ __u8 rsvd[16];
+ } aes128;
+ struct _aes192 {
+ __u8 key192[24];
+ __be64 r3;
+ __u8 H192[16];
+ } aes192;
+ struct _aes256 {
+ __u8 key256[32];
+ __u8 H256[16];
+ } aes256;
+ } key;
+};
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SAOP 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_SAOP 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SAOP) & M_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SAOP V_FW_CRYPTO_UPDATE_SA_WR_SAOP(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_TXRX 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_TXRX 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define G_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_TXRX) & M_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define F_FW_CRYPTO_UPDATE_SA_WR_TXRX V_FW_CRYPTO_UPDATE_SA_WR_TXRX(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SPI_LO V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 4
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE \
+ V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYID 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYID 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYID) & M_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 12
+#define M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 0xfff
+#define V_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 11
+#define M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN \
+ V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 3
+#define M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNEN 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNEN) & M_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESNEN V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_IPVER 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_IPVER 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define G_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_IPVER) & M_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define F_FW_CRYPTO_UPDATE_SA_WR_IPVER V_FW_CRYPTO_UPDATE_SA_WR_IPVER(1U)
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -4157,11 +5267,12 @@ enum fw_cmd_opcodes {
FW_FCOE_SPARAMS_CMD = 0x35,
FW_FCOE_STATS_CMD = 0x37,
FW_FCOE_FCF_CMD = 0x38,
- FW_DCB_IEEE_CMD = 0x3a,
- FW_DIAG_CMD = 0x3d,
+ FW_DCB_IEEE_CMD = 0x3a,
+ FW_DIAG_CMD = 0x3d,
FW_PTP_CMD = 0x3e,
FW_HMA_CMD = 0x3f,
- FW_LASTC2E_CMD = 0x40,
+ FW_JBOF_WIN_REG_CMD = 0x40,
+ FW_LASTC2E_CMD = 0x41,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
};
@@ -4246,7 +5357,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
FW_LDST_ADDRSPC_FUNC_I2C = 0x002A, /* legacy */
- FW_LDST_ADDRSPC_LE = 0x0030,
+ FW_LDST_ADDRSPC_LE = 0x0030,
FW_LDST_ADDRSPC_I2C = 0x0038,
FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040,
FW_LDST_ADDRSPC_PCIE_DBG = 0x0041,
@@ -4665,11 +5776,17 @@ enum fw_caps_config_nic {
enum fw_caps_config_toe {
FW_CAPS_CONFIG_TOE = 0x00000001,
+ FW_CAPS_CONFIG_TOE_SENDPATH = 0x00000002,
};
enum fw_caps_config_rdma {
FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+ FW_CAPS_CONFIG_RDMA_ROCEV2 = 0x00000004,
+};
+
+enum fw_caps_config_nvme {
+ FW_CAPS_CONFIG_NVME_TCP = 0x00000001,
};
enum fw_caps_config_iscsi {
@@ -4687,8 +5804,9 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLSKEYS = 0x00000002,
- FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, /* NIC over ipsecofld */
FW_CAPS_CONFIG_TLS_HW = 0x00000008,
+ FW_CAPS_CONFIG_OFLD_OVER_IPSEC_INLINE = 0x00000010,/* ofld over ipsecofld */
};
enum fw_caps_config_fcoe {
@@ -4716,7 +5834,7 @@ struct fw_caps_config_cmd {
__be16 nbmcaps;
__be16 linkcaps;
__be16 switchcaps;
- __be16 r3;
+ __be16 nvmecaps;
__be16 niccaps;
__be16 toecaps;
__be16 rdmacaps;
@@ -4840,6 +5958,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DEV_512SGL_MR = 0x30,
FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31,
FW_PARAMS_PARAM_DEV_VI_ENABLE_INGRESS_AFTER_LINKUP = 0x32,
+ FW_PARAMS_PARAM_DEV_TID_QID_SEL_MASK = 0x33,
+ FW_PARAMS_PARAM_DEV_TX_TPCHMAP = 0x3A,
};
/*
@@ -4911,6 +6031,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ /* no separate STAG/PBL START/END for nvmet.
+ * use same rdma stag/pbl memory range */
FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
@@ -4943,7 +6065,7 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
- FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+ FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36,
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_RSSKEYINFO = 0x38,
@@ -4955,6 +6077,13 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E,
FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F,
FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+ FW_PARAMS_PARAM_PFVF_RRQ_START = 0x41,
+ FW_PARAMS_PARAM_PFVF_RRQ_END = 0x42,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_START = 0x43,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_END = 0x44,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TUNNEL = 0x45,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TRANSPORT = 0x46,
+ FW_PARAMS_PARAM_PFVF_OFLD_NIPSEC_TUNNEL = 0x47,
};
/*
@@ -4984,6 +6113,19 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_FLM_DCA = 0x30
};
+#define S_T7_DMAQ_CONM_CTXT_CNGTPMODE 0
+#define M_T7_DMAQ_CONM_CTXT_CNGTPMODE 0x3
+#define V_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) ((x) << S_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+#define G_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CNGTPMODE) & M_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+
+#define S_T7_DMAQ_CONM_CTXT_CH_VEC 2
+#define M_T7_DMAQ_CONM_CTXT_CH_VEC 0xf
+#define V_T7_DMAQ_CONM_CTXT_CH_VEC(x) ((x) << S_T7_DMAQ_CONM_CTXT_CH_VEC)
+#define G_T7_DMAQ_CONM_CTXT_CH_VEC(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CH_VEC) & M_T7_DMAQ_CONM_CTXT_CH_VEC)
+
+
/*
* chnet parameters
*/
@@ -5199,7 +6341,8 @@ struct fw_pfvf_cmd {
enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
FW_IQ_TYPE_NO_FL_INT_CAP,
- FW_IQ_TYPE_VF_CQ
+ FW_IQ_TYPE_VF_CQ,
+ FW_IQ_TYPE_CQ,
};
enum fw_iq_iqtype {
@@ -5787,6 +6930,12 @@ struct fw_eq_mngt_cmd {
(((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP)
#define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U)
+#define S_FW_EQ_MNGT_CMD_COREGROUP 16
+#define M_FW_EQ_MNGT_CMD_COREGROUP 0x3f
+#define V_FW_EQ_MNGT_CMD_COREGROUP(x) ((x) << S_FW_EQ_MNGT_CMD_COREGROUP)
+#define G_FW_EQ_MNGT_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_COREGROUP) & M_FW_EQ_MNGT_CMD_COREGROUP)
+
#define S_FW_EQ_MNGT_CMD_CMPLIQID 20
#define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff
#define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID)
@@ -5977,6 +7126,12 @@ struct fw_eq_eth_cmd {
(((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP)
#define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U)
+#define S_FW_EQ_ETH_CMD_COREGROUP 16
+#define M_FW_EQ_ETH_CMD_COREGROUP 0x3f
+#define V_FW_EQ_ETH_CMD_COREGROUP(x) ((x) << S_FW_EQ_ETH_CMD_COREGROUP)
+#define G_FW_EQ_ETH_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_COREGROUP) & M_FW_EQ_ETH_CMD_COREGROUP)
+
#define S_FW_EQ_ETH_CMD_EQID 0
#define M_FW_EQ_ETH_CMD_EQID 0xfffff
#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
@@ -6190,6 +7345,12 @@ struct fw_eq_ctrl_cmd {
(((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
#define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U)
+#define S_FW_EQ_CTRL_CMD_COREGROUP 16
+#define M_FW_EQ_CTRL_CMD_COREGROUP 0x3f
+#define V_FW_EQ_CTRL_CMD_COREGROUP(x) ((x) << S_FW_EQ_CTRL_CMD_COREGROUP)
+#define G_FW_EQ_CTRL_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_COREGROUP) & M_FW_EQ_CTRL_CMD_COREGROUP)
+
#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
#define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff
#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
@@ -6377,6 +7538,12 @@ struct fw_eq_ofld_cmd {
(((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP)
#define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U)
+#define S_FW_EQ_OFLD_CMD_COREGROUP 16
+#define M_FW_EQ_OFLD_CMD_COREGROUP 0x3f
+#define V_FW_EQ_OFLD_CMD_COREGROUP(x) ((x) << S_FW_EQ_OFLD_CMD_COREGROUP)
+#define G_FW_EQ_OFLD_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_COREGROUP) & M_FW_EQ_OFLD_CMD_COREGROUP)
+
#define S_FW_EQ_OFLD_CMD_EQID 0
#define M_FW_EQ_OFLD_CMD_EQID 0xfffff
#define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID)
@@ -7285,7 +8452,8 @@ fec_supported(uint32_t caps)
{
return ((caps & (FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_SPEED_50G |
- FW_PORT_CAP32_SPEED_100G)) != 0);
+ FW_PORT_CAP32_SPEED_100G | FW_PORT_CAP32_SPEED_200G |
+ FW_PORT_CAP32_SPEED_400G)) != 0);
}
enum fw_port_action {
@@ -7799,6 +8967,8 @@ enum fw_port_type {
FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_KR_XLAUI = 22, /* No, 4, 40G/10G/1G, No AN*/
+ FW_PORT_TYPE_SFP56 = 26,
+ FW_PORT_TYPE_QSFP56 = 27,
FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};
@@ -8862,7 +10032,9 @@ struct fw_devlog_cmd {
__u8 r2[7];
__be32 memtype_devlog_memaddr16_devlog;
__be32 memsize_devlog;
- __be32 r3[2];
+ __u8 num_devlog;
+ __u8 r3[3];
+ __be32 r4;
};
#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28
@@ -9786,6 +10958,45 @@ struct fw_hma_cmd {
#define G_FW_HMA_CMD_ADDR_SIZE(x) \
(((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE)
+struct fw_jbof_win_reg_cmd {
+ __be32 op_pkd;
+ __be32 alloc_to_len16;
+ __be32 window_num_pcie_params;
+ __be32 window_size;
+ __be64 bus_addr;
+ __be64 phy_address;
+};
+
+#define S_FW_JBOF_WIN_REG_CMD_ALLOC 31
+#define M_FW_JBOF_WIN_REG_CMD_ALLOC 0x1
+#define V_FW_JBOF_WIN_REG_CMD_ALLOC(x) ((x) << S_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define G_FW_JBOF_WIN_REG_CMD_ALLOC(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_ALLOC) & M_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define F_FW_JBOF_WIN_REG_CMD_ALLOC V_FW_JBOF_WIN_REG_CMD_ALLOC(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_FREE 30
+#define M_FW_JBOF_WIN_REG_CMD_FREE 0x1
+#define V_FW_JBOF_WIN_REG_CMD_FREE(x) ((x) << S_FW_JBOF_WIN_REG_CMD_FREE)
+#define G_FW_JBOF_WIN_REG_CMD_FREE(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_FREE) & M_FW_JBOF_WIN_REG_CMD_FREE)
+#define F_FW_JBOF_WIN_REG_CMD_FREE V_FW_JBOF_WIN_REG_CMD_FREE(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 7
+#define M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 0xf
+#define V_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+#define G_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM) & \
+ M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+
+#define S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0
+#define M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0x7f
+#define V_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+#define G_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS) & \
+ M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+
/******************************************************************************
* P C I E F W R E G I S T E R
**************************************/
@@ -9914,8 +11125,15 @@ enum pcie_fw_eval {
*/
#define PCIE_FW_PF_DEVLOG 7
+#define S_PCIE_FW_PF_DEVLOG_COUNT_MSB 31
+#define M_PCIE_FW_PF_DEVLOG_COUNT_MSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_MSB) & M_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+
#define S_PCIE_FW_PF_DEVLOG_NENTRIES128 28
-#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0xf
+#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0x7
#define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128)
#define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
@@ -9928,8 +11146,15 @@ enum pcie_fw_eval {
#define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16)
+#define S_PCIE_FW_PF_DEVLOG_COUNT_LSB 3
+#define M_PCIE_FW_PF_DEVLOG_COUNT_LSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_LSB) & M_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+
#define S_PCIE_FW_PF_DEVLOG_MEMTYPE 0
-#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0xf
+#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0x7
#define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x) ((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE)
#define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE)
@@ -9969,7 +11194,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
FW_HDR_CHIP_T5,
- FW_HDR_CHIP_T6
+ FW_HDR_CHIP_T6,
+ FW_HDR_CHIP_T7
};
#define S_FW_HDR_FW_VER_MAJOR 24
@@ -10015,6 +11241,11 @@ enum {
T6FW_VERSION_MINOR = 27,
T6FW_VERSION_MICRO = 5,
T6FW_VERSION_BUILD = 0,
+
+ T7FW_VERSION_MAJOR = 2,
+ T7FW_VERSION_MINOR = 0,
+ T7FW_VERSION_MICRO = 0,
+ T7FW_VERSION_BUILD = 0,
};
enum {
@@ -10050,6 +11281,17 @@ enum {
T6FW_HDR_INTFVER_ISCSI = 0x00,
T6FW_HDR_INTFVER_FCOEPDU= 0x00,
T6FW_HDR_INTFVER_FCOE = 0x00,
+
+ /* T7
+ */
+ T7FW_HDR_INTFVER_NIC = 0x00,
+ T7FW_HDR_INTFVER_VNIC = 0x00,
+ T7FW_HDR_INTFVER_OFLD = 0x00,
+ T7FW_HDR_INTFVER_RI = 0x00,
+ T7FW_HDR_INTFVER_ISCSIPDU= 0x00,
+ T7FW_HDR_INTFVER_ISCSI = 0x00,
+ T7FW_HDR_INTFVER_FCOEPDU= 0x00,
+ T7FW_HDR_INTFVER_FCOE = 0x00,
};
#define FW_VERSION32(MAJOR, MINOR, MICRO, BUILD) ( \
@@ -10085,7 +11327,7 @@ struct fw_ephy_hdr {
enum {
FW_EPHY_HDR_MAGIC = 0x65706879,
};
-
+
struct fw_ifconf_dhcp_info {
__be32 addr;
__be32 mask;
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg.txt b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
new file mode 100644
index 000000000000..499af3675bd9
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
@@ -0,0 +1,644 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer application.
+#
+# Some of the more important resources to allocate and their constaints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invlidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Termimate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 ` # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intelectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NLFIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS *(no of snmc grp + 1 hw mac) + 1 anmc grp)) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# Following function 7 is used by embedded ARM to communicate to
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608 bytes ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# folling params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in inuits of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x684e23fb
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
new file mode 100644
index 000000000000..f06f059f4112
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
@@ -0,0 +1,530 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer application.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 1000 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 1 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 # Crypto core reset
+ reg[0x46000] = 0xa/0xe # 16K ESH Hi Extraction window
+
+ #Tick granularities in kbps
+ tsch_ticks = 1000, 100, 10, 1
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Termimate_msg = 0
+ # Terminate_with_err = 0
+
+ #gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+[function "0"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NLFIQ_WD
+ nethctrl = 96 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 32 # number of clip region entries
+ nfilter = 48 # number of filter region entries
+ nserver = 48 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "1"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS *(no of snmc grp + 1 hw mac) + 1 anmc grp)) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 2048
+ tp_l2t = 1020
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+
+# MPS features a 196608 bytes ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x22432d98
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
new file mode 100644
index 000000000000..0bca1c194af8
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
@@ -0,0 +1,644 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer application.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Termimate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NLFIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS *(no of snmc grp + 1 hw mac) + 1 anmc grp)) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# Following function 7 is used by embedded ARM to communicate to
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608 bytes ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x5cab62d4
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index 3c4d269f6c69..4610f91e96ac 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -132,26 +132,21 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.rqt.total = sc->vres.rq.size;
rdev->stats.qid.total = sc->vres.qp.size;
- rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ rc = c4iw_init_resource(rdev, T4_MAX_NUM_PD);
if (rc) {
device_printf(sc->dev, "error %d initializing resources\n", rc);
goto err1;
}
- rc = c4iw_pblpool_create(rdev);
- if (rc) {
- device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
- goto err2;
- }
rc = c4iw_rqtpool_create(rdev);
if (rc) {
device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
- goto err3;
+ goto err2;
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
if (!rdev->status_page) {
rc = -ENOMEM;
- goto err4;
+ goto err3;
}
rdev->status_page->qp_start = sc->vres.qp.start;
rdev->status_page->qp_size = sc->vres.qp.size;
@@ -168,15 +163,13 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
rc = -ENOMEM;
- goto err5;
+ goto err4;
}
return (0);
-err5:
- free_page((unsigned long)rdev->status_page);
err4:
- c4iw_rqtpool_destroy(rdev);
+ free_page((unsigned long)rdev->status_page);
err3:
- c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
err2:
c4iw_destroy_resource(&rdev->resource);
err1:
@@ -186,7 +179,6 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
free_page((unsigned long)rdev->status_page);
- c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index ca2595b65b02..47ce10562c66 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -99,7 +99,6 @@ struct c4iw_id_table {
};
struct c4iw_resource {
- struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
};
@@ -904,11 +903,9 @@ int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
diff --git a/sys/dev/cxgbe/iw_cxgbe/mem.c b/sys/dev/cxgbe/iw_cxgbe/mem.c
index 4a1adc118b7c..ae0aa0edc17a 100644
--- a/sys/dev/cxgbe/iw_cxgbe/mem.c
+++ b/sys/dev/cxgbe/iw_cxgbe/mem.c
@@ -56,46 +56,23 @@ mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, int wait)
+ dma_addr_t data, int wait)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- addr &= 0x7FFFFFF;
-
if (wait)
c4iw_init_wr_wait(&wr_wait);
- wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);
+ wr_len = T4_WRITE_MEM_DMA_LEN;
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- (wait ? F_FW_WR_COMPL : 0));
- ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
- ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
- V_T5_ULP_MEMIO_ORDER(1) |
- V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));
-
- sgl = (struct ulptx_sgl *)(ulpmc + 1);
- sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
- V_ULPTX_NSGE(1));
- sgl->len0 = cpu_to_be32(len);
- sgl->addr0 = cpu_to_be64((u64)data);
-
+ t4_write_mem_dma_wr(sc, wrtod(wr), wr_len, 0, addr, len, data,
+ wait ? (u64)(unsigned long)&wr_wait : 0);
t4_wrq_tx(sc, wr);
if (wait)
@@ -108,70 +85,32 @@ static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_idata *ulpsc;
- u8 wr_len, *to_dp, *from_dp;
+ u8 wr_len, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- u32 cmd;
-
- cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
- cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);
-
- addr &= 0x7FFFFFF;
CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
- num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
+ num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
+ from_dp = data;
for (i = 0; i < num_wqe; i++) {
-
- copy_len = min(len, C4IW_MAX_INLINE_SIZE);
- wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
- roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+ copy_len = min(len, T4_MAX_INLINE_SIZE);
+ wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
-
- if (i == (num_wqe-1)) {
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- F_FW_WR_COMPL);
- ulpmc->wr.wr_lo =
- (__force __be64)(unsigned long) &wr_wait;
- } else
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
- ulpmc->wr.wr_mid = cpu_to_be32(
- V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
-
- ulpmc->cmd = cmd;
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
- DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
- 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
-
- to_dp = (u8 *)(ulpsc + 1);
- from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
- if (data)
- memcpy(to_dp, from_dp, copy_len);
- else
- memset(to_dp, 0, copy_len);
- if (copy_len % T4_ULPTX_MIN_IO)
- memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
- (copy_len % T4_ULPTX_MIN_IO));
+ t4_write_mem_inline_wr(sc, wrtod(wr), wr_len, 0, addr, copy_len,
+ from_dp, i == (num_wqe - 1) ?
+ (__force __be64)(unsigned long) &wr_wait : 0);
t4_wrq_tx(sc, wr);
- len -= C4IW_MAX_INLINE_SIZE;
- }
+ if (from_dp != NULL)
+ from_dp += T4_MAX_INLINE_SIZE;
+ addr += T4_MAX_INLINE_SIZE >> 5;
+ len -= T4_MAX_INLINE_SIZE;
+ }
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
return ret;
}
@@ -201,7 +140,7 @@ _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
- (void *)daddr, !remain);
+ daddr, !remain);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -263,8 +202,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
- stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
- if (!stag_idx) {
+ stag_idx = t4_stag_alloc(rdev->adap, 1);
+ if (stag_idx == T4_STAG_UNSET) {
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
@@ -309,7 +248,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
sizeof(tpt), &tpt);
if (reset_tpt_entry) {
- c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+ t4_stag_free(rdev->adap, stag_idx, 1);
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
diff --git a/sys/dev/cxgbe/iw_cxgbe/resource.c b/sys/dev/cxgbe/iw_cxgbe/resource.c
index 644ea0c631bf..cd20f1eafdd6 100644
--- a/sys/dev/cxgbe/iw_cxgbe/resource.c
+++ b/sys/dev/cxgbe/iw_cxgbe/resource.c
@@ -59,13 +59,9 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid)
{
int err = 0;
- err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
- C4IW_ID_TABLE_F_RANDOM);
- if (err)
- goto tpt_err;
err = c4iw_init_qid_table(rdev);
if (err)
goto qid_err;
@@ -77,8 +73,6 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
- c4iw_id_table_free(&rdev->resource.tpt_table);
- tpt_err:
return -ENOMEM;
}
@@ -243,7 +237,6 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
- c4iw_id_table_free(&rscp->tpt_table);
c4iw_id_table_free(&rscp->qid_table);
c4iw_id_table_free(&rscp->pdid_table);
}
@@ -254,12 +247,9 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
- unsigned long addr;
+ u32 addr;
- vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
- 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
- M_FIRSTFIT|M_NOWAIT, &addr);
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
+ addr = t4_pblpool_alloc(rdev->adap, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,33 +258,15 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
- return (u32)addr;
+ return addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
- vmem_xfree(rdev->pbl_arena, addr, roundup(size,(1 << MIN_PBL_SHIFT)));
-}
-
-int c4iw_pblpool_create(struct c4iw_rdev *rdev)
-{
- rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
- rdev->adap->vres.pbl.start,
- rdev->adap->vres.pbl.size,
- 1, 0, M_FIRSTFIT| M_NOWAIT);
- if (!rdev->pbl_arena)
- return -ENOMEM;
-
- return 0;
-}
-
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
-{
- vmem_destroy(rdev->pbl_arena);
+ t4_pblpool_free(rdev->adap, addr, size);
}
/* RQT Memory Manager. */
diff --git a/sys/dev/cxgbe/iw_cxgbe/t4.h b/sys/dev/cxgbe/iw_cxgbe/t4.h
index 48f85cf7965b..ffb610420640 100644
--- a/sys/dev/cxgbe/iw_cxgbe/t4.h
+++ b/sys/dev/cxgbe/iw_cxgbe/t4.h
@@ -64,7 +64,6 @@
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
-#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index b57c03f076b5..91a43785aaca 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -229,7 +229,17 @@ struct iw_tunables {
struct tls_tunables {
int inline_keys;
- int combo_wrs;
+ union {
+ struct {
+ /* T6 only. */
+ int combo_wrs;
+ };
+ struct {
+ /* T7 only. */
+ int short_records;
+ int partial_ghash;
+ };
+ };
};
#ifdef TCP_OFFLOAD
diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c
index 8d4552116d96..4b583b67ba07 100644
--- a/sys/dev/cxgbe/t4_filter.c
+++ b/sys/dev/cxgbe/t4_filter.c
@@ -322,48 +322,85 @@ remove_hftid(struct adapter *sc, struct filter_entry *f)
LIST_REMOVE(f, link_tid);
}
-/*
- * Input: driver's 32b filter mode.
- * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
- */
static uint16_t
-mode_to_fconf(uint32_t mode)
+mode_to_fconf_t4(uint32_t mode)
{
uint32_t fconf = 0;
if (mode & T4_FILTER_IP_FRAGMENT)
fconf |= F_FRAGMENTATION;
-
if (mode & T4_FILTER_MPS_HIT_TYPE)
fconf |= F_MPSHITTYPE;
-
if (mode & T4_FILTER_MAC_IDX)
fconf |= F_MACMATCH;
-
if (mode & T4_FILTER_ETH_TYPE)
fconf |= F_ETHERTYPE;
-
if (mode & T4_FILTER_IP_PROTO)
fconf |= F_PROTOCOL;
-
if (mode & T4_FILTER_IP_TOS)
fconf |= F_TOS;
-
if (mode & T4_FILTER_VLAN)
fconf |= F_VLAN;
-
if (mode & T4_FILTER_VNIC)
fconf |= F_VNIC_ID;
-
if (mode & T4_FILTER_PORT)
fconf |= F_PORT;
-
if (mode & T4_FILTER_FCoE)
fconf |= F_FCOE;
return (fconf);
}
+static uint16_t
+mode_to_fconf_t7(uint32_t mode)
+{
+ uint32_t fconf = 0;
+
+ if (mode & T4_FILTER_TCPFLAGS)
+ fconf |= F_TCPFLAGS;
+ if (mode & T4_FILTER_SYNONLY)
+ fconf |= F_SYNONLY;
+ if (mode & T4_FILTER_ROCE)
+ fconf |= F_ROCE;
+ if (mode & T4_FILTER_IP_FRAGMENT)
+ fconf |= F_T7_FRAGMENTATION;
+ if (mode & T4_FILTER_MPS_HIT_TYPE)
+ fconf |= F_T7_MPSHITTYPE;
+ if (mode & T4_FILTER_MAC_IDX)
+ fconf |= F_T7_MACMATCH;
+ if (mode & T4_FILTER_ETH_TYPE)
+ fconf |= F_T7_ETHERTYPE;
+ if (mode & T4_FILTER_IP_PROTO)
+ fconf |= F_T7_PROTOCOL;
+ if (mode & T4_FILTER_IP_TOS)
+ fconf |= F_T7_TOS;
+ if (mode & T4_FILTER_VLAN)
+ fconf |= F_T7_VLAN;
+ if (mode & T4_FILTER_VNIC)
+ fconf |= F_T7_VNIC_ID;
+ if (mode & T4_FILTER_PORT)
+ fconf |= F_T7_PORT;
+ if (mode & T4_FILTER_FCoE)
+ fconf |= F_T7_FCOE;
+ if (mode & T4_FILTER_IPSECIDX)
+ fconf |= F_IPSECIDX;
+
+ return (fconf);
+}
+
+/*
+ * Input: driver's 32b filter mode.
+ * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
+ */
+static uint16_t
+mode_to_fconf(struct adapter *sc, uint32_t mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (mode_to_fconf_t7(mode));
+ else
+ return (mode_to_fconf_t4(mode));
+}
+
/*
* Input: driver's 32b filter mode.
* Returns: hardware vnic mode (ingress config) matching the input.
@@ -389,65 +426,100 @@ check_fspec_against_fconf_iconf(struct adapter *sc,
struct tp_params *tpp = &sc->params.tp;
uint32_t fconf = 0;
- if (fs->val.frag || fs->mask.frag)
- fconf |= F_FRAGMENTATION;
-
- if (fs->val.matchtype || fs->mask.matchtype)
- fconf |= F_MPSHITTYPE;
-
- if (fs->val.macidx || fs->mask.macidx)
- fconf |= F_MACMATCH;
-
- if (fs->val.ethtype || fs->mask.ethtype)
- fconf |= F_ETHERTYPE;
-
- if (fs->val.proto || fs->mask.proto)
- fconf |= F_PROTOCOL;
-
- if (fs->val.tos || fs->mask.tos)
- fconf |= F_TOS;
-
- if (fs->val.vlan_vld || fs->mask.vlan_vld)
- fconf |= F_VLAN;
-
- if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
- if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (fs->val.tcpflags || fs->mask.tcpflags)
+ fconf |= F_TCPFLAGS;
+ if (fs->val.synonly || fs->mask.synonly)
+ fconf |= F_SYNONLY;
+ if (fs->val.roce || fs->mask.roce)
+ fconf |= F_ROCE;
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_T7_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_T7_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_T7_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_T7_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_T7_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_T7_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_T7_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
#ifdef notyet
- if (fs->val.encap_vld || fs->mask.encap_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+#endif
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_T7_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_T7_FCOE;
+ if (fs->val.ipsecidx || fs->mask.ipsecidx)
+ fconf |= F_IPSECIDX;
+ } else {
+ if (fs->val.tcpflags || fs->mask.tcpflags ||
+ fs->val.synonly || fs->mask.synonly ||
+ fs->val.roce || fs->mask.roce ||
+ fs->val.ipsecidx || fs->mask.ipsecidx)
return (EINVAL);
- fconf |= F_VNIC_ID;
- }
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+#ifdef notyet
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
#endif
-
- if (fs->val.iport || fs->mask.iport)
- fconf |= F_PORT;
-
- if (fs->val.fcoe || fs->mask.fcoe)
- fconf |= F_FCOE;
-
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_FCOE;
+ }
if ((tpp->filter_mode | fconf) != tpp->filter_mode)
return (E2BIG);
return (0);
}
-/*
- * Input: hardware filter configuration (filter mode/mask, ingress config).
- * Input: driver's 32b filter mode matching the input.
- */
static uint32_t
-fconf_to_mode(uint16_t hwmode, int vnic_mode)
+fconf_to_mode_t4(uint16_t hwmode, int vnic_mode)
{
uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
@@ -488,6 +560,69 @@ fconf_to_mode(uint16_t hwmode, int vnic_mode)
return (mode);
}
+static uint32_t
+fconf_to_mode_t7(uint16_t hwmode, int vnic_mode)
+{
+ uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
+ T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
+
+ if (hwmode & F_TCPFLAGS)
+ mode |= T4_FILTER_TCPFLAGS;
+ if (hwmode & F_SYNONLY)
+ mode |= T4_FILTER_SYNONLY;
+ if (hwmode & F_ROCE)
+ mode |= T4_FILTER_ROCE;
+ if (hwmode & F_T7_FRAGMENTATION)
+ mode |= T4_FILTER_IP_FRAGMENT;
+ if (hwmode & F_T7_MPSHITTYPE)
+ mode |= T4_FILTER_MPS_HIT_TYPE;
+ if (hwmode & F_T7_MACMATCH)
+ mode |= T4_FILTER_MAC_IDX;
+ if (hwmode & F_T7_ETHERTYPE)
+ mode |= T4_FILTER_ETH_TYPE;
+ if (hwmode & F_T7_PROTOCOL)
+ mode |= T4_FILTER_IP_PROTO;
+ if (hwmode & F_T7_TOS)
+ mode |= T4_FILTER_IP_TOS;
+ if (hwmode & F_T7_VLAN)
+ mode |= T4_FILTER_VLAN;
+ if (hwmode & F_T7_VNIC_ID)
+ mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
+ if (hwmode & F_T7_PORT)
+ mode |= T4_FILTER_PORT;
+ if (hwmode & F_T7_FCOE)
+ mode |= T4_FILTER_FCoE;
+ if (hwmode & F_IPSECIDX)
+ mode |= T4_FILTER_IPSECIDX;
+
+ switch (vnic_mode) {
+ case FW_VNIC_MODE_PF_VF:
+ mode |= T4_FILTER_IC_VNIC;
+ break;
+ case FW_VNIC_MODE_ENCAP_EN:
+ mode |= T4_FILTER_IC_ENCAP;
+ break;
+ case FW_VNIC_MODE_OUTER_VLAN:
+ default:
+ break;
+ }
+
+ return (mode);
+}
+
+/*
+ * Input: hardware filter configuration (filter mode/mask, ingress config).
+ * Output: driver's 32b filter mode matching the input.
+ */
+static inline uint32_t
+fconf_to_mode(struct adapter *sc, uint16_t hwmode, int vnic_mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (fconf_to_mode_t7(hwmode, vnic_mode));
+ else
+ return (fconf_to_mode_t4(hwmode, vnic_mode));
+}
+
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
@@ -499,7 +634,7 @@ get_filter_mode(struct adapter *sc, uint32_t *mode)
/* Non-zero incoming value in mode means "hashfilter mode". */
filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
- *mode = fconf_to_mode(filter_mode, tp->vnic_mode);
+ *mode = fconf_to_mode(sc, filter_mode, tp->vnic_mode);
return (0);
}
@@ -512,7 +647,7 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
uint16_t fconf;
iconf = mode_to_iconf(mode);
- fconf = mode_to_fconf(mode);
+ fconf = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
return (0); /* Nothing to do */
@@ -554,7 +689,7 @@ set_filter_mask(struct adapter *sc, uint32_t mode)
uint16_t fmask;
iconf = mode_to_iconf(mode);
- fmask = mode_to_fconf(mode);
+ fmask = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
return (0); /* Nothing to do */
@@ -811,71 +946,138 @@ hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
struct tp_params *tp = &sc->params.tp;
uint16_t fmask;
- *ftuple = fmask = 0;
-
/*
* Initialize each of the fields which we care about which are present
* in the Compressed Filter Tuple.
*/
- if (tp->vlan_shift >= 0 && fs->mask.vlan) {
- *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
- tp->vlan_shift;
- fmask |= F_VLAN;
- }
-
- if (tp->port_shift >= 0 && fs->mask.iport) {
- *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
- fmask |= F_PORT;
- }
-
- if (tp->protocol_shift >= 0 && fs->mask.proto) {
- *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
- fmask |= F_PROTOCOL;
- }
-
- if (tp->tos_shift >= 0 && fs->mask.tos) {
- *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
- fmask |= F_TOS;
- }
-
- if (tp->vnic_shift >= 0 && fs->mask.vnic) {
- /* vnic_mode was already validated. */
- if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
- MPASS(fs->mask.pfvf_vld);
- else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
- MPASS(fs->mask.ovlan_vld);
+#define SFF(V, S) ((uint64_t)(V) << S) /* Shifted Filter Field. */
+ *ftuple = fmask = 0;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (tp->ipsecidx_shift >= 0 && fs->mask.ipsecidx) {
+ *ftuple |= SFF(fs->val.ipsecidx, tp->ipsecidx_shift);
+ fmask |= F_IPSECIDX;
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_T7_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_T7_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
#ifdef notyet
- else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
- MPASS(fs->mask.encap_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
#endif
- *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
- fmask |= F_VNIC_ID;
- }
-
- if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
- *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
- fmask |= F_MACMATCH;
- }
-
- if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
- *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
- fmask |= F_ETHERTYPE;
- }
-
- if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
- *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
- fmask |= F_MPSHITTYPE;
- }
-
- if (tp->frag_shift >= 0 && fs->mask.frag) {
- *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
- fmask |= F_FRAGMENTATION;
- }
-
- if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
- *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
- fmask |= F_FCOE;
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_T7_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_T7_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_T7_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_T7_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_T7_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_T7_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_T7_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_T7_FRAGMENTATION;
+ }
+ if (tp->roce_shift >= 0 && fs->mask.roce) {
+ *ftuple |= SFF(fs->val.roce, tp->roce_shift);
+ fmask |= F_ROCE;
+ }
+ if (tp->synonly_shift >= 0 && fs->mask.synonly) {
+ *ftuple |= SFF(fs->val.synonly, tp->synonly_shift);
+ fmask |= F_SYNONLY;
+ }
+ if (tp->tcpflags_shift >= 0 && fs->mask.tcpflags) {
+ *ftuple |= SFF(fs->val.tcpflags, tp->tcpflags_shift);
+ fmask |= F_TCPFLAGS;
+ }
+ } else {
+ if (fs->mask.ipsecidx || fs->mask.roce || fs->mask.synonly ||
+ fs->mask.tcpflags) {
+ MPASS(tp->ipsecidx_shift == -1);
+ MPASS(tp->roce_shift == -1);
+ MPASS(tp->synonly_shift == -1);
+ MPASS(tp->tcpflags_shift == -1);
+ return (EINVAL);
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
+#ifdef notyet
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
+#endif
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_FRAGMENTATION;
+ }
}
+#undef SFF
/* A hashfilter must conform to the hardware filter mask. */
if (fmask != tp->filter_mask)
@@ -1195,11 +1397,19 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
return (ENOMEM);
bzero(req, sizeof(*req));
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
- if (no_reply == 0) {
- req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
- V_NO_REPLY(0));
- } else
- req->reply_ctrl = htobe16(V_NO_REPLY(1));
+ if (no_reply) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = sc->sge.fwq.abs_id;
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1594,7 +1804,7 @@ static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
howmany(sizeof (struct cpl_act_open_req), 16),
howmany(sizeof (struct cpl_act_open_req6), 16)
@@ -1607,10 +1817,14 @@ act_open_cpl_len16(struct adapter *sc, int isipv6)
howmany(sizeof (struct cpl_t6_act_open_req), 16),
howmany(sizeof (struct cpl_t6_act_open_req6), 16)
},
+ {
+ howmany(sizeof (struct cpl_t7_act_open_req), 16),
+ howmany(sizeof (struct cpl_t7_act_open_req6), 16)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
index ba9a17dbaddf..f7c8ee24d596 100644
--- a/sys/dev/cxgbe/t4_ioctl.h
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -64,6 +64,7 @@ enum {
T4_SET_FILTER_MASK, /* set filter mask (hashfilter mode) */
T4_HOLD_CLIP_ADDR, /* add ref on an IP in the CLIP */
T4_RELEASE_CLIP_ADDR, /* remove ref from an IP in the CLIP */
+ T4_GET_SGE_CTXT, /* get SGE context for a queue */
};
struct t4_reg {
@@ -119,6 +120,10 @@ struct t4_i2c_data {
#define T4_FILTER_MAC_IDX 0x2000 /* MPS MAC address match index */
#define T4_FILTER_MPS_HIT_TYPE 0x4000 /* MPS match type */
#define T4_FILTER_IP_FRAGMENT 0x8000 /* IP fragment */
+#define T4_FILTER_IPSECIDX 0x10000
+#define T4_FILTER_ROCE 0x20000
+#define T4_FILTER_SYNONLY 0x40000
+#define T4_FILTER_TCPFLAGS 0x80000
/*
* T4_FILTER_VNIC's real meaning depends on the ingress config.
*/
@@ -199,6 +204,10 @@ struct t4_filter_tuple {
uint32_t vlan_vld:1; /* VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN tag valid, value in "vnic" */
uint32_t pfvf_vld:1; /* VNIC id (PF/VF) valid, value in "vnic" */
+ uint32_t roce:1;
+ uint32_t synonly:1;
+ uint32_t tcpflags:6;
+ uint32_t ipsecidx:12;
};
struct t4_filter_specification {
@@ -322,6 +331,7 @@ struct t4_sched_queue {
};
#define T4_SGE_CONTEXT_SIZE 24
+#define T7_SGE_CONTEXT_SIZE 28
enum {
SGE_CONTEXT_EGRESS,
SGE_CONTEXT_INGRESS,
@@ -335,6 +345,12 @@ struct t4_sge_context {
uint32_t data[T4_SGE_CONTEXT_SIZE / 4];
};
+struct t4_sge_ctxt {
+ uint32_t mem_id;
+ uint32_t cid;
+ uint32_t data[T7_SGE_CONTEXT_SIZE / 4];
+};
+
struct t4_mem_range {
uint32_t addr;
uint32_t len;
@@ -444,4 +460,5 @@ struct t4_clip_addr {
#define CHELSIO_T4_SET_FILTER_MASK _IOW('f', T4_SET_FILTER_MASK, uint32_t)
#define CHELSIO_T4_HOLD_CLIP_ADDR _IOW('f', T4_HOLD_CLIP_ADDR, struct t4_clip_addr)
#define CHELSIO_T4_RELEASE_CLIP_ADDR _IOW('f', T4_RELEASE_CLIP_ADDR, struct t4_clip_addr)
+#define CHELSIO_T4_GET_SGE_CTXT _IOWR('f', T4_GET_SGE_CTXT, struct t4_sge_ctxt)
#endif
diff --git a/sys/dev/cxgbe/t4_iov.c b/sys/dev/cxgbe/t4_iov.c
index bfd1613e9795..452ebaaf0172 100644
--- a/sys/dev/cxgbe/t4_iov.c
+++ b/sys/dev/cxgbe/t4_iov.c
@@ -119,6 +119,28 @@ struct {
{0x6085, "Chelsio T6240-SO 85"},
{0x6086, "Chelsio T6225-SO-CR 86"},
{0x6087, "Chelsio T6225-CR 87"},
+}, t7iov_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7000, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7001, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7002, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7003, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7004, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7005, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7006, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7007, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7008, "Chelsio S71400"}, /* 1 x 400G, nomem */
+ {0x7009, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x700a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x700b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x700c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x700d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x700e, "Chelsio S7450-OCP3"}, /* 4 x 10/25/50G OCP3 */
+ {0x700f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7010, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7011, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ {0x7080, "Custom T7"},
};
static inline uint32_t
@@ -191,6 +213,26 @@ t6iov_probe(device_t dev)
}
static int
+chiov_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7iov_pciids); i++) {
+ if (d == t7iov_pciids[i].device) {
+ device_set_desc(dev, t7iov_pciids[i].desc);
+ device_quiet(dev);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
+static int
t4iov_attach(device_t dev)
{
struct t4iov_softc *sc;
@@ -460,6 +502,28 @@ static driver_t t6iov_driver = {
sizeof(struct t4iov_softc)
};
+static device_method_t chiov_methods[] = {
+ DEVMETHOD(device_probe, chiov_probe),
+ DEVMETHOD(device_attach, t4iov_attach),
+ DEVMETHOD(device_detach, t4iov_detach),
+
+#ifdef PCI_IOV
+ DEVMETHOD(pci_iov_init, t4iov_iov_init),
+ DEVMETHOD(pci_iov_uninit, t4iov_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, t4iov_add_vf),
+#endif
+
+ DEVMETHOD(t4_attach_child, t4iov_attach_child),
+ DEVMETHOD(t4_detach_child, t4iov_detach_child),
+
+ DEVMETHOD_END
+};
+
+static driver_t chiov_driver = {
+ "chiov",
+ chiov_methods,
+ sizeof(struct t4iov_softc)
+};
DRIVER_MODULE(t4iov, pci, t4iov_driver, 0, 0);
MODULE_VERSION(t4iov, 1);
@@ -468,3 +532,6 @@ MODULE_VERSION(t5iov, 1);
DRIVER_MODULE(t6iov, pci, t6iov_driver, 0, 0);
MODULE_VERSION(t6iov, 1);
+
+DRIVER_MODULE(chiov, pci, chiov_driver, 0, 0);
+MODULE_VERSION(chiov, 1);
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
index b1307bf2ace5..5f9c26a0f720 100644
--- a/sys/dev/cxgbe/t4_l2t.c
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -119,7 +119,7 @@ find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac
first_free = e;
} else if (e->state == L2T_STATE_SWITCHING &&
memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
- e->vlan == vlan && e->lport == port)
+ e->vlan == vlan && e->hw_port == port)
return (e); /* Found existing entry that matches. */
}
@@ -156,7 +156,7 @@ mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
- req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
+ req->params = htons(V_L2T_W_PORT(e->hw_port) | V_L2T_W_NOREPLY(!reply));
req->l2t_idx = htons(idx);
req->vlan = htons(e->vlan);
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
@@ -227,7 +227,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
e = &d->l2tab[i];
if (e->state != L2T_STATE_TLS)
continue;
- if (e->vlan == vlan && e->lport == port &&
+ if (e->vlan == vlan && e->hw_port == port &&
e->wrq == (struct sge_wrq *)txq &&
memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
@@ -263,7 +263,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
/* Initialize the entry. */
e->state = L2T_STATE_TLS;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
e->iqid = sc->sge.fwq.abs_id;
e->wrq = (struct sge_wrq *)txq;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
@@ -303,7 +303,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e->iqid = sc->sge.fwq.abs_id;
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
atomic_store_rel_int(&e->refcnt, 1);
atomic_subtract_int(&d->nfree, 1);
@@ -313,7 +313,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e = NULL;
} else {
MPASS(e->vlan == vlan);
- MPASS(e->lport == port);
+ MPASS(e->hw_port == port);
atomic_add_int(&e->refcnt, 1);
}
}
@@ -488,7 +488,7 @@ sysctl_l2t(SYSCTL_HANDLER_ARGS)
" %u %2u %c %5u %s",
e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
e->dmac[3], e->dmac[4], e->dmac[5],
- e->vlan & 0xfff, vlan_prio(e), e->lport,
+ e->vlan & 0xfff, vlan_prio(e), e->hw_port,
l2e_state(e), atomic_load_acq_int(&e->refcnt),
e->ifp ? if_name(e->ifp) : "-");
skip:
diff --git a/sys/dev/cxgbe/t4_l2t.h b/sys/dev/cxgbe/t4_l2t.h
index 13e085bb7467..989d2d5ec8f3 100644
--- a/sys/dev/cxgbe/t4_l2t.h
+++ b/sys/dev/cxgbe/t4_l2t.h
@@ -71,7 +71,7 @@ struct l2t_entry {
volatile int refcnt; /* entry reference count */
uint16_t hash; /* hash bucket the entry is on */
uint8_t ipv6; /* entry is for an IPv6 address */
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
uint8_t dmac[ETHER_ADDR_LEN]; /* next hop's MAC address */
};
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 9756a6945384..22d2f504c257 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -241,6 +240,45 @@ static driver_t vcc_driver = {
sizeof(struct vi_info)
};
+/* T7+ bus driver interface */
+static int ch_probe(device_t);
+static device_method_t ch_methods[] = {
+ DEVMETHOD(device_probe, ch_probe),
+ DEVMETHOD(device_attach, t4_attach),
+ DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
+
+ DEVMETHOD(bus_child_location, t4_child_location),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
+
+ DEVMETHOD(t4_is_main_ready, t4_ready),
+ DEVMETHOD(t4_read_port_device, t4_read_port_device),
+
+ DEVMETHOD_END
+};
+static driver_t ch_driver = {
+ "chnex",
+ ch_methods,
+ sizeof(struct adapter)
+};
+
+
+/* T7+ port (che) interface */
+static driver_t che_driver = {
+ "che",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
+/* T7+ VI (vche) interface */
+static driver_t vche_driver = {
+ "vche",
+ vcxgbe_methods,
+ sizeof(struct vi_info)
+};
+
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
@@ -519,6 +557,9 @@ static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
"Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
+static const char *
+t4_fec_bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2\6auto\7module";
+
/*
* Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
* issues to the firmware. If the firmware doesn't support FORCE_FEC then the
@@ -570,6 +611,10 @@ static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
&t4_switchcaps_allowed, 0, "Default switch capabilities");
+static int t4_nvmecaps_allowed = 0;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nvmecaps_allowed, CTLFLAG_RDTUN,
+ &t4_nvmecaps_allowed, 0, "Default NVMe capabilities");
+
#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
@@ -716,6 +761,14 @@ SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
0, "Attempt to combine TCB field updates with TLS record work requests.");
+
+static int t4_tls_short_records = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, short_records, CTLFLAG_RDTUN,
+ &t4_tls_short_records, 0, "Use cipher-only mode for short records.");
+
+static int t4_tls_partial_ghash = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, partial_ghash, CTLFLAG_RDTUN,
+ &t4_tls_partial_ghash, 0, "Use partial GHASH for AES-GCM records.");
#endif
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
@@ -809,17 +862,20 @@ static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
+static int sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
-static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_ibq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
@@ -831,6 +887,7 @@ static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
+static int sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
@@ -855,7 +912,7 @@ static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
-static int get_sge_context(struct adapter *, struct t4_sge_context *);
+static int get_sge_context(struct adapter *, int, uint32_t, int, uint32_t *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
@@ -960,6 +1017,29 @@ struct {
{0x6485, "Custom T6240-SO"},
{0x6486, "Custom T6225-SO-CR"},
{0x6487, "Custom T6225-CR"},
+}, t7_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7400, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7401, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7402, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7403, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7404, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7405, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7406, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7407, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7408, "Chelsio S71400"}, /* 1 x 400G, nomem */
+ {0x7409, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x740a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x740b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x740c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x740d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x740e, "Chelsio S7450-OCP3"}, /* 4 x 10/25/50G OCP3 */
+ {0x740f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7410, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7411, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ /* Custom */
+ {0x7480, "Custom T7"},
};
#ifdef TCP_OFFLOAD
@@ -1042,6 +1122,31 @@ t6_probe(device_t dev)
return (ENXIO);
}
+static int
+ch_probe(device_t dev)
+{
+ int i;
+ uint16_t v = pci_get_vendor(dev);
+ uint16_t d = pci_get_device(dev);
+ uint8_t f = pci_get_function(dev);
+
+ if (v != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ /* Attach only to PF0 of the FPGA */
+ if (d == 0xd000 && f != 0)
+ return (ENXIO);
+
+ for (i = 0; i < nitems(t7_pciids); i++) {
+ if (d == t7_pciids[i].device) {
+ device_set_desc(dev, t7_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
static void
t5_attribute_workaround(device_t dev)
{
@@ -1091,6 +1196,13 @@ static const struct devnames devnames[] = {
.pf03_drv_name = "t6iov",
.vf_nexus_name = "t6vf",
.vf_ifnet_name = "ccv"
+ }, {
+ .nexus_name = "chnex",
+ .ifnet_name = "che",
+ .vi_ifnet_name = "vche",
+ .pf03_drv_name = "chiov",
+ .vf_nexus_name = "chvf",
+ .vf_ifnet_name = "chev"
}
};
@@ -1100,12 +1212,13 @@ t4_init_devnames(struct adapter *sc)
int id;
id = chip_id(sc);
- if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
- sc->names = &devnames[id - CHELSIO_T4];
- else {
+ if (id < CHELSIO_T4) {
device_printf(sc->dev, "chip id %d is not supported.\n", id);
sc->names = NULL;
- }
+ } else if (id - CHELSIO_T4 < nitems(devnames))
+ sc->names = &devnames[id - CHELSIO_T4];
+ else
+ sc->names = &devnames[nitems(devnames) - 1];
}
static int
@@ -1277,6 +1390,7 @@ t4_attach(device_t dev)
goto done; /* error message displayed already */
memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+ memset(sc->port_map, 0xff, sizeof(sc->port_map));
/* Prepare the adapter for operation. */
buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
@@ -1309,7 +1423,7 @@ t4_attach(device_t dev)
* will work even in "recovery mode".
*/
setup_memwin(sc);
- if (t4_init_devlog_params(sc, 0) == 0)
+ if (t4_init_devlog_ncores_params(sc, 0) == 0)
fixup_devlog_params(sc);
make_dev_args_init(&mda);
mda.mda_devsw = &t4_cdevsw;
@@ -1407,14 +1521,16 @@ t4_attach(device_t dev)
}
if (is_bt(pi->port_type))
- setbit(&sc->bt_map, pi->tx_chan);
+ setbit(&sc->bt_map, pi->hw_port);
else
- MPASS(!isset(&sc->bt_map, pi->tx_chan));
+ MPASS(!isset(&sc->bt_map, pi->hw_port));
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
device_get_nameunit(dev), i);
mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
- sc->chan_map[pi->tx_chan] = i;
+ for (j = 0; j < sc->params.tp.lb_nchan; j++)
+ sc->chan_map[pi->tx_chan + j] = i;
+ sc->port_map[pi->hw_port] = i;
/*
* The MPS counter for FCS errors doesn't work correctly on the
@@ -1424,10 +1540,8 @@ t4_attach(device_t dev)
*/
if (is_t6(sc))
pi->fcs_reg = -1;
- else {
- pi->fcs_reg = t4_port_reg(sc, pi->tx_chan,
- A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
- }
+ else
+ pi->fcs_reg = A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L;
pi->fcs_base = 0;
/* All VIs on this port share this media. */
@@ -1467,6 +1581,7 @@ t4_attach(device_t dev)
sc->intr_count = iaq.nirq;
s = &sc->sge;
+ s->nctrlq = max(sc->params.nports, sc->params.ncores);
s->nrxq = nports * iaq.nrxq;
s->ntxq = nports * iaq.ntxq;
if (num_vis > 1) {
@@ -1521,7 +1636,7 @@ t4_attach(device_t dev)
MPASS(s->niq <= s->iqmap_sz);
MPASS(s->neq <= s->eqmap_sz);
- s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
+ s->ctrlq = malloc(s->nctrlq * sizeof(struct sge_wrq), M_CXGBE,
M_ZERO | M_WAITOK);
s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
M_ZERO | M_WAITOK);
@@ -1548,6 +1663,7 @@ t4_attach(device_t dev)
if (sc->vres.key.size != 0)
sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
+ t4_init_tpt(sc);
/*
* Second pass over the ports. This time we know the number of rx and
@@ -1849,6 +1965,7 @@ t4_detach_common(device_t dev)
#endif
if (sc->key_map)
vmem_destroy(sc->key_map);
+ t4_free_tpt(sc);
#ifdef INET6
t4_destroy_clip_table(sc);
#endif
@@ -2156,6 +2273,7 @@ struct adapter_pre_reset_state {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -2187,6 +2305,7 @@ save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
o->nbmcaps = sc->nbmcaps;
o->linkcaps = sc->linkcaps;
o->switchcaps = sc->switchcaps;
+ o->nvmecaps = sc->nvmecaps;
o->niccaps = sc->niccaps;
o->toecaps = sc->toecaps;
o->rdmacaps = sc->rdmacaps;
@@ -2225,6 +2344,7 @@ compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
COMPARE_CAPS(nbm);
COMPARE_CAPS(link);
COMPARE_CAPS(switch);
+ COMPARE_CAPS(nvme);
COMPARE_CAPS(nic);
COMPARE_CAPS(toe);
COMPARE_CAPS(rdma);
@@ -2417,11 +2537,7 @@ restart_lld(struct adapter *sc)
}
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ?
- A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL,
- V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -3407,7 +3523,7 @@ cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
if (is_t6(vi->pi->adapter))
error = t6_tls_tag_alloc(ifp, params, pt);
else
- error = EOPNOTSUPP;
+ error = t7_tls_tag_alloc(ifp, params, pt);
break;
}
#endif
@@ -3534,6 +3650,8 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_CR2_QSFP:
case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_SFP56:
+ case FW_PORT_TYPE_QSFP56:
/* Pluggable transceiver */
switch (pi->mod_type) {
case FW_PORT_MOD_TYPE_LR:
@@ -3551,6 +3669,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_LR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_LR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_LR4);
}
break;
case FW_PORT_MOD_TYPE_SR:
@@ -3567,6 +3687,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_SR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_SR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_SR4);
}
break;
case FW_PORT_MOD_TYPE_ER:
@@ -3588,6 +3710,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_CR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_CR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_CR4_PAM4);
}
break;
case FW_PORT_MOD_TYPE_LRM:
@@ -3597,6 +3721,8 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_MOD_TYPE_DR:
if (speed == FW_PORT_CAP32_SPEED_100G)
return (IFM_100G_DR);
+ if (speed == FW_PORT_CAP32_SPEED_200G)
+ return (IFM_200G_DR4);
break;
case FW_PORT_MOD_TYPE_NA:
MPASS(0); /* Not pluggable? */
@@ -3684,7 +3810,7 @@ alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
("%s: VI %s doesn't have a MAC func", __func__,
device_get_nameunit(vi->dev)));
func = vi_mac_funcs[index];
- rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
+ rc = t4_alloc_vi_func(sc, sc->mbox, pi->hw_port, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
if (rc < 0) {
CH_ERR(vi, "failed to allocate virtual interface %d"
@@ -3954,7 +4080,7 @@ setup_memwin(struct adapter *sc)
const struct memwin_init *mw_init;
struct memwin *mw;
int i;
- uint32_t bar0;
+ uint32_t bar0, reg;
if (is_t4(sc)) {
/*
@@ -3982,9 +4108,10 @@ setup_memwin(struct adapter *sc)
mw->mw_aperture = mw_init->aperture;
mw->mw_curpos = 0;
}
- t4_write_reg(sc,
- PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
- (mw->mw_base + bar0) | V_BIR(0) |
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_T7_PCIE_MEM_ACCESS_BASE_WIN, i) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i);
+ t4_write_reg(sc, reg, (mw->mw_base + bar0) | V_BIR(0) |
V_WINDOW(ilog2(mw->mw_aperture) - 10));
rw_wlock(&mw->mw_lock);
position_memwin(sc, i, mw->mw_curpos);
@@ -3992,7 +4119,7 @@ setup_memwin(struct adapter *sc)
}
/* flush */
- t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
+ t4_read_reg(sc, reg);
}
/*
@@ -4005,8 +4132,7 @@ static void
position_memwin(struct adapter *sc, int idx, uint32_t addr)
{
struct memwin *mw;
- uint32_t pf;
- uint32_t reg;
+ uint32_t pf, reg, val;
MPASS(idx >= 0 && idx < NUM_MEMWIN);
mw = &sc->memwin[idx];
@@ -4019,8 +4145,14 @@ position_memwin(struct adapter *sc, int idx, uint32_t addr)
pf = V_PFNUM(sc->pf);
mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
}
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
- t4_write_reg(sc, reg, mw->mw_curpos | pf);
+ if (chip_id(sc) > CHELSIO_T6) {
+ reg = PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, idx);
+ val = (mw->mw_curpos >> X_T7_MEMOFST_SHIFT) | pf;
+ } else {
+ reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
+ val = mw->mw_curpos | pf;
+ }
+ t4_write_reg(sc, reg, val);
t4_read_reg(sc, reg); /* flush */
}
@@ -4453,8 +4585,27 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
iaq->nrxq_vi = t4_nrxq_vi;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
if (is_offload(sc) || is_ethoffload(sc)) {
- iaq->nofldtxq = t4_nofldtxq;
- iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ if (sc->params.tid_qid_sel_mask == 0) {
+ iaq->nofldtxq = t4_nofldtxq;
+ iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ } else {
+ iaq->nofldtxq = roundup(t4_nofldtxq, sc->params.ncores);
+ iaq->nofldtxq_vi = roundup(t4_nofldtxq_vi,
+ sc->params.ncores);
+ if (iaq->nofldtxq != t4_nofldtxq)
+ device_printf(sc->dev,
+ "nofldtxq updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq, iaq->nofldtxq,
+ sc->params.ncores);
+ if (iaq->num_vis > 1 &&
+ iaq->nofldtxq_vi != t4_nofldtxq_vi)
+ device_printf(sc->dev,
+ "nofldtxq_vi updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq_vi, iaq->nofldtxq_vi,
+ sc->params.ncores);
+ }
}
#endif
#ifdef TCP_OFFLOAD
@@ -4555,6 +4706,10 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
if (iaq->nofldrxq > 0) {
iaq->nofldrxq = 1;
iaq->nofldtxq = 1;
+ if (sc->params.tid_qid_sel_mask == 0)
+ iaq->nofldtxq = 1;
+ else
+ iaq->nofldtxq = sc->params.ncores;
}
iaq->nnmtxq = 0;
iaq->nnmrxq = 0;
@@ -4567,9 +4722,10 @@ done:
MPASS(iaq->nirq > 0);
MPASS(iaq->nrxq > 0);
MPASS(iaq->ntxq > 0);
- if (itype == INTR_MSI) {
+ if (itype == INTR_MSI)
MPASS(powerof2(iaq->nirq));
- }
+ if (sc->params.tid_qid_sel_mask != 0)
+ MPASS(iaq->nofldtxq % sc->params.ncores == 0);
}
static int
@@ -4711,6 +4867,22 @@ struct fw_info {
.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
.intfver_fcoe = FW_INTFVER(T6, FCOE),
},
+ }, {
+ .chip = CHELSIO_T7,
+ .kld_name = "t7fw_cfg",
+ .fw_mod_name = "t7fw",
+ .fw_h = {
+ .chip = FW_HDR_CHIP_T7,
+ .fw_ver = htobe32(FW_VERSION(T7)),
+ .intfver_nic = FW_INTFVER(T7, NIC),
+ .intfver_vnic = FW_INTFVER(T7, VNIC),
+ .intfver_ofld = FW_INTFVER(T7, OFLD),
+ .intfver_ri = FW_INTFVER(T7, RI),
+ .intfver_iscsipdu = FW_INTFVER(T7, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T7, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T7, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T7, FCOE),
+ },
}
};
@@ -5032,7 +5204,7 @@ done:
static int
copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
- uint32_t mtype, uint32_t moff)
+ uint32_t mtype, uint32_t moff, u_int maxlen)
{
struct fw_info *fw_info;
const struct firmware *dcfg, *rcfg = NULL;
@@ -5084,10 +5256,10 @@ copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
cflen = rcfg->datasize & ~3;
}
- if (cflen > FLASH_CFG_MAX_SIZE) {
+ if (cflen > maxlen) {
device_printf(sc->dev,
"config file too long (%d, max allowed is %d).\n",
- cflen, FLASH_CFG_MAX_SIZE);
+ cflen, maxlen);
rc = EINVAL;
goto done;
}
@@ -5112,6 +5284,7 @@ struct caps_allowed {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -5139,6 +5312,8 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
int rc;
struct fw_caps_config_cmd caps;
uint32_t mtype, moff, finicsum, cfcsum, param, val;
+ unsigned int maxlen = 0;
+ const int cfg_addr = t4_flash_cfg_addr(sc, &maxlen);
rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
if (rc != 0) {
@@ -5155,7 +5330,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
mtype = FW_MEMTYPE_FLASH;
- moff = t4_flash_cfg_addr(sc);
+ moff = cfg_addr;
caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
@@ -5179,7 +5354,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
FW_LEN16(caps));
- rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
+ rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff, maxlen);
if (rc != 0) {
device_printf(sc->dev,
"failed to upload config file to card: %d.\n", rc);
@@ -5213,6 +5388,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
LIMIT_CAPS(nbm);
LIMIT_CAPS(link);
LIMIT_CAPS(switch);
+ LIMIT_CAPS(nvme);
LIMIT_CAPS(nic);
LIMIT_CAPS(toe);
LIMIT_CAPS(rdma);
@@ -5278,6 +5454,7 @@ partition_resources(struct adapter *sc)
COPY_CAPS(nbm);
COPY_CAPS(link);
COPY_CAPS(switch);
+ COPY_CAPS(nvme);
COPY_CAPS(nic);
COPY_CAPS(toe);
COPY_CAPS(rdma);
@@ -5354,7 +5531,7 @@ get_params__pre_init(struct adapter *sc)
sc->params.vpd.cclk = val[1];
/* Read device log parameters. */
- rc = -t4_init_devlog_params(sc, 1);
+ rc = -t4_init_devlog_ncores_params(sc, 1);
if (rc == 0)
fixup_devlog_params(sc);
else {
@@ -5508,6 +5685,14 @@ get_params__post_init(struct adapter *sc)
}
}
+ if (sc->params.ncores > 1) {
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ param[0] = FW_PARAM_DEV(TID_QID_SEL_MASK);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ sc->params.tid_qid_sel_mask = rc == 0 ? val[0] : 0;
+ }
+
/*
* The parameters that follow may not be available on all firmwares. We
* query them individually rather than in a compound query because old
@@ -5533,6 +5718,14 @@ get_params__post_init(struct adapter *sc)
else
sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */
+ param[0] = FW_PARAM_DEV(TX_TPCHMAP);
+ val[0] = 0;
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ if (rc == 0)
+ sc->params.tx_tp_ch_map = val[0];
+ else
+ sc->params.tx_tp_ch_map = UINT32_MAX; /* Not a legal value. */
+
/*
* Determine whether the firmware supports the filter2 work request.
*/
@@ -5604,6 +5797,7 @@ get_params__post_init(struct adapter *sc)
READ_CAPS(nbmcaps);
READ_CAPS(linkcaps);
READ_CAPS(switchcaps);
+ READ_CAPS(nvmecaps);
READ_CAPS(niccaps);
READ_CAPS(toecaps);
READ_CAPS(rdmacaps);
@@ -5946,9 +6140,13 @@ set_params__post_init(struct adapter *sc)
#ifdef KERN_TLS
if (is_ktls(sc)) {
sc->tlst.inline_keys = t4_tls_inline_keys;
- sc->tlst.combo_wrs = t4_tls_combo_wrs;
- if (t4_kern_tls != 0 && is_t6(sc))
+ if (t4_kern_tls != 0 && is_t6(sc)) {
+ sc->tlst.combo_wrs = t4_tls_combo_wrs;
t6_config_kern_tls(sc, true);
+ } else {
+ sc->tlst.short_records = t4_tls_short_records;
+ sc->tlst.partial_ghash = t4_tls_partial_ghash;
+ }
}
#endif
return (0);
@@ -6220,7 +6418,7 @@ apply_link_config(struct port_info *pi)
MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
#endif
if (!(sc->flags & IS_VF)) {
- rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+ rc = -t4_link_l1cfg(sc, sc->mbox, pi->hw_port, lc);
if (rc != 0) {
device_printf(pi->dev, "l1cfg failed: %d\n", rc);
return (rc);
@@ -6581,9 +6779,7 @@ cxgbe_init_synchronized(struct vi_info *vi)
*/
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -7443,7 +7639,7 @@ cxgbe_refresh_stats(struct vi_info *vi)
pi = vi->pi;
sc = vi->adapter;
tnl_cong_drops = 0;
- t4_get_port_stats(sc, pi->port_id, &pi->stats);
+ t4_get_port_stats(sc, pi->hw_port, &pi->stats);
chan_map = pi->rx_e_chan_map;
while (chan_map) {
i = ffs(chan_map) - 1;
@@ -7481,6 +7677,150 @@ vi_tick(void *arg)
callout_schedule(&vi->tick, hz);
}
+/* CIM inbound queues */
+static const char *t4_ibq[CIM_NUM_IBQ] = {
+ "ibq_tp0", "ibq_tp1", "ibq_ulp", "ibq_sge0", "ibq_sge1", "ibq_ncsi"
+};
+static const char *t7_ibq[CIM_NUM_IBQ_T7] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ "ibq_sge1", "ibq_ncsi", NULL, "ibq_ipc1", "ibq_ipc2", "ibq_ipc3",
+ "ibq_ipc4", "ibq_ipc5", "ibq_ipc6", "ibq_ipc7"
+};
+static const char *t7_ibq_sec[] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ NULL, NULL, NULL, "ibq_ipc0"
+};
+
+/* CIM outbound queues */
+static const char *t4_obq[CIM_NUM_OBQ_T5] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", "obq_sge_rx_q1" /* These two are T5/T6 only */
+};
+static const char *t7_obq[CIM_NUM_OBQ_T7] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc1", "obq_ipc2", "obq_ipc3",
+ "obq_ipc4", "obq_ipc5", "obq_ipc6", "obq_ipc7"
+};
+static const char *t7_obq_sec[] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", NULL,
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc0"
+};
+
+static void
+cim_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *c0)
+{
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children1;
+ int i, j, qcount;
+ char s[16];
+ const char **qname;
+
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "cim",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM block");
+ c0 = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_U8(ctx, c0, OID_AUTO, "ncores", CTLFLAG_RD, NULL,
+ sc->params.ncores, "# of active CIM cores");
+
+ for (i = 0; i < sc->params.ncores; i++) {
+ snprintf(s, sizeof(s), "%u", i);
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, s,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM core");
+ children1 = SYSCTL_CHILDREN(oid);
+
+ /*
+ * CTLFLAG_SKIP because the misc.devlog sysctl already displays
+ * the log for all cores. Use this sysctl to get the log for a
+ * particular core only.
+ */
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "devlog",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
+ sc, i, sysctl_devlog, "A", "firmware's device log");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "loadavg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_loadavg, "A",
+ "microprocessor load averages (select firmwares only)");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "qcfg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ chip_id(sc) > CHELSIO_T6 ? sysctl_cim_qcfg_t7 : sysctl_cim_qcfg,
+ "A", "Queue configuration");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_la, "A", "Logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "ma_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "pif_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
+
+ /* IBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = &t4_ibq[0];
+ qcount = nitems(t4_ibq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = &t7_ibq[0];
+ qcount = nitems(t7_ibq);
+ } else {
+ qname = &t7_ibq_sec[0];
+ qcount = nitems(t7_ibq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_ibq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_ibq, "A", NULL);
+ }
+
+ /* OBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ qname = t4_obq;
+ qcount = CIM_NUM_OBQ;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = t4_obq;
+ qcount = nitems(t4_obq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = t7_obq;
+ qcount = nitems(t7_obq);
+ } else {
+ qname = t7_obq_sec;
+ qcount = nitems(t7_obq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_obq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_obq, "A", NULL);
+ }
+ }
+}
+
/*
* Should match fw_caps_config_<foo> enums in t4fw_interface.h
*/
@@ -7490,17 +7830,18 @@ static char *caps_decoder[] = {
"\20\001INGRESS\002EGRESS", /* 2: switch */
"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
"\006HASHFILTER\007ETHOFLD",
- "\20\001TOE", /* 4: TOE */
- "\20\001RDDP\002RDMAC", /* 5: RDMA */
+ "\20\001TOE\002SENDPATH", /* 4: TOE */
+ "\20\001RDDP\002RDMAC\003ROCEv2", /* 5: RDMA */
"\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
"\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
"\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
"\007T10DIF"
"\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
"\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */
- "\004TLS_HW",
+ "\004TLS_HW\005TOE_IPSEC",
"\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
"\004PO_INITIATOR\005PO_TARGET",
+ "\20\001NVMe_TCP", /* 9: NVMe */
};
void
@@ -7605,6 +7946,7 @@ t4_sysctls(struct adapter *sc)
SYSCTL_CAP(nbmcaps, 0, "NBM");
SYSCTL_CAP(linkcaps, 1, "link");
SYSCTL_CAP(switchcaps, 2, "switch");
+ SYSCTL_CAP(nvmecaps, 9, "NVMe");
SYSCTL_CAP(niccaps, 3, "NIC");
SYSCTL_CAP(toecaps, 4, "TCP offload");
SYSCTL_CAP(rdmacaps, 5, "RDMA");
@@ -7623,11 +7965,6 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_loadavg, "A",
- "microprocessor load averages (debug firmwares only)");
-
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
"I", "core Vdd (in mV)");
@@ -7659,81 +7996,7 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
sysctl_cctrl, "A", "congestion control");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_la, "A", "CIM logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
-
- if (chip_id(sc) > CHELSIO_T4) {
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 6 (SGE0-RX)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 7 (SGE1-RX)");
- }
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_qcfg, "A", "CIM queue configuration");
+ cim_sysctls(sc, ctx, children);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7748,8 +8011,8 @@ t4_sysctls(struct adapter *sc)
sysctl_tid_stats, "A", "tid stats");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_devlog, "A", "firmware's device log");
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, -1,
+ sysctl_devlog, "A", "firmware's device log (all cores)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7783,7 +8046,8 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
+ chip_id(sc) >= CHELSIO_T7 ? sysctl_mps_tcam_t7 :
+ (chip_id(sc) >= CHELSIO_T6 ? sysctl_mps_tcam_t6 : sysctl_mps_tcam),
"A", "MPS TCAM entries");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
@@ -7855,6 +8119,14 @@ t4_sysctls(struct adapter *sc)
CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
"combine TCB field updates with TLS record work "
"requests.");
+ else {
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "short_records",
+ CTLFLAG_RW, &sc->tlst.short_records, 0,
+ "Use cipher-only mode for short records.");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "partial_ghash",
+ CTLFLAG_RW, &sc->tlst.partial_ghash, 0,
+ "Use partial GHASH for AES-GCM records.");
+ }
}
#endif
@@ -8230,86 +8502,112 @@ cxgbe_sysctls(struct port_info *pi)
&pi->tx_parse_error, 0,
"# of tx packets with invalid length or # of segments");
-#define T4_REGSTAT(name, stat, desc) \
- SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
- CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
- t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
- sysctl_handle_t4_reg64, "QU", desc)
-
-/* We get these from port_stats and they may be stale by up to 1s */
-#define T4_PORTSTAT(name, desc) \
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
- &pi->stats.name, desc)
-
- T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
- T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
- T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
- T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
- T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
- T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
- T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
- T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
- T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
- T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
- T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
- T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
-
- T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
- T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
+#define T4_LBSTAT(name, stat, desc) do { \
+ if (sc->params.tp.lb_mode) { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, \
+ A_MPS_PORT_STAT_##stat##_L, \
+ sysctl_handle_t4_portstat64, "QU", desc); \
+ } else { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
+ sysctl_handle_t4_reg64, "QU", desc); \
+ } \
+} while (0)
+
+ T4_LBSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
+ T4_LBSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
+ T4_LBSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
+ T4_LBSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
+ T4_LBSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
+ T4_LBSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
+ T4_LBSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
+ T4_LBSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
+ T4_LBSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
+ T4_LBSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
+ T4_LBSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
+ T4_LBSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
+
+ T4_LBSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
+ T4_LBSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
if (is_t6(sc)) {
- T4_PORTSTAT(rx_fcs_err,
+ /* Read from port_stats and may be stale by up to 1s */
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rx_fcs_err",
+ CTLFLAG_RD, &pi->stats.rx_fcs_err,
"# of frames received with bad FCS since last link up");
} else {
- T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
+ T4_LBSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
"# of frames received with bad FCS");
}
- T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
- T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
- T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
- T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
- T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
- T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
- T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
- T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
- T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
- T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
- T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
- T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
- T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
-
- T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
- T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
- T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
- T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
- T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
- T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
- T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
- T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
+ T4_LBSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
+ T4_LBSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
+ T4_LBSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
+ T4_LBSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
+ T4_LBSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
+ T4_LBSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
+ T4_LBSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
+ T4_LBSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
+ T4_LBSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
+ T4_LBSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
+ T4_LBSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
+ T4_LBSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
+ T4_LBSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
+#undef T4_LBSTAT
+
+#define T4_REGSTAT(name, stat, desc) do { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ A_MPS_STAT_##stat##_L, sysctl_handle_t4_reg64, "QU", desc); \
+} while (0)
+ if (pi->mps_bg_map & 1) {
+ T4_REGSTAT(rx_ovflow0, RX_BG_0_MAC_DROP_FRAME,
+ "# drops due to buffer-group 0 overflows");
+ T4_REGSTAT(rx_trunc0, RX_BG_0_MAC_TRUNC_FRAME,
+ "# of buffer-group 0 truncated packets");
+ }
+ if (pi->mps_bg_map & 2) {
+ T4_REGSTAT(rx_ovflow1, RX_BG_1_MAC_DROP_FRAME,
+ "# drops due to buffer-group 1 overflows");
+ T4_REGSTAT(rx_trunc1, RX_BG_1_MAC_TRUNC_FRAME,
+ "# of buffer-group 1 truncated packets");
+ }
+ if (pi->mps_bg_map & 4) {
+ T4_REGSTAT(rx_ovflow2, RX_BG_2_MAC_DROP_FRAME,
+ "# drops due to buffer-group 2 overflows");
+ T4_REGSTAT(rx_trunc2, RX_BG_2_MAC_TRUNC_FRAME,
+ "# of buffer-group 2 truncated packets");
+ }
+ if (pi->mps_bg_map & 8) {
+ T4_REGSTAT(rx_ovflow3, RX_BG_3_MAC_DROP_FRAME,
+ "# drops due to buffer-group 3 overflows");
+ T4_REGSTAT(rx_trunc3, RX_BG_3_MAC_TRUNC_FRAME,
+ "# of buffer-group 3 truncated packets");
+ }
#undef T4_REGSTAT
-#undef T4_PORTSTAT
}
static int
@@ -8452,14 +8750,14 @@ sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
vi->flags |= TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
if (!(sc->flags & IS_VF))
npkt--;
} else {
vi->flags &= ~TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
}
for_each_txq(vi, i, txq) {
@@ -8669,13 +8967,12 @@ sysctl_link_fec(SYSCTL_HANDLER_ARGS)
struct link_config *lc = &pi->link_cfg;
int rc;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
if (lc->link_ok)
- sbuf_printf(sb, "%b", lc->fec, bits);
+ sbuf_printf(sb, "%b", lc->fec, t4_fec_bits);
else
sbuf_printf(sb, "no link");
rc = sbuf_finish(sb);
@@ -8695,14 +8992,12 @@ sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
if (req->newptr == NULL) {
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
- "\5RSVD3\6auto\7module";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
- sbuf_printf(sb, "%b", lc->requested_fec, bits);
+ sbuf_printf(sb, "%b", lc->requested_fec, t4_fec_bits);
rc = sbuf_finish(sb);
sbuf_delete(sb);
} else {
@@ -8771,7 +9066,6 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
int rc;
int8_t fec;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
@@ -8805,7 +9099,7 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
if (fec == 0)
fec = FEC_NONE;
PORT_UNLOCK(pi);
- sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
+ sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, t4_fec_bits);
}
rc = sbuf_finish(sb);
done:
@@ -8913,6 +9207,31 @@ sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int rc, i, reg = arg2;
+ uint64_t val;
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ val = 0;
+ for (i = 0; i < sc->params.tp.lb_nchan; i++) {
+ val += t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan + i, reg));
+ }
+ rc = 0;
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = sysctl_handle_64(oidp, &val, 0, req);
+ return (rc);
+}
+
+static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9012,6 +9331,10 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc;
uint32_t param, val;
+ uint8_t coreid = (uint8_t)arg2;
+
+ KASSERT(coreid < sc->params.ncores,
+ ("%s: bad coreid %u\n", __func__, coreid));
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
@@ -9020,7 +9343,8 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD) |
+ V_FW_PARAMS_PARAM_Y(coreid);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
}
end_synchronized_op(sc, 0);
@@ -9086,50 +9410,30 @@ done:
return (rc);
}
-static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
- "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
- "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
- "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
-};
-
static int
-sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
+sysctl_cim_ibq(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n, qid = arg2;
+ int rc, i, n, qid, coreid;
uint32_t *buf, *p;
- char *qtype;
- u_int cim_num_obq = sc->chip_params->cim_num_obq;
- KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
- ("%s: bad qid %d\n", __func__, qid));
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
- if (qid < CIM_NUM_IBQ) {
- /* inbound queue */
- qtype = "IBQ";
- n = 4 * CIM_IBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_ibq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- } else {
- /* outbound queue */
- qtype = "OBQ";
- qid -= CIM_NUM_IBQ;
- n = 4 * cim_num_obq * CIM_OBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_obq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- }
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_ibq,
+ ("%s: bad ibq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ n = 4 * CIM_IBQ_SIZE;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_ibq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
if (rc < 0) {
rc = -rc;
goto done;
@@ -9141,12 +9445,58 @@ sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
rc = ENOMEM;
goto done;
}
-
- sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
for (i = 0, p = buf; i < n; i += 16, p += 4)
sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
p[2], p[3]);
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+done:
+ free(buf, M_CXGBE);
+ return (rc);
+}
+
+static int
+sysctl_cim_obq(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i, n, qid, coreid;
+ uint32_t *buf, *p;
+
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
+
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_obq,
+ ("%s: bad obq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
+ n = 6 * CIM_OBQ_SIZE * 4;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_obq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
+ if (rc < 0) {
+ rc = -rc;
+ goto done;
+ }
+ n = rc * sizeof(uint32_t); /* rc has # of words actually read */
+ rc = sysctl_wire_old_buffer(req, 0);
+ if (rc != 0)
+ goto done;
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL) {
+ rc = ENOMEM;
+ goto done;
+ }
+ for (i = 0, p = buf; i < n; i += 16, p += 4)
+ sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
+ p[2], p[3]);
rc = sbuf_finish(sb);
sbuf_delete(sb);
done:
@@ -9217,7 +9567,7 @@ sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
}
static int
-sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_cim_la(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
uint32_t cfg, *buf;
int rc;
@@ -9232,9 +9582,10 @@ sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else {
- rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ rc = -t4_cim_read_core(sc, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &cfg);
if (rc == 0)
- rc = -t4_cim_read_la(sc, buf, NULL);
+ rc = -t4_cim_read_la_core(sc, coreid, buf, NULL);
}
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
@@ -9251,6 +9602,7 @@ static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
+ int coreid = arg2;
struct sbuf *sb;
int rc;
@@ -9258,7 +9610,7 @@ sysctl_cim_la(SYSCTL_HANDLER_ARGS)
if (sb == NULL)
return (ENOMEM);
- rc = sbuf_cim_la(sc, sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, coreid, sb, M_WAITOK);
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9295,7 +9647,7 @@ dump_cimla(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_cim_la(sc, &sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, 0, &sb, M_WAITOK);
if (rc == 0) {
rc = sbuf_finish(&sb);
if (rc == 0) {
@@ -9419,6 +9771,13 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
+ static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
+ "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
+ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
+ };
+
+ MPASS(chip_id(sc) < CHELSIO_T7);
cim_num_obq = sc->chip_params->cim_num_obq;
if (is_t4(sc)) {
@@ -9471,6 +9830,104 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ u_int coreid = arg2;
+ struct sbuf *sb;
+ int rc, i;
+ u_int addr;
+ uint16_t base[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t size[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t thres[CIM_NUM_IBQ_T7];
+ uint32_t obq_wr[2 * CIM_NUM_OBQ_T7], *wr = obq_wr;
+ uint32_t stat[4 * (CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7)], *p = stat;
+ static const char * const qname_ibq_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "SGE1", "NC-SI",
+ "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5", "IPC6", "IPC7",
+ };
+ static const char * const qname_obq_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", "SGE0-RX",
+ "RSVD", "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5",
+ "IPC6", "IPC7"
+ };
+ static const char * const qname_ibq_sec_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "RSVD", "RSVD",
+ "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD",
+ };
+ static const char * const qname_obq_sec_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "RSVD", "SGE0-RX",
+ "RSVD", "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD",
+ "RSVD", "RSVD",
+ };
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_IBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_IBQ_T7, stat);
+ if (rc != 0)
+ goto unlock;
+
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_OBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_OBQ_T7,
+ &stat[4 * CIM_NUM_IBQ_T7]);
+ if (rc != 0)
+ goto unlock;
+
+ addr = A_T7_UP_OBQ_0_SHADOW_REALADDR;
+ for (i = 0; i < CIM_NUM_OBQ_T7 * 2; i++, addr += 8) {
+ rc = -t4_cim_read_core(sc, 1, coreid, addr, 1,
+ &obq_wr[i]);
+ if (rc != 0)
+ goto unlock;
+ }
+ t4_read_cimq_cfg_core(sc, coreid, base, size, thres);
+ }
+unlock:
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ return (rc);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb,
+ " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
+
+ for (i = 0; i < CIM_NUM_IBQ_T7; i++, p += 4) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
+ coreid == 0 ? qname_ibq_t7[i] : qname_ibq_sec_t7[i],
+ base[i], size[i], thres[i], G_IBQRDADDR(p[0]) & 0xfff,
+ G_IBQWRADDR(p[1]) & 0xfff, G_QUESOPCNT(p[3]),
+ G_QUEEOPCNT(p[3]), G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ for ( ; i < CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7; i++, p += 4, wr += 2) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u",
+ coreid == 0 ? qname_obq_t7[i - CIM_NUM_IBQ_T7] :
+ qname_obq_sec_t7[i - CIM_NUM_IBQ_T7],
+ base[i], size[i], G_QUERDADDR(p[0]) & 0xfff,
+ wr[0] << 1, G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
+ G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (rc);
+}
+
+static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9612,18 +10069,25 @@ static const char * const devlog_facility_strings[] = {
};
static int
-sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_devlog(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
int i, j, rc, nentries, first = 0;
struct devlog_params *dparams = &sc->params.devlog;
struct fw_devlog_e *buf, *e;
+ uint32_t addr, size;
uint64_t ftstamp = UINT64_MAX;
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
if (dparams->addr == 0)
return (ENXIO);
+ size = dparams->size / sc->params.ncores;
+ addr = dparams->addr + coreid * size;
+
MPASS(flags == M_WAITOK || flags == M_NOWAIT);
- buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
+ buf = malloc(size, M_CXGBE, M_ZERO | flags);
if (buf == NULL)
return (ENOMEM);
@@ -9631,13 +10095,12 @@ sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else
- rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
- dparams->size);
+ rc = read_via_memwin(sc, 1, addr, (void *)buf, size);
mtx_unlock(&sc->reg_lock);
if (rc != 0)
goto done;
- nentries = dparams->size / sizeof(struct fw_devlog_e);
+ nentries = size / sizeof(struct fw_devlog_e);
for (i = 0; i < nentries; i++) {
e = &buf[i];
@@ -9689,14 +10152,24 @@ static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int rc;
+ int rc, i, coreid = arg2;
struct sbuf *sb;
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
-
- rc = sbuf_devlog(sc, sb, M_WAITOK);
+ if (coreid == -1) {
+ /* -1 means all cores */
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+ if (sc->params.ncores > 0)
+ sbuf_printf(sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, sb, M_WAITOK);
+ }
+ } else {
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ rc = sbuf_devlog(sc, coreid, sb, M_WAITOK);
+ }
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9706,7 +10179,7 @@ sysctl_devlog(SYSCTL_HANDLER_ARGS)
static void
dump_devlog(struct adapter *sc)
{
- int rc;
+ int rc, i;
struct sbuf sb;
if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
@@ -9714,13 +10187,15 @@ dump_devlog(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_devlog(sc, &sb, M_WAITOK);
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+ if (sc->params.ncores > 0)
+ sbuf_printf(&sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, &sb, M_WAITOK);
+ }
if (rc == 0) {
- rc = sbuf_finish(&sb);
- if (rc == 0) {
- log(LOG_DEBUG, "%s: device log follows.\n%s",
- device_get_nameunit(sc->dev), sbuf_data(&sb));
- }
+ sbuf_finish(&sb);
+ log(LOG_DEBUG, "%s: device log follows.\n%s",
+ device_get_nameunit(sc->dev), sbuf_data(&sb));
}
sbuf_delete(&sb);
}
@@ -9909,16 +10384,16 @@ sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
}
struct mem_desc {
- u_int base;
- u_int limit;
+ uint64_t base;
+ uint64_t limit;
u_int idx;
};
static int
mem_desc_cmp(const void *a, const void *b)
{
- const u_int v1 = ((const struct mem_desc *)a)->base;
- const u_int v2 = ((const struct mem_desc *)b)->base;
+ const uint64_t v1 = ((const struct mem_desc *)a)->base;
+ const uint64_t v2 = ((const struct mem_desc *)b)->base;
if (v1 < v2)
return (-1);
@@ -9929,10 +10404,9 @@ mem_desc_cmp(const void *a, const void *b)
}
static void
-mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
- unsigned int to)
+mem_region_show(struct sbuf *sb, const char *name, uint64_t from, uint64_t to)
{
- unsigned int size;
+ uintmax_t size;
if (from == to)
return;
@@ -9941,8 +10415,12 @@ mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
if (size == 0)
return;
- /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
- sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
+ if (from > UINT32_MAX || to > UINT32_MAX)
+ sbuf_printf(sb, "%-18s 0x%012jx-0x%012jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
+ else
+ sbuf_printf(sb, "%-18s 0x%08jx-0x%08jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
}
static int
@@ -9950,7 +10428,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n;
+ int rc, i, n, nchan;
uint32_t lo, hi, used, free, alloc;
static const char *memory[] = {
"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
@@ -9961,12 +10439,14 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
"RQUDP region:", "PBL region:", "TXPBL region:",
- "TLSKey region:", "DBVFIFO region:", "ULPRX state:",
- "ULPTX state:", "On-chip queues:",
+ "TLSKey region:", "RRQ region:", "NVMe STAG region:",
+ "NVMe RQ region:", "NVMe RXPBL region:", "NVMe TPT region:",
+ "NVMe TXPBL region:", "DBVFIFO region:", "ULPRX state:",
+ "ULPTX state:", "RoCE RRQ region:", "On-chip queues:",
};
struct mem_desc avail[4];
struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ struct mem_desc *md;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
@@ -9992,36 +10472,91 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
if (lo & F_EDRAM0_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
- avail[i].base = G_EDRAM0_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM0_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM0_SIZE(hi) << 20);
+ }
avail[i].idx = 0;
i++;
}
if (lo & F_EDRAM1_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
- avail[i].base = G_EDRAM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM1_SIZE(hi) << 20);
+ }
avail[i].idx = 1;
i++;
}
if (lo & F_EXT_MEM_ENABLE) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
- avail[i].base = G_EXT_MEM_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
- avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T6:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM_SIZE(hi) << 20);
+ avail[i].idx = 2;
+ break;
+ case CHELSIO_T5:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T5 */
+ break;
+ default:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_T7_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T7+ */
+ break;
+ }
i++;
}
- if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
+ if (lo & F_EXT_MEM1_ENABLE && !(lo & F_MC_SPLIT)) {
+ /* Only T5 and T7+ have 2 MCs. */
+ MPASS(is_t5(sc) || chip_id(sc) >= CHELSIO_T7);
+
hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 4;
i++;
}
- if (is_t6(sc) && lo & F_HMA_MUX) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (lo & F_HMA_MUX) {
+ /* Only T6+ have HMA. */
+ MPASS(chip_id(sc) >= CHELSIO_T6);
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ hi = t4_read_reg(sc, A_MA_HOST_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_HMATARGETBASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_HMA_SIZE(hi) << 20);
+ } else {
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+ avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 5;
i++;
}
@@ -10030,6 +10565,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
goto done;
qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
+ md = &mem[0];
(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
@@ -10065,22 +10601,52 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
}
md++;
-#define ulp_region(reg) \
- md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
- (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
+#define ulp_region(reg) do {\
+ const u_int shift = chip_id(sc) >= CHELSIO_T7 ? 4 : 0; \
+ md->base = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT) << shift; \
+ md->limit = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) << shift; \
+ md->limit += (1 << shift) - 1; \
+ md++; \
+ } while (0)
+
+#define hide_ulp_region() do { \
+ md->base = 0; \
+ md->idx = nitems(region); \
+ md++; \
+ } while (0)
ulp_region(RX_ISCSI);
ulp_region(RX_TDDP);
ulp_region(TX_TPT);
ulp_region(RX_STAG);
ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
+ if (chip_id(sc) < CHELSIO_T7)
+ ulp_region(RX_RQUDP);
+ else
+ hide_ulp_region();
ulp_region(RX_PBL);
ulp_region(TX_PBL);
- if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
+ if (chip_id(sc) >= CHELSIO_T6)
ulp_region(RX_TLS_KEY);
+ else
+ hide_ulp_region();
+ if (chip_id(sc) >= CHELSIO_T7) {
+ ulp_region(RX_RRQ);
+ ulp_region(RX_NVME_TCP_STAG);
+ ulp_region(RX_NVME_TCP_RQ);
+ ulp_region(RX_NVME_TCP_PBL);
+ ulp_region(TX_NVME_TCP_TPT);
+ ulp_region(TX_NVME_TCP_PBL);
+ } else {
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
}
#undef ulp_region
+#undef hide_ulp_region
md->base = 0;
if (is_t4(sc))
@@ -10111,6 +10677,15 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
md->limit = 0;
md++;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ t4_tp_pio_read(sc, &lo, 1, A_TP_ROCE_RRQ_BASE, false);
+ md->base = lo;
+ } else {
+ md->base = 0;
+ md->idx = nitems(region);
+ }
+ md++;
+
md->base = sc->vres.ocq.start;
if (sc->vres.ocq.size)
md->limit = md->base + sc->vres.ocq.size - 1;
@@ -10143,31 +10718,41 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
mem[i].limit);
}
- sbuf_printf(sb, "\n");
lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP RAM:", lo, hi);
+ if (hi != lo - 1) {
+ sbuf_printf(sb, "\n");
+ mem_region_show(sb, "uP RAM:", lo, hi);
+ }
lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP Extmem2:", lo, hi);
+ if (hi != lo - 1)
+ mem_region_show(sb, "uP Extmem2:", lo, hi);
lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
- for (i = 0, free = 0; i < 2; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMRXNUMCHN(lo);
+ else
+ nchan = lo & F_PMRXNUMCHN ? 2 : 1;
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
G_PMRXMAXPAGE(lo), free,
- t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
- (lo & F_PMRXNUMCHN) ? 2 : 1);
+ t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, nchan);
lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
- for (i = 0, free = 0; i < 4; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMTXNUMCHN(lo);
+ else
+ nchan = 1 << G_PMTXNUMCHN(lo);
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
G_PMTXMAXPAGE(lo), free,
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
+ hi >= (1 << 20) ? 'M' : 'K', nchan);
sbuf_printf(sb, "%u p-structs (%u free)\n",
t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));
@@ -10184,7 +10769,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
i, used, alloc);
}
@@ -10200,7 +10785,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb,
"\nLoopback %d using %u pages out of %u allocated",
i, used, alloc);
@@ -10329,7 +10914,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc, i;
- MPASS(chip_id(sc) > CHELSIO_T5);
+ MPASS(chip_id(sc) == CHELSIO_T6);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
@@ -10338,7 +10923,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
" IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
" Replication"
- " P0 P1 P2 P3 ML\n");
+ " P0 P1 P2 P3 ML");
rc = 0;
for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
@@ -10503,6 +11088,206 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i;
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
+ " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
+ " Replication"
+ " P0 P1 P2 P3 ML");
+
+ rc = 0;
+ for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
+ uint8_t dip_hit, vlan_vld, lookup_type, port_num;
+ uint16_t ivlan;
+ uint64_t tcamx, tcamy, val, mask;
+ uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
+ uint8_t addr[ETHER_ADDR_LEN];
+
+ /* Read tcamy */
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+ if (chip_rev(sc) == 0) {
+ if (i < 256)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 256) | V_T7_CTLTCAMSEL(1);
+ } else {
+#if 0
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+#endif
+ if (i < 512)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else if (i < 1024)
+ ctl |= V_CTLTCAMINDEX(i - 512) | V_T7_CTLTCAMSEL(1);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 1024) | V_T7_CTLTCAMSEL(2);
+ }
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamy = G_DMACH(val) << 32;
+ tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ lookup_type = G_DATALKPTYPE(data2);
+ port_num = G_DATAPORTNUM(data2);
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI */
+ vniy = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ dip_hit = data2 & F_DATADIPHIT;
+ vlan_vld = 0;
+ } else {
+ vniy = 0;
+ dip_hit = 0;
+ vlan_vld = data2 & F_DATAVIDH2;
+ ivlan = G_VIDL(val);
+ }
+
+ ctl |= V_CTLXYBITSEL(1);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamx = G_DMACH(val) << 32;
+ tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI mask */
+ vnix = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ } else
+ vnix = 0;
+
+ if (tcamx & tcamy)
+ continue;
+ tcamxy2valmask(tcamx, tcamy, addr, &mask);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ if (chip_rev(sc) == 0) {
+ cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+ cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ } else {
+ t4_write_reg(sc, A_MPS_CLS_SRAM_H,
+ V_SRAMWRN(0) | V_SRAMINDEX(i));
+ cls_lo = t4_read_reg(sc, A_MPS_CLS_SRAM_L);
+ cls_hi = t4_read_reg(sc, A_MPS_CLS_SRAM_H);
+ }
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx %06x %06x - - %3c"
+ " I %4x %3c %#x%4u%4d", i, addr[0],
+ addr[1], addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
+ port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ } else {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx - - ", i, addr[0], addr[1],
+ addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask);
+
+ if (vlan_vld)
+ sbuf_printf(sb, "%4u Y ", ivlan);
+ else
+ sbuf_printf(sb, " - N ");
+
+ sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
+ lookup_type ? 'I' : 'O', port_num,
+ cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ }
+
+ if (cls_lo & F_T6_REPLICATE) {
+ struct fw_ldst_cmd ldst_cmd;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.rplc.fid_idx =
+ htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+ V_FW_LDST_CMD_IDX(i));
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+ "t6mps");
+ if (rc)
+ break;
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+ end_synchronized_op(sc, 0);
+ if (rc != 0)
+ break;
+ else {
+ sbuf_printf(sb, " %08x %08x %08x %08x"
+ " %08x %08x %08x %08x",
+ be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
+ be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
+ be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
+ be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
+ be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
+ be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
+ be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
+ be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
+ }
+ } else
+ sbuf_printf(sb, "%72s", "");
+
+ sbuf_printf(sb, "%4u%3u%3u%3u %#x",
+ G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
+ G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
+ (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
+ }
+
+ if (rc)
+ (void) sbuf_finish(sb);
+ else
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+
+ return (rc);
+}
+
+static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -10543,6 +11328,7 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
int rc, i;
uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
+ uint32_t stats[T7_PM_RX_CACHE_NSTATS];
static const char *tx_stats[MAX_PM_NSTATS] = {
"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
"Tx FIFO wait", NULL, "Tx latency"
@@ -10559,12 +11345,14 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
else {
t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
+ if (chip_id(sc) >= CHELSIO_T7)
+ t4_pmrx_cache_get_stats(sc, stats);
}
mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
- sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -10599,6 +11387,61 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
rx_cyc[i]);
}
+ if (chip_id(sc) >= CHELSIO_T7) {
+ i = 0;
+ sbuf_printf(sb, "\n\nPM RX Cache Stats\n");
+ sbuf_printf(sb, "%-40s %u\n", "ReqWrite", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadNoInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Split Request",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Split (Read Invalidate)", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Feedback Read Split (Read NoInvalidate)",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Hit", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Feedback Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Full Free",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "FB Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss FL Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss LRU Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Miss LRU-Multiple Evict", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Hit Increasing Islands", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Island Read split", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Overflow Eviction",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u", "Read Overflow Eviction",
+ stats[i++]);
+ }
+
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -11609,15 +12452,17 @@ sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
#endif
static int
-get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
+get_sge_context(struct adapter *sc, int mem_id, uint32_t cid, int len,
+ uint32_t *data)
{
int rc;
- if (cntxt->cid > M_CTXTQID)
+ if (len < sc->chip_params->sge_ctxt_size)
+ return (ENOBUFS);
+ if (cid > M_CTXTQID)
return (EINVAL);
-
- if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
- cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
+ if (mem_id != CTXT_EGRESS && mem_id != CTXT_INGRESS &&
+ mem_id != CTXT_FLM && mem_id != CTXT_CNM)
return (EINVAL);
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
@@ -11630,8 +12475,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
}
if (sc->flags & FW_OK) {
- rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
- &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd(sc, sc->mbox, cid, mem_id, data);
if (rc == 0)
goto done;
}
@@ -11640,7 +12484,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
* Read via firmware failed or wasn't even attempted. Read directly via
* the backdoor.
*/
- rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd_bd(sc, cid, mem_id, data);
done:
end_synchronized_op(sc, 0);
return (rc);
@@ -12048,10 +12892,11 @@ clear_stats(struct adapter *sc, u_int port_id)
mtx_lock(&sc->reg_lock);
if (!hw_off_limits(sc)) {
/* MAC stats */
- t4_clr_port_stats(sc, pi->tx_chan);
+ t4_clr_port_stats(sc, pi->hw_port);
if (is_t6(sc)) {
if (pi->fcs_reg != -1)
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
else
pi->stats.rx_fcs_err = 0;
}
@@ -12114,12 +12959,21 @@ clear_stats(struct adapter *sc, u_int port_id)
txq->kern_tls_full = 0;
txq->kern_tls_octets = 0;
txq->kern_tls_waste = 0;
- txq->kern_tls_options = 0;
txq->kern_tls_header = 0;
- txq->kern_tls_fin = 0;
txq->kern_tls_fin_short = 0;
txq->kern_tls_cbc = 0;
txq->kern_tls_gcm = 0;
+ if (is_t6(sc)) {
+ txq->kern_tls_options = 0;
+ txq->kern_tls_fin = 0;
+ } else {
+ txq->kern_tls_ghash_received = 0;
+ txq->kern_tls_ghash_requested = 0;
+ txq->kern_tls_lso = 0;
+ txq->kern_tls_partial_ghash = 0;
+ txq->kern_tls_splitmode = 0;
+ txq->kern_tls_trailer = 0;
+ }
mp_ring_reset_stats(txq->r);
}
@@ -12264,14 +13118,12 @@ t4_os_link_changed(struct port_info *pi)
if (is_t6(sc)) {
if (lc->link_ok) {
if (lc->speed > 25000 ||
- (lc->speed == 25000 && lc->fec == FEC_RS)) {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
- } else {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
- }
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ (lc->speed == 25000 && lc->fec == FEC_RS))
+ pi->fcs_reg = A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS;
+ else
+ pi->fcs_reg = A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS;
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
pi->stats.rx_fcs_err = 0;
} else {
pi->fcs_reg = -1;
@@ -12404,9 +13256,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_DEL_FILTER:
rc = del_filter(sc, (struct t4_filter *)data);
break;
- case CHELSIO_T4_GET_SGE_CONTEXT:
- rc = get_sge_context(sc, (struct t4_sge_context *)data);
+ case CHELSIO_T4_GET_SGE_CONTEXT: {
+ struct t4_sge_context *ctxt = (struct t4_sge_context *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
break;
+ }
case CHELSIO_T4_LOAD_FW:
rc = load_fw(sc, (struct t4_data *)data);
break;
@@ -12452,6 +13308,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_RELEASE_CLIP_ADDR:
rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
break;
+ case CHELSIO_T4_GET_SGE_CTXT: {
+ struct t4_sge_ctxt *ctxt = (struct t4_sge_ctxt *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
+ break;
+ }
default:
rc = ENOTTY;
}
@@ -12898,7 +13761,9 @@ t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
{
uint32_t base, j, off, pf, reg, save, win_pos;
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, 2) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
save = t4_read_reg(sc, reg);
base = sc->memwin[2].mw_base;
@@ -12910,6 +13775,8 @@ t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
win_pos = addr & ~0x7f; /* start must be 128B aligned */
}
off = addr - win_pos;
+ if (chip_id(sc) > CHELSIO_T6)
+ win_pos >>= X_T7_MEMOFST_SHIFT;
t4_write_reg(sc, reg, win_pos | pf);
t4_read_reg(sc, reg);
@@ -13274,6 +14141,7 @@ mod_event(module_t mod, int cmd, void *arg)
#endif
#ifdef KERN_TLS
t6_ktls_modload();
+ t7_ktls_modload();
#endif
t4_tracer_modload();
tweak_tunables();
@@ -13337,6 +14205,7 @@ mod_event(module_t mod, int cmd, void *arg)
vxlan_stop_evtag);
t4_tracer_modunload();
#ifdef KERN_TLS
+ t7_ktls_modunload();
t6_ktls_modunload();
#endif
#ifdef INET6
@@ -13383,6 +14252,14 @@ MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
+DRIVER_MODULE(chnex, pci, ch_driver, mod_event, 0);
+MODULE_VERSION(chnex, 1);
+MODULE_DEPEND(chnex, crypto, 1, 1, 1);
+MODULE_DEPEND(chnex, firmware, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(chnex, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
MODULE_VERSION(cxgbe, 1);
@@ -13392,6 +14269,9 @@ MODULE_VERSION(cxl, 1);
DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
MODULE_VERSION(cc, 1);
+DRIVER_MODULE(che, chnex, che_driver, 0, 0);
+MODULE_VERSION(che, 1);
+
DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
MODULE_VERSION(vcxgbe, 1);
@@ -13400,3 +14280,6 @@ MODULE_VERSION(vcxl, 1);
DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
MODULE_VERSION(vcc, 1);
+
+DRIVER_MODULE(vche, che, vche_driver, 0, 0);
+MODULE_VERSION(vche, 1);
diff --git a/sys/dev/cxgbe/t4_mp_ring.c b/sys/dev/cxgbe/t4_mp_ring.c
index 531fd356728e..916c363a0c2a 100644
--- a/sys/dev/cxgbe/t4_mp_ring.c
+++ b/sys/dev/cxgbe/t4_mp_ring.c
@@ -305,7 +305,6 @@ failed:
}
void
-
mp_ring_free(struct mp_ring *r)
{
int i;
@@ -472,6 +471,86 @@ mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
return (0);
}
+/*
+ * Enqueue n items but never drain the ring. Can be called
+ * to enqueue new items while draining the ring.
+ *
+ * Returns an errno.
+ */
+int
+mp_ring_enqueue_only(struct mp_ring *r, void **items, int n)
+{
+ union ring_state os, ns;
+ uint16_t pidx_start, pidx_stop;
+ int i;
+
+ MPASS(items != NULL);
+ MPASS(n > 0);
+
+ /*
+ * Reserve room for the new items. Our reservation, if successful, is
+ * from 'pidx_start' to 'pidx_stop'.
+ */
+ os.state = atomic_load_64(&r->state);
+
+ /* Should only be used from the drain callback. */
+ MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
+ os.flags == TAKING_OVER);
+
+ for (;;) {
+ if (__predict_false(space_available(r, os) < n)) {
+ /* Not enough room in the ring. */
+ counter_u64_add(r->dropped, n);
+ return (ENOBUFS);
+ }
+
+ /* There is room in the ring. */
+
+ ns.state = os.state;
+ ns.pidx_head = increment_idx(r, os.pidx_head, n);
+ critical_enter();
+ if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
+ break;
+ critical_exit();
+ cpu_spinwait();
+	}
+
+ pidx_start = os.pidx_head;
+ pidx_stop = ns.pidx_head;
+
+ /*
+ * Wait for other producers who got in ahead of us to enqueue their
+ * items, one producer at a time. It is our turn when the ring's
+ * pidx_tail reaches the beginning of our reservation (pidx_start).
+ */
+ while (ns.pidx_tail != pidx_start) {
+ cpu_spinwait();
+ ns.state = atomic_load_64(&r->state);
+ }
+
+ /* Now it is our turn to fill up the area we reserved earlier. */
+ i = pidx_start;
+ do {
+ r->items[i] = *items++;
+ if (__predict_false(++i == r->size))
+ i = 0;
+ } while (i != pidx_stop);
+
+ /*
+ * Update the ring's pidx_tail. The release style atomic guarantees
+ * that the items are visible to any thread that sees the updated pidx.
+ */
+ os.state = atomic_load_64(&r->state);
+ do {
+ ns.state = os.state;
+ ns.pidx_tail = pidx_stop;
+ } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
+ critical_exit();
+
+ counter_u64_add(r->not_consumer, 1);
+ return (0);
+}
+
void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
diff --git a/sys/dev/cxgbe/t4_mp_ring.h b/sys/dev/cxgbe/t4_mp_ring.h
index 949174b9056d..07b15906cd43 100644
--- a/sys/dev/cxgbe/t4_mp_ring.h
+++ b/sys/dev/cxgbe/t4_mp_ring.h
@@ -62,6 +62,7 @@ int mp_ring_alloc(struct mp_ring **, int, void *, ring_drain_t,
ring_can_drain_t, struct malloc_type *, struct mtx *, int);
void mp_ring_free(struct mp_ring *);
int mp_ring_enqueue(struct mp_ring *, void **, int, int);
+int mp_ring_enqueue_only(struct mp_ring *, void **, int);
void mp_ring_check_drainage(struct mp_ring *, int);
void mp_ring_reset_stats(struct mp_ring *);
bool mp_ring_is_idle(struct mp_ring *);
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index e53fb5054316..0135bec6e2c1 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -232,7 +232,7 @@ alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
@@ -276,7 +276,7 @@ free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, cong_map;
__be32 v;
struct adapter *sc = vi->adapter;
struct port_info *pi = vi->pi;
@@ -284,7 +284,6 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
struct netmap_adapter *na = NA(vi->ifp);
struct fw_iq_cmd c;
const int cong_drop = nm_cong_drop;
- const int cong_map = pi->rx_e_chan_map;
MPASS(na != NULL);
MPASS(nm_rxq->iq_desc != NULL);
@@ -314,13 +313,17 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
c.iqsize = htobe16(vi->qsize_rxq);
c.iqaddr = htobe64(nm_rxq->iq_ba);
if (cong_drop != -1) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF |
F_FW_IQ_CMD_FL0CONGEN);
@@ -421,15 +424,19 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
- c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC);
- else
+ if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) {
+ const int core = sc->params.ncores > 1 ?
+ nm_txq->nid % sc->params.ncores : 0;
+
+ c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core));
+ } else
c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
diff --git a/sys/dev/cxgbe/t4_sched.c b/sys/dev/cxgbe/t4_sched.c
index 2186c8aa2ac0..65c2720d692c 100644
--- a/sys/dev/cxgbe/t4_sched.c
+++ b/sys/dev/cxgbe/t4_sched.c
@@ -272,7 +272,7 @@ update_tx_sched(void *context, int pending)
}
rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
FW_SCHED_PARAMS_LEVEL_CL_RL, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, j, 0, tc->maxrate, 0,
+ tc->ratemode, pi->hw_port, j, 0, tc->maxrate, 0,
tc->pktsize, tc->burstsize, 1);
end_synchronized_op(sc, 0);
@@ -291,7 +291,7 @@ update_tx_sched(void *context, int pending)
"params: mode %d, rateunit %d, ratemode %d, "
"channel %d, minrate %d, maxrate %d, pktsize %d, "
"burstsize %d\n", j, rc, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, 0, tc->maxrate,
+ tc->ratemode, pi->hw_port, 0, tc->maxrate,
tc->pktsize, tc->burstsize);
}
}
@@ -839,7 +839,7 @@ failed:
cst->tx_total = cst->tx_credits;
cst->plen = 0;
cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
/*
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 86454bc4fe10..2f9cb1a4ebb5 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -259,17 +258,20 @@ static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
struct sge_ofld_rxq *);
#endif
-static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ctrl_eq_alloc(struct adapter *, struct sge_eq *, int);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
-static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#endif
static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
struct sysctl_oid *);
static void free_eq(struct adapter *, struct sge_eq *);
static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
struct sysctl_oid *, struct sge_eq *);
-static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
+static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
struct sysctl_ctx_list *, struct sysctl_oid *);
@@ -348,6 +350,7 @@ cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
+cpl_handler_t fw6_pld_handlers[NUM_CPL_FW6_COOKIES];
void
t4_register_an_handler(an_handler_t h)
@@ -477,6 +480,21 @@ fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (fw4_ack_handlers[cookie](iq, rss, m));
}
+static int
+fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ uint64_t cookie;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+ cookie = be64toh(cpl->data[1]) & CPL_FW6_COOKIE_MASK;
+
+ return (fw6_pld_handlers[cookie](iq, rss, m));
+}
+
static void
t4_init_shared_cpl_handlers(void)
{
@@ -486,6 +504,7 @@ t4_init_shared_cpl_handlers(void)
t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
+ t4_register_cpl_handler(CPL_FW6_PLD, fw6_pld_handler);
}
void
@@ -494,8 +513,12 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
uintptr_t *loc;
MPASS(opcode < nitems(t4_cpl_handler));
- MPASS(cookie > CPL_COOKIE_RESERVED);
- MPASS(cookie < NUM_CPL_COOKIES);
+ if (opcode == CPL_FW6_PLD) {
+ MPASS(cookie < NUM_CPL_FW6_COOKIES);
+ } else {
+ MPASS(cookie > CPL_COOKIE_RESERVED);
+ MPASS(cookie < NUM_CPL_COOKIES);
+ }
MPASS(t4_cpl_handler[opcode] != NULL);
switch (opcode) {
@@ -514,6 +537,9 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
case CPL_FW4_ACK:
loc = (uintptr_t *)&fw4_ack_handlers[cookie];
break;
+ case CPL_FW6_PLD:
+ loc = (uintptr_t *)&fw6_pld_handlers[cookie];
+ break;
default:
MPASS(0);
return;
@@ -1064,9 +1090,9 @@ t4_setup_adapter_queues(struct adapter *sc)
*/
/*
- * Control queues, one per port.
+ * Control queues. At least one per port and per internal core.
*/
- for_each_port(sc, i) {
+ for (i = 0; i < sc->sge.nctrlq; i++) {
rc = alloc_ctrlq(sc, i);
if (rc != 0)
return (rc);
@@ -1087,7 +1113,7 @@ t4_teardown_adapter_queues(struct adapter *sc)
if (sc->sge.ctrlq != NULL) {
MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */
- for_each_port(sc, i)
+ for (i = 0; i < sc->sge.nctrlq; i++)
free_ctrlq(sc, i);
}
free_fwq(sc);
@@ -2701,9 +2727,14 @@ restart:
#endif
#ifdef KERN_TLS
if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) {
+ struct vi_info *vi = if_getsoftc(mst->ifp);
+
cflags |= MC_TLS;
set_mbuf_cflags(m0, cflags);
- rc = t6_ktls_parse_pkt(m0);
+ if (is_t6(vi->pi->adapter))
+ rc = t6_ktls_parse_pkt(m0);
+ else
+ rc = t7_ktls_parse_pkt(m0);
if (rc != 0)
goto fail;
return (EINPROGRESS);
@@ -3273,7 +3304,10 @@ skip_coalescing:
#ifdef KERN_TLS
} else if (mbuf_cflags(m0) & MC_TLS) {
ETHER_BPF_MTAP(ifp, m0);
- n = t6_ktls_write_wr(txq, wr, m0, avail);
+ if (is_t6(sc))
+ n = t6_ktls_write_wr(txq, wr, m0, avail);
+ else
+ n = t7_ktls_write_wr(txq, wr, m0, avail);
#endif
} else {
ETHER_BPF_MTAP(ifp, m0);
@@ -3414,6 +3448,7 @@ init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
eq->type = eqtype;
eq->port_id = port_id;
eq->tx_chan = sc->port[port_id]->tx_chan;
+ eq->hw_port = sc->port[port_id]->hw_port;
eq->iq = iq;
eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
strlcpy(eq->lockname, name, sizeof(eq->lockname));
@@ -3577,7 +3612,7 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
@@ -3585,7 +3620,13 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
c.iqaddr = htobe64(iq->ba);
c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype));
if (iq->cong_drop != -1) {
- cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0;
+ if (iq->qtype == IQ_ETH) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
+ } else
+ cong_map = 0;
c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
}
@@ -3842,7 +3883,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
struct sysctl_oid *oid;
struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
- MPASS(idx < sc->params.nports);
+ MPASS(idx < sc->sge.nctrlq);
if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
@@ -3854,8 +3895,8 @@ alloc_ctrlq(struct adapter *sc, int idx)
snprintf(name, sizeof(name), "%s ctrlq%d",
device_get_nameunit(sc->dev), idx);
- init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx,
- &sc->sge.fwq, name);
+ init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE,
+ idx % sc->params.nports, &sc->sge.fwq, name);
rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
if (rc != 0) {
CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
@@ -3870,7 +3911,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
MPASS(ctrlq->nwr_pending == 0);
MPASS(ctrlq->ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
+ rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq, idx);
if (rc != 0) {
CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
return (rc);
@@ -3938,14 +3979,19 @@ t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop,
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
V_FW_PARAMS_PARAM_YZ(cntxt_id);
- val = V_CONMCTXT_CNGTPMODE(cong_mode);
- if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
- cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
- for (i = 0, ch_map = 0; i < 4; i++) {
- if (cong_map & (1 << i))
- ch_map |= 1 << (i << cng_ch_bits_log);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ val = V_T7_DMAQ_CONM_CTXT_CNGTPMODE(cong_mode) |
+ V_T7_DMAQ_CONM_CTXT_CH_VEC(cong_map);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(cong_mode);
+ if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
+ cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
+ for (i = 0, ch_map = 0; i < 4; i++) {
+ if (cong_map & (1 << i))
+ ch_map |= 1 << (i << cng_ch_bits_log);
+ }
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
- val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
if (rc != 0) {
@@ -4253,24 +4299,26 @@ qsize_to_fthresh(int qsize)
}
static int
-ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
+ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ctrl_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.tid_qid_sel_mask != 0 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
V_FW_EQ_CTRL_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
+ V_FW_EQ_CTRL_CMD_COREGROUP(core) |
F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4282,8 +4330,8 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
- eq->tx_chan, rc);
+ CH_ERR(sc, "failed to create hw ctrlq for port %d: %d\n",
+ eq->port_id, rc);
return (rc);
}
@@ -4299,24 +4347,26 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
}
static int
-eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_eth_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.ncores > 1 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core) |
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(eq->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4344,23 +4394,44 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+/*
+ * ncores number of uP cores.
+ * nq number of queues for this VI
+ * idx queue index
+ */
+static inline int
+qidx_to_core(int ncores, int nq, int idx)
+{
+ MPASS(nq % ncores == 0);
+ MPASS(idx >= 0 && idx < nq);
+
+ return (idx * ncores / nq);
+}
+
static int
-ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq,
+ int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ofld_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ if (sc->params.tid_qid_sel_mask != 0)
+ core = qidx_to_core(sc->params.ncores, vi->nofldtxq, idx);
+ else
+ core = 0;
+
bzero(&c, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
V_FW_EQ_OFLD_CMD_VFN(0));
c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
+ V_FW_EQ_OFLD_CMD_COREGROUP(core) |
F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_OFLD_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4449,7 +4520,7 @@ add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
}
static int
-alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
int rc;
@@ -4464,16 +4535,16 @@ alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
switch (eq->type) {
case EQ_CTRL:
- rc = ctrl_eq_alloc(sc, eq);
+ rc = ctrl_eq_alloc(sc, eq, idx);
break;
case EQ_ETH:
- rc = eth_eq_alloc(sc, vi, eq);
+ rc = eth_eq_alloc(sc, vi, eq, idx);
break;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
case EQ_OFLD:
- rc = ofld_eq_alloc(sc, vi, eq);
+ rc = ofld_eq_alloc(sc, vi, eq, idx);
break;
#endif
@@ -4653,7 +4724,7 @@ failed:
if (!(eq->flags & EQ_HW_ALLOCATED)) {
MPASS(eq->flags & EQ_SW_ALLOCATED);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
return (rc);
@@ -4678,10 +4749,10 @@ failed:
if (vi->flags & TX_USES_VM_WR)
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
else
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
txq->tc_idx = -1;
@@ -4788,18 +4859,46 @@ add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
CTLFLAG_RD, &txq->kern_tls_waste,
"# of octets DMAd but not transmitted in NIC TLS records");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
- CTLFLAG_RD, &txq->kern_tls_options,
- "# of NIC TLS options-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
CTLFLAG_RD, &txq->kern_tls_header,
"# of NIC TLS header-only packets transmitted");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
- CTLFLAG_RD, &txq->kern_tls_fin,
- "# of NIC TLS FIN-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
CTLFLAG_RD, &txq->kern_tls_fin_short,
"# of NIC TLS padded FIN packets on short TLS records");
+ if (is_t6(sc)) {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_options", CTLFLAG_RD,
+ &txq->kern_tls_options,
+ "# of NIC TLS options-only packets transmitted");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin,
+ "# of NIC TLS FIN-only packets transmitted");
+ } else {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_received", CTLFLAG_RD,
+ &txq->kern_tls_ghash_received,
+ "# of NIC TLS GHASHes received");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_requested", CTLFLAG_RD,
+ &txq->kern_tls_ghash_requested,
+ "# of NIC TLS GHASHes requested");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_lso", CTLFLAG_RD,
+ &txq->kern_tls_lso,
+ "# of NIC TLS records transmitted using LSO");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_partial_ghash", CTLFLAG_RD,
+ &txq->kern_tls_partial_ghash,
+ "# of NIC TLS records encrypted using a partial GHASH");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_splitmode", CTLFLAG_RD,
+ &txq->kern_tls_splitmode,
+ "# of NIC TLS records using SplitMode");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_trailer", CTLFLAG_RD,
+ &txq->kern_tls_trailer,
+ "# of NIC TLS trailer-only packets transmitted");
+ }
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
CTLFLAG_RD, &txq->kern_tls_cbc,
"# of NIC TLS sessions using AES-CBC");
@@ -4869,7 +4968,7 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
MPASS(eq->flags & EQ_SW_ALLOCATED);
MPASS(ofld_txq->wrq.nwr_pending == 0);
MPASS(ofld_txq->wrq.ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
rc);
@@ -5418,7 +5517,8 @@ write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
m0->m_pkthdr.l5hlen) |
V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
- tnl_lso->r1 = 0;
+ tnl_lso->ipsecen_to_rocev2 = 0;
+ tnl_lso->roce_eth = 0;
/* Inner headers. */
ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
@@ -6583,10 +6683,11 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
V_FW_WR_FLOWID(cst->etid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+ /* Firmware expects hw port and will translate to channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(cst->iqid);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
diff --git a/sys/dev/cxgbe/t4_tpt.c b/sys/dev/cxgbe/t4_tpt.c
new file mode 100644
index 000000000000..d18eabb026f1
--- /dev/null
+++ b/sys/dev/cxgbe/t4_tpt.c
@@ -0,0 +1,193 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Chelsio Communications, Inc.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "common/common.h"
+
+/*
+ * Support routines to manage TPT entries used for both RDMA and NVMe
+ * offloads. This includes allocating STAG indices and managing the
+ * PBL pool.
+ */
+
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+
+/* PBL and STAG Memory Managers. */
+
+#define MIN_PBL_SHIFT 5 /* 32B == min PBL size (4 entries) */
+
+uint32_t
+t4_pblpool_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t addr;
+
+ if (vmem_xalloc(sc->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
+ 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
+ &addr) != 0)
+ return (0);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%lx size %d", __func__, addr, size);
+#endif
+ return (addr);
+}
+
+void
+t4_pblpool_free(struct adapter *sc, uint32_t addr, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%x size %d", __func__, addr, size);
+#endif
+ vmem_xfree(sc->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
+}
+
+uint32_t
+t4_stag_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t stag_idx;
+
+ if (vmem_alloc(sc->stag_arena, size, M_FIRSTFIT | M_NOWAIT,
+ &stag_idx) != 0)
+ return (T4_STAG_UNSET);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%lx size %d", __func__, stag_idx, size);
+#endif
+ return (stag_idx);
+}
+
+void
+t4_stag_free(struct adapter *sc, uint32_t stag_idx, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%x size %d", __func__, stag_idx, size);
+#endif
+ vmem_free(sc->stag_arena, stag_idx, size);
+}
+
+void
+t4_init_tpt(struct adapter *sc)
+{
+ if (sc->vres.pbl.size != 0)
+ sc->pbl_arena = vmem_create("PBL_MEM_POOL", sc->vres.pbl.start,
+ sc->vres.pbl.size, 1, 0, M_FIRSTFIT | M_WAITOK);
+ if (sc->vres.stag.size != 0)
+ sc->stag_arena = vmem_create("STAG", 1,
+ sc->vres.stag.size >> 5, 1, 0, M_FIRSTFIT | M_WAITOK);
+}
+
+void
+t4_free_tpt(struct adapter *sc)
+{
+ if (sc->pbl_arena != NULL)
+ vmem_destroy(sc->pbl_arena);
+ if (sc->stag_arena != NULL)
+ vmem_destroy(sc->stag_arena);
+}
+
+/*
+ * TPT support routines. TPT entries are stored in the STAG adapter
+ * memory region and are written to via ULP_TX_MEM_WRITE commands in
+ * FW_ULPTX_WR work requests.
+ */
+
+void
+t4_write_mem_dma_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, vm_paddr_t data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_sgl *sgl;
+
+ MPASS(wr_len == T4_WRITE_MEM_DMA_LEN);
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ V_T5_ULP_MEMIO_ORDER(1) |
+ V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(len >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(len >> 5));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(ulpmc + 1);
+ sgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1));
+ sgl->len0 = htobe32(len);
+ sgl->addr0 = htobe64(data);
+}
+
+void
+t4_write_mem_inline_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, void *data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_idata *ulpsc;
+
+ MPASS(len > 0 && len <= T4_MAX_INLINE_SIZE);
+ MPASS(wr_len == T4_WRITE_MEM_INLINE_LEN(len));
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ F_T5_ULP_MEMIO_IMM);
+
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ ulpsc->len = htobe32(roundup(len, T4_ULPTX_MIN_IO));
+
+ if (data != NULL)
+ memcpy(ulpsc + 1, data, len);
+}
diff --git a/sys/dev/cxgbe/t4_tracer.c b/sys/dev/cxgbe/t4_tracer.c
index 80689a543e83..4f8d28626bc9 100644
--- a/sys/dev/cxgbe/t4_tracer.c
+++ b/sys/dev/cxgbe/t4_tracer.c
@@ -123,9 +123,8 @@ static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{
- if (strncmp(name, "t4nex", 5) != 0 &&
- strncmp(name, "t5nex", 5) != 0 &&
- strncmp(name, "t6nex", 5) != 0)
+ if (strncmp(name, "t4nex", 5) != 0 && strncmp(name, "t5nex", 5) != 0 &&
+ strncmp(name, "t6nex", 5) != 0 && strncmp(name, "chnex", 5) != 0)
return (0);
if (name[5] < '0' || name[5] > '9')
return (0);
diff --git a/sys/dev/cxgbe/t4_vf.c b/sys/dev/cxgbe/t4_vf.c
index b7b08e226a57..89dae02e9332 100644
--- a/sys/dev/cxgbe/t4_vf.c
+++ b/sys/dev/cxgbe/t4_vf.c
@@ -125,6 +125,28 @@ struct {
{0x6885, "Chelsio T6240-SO 85 VF"},
{0x6886, "Chelsio T6225-SO-CR 86 VF"},
{0x6887, "Chelsio T6225-CR 87 VF"},
+}, t7vf_pciids[] = {
+ {0xd800, "Chelsio T7 FPGA VF"}, /* T7 PE12K FPGA */
+ {0x7800, "Chelsio T72200-DBG VF"}, /* 2 x 200G, debug */
+ {0x7801, "Chelsio T7250 VF"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7802, "Chelsio S7250 VF"}, /* 2 x 10/25/50G, nomem */
+ {0x7803, "Chelsio T7450 VF"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7804, "Chelsio S7450 VF"}, /* 4 x 10/25/50G, nomem */
+ {0x7805, "Chelsio T72200 VF"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7806, "Chelsio S72200 VF"}, /* 2 x 40/100/200G, nomem */
+ {0x7807, "Chelsio T72200-FH VF"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7808, "Chelsio T71400 VF"}, /* 1 x 400G, nomem */
+ {0x7809, "Chelsio S7210-BT VF"}, /* 2 x 10GBASE-T, nomem */
+ {0x780a, "Chelsio T7450-RC VF"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x780b, "Chelsio T72200-RC VF"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x780c, "Chelsio T72200-FH-RC VF"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x780d, "Chelsio S72200-OCP3 VF"}, /* 2 x 40/100/200G OCP3 */
+ {0x780e, "Chelsio S7450-OCP3 VF"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x780f, "Chelsio S7410-BT-OCP3 VF"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7810, "Chelsio S7210-BT-A VF"}, /* 2 x 10GBASE-T */
+ {0x7811, "Chelsio T7_MAYRA_7 VF"}, /* Motherboard */
+
+ {0x7880, "Custom T7 VF"},
};
static d_ioctl_t t4vf_ioctl;
@@ -183,6 +205,22 @@ t6vf_probe(device_t dev)
return (ENXIO);
}
+static int
+chvf_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7vf_pciids); i++) {
+ if (d == t7vf_pciids[i].device) {
+ device_set_desc(dev, t7vf_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
#define FW_PARAM_DEV(param) \
(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
@@ -956,6 +994,20 @@ static driver_t t6vf_driver = {
sizeof(struct adapter)
};
+static device_method_t chvf_methods[] = {
+ DEVMETHOD(device_probe, chvf_probe),
+ DEVMETHOD(device_attach, t4vf_attach),
+ DEVMETHOD(device_detach, t4_detach_common),
+
+ DEVMETHOD_END
+};
+
+static driver_t chvf_driver = {
+ "chvf",
+ chvf_methods,
+ sizeof(struct adapter)
+};
+
static driver_t cxgbev_driver = {
"cxgbev",
cxgbe_methods,
@@ -974,6 +1026,12 @@ static driver_t ccv_driver = {
sizeof(struct port_info)
};
+static driver_t chev_driver = {
+ "chev",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
DRIVER_MODULE(t4vf, pci, t4vf_driver, 0, 0);
MODULE_VERSION(t4vf, 1);
MODULE_DEPEND(t4vf, t4nex, 1, 1, 1);
@@ -986,6 +1044,10 @@ DRIVER_MODULE(t6vf, pci, t6vf_driver, 0, 0);
MODULE_VERSION(t6vf, 1);
MODULE_DEPEND(t6vf, t6nex, 1, 1, 1);
+DRIVER_MODULE(chvf, pci, chvf_driver, 0, 0);
+MODULE_VERSION(chvf, 1);
+MODULE_DEPEND(chvf, chnex, 1, 1, 1);
+
DRIVER_MODULE(cxgbev, t4vf, cxgbev_driver, 0, 0);
MODULE_VERSION(cxgbev, 1);
@@ -994,3 +1056,6 @@ MODULE_VERSION(cxlv, 1);
DRIVER_MODULE(ccv, t6vf, ccv_driver, 0, 0);
MODULE_VERSION(ccv, 1);
+
+DRIVER_MODULE(chev, chvf, chev_driver, 0, 0);
+MODULE_VERSION(chev, 1);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 99e4c222996d..c236ee060bc2 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -89,6 +89,12 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
INP_WLOCK(inp);
toep->tid = tid;
insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
+ if (sc->params.tid_qid_sel_mask != 0) {
+ update_tid_qid_sel(toep->vi, &toep->params, tid);
+ toep->ofld_txq = &sc->sge.ofld_txq[toep->params.txq_idx];
+ toep->ctrlq = &sc->sge.ctrlq[toep->params.ctrlq_idx];
+ }
+
if (inp->inp_flags & INP_DROPPED) {
/* socket closed by the kernel before hw told us it connected */
@@ -205,7 +211,7 @@ static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
sizeof (struct cpl_act_open_req),
sizeof (struct cpl_act_open_req6)
@@ -218,10 +224,14 @@ act_open_cpl_size(struct adapter *sc, int isipv6)
sizeof (struct cpl_t6_act_open_req),
sizeof (struct cpl_t6_act_open_req6)
},
+ {
+ sizeof (struct cpl_t7_act_open_req),
+ sizeof (struct cpl_t7_act_open_req6)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
@@ -255,6 +265,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct offload_settings settings;
struct epoch_tracker et;
uint16_t vid = 0xfff, pcp = 0;
+ uint64_t ntuple;
INP_WLOCK_ASSERT(inp);
KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
@@ -308,10 +319,12 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
V_TID_COOKIE(CPL_COOKIE_TOM);
+ ntuple = select_ntuple(vi, toep->l2te);
if (isipv6) {
struct cpl_act_open_req6 *cpl = wrtod(wr);
struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req6 *cpl7 = (void *)cpl;
if ((inp->inp_vflag & INP_IPV6) == 0)
DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
@@ -323,18 +336,23 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
+ break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
break;
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
@@ -356,23 +374,28 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct cpl_act_open_req *cpl = wrtod(wr);
struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req *cpl7 = (void *)cpl;
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index be20ea42474e..84e31efa8b58 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -127,8 +127,9 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
paramidx = 0;
FLOWC_PARAM(PFNVFN, pfvf);
- FLOWC_PARAM(CH, pi->tx_chan);
- FLOWC_PARAM(PORT, pi->tx_chan);
+ /* Firmware expects hw port and will translate to channel itself. */
+ FLOWC_PARAM(CH, pi->hw_port);
+ FLOWC_PARAM(PORT, pi->hw_port);
FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id);
FLOWC_PARAM(SNDBUF, toep->params.sndbuf);
if (tp) {
@@ -2050,9 +2051,18 @@ write_set_tcb_field(struct adapter *sc, void *dst, struct toepcb *toep,
}
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
- req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
- if (reply == 0)
- req->reply_ctrl |= htobe16(F_NO_REPLY);
+ if (reply == 0) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = toep->ofld_rxq->iq.abs_id;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
req->mask = htobe64(mask);
req->val = htobe64(val);
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index da0753296532..35fb1061d867 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -1655,7 +1655,10 @@ t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
INIT_ULPTX_WR(ulpmc, len, 0, 0);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1842,7 +1845,10 @@ t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep,
ulpmc = mtod(m, struct ulp_mem_io *);
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1922,7 +1928,10 @@ t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -2013,7 +2022,10 @@ t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 06c495dcafc3..b879f6883f25 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -508,10 +508,11 @@ send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
V_FW_WR_FLOWID(synqe->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+ /* Firmware expects hw port and will translate to channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
@@ -1507,6 +1508,8 @@ found:
init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx,
&synqe->params);
+ if (sc->params.tid_qid_sel_mask != 0)
+ update_tid_qid_sel(vi, &synqe->params, tid);
/*
* If all goes well t4_syncache_respond will get called during
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index ad72c6a6b025..bbcc1c88c3db 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -207,7 +207,7 @@ int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
struct adapter *sc = td_adapter(toep->td);
- int error, explicit_iv_size, mac_first;
+ int error, iv_size, mac_first;
if (!can_tls_offload(sc))
return (EINVAL);
@@ -228,6 +228,21 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
}
}
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE) {
+ return (EPROTONOSUPPORT);
+ }
+
+ /* TLS 1.3 is only supported on T7+. */
+ if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) {
+ if (is_t6(sc)) {
+ return (EPROTONOSUPPORT);
+ }
+ }
+
+ /* Sanity check values in *tls. */
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
/* XXX: Explicitly ignore any provided IV. */
@@ -247,13 +262,10 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EPROTONOSUPPORT);
}
- explicit_iv_size = AES_BLOCK_LEN;
+ iv_size = AES_BLOCK_LEN;
mac_first = 1;
break;
case CRYPTO_AES_NIST_GCM_16:
- if (tls->params.iv_len != SALT_SIZE) {
- return (EINVAL);
- }
switch (tls->params.cipher_key_len) {
case 128 / 8:
case 192 / 8:
@@ -262,20 +274,19 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EINVAL);
}
- explicit_iv_size = 8;
+
+ /*
+ * The IV size for TLS 1.2 is the explicit IV in the
+ * record header. For TLS 1.3 it is the size of the
+ * sequence number.
+ */
+ iv_size = 8;
mac_first = 0;
break;
default:
return (EPROTONOSUPPORT);
}
- /* Only TLS 1.1 and TLS 1.2 are currently supported. */
- if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
- tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
- tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
- return (EPROTONOSUPPORT);
- }
-
/* Bail if we already have a key. */
if (direction == KTLS_TX) {
if (toep->tls.tx_key_addr != -1)
@@ -289,6 +300,7 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
if (error)
return (error);
+ toep->tls.tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
if (direction == KTLS_TX) {
toep->tls.scmd0.seqno_numivs =
(V_SCMD_SEQ_NO_CTRL(3) |
@@ -298,14 +310,14 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
- V_SCMD_IV_SIZE(explicit_iv_size / 2));
+ V_SCMD_IV_SIZE(iv_size / 2));
toep->tls.scmd0.ivgen_hdrlen =
(V_SCMD_IV_GEN_CTRL(1) |
V_SCMD_KEY_CTX_INLINE(0) |
V_SCMD_TLS_FRAG_ENABLE(1));
- toep->tls.iv_len = explicit_iv_size;
+ toep->tls.iv_len = iv_size;
toep->tls.frag_size = tls->params.max_frame_len;
toep->tls.fcplenmax = get_tp_plen_max(tls);
toep->tls.expn_per_ulp = tls->params.tls_hlen +
@@ -352,7 +364,8 @@ tls_uninit_toep(struct toepcb *toep)
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
- unsigned int plen, unsigned int expn, uint8_t credits, int shove)
+ unsigned int plen, unsigned int expn, uint8_t credits, int shove,
+ int num_ivs)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
unsigned int len = plen + expn;
@@ -365,7 +378,7 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
txwr->plen = htobe32(len);
txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
- txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
+ txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(num_ivs) |
V_FW_TLSTX_DATA_WR_EXP(expn) |
V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
V_FW_TLSTX_DATA_WR_IVDSGL(0) |
@@ -381,20 +394,20 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
- struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
+ struct tls_hdr *tls_hdr, unsigned int plen, uint8_t rec_type,
+ uint64_t seqno)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
int data_type, seglen;
seglen = plen;
- data_type = tls_content_type(tls_hdr->type);
+ data_type = tls_content_type(rec_type);
cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
cpl->pld_len = htobe32(plen);
if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
- cpl->type_protover = htobe32(
- V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
+ cpl->type_protover = htobe32(V_CPL_TX_TLS_SFO_TYPE(rec_type));
cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
V_SCMD_NUM_IVS(1));
cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
@@ -498,6 +511,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
int tls_size, tx_credits, shove, sowwakeup;
struct ofld_tx_sdesc *txsd;
char *buf;
+ bool tls13;
INP_WLOCK_ASSERT(inp);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
@@ -533,6 +547,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
return;
}
+ tls13 = toep->tls.tls13;
txsd = &toep->txsd[toep->txsd_pidx];
for (;;) {
tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
@@ -599,9 +614,11 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
sizeof(struct cpl_tx_tls_sfo) +
sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);
- /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
- MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
- wr_len += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
+ MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
+ wr_len += AES_BLOCK_LEN;
+ }
/* Account for SGL in work request length. */
nsegs = count_ext_pgs_segs(m);
@@ -671,8 +688,10 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
expn_size = m->m_epg_hdrlen +
m->m_epg_trllen;
tls_size = m->m_len - expn_size;
- write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
- write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);
+ write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove,
+ tls13 ? 0 : 1);
+ write_tlstx_cpl(cpl, toep, thdr, tls_size,
+ tls13 ? m->m_epg_record_type : thdr->type, m->m_epg_seqno);
idata = (struct ulptx_idata *)(cpl + 1);
idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
@@ -683,10 +702,12 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);
- /* Copy IV. */
buf = (char *)(memrd + 1);
- memcpy(buf, thdr + 1, toep->tls.iv_len);
- buf += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Copy IV. */
+ memcpy(buf, thdr + 1, toep->tls.iv_len);
+ buf += AES_BLOCK_LEN;
+ }
write_ktlstx_sgl(buf, m, nsegs);
@@ -808,8 +829,8 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct sockbuf *sb;
struct mbuf *tls_data;
struct tls_get_record *tgr;
- struct mbuf *control;
- int pdu_length, trailer_len;
+ struct mbuf *control, *n;
+ int pdu_length, resid, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
int len;
#endif
@@ -857,7 +878,9 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
/*
* The payload of this CPL is the TLS header followed by
- * additional fields.
+ * additional fields. For TLS 1.3 the type field holds the
+ * inner record type and the length field has been updated to
+ * strip the inner record type, padding, and MAC.
*/
KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
("%s: payload too small", __func__));
@@ -869,7 +892,14 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: sequence mismatch", __func__));
}
- /* Report decryption errors as EBADMSG. */
+ /*
+ * Report decryption errors as EBADMSG.
+ *
+ * XXX: To support rekeying for TLS 1.3 this will eventually
+ * have to be updated to recrypt the data with the old key and
+ * then decrypt with the new key. Punt for now as KTLS
+ * doesn't yet support rekeying.
+ */
if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
__func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
@@ -887,6 +917,33 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
+ /* For TLS 1.3 trim the header and trailer. */
+ if (toep->tls.tls13) {
+ KASSERT(tls_data != NULL, ("%s: TLS 1.3 record without data",
+ __func__));
+ MPASS(tls_data->m_pkthdr.len == pdu_length);
+ m_adj(tls_data, sizeof(struct tls_record_layer));
+ if (tls_data->m_pkthdr.len > be16toh(tls_hdr_pkt->length))
+ tls_data->m_pkthdr.len = be16toh(tls_hdr_pkt->length);
+ resid = tls_data->m_pkthdr.len;
+ if (resid == 0) {
+ m_freem(tls_data);
+ tls_data = NULL;
+ } else {
+ for (n = tls_data;; n = n->m_next) {
+ if (n->m_len < resid) {
+ resid -= n->m_len;
+ continue;
+ }
+
+ n->m_len = resid;
+ m_freem(n->m_next);
+ n->m_next = NULL;
+ break;
+ }
+ }
+ }
+
/* Handle data received after the socket is closed. */
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
@@ -1091,33 +1148,60 @@ out:
}
/*
- * Send a work request setting multiple TCB fields to enable
- * ULP_MODE_TLS.
+ * Send a work request setting one or more TCB fields to partially or
+ * fully enable ULP_MODE_TLS.
+ *
+ * - If resid == 0, the socket buffer ends at a record boundary
+ * (either empty or contains one or more complete records). Switch
+ * to ULP_MODE_TLS (if not already) and enable TLS decryption.
+ *
+ * - If resid != 0, the socket buffer contains a partial record. In
+ * this case, switch to ULP_MODE_TLS partially and configure the TCB
+ * to pass along the remaining resid bytes undecrypted. Once they
+ * arrive, this is called again with resid == 0 and enables TLS
+ * decryption.
*/
static void
-tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
+tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno,
+ size_t resid)
{
struct mbuf *m;
struct work_request_hdr *wrh;
struct ulp_txpkt *ulpmc;
int fields, key_offset, len;
- KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
- ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));
+ /*
+ * If we are already in ULP_MODE_TLS, then we should now be at
+ * a record boundary and ready to finish enabling TLS RX.
+ */
+ KASSERT(resid == 0 || ulp_mode(toep) == ULP_MODE_NONE,
+ ("%s: tid %d needs %zu more data but already ULP_MODE_TLS",
+ __func__, toep->tid, resid));
fields = 0;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* 2 writes for the overlay region */
+ fields += 2;
+ }
- /* 2 writes for the overlay region */
- fields += 2;
+ if (resid == 0) {
+ /* W_TCB_TLS_SEQ */
+ fields++;
- /* W_TCB_TLS_SEQ */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ } else {
+ /* W_TCB_PDU_LEN */
+ fields++;
- /* W_TCB_ULP_RAW */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ }
- /* W_TCB_ULP_TYPE */
- fields ++;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* W_TCB_ULP_TYPE */
+ fields ++;
+ }
/* W_TCB_T_FLAGS */
fields++;
@@ -1136,43 +1220,78 @@ tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
INIT_ULPTX_WRH(wrh, len, 1, toep->tid); /* atomic */
ulpmc = (struct ulp_txpkt *)(wrh + 1);
- /*
- * Clear the TLS overlay region: 1023:832.
- *
- * Words 26/27 are always set to zero. Words 28/29
- * contain seqno and are set when enabling TLS
- * decryption. Word 30 is zero and Word 31 contains
- * the keyid.
- */
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
- 0xffffffffffffffff, 0);
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /*
+ * Clear the TLS overlay region: 1023:832.
+ *
+ * Words 26/27 are always set to zero. Words 28/29
+ * contain seqno and are set when enabling TLS
+ * decryption. Word 30 is zero and Word 31 contains
+ * the keyid.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
+ 0xffffffffffffffff, 0);
- /*
- * RX key tags are an index into the key portion of MA
- * memory stored as an offset from the base address in
- * units of 64 bytes.
- */
- key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
- 0xffffffffffffffff,
- (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
-
- CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
- toep->tid, seqno);
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TLS_SEQ,
- V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_RAW,
- V_TCB_ULP_RAW(M_TCB_ULP_RAW),
- V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
- V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
-
- toep->flags &= ~TPF_TLS_STARTING;
- toep->flags |= TPF_TLS_RECEIVE;
-
- /* Set the ULP mode to ULP_MODE_TLS. */
- toep->params.ulp_mode = ULP_MODE_TLS;
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE,
- V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ /*
+ * RX key tags are an index into the key portion of MA
+ * memory stored as an offset from the base address in
+ * units of 64 bytes.
+ */
+ key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
+ 0xffffffffffffffff,
+ (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
+ }
+
+ if (resid == 0) {
+ /*
+ * The socket buffer is empty or only contains
+ * complete TLS records: Set the sequence number and
+ * enable TLS decryption.
+ */
+ CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
+ toep->tid, seqno);
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_RX_TLS_SEQ, V_TCB_RX_TLS_SEQ(M_TCB_RX_TLS_SEQ),
+ V_TCB_RX_TLS_SEQ(seqno));
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
+ V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
+
+ toep->flags &= ~TPF_TLS_STARTING;
+ toep->flags |= TPF_TLS_RECEIVE;
+ } else {
+ /*
+ * The socket buffer ends with a partial record with a
+ * full header and needs at least 6 bytes.
+ *
+ * Set PDU length. This is treating the 'resid' bytes
+ * as a TLS PDU, so the first 5 bytes are a fake
+ * header and the rest are the PDU length.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_PDU_LEN, V_TCB_PDU_LEN(M_TCB_PDU_LEN),
+ V_TCB_PDU_LEN(resid - sizeof(struct tls_hdr)));
+ CTR3(KTR_CXGBE, "%s: tid %d setting PDU_LEN to %zu",
+ __func__, toep->tid, resid - sizeof(struct tls_hdr));
+
+ /* Clear all bits in ULP_RAW except for ENABLE. */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
+
+ /* Wait for 'resid' bytes to be delivered as CPL_RX_DATA. */
+ toep->tls.rx_resid = resid;
+ }
+
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* Set the ULP mode to ULP_MODE_TLS. */
+ toep->params.ulp_mode = ULP_MODE_TLS;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_TYPE, V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
+ V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ }
/* Clear TF_RX_QUIESCE. */
ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
@@ -1205,7 +1324,8 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
* size of a TLS record, re-enable receive and pause again once
* we get more data to try again.
*/
- if (!have_header || resid != 0) {
+ if (!have_header || (resid != 0 && (resid < sizeof(struct tls_hdr) ||
+ is_t6(sc)))) {
CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
toep->tid);
toep->flags &= ~TPF_TLS_RX_QUIESCED;
@@ -1213,7 +1333,7 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
return;
}
- tls_update_tcb(sc, toep, seqno);
+ tls_update_tcb(sc, toep, seqno, resid);
}
void
diff --git a/sys/dev/cxgbe/tom/t4_tls.h b/sys/dev/cxgbe/tom/t4_tls.h
index 753a30890fdc..6faf946e9e3c 100644
--- a/sys/dev/cxgbe/tom/t4_tls.h
+++ b/sys/dev/cxgbe/tom/t4_tls.h
@@ -74,6 +74,7 @@ struct tls_ofld_info {
unsigned short adjusted_plen;
unsigned short expn_per_ulp;
unsigned short pdus_per_ulp;
+ bool tls13;
struct tls_scmd scmd0;
u_int iv_len;
unsigned int tx_key_info_size;
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 0b54fdaa5c80..53a945f8b4cc 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -182,7 +182,7 @@ init_toepcb(struct vi_info *vi, struct toepcb *toep)
}
toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
- toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
+ toep->ctrlq = &sc->sge.ctrlq[cp->ctrlq_idx];
tls_init_toep(toep);
MPASS(ulp_mode(toep) != ULP_MODE_TCPDDP);
@@ -494,8 +494,15 @@ send_get_tcb(struct adapter *sc, u_int tid)
bzero(cpl, sizeof(*cpl));
INIT_TP_WR(cpl, tid);
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
- cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
- V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
+ if (chip_id(sc) >= CHELSIO_T7) {
+ cpl->reply_ctrl =
+ htobe16(V_T7_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ cpl->reply_ctrl =
+ htobe16(V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
cpl->cookie = 0xff;
commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);
@@ -1221,7 +1228,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
- ntuple |= (uint64_t)e->lport << tp->port_shift;
+ ntuple |= (uint64_t)e->hw_port << tp->port_shift;
if (tp->protocol_shift >= 0)
ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
@@ -1232,10 +1239,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
tp->vnic_shift;
}
- if (is_t4(sc))
- return (htobe32((uint32_t)ntuple));
- else
- return (htobe64(V_FILTER_TUPLE(ntuple)));
+ return (ntuple);
}
/*
@@ -1326,6 +1330,9 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
*/
cp->mtu_idx = find_best_mtu_idx(sc, inc, s);
+ /* Control queue. */
+ cp->ctrlq_idx = vi->pi->port_id;
+
/* Tx queue for this connection. */
if (s->txq == QUEUE_RANDOM)
q_idx = arc4random();
@@ -1438,6 +1445,32 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
cp->emss = 0;
}
+void
+update_tid_qid_sel(struct vi_info *vi, struct conn_params *cp, int tid)
+{
+ struct adapter *sc = vi->adapter;
+ const int mask = sc->params.tid_qid_sel_mask;
+ struct sge_ofld_txq *ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ uint32_t ngroup;
+ int g, nqpg;
+
+ cp->ctrlq_idx = ofld_txq_group(tid, mask);
+ CTR(KTR_CXGBE, "tid %u is on core %u", tid, cp->ctrlq_idx);
+ if ((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask))
+ return;
+
+ ngroup = 1 << bitcount32(mask);
+ MPASS(vi->nofldtxq % ngroup == 0);
+ g = ofld_txq_group(tid, mask);
+ nqpg = vi->nofldtxq / ngroup;
+ cp->txq_idx = vi->first_ofld_txq + g * nqpg + arc4random() % nqpg;
+#ifdef INVARIANTS
+ MPASS(cp->txq_idx < vi->first_ofld_txq + vi->nofldtxq);
+ ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ MPASS((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask));
+#endif
+}
+
int
negative_advice(int status)
{
@@ -2233,6 +2266,98 @@ t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
return (0);
}
+/*
+ * Request/response structure used to find out the adapter offloading
+ * a socket.
+ */
+struct find_offload_adapter_data {
+ struct socket *so;
+ struct adapter *sc; /* result */
+};
+
+static void
+find_offload_adapter_cb(struct adapter *sc, void *arg)
+{
+ struct find_offload_adapter_data *fa = arg;
+ struct socket *so = fa->so;
+ struct tom_data *td = sc->tom_softc;
+ struct tcpcb *tp;
+ struct inpcb *inp;
+
+	/* Non-TCP sockets were filtered out earlier. */
+ MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
+
+ if (fa->sc != NULL)
+ return; /* Found already. */
+
+ if (td == NULL)
+ return; /* TOE not enabled on this adapter. */
+
+ inp = sotoinpcb(so);
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) == 0) {
+ tp = intotcpcb(inp);
+ if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
+ fa->sc = sc; /* Found. */
+ }
+ INP_WUNLOCK(inp);
+}
+
+struct adapter *
+find_offload_adapter(struct socket *so)
+{
+ struct find_offload_adapter_data fa;
+
+ fa.sc = NULL;
+ fa.so = so;
+ t4_iterate(find_offload_adapter_cb, &fa);
+ return (fa.sc);
+}
+
+void
+send_txdataplen_max_flowc_wr(struct adapter *sc, struct toepcb *toep,
+ int maxlen)
+{
+ struct wrqe *wr;
+ struct fw_flowc_wr *flowc;
+ const u_int nparams = 1;
+ u_int flowclen;
+ struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
+
+ CTR(KTR_CXGBE, "%s: tid %u maxlen=%d", __func__, toep->tid, maxlen);
+
+ flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
+
+ wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL) {
+ /* XXX */
+ panic("%s: allocation failure.", __func__);
+ }
+ flowc = wrtod(wr);
+ memset(flowc, 0, wr->wr_len);
+
+ flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
+ V_FW_FLOWC_WR_NPARAMS(nparams));
+ flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
+ V_FW_WR_FLOWID(toep->tid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[0].val = htobe32(maxlen);
+
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
+ txsd->tx_credits = howmany(flowclen, 16);
+ txsd->plen = 0;
+ KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
+ ("%s: not enough credits (%d)", __func__, toep->tx_credits));
+ toep->tx_credits -= txsd->tx_credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
+}
+
static int
t4_tom_mod_load(void)
{
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 4fb87d92d91e..c8c2d432b8f1 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -113,6 +113,7 @@ struct conn_params {
int8_t mtu_idx;
int8_t ulp_mode;
int8_t tx_align;
+ int8_t ctrlq_idx; /* ctrlq = &sc->sge.ctrlq[ctrlq_idx] */
int16_t txq_idx; /* ofld_txq = &sc->sge.ofld_txq[txq_idx] */
int16_t rxq_idx; /* ofld_rxq = &sc->sge.ofld_rxq[rxq_idx] */
int16_t l2t_idx;
@@ -477,11 +478,14 @@ int select_rcv_wscale(void);
void init_conn_params(struct vi_info *, struct offload_settings *,
struct in_conninfo *, struct socket *, const struct tcp_options *, int16_t,
struct conn_params *cp);
+void update_tid_qid_sel(struct vi_info *, struct conn_params *, int);
__be64 calc_options0(struct vi_info *, struct conn_params *);
__be32 calc_options2(struct vi_info *, struct conn_params *);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
int negative_advice(int);
int add_tid_to_history(struct adapter *, u_int);
+struct adapter *find_offload_adapter(struct socket *);
+void send_txdataplen_max_flowc_wr(struct adapter *, struct toepcb *, int);
void t4_pcb_detach(struct toedev *, struct tcpcb *);
/* t4_connect.c */
@@ -582,4 +586,10 @@ int tls_tx_key(struct toepcb *);
void tls_uninit_toep(struct toepcb *);
int tls_alloc_ktls(struct toepcb *, struct ktls_session *, int);
+/* t4_tpt.c */
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+int t4_pblpool_create(struct adapter *);
+void t4_pblpool_destroy(struct adapter *);
+
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.c b/sys/dev/cxgbe/tom/t4_tom_l2t.c
index 3fd0d5ca41d4..e245c2b6fd5b 100644
--- a/sys/dev/cxgbe/tom/t4_tom_l2t.c
+++ b/sys/dev/cxgbe/tom/t4_tom_l2t.c
@@ -403,7 +403,7 @@ t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
l2_store(sa, e);
e->ifp = ifp;
e->hash = hash;
- e->lport = pi->lport;
+ e->hw_port = pi->hw_port;
e->wrq = &sc->sge.ctrlq[pi->port_id];
e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
atomic_store_rel_int(&e->refcnt, 1);
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 5a60f939dc78..6c6f79227166 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -704,7 +704,7 @@ gpioc_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
* npins isn't a horrible fifo size for that either.
*/
priv->numevents = priv->sc->sc_npins * 2;
- priv->events = malloc(priv->numevents * sizeof(struct gpio_event_detail),
+ priv->events = malloc(priv->numevents * sizeof(struct gpioc_pin_event),
M_GPIOC, M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
@@ -793,6 +793,7 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct gpio_access_32 *a32;
struct gpio_config_32 *c32;
struct gpio_event_config *evcfg;
+ struct gpioc_pin_event *tmp;
uint32_t caps, intrflags;
switch (cmd) {
@@ -908,27 +909,35 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- /* If any pins have been configured, changes aren't allowed. */
- if (!SLIST_EMPTY(&priv->pins)) {
- res = EINVAL;
- break;
- }
if (evcfg->gp_report_type != GPIO_EVENT_REPORT_DETAIL &&
evcfg->gp_report_type != GPIO_EVENT_REPORT_SUMMARY) {
res = EINVAL;
break;
}
- priv->report_option = evcfg->gp_report_type;
/* Reallocate the events buffer if the user wants it bigger. */
- if (priv->report_option == GPIO_EVENT_REPORT_DETAIL &&
+ tmp = NULL;
+ if (evcfg->gp_report_type == GPIO_EVENT_REPORT_DETAIL &&
priv->numevents < evcfg->gp_fifo_size) {
+ tmp = malloc(evcfg->gp_fifo_size *
+ sizeof(struct gpioc_pin_event), M_GPIOC,
+ M_WAITOK | M_ZERO);
+ }
+ mtx_lock(&priv->mtx);
+ /* If any pins have been configured, changes aren't allowed. */
+ if (!SLIST_EMPTY(&priv->pins)) {
+ mtx_unlock(&priv->mtx);
+ free(tmp, M_GPIOC);
+ res = EINVAL;
+ break;
+ }
+ if (tmp != NULL) {
free(priv->events, M_GPIOC);
+ priv->events = tmp;
priv->numevents = evcfg->gp_fifo_size;
- priv->events = malloc(priv->numevents *
- sizeof(struct gpio_event_detail), M_GPIOC,
- M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
}
+ priv->report_option = evcfg->gp_report_type;
+ mtx_unlock(&priv->mtx);
break;
case FIONBIO:
/*
diff --git a/sys/dev/gpio/pl061.c b/sys/dev/gpio/pl061.c
index 32109e5982bc..9996b0253c7d 100644
--- a/sys/dev/gpio/pl061.c
+++ b/sys/dev/gpio/pl061.c
@@ -558,8 +558,7 @@ static device_method_t pl061_methods[] = {
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
- DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, pl061_get_bus),
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
index 8422fcb787c3..04ed09f04604 100644
--- a/sys/dev/iwx/if_iwx.c
+++ b/sys/dev/iwx/if_iwx.c
@@ -4805,6 +4805,8 @@ iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
static void
iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
+ IWX_ASSERT_LOCKED(sc);
+
if (ring->queued < iwx_lomark) {
sc->qfullmsk &= ~(1 << ring->qid);
if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
@@ -4890,11 +4892,19 @@ iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
+ IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
+ "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n",
+ __func__,
+ le32toh(mbn->mac_id),
+ le32toh(mbn->consec_missed_beacons_since_last_rx),
+ le32toh(mbn->consec_missed_beacons),
+ le32toh(mbn->num_expected_beacons),
+ le32toh(mbn->num_recvd_beacons));
+
missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
if (missed > vap->iv_bmissthreshold) {
ieee80211_beacon_miss(ic);
}
-
}
static int
@@ -5491,6 +5501,9 @@ iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
/* for non-data, use the lowest supported rate */
ridx = min_ridx;
*flags |= IWX_TX_FLAGS_CMD_RATE;
+ } else if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ /* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
+ ridx = iwx_min_basic_rate(ic);
} else if (ni->ni_flags & IEEE80211_NODE_HT) {
ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
& ~IEEE80211_RATE_MCS];
@@ -5622,6 +5635,8 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
struct mbuf *m1;
size_t txcmd_size;
+ IWX_ASSERT_LOCKED(sc);
+
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
@@ -7308,97 +7323,107 @@ iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
return iwx_rs_init_v3(sc, in);
}
-static void
-iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
+
+/**
+ * @brief Turn the given TX rate control notification into an ieee80211_node_txrate
+ *
+ * This populates the given txrate node with the TX rate control notification.
+ *
+ * @param sc driver softc
+ * @param notif firmware notification
+ * @param ni		ieee80211_node to update
+ * @returns true if updated, false if not
+ */
+static bool
+iwx_rs_update_node_txrate(struct iwx_softc *sc,
+ const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
{
struct ieee80211com *ic = &sc->sc_ic;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- struct ieee80211_node *ni = (void *)vap->iv_bss;
+ /* XXX TODO: create an inline function in if_iwxreg.h? */
+ static int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
+ static int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };
- struct ieee80211_rateset *rs = &ni->ni_rates;
uint32_t rate_n_flags;
- uint8_t plcp, rval;
- int i, cmd_ver, rate_n_flags_ver2 = 0;
-
- if (notif->sta_id != IWX_STATION_ID ||
- (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
- return;
+ uint32_t type;
+ /* Extract the rate and command version */
rate_n_flags = le32toh(notif->rate);
+ if (sc->sc_rate_n_flags_version != 2) {
+ net80211_ic_printf(ic,
+ "%s: unsupported rate_n_flags version (%d)\n",
+ __func__,
+ sc->sc_rate_n_flags_version);
+ return (false);
+ }
+
if (sc->sc_debug & IWX_DEBUG_TXRATE)
print_ratenflags(__func__, __LINE__,
rate_n_flags, sc->sc_rate_n_flags_version);
- cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
- IWX_TLC_MNG_UPDATE_NOTIF);
- if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
- rate_n_flags_ver2 = 1;
-
- if (rate_n_flags_ver2) {
- uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
- if (mod_type == IWX_RATE_MCS_HT_MSK) {
-
- ieee80211_node_set_txrate_dot11rate(ni,
- IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
- IEEE80211_RATE_MCS);
- IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
- "%s:%d new MCS: %d rate_n_flags: %x\n",
- __func__, __LINE__,
- ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
- rate_n_flags);
- return;
- }
- } else {
- if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
- ieee80211_node_set_txrate_dot11rate(ni,
- rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
- IWX_RATE_HT_MCS_NSS_MSK_V1));
-
- IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
- "%s:%d new MCS idx: %d rate_n_flags: %x\n",
- __func__, __LINE__,
- ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
- return;
- }
+ type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
+ switch (type) {
+ case IWX_RATE_MCS_CCK_MSK:
+ ieee80211_node_set_txrate_dot11rate(ni,
+ cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
+ return (true);
+ case IWX_RATE_MCS_LEGACY_OFDM_MSK:
+ ieee80211_node_set_txrate_dot11rate(ni,
+ ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
+ return (true);
+ case IWX_RATE_MCS_HT_MSK:
+ /*
+ * TODO: the current API doesn't include channel width
+ * and other flags, so we can't accurately store them yet!
+ *
+ * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
+ * >> IWX_RATE_MCS_CHAN_WIDTH_POS)
+ * LDPC: (flags & (1 << 16))
+ */
+ ieee80211_node_set_txrate_ht_mcsrate(ni,
+ IWX_RATE_HT_MCS_INDEX(rate_n_flags));
+ return (true);
+ case IWX_RATE_MCS_VHT_MSK:
+ /* TODO: same comment on channel width, etc above */
+ ieee80211_node_set_txrate_vht_rate(ni,
+ IWX_RATE_VHT_MCS_CODE(rate_n_flags),
+ IWX_RATE_VHT_MCS_NSS(rate_n_flags));
+ return (true);
+ default:
+ net80211_ic_printf(ic,
+ "%s: unsupported chosen rate type in "
+ "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
+ type >> IWX_RATE_MCS_MOD_TYPE_POS);
+ return (false);
}
- if (rate_n_flags_ver2) {
- const struct ieee80211_rateset *rs;
- uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
- if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
- rs = &ieee80211_std_rateset_11a;
- else
- rs = &ieee80211_std_rateset_11b;
- if (ridx < rs->rs_nrates)
- rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
- else
- rval = 0;
- } else {
- plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
+ /* Default: if we get here, we didn't successfully update anything */
+ return (false);
+}
- rval = 0;
- for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
- if (iwx_rates[i].plcp == plcp) {
- rval = iwx_rates[i].rate;
- break;
- }
- }
- }
+/**
+ * @brief Process a firmware rate control update and update net80211.
+ *
+ * Since firmware is doing rate control, this just needs to update
+ * the txrate in the ieee80211_node entry.
+ */
+static void
+iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ /* XXX TODO: get a node ref! */
+ struct ieee80211_node *ni = (void *)vap->iv_bss;
- if (rval) {
- uint8_t rv;
- for (i = 0; i < rs->rs_nrates; i++) {
- rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
- if (rv == rval) {
- ieee80211_node_set_txrate_dot11rate(ni, i);
- break;
- }
- }
- IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
- "%s:%d new rate %d\n", __func__, __LINE__,
- ieee80211_node_get_txrate_dot11rate(ni));
- }
+ /*
+ * For now the iwx driver only supports a single vdev with a single
+ * node; it doesn't yet support ibss/hostap/multiple vdevs.
+ */
+ if (notif->sta_id != IWX_STATION_ID ||
+ (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
+ return;
+
+ iwx_rs_update_node_txrate(sc, notif, ni);
}
static int
@@ -8526,6 +8551,8 @@ iwx_start(struct iwx_softc *sc)
struct ieee80211_node *ni;
struct mbuf *m;
+ IWX_ASSERT_LOCKED(sc);
+
while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (iwx_tx(sc, m, ni) != 0) {
@@ -8985,10 +9012,10 @@ iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
break;
case IWX_MISSED_BEACONS_NOTIFICATION:
+ IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
+ "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
+ __func__);
iwx_rx_bmiss(sc, pkt, data);
- DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
- __func__));
- ieee80211_beacon_miss(ic);
break;
case IWX_MFUART_LOAD_NOTIFICATION:
diff --git a/sys/dev/iwx/if_iwxreg.h b/sys/dev/iwx/if_iwxreg.h
index 6755b93fa0ba..f3d1f078b48e 100644
--- a/sys/dev/iwx/if_iwxreg.h
+++ b/sys/dev/iwx/if_iwxreg.h
@@ -5176,6 +5176,10 @@ enum {
#define IWX_RATE_HT_MCS_INDEX(r) ((((r) & IWX_RATE_MCS_NSS_MSK) >> 1) | \
((r) & IWX_RATE_HT_MCS_CODE_MSK))
+#define IWX_RATE_VHT_MCS_CODE(r) ((r) & IWX_RATE_HT_MCS_CODE_MSK)
+#define IWX_RATE_VHT_MCS_NSS(r) \
+ ((((r) & IWX_RATE_MCS_NSS_MSK) == 0) >> IWX_RATE_MCS_NSS_POS)
+
/* Bits 7-5: reserved */
/*
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 52f9e12f8f9a..52e9fcbbebcd 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -463,13 +463,13 @@ static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
int timeout = ticks + 10 * hz;
- sbintime_t delta_t = SBT_1US;
+ sbintime_t delta = SBT_1US;
while (!atomic_load_acq_int(&status->done)) {
if (timeout - ticks < 0)
panic("NVME polled command failed to complete within 10s.");
- pause_sbt("nvme", delta_t, 0, C_PREL(1));
- delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ pause_sbt("nvme", delta, 0, C_PREL(1));
+ delta = min(SBT_1MS, delta + delta / 2);
}
}
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 9e43a4c1909f..cde98cb62cef 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -240,6 +240,7 @@ struct pci_quirk {
#define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
#define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
#define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */
+#define PCI_QUIRK_DISABLE_FLR 8 /* Function-Level Reset (FLR) not working. */
int arg1;
int arg2;
};
@@ -319,6 +320,13 @@ static const struct pci_quirk pci_quirks[] = {
* expected place.
*/
{ 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 },
+
+ /*
+	 * With some MediaTek mt76 WiFi devices, FLR does not work despite being advertised.
+ */
+ { 0x061614c3, PCI_QUIRK_DISABLE_FLR, 0, 0 }, /* mt76 7922 */
+
+ /* end of table */
{ 0 }
};
@@ -6740,6 +6748,8 @@ pcie_flr(device_t dev, u_int max_delay, bool force)
if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
return (false);
+ if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_FLR))
+ return (false);
/*
* Disable busmastering to prevent generation of new
diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c
index 1e486b01b168..5dbb5c4f4453 100644
--- a/sys/dev/sound/pci/hda/hdaa.c
+++ b/sys/dev/sound/pci/hda/hdaa.c
@@ -532,9 +532,11 @@ static void
hdaa_presence_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
- struct hdaa_audio_as *as;
+ struct hdaa_audio_as *as, *asp;
+ char buf[32];
uint32_t res;
- int connected, old;
+ int connected, old, i;
+ bool active;
if (w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
@@ -552,13 +554,6 @@ hdaa_presence_handler(struct hdaa_widget *w)
if (connected == old)
return;
w->wclass.pin.connected = connected;
- HDA_BOOTVERBOSE(
- if (connected || old != 2) {
- device_printf(devinfo->dev,
- "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
- w->nid, res, !connected ? "dis" : "");
- }
- );
as = &devinfo->as[w->bindas];
if (as->hpredir >= 0 && as->pins[15] == w->nid)
@@ -567,6 +562,38 @@ hdaa_presence_handler(struct hdaa_widget *w)
hdaa_autorecsrc_handler(as, w);
if (old != 2)
hdaa_channels_handler(as);
+
+ if (connected || old != 2) {
+ HDA_BOOTVERBOSE(
+ device_printf(devinfo->dev,
+ "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
+ w->nid, res, !connected ? "dis" : "");
+ );
+ if (as->hpredir >= 0)
+ return;
+ for (i = 0, active = false; i < devinfo->num_devs; i++) {
+ if (device_get_unit(devinfo->devs[i].dev) == snd_unit) {
+ active = true;
+ break;
+ }
+ }
+ /* Proceed only if we are currently using this codec. */
+ if (!active)
+ return;
+ for (i = 0; i < devinfo->ascnt; i++) {
+ asp = &devinfo->as[i];
+ if (!asp->enable)
+ continue;
+ if ((connected && asp->index == as->index) ||
+ (!connected && asp->dir == as->dir)) {
+ snprintf(buf, sizeof(buf), "cdev=dsp%d",
+ device_get_unit(asp->pdevinfo->dev));
+ devctl_notify("SND", "CONN",
+ asp->dir == HDAA_CTL_IN ? "IN" : "OUT", buf);
+ break;
+ }
+ }
+ }
}
/*
@@ -6194,15 +6221,15 @@ hdaa_configure(device_t dev)
);
hdaa_patch_direct(devinfo);
HDA_BOOTHVERBOSE(
- device_printf(dev, "Pin sense init...\n");
- );
- hdaa_sense_init(devinfo);
- HDA_BOOTHVERBOSE(
device_printf(dev, "Creating PCM devices...\n");
);
hdaa_unlock(devinfo);
hdaa_create_pcms(devinfo);
hdaa_lock(devinfo);
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Pin sense init...\n");
+ );
+ hdaa_sense_init(devinfo);
HDA_BOOTVERBOSE(
if (devinfo->quirks != 0) {
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 634ba0de2d55..471c6b3714b2 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -281,7 +281,7 @@ static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
&vtnet_tso_disable, 0, "Disables TSO");
-static int vtnet_lro_disable = 0;
+static int vtnet_lro_disable = 1;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
&vtnet_lro_disable, 0, "Disables hardware LRO");
diff --git a/sys/dev/vmware/vmxnet3/if_vmx.c b/sys/dev/vmware/vmxnet3/if_vmx.c
index 62b5f313a137..1a314ca6660e 100644
--- a/sys/dev/vmware/vmxnet3/if_vmx.c
+++ b/sys/dev/vmware/vmxnet3/if_vmx.c
@@ -2056,7 +2056,12 @@ vmxnet3_update_admin_status(if_ctx_t ctx)
struct vmxnet3_softc *sc;
sc = iflib_get_softc(ctx);
- if (sc->vmx_ds->event != 0)
+ /*
+ * iflib may invoke this routine before vmxnet3_attach_post() has
+ * run, which is before the top level shared data area is
+ * initialized and the device made aware of it.
+ */
+ if (sc->vmx_ds != NULL && sc->vmx_ds->event != 0)
vmxnet3_evintr(sc);
vmxnet3_refresh_host_stats(sc);
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index b51ef6766de4..bcf67ddc9689 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -195,8 +195,8 @@ static void vt_update_static(void *);
#ifndef SC_NO_CUTPASTE
static void vt_mouse_paste(void);
#endif
-static void vt_suspend_handler(void *priv);
-static void vt_resume_handler(void *priv);
+static void vt_suspend_handler(void *priv, enum power_stype stype);
+static void vt_resume_handler(void *priv, enum power_stype stype);
SET_DECLARE(vt_drv_set, struct vt_driver);
@@ -3330,7 +3330,7 @@ vt_replace_backend(const struct vt_driver *drv, void *softc)
}
static void
-vt_suspend_handler(void *priv)
+vt_suspend_handler(void *priv, enum power_stype stype)
{
struct vt_device *vd;
@@ -3341,7 +3341,7 @@ vt_suspend_handler(void *priv)
}
static void
-vt_resume_handler(void *priv)
+vt_resume_handler(void *priv, enum power_stype stype)
{
struct vt_device *vd;
diff --git a/sys/dev/watchdog/watchdog.c b/sys/dev/watchdog/watchdog.c
index e1b2e08c3f10..c599db56bf95 100644
--- a/sys/dev/watchdog/watchdog.c
+++ b/sys/dev/watchdog/watchdog.c
@@ -204,6 +204,7 @@ wd_valid_act(int act)
return true;
}
+#ifdef COMPAT_FREEBSD14
static int
wd_ioctl_patpat(caddr_t data)
{
@@ -223,6 +224,7 @@ wd_ioctl_patpat(caddr_t data)
return (wdog_kern_pat(u));
}
+#endif
static int
wd_get_time_left(struct thread *td, time_t *remainp)
diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c
index 123df4992894..2c61b48c0451 100644
--- a/sys/dev/xen/control/control.c
+++ b/sys/dev/xen/control/control.c
@@ -91,6 +91,7 @@
#include <sys/smp.h>
#include <sys/eventhandler.h>
#include <sys/timetc.h>
+#include <sys/power.h>
#include <geom/geom.h>
@@ -175,12 +176,12 @@ xctrl_suspend(void)
cpuset_t cpu_suspend_map;
#endif
- EVENTHANDLER_INVOKE(power_suspend_early);
+ EVENTHANDLER_INVOKE(power_suspend_early, POWER_STYPE_SUSPEND_TO_MEM);
xs_lock();
stop_all_proc();
xs_unlock();
suspend_all_fs();
- EVENTHANDLER_INVOKE(power_suspend);
+ EVENTHANDLER_INVOKE(power_suspend, POWER_STYPE_SUSPEND_TO_MEM);
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
@@ -297,7 +298,7 @@ xctrl_suspend(void)
resume_all_fs();
resume_all_proc();
- EVENTHANDLER_INVOKE(power_resume);
+ EVENTHANDLER_INVOKE(power_resume, POWER_STYPE_SUSPEND_TO_MEM);
if (bootverbose)
printf("System resumed after suspension\n");
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index 0a93878c859f..ad3f7779e108 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -37,6 +37,9 @@
#define NULLM_CACHE 0x0001
+#include <sys/ck.h>
+#include <vm/uma.h>
+
struct null_mount {
struct mount *nullm_vfs;
struct vnode *nullm_lowerrootvp; /* Ref to lower root vnode */
@@ -50,7 +53,7 @@ struct null_mount {
* A cache of vnode references
*/
struct null_node {
- LIST_ENTRY(null_node) null_hash; /* Hash list */
+ CK_SLIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
u_int null_flags;
@@ -61,6 +64,7 @@ struct null_node {
#define MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
#define VTONULL(vp) ((struct null_node *)(vp)->v_data)
+#define VTONULL_SMR(vp) ((struct null_node *)vn_load_v_data_smr(vp))
#define NULLTOV(xp) ((xp)->null_vnode)
int nullfs_init(struct vfsconf *vfsp);
@@ -79,9 +83,7 @@ struct vnode *null_checkvp(struct vnode *vp, char *fil, int lno);
extern struct vop_vector null_vnodeops;
-#ifdef MALLOC_DECLARE
-MALLOC_DECLARE(M_NULLFSNODE);
-#endif
+extern uma_zone_t null_node_zone;
#ifdef NULLFS_DEBUG
#define NULLFSDEBUG(format, args...) printf(format ,## args)
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 053614b6910d..d7f847d449d0 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -36,14 +36,19 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/smr.h>
#include <sys/vnode.h>
#include <fs/nullfs/null.h>
+#include <vm/uma.h>
+
+VFS_SMR_DECLARE;
+
/*
* Null layer cache:
* Each cache entry holds a reference to the lower vnode
@@ -54,12 +59,12 @@
#define NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])
-static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+static CK_SLIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static struct rwlock null_hash_lock;
static u_long null_hash_mask;
static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
-MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");
+uma_zone_t __read_mostly null_node_zone;
static void null_hashins(struct mount *, struct null_node *);
@@ -73,6 +78,10 @@ nullfs_init(struct vfsconf *vfsp)
null_node_hashtbl = hashinit(desiredvnodes, M_NULLFSHASH,
&null_hash_mask);
rw_init(&null_hash_lock, "nullhs");
+ null_node_zone = uma_zcreate("nullfs node", sizeof(struct null_node),
+ NULL, NULL, NULL, NULL, 0, UMA_ZONE_ZINIT);
+ VFS_SMR_ZONE_SET(null_node_zone);
+
return (0);
}
@@ -80,6 +89,7 @@ int
nullfs_uninit(struct vfsconf *vfsp)
{
+ uma_zdestroy(null_node_zone);
rw_destroy(&null_hash_lock);
hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_hash_mask);
return (0);
@@ -96,7 +106,7 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
struct null_node *a;
struct vnode *vp;
- ASSERT_VOP_LOCKED(lowervp, "null_hashget");
+ ASSERT_VOP_LOCKED(lowervp, __func__);
rw_assert(&null_hash_lock, RA_LOCKED);
/*
@@ -106,18 +116,21 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
* reference count (but NOT the lower vnode's VREF counter).
*/
hd = NULL_NHASH(lowervp);
- LIST_FOREACH(a, hd, null_hash) {
- if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
- /*
- * Since we have the lower node locked the nullfs
- * node can not be in the process of recycling. If
- * it had been recycled before we grabed the lower
- * lock it would not have been found on the hash.
- */
- vp = NULLTOV(a);
- vref(vp);
- return (vp);
- }
+ CK_SLIST_FOREACH(a, hd, null_hash) {
+ if (a->null_lowervp != lowervp)
+ continue;
+ /*
+ * Since we have the lower node locked the nullfs
+ * node can not be in the process of recycling. If
+	 * it had been recycled before we grabbed the lower
+ * lock it would not have been found on the hash.
+ */
+ vp = NULLTOV(a);
+ VNPASS(!VN_IS_DOOMED(vp), vp);
+ if (vp->v_mount != mp)
+ continue;
+ vref(vp);
+ return (vp);
}
return (NULL);
}
@@ -126,17 +139,34 @@ struct vnode *
null_hashget(struct mount *mp, struct vnode *lowervp)
{
struct null_node_hashhead *hd;
+ struct null_node *a;
struct vnode *vp;
+ enum vgetstate vs;
- hd = NULL_NHASH(lowervp);
- if (LIST_EMPTY(hd))
- return (NULL);
-
- rw_rlock(&null_hash_lock);
- vp = null_hashget_locked(mp, lowervp);
- rw_runlock(&null_hash_lock);
+ ASSERT_VOP_LOCKED(lowervp, __func__);
+ rw_assert(&null_hash_lock, RA_UNLOCKED);
- return (vp);
+ vfs_smr_enter();
+ hd = NULL_NHASH(lowervp);
+ CK_SLIST_FOREACH(a, hd, null_hash) {
+ if (a->null_lowervp != lowervp)
+ continue;
+ /*
+ * See null_hashget_locked as to why the nullfs vnode can't be
+ * doomed here.
+ */
+ vp = NULLTOV(a);
+ VNPASS(!VN_IS_DOOMED(vp), vp);
+ if (vp->v_mount != mp)
+ continue;
+ vs = vget_prep_smr(vp);
+ vfs_smr_exit();
+ VNPASS(vs != VGET_NONE, vp);
+ vget_finish_ref(vp, vs);
+ return (vp);
+ }
+ vfs_smr_exit();
+ return (NULL);
}
static void
@@ -151,7 +181,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
hd = NULL_NHASH(xp->null_lowervp);
#ifdef INVARIANTS
- LIST_FOREACH(oxp, hd, null_hash) {
+ CK_SLIST_FOREACH(oxp, hd, null_hash) {
if (oxp->null_lowervp == xp->null_lowervp &&
NULLTOV(oxp)->v_mount == mp) {
VNASSERT(0, NULLTOV(oxp),
@@ -159,7 +189,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
}
}
#endif
- LIST_INSERT_HEAD(hd, xp, null_hash);
+ CK_SLIST_INSERT_HEAD(hd, xp, null_hash);
}
static void
@@ -174,7 +204,7 @@ null_destroy_proto(struct vnode *vp, void *xp)
VI_UNLOCK(vp);
vgone(vp);
vput(vp);
- free(xp, M_NULLFSNODE);
+ uma_zfree_smr(null_node_zone, xp);
}
/*
@@ -208,12 +238,12 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
* Note that duplicate can only appear in hash if the lowervp is
* locked LK_SHARED.
*/
- xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);
+ xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
if (error) {
vput(lowervp);
- free(xp, M_NULLFSNODE);
+ uma_zfree_smr(null_node_zone, xp);
return (error);
}
@@ -261,8 +291,8 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
return (error);
}
- null_hashins(mp, xp);
vn_set_state(vp, VSTATE_CONSTRUCTED);
+ null_hashins(mp, xp);
rw_wunlock(&null_hash_lock);
*vpp = vp;
@@ -275,9 +305,11 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
void
null_hashrem(struct null_node *xp)
{
+ struct null_node_hashhead *hd;
+ hd = NULL_NHASH(xp->null_lowervp);
rw_wlock(&null_hash_lock);
- LIST_REMOVE(xp, null_hash);
+ CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
rw_wunlock(&null_hash_lock);
}
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index e9d598014a2f..ec8a6b10b13f 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -174,6 +174,8 @@
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/smr.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
@@ -185,6 +187,8 @@
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
+VFS_SMR_DECLARE;
+
static int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
&null_bug_bypass, 0, "");
@@ -768,83 +772,111 @@ null_rmdir(struct vop_rmdir_args *ap)
}
/*
- * We need to process our own vnode lock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
+ * We need to process our own vnode lock and then clear the interlock flag as
+ * it applies only to our vnode, not the vnodes below us on the stack.
+ *
+ * We have to hold the vnode here to solve a potential reclaim race. If we're
+ * forcibly vgone'd while we still have refs, a thread could be sleeping inside
+ * the lowervp's vop_lock routine. When we vgone we will drop our last ref to
+ * the lowervp, which would allow it to be reclaimed. The lowervp could then
+ * be recycled, in which case it is not legal to be sleeping in its VOP. We
+ * prevent it from being recycled by holding the vnode here.
*/
+static struct vnode *
+null_lock_prep_with_smr(struct vop_lock1_args *ap)
+{
+ struct null_node *nn;
+ struct vnode *lvp;
+
+ lvp = NULL;
+
+ vfs_smr_enter();
+
+ nn = VTONULL_SMR(ap->a_vp);
+ if (__predict_true(nn != NULL)) {
+ lvp = nn->null_lowervp;
+ if (lvp != NULL && !vhold_smr(lvp))
+ lvp = NULL;
+ }
+
+ vfs_smr_exit();
+ return (lvp);
+}
+
+static struct vnode *
+null_lock_prep_with_interlock(struct vop_lock1_args *ap)
+{
+ struct null_node *nn;
+ struct vnode *lvp;
+
+ ASSERT_VI_LOCKED(ap->a_vp, __func__);
+
+ ap->a_flags &= ~LK_INTERLOCK;
+
+ lvp = NULL;
+
+ nn = VTONULL(ap->a_vp);
+ if (__predict_true(nn != NULL)) {
+ lvp = nn->null_lowervp;
+ if (lvp != NULL)
+ vholdnz(lvp);
+ }
+ VI_UNLOCK(ap->a_vp);
+ return (lvp);
+}
+
static int
null_lock(struct vop_lock1_args *ap)
{
- struct vnode *vp = ap->a_vp;
- int flags;
- struct null_node *nn;
struct vnode *lvp;
- int error;
+ int error, flags;
- if ((ap->a_flags & LK_INTERLOCK) == 0)
- VI_LOCK(vp);
- else
- ap->a_flags &= ~LK_INTERLOCK;
- flags = ap->a_flags;
- nn = VTONULL(vp);
+ if (__predict_true((ap->a_flags & LK_INTERLOCK) == 0)) {
+ lvp = null_lock_prep_with_smr(ap);
+ if (__predict_false(lvp == NULL)) {
+ VI_LOCK(ap->a_vp);
+ lvp = null_lock_prep_with_interlock(ap);
+ }
+ } else {
+ lvp = null_lock_prep_with_interlock(ap);
+ }
+
+ ASSERT_VI_UNLOCKED(ap->a_vp, __func__);
+
+ if (__predict_false(lvp == NULL))
+ return (vop_stdlock(ap));
+
+ VNPASS(lvp->v_holdcnt > 0, lvp);
+ error = VOP_LOCK(lvp, ap->a_flags);
/*
- * If we're still active we must ask the lower layer to
- * lock as ffs has special lock considerations in its
- * vop lock.
+ * We might have slept to get the lock and someone might have
+ * cleaned our vnode already, switching vnode lock from one in
+ * lowervp to v_lock in our own vnode structure. Handle this
+ * case by reacquiring correct lock in requested mode.
*/
- if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
- /*
- * We have to hold the vnode here to solve a potential
- * reclaim race. If we're forcibly vgone'd while we
- * still have refs, a thread could be sleeping inside
- * the lowervp's vop_lock routine. When we vgone we will
- * drop our last ref to the lowervp, which would allow it
- * to be reclaimed. The lowervp could then be recycled,
- * in which case it is not legal to be sleeping in its VOP.
- * We prevent it from being recycled by holding the vnode
- * here.
- */
- vholdnz(lvp);
- VI_UNLOCK(vp);
- error = VOP_LOCK(lvp, flags);
-
- /*
- * We might have slept to get the lock and someone might have
- * clean our vnode already, switching vnode lock from one in
- * lowervp to v_lock in our own vnode structure. Handle this
- * case by reacquiring correct lock in requested mode.
- */
- if (VTONULL(vp) == NULL && error == 0) {
- ap->a_flags &= ~LK_TYPE_MASK;
- switch (flags & LK_TYPE_MASK) {
- case LK_SHARED:
- ap->a_flags |= LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUSIVE:
- ap->a_flags |= LK_EXCLUSIVE;
- break;
- default:
- panic("Unsupported lock request %d\n",
- ap->a_flags);
- }
- VOP_UNLOCK(lvp);
- error = vop_stdlock(ap);
+ if (VTONULL(ap->a_vp) == NULL && error == 0) {
+ VOP_UNLOCK(lvp);
+
+ flags = ap->a_flags;
+ ap->a_flags &= ~LK_TYPE_MASK;
+ switch (flags & LK_TYPE_MASK) {
+ case LK_SHARED:
+ ap->a_flags |= LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUSIVE:
+ ap->a_flags |= LK_EXCLUSIVE;
+ break;
+ default:
+ panic("Unsupported lock request %d\n",
+ flags);
}
- vdrop(lvp);
- } else {
- VI_UNLOCK(vp);
error = vop_stdlock(ap);
}
-
+ vdrop(lvp);
return (error);
}
-/*
- * We need to process our own vnode unlock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
- */
static int
null_unlock(struct vop_unlock_args *ap)
{
@@ -853,11 +885,20 @@ null_unlock(struct vop_unlock_args *ap)
struct vnode *lvp;
int error;
+ /*
+ * Contrary to null_lock, we don't need to hold the vnode around
+ * unlock.
+ *
+ * We hold the lock, which means we can't be racing against vgone.
+ *
+ * At the same time VOP_UNLOCK promises to not touch anything after
+ * it finishes unlock, just like we don't.
+ *
+ * vop_stdunlock for a doomed vnode matches doomed locking in null_lock.
+ */
nn = VTONULL(vp);
if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
- vholdnz(lvp);
error = VOP_UNLOCK(lvp);
- vdrop(lvp);
} else {
error = vop_stdunlock(ap);
}
@@ -961,7 +1002,7 @@ null_reclaim(struct vop_reclaim_args *ap)
vunref(lowervp);
else
vput(lowervp);
- free(xp, M_NULLFSNODE);
+ uma_zfree_smr(null_node_zone, xp);
return (0);
}
diff --git a/sys/geom/part/g_part.c b/sys/geom/part/g_part.c
index 4c0d0c3aa902..1e4236507fa4 100644
--- a/sys/geom/part/g_part.c
+++ b/sys/geom/part/g_part.c
@@ -122,13 +122,13 @@ struct g_part_alias_list {
{ "ntfs", G_PART_ALIAS_MS_NTFS },
{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
- { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT },
- { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT },
- { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP },
- { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP },
- { "solaris-var", G_PART_ALIAS_SOLARIS_VAR },
- { "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
- { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
+ { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT },
+ { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT },
+ { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP },
+ { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP },
+ { "solaris-var", G_PART_ALIAS_SOLARIS_VAR },
+ { "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
+ { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
{ "solaris-reserved", G_PART_ALIAS_SOLARIS_RESERVED },
{ "u-boot-env", G_PART_ALIAS_U_BOOT_ENV },
{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
diff --git a/sys/i386/acpica/acpi_wakeup.c b/sys/i386/acpica/acpi_wakeup.c
index 2d60d5e037a0..96be64de017b 100644
--- a/sys/i386/acpica/acpi_wakeup.c
+++ b/sys/i386/acpica/acpi_wakeup.c
@@ -84,7 +84,7 @@ static cpuset_t suspcpus;
static struct susppcb **susppcbs;
#endif
-static void acpi_stop_beep(void *);
+static void acpi_stop_beep(void *, enum power_stype);
#ifdef SMP
static int acpi_wakeup_ap(struct acpi_softc *, int);
@@ -100,7 +100,7 @@ static void acpi_wakeup_cpus(struct acpi_softc *);
} while (0)
static void
-acpi_stop_beep(void *arg)
+acpi_stop_beep(void *arg, enum power_stype stype)
{
if (acpi_resume_beep != 0)
diff --git a/sys/isa/isa_common.c b/sys/isa/isa_common.c
index 8e4064af1455..1a6df7bf6046 100644
--- a/sys/isa/isa_common.c
+++ b/sys/isa/isa_common.c
@@ -1114,7 +1114,7 @@ isab_attach(device_t dev)
{
device_t child;
- child = device_add_child(dev, "isa", 0);
+ child = device_add_child(dev, "isa", DEVICE_UNIT_ANY);
if (child == NULL)
return (ENXIO);
bus_attach_children(dev);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index a32b5a1b3354..ab8ed32ad189 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -127,6 +127,27 @@ proc_realparent(struct proc *child)
return (parent);
}
+static void
+reaper_clear(struct proc *p, struct proc *rp)
+{
+ struct proc *p1;
+ bool clear;
+
+ sx_assert(&proctree_lock, SX_XLOCKED);
+ LIST_REMOVE(p, p_reapsibling);
+ if (p->p_reapsubtree == 1)
+ return;
+ clear = true;
+ LIST_FOREACH(p1, &rp->p_reaplist, p_reapsibling) {
+ if (p1->p_reapsubtree == p->p_reapsubtree) {
+ clear = false;
+ break;
+ }
+ }
+ if (clear)
+ proc_id_clear(PROC_ID_REAP, p->p_reapsubtree);
+}
+
void
reaper_abandon_children(struct proc *p, bool exiting)
{
@@ -138,7 +159,7 @@ reaper_abandon_children(struct proc *p, bool exiting)
return;
p1 = p->p_reaper;
LIST_FOREACH_SAFE(p2, &p->p_reaplist, p_reapsibling, ptmp) {
- LIST_REMOVE(p2, p_reapsibling);
+ reaper_clear(p2, p);
p2->p_reaper = p1;
p2->p_reapsubtree = p->p_reapsubtree;
LIST_INSERT_HEAD(&p1->p_reaplist, p2, p_reapsibling);
@@ -152,27 +173,6 @@ reaper_abandon_children(struct proc *p, bool exiting)
p->p_treeflag &= ~P_TREE_REAPER;
}
-static void
-reaper_clear(struct proc *p)
-{
- struct proc *p1;
- bool clear;
-
- sx_assert(&proctree_lock, SX_LOCKED);
- LIST_REMOVE(p, p_reapsibling);
- if (p->p_reapsubtree == 1)
- return;
- clear = true;
- LIST_FOREACH(p1, &p->p_reaper->p_reaplist, p_reapsibling) {
- if (p1->p_reapsubtree == p->p_reapsubtree) {
- clear = false;
- break;
- }
- }
- if (clear)
- proc_id_clear(PROC_ID_REAP, p->p_reapsubtree);
-}
-
void
proc_clear_orphan(struct proc *p)
{
@@ -972,7 +972,7 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
sx_xunlock(PIDHASHLOCK(p->p_pid));
LIST_REMOVE(p, p_sibling);
reaper_abandon_children(p, true);
- reaper_clear(p);
+ reaper_clear(p, p->p_reaper);
PROC_LOCK(p);
proc_clear_orphan(p);
PROC_UNLOCK(p);
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 31bff6d2c1aa..76f68677e292 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1780,9 +1780,11 @@ lockmgr_chain(struct thread *td, struct thread **ownerp)
lk = td->td_wchan;
- if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
+ if (!TD_ON_SLEEPQ(td) || sleepq_type(td->td_wchan) != SLEEPQ_LK ||
+ LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
return (0);
- db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
+ db_printf("blocked on lock %p (%s) \"%s\" ", &lk->lock_object,
+ lock_class_lockmgr.lc_name, lk->lock_object.lo_name);
if (lk->lk_lock & LK_SHARE)
db_printf("SHARED (count %ju)\n",
(uintmax_t)LK_SHARERS(lk->lk_lock));
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 8b5908f5219a..d67c70984528 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -503,8 +503,8 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
/*
* __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
*
- * We call this if the lock is either contested (i.e. we need to go to
- * sleep waiting for it), or if we need to recurse on it.
+ * We get here if lock profiling is enabled, the lock is already held by
+ * someone else or we are recursing on it.
*/
#if LOCK_DEBUG > 0
void
@@ -660,13 +660,8 @@ retry_turnstile:
}
#endif
- /*
- * If the mutex isn't already contested and a failure occurs
- * setting the contested bit, the mutex was either released
- * or the state of the MTX_RECURSED bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
+ if ((v & MTX_WAITERS) == 0 &&
+ !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_WAITERS)) {
goto retry_turnstile;
}
@@ -869,7 +864,7 @@ _thread_lock(struct thread *td)
WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
return;
}
- _mtx_release_lock_quick(m);
+ atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
slowpath_unlocked:
spinlock_exit();
slowpath_noirq:
@@ -959,7 +954,7 @@ retry:
}
if (m == td->td_lock)
break;
- _mtx_release_lock_quick(m);
+ atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
}
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
@@ -1029,8 +1024,8 @@ thread_lock_set(struct thread *td, struct mtx *new)
/*
* __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
*
- * We are only called here if the lock is recursed, contested (i.e. we
- * need to wake up a blocked thread) or lockstat probe is active.
+ * We get here if lock profiling is enabled, the lock is recursed, or
+ * there are waiters to wake up.
*/
#if LOCK_DEBUG > 0
void
@@ -1071,7 +1066,7 @@ __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
* can be removed from the hash list if it is empty.
*/
turnstile_chain_lock(&m->lock_object);
- _mtx_release_lock_quick(m);
+ atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
ts = turnstile_lookup(&m->lock_object);
MPASS(ts != NULL);
if (LOCK_LOG_TEST(&m->lock_object, opts))
@@ -1207,7 +1202,7 @@ _mtx_destroy(volatile uintptr_t *c)
if (!mtx_owned(m))
MPASS(mtx_unowned(m));
else {
- MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
+ MPASS((m->mtx_lock & (MTX_RECURSED|MTX_WAITERS)) == 0);
/* Perform the non-mtx related part of mtx_unlock_spin(). */
if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
@@ -1359,8 +1354,8 @@ db_show_mtx(const struct lock_object *lock)
db_printf("DESTROYED");
else {
db_printf("OWNED");
- if (m->mtx_lock & MTX_CONTESTED)
- db_printf(", CONTESTED");
+ if (m->mtx_lock & MTX_WAITERS)
+ db_printf(", WAITERS");
if (m->mtx_lock & MTX_RECURSED)
db_printf(", RECURSED");
}
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index c005e112d3b9..249faf5b1ec4 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -1539,16 +1539,19 @@ sx_chain(struct thread *td, struct thread **ownerp)
/*
* Check to see if this thread is blocked on an sx lock.
- * First, we check the lock class. If that is ok, then we
- * compare the lock name against the wait message.
+ * The thread should be on a sleep queue with type SLEEPQ_SX, the
+ * purported lock should have the lock class index of sx, and the lock
+ * name should match the wait message.
*/
sx = td->td_wchan;
- if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
+ if (!TD_ON_SLEEPQ(td) || sleepq_type(td->td_wchan) != SLEEPQ_SX ||
+ LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
sx->lock_object.lo_name != td->td_wmesg)
return (0);
/* We think we have an sx lock, so output some details. */
- db_printf("blocked on sx \"%s\" ", td->td_wmesg);
+ db_printf("blocked on lock %p (%s) \"%s\" ", &sx->lock_object,
+ lock_class_sx.lc_name, td->td_wmesg);
*ownerp = sx_xholder(sx);
if (sx->sx_lock & SX_LOCK_SHARED)
db_printf("SLOCK (count %ju)\n",
diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c
index bbebadc4c395..ebd203858b66 100644
--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c
@@ -518,9 +518,15 @@ link_elf_init(void* arg)
(void)link_elf_link_common_finish(linker_kernel_file);
linker_kernel_file->flags |= LINKER_FILE_LINKED;
TAILQ_INIT(&set_pcpu_list);
+ ef->pcpu_start = DPCPU_START;
+ ef->pcpu_stop = DPCPU_STOP;
+ ef->pcpu_base = DPCPU_START;
#ifdef VIMAGE
TAILQ_INIT(&set_vnet_list);
vnet_save_init((void *)VNET_START, VNET_STOP - VNET_START);
+ ef->vnet_start = VNET_START;
+ ef->vnet_stop = VNET_STOP;
+ ef->vnet_base = VNET_START;
#endif
}
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index 151aab96f9be..a3a53a39bfd6 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -70,6 +70,7 @@
typedef struct {
void *addr;
+ void *origaddr; /* Used by debuggers. */
Elf_Off size;
int flags; /* Section flags. */
int sec; /* Original section number. */
@@ -492,7 +493,8 @@ link_elf_link_preload(linker_class_t cls, const char *filename,
case SHT_FINI_ARRAY:
if (shdr[i].sh_addr == 0)
break;
- ef->progtab[pb].addr = (void *)shdr[i].sh_addr;
+ ef->progtab[pb].addr = ef->progtab[pb].origaddr =
+ (void *)shdr[i].sh_addr;
if (shdr[i].sh_type == SHT_PROGBITS)
ef->progtab[pb].name = "<<PROGBITS>>";
#ifdef __amd64__
@@ -1088,6 +1090,8 @@ link_elf_load_file(linker_class_t cls, const char *filename,
ef->progtab[pb].name = "<<NOBITS>>";
if (ef->progtab[pb].name != NULL &&
!strcmp(ef->progtab[pb].name, DPCPU_SETNAME)) {
+ ef->progtab[pb].origaddr =
+ (void *)(uintptr_t)mapbase;
ef->progtab[pb].addr =
dpcpu_alloc(shdr[i].sh_size);
if (ef->progtab[pb].addr == NULL) {
@@ -1101,6 +1105,8 @@ link_elf_load_file(linker_class_t cls, const char *filename,
#ifdef VIMAGE
else if (ef->progtab[pb].name != NULL &&
!strcmp(ef->progtab[pb].name, VNET_SETNAME)) {
+ ef->progtab[pb].origaddr =
+ (void *)(uintptr_t)mapbase;
ef->progtab[pb].addr =
vnet_data_alloc(shdr[i].sh_size);
if (ef->progtab[pb].addr == NULL) {
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 5606b36f772f..7d666da9f88b 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -729,7 +729,7 @@ kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
struct file *fp;
struct filedesc *fdp;
- int error, tmp, locked;
+ int error, f_flag, tmp, locked;
AUDIT_ARG_FD(fd);
AUDIT_ARG_CMD(com);
@@ -782,30 +782,36 @@ kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
goto out;
}
+ f_flag = 0;
switch (com) {
case FIONCLEX:
fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
- goto out;
+ break;
case FIOCLEX:
fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
- goto out;
- case FIONBIO:
- if ((tmp = *(int *)data))
- atomic_set_int(&fp->f_flag, FNONBLOCK);
- else
- atomic_clear_int(&fp->f_flag, FNONBLOCK);
- data = (void *)&tmp;
break;
+ case FIONBIO:
case FIOASYNC:
- if ((tmp = *(int *)data))
- atomic_set_int(&fp->f_flag, FASYNC);
- else
- atomic_clear_int(&fp->f_flag, FASYNC);
- data = (void *)&tmp;
+ f_flag = com == FIONBIO ? FNONBLOCK : FASYNC;
+ tmp = *(int *)data;
+ fsetfl_lock(fp);
+ if (((fp->f_flag & f_flag) != 0) != (tmp != 0)) {
+ error = fo_ioctl(fp, com, (void *)&tmp, td->td_ucred,
+ td);
+ if (error == 0) {
+ if (tmp != 0)
+ atomic_set_int(&fp->f_flag, f_flag);
+ else
+ atomic_clear_int(&fp->f_flag, f_flag);
+ }
+ }
+ fsetfl_unlock(fp);
+ break;
+ default:
+ error = fo_ioctl(fp, com, data, td->td_ucred, td);
break;
}
- error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
switch (locked) {
case LA_XLOCKED:
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index 30527fdd4fd0..57ebe8dc85f0 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -567,7 +567,7 @@ pipespace_new(struct pipe *cpipe, int size)
static int curfail = 0;
static struct timeval lastfail;
- KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
+ PIPE_LOCK_ASSERT(cpipe, MA_NOTOWNED);
KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
("pipespace: resize of direct writes not allowed"));
retry:
@@ -1679,8 +1679,7 @@ static void
pipe_free_kmem(struct pipe *cpipe)
{
- KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
- ("pipe_free_kmem: pipe mutex locked"));
+ PIPE_LOCK_ASSERT(cpipe, MA_NOTOWNED);
if (cpipe->pipe_buffer.buffer != NULL) {
atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 13abb9171234..557e451f9a45 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -3340,12 +3340,10 @@ sys___realpathat(struct thread *td, struct __realpathat_args *uap)
uap->flags, UIO_USERSPACE));
}
-/*
- * Retrieve the full filesystem path that correspond to a vnode from the name
- * cache (if available)
- */
-int
-vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
+static int
+vn_fullpath_up_to_pwd_vnode(struct vnode *vp,
+ struct vnode *(*const get_pwd_vnode)(const struct pwd *),
+ char **retbuf, char **freebuf)
{
struct pwd *pwd;
char *buf;
@@ -3359,11 +3357,13 @@ vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
buf = malloc(buflen, M_TEMP, M_WAITOK);
vfs_smr_enter();
pwd = pwd_get_smr();
- error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, &buflen, 0);
+ error = vn_fullpath_any_smr(vp, get_pwd_vnode(pwd), buf, retbuf,
+ &buflen, 0);
VFS_SMR_ASSERT_NOT_ENTERED();
if (error < 0) {
pwd = pwd_hold(curthread);
- error = vn_fullpath_any(vp, pwd->pwd_rdir, buf, retbuf, &buflen);
+ error = vn_fullpath_any(vp, get_pwd_vnode(pwd), buf, retbuf,
+ &buflen);
pwd_drop(pwd);
}
if (error == 0)
@@ -3373,6 +3373,42 @@ vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
return (error);
}
+static inline struct vnode *
+get_rdir(const struct pwd *pwd)
+{
+ return (pwd->pwd_rdir);
+}
+
+/*
+ * Produce a filesystem path that starts from the current chroot directory and
+ * corresponds to the passed vnode, using the name cache (if available).
+ */
+int
+vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
+{
+ return (vn_fullpath_up_to_pwd_vnode(vp, get_rdir, retbuf, freebuf));
+}
+
+static inline struct vnode *
+get_jdir(const struct pwd *pwd)
+{
+ return (pwd->pwd_jdir);
+}
+
+/*
+ * Produce a filesystem path that starts from the current jail's root directory
+ * and corresponds to the passed vnode, using the name cache (if available).
+ *
+ * This function allows one to ignore chroots done inside a jail (or the host),
+ * allowing path checks to remain unaffected by privileged or unprivileged
+ * chroot calls.
+ */
+int
+vn_fullpath_jail(struct vnode *vp, char **retbuf, char **freebuf)
+{
+ return (vn_fullpath_up_to_pwd_vnode(vp, get_jdir, retbuf, freebuf));
+}
+
/*
* This function is similar to vn_fullpath, but it attempts to lookup the
* pathname relative to the global root mount point. This is required for the
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 3d4567b6ab1e..a53df50c06bd 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -806,9 +806,12 @@ file_v_lock(struct file *fp, short lock_bit, short lock_wait_bit)
flagsp = &fp->f_vflags;
state = atomic_load_16(flagsp);
- if ((state & lock_bit) == 0 &&
- atomic_cmpset_acq_16(flagsp, state, state | lock_bit))
- return;
+ for (;;) {
+ if ((state & lock_bit) != 0)
+ break;
+ if (atomic_fcmpset_acq_16(flagsp, &state, state | lock_bit))
+ return;
+ }
sleepq_lock(flagsp);
state = atomic_load_16(flagsp);
@@ -842,9 +845,12 @@ file_v_unlock(struct file *fp, short lock_bit, short lock_wait_bit)
flagsp = &fp->f_vflags;
state = atomic_load_16(flagsp);
- if ((state & lock_wait_bit) == 0 &&
- atomic_cmpset_rel_16(flagsp, state, state & ~lock_bit))
- return;
+ for (;;) {
+ if ((state & lock_wait_bit) != 0)
+ break;
+ if (atomic_fcmpset_rel_16(flagsp, &state, state & ~lock_bit))
+ return;
+ }
sleepq_lock(flagsp);
MPASS((*flagsp & lock_bit) != 0);
@@ -864,10 +870,6 @@ foffset_lock(struct file *fp, int flags)
FILE_V_FOFFSET_LOCK_WAITING);
}
- /*
- * According to McKusick the vn lock was protecting f_offset here.
- * It is now protected by the FOFFSET_LOCKED flag.
- */
return (atomic_load_long(&fp->f_offset));
}
diff --git a/sys/modules/cxgbe/Makefile b/sys/modules/cxgbe/Makefile
index f94d3ae07f66..c2ee71465789 100644
--- a/sys/modules/cxgbe/Makefile
+++ b/sys/modules/cxgbe/Makefile
@@ -1,6 +1,3 @@
-#
-#
-
SYSDIR?=${SRCTOP}/sys
.include "${SYSDIR}/conf/kern.opts.mk"
@@ -13,6 +10,7 @@ SUBDIR+= if_ccv
SUBDIR+= t4_firmware
SUBDIR+= t5_firmware
SUBDIR+= t6_firmware
+SUBDIR+= t7_firmware
SUBDIR+= ${_tom}
SUBDIR+= ${_iw_cxgbe}
SUBDIR+= ${_cxgbei}
diff --git a/sys/modules/cxgbe/if_cxgbe/Makefile b/sys/modules/cxgbe/if_cxgbe/Makefile
index 981c3466c452..33383c84837f 100644
--- a/sys/modules/cxgbe/if_cxgbe/Makefile
+++ b/sys/modules/cxgbe/if_cxgbe/Makefile
@@ -23,6 +23,7 @@ SRCS+= t4_hw.c
SRCS+= t4_if.c t4_if.h
SRCS+= t4_iov.c
SRCS.KERN_TLS+= t6_kern_tls.c
+SRCS.KERN_TLS+= t7_kern_tls.c
SRCS+= t4_keyctx.c
SRCS+= t4_l2t.c
SRCS+= t4_main.c
@@ -31,6 +32,7 @@ SRCS+= t4_netmap.c
SRCS+= t4_sched.c
SRCS+= t4_sge.c
SRCS+= t4_smt.c
+SRCS+= t4_tpt.c
SRCS+= t4_tracer.c
SRCS+= cudbg_common.c
SRCS+= cudbg_flash_utils.c
diff --git a/sys/modules/cxgbe/t7_firmware/Makefile b/sys/modules/cxgbe/t7_firmware/Makefile
new file mode 100644
index 000000000000..afce06487f22
--- /dev/null
+++ b/sys/modules/cxgbe/t7_firmware/Makefile
@@ -0,0 +1,19 @@
+T7FW= ${SRCTOP}/sys/dev/cxgbe/firmware
+.PATH: ${T7FW}
+
+KMOD= t7fw_cfg
+FIRMWS= ${KMOD}.txt:${KMOD}:1.0.0.0
+
+# You can have additional configuration files in the ${T7FW} directory.
+# t7fw_cfg_<name>.txt
+CFG_FILES != cd ${T7FW} && echo ${KMOD}_*.txt
+.for F in ${CFG_FILES}
+.if exists(${F})
+FIRMWS+= ${F}:${F:C/.txt//}:1.0.0.0
+.endif
+.endfor
+
+#T7FW_VER= 1.27.0.71
+#FIRMWS+= t7fw-${T7FW_VER}.bin:t7fw:${T7FW_VER}
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index c595030ed4a0..db1b6f33a8ef 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -843,7 +843,7 @@
/* #undef ZFS_DEVICE_MINOR */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.4.99-72-FreeBSD_gb2196fbed"
+#define ZFS_META_ALIAS "zfs-2.4.99-95-FreeBSD_g5605a6d79"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
@@ -852,7 +852,7 @@
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "6.16"
+#define ZFS_META_KVER_MAX "6.17"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "4.18"
@@ -873,7 +873,7 @@
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "72-FreeBSD_gb2196fbed"
+#define ZFS_META_RELEASE "95-FreeBSD_g5605a6d79"
/* Define the project version. */
#define ZFS_META_VERSION "2.4.99"
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 9eae1e8573c0..8a1802f5480b 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.4.99-72-gb2196fbed"
+#define ZFS_META_GITREV "zfs-2.4.99-95-g5605a6d79"
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 1e6d98291c04..d2625da19cd2 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -142,7 +142,9 @@ struct iflib_ctx;
static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);
+#ifndef ALTQ
static int iflib_simple_transmit(if_t ifp, struct mbuf *m);
+#endif
typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
@@ -200,6 +202,8 @@ struct iflib_ctx {
uint16_t ifc_sysctl_extra_msix_vectors;
bool ifc_cpus_are_physical_cores;
bool ifc_sysctl_simple_tx;
+ uint16_t ifc_sysctl_tx_reclaim_thresh;
+ uint16_t ifc_sysctl_tx_reclaim_ticks;
qidx_t ifc_sysctl_ntxds[8];
qidx_t ifc_sysctl_nrxds[8];
@@ -343,7 +347,9 @@ struct iflib_txq {
uint16_t ift_npending;
uint16_t ift_db_pending;
uint16_t ift_rs_pending;
- /* implicit pad */
+ uint32_t ift_last_reclaim;
+ uint16_t ift_reclaim_thresh;
+ uint16_t ift_reclaim_ticks;
uint8_t ift_txd_size[8];
uint64_t ift_processed;
uint64_t ift_cleaned;
@@ -727,7 +733,7 @@ static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *iflib_fixup_rx(struct mbuf *m);
#endif
-static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh);
+static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq);
static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
SLIST_HEAD_INITIALIZER(cpu_offsets);
@@ -3082,8 +3088,6 @@ txq_max_rs_deferred(iflib_txq_t txq)
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
-/* XXX we should be setting this to something other than zero */
-#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
#define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
(ctx)->ifc_softc_ctx.isc_tx_nsegments)
@@ -3640,7 +3644,7 @@ defrag:
* cxgb
*/
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
- (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ (void)iflib_completed_tx_reclaim(txq);
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
txq->ift_no_desc_avail++;
bus_dmamap_unload(buf_tag, map);
@@ -3783,14 +3787,21 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
}
static __inline int
-iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
+iflib_completed_tx_reclaim(iflib_txq_t txq)
{
- int reclaim;
+ int reclaim, thresh;
+ uint32_t now;
if_ctx_t ctx = txq->ift_ctx;
+ thresh = txq->ift_reclaim_thresh;
KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
+ now = ticks;
+ if (now <= (txq->ift_last_reclaim + txq->ift_reclaim_ticks) &&
+ txq->ift_in_use < thresh)
+ return (0);
+ txq->ift_last_reclaim = now;
/*
* Need a rate-limiting check so that this isn't called every time
*/
@@ -3871,7 +3882,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
DBG_COUNTER_INC(txq_drain_notready);
return (0);
}
- reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ reclaimed = iflib_completed_tx_reclaim(txq);
rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
avail = IDXDIFF(pidx, cidx, r->size);
@@ -3950,7 +3961,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
}
/* deliberate use of bitwise or to avoid gratuitous short-circuit */
- ring = rang ? false : (iflib_min_tx_latency | err);
+ ring = rang ? false : (iflib_min_tx_latency | err | (!!txq->ift_reclaim_thresh));
iflib_txd_db_check(txq, ring);
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
@@ -4030,7 +4041,7 @@ _task_fn_tx(void *context)
#endif
if (ctx->ifc_sysctl_simple_tx) {
mtx_lock(&txq->ift_mtx);
- (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ (void)iflib_completed_tx_reclaim(txq);
mtx_unlock(&txq->ift_mtx);
goto skip_ifmp;
}
@@ -5881,6 +5892,7 @@ iflib_queues_alloc(if_ctx_t ctx)
device_printf(dev, "Unable to allocate buf_ring\n");
goto err_tx_desc;
}
+ txq->ift_reclaim_thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
}
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
@@ -6772,6 +6784,74 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
return (rc);
}
+static int
+iflib_handle_tx_reclaim_thresh(SYSCTL_HANDLER_ARGS)
+{
+ if_ctx_t ctx = (void *)arg1;
+ iflib_txq_t txq;
+ int i, err;
+ int thresh;
+
+ thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
+ err = sysctl_handle_int(oidp, &thresh, arg2, req);
+ if (err != 0) {
+ return err;
+ }
+
+ if (thresh == ctx->ifc_sysctl_tx_reclaim_thresh)
+ return 0;
+
+ if (thresh > ctx->ifc_softc_ctx.isc_ntxd[0] / 2) {
+ device_printf(ctx->ifc_dev, "TX Reclaim thresh must be <= %d\n",
+ ctx->ifc_softc_ctx.isc_ntxd[0] / 2);
+ return (EINVAL);
+ }
+
+ ctx->ifc_sysctl_tx_reclaim_thresh = thresh;
+ if (ctx->ifc_txqs == NULL)
+ return (err);
+
+ txq = &ctx->ifc_txqs[0];
+ for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
+ txq->ift_reclaim_thresh = thresh;
+ }
+ return (err);
+}
+
+static int
+iflib_handle_tx_reclaim_ticks(SYSCTL_HANDLER_ARGS)
+{
+ if_ctx_t ctx = (void *)arg1;
+ iflib_txq_t txq;
+ int i, err;
+ int ticks;
+
+ ticks = ctx->ifc_sysctl_tx_reclaim_ticks;
+ err = sysctl_handle_int(oidp, &ticks, arg2, req);
+ if (err != 0) {
+ return err;
+ }
+
+ if (ticks == ctx->ifc_sysctl_tx_reclaim_ticks)
+ return 0;
+
+ if (ticks > hz) {
+ device_printf(ctx->ifc_dev,
+ "TX Reclaim ticks must be <= hz (%d)\n", hz);
+ return (EINVAL);
+ }
+
+ ctx->ifc_sysctl_tx_reclaim_ticks = ticks;
+ if (ctx->ifc_txqs == NULL)
+ return (err);
+
+ txq = &ctx->ifc_txqs[0];
+ for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
+ txq->ift_reclaim_ticks = ticks;
+ }
+ return (err);
+}
+
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
@@ -6860,6 +6940,16 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
node = ctx->ifc_sysctl_node;
child = SYSCTL_CHILDREN(node);
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_thresh",
+ CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
+ 0, iflib_handle_tx_reclaim_thresh, "I",
+ "Number of TX descs outstanding before reclaim is called");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_ticks",
+ CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
+ 0, iflib_handle_tx_reclaim_ticks, "I",
+ "Number of ticks before a TX reclaim is forced");
+
if (scctx->isc_ntxqsets > 100)
qfmt = "txq%03d";
else if (scctx->isc_ntxqsets > 10)
@@ -7107,7 +7197,7 @@ iflib_debugnet_poll(if_t ifp, int count)
return (EBUSY);
txq = &ctx->ifc_txqs[0];
- (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ (void)iflib_completed_tx_reclaim(txq);
NET_EPOCH_ENTER(et);
for (i = 0; i < scctx->isc_nrxqsets; i++)
@@ -7117,7 +7207,7 @@ iflib_debugnet_poll(if_t ifp, int count)
}
#endif /* DEBUGNET */
-
+#ifndef ALTQ
static inline iflib_txq_t
iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m)
{
@@ -7157,7 +7247,7 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m)
else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
- (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ (void)iflib_completed_tx_reclaim(txq);
mtx_unlock(&txq->ift_mtx);
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
@@ -7166,3 +7256,4 @@ iflib_simple_transmit(if_t ifp, struct mbuf *m)
return (error);
}
+#endif
diff --git a/sys/net/iflib.h b/sys/net/iflib.h
index 3817445228d0..e65c936fc4b4 100644
--- a/sys/net/iflib.h
+++ b/sys/net/iflib.h
@@ -272,7 +272,7 @@ struct if_shared_ctx {
int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
int __spare0__;
- int isc_tx_reclaim_thresh;
+ int __spare1__;
int isc_flags;
};
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index c6a3448584ac..8aefe514946e 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1166,7 +1166,6 @@ struct pf_test_ctx {
int rewrite;
u_short reason;
struct pf_src_node *sns[PF_SN_MAX];
- struct pf_krule_slist rules;
struct pf_krule *nr;
struct pf_krule *tr;
struct pf_krule **rm;
@@ -2724,8 +2723,10 @@ int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t);
#ifdef _KERNEL
void pf_print_host(struct pf_addr *, u_int16_t, sa_family_t);
-enum pf_test_status pf_step_into_anchor(struct pf_test_ctx *, struct pf_krule *);
-enum pf_test_status pf_match_rule(struct pf_test_ctx *, struct pf_kruleset *);
+enum pf_test_status pf_step_into_anchor(struct pf_test_ctx *, struct pf_krule *,
+ struct pf_krule_slist *match_rules);
+enum pf_test_status pf_match_rule(struct pf_test_ctx *, struct pf_kruleset *,
+ struct pf_krule_slist *);
void pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *,
int *, struct pf_keth_ruleset **,
struct pf_keth_rule **, struct pf_keth_rule **,
diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c
index 2b7cf635b9f5..1299f86ebdc7 100644
--- a/sys/net80211/ieee80211.c
+++ b/sys/net80211/ieee80211.c
@@ -2689,13 +2689,18 @@ ieee80211_channel_type_char(const struct ieee80211_channel *c)
return 'f';
}
-/*
- * Determine whether the given key in the given VAP is a global key.
+/**
+ * @brief Determine whether the given key in the given VAP is a global key.
+ *
* (key index 0..3, shared between all stations on a VAP.)
*
* This is either a WEP key or a GROUP key.
*
* Note this will NOT return true if it is a IGTK key.
+ *
+ * @param vap the current VAP
+ * @param key ieee80211_key to use/check
+ * @returns true if it's a global/WEP key, false otherwise
*/
bool
ieee80211_is_key_global(const struct ieee80211vap *vap,
@@ -2705,8 +2710,23 @@ ieee80211_is_key_global(const struct ieee80211vap *vap,
key < &vap->iv_nw_keys[IEEE80211_WEP_NKID]);
}
-/*
- * Determine whether the given key in the given VAP is a unicast key.
+/**
+ * @brief Determine whether the given key in the given VAP is a unicast key.
+ *
+ * This only returns true if it's a unicast key.
+ *
+ * Note: For now net80211 only supports a single unicast key, stored in
+ * an ieee80211_node entry.
+ *
+ * Code should use this to know if it's a unicast key and then call
+ * ieee80211_crypto_get_keyid() to get the 802.11 key ID (0..3 for
+ * unicast/global keys, 4..5 for IGTK keys.) Since the unicast
+ * and global key indexes "overlap", callers will need to check
+ * both the type and id.
+ *
+ * @param vap the current VAP
+ * @param key ieee80211_key to use/check
+ * @returns true if the key is a unicast key, false if it is not
*/
bool
ieee80211_is_key_unicast(const struct ieee80211vap *vap,
diff --git a/sys/net80211/ieee80211_crypto.c b/sys/net80211/ieee80211_crypto.c
index 1e63ca46f28f..566f0b2e0c23 100644
--- a/sys/net80211/ieee80211_crypto.c
+++ b/sys/net80211/ieee80211_crypto.c
@@ -611,11 +611,15 @@ ieee80211_crypto_setkey(struct ieee80211vap *vap, struct ieee80211_key *key)
return dev_key_set(vap, key);
}
-/*
- * Return index if the key is a WEP key (0..3); -1 otherwise.
+/**
+ * @brief Return index if the key is a WEP key (0..3); -1 otherwise.
*
* This is different to "get_keyid" which defaults to returning
* 0 for unicast keys; it assumes that it won't be used for WEP.
+ *
+ * @param vap the current VAP
+ * @param k ieee80211_key to check
+ * @returns 0..3 if it's a global/WEP key, -1 otherwise.
*/
int
ieee80211_crypto_get_key_wepidx(const struct ieee80211vap *vap,
@@ -628,8 +632,18 @@ ieee80211_crypto_get_key_wepidx(const struct ieee80211vap *vap,
return (-1);
}
-/*
- * Note: only supports a single unicast key (0).
+/**
+ * @brief Return the index of a unicast, global or IGTK key.
+ *
+ * Return the index of a key. For unicast keys the index is 0..1.
+ * For global/WEP keys it's 0..3. For IGTK keys its 4..5.
+ *
+ * TODO: support >1 unicast key
+ * TODO: support IGTK keys
+ *
+ * @param vap the current VAP
+ * @param k ieee80211_key to check
+ * @returns 0..3 for a WEP/global key, 0..1 for unicast key, 4..5 for IGTK key
*/
uint8_t
ieee80211_crypto_get_keyid(struct ieee80211vap *vap, struct ieee80211_key *k)
@@ -641,6 +655,19 @@ ieee80211_crypto_get_keyid(struct ieee80211vap *vap, struct ieee80211_key *k)
return (0);
}
+/**
+ * @brief Return the key to use for encrypting an mbuf frame to a node
+ *
+ * This routine chooses a suitable key used to encrypt the given frame with.
+ * It doesn't do the encryption; it only chooses the key. If a key is not
+ * available then the routine will return NULL.
+ *
+ * It's up to the caller to enforce whether a key is absolutely required or not.
+ *
+ * @param ni The ieee80211_node to send the frame to
+ * @param m the mbuf to encrypt
+ * @returns the ieee80211_key to encrypt with, or NULL if there's no suitable key
+ */
struct ieee80211_key *
ieee80211_crypto_get_txkey(struct ieee80211_node *ni, struct mbuf *m)
{
@@ -676,8 +703,28 @@ ieee80211_crypto_get_txkey(struct ieee80211_node *ni, struct mbuf *m)
return &ni->ni_ucastkey;
}
-/*
- * Add privacy headers appropriate for the specified key.
+/**
+ * @brief Privacy encapsulate and encrypt the given mbuf.
+ *
+ * This routine handles the mechanics of encryption - expanding the
+ * mbuf to add privacy headers, IV, ICV, MIC, MMIC, and then encrypts
+ * the given mbuf if required.
+ *
+ * This should be called by the driver in its TX path as part of
+ * encapsulation before passing frames to the hardware/firmware
+ * queues.
+ *
+ * Drivers/hardware which do their own entirely offloaded path
+ * should still call this for completeness - it indicates to the
+ * driver that the frame itself should be encrypted.
+ *
+ * The driver should have set capability bits in the attach /
+ * key allocation path to disable various encapsulation/encryption
+ * features.
+ *
+ * @param ni ieee80211_node for this frame
+ * @param m mbuf to modify
+ * @returns the key used if the frame is to be encrypted, NULL otherwise
*/
struct ieee80211_key *
ieee80211_crypto_encap(struct ieee80211_node *ni, struct mbuf *m)
@@ -693,9 +740,31 @@ ieee80211_crypto_encap(struct ieee80211_node *ni, struct mbuf *m)
return NULL;
}
-/*
- * Validate and strip privacy headers (and trailer) for a
- * received frame that has the WEP/Privacy bit set.
+/**
+ * @brief Decapsulate and validate an encrypted frame.
+ *
+ * This handles an encrypted frame (one with the privacy bit set.)
+ * It also obeys the key / config / receive packet flags for how
+ * the driver says its already been processed.
+ *
+ * Unlike ieee80211_crypto_encap(), this isn't called in the driver.
+ * Instead, drivers pass the potentially decrypted frame - fully,
+ * partially, or not at all - and net80211 will call this as appropriate.
+ *
+ * This handles NICs (like ath(4)) which have a variable size between
+ * the 802.11 header and 802.11 payload due to DMA alignment / encryption
+ * engine concerns.
+ *
+ * If the frame was decrypted and validated successfully then 1 is returned
+ * and the mbuf can be treated as an 802.11 frame. If it is not decrypted
+ * successfully or it was decrypted but failed validation/checks, then
+ * 0 is returned.
+ *
+ * @param ni ieee80211_node for received frame
+ * @param m mbuf frame to receive
+ * @param hdrlen length of the 802.11 header, including trailing null bytes
+ * @param key pointer to ieee80211_key that will be set if appropriate
+ * @returns 0 if the frame wasn't decrypted/validated, 1 if decrypted/validated.
*/
int
ieee80211_crypto_decap(struct ieee80211_node *ni, struct mbuf *m, int hdrlen,
diff --git a/sys/net80211/ieee80211_var.h b/sys/net80211/ieee80211_var.h
index b9bc2357428d..7b45261f59b1 100644
--- a/sys/net80211/ieee80211_var.h
+++ b/sys/net80211/ieee80211_var.h
@@ -1013,7 +1013,7 @@ ieee80211_get_node_txpower(struct ieee80211_node *ni)
* Debugging facilities compiled in when IEEE80211_DEBUG is defined.
*
* The intent is that any problem in the net80211 layer can be
- * diagnosed by inspecting the statistics (dumped by the wlanstats
+ * diagnosed by inspecting the statistics (dumped by the wlanstat
* program) and/or the msgs generated by net80211. Messages are
* broken into functional classes and can be controlled with the
* wlandebug program. Certain of these msg groups are for facilities
diff --git a/sys/netgraph/netflow/netflow.c b/sys/netgraph/netflow/netflow.c
index 978d6fd0b54d..05c6062463be 100644
--- a/sys/netgraph/netflow/netflow.c
+++ b/sys/netgraph/netflow/netflow.c
@@ -960,7 +960,7 @@ struct ngnf_show_header *resp)
list_id = 0;
TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
- if (hsh->mtx.mtx_lock & MTX_CONTESTED) {
+ if (hsh->mtx.mtx_lock & MTX_WAITERS) {
resp->hash_id = i;
resp->list_id = list_id;
mtx_unlock(&hsh->mtx);
@@ -1111,7 +1111,7 @@ ng_netflow_expire(void *arg)
* Interrupt thread wants this entry!
* Quick! Quick! Bail out!
*/
- if (hsh->mtx.mtx_lock & MTX_CONTESTED)
+ if (hsh->mtx.mtx_lock & MTX_WAITERS)
break;
/*
@@ -1150,7 +1150,7 @@ ng_netflow_expire(void *arg)
* Interrupt thread wants this entry!
* Quick! Quick! Bail out!
*/
- if (hsh->mtx.mtx_lock & MTX_CONTESTED)
+ if (hsh->mtx.mtx_lock & MTX_WAITERS)
break;
/*
diff --git a/sys/netinet/ip_carp.c b/sys/netinet/ip_carp.c
index d3d7957cf087..4f553b9aac5e 100644
--- a/sys/netinet/ip_carp.c
+++ b/sys/netinet/ip_carp.c
@@ -1640,18 +1640,31 @@ carp_iamatch(struct ifaddr *ifa, uint8_t **enaddr)
static void
carp_send_na(struct carp_softc *sc)
{
- static struct in6_addr mcast = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
struct ifaddr *ifa;
- struct in6_addr *in6;
+ int flags;
+ /*
+ * Sending Unsolicited Neighbor Advertisements
+ *
+ * If the node is a router, we MUST set the Router flag to one.
+ * We set Override flag to one and send link-layer address option,
+ * thus neighboring nodes will install the new link-layer address.
+ */
+ flags = ND_NA_FLAG_OVERRIDE;
+ if (V_ip6_forwarding)
+ flags |= ND_NA_FLAG_ROUTER;
CARP_FOREACH_IFA(sc, ifa) {
if (ifa->ifa_addr->sa_family != AF_INET6)
continue;
-
- in6 = IFA_IN6(ifa);
- nd6_na_output(sc->sc_carpdev, &mcast, in6,
- ND_NA_FLAG_OVERRIDE, 1, NULL);
- DELAY(1000); /* XXX */
+ /*
+ * We use unspecified address as destination here to avoid
+ * scope initialization for each call.
+ * nd6_na_output() will use all nodes multicast address if
+ * destination address is unspecified.
+ */
+ nd6_na_output(sc->sc_carpdev, &in6addr_any, IFA_IN6(ifa),
+ flags, ND6_NA_OPT_LLA | ND6_NA_CARP_MASTER, NULL);
+ DELAY(1000); /* RetransTimer */
}
}
diff --git a/sys/netinet/sctp_lock_bsd.h b/sys/netinet/sctp_lock_bsd.h
index ec66be0cf371..a60983cb30e3 100644
--- a/sys/netinet/sctp_lock_bsd.h
+++ b/sys/netinet/sctp_lock_bsd.h
@@ -263,10 +263,10 @@
} while (0)
#define SCTP_INP_LOCK_CONTENDED(_inp) \
- ((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)
+ ((_inp)->inp_mtx.mtx_lock & MTX_WAITERS)
#define SCTP_INP_READ_CONTENDED(_inp) \
- ((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)
+ ((_inp)->inp_rdata_mtx.mtx_lock & MTX_WAITERS)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
@@ -337,7 +337,7 @@
} while (0)
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) \
- ((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)
+ ((_inp)->inp_create_mtx.mtx_lock & MTX_WAITERS)
/*
* For the majority of things (once we have found the association) we will
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 1ee6c6e31f33..f842a5678fa1 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -122,6 +122,7 @@ static void syncache_drop(struct syncache *, struct syncache_head *);
static void syncache_free(struct syncache *);
static void syncache_insert(struct syncache *, struct syncache_head *);
static int syncache_respond(struct syncache *, const struct mbuf *, int);
+static void syncache_send_challenge_ack(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
struct mbuf *m);
static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
@@ -694,13 +695,7 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th, struct mbuf *m,
"sending challenge ACK\n",
s, __func__,
th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
- if (syncache_respond(sc, m, TH_ACK) == 0) {
- TCPSTAT_INC(tcps_sndacks);
- TCPSTAT_INC(tcps_sndtotal);
- } else {
- syncache_drop(sc, sch);
- TCPSTAT_INC(tcps_sc_dropped);
- }
+ syncache_send_challenge_ack(sc, m);
}
} else {
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
@@ -963,6 +958,10 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
if (sc->sc_rxmits > 1)
tp->snd_cwnd = 1;
+ /* Copy over the challenge ACK state. */
+ tp->t_challenge_ack_end = sc->sc_challenge_ack_end;
+ tp->t_challenge_ack_cnt = sc->sc_challenge_ack_cnt;
+
#ifdef TCP_OFFLOAD
/*
* Allow a TOE driver to install its hooks. Note that we hold the
@@ -1202,7 +1201,6 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
*/
if (sc->sc_flags & SCF_TIMESTAMP && to->to_flags & TOF_TS &&
TSTMP_LT(to->to_tsval, sc->sc_tsreflect)) {
- SCH_UNLOCK(sch);
if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
log(LOG_DEBUG,
"%s; %s: SEG.TSval %u < TS.Recent %u, "
@@ -1210,6 +1208,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
to->to_tsval, sc->sc_tsreflect);
free(s, M_TCPLOG);
}
+ SCH_UNLOCK(sch);
return (-1); /* Do not send RST */
}
@@ -1258,6 +1257,38 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
return (-1); /* Do not send RST */
}
}
+
+ /*
+ * SEG.SEQ validation:
+ * The SEG.SEQ must be in the window starting at our
+ * initial receive sequence number + 1.
+ */
+ if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
+ SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, "
+ "sending challenge ACK\n",
+ s, __func__, th->th_seq, sc->sc_irs + 1);
+ syncache_send_challenge_ack(sc, m);
+ SCH_UNLOCK(sch);
+ free(s, M_TCPLOG);
+ return (-1); /* Do not send RST */
+ }
+
+ /*
+ * SEG.ACK validation:
+ * SEG.ACK must match our initial send sequence number + 1.
+ */
+ if (th->th_ack != sc->sc_iss + 1) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, "
+ "segment rejected\n",
+ s, __func__, th->th_ack, sc->sc_iss + 1);
+ SCH_UNLOCK(sch);
+ free(s, M_TCPLOG);
+ return (0); /* Do send RST, do not free sc. */
+ }
+
TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
sch->sch_length--;
#ifdef TCP_OFFLOAD
@@ -1270,29 +1301,6 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
SCH_UNLOCK(sch);
}
- /*
- * Segment validation:
- * ACK must match our initial sequence number + 1 (the SYN|ACK).
- */
- if (th->th_ack != sc->sc_iss + 1) {
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
- log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
- "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
- goto failed;
- }
-
- /*
- * The SEQ must fall in the window starting at the received
- * initial receive sequence number + 1 (the SYN).
- */
- if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
- SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
- log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
- "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
- goto failed;
- }
-
*lsop = syncache_socket(sc, *lsop, m);
if (__predict_false(*lsop == NULL)) {
@@ -2053,6 +2061,18 @@ syncache_respond(struct syncache *sc, const struct mbuf *m0, int flags)
return (error);
}
+static void
+syncache_send_challenge_ack(struct syncache *sc, struct mbuf *m)
+{
+ if (tcp_challenge_ack_check(&sc->sc_challenge_ack_end,
+ &sc->sc_challenge_ack_cnt)) {
+ if (syncache_respond(sc, m, TH_ACK) == 0) {
+ TCPSTAT_INC(tcps_sndacks);
+ TCPSTAT_INC(tcps_sndtotal);
+ }
+ }
+}
+
/*
* The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
* that exceed the capacity of the syncache by avoiding the storage of any
diff --git a/sys/netinet/tcp_syncache.h b/sys/netinet/tcp_syncache.h
index a336571f12c9..37f6ff3d6ca9 100644
--- a/sys/netinet/tcp_syncache.h
+++ b/sys/netinet/tcp_syncache.h
@@ -67,6 +67,8 @@ struct syncache {
u_int8_t sc_requested_s_scale:4,
sc_requested_r_scale:4;
u_int16_t sc_flags;
+ u_int32_t sc_challenge_ack_cnt; /* chall. ACKs sent in epoch */
+ sbintime_t sc_challenge_ack_end; /* End of chall. ack epoch */
#if defined(TCP_OFFLOAD)
struct toedev *sc_tod; /* entry added by this TOE */
void *sc_todctx; /* TOE driver context */
diff --git a/sys/netinet6/in6.c b/sys/netinet6/in6.c
index 8ef755e2dc0a..b98703bdfbfe 100644
--- a/sys/netinet6/in6.c
+++ b/sys/netinet6/in6.c
@@ -1295,8 +1295,8 @@ in6_addifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *i
*/
bzero(&pr0, sizeof(pr0));
pr0.ndpr_ifp = ifp;
- pr0.ndpr_plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr,
- NULL);
+ pr0.ndpr_plen = ia->ia_plen =
+ in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL);
if (pr0.ndpr_plen == 128) {
/* we don't need to install a host route. */
goto aifaddr_out;
@@ -1490,16 +1490,16 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
* positive reference.
*/
remove_lle = 0;
- if (ia->ia6_ndpr == NULL) {
- nd6log((LOG_NOTICE,
- "in6_unlink_ifa: autoconf'ed address "
- "%s has no prefix\n", ip6_sprintf(ip6buf, IA6_IN6(ia))));
- } else {
+ if (ia->ia6_ndpr != NULL) {
ia->ia6_ndpr->ndpr_addrcnt--;
/* Do not delete lles within prefix if refcont != 0 */
if (ia->ia6_ndpr->ndpr_addrcnt == 0)
remove_lle = 1;
ia->ia6_ndpr = NULL;
+ } else if (ia->ia_plen < 128) {
+ nd6log((LOG_NOTICE,
+ "in6_unlink_ifa: autoconf'ed address "
+ "%s has no prefix\n", ip6_sprintf(ip6buf, IA6_IN6(ia))));
}
nd6_rem_ifa_lle(ia, remove_lle);
@@ -2604,8 +2604,6 @@ in6_domifattach(struct ifnet *ifp)
COUNTER_ARRAY_ALLOC(ext->icmp6_ifstat,
sizeof(struct icmp6_ifstat) / sizeof(uint64_t), M_WAITOK);
- ext->dad_failures = counter_u64_alloc(M_WAITOK);
-
ext->nd_ifinfo = nd6_ifattach(ifp);
ext->scope6_id = scope6_ifattach(ifp);
ext->lltable = in6_lltattach(ifp);
@@ -2641,7 +2639,6 @@ in6_domifdetach(struct ifnet *ifp, void *aux)
COUNTER_ARRAY_FREE(ext->icmp6_ifstat,
sizeof(struct icmp6_ifstat) / sizeof(uint64_t));
free(ext->icmp6_ifstat, M_IFADDR);
- counter_u64_free(ext->dad_failures);
free(ext, M_IFADDR);
}
diff --git a/sys/netinet6/in6_ifattach.c b/sys/netinet6/in6_ifattach.c
index 4fde346fb691..090ba610460b 100644
--- a/sys/netinet6/in6_ifattach.c
+++ b/sys/netinet6/in6_ifattach.c
@@ -44,7 +44,6 @@
#include <sys/rmlock.h>
#include <sys/syslog.h>
#include <sys/md5.h>
-#include <crypto/sha2/sha256.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -72,6 +71,9 @@
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>
+#include <crypto/sha2/sha256.h>
+#include <machine/atomic.h>
+
#ifdef IP6_AUTO_LINKLOCAL
VNET_DEFINE(int, ip6_auto_linklocal) = IP6_AUTO_LINKLOCAL;
#else
@@ -377,7 +379,7 @@ in6_get_stableifid(struct ifnet *ifp, struct in6_addr *in6, int prefixlen)
}
hostuuid_len = strlen(hostuuid);
- dad_failures = counter_u64_fetch(DAD_FAILURES(ifp));
+ dad_failures = atomic_load_int(&DAD_FAILURES(ifp));
/*
* RFC 7217 section 7
diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c
index 6669a2ba56ce..f567b42b42ca 100644
--- a/sys/netinet6/in6_proto.c
+++ b/sys/netinet6/in6_proto.c
@@ -167,7 +167,7 @@ VNET_DEFINE(int, ip6_rr_prune) = 5; /* router renumbering prefix
* walk list every 5 sec. */
VNET_DEFINE(int, ip6_mcast_pmtu) = 0; /* enable pMTU discovery for multicast? */
VNET_DEFINE(int, ip6_v6only) = 1;
-VNET_DEFINE(int, ip6_stableaddr_maxretries) = IP6_IDGEN_RETRIES;
+VNET_DEFINE(u_int, ip6_stableaddr_maxretries) = IP6_IDGEN_RETRIES;
#ifdef IPSTEALTH
VNET_DEFINE(int, ip6stealth) = 0;
@@ -317,7 +317,7 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USETEMPADDR, use_tempaddr,
SYSCTL_BOOL(_net_inet6_ip6, IPV6CTL_USESTABLEADDR, use_stableaddr,
CTLFLAG_VNET | CTLFLAG_RWTUN, &VNET_NAME(ip6_use_stableaddr), 0,
"Create RFC7217 semantically opaque address for autoconfigured addresses (default for new interfaces)");
-SYSCTL_INT(_net_inet6_ip6, IPV6CTL_STABLEADDR_MAXRETRIES, stableaddr_maxretries,
+SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_STABLEADDR_MAXRETRIES, stableaddr_maxretries,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_stableaddr_maxretries), IP6_IDGEN_RETRIES,
"RFC7217 semantically opaque address DAD max retries");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_STABLEADDR_NETIFSRC, stableaddr_netifsource,
diff --git a/sys/netinet6/in6_src.c b/sys/netinet6/in6_src.c
index dd6864482b3c..3e55c6e5fc05 100644
--- a/sys/netinet6/in6_src.c
+++ b/sys/netinet6/in6_src.c
@@ -132,8 +132,8 @@ static int in6_selectif(struct sockaddr_in6 *, struct ip6_pktopts *,
struct ip6_moptions *, struct ifnet **,
struct ifnet *, u_int);
static int in6_selectsrc(uint32_t, struct sockaddr_in6 *,
- struct ip6_pktopts *, struct inpcb *, struct ucred *,
- struct ifnet **, struct in6_addr *);
+ struct ip6_pktopts *, struct ip6_moptions *, struct inpcb *,
+ struct ucred *, struct ifnet **, struct in6_addr *);
static struct in6_addrpolicy *lookup_addrsel_policy(struct sockaddr_in6 *);
@@ -173,8 +173,8 @@ static struct in6_addrpolicy *match_addrsel_policy(struct sockaddr_in6 *);
static int
in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
- struct ip6_pktopts *opts, struct inpcb *inp, struct ucred *cred,
- struct ifnet **ifpp, struct in6_addr *srcp)
+ struct ip6_pktopts *opts, struct ip6_moptions *mopts, struct inpcb *inp,
+ struct ucred *cred, struct ifnet **ifpp, struct in6_addr *srcp)
{
struct rm_priotracker in6_ifa_tracker;
struct in6_addr dst, tmp;
@@ -186,7 +186,6 @@ in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
u_int32_t odstzone;
int prefer_tempaddr;
int error;
- struct ip6_moptions *mopts;
NET_EPOCH_ASSERT();
KASSERT(srcp != NULL, ("%s: srcp is NULL", __func__));
@@ -205,13 +204,6 @@ in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
*ifpp = NULL;
}
- if (inp != NULL) {
- INP_LOCK_ASSERT(inp);
- mopts = inp->in6p_moptions;
- } else {
- mopts = NULL;
- }
-
/*
* If the source address is explicitly specified by the caller,
* check if the requested source address is indeed a unicast address
@@ -552,10 +544,13 @@ in6_selectsrc_socket(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
uint32_t fibnum;
int error;
+ INP_LOCK_ASSERT(inp);
+
fibnum = inp->inp_inc.inc_fibnum;
retifp = NULL;
- error = in6_selectsrc(fibnum, dstsock, opts, inp, cred, &retifp, srcp);
+ error = in6_selectsrc(fibnum, dstsock, opts, inp->in6p_moptions,
+ inp, cred, &retifp, srcp);
if (error != 0)
return (error);
@@ -583,7 +578,7 @@ in6_selectsrc_socket(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
* Stores selected address to @srcp.
* Returns 0 on success.
*
- * Used by non-socket based consumers (ND code mostly)
+ * Used by non-socket based consumers
*/
int
in6_selectsrc_addr(uint32_t fibnum, const struct in6_addr *dst,
@@ -602,13 +597,42 @@ in6_selectsrc_addr(uint32_t fibnum, const struct in6_addr *dst,
dst_sa.sin6_scope_id = scopeid;
sa6_embedscope(&dst_sa, 0);
- error = in6_selectsrc(fibnum, &dst_sa, NULL, NULL, NULL, &retifp, srcp);
+ error = in6_selectsrc(fibnum, &dst_sa, NULL, NULL,
+ NULL, NULL, &retifp, srcp);
if (hlim != NULL)
*hlim = in6_selecthlim(NULL, retifp);
return (error);
}
+/*
+ * Select source address based on @fibnum, @dst and @mopts.
+ * Stores selected address to @srcp.
+ * Returns 0 on success.
+ *
+ * Used by non-socket based consumers (ND code mostly)
+ */
+int
+in6_selectsrc_nbr(uint32_t fibnum, const struct in6_addr *dst,
+ struct ip6_moptions *mopts, struct ifnet *ifp, struct in6_addr *srcp)
+{
+ struct sockaddr_in6 dst_sa;
+ struct ifnet *retifp;
+ int error;
+
+ retifp = ifp;
+ bzero(&dst_sa, sizeof(dst_sa));
+ dst_sa.sin6_family = AF_INET6;
+ dst_sa.sin6_len = sizeof(dst_sa);
+ dst_sa.sin6_addr = *dst;
+ dst_sa.sin6_scope_id = ntohs(in6_getscope(dst));
+ sa6_embedscope(&dst_sa, 0);
+
+ error = in6_selectsrc(fibnum, &dst_sa, NULL, mopts,
+ NULL, NULL, &retifp, srcp);
+ return (error);
+}
+
static struct nhop_object *
cache_route(uint32_t fibnum, const struct sockaddr_in6 *dst, struct route_in6 *ro,
uint32_t flowid)
diff --git a/sys/netinet6/in6_var.h b/sys/netinet6/in6_var.h
index e511ead24f08..1414cc71388d 100644
--- a/sys/netinet6/in6_var.h
+++ b/sys/netinet6/in6_var.h
@@ -106,7 +106,7 @@ struct in6_ifextra {
struct scope6_id *scope6_id;
struct lltable *lltable;
struct mld_ifsoftc *mld_ifinfo;
- counter_u64_t dad_failures; /* DAD failures when using RFC 7217 stable addresses */
+ u_int dad_failures; /* DAD failures when using RFC 7217 stable addresses */
};
#define LLTABLE6(ifp) (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->lltable)
diff --git a/sys/netinet6/ip6_var.h b/sys/netinet6/ip6_var.h
index e1a4e8678ebb..db1631736c4a 100644
--- a/sys/netinet6/ip6_var.h
+++ b/sys/netinet6/ip6_var.h
@@ -344,7 +344,7 @@ VNET_DECLARE(bool, ip6_use_stableaddr); /* Whether to use stable address generat
#define V_ip6_use_stableaddr VNET(ip6_use_stableaddr)
#define IP6_IDGEN_RETRIES 3 /* RFC 7217 section 7 default max retries */
-VNET_DECLARE(int, ip6_stableaddr_maxretries);
+VNET_DECLARE(u_int, ip6_stableaddr_maxretries);
#define V_ip6_stableaddr_maxretries VNET(ip6_stableaddr_maxretries)
#define IP6_STABLEADDR_NETIFSRC_NAME 0
@@ -440,6 +440,8 @@ int in6_selectsrc_socket(struct sockaddr_in6 *, struct ip6_pktopts *,
struct inpcb *, struct ucred *, int, struct in6_addr *, int *);
int in6_selectsrc_addr(uint32_t, const struct in6_addr *,
uint32_t, struct ifnet *, struct in6_addr *, int *);
+int in6_selectsrc_nbr(uint32_t, const struct in6_addr *,
+ struct ip6_moptions *, struct ifnet *, struct in6_addr *);
int in6_selectroute(struct sockaddr_in6 *, struct ip6_pktopts *,
struct ip6_moptions *, struct route_in6 *, struct ifnet **,
struct nhop_object **, u_int, uint32_t);
diff --git a/sys/netinet6/nd6.h b/sys/netinet6/nd6.h
index 5fe027ac5e7c..e484c709e29a 100644
--- a/sys/netinet6/nd6.h
+++ b/sys/netinet6/nd6.h
@@ -171,6 +171,10 @@ struct in6_ndifreq {
#define NDPRF_ONLINK 0x1
#define NDPRF_DETACHED 0x2
+/* ND6 NA output flags */
+#define ND6_NA_OPT_LLA 0x01
+#define ND6_NA_CARP_MASTER 0x02
+
/* protocol constants */
#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
diff --git a/sys/netinet6/nd6_nbr.c b/sys/netinet6/nd6_nbr.c
index cc17b4e1a402..29151b29a071 100644
--- a/sys/netinet6/nd6_nbr.c
+++ b/sys/netinet6/nd6_nbr.c
@@ -77,6 +77,8 @@
#include <netinet/ip_carp.h>
#include <netinet6/send.h>
+#include <machine/atomic.h>
+
#define SDL(s) ((struct sockaddr_dl *)s)
struct dadq;
@@ -245,10 +247,9 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
* In implementation, we add target link-layer address by default.
* We do not add one in MUST NOT cases.
*/
- if (!IN6_IS_ADDR_MULTICAST(&daddr6))
- tlladdr = 0;
- else
- tlladdr = 1;
+ tlladdr = 0;
+ if (IN6_IS_ADDR_MULTICAST(&daddr6))
+ tlladdr |= ND6_NA_OPT_LLA;
/*
* Target address (taddr6) must be either:
@@ -257,9 +258,11 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
* (3) "tentative" address on which DAD is being performed.
*/
/* (1) and (3) check. */
- if (ifp->if_carp)
+ if (ifp->if_carp) {
ifa = (*carp_iamatch6_p)(ifp, &taddr6);
- else
+ if (ifa != NULL)
+ tlladdr |= ND6_NA_CARP_MASTER;
+ } else
ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6);
/* (2) check. */
@@ -323,32 +326,28 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
}
/*
+ * If the Target Address is either an anycast address or a unicast
+ * address for which the node is providing proxy service, or the Target
+ * Link-Layer Address option is not included, the Override flag SHOULD
+ * be set to zero. Otherwise, the Override flag SHOULD be set to one.
+ */
+ if (anycast == 0 && proxy == 0 && (tlladdr & ND6_NA_OPT_LLA) != 0)
+ rflag |= ND_NA_FLAG_OVERRIDE;
+ /*
* If the source address is unspecified address, entries must not
* be created or updated.
- * It looks that sender is performing DAD. Output NA toward
- * all-node multicast address, to tell the sender that I'm using
- * the address.
+ * It looks that sender is performing DAD. nd6_na_output() will
+ * send NA toward all-node multicast address, to tell the sender
+ * that I'm using the address.
* S bit ("solicited") must be zero.
*/
- if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
- struct in6_addr in6_all;
-
- in6_all = in6addr_linklocal_allnodes;
- if (in6_setscope(&in6_all, ifp, NULL) != 0)
- goto bad;
- nd6_na_output_fib(ifp, &in6_all, &taddr6,
- ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) |
- rflag, tlladdr, proxy ? (struct sockaddr *)&proxydl : NULL,
- M_GETFIB(m));
- goto freeit;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
+ nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen,
+ ND_NEIGHBOR_SOLICIT, 0);
+ rflag |= ND_NA_FLAG_SOLICITED;
}
- nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen,
- ND_NEIGHBOR_SOLICIT, 0);
-
- nd6_na_output_fib(ifp, &saddr6, &taddr6,
- ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) |
- rflag | ND_NA_FLAG_SOLICITED, tlladdr,
+ nd6_na_output_fib(ifp, &saddr6, &taddr6, rflag, tlladdr,
proxy ? (struct sockaddr *)&proxydl : NULL, M_GETFIB(m));
freeit:
if (ifa != NULL)
@@ -440,13 +439,6 @@ nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *saddr6,
return;
M_SETFIB(m, fibnum);
- if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) {
- m->m_flags |= M_MCAST;
- im6o.im6o_multicast_ifp = ifp;
- im6o.im6o_multicast_hlim = 255;
- im6o.im6o_multicast_loop = 0;
- }
-
icmp6len = sizeof(*nd_ns);
m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len;
m->m_data += max_linkhdr; /* or M_ALIGN() equivalent? */
@@ -471,6 +463,12 @@ nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *saddr6,
if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0)
goto bad;
}
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ m->m_flags |= M_MCAST;
+ im6o.im6o_multicast_ifp = ifp;
+ im6o.im6o_multicast_hlim = 255;
+ im6o.im6o_multicast_loop = 0;
+ }
if (nonce == NULL) {
char ip6buf[INET6_ADDRSTRLEN];
struct ifaddr *ifa = NULL;
@@ -492,20 +490,16 @@ nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *saddr6,
ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, saddr6);
if (ifa == NULL) {
int error;
- struct in6_addr dst6, src6;
- uint32_t scopeid;
- in6_splitscope(&ip6->ip6_dst, &dst6, &scopeid);
- error = in6_selectsrc_addr(fibnum, &dst6,
- scopeid, ifp, &src6, NULL);
+ error = in6_selectsrc_nbr(fibnum, &ip6->ip6_dst, &im6o,
+ ifp, &ip6->ip6_src);
if (error) {
nd6log((LOG_DEBUG, "%s: source can't be "
"determined: dst=%s, error=%d\n", __func__,
- ip6_sprintf(ip6buf, &dst6),
+ ip6_sprintf(ip6buf, &ip6->ip6_dst),
error));
goto bad;
}
- ip6->ip6_src = src6;
} else
ip6->ip6_src = *saddr6;
@@ -968,7 +962,9 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len)
* - proxy advertisement delay rule (RFC2461 7.2.8, last paragraph, SHOULD)
* - anycast advertisement delay rule (RFC2461 7.2.7, SHOULD)
*
- * tlladdr - 1 if include target link-layer address
+ * tlladdr:
+ * - 0x01 if the target link-layer address option should be included
+ * - 0x02 if target address is CARP MASTER
* sdl0 - sockaddr_dl (= proxy NA) or NULL
*/
static void
@@ -981,8 +977,7 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
struct ip6_hdr *ip6;
struct nd_neighbor_advert *nd_na;
struct ip6_moptions im6o;
- struct in6_addr daddr6, dst6, src6;
- uint32_t scopeid;
+ struct in6_addr daddr6;
NET_EPOCH_ASSERT();
@@ -1006,13 +1001,6 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
return;
M_SETFIB(m, fibnum);
- if (IN6_IS_ADDR_MULTICAST(&daddr6)) {
- m->m_flags |= M_MCAST;
- im6o.im6o_multicast_ifp = ifp;
- im6o.im6o_multicast_hlim = 255;
- im6o.im6o_multicast_loop = 0;
- }
-
icmp6len = sizeof(*nd_na);
m->m_pkthdr.len = m->m_len = sizeof(struct ip6_hdr) + icmp6len;
m->m_data += max_linkhdr; /* or M_ALIGN() equivalent? */
@@ -1024,26 +1012,24 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
ip6->ip6_vfc |= IPV6_VERSION;
ip6->ip6_nxt = IPPROTO_ICMPV6;
ip6->ip6_hlim = 255;
+
if (IN6_IS_ADDR_UNSPECIFIED(&daddr6)) {
/* reply to DAD */
- daddr6.s6_addr16[0] = IPV6_ADDR_INT16_MLL;
- daddr6.s6_addr16[1] = 0;
- daddr6.s6_addr32[1] = 0;
- daddr6.s6_addr32[2] = 0;
- daddr6.s6_addr32[3] = IPV6_ADDR_INT32_ONE;
+ daddr6 = in6addr_linklocal_allnodes;
if (in6_setscope(&daddr6, ifp, NULL))
goto bad;
flags &= ~ND_NA_FLAG_SOLICITED;
}
- ip6->ip6_dst = daddr6;
+ if (IN6_IS_ADDR_MULTICAST(&daddr6)) {
+ m->m_flags |= M_MCAST;
+ im6o.im6o_multicast_ifp = ifp;
+ im6o.im6o_multicast_hlim = 255;
+ im6o.im6o_multicast_loop = 0;
+ }
- /*
- * Select a source whose scope is the same as that of the dest.
- */
- in6_splitscope(&daddr6, &dst6, &scopeid);
- error = in6_selectsrc_addr(fibnum, &dst6,
- scopeid, ifp, &src6, NULL);
+ ip6->ip6_dst = daddr6;
+ error = in6_selectsrc_nbr(fibnum, &daddr6, &im6o, ifp, &ip6->ip6_src);
if (error) {
char ip6buf[INET6_ADDRSTRLEN];
nd6log((LOG_DEBUG, "nd6_na_output: source can't be "
@@ -1051,7 +1037,6 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
ip6_sprintf(ip6buf, &daddr6), error));
goto bad;
}
- ip6->ip6_src = src6;
nd_na = (struct nd_neighbor_advert *)(ip6 + 1);
nd_na->nd_na_type = ND_NEIGHBOR_ADVERT;
nd_na->nd_na_code = 0;
@@ -1059,20 +1044,24 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
in6_clearscope(&nd_na->nd_na_target); /* XXX */
/*
+ * If we respond from CARP address, we need to prepare mac address
+ * for carp_output().
+ */
+ if (ifp->if_carp && (tlladdr & ND6_NA_CARP_MASTER))
+ mac = (*carp_macmatch6_p)(ifp, m, taddr6);
+ /*
* "tlladdr" indicates NS's condition for adding tlladdr or not.
* see nd6_ns_input() for details.
* Basically, if NS packet is sent to unicast/anycast addr,
* target lladdr option SHOULD NOT be included.
*/
- if (tlladdr) {
+ if (tlladdr & ND6_NA_OPT_LLA) {
/*
* sdl0 != NULL indicates proxy NA. If we do proxy, use
* lladdr in sdl0. If we are not proxying (sending NA for
* my address) use lladdr configured for the interface.
*/
if (sdl0 == NULL) {
- if (ifp->if_carp)
- mac = (*carp_macmatch6_p)(ifp, m, taddr6);
if (mac == NULL)
mac = nd6_ifptomac(ifp);
} else if (sdl0->sa_family == AF_LINK) {
@@ -1082,7 +1071,7 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
mac = LLADDR(sdl);
}
}
- if (tlladdr && mac) {
+ if ((tlladdr & ND6_NA_OPT_LLA) && mac != NULL) {
int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen;
struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_na + 1);
@@ -1473,7 +1462,7 @@ nd6_dad_timer(void *arg)
if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) == 0) {
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY))
- counter_u64_zero(DAD_FAILURES(ifp));
+ atomic_store_int(&DAD_FAILURES(ifp), 0);
}
nd6log((LOG_DEBUG,
@@ -1522,10 +1511,10 @@ nd6_dad_duplicated(struct ifaddr *ifa, struct dadq *dp)
* More addresses will be generated as long as retries are not exhausted.
*/
if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
- uint64_t dad_failures = counter_u64_fetch(DAD_FAILURES(ifp));
+ u_int dad_failures = atomic_load_int(&DAD_FAILURES(ifp));
if (dad_failures <= V_ip6_stableaddr_maxretries) {
- counter_u64_add(DAD_FAILURES(ifp), 1);
+ atomic_add_int(&DAD_FAILURES(ifp), 1);
/* if retries exhausted, output an informative error message */
if (dad_failures == V_ip6_stableaddr_maxretries)
log(LOG_ERR, "%s: manual intervention required, consider disabling \"stableaddr\" on the interface"
diff --git a/sys/netinet6/nd6_rtr.c b/sys/netinet6/nd6_rtr.c
index 78dc55dd292f..10f0342f2bc4 100644
--- a/sys/netinet6/nd6_rtr.c
+++ b/sys/netinet6/nd6_rtr.c
@@ -74,6 +74,8 @@
#include <netinet/icmp6.h>
#include <netinet6/scope6_var.h>
+#include <machine/atomic.h>
+
static struct nd_defrouter *defrtrlist_update(struct nd_defrouter *);
static int prelist_update(struct nd_prefixctl *, struct nd_defrouter *,
struct mbuf *, int);
@@ -1243,9 +1245,8 @@ in6_ifadd(struct nd_prefixctl *pr, int mcast)
/* No suitable LL address, get the ifid directly */
if (ifid_addr == NULL) {
- struct in6_addr taddr;
- ifa = ifa_alloc(sizeof(taddr), M_WAITOK);
- if (ifa) {
+ ifa = ifa_alloc(sizeof(struct in6_ifaddr), M_NOWAIT);
+ if (ifa != NULL) {
ib = (struct in6_ifaddr *)ifa;
ifid_addr = &ib->ia_addr.sin6_addr;
if(in6_get_ifid(ifp, NULL, ifid_addr) != 0) {
@@ -1757,7 +1758,7 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
* to fail and no further retries should happen.
*/
if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR &&
- counter_u64_fetch(DAD_FAILURES(ifp)) <= V_ip6_stableaddr_maxretries &&
+ atomic_load_int(&DAD_FAILURES(ifp)) <= V_ip6_stableaddr_maxretries &&
ifa6->ia6_flags & (IN6_IFF_DUPLICATED | IN6_IFF_TEMPORARY))
continue;
diff --git a/sys/netlink/netlink_snl.h b/sys/netlink/netlink_snl.h
index 6dd8a9cbdb35..57f7e1e29d08 100644
--- a/sys/netlink/netlink_snl.h
+++ b/sys/netlink/netlink_snl.h
@@ -1068,14 +1068,14 @@ snl_init_writer(struct snl_state *ss, struct snl_writer *nw)
{
nw->size = SNL_WRITER_BUFFER_SIZE;
nw->base = (char *)snl_allocz(ss, nw->size);
- if (nw->base == NULL) {
+ if (__predict_false(nw->base == NULL)) {
nw->error = true;
nw->size = 0;
- }
+ } else
+ nw->error = false;
nw->offset = 0;
nw->hdr = NULL;
- nw->error = false;
nw->ss = ss;
}
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index ec6960180413..d6fc24a23fe9 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -344,10 +344,12 @@ static int pf_test_eth_rule(int, struct pfi_kkif *,
struct mbuf **);
static int pf_test_rule(struct pf_krule **, struct pf_kstate **,
struct pf_pdesc *, struct pf_krule **,
- struct pf_kruleset **, u_short *, struct inpcb *);
+ struct pf_kruleset **, u_short *, struct inpcb *,
+ struct pf_krule_slist *);
static int pf_create_state(struct pf_krule *,
struct pf_test_ctx *,
- struct pf_kstate **, u_int16_t, u_int16_t);
+ struct pf_kstate **, u_int16_t, u_int16_t,
+ struct pf_krule_slist *match_rules);
static int pf_state_key_addr_setup(struct pf_pdesc *,
struct pf_state_key_cmp *, int);
static int pf_tcp_track_full(struct pf_kstate *,
@@ -393,7 +395,7 @@ static bool pf_src_connlimit(struct pf_kstate *);
static int pf_match_rcvif(struct mbuf *, struct pf_krule *);
static void pf_counters_inc(int, struct pf_pdesc *,
struct pf_kstate *, struct pf_krule *,
- struct pf_krule *);
+ struct pf_krule *, struct pf_krule_slist *);
static void pf_log_matches(struct pf_pdesc *, struct pf_krule *,
struct pf_krule *, struct pf_kruleset *,
struct pf_krule_slist *);
@@ -489,26 +491,30 @@ BOUND_IFACE(struct pf_kstate *st, struct pf_pdesc *pd)
counter_u64_add(s->anchor->states_cur, 1); \
counter_u64_add(s->anchor->states_tot, 1); \
} \
- if (s->nat_rule != NULL) { \
- counter_u64_add(s->nat_rule->states_cur, 1);\
- counter_u64_add(s->nat_rule->states_tot, 1);\
+ if (s->nat_rule != NULL && s->nat_rule != s->rule) { \
+ counter_u64_add(s->nat_rule->states_cur, 1); \
+ counter_u64_add(s->nat_rule->states_tot, 1); \
} \
SLIST_FOREACH(mrm, &s->match_rules, entry) { \
- counter_u64_add(mrm->r->states_cur, 1); \
- counter_u64_add(mrm->r->states_tot, 1); \
+ if (s->nat_rule != mrm->r) { \
+ counter_u64_add(mrm->r->states_cur, 1); \
+ counter_u64_add(mrm->r->states_tot, 1); \
+ } \
} \
} while (0)
#define STATE_DEC_COUNTERS(s) \
do { \
struct pf_krule_item *mrm; \
- if (s->nat_rule != NULL) \
- counter_u64_add(s->nat_rule->states_cur, -1);\
- if (s->anchor != NULL) \
- counter_u64_add(s->anchor->states_cur, -1); \
counter_u64_add(s->rule->states_cur, -1); \
+ if (s->anchor != NULL) \
+ counter_u64_add(s->anchor->states_cur, -1); \
+ if (s->nat_rule != NULL && s->nat_rule != s->rule) \
+ counter_u64_add(s->nat_rule->states_cur, -1); \
SLIST_FOREACH(mrm, &s->match_rules, entry) \
- counter_u64_add(mrm->r->states_cur, -1); \
+ if (s->nat_rule != mrm->r) { \
+ counter_u64_add(mrm->r->states_cur, -1);\
+ } \
} while (0)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
@@ -2869,20 +2875,24 @@ pf_alloc_state(int flags)
return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
}
+static __inline void
+pf_free_match_rules(struct pf_krule_slist *match_rules) {
+ struct pf_krule_item *ri;
+
+ while ((ri = SLIST_FIRST(match_rules))) {
+ SLIST_REMOVE_HEAD(match_rules, entry);
+ free(ri, M_PF_RULE_ITEM);
+ }
+}
+
void
pf_free_state(struct pf_kstate *cur)
{
- struct pf_krule_item *ri;
-
KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
cur->timeout));
- while ((ri = SLIST_FIRST(&cur->match_rules))) {
- SLIST_REMOVE_HEAD(&cur->match_rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
+ pf_free_match_rules(&(cur->match_rules));
pf_normalize_tcp_cleanup(cur);
uma_zfree(V_pf_state_z, cur);
pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
@@ -3354,7 +3364,7 @@ pf_change_ap(struct pf_pdesc *pd, struct pf_addr *a, u_int16_t *p,
u_int16_t po;
uint8_t u = pd->virtual_proto == IPPROTO_UDP;
- MPASS(pd->pcksum);
+ MPASS(pd->pcksum != NULL);
if (pd->af == AF_INET) {
MPASS(pd->ip_sum);
}
@@ -4740,7 +4750,8 @@ pf_tag_packet(struct pf_pdesc *pd, int tag)
} while (0)
enum pf_test_status
-pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
+pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r,
+ struct pf_krule_slist *match_rules)
{
enum pf_test_status rv;
@@ -4758,7 +4769,7 @@ pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
struct pf_kanchor *child;
rv = PF_TEST_OK;
RB_FOREACH(child, pf_kanchor_node, &r->anchor->children) {
- rv = pf_match_rule(ctx, &child->ruleset);
+ rv = pf_match_rule(ctx, &child->ruleset, match_rules);
if ((rv == PF_TEST_QUICK) || (rv == PF_TEST_FAIL)) {
/*
* we either hit a rule with quick action
@@ -4769,7 +4780,7 @@ pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
}
}
} else {
- rv = pf_match_rule(ctx, &r->anchor->ruleset);
+ rv = pf_match_rule(ctx, &r->anchor->ruleset, match_rules);
/*
* Unless errors occured, stop iff any rule matched
* within quick anchors.
@@ -5618,9 +5629,10 @@ pf_rule_apply_nat(struct pf_test_ctx *ctx, struct pf_krule *r)
}
enum pf_test_status
-pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
+pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset,
+ struct pf_krule_slist *match_rules)
{
- struct pf_krule_item *ri;
+ struct pf_krule_item *ri, *rt;
struct pf_krule *r;
struct pf_krule *save_a;
struct pf_kruleset *save_aruleset;
@@ -5777,11 +5789,14 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
return (PF_TEST_FAIL);
}
ri->r = r;
- SLIST_INSERT_HEAD(&ctx->rules, ri, entry);
- pf_counter_u64_critical_enter();
- pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
- pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
- pf_counter_u64_critical_exit();
+
+ if (SLIST_EMPTY(match_rules)) {
+ SLIST_INSERT_HEAD(match_rules, ri, entry);
+ } else {
+ SLIST_INSERT_AFTER(rt, ri, entry);
+ }
+ rt = ri;
+
pf_rule_to_actions(r, &pd->act);
if (r->log)
PFLOG_PACKET(r->action, PFRES_MATCH, r,
@@ -5805,7 +5820,7 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
ctx->arsm = ctx->aruleset;
}
if (pd->act.log & PF_LOG_MATCHES)
- pf_log_matches(pd, r, ctx->a, ruleset, &ctx->rules);
+ pf_log_matches(pd, r, ctx->a, ruleset, match_rules);
if (r->quick) {
ctx->test_status = PF_TEST_QUICK;
break;
@@ -5822,7 +5837,7 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
* Note: we don't need to restore if we are not going
* to continue with ruleset evaluation.
*/
- if (pf_step_into_anchor(ctx, r) != PF_TEST_OK) {
+ if (pf_step_into_anchor(ctx, r, match_rules) != PF_TEST_OK) {
break;
}
ctx->a = save_a;
@@ -5838,11 +5853,11 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
static int
pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
struct pf_pdesc *pd, struct pf_krule **am,
- struct pf_kruleset **rsm, u_short *reason, struct inpcb *inp)
+ struct pf_kruleset **rsm, u_short *reason, struct inpcb *inp,
+ struct pf_krule_slist *match_rules)
{
struct pf_krule *r = NULL;
struct pf_kruleset *ruleset = NULL;
- struct pf_krule_item *ri;
struct pf_test_ctx ctx;
u_short transerror;
int action = PF_PASS;
@@ -5859,7 +5874,6 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
ctx.rsm = rsm;
ctx.th = &pd->hdr.tcp;
ctx.reason = *reason;
- SLIST_INIT(&ctx.rules);
pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
@@ -5951,44 +5965,49 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
ctx.nat_pool = &(ctx.nr->rdr);
}
- ruleset = &pf_main_ruleset;
- rv = pf_match_rule(&ctx, ruleset);
- if (rv == PF_TEST_FAIL) {
- /*
- * Reason has been set in pf_match_rule() already.
- */
- goto cleanup;
- }
-
- r = *ctx.rm; /* matching rule */
- ctx.a = *ctx.am; /* rule that defines an anchor containing 'r' */
- ruleset = *ctx.rsm; /* ruleset of the anchor defined by the rule 'a' */
- ctx.aruleset = ctx.arsm; /* ruleset of the 'a' rule itself */
+ if (ctx.nr && ctx.nr->natpass) {
+ r = ctx.nr;
+ ruleset = *ctx.rsm;
+ } else {
+ ruleset = &pf_main_ruleset;
+ rv = pf_match_rule(&ctx, ruleset, match_rules);
+ if (rv == PF_TEST_FAIL) {
+ /*
+ * Reason has been set in pf_match_rule() already.
+ */
+ goto cleanup;
+ }
- REASON_SET(&ctx.reason, PFRES_MATCH);
+ r = *ctx.rm; /* matching rule */
+ ctx.a = *ctx.am; /* rule that defines an anchor containing 'r' */
+ ruleset = *ctx.rsm; /* ruleset of the anchor defined by the rule 'a' */
+ ctx.aruleset = ctx.arsm; /* ruleset of the 'a' rule itself */
- /* apply actions for last matching pass/block rule */
- pf_rule_to_actions(r, &pd->act);
- transerror = pf_rule_apply_nat(&ctx, r);
- switch (transerror) {
- case PFRES_MATCH:
- /* Translation action found in rule and applied successfully */
- case PFRES_MAX:
- /* No translation action found in rule */
- break;
- default:
- /* Translation action found in rule but failed to apply */
- REASON_SET(&ctx.reason, transerror);
- goto cleanup;
+ /* apply actions for last matching pass/block rule */
+ pf_rule_to_actions(r, &pd->act);
+ transerror = pf_rule_apply_nat(&ctx, r);
+ switch (transerror) {
+ case PFRES_MATCH:
+ /* Translation action found in rule and applied successfully */
+ case PFRES_MAX:
+ /* No translation action found in rule */
+ break;
+ default:
+ /* Translation action found in rule but failed to apply */
+ REASON_SET(&ctx.reason, transerror);
+ goto cleanup;
+ }
}
+ REASON_SET(&ctx.reason, PFRES_MATCH);
+
if (r->log) {
if (ctx.rewrite)
m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
PFLOG_PACKET(r->action, ctx.reason, r, ctx.a, ruleset, pd, 1, NULL);
}
if (pd->act.log & PF_LOG_MATCHES)
- pf_log_matches(pd, r, ctx.a, ruleset, &ctx.rules);
+ pf_log_matches(pd, r, ctx.a, ruleset, match_rules);
if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
(r->action == PF_DROP) &&
((r->rule_flag & PFRULE_RETURNRST) ||
@@ -6033,7 +6052,8 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
(pd->flags & PFDESC_TCP_NORM)))) {
bool nat64;
- action = pf_create_state(r, &ctx, sm, bproto_sum, bip_sum);
+ action = pf_create_state(r, &ctx, sm, bproto_sum, bip_sum,
+ match_rules);
ctx.sk = ctx.nk = NULL;
if (action != PF_PASS) {
pf_udp_mapping_release(ctx.udp_mapping);
@@ -6079,11 +6099,6 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
action = PF_AFRT;
}
} else {
- while ((ri = SLIST_FIRST(&ctx.rules))) {
- SLIST_REMOVE_HEAD(&ctx.rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
uma_zfree(V_pf_state_key_z, ctx.sk);
uma_zfree(V_pf_state_key_z, ctx.nk);
ctx.sk = ctx.nk = NULL;
@@ -6111,11 +6126,6 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
return (action);
cleanup:
- while ((ri = SLIST_FIRST(&ctx.rules))) {
- SLIST_REMOVE_HEAD(&ctx.rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
uma_zfree(V_pf_state_key_z, ctx.sk);
uma_zfree(V_pf_state_key_z, ctx.nk);
pf_udp_mapping_release(ctx.udp_mapping);
@@ -6126,7 +6136,8 @@ cleanup:
static int
pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
- struct pf_kstate **sm, u_int16_t bproto_sum, u_int16_t bip_sum)
+ struct pf_kstate **sm, u_int16_t bproto_sum, u_int16_t bip_sum,
+ struct pf_krule_slist *match_rules)
{
struct pf_pdesc *pd = ctx->pd;
struct pf_kstate *s = NULL;
@@ -6140,7 +6151,6 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
struct tcphdr *th = &pd->hdr.tcp;
u_int16_t mss = V_tcp_mssdflt;
u_short sn_reason;
- struct pf_krule_item *ri;
/* check maximums */
if (r->max_states &&
@@ -6192,7 +6202,7 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
s->rule = r;
s->nat_rule = ctx->nr;
s->anchor = ctx->a;
- memcpy(&s->match_rules, &ctx->rules, sizeof(s->match_rules));
+ s->match_rules = *match_rules;
memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions));
if (pd->act.allow_opts)
@@ -6356,11 +6366,6 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
return (PF_PASS);
csfailed:
- while ((ri = SLIST_FIRST(&ctx->rules))) {
- SLIST_REMOVE_HEAD(&ctx->rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
uma_zfree(V_pf_state_key_z, ctx->sk);
uma_zfree(V_pf_state_key_z, ctx->nk);
@@ -7518,6 +7523,7 @@ static void
pf_sctp_multihome_delayed(struct pf_pdesc *pd, struct pfi_kkif *kif,
struct pf_kstate *s, int action)
{
+ struct pf_krule_slist match_rules;
struct pf_sctp_multihome_job *j, *tmp;
struct pf_sctp_source *i;
int ret;
@@ -7565,8 +7571,14 @@ again:
if (s->rule->rule_flag & PFRULE_ALLOW_RELATED) {
j->pd.related_rule = s->rule;
}
+ SLIST_INIT(&match_rules);
ret = pf_test_rule(&r, &sm,
- &j->pd, &ra, &rs, &reason, NULL);
+ &j->pd, &ra, &rs, &reason, NULL, &match_rules);
+ /*
+ * Nothing to do about match rules, the processed
+ * packet has already increased the counters.
+ */
+ pf_free_match_rules(&match_rules);
PF_RULES_RUNLOCK();
SDT_PROBE4(pf, sctp, multihome, test, kif, r, j->pd.m, ret);
if (ret != PF_DROP && sm != NULL) {
@@ -7627,6 +7639,7 @@ again:
nj->pd.m = j->pd.m;
nj->op = j->op;
+ MPASS(nj->pd.pcksum);
TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, nj, next);
}
PF_SCTP_ENDPOINTS_UNLOCK();
@@ -7746,6 +7759,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
job->pd.m = pd->m;
job->op = op;
+ MPASS(job->pd.pcksum);
TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
break;
}
@@ -7779,6 +7793,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
job->pd.m = pd->m;
job->op = op;
+ MPASS(job->pd.pcksum);
TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
break;
}
@@ -9221,28 +9236,41 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
}
}
- if (r->rt == PF_DUPTO)
+ if (r->rt == PF_DUPTO || (pd->af != pd->naf && s->direction == PF_IN))
skip_test = true;
- if (pd->dir == PF_IN && !skip_test) {
- if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp,
- &pd->act) != PF_PASS) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
- goto bad;
- } else if (m0 == NULL) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
- goto done;
- }
- if (m0->m_len < sizeof(struct ip)) {
- DPFPRINTF(PF_DEBUG_URGENT,
- "%s: m0->m_len < sizeof(struct ip)", __func__);
- SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
- action = PF_DROP;
- goto bad;
+ if (pd->dir == PF_IN) {
+ if (skip_test) {
+ struct pfi_kkif *out_kif = (struct pfi_kkif *)ifp->if_pf_kif;
+ MPASS(s != NULL);
+ pf_counter_u64_critical_enter();
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_bytes[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], pd->tot_len);
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_packets[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], 1);
+ pf_counter_u64_critical_exit();
+ } else {
+ if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp,
+ &pd->act) != PF_PASS) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
+ goto bad;
+ } else if (m0 == NULL) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
+ goto done;
+ }
+ if (m0->m_len < sizeof(struct ip)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ "%s: m0->m_len < sizeof(struct ip)", __func__);
+ SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
+ action = PF_DROP;
+ goto bad;
+ }
+ ip = mtod(m0, struct ip *);
}
- ip = mtod(m0, struct ip *);
}
if (ifp->if_flags & IFF_LOOPBACK)
@@ -9538,29 +9566,42 @@ pf_route6(struct pf_krule *r, struct ifnet *oifp,
}
}
- if (r->rt == PF_DUPTO)
+ if (r->rt == PF_DUPTO || (pd->af != pd->naf && s->direction == PF_IN))
skip_test = true;
- if (pd->dir == PF_IN && !skip_test) {
- if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT,
- ifp, &m0, inp, &pd->act) != PF_PASS) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
- goto bad;
- } else if (m0 == NULL) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
- goto done;
- }
- if (m0->m_len < sizeof(struct ip6_hdr)) {
- DPFPRINTF(PF_DEBUG_URGENT,
- "%s: m0->m_len < sizeof(struct ip6_hdr)",
- __func__);
- action = PF_DROP;
- SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
- goto bad;
+ if (pd->dir == PF_IN) {
+ if (skip_test) {
+ struct pfi_kkif *out_kif = (struct pfi_kkif *)ifp->if_pf_kif;
+ MPASS(s != NULL);
+ pf_counter_u64_critical_enter();
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_bytes[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], pd->tot_len);
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_packets[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], 1);
+ pf_counter_u64_critical_exit();
+ } else {
+ if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT,
+ ifp, &m0, inp, &pd->act) != PF_PASS) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
+ goto bad;
+ } else if (m0 == NULL) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
+ goto done;
+ }
+ if (m0->m_len < sizeof(struct ip6_hdr)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ "%s: m0->m_len < sizeof(struct ip6_hdr)",
+ __func__);
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
+ goto bad;
+ }
+ ip6 = mtod(m0, struct ip6_hdr *);
}
- ip6 = mtod(m0, struct ip6_hdr *);
}
if (ifp->if_flags & IFF_LOOPBACK)
@@ -10410,28 +10451,28 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
__func__);
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
h = mtod(pd->m, struct ip *);
if (pd->m->m_pkthdr.len < ntohs(h->ip_len)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
if (pf_normalize_ip(reason, pd) != PF_PASS) {
/* We do IP header normalization and packet reassembly here */
*m0 = pd->m;
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
*m0 = pd->m;
h = mtod(pd->m, struct ip *);
if (pf_walk_header(pd, h, reason) != PF_PASS) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
pd->src = (struct pf_addr *)&h->ip_src;
@@ -10461,7 +10502,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
", pullup failed", __func__);
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
h = mtod(pd->m, struct ip6_hdr *);
@@ -10469,7 +10510,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
sizeof(struct ip6_hdr) + ntohs(h->ip6_plen)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
/*
@@ -10478,12 +10519,12 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
*/
if (htons(h->ip6_plen) == 0) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
if (pf_walk_header6(pd, h, reason) != PF_PASS) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
h = mtod(pd->m, struct ip6_hdr *);
@@ -10505,13 +10546,13 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
PF_PASS) {
*m0 = pd->m;
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
*m0 = pd->m;
if (pd->m == NULL) {
/* packet sits in reassembly queue, no error */
*action = PF_PASS;
- return (-1);
+ return (PF_DROP);
}
/* Update pointers into the packet. */
@@ -10523,7 +10564,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
if (pf_walk_header6(pd, h, reason) != PF_PASS) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
if (m_tag_find(pd->m, PACKET_TAG_PF_REASSEMBLED, NULL) != NULL) {
@@ -10553,7 +10594,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = sizeof(*th);
pd->p_len = pd->tot_len - pd->off - (th->th_off << 2);
@@ -10569,7 +10610,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = sizeof(*uh);
if (uh->uh_dport == 0 ||
@@ -10577,7 +10618,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
ntohs(uh->uh_ulen) < sizeof(struct udphdr)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->sport = &uh->uh_sport;
pd->dport = &uh->uh_dport;
@@ -10589,7 +10630,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = sizeof(pd->hdr.sctp);
pd->p_len = pd->tot_len - pd->off;
@@ -10599,19 +10640,23 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
if (pd->hdr.sctp.src_port == 0 || pd->hdr.sctp.dest_port == 0) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
- }
- if (pf_scan_sctp(pd) != PF_PASS) {
- *action = PF_DROP;
- REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
+
/*
* Placeholder. The SCTP checksum is 32-bits, but
* pf_test_state() expects to update a 16-bit checksum.
* Provide a dummy value which we'll subsequently ignore.
+ * Do this before pf_scan_sctp() so any jobs we enqueue
+ * have a pcksum set.
*/
pd->pcksum = &pd->sctp_dummy_sum;
+
+ if (pf_scan_sctp(pd) != PF_PASS) {
+ *action = PF_DROP;
+ REASON_SET(reason, PFRES_SHORT);
+ return (PF_DROP);
+ }
break;
}
case IPPROTO_ICMP: {
@@ -10619,7 +10664,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->pcksum = &pd->hdr.icmp.icmp_cksum;
pd->hdrlen = ICMP_MINLEN;
@@ -10633,7 +10678,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
/* ICMP headers we look further into to match state */
switch (pd->hdr.icmp6.icmp6_type) {
@@ -10659,7 +10704,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = icmp_hlen;
pd->pcksum = &pd->hdr.icmp6.icmp6_cksum;
@@ -10682,111 +10727,173 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
MPASS(pd->pcksum != NULL);
- return (0);
+ return (PF_PASS);
+}
+
+static __inline void
+pf_rule_counters_inc(struct pf_pdesc *pd, struct pf_krule *r, int dir_out,
+ int op_pass, sa_family_t af, struct pf_addr *src_host,
+ struct pf_addr *dst_host)
+{
+ pf_counter_u64_add_protected(&(r->packets[dir_out]), 1);
+ pf_counter_u64_add_protected(&(r->bytes[dir_out]), pd->tot_len);
+ pf_update_timestamp(r);
+
+ if (r->src.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(r->src.addr.p.tbl, src_host, af,
+ pd->tot_len, dir_out, op_pass, r->src.neg);
+ if (r->dst.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(r->dst.addr.p.tbl, dst_host, af,
+ pd->tot_len, dir_out, op_pass, r->dst.neg);
}
static void
-pf_counters_inc(int action, struct pf_pdesc *pd,
- struct pf_kstate *s, struct pf_krule *r, struct pf_krule *a)
+pf_counters_inc(int action, struct pf_pdesc *pd, struct pf_kstate *s,
+ struct pf_krule *r, struct pf_krule *a, struct pf_krule_slist *match_rules)
{
- struct pf_krule *tr;
- int dir = pd->dir;
- int dirndx;
+ struct pf_krule_slist *mr = match_rules;
+ struct pf_krule_item *ri;
+ struct pf_krule *nr = NULL;
+ struct pf_addr *src_host = pd->src;
+ struct pf_addr *dst_host = pd->dst;
+ struct pf_state_key *key;
+ int dir_out = (pd->dir == PF_OUT);
+ int op_r_pass = (r->action == PF_PASS);
+ int op_pass = (action == PF_PASS || action == PF_AFRT);
+ int s_dir_in, s_dir_out, s_dir_rev;
+ sa_family_t af = pd->af;
pf_counter_u64_critical_enter();
+
+ /*
+ * Set AF for interface counters, it will be later overwritten for
+ * rule and state counters with value from proper state key.
+ */
+ if (action == PF_AFRT) {
+ MPASS(s != NULL);
+ if (s->direction == PF_OUT && dir_out)
+ af = pd->naf;
+ }
+
pf_counter_u64_add_protected(
- &pd->kif->pfik_bytes[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS],
+ &pd->kif->pfik_bytes[af == AF_INET6][dir_out][!op_pass],
pd->tot_len);
pf_counter_u64_add_protected(
- &pd->kif->pfik_packets[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS],
+ &pd->kif->pfik_packets[af == AF_INET6][dir_out][!op_pass],
1);
- if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) {
- dirndx = (dir == PF_OUT);
- pf_counter_u64_add_protected(&r->packets[dirndx], 1);
- pf_counter_u64_add_protected(&r->bytes[dirndx], pd->tot_len);
- pf_update_timestamp(r);
+ /* If the rule has failed to apply, don't increase its counters */
+ if (!(op_pass || r->action == PF_DROP)) {
+ pf_counter_u64_critical_exit();
+ return;
+ }
+
+ if (s != NULL) {
+ PF_STATE_LOCK_ASSERT(s);
+ mr = &(s->match_rules);
- if (a != NULL) {
- pf_counter_u64_add_protected(&a->packets[dirndx], 1);
- pf_counter_u64_add_protected(&a->bytes[dirndx], pd->tot_len);
+ /*
+ * For af-to on the inbound direction we can determine
+ * the direction of passing packet only by checking direction
+ * of AF translation. The af-to in "in" direction covers both
+ * the inbound and the outbound side of state tracking,
+ * so pd->dir is always PF_IN. We set dir_out and s_dir_rev
+ * in a way to count packets as if the state was outbound,
+ * because pfctl -ss shows the state with "->", as if it was
+		 * outbound.
+ */
+ if (action == PF_AFRT && s->direction == PF_IN) {
+ dir_out = (pd->naf == s->rule->naf);
+ s_dir_in = 1;
+ s_dir_out = 0;
+ s_dir_rev = (pd->naf == s->rule->af);
+ } else {
+ dir_out = (pd->dir == PF_OUT);
+ s_dir_in = (s->direction == PF_IN);
+ s_dir_out = (s->direction == PF_OUT);
+ s_dir_rev = (pd->dir != s->direction);
}
- if (s != NULL) {
- struct pf_krule_item *ri;
- if (s->nat_rule != NULL) {
- pf_counter_u64_add_protected(&s->nat_rule->packets[dirndx],
+		/* pd->tot_len is problematic with af-to rules. Sure, we can
+		 * agree that it's the post-af-to packet length that was
+		 * forwarded through a state, but what about tables which match
+		 * on pre-af-to addresses? We don't have access to the
+		 * original packet length anymore.
+		 */
+ s->packets[s_dir_rev]++;
+ s->bytes[s_dir_rev] += pd->tot_len;
+
+ /*
+ * Source nodes are accessed unlocked here. But since we are
+ * operating with stateful tracking and the state is locked,
+ * those SNs could not have been freed.
+ */
+ for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
+ if (s->sns[sn_type] != NULL) {
+ counter_u64_add(
+ s->sns[sn_type]->packets[dir_out],
1);
- pf_counter_u64_add_protected(&s->nat_rule->bytes[dirndx],
+ counter_u64_add(
+ s->sns[sn_type]->bytes[dir_out],
pd->tot_len);
}
- /*
- * Source nodes are accessed unlocked here.
- * But since we are operating with stateful tracking
- * and the state is locked, those SNs could not have
- * been freed.
- */
- for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
- if (s->sns[sn_type] != NULL) {
- counter_u64_add(
- s->sns[sn_type]->packets[dirndx],
- 1);
- counter_u64_add(
- s->sns[sn_type]->bytes[dirndx],
- pd->tot_len);
- }
- }
- dirndx = (dir == s->direction) ? 0 : 1;
- s->packets[dirndx]++;
- s->bytes[dirndx] += pd->tot_len;
-
- SLIST_FOREACH(ri, &s->match_rules, entry) {
- pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1);
- pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd->tot_len);
+ }
- if (ri->r->src.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(ri->r->src.addr.p.tbl,
- (s == NULL) ? pd->src :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_OUT)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, ri->r->src.neg);
- if (ri->r->dst.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(ri->r->dst.addr.p.tbl,
- (s == NULL) ? pd->dst :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_IN)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, ri->r->dst.neg);
+ /* Start with pre-NAT addresses */
+ key = s->key[(s->direction == PF_OUT)];
+ src_host = &(key->addr[s_dir_out]);
+ dst_host = &(key->addr[s_dir_in]);
+ af = key->af;
+ if (s->nat_rule) {
+ /* Old-style NAT rules */
+ if (s->nat_rule->action == PF_NAT ||
+ s->nat_rule->action == PF_RDR ||
+ s->nat_rule->action == PF_BINAT) {
+ nr = s->nat_rule;
+ pf_rule_counters_inc(pd, s->nat_rule, dir_out,
+ op_r_pass, af, src_host, dst_host);
+ /* Use post-NAT addresses from now on */
+ key = s->key[s_dir_in];
+ src_host = &(key->addr[s_dir_out]);
+ dst_host = &(key->addr[s_dir_in]);
+ af = key->af;
}
}
+ }
- tr = r;
- if (s != NULL && s->nat_rule != NULL &&
- r == &V_pf_default_rule)
- tr = s->nat_rule;
-
- if (tr->src.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(tr->src.addr.p.tbl,
- (s == NULL) ? pd->src :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_OUT)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, tr->src.neg);
- if (tr->dst.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(tr->dst.addr.p.tbl,
- (s == NULL) ? pd->dst :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_IN)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, tr->dst.neg);
+ SLIST_FOREACH(ri, mr, entry) {
+ pf_rule_counters_inc(pd, ri->r, dir_out, op_r_pass, af,
+ src_host, dst_host);
+ if (s && s->nat_rule == ri->r) {
+ /* Use post-NAT addresses after a match NAT rule */
+ key = s->key[s_dir_in];
+ src_host = &(key->addr[s_dir_out]);
+ dst_host = &(key->addr[s_dir_in]);
+ af = key->af;
+ }
}
+
+ if (s == NULL) {
+ pf_free_match_rules(mr);
+ }
+
+ if (a != NULL) {
+ pf_rule_counters_inc(pd, a, dir_out, op_r_pass, af,
+ src_host, dst_host);
+ }
+
+ if (r != nr) {
+ pf_rule_counters_inc(pd, r, dir_out, op_r_pass, af,
+ src_host, dst_host);
+ }
+
pf_counter_u64_critical_exit();
}
+
static void
pf_log_matches(struct pf_pdesc *pd, struct pf_krule *rm,
struct pf_krule *am, struct pf_kruleset *ruleset,
- struct pf_krule_slist *matchrules)
+ struct pf_krule_slist *match_rules)
{
struct pf_krule_item *ri;
@@ -10794,7 +10901,7 @@ pf_log_matches(struct pf_pdesc *pd, struct pf_krule *rm,
if (rm->log & PF_LOG_MATCHES)
return;
- SLIST_FOREACH(ri, matchrules, entry)
+ SLIST_FOREACH(ri, match_rules, entry)
if (ri->r->log & PF_LOG_MATCHES)
PFLOG_PACKET(rm->action, PFRES_MATCH, rm, am,
ruleset, pd, 1, ri->r);
@@ -10811,6 +10918,8 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
struct pf_krule *a = NULL, *r = &V_pf_default_rule;
struct pf_kstate *s = NULL;
struct pf_kruleset *ruleset = NULL;
+ struct pf_krule_item *ri;
+ struct pf_krule_slist match_rules;
struct pf_pdesc pd;
int use_2nd_queue = 0;
uint16_t tag;
@@ -10847,6 +10956,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
}
pf_init_pdesc(&pd, *m0);
+ SLIST_INIT(&match_rules);
if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_MTAG_FLAG_ROUTE_TO)) {
pd.pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
@@ -10879,7 +10989,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
PF_RULES_RLOCK();
if (pf_setup_pdesc(af, dir, &pd, m0, &action, &reason,
- kif, default_actions) == -1) {
+ kif, default_actions) != PF_PASS) {
if (action != PF_PASS)
pd.act.log |= PF_LOG_FORCE;
goto done;
@@ -10943,7 +11053,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
action = PF_DROP;
else
action = pf_test_rule(&r, &s, &pd, &a,
- &ruleset, &reason, inp);
+ &ruleset, &reason, inp, &match_rules);
if (action != PF_PASS)
REASON_SET(&reason, PFRES_FRAG);
break;
@@ -11001,7 +11111,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
break;
} else {
action = pf_test_rule(&r, &s, &pd,
- &a, &ruleset, &reason, inp);
+ &a, &ruleset, &reason, inp, &match_rules);
}
}
break;
@@ -11022,7 +11132,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
a = s->anchor;
} else if (s == NULL) {
action = pf_test_rule(&r, &s,
- &pd, &a, &ruleset, &reason, inp);
+ &pd, &a, &ruleset, &reason, inp, &match_rules);
}
break;
@@ -11050,7 +11160,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
a = s->anchor;
} else if (s == NULL)
action = pf_test_rule(&r, &s, &pd,
- &a, &ruleset, &reason, inp);
+ &a, &ruleset, &reason, inp, &match_rules);
break;
}
@@ -11059,8 +11169,11 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
done:
PF_RULES_RUNLOCK();
- if (pd.m == NULL)
+ /* if packet sits in reassembly queue, return without error */
+ if (pd.m == NULL) {
+ pf_free_match_rules(&match_rules);
goto eat_pkt;
+ }
if (s)
memcpy(&pd.act, &s->act, sizeof(s->act));
@@ -11157,6 +11270,8 @@ done:
(dir == PF_IN) ? PF_DIVERT_MTAG_DIR_IN :
PF_DIVERT_MTAG_DIR_OUT;
+ pf_counters_inc(action, &pd, s, r, a, &match_rules);
+
if (s)
PF_STATE_UNLOCK(s);
@@ -11198,7 +11313,6 @@ done:
if (pd.act.log) {
struct pf_krule *lr;
- struct pf_krule_item *ri;
if (s != NULL && s->nat_rule != NULL &&
s->nat_rule->log & PF_LOG_ALL)
@@ -11217,7 +11331,7 @@ done:
}
}
- pf_counters_inc(action, &pd, s, r, a);
+ pf_counters_inc(action, &pd, s, r, a, &match_rules);
switch (action) {
case PF_SYNPROXY_DROP:
diff --git a/sys/netpfil/pf/pf_lb.c b/sys/netpfil/pf/pf_lb.c
index b8b5157c9b15..fb1b121d0bc0 100644
--- a/sys/netpfil/pf/pf_lb.c
+++ b/sys/netpfil/pf/pf_lb.c
@@ -73,7 +73,7 @@ VNET_DEFINE_STATIC(int, pf_rdr_srcport_rewrite_tries) = 16;
static uint64_t pf_hash(struct pf_addr *, struct pf_addr *,
struct pf_poolhashkey *, sa_family_t);
-struct pf_krule *pf_match_translation(int, struct pf_test_ctx *);
+static struct pf_krule *pf_match_translation(int, struct pf_test_ctx *);
static enum pf_test_status pf_step_into_translation_anchor(int, struct pf_test_ctx *,
struct pf_krule *);
static int pf_get_sport(struct pf_pdesc *, struct pf_krule *,
@@ -273,7 +273,7 @@ pf_step_into_translation_anchor(int rs_num, struct pf_test_ctx *ctx, struct pf_k
return (rv);
}
-struct pf_krule *
+static struct pf_krule *
pf_match_translation(int rs_num, struct pf_test_ctx *ctx)
{
enum pf_test_status rv;
diff --git a/sys/security/mac_do/mac_do.c b/sys/security/mac_do/mac_do.c
index 6f3e63d06198..2bcff7bba973 100644
--- a/sys/security/mac_do/mac_do.c
+++ b/sys/security/mac_do/mac_do.c
@@ -1992,6 +1992,10 @@ check_proc(void)
/*
* Only grant privileges if requested by the right executable.
*
+ * As MAC/do configuration is per-jail, in order to avoid confused
+ * deputy situations in chroots (privileged or unprivileged), make sure
+ * to check the path from the current jail's root.
+ *
* XXXOC: We may want to base this check on a tunable path and/or
* a specific MAC label. Going even further, e.g., envisioning to
* completely replace the path check with the latter, we would need to
@@ -2003,7 +2007,7 @@ check_proc(void)
* setting a MAC label per file (perhaps via additions to mtree(1)). So
* this probably isn't going to happen overnight, if ever.
*/
- if (vn_fullpath(curproc->p_textvp, &path, &to_free) != 0)
+ if (vn_fullpath_jail(curproc->p_textvp, &path, &to_free) != 0)
return (EPERM);
error = strcmp(path, "/usr/bin/mdo") == 0 ? 0 : EPERM;
free(to_free, M_TEMP);
diff --git a/sys/sys/eventhandler.h b/sys/sys/eventhandler.h
index c0d9811dd1b9..29a16b393b52 100644
--- a/sys/sys/eventhandler.h
+++ b/sys/sys/eventhandler.h
@@ -33,6 +33,7 @@
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
+#include <sys/power.h>
#include <sys/queue.h>
#ifdef VIMAGE
@@ -201,7 +202,7 @@ EVENTHANDLER_DECLARE(shutdown_post_sync, shutdown_fn); /* after fs sync */
EVENTHANDLER_DECLARE(shutdown_final, shutdown_fn);
/* Power state change events */
-typedef void (*power_change_fn)(void *);
+typedef void (*power_change_fn)(void *, enum power_stype stype);
EVENTHANDLER_DECLARE(power_resume, power_change_fn);
EVENTHANDLER_DECLARE(power_suspend, power_change_fn);
EVENTHANDLER_DECLARE(power_suspend_early, power_change_fn);
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 08d4e2d28b33..4f6b45d78a88 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -68,9 +68,9 @@
*/
#define MTX_UNOWNED 0x00000000 /* Cookie for free mutex */
#define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */
-#define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */
+#define MTX_WAITERS 0x00000002 /* lock has waiters (for MTX_DEF only) */
#define MTX_DESTROYED 0x00000004 /* lock destroyed */
-#define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)
+#define MTX_FLAGMASK (MTX_RECURSED | MTX_WAITERS | MTX_DESTROYED)
/*
* Prototypes
@@ -217,14 +217,10 @@ void _thread_lock(struct thread *);
#define _mtx_obtain_lock_fetch(mp, vp, tid) \
atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))
-/* Try to release mtx_lock if it is unrecursed and uncontested. */
+/* Try to release mtx_lock if it is unrecursed and without waiters. */
#define _mtx_release_lock(mp, tid) \
atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
-/* Release mtx_lock quickly, assuming we own it. */
-#define _mtx_release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
-
#define _mtx_release_lock_fetch(mp, vp) \
atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)
@@ -246,10 +242,10 @@ void _thread_lock(struct thread *);
})
/*
- * Lock a spin mutex. For spinlocks, we handle recursion inline (it
- * turns out that function calls can be significantly expensive on
- * some architectures). Since spin locks are not _too_ common,
- * inlining this code is not too big a deal.
+ * Lock a spin mutex.
+ *
+ * FIXME: spinlock_enter is a function call, defeating the point of inlining in
+ * this.
*/
#ifdef SMP
#define __mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({ \
@@ -317,10 +313,10 @@ void _thread_lock(struct thread *);
})
/*
- * Unlock a spin mutex. For spinlocks, we can handle everything
- * inline, as it's pretty simple and a function call would be too
- * expensive (at least on some architectures). Since spin locks are
- * not _too_ common, inlining this code is not too big a deal.
+ * Unlock a spin mutex.
+ *
+ * FIXME: spinlock_exit is a function call, defeating the point of inlining in
+ * this.
*
* Since we always perform a spinlock_enter() when attempting to acquire a
* spin lock, we need to always perform a matching spinlock_exit() when
@@ -332,7 +328,7 @@ void _thread_lock(struct thread *);
(mp)->mtx_recurse--; \
else { \
LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp); \
- _mtx_release_lock_quick((mp)); \
+ atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED); \
} \
spinlock_exit(); \
})
diff --git a/sys/sys/param.h b/sys/sys/param.h
index ce91430909ce..8a71693cff3d 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -74,7 +74,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1600000
+#define __FreeBSD_version 1600001
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 6d50583dc1b3..0bf438a1b821 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -714,6 +714,7 @@ int speedup_syncer(void);
int vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen);
int vn_getcwd(char *buf, char **retbuf, size_t *buflen);
int vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf);
+int vn_fullpath_jail(struct vnode *vp, char **retbuf, char **freebuf);
int vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf);
int vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp,
const char *hdrl_name, size_t hrdl_name_length, char **retbuf,
diff --git a/sys/tools/gdb/README.txt b/sys/tools/gdb/README.txt
new file mode 100644
index 000000000000..8c31565ddc42
--- /dev/null
+++ b/sys/tools/gdb/README.txt
@@ -0,0 +1,21 @@
+This directory contains Python scripts that can be loaded by GDB to help debug
+FreeBSD kernel crashes.
+
+Add new commands and functions in their own files. Functions with general
+utility should be added to freebsd.py. sys/tools/kernel-gdb.py is installed
+into the kernel debug directory (typically /usr/lib/debug/boot/kernel). It will
+be automatically loaded by kgdb when opening a vmcore, so if you add new GDB
+commands or functions, that script should be updated to import them, and you
+should document them here.
+
+To provide some rudimentary testing, selftest.py tries to exercise all of the
+commands and functions defined here. To use it, run selftest.sh to panic the
+system. Then, create a kernel dump or attach to the panicked kernel, and invoke
+the script with "python import selftest" in (k)gdb.
+
+Commands:
+acttrace Display a backtrace for all on-CPU threads
+
+Functions:
+$PCPU(<field>[, <cpuid>]) Display the value of a PCPU/DPCPU field
+$V(<variable>[, <vnet>]) Display the value of a VNET variable
diff --git a/sys/tools/gdb/acttrace.py b/sys/tools/gdb/acttrace.py
new file mode 100644
index 000000000000..147effbbddf1
--- /dev/null
+++ b/sys/tools/gdb/acttrace.py
@@ -0,0 +1,48 @@
+#
+# Copyright (c) 2022 The FreeBSD Foundation
+#
+# This software was developed by Mark Johnston under sponsorship from the
+# FreeBSD Foundation.
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+from freebsd import *
+from pcpu import *
+
+class acttrace(gdb.Command):
+ """
+ Register an acttrace command with gdb.
+
+ When run, acttrace prints the stack trace of all threads that were on-CPU
+ at the time of the panic.
+ """
+ def __init__(self):
+ super(acttrace, self).__init__("acttrace", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ # Save the current thread so that we can switch back after.
+ curthread = gdb.selected_thread()
+
+ for pcpu in pcpu_foreach():
+ td = pcpu['pc_curthread']
+ tid = td['td_tid']
+
+ gdb_thread = tid_to_gdb_thread(tid)
+ if gdb_thread is None:
+ raise gdb.error(f"failed to find GDB thread with TID {tid}")
+ else:
+ gdb_thread.switch()
+
+ p = td['td_proc']
+ print("Tracing command {} pid {} tid {} (CPU {})".format(
+ p['p_comm'], p['p_pid'], td['td_tid'], pcpu['pc_cpuid']))
+ gdb.execute("bt")
+ print()
+
+ curthread.switch()
+
+
+# Registers the command with gdb, doesn't do anything.
+acttrace()
diff --git a/sys/tools/gdb/freebsd.py b/sys/tools/gdb/freebsd.py
new file mode 100644
index 000000000000..81ea60373348
--- /dev/null
+++ b/sys/tools/gdb/freebsd.py
@@ -0,0 +1,75 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+
+def symval(name):
+ sym = gdb.lookup_global_symbol(name)
+ if sym is None:
+ sym = gdb.lookup_static_symbol(name)
+ if sym is None:
+ raise gdb.GdbError(f"Symbol '{name}' not found")
+ return sym.value()
+
+
+def _queue_foreach(head, field, headf, nextf):
+ elm = head[headf]
+ while elm != 0:
+ yield elm
+ elm = elm[field][nextf]
+
+
+def list_foreach(head, field):
+ """sys/queue.h-style iterator."""
+ return _queue_foreach(head, field, "lh_first", "le_next")
+
+
+def tailq_foreach(head, field):
+ """sys/queue.h-style iterator."""
+ return _queue_foreach(head, field, "tqh_first", "tqe_next")
+
+
+def linker_file_foreach():
+ """Iterate over loaded linker files."""
+ return tailq_foreach(symval("linker_files"), "link")
+
+
+def pcpu_foreach():
+ mp_maxid = symval("mp_maxid")
+ cpuid_to_pcpu = symval("cpuid_to_pcpu")
+
+ cpu = 0
+ while cpu <= mp_maxid:
+ pcpu = cpuid_to_pcpu[cpu]
+ if pcpu:
+ yield pcpu
+ cpu = cpu + 1
+
+
+def tid_to_gdb_thread(tid):
+ """Convert a FreeBSD kernel thread ID to a gdb inferior thread."""
+ for thread in gdb.inferiors()[0].threads():
+ if thread.ptid[2] == tid:
+ return thread
+ else:
+ return None
+
+
+def tdfind(tid, pid=-1):
+ """Convert a FreeBSD kernel thread ID to a struct thread pointer."""
+ td = tdfind.cached_threads.get(int(tid))
+ if td:
+ return td
+
+ for p in list_foreach(symval("allproc"), "p_list"):
+ if pid != -1 and pid != p['p_pid']:
+ continue
+ for td in tailq_foreach(p['p_threads'], "td_plist"):
+ ntid = td['td_tid']
+ tdfind.cached_threads[int(ntid)] = td
+ if ntid == tid:
+ return td
+tdfind.cached_threads = dict()
diff --git a/sys/tools/gdb/pcpu.py b/sys/tools/gdb/pcpu.py
new file mode 100644
index 000000000000..aadc4b2d42df
--- /dev/null
+++ b/sys/tools/gdb/pcpu.py
@@ -0,0 +1,77 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+from freebsd import *
+
+class pcpu(gdb.Function):
+ """
+ Register a function to lookup PCPU and DPCPU variables by name.
+
+ To look up the value of the PCPU field foo on CPU n, use
+ $PCPU("foo", n). This works for DPCPU fields too. If the CPU ID is
+ omitted, and the currently selected thread is on-CPU, that CPU is
+ used, otherwise an error is raised.
+ """
+ def __init__(self):
+ super(pcpu, self).__init__("PCPU")
+
+ def invoke(self, field, cpuid=-1):
+ if cpuid == -1:
+ cpuid = tdfind(gdb.selected_thread().ptid[2])['td_oncpu']
+ if cpuid == -1:
+ raise gdb.error("Currently selected thread is off-CPU")
+ if cpuid < 0 or cpuid > symval("mp_maxid"):
+ raise gdb.error(f"Currently selected on invalid CPU {cpuid}")
+ pcpu = symval("cpuid_to_pcpu")[cpuid]
+
+ # Are we dealing with a PCPU or DPCPU field?
+ field = field.string()
+ for f in gdb.lookup_type("struct pcpu").fields():
+ if f.name == "pc_" + field:
+ return pcpu["pc_" + field]
+
+ def uintptr_t(val):
+ return val.cast(gdb.lookup_type("uintptr_t"))
+
+ # We're dealing with a DPCPU field. This is handled similarly
+ # to VNET symbols, see vnet.py for comments.
+ pcpu_base = pcpu['pc_dynamic']
+ pcpu_entry = symval("pcpu_entry_" + field)
+ pcpu_entry_addr = uintptr_t(pcpu_entry.address)
+
+ for lf in linker_file_foreach():
+ block = gdb.block_for_pc(lf['ops']['cls']['methods'][0]['func'])
+ elf_file_t = gdb.lookup_type("elf_file_t", block).target()
+ ef = lf.cast(elf_file_t)
+
+ file_type = lf['ops']['cls']['name'].string()
+ if file_type == "elf64":
+ start = uintptr_t(ef['pcpu_start'])
+ if start == 0:
+ continue
+ end = uintptr_t(ef['pcpu_stop'])
+ base = uintptr_t(ef['pcpu_base'])
+ elif file_type == "elf64_obj":
+ for i in range(ef['nprogtab']):
+ pe = ef['progtab'][i]
+ if pe['name'].string() == "set_pcpu":
+ start = uintptr_t(pe['origaddr'])
+ end = start + uintptr_t(pe['size'])
+ base = uintptr_t(pe['addr'])
+ break
+ else:
+ continue
+ else:
+ path = lf['pathname'].string()
+ raise gdb.error(f"{path} has unexpected linker file type {file_type}")
+
+ if pcpu_entry_addr >= start and pcpu_entry_addr < end:
+ obj = gdb.Value(pcpu_base + pcpu_entry_addr - start + base)
+ return obj.cast(pcpu_entry.type.pointer()).dereference()
+
+# Register with gdb.
+pcpu()
diff --git a/sys/tools/gdb/selftest.py b/sys/tools/gdb/selftest.py
new file mode 100644
index 000000000000..41e9211c4bb3
--- /dev/null
+++ b/sys/tools/gdb/selftest.py
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+
+cmds = ["acttrace",
+ "p $V(\"tcbinfo\")",
+ "p $V(\"tcbinfo\", vnet0)",
+ "p $V(\"pf_status\")",
+ "p $V(\"pf_status\", \"gdbselftest\")",
+ "p $PCPU(\"curthread\")",
+ "p $PCPU(\"curthread\", 0)",
+ "p/x $PCPU(\"hardclocktime\", 1)",
+ "p $PCPU(\"pqbatch\")[0][0]",
+ "p $PCPU(\"ss\", 1)",
+ ]
+
+for cmd in cmds:
+ try:
+ print(f"Running command: '{cmd}'")
+ gdb.execute(cmd)
+ except gdb.error as e:
+ print(f"Command '{cmd}' failed: {e}")
+ break
+
+# We didn't hit any unexpected errors. This isn't as good as actually
+# verifying the output, but it's better than nothing.
+print("Everything seems OK")
diff --git a/sys/tools/gdb/selftest.sh b/sys/tools/gdb/selftest.sh
new file mode 100644
index 000000000000..252fae14af17
--- /dev/null
+++ b/sys/tools/gdb/selftest.sh
@@ -0,0 +1,23 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+set -e
+
+n=$(sysctl -n hw.ncpu)
+if [ $n -lt 2 ]; then
+ echo "This test requires at least 2 CPUs"
+ exit 1
+fi
+
+# Set up some things expected by selftest.py.
+kldload -n pf siftr
+pfctl -e || true
+jail -c name=gdbselftest vnet persist
+
+echo "I'm about to panic your system, ctrl-C now if that's not what you want."
+sleep 10
+sysctl debug.debugger_on_panic=0
+sysctl debug.kdb.panic=1
diff --git a/sys/tools/gdb/vnet.py b/sys/tools/gdb/vnet.py
new file mode 100644
index 000000000000..36b4d512a3eb
--- /dev/null
+++ b/sys/tools/gdb/vnet.py
@@ -0,0 +1,100 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+import traceback
+from freebsd import *
+
+class vnet(gdb.Function):
+ """
+ Register a function to look up VNET variables by name.
+
+ To look at the value of a VNET variable V_foo, print $V("foo"). The
+ currently selected thread's VNET is used by default, but can be optionally
+ specified as a second parameter, e.g., $V("foo", <vnet>), where <vnet> is a
+ pointer to a struct vnet (e.g., vnet0 or allprison.tqh_first->pr_vnet) or a
+ string naming a jail.
+ """
+ def __init__(self):
+ super(vnet, self).__init__("V")
+
+ def invoke(self, sym, vnet=None):
+ sym = sym.string()
+ if sym.startswith("V_"):
+ sym = sym[len("V_"):]
+ if gdb.lookup_symbol("sysctl___kern_features_vimage")[0] is None:
+ return symval(sym)
+
+ # Look up the VNET's base address.
+ if vnet is None:
+ vnet = tdfind(gdb.selected_thread().ptid[2])['td_vnet']
+ if not vnet:
+ # If curthread->td_vnet == NULL, vnet0 is the current vnet.
+ vnet = symval("vnet0")
+ elif vnet.type.is_string_like:
+ vnet = vnet.string()
+ for prison in tailq_foreach(symval("allprison"), "pr_list"):
+ if prison['pr_name'].string() == vnet:
+ vnet = prison['pr_vnet']
+ break
+ else:
+ raise gdb.error(f"No prison named {vnet}")
+
+ def uintptr_t(val):
+ return val.cast(gdb.lookup_type("uintptr_t"))
+
+ # Now the tricky part: compute the address of the symbol relative
+ # to the selected VNET. In the compiled kernel this is done at
+ # load time by applying a magic transformation to relocations
+ # against symbols in the vnet linker set. Here we have to apply
+ # the transformation manually.
+ vnet_data_base = vnet['vnet_data_base']
+ vnet_entry = symval("vnet_entry_" + sym)
+ vnet_entry_addr = uintptr_t(vnet_entry.address)
+
+ # First, which kernel module does the symbol belong to?
+ for lf in linker_file_foreach():
+ # Find the bounds of this linker file's VNET linker set. The
+ # struct containing the bounds depends on the type of the linker
+ # file, and unfortunately both are called elf_file_t. So we use a
+ # PC value from the compilation unit (either link_elf.c or
+ # link_elf_obj.c) to disambiguate.
+ block = gdb.block_for_pc(lf['ops']['cls']['methods'][0]['func'])
+ elf_file_t = gdb.lookup_type("elf_file_t", block).target()
+ ef = lf.cast(elf_file_t)
+
+ file_type = lf['ops']['cls']['name'].string()
+ if file_type == "elf64":
+ start = uintptr_t(ef['vnet_start'])
+ if start == 0:
+ # This linker file doesn't have a VNET linker set.
+ continue
+ end = uintptr_t(ef['vnet_stop'])
+ base = uintptr_t(ef['vnet_base'])
+ elif file_type == "elf64_obj":
+ for i in range(ef['nprogtab']):
+ pe = ef['progtab'][i]
+ if pe['name'].string() == "set_vnet":
+ start = uintptr_t(pe['origaddr'])
+ end = start + uintptr_t(pe['size'])
+ base = uintptr_t(pe['addr'])
+ break
+ else:
+ # This linker file doesn't have a VNET linker set.
+ continue
+ else:
+ path = lf['pathname'].string()
+ raise gdb.error(f"{path} has unexpected linker file type {file_type}")
+
+ if vnet_entry_addr >= start and vnet_entry_addr < end:
+ # The symbol belongs to this linker file, so compute the final
+ # address.
+ obj = gdb.Value(vnet_data_base + vnet_entry_addr - start + base)
+ return obj.cast(vnet_entry.type.pointer()).dereference()
+
+
+# Register with gdb.
+vnet()
diff --git a/sys/tools/kernel-gdb.py b/sys/tools/kernel-gdb.py
new file mode 100644
index 000000000000..8a41ef6efab1
--- /dev/null
+++ b/sys/tools/kernel-gdb.py
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "gdb"))
+
+# Import FreeBSD kernel debugging commands and modules below.
+import acttrace
+import pcpu
+import vnet
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 0921eee92b9d..736c5a66267e 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -2592,8 +2592,12 @@ ufs_print(
printf("\tnlink=%d, effnlink=%d, size=%jd", ip->i_nlink,
ip->i_effnlink, (intmax_t)ip->i_size);
- if (I_IS_UFS2(ip))
- printf(", extsize %d", ip->i_din2->di_extsize);
+ if (I_IS_UFS2(ip)) {
+ if (ip->i_din2 == NULL)
+ printf(", dinode=NULL (fields omitted)");
+ else
+ printf(", extsize=%d", ip->i_din2->di_extsize);
+ }
printf("\n\tgeneration=%jx, uid=%d, gid=%d, flags=0x%b\n",
(uintmax_t)ip->i_gen, ip->i_uid, ip->i_gid,
(uint32_t)ip->i_flags, PRINT_INODE_FLAGS);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 679b2e20e88b..b80b5cc781f7 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -4009,21 +4009,15 @@ restart:
/*
* Use the keg's policy if upper layers haven't already specified a
* domain (as happens with first-touch zones).
- *
- * To avoid races we run the iterator with the keg lock held, but that
- * means that we cannot allow the vm_domainset layer to sleep. Thus,
- * clear M_WAITOK and handle low memory conditions locally.
*/
rr = rdomain == UMA_ANYDOMAIN;
+ aflags = flags;
if (rr) {
- aflags = (flags & ~M_WAITOK) | M_NOWAIT;
if (vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
&aflags) != 0)
return (NULL);
- } else {
- aflags = flags;
+ } else
domain = rdomain;
- }
for (;;) {
slab = keg_fetch_free_slab(keg, domain, rr, flags);
@@ -4053,13 +4047,8 @@ restart:
if ((flags & M_WAITOK) == 0)
break;
vm_wait_domain(domain);
- } else if (vm_domainset_iter_policy(&di, &domain) != 0) {
- if ((flags & M_WAITOK) != 0) {
- vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
- goto restart;
- }
+ } else if (vm_domainset_iter_policy(&di, &domain) != 0)
break;
- }
}
/*
@@ -5245,7 +5234,7 @@ uma_prealloc(uma_zone_t zone, int items)
KEG_GET(zone, keg);
slabs = howmany(items, keg->uk_ipers);
while (slabs-- > 0) {
- aflags = M_NOWAIT;
+ aflags = M_WAITOK;
if (vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
&aflags) != 0)
panic("%s: Domainset is empty", __func__);
@@ -5266,7 +5255,8 @@ uma_prealloc(uma_zone_t zone, int items)
break;
}
if (vm_domainset_iter_policy(&di, &domain) != 0)
- vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
+ panic("%s: Cannot allocate from any domain",
+ __func__);
}
}
}
diff --git a/sys/x86/include/mca.h b/sys/x86/include/mca.h
index 183480625f6d..553b5d765f17 100644
--- a/sys/x86/include/mca.h
+++ b/sys/x86/include/mca.h
@@ -44,6 +44,31 @@ struct mca_record {
int mr_cpu;
};
+enum mca_stat_types {
+ MCA_T_NONE = 0,
+ MCA_T_UNCLASSIFIED,
+ MCA_T_UCODE_ROM_PARITY,
+ MCA_T_EXTERNAL,
+ MCA_T_FRC,
+ MCA_T_INTERNAL_PARITY,
+ MCA_T_SMM_HANDLER,
+ MCA_T_INTERNAL_TIMER,
+ MCA_T_GENERIC_IO,
+ MCA_T_INTERNAL,
+ MCA_T_MEMORY,
+ MCA_T_TLB,
+ MCA_T_MEMCONTROLLER_GEN,
+ MCA_T_MEMCONTROLLER_RD,
+ MCA_T_MEMCONTROLLER_WR,
+ MCA_T_MEMCONTROLLER_AC,
+ MCA_T_MEMCONTROLLER_MS,
+ MCA_T_MEMCONTROLLER_OTHER,
+ MCA_T_CACHE,
+ MCA_T_BUS,
+ MCA_T_UNKNOWN,
+ MCA_T_COUNT /* Must stay last */
+};
+
#ifdef _KERNEL
void cmc_intr(void);
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index 4ba49469d3a2..735efe307215 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -46,9 +46,11 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
+#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
@@ -124,6 +126,22 @@ SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
&workaround_erratum383, 0,
"Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");
+#ifdef DIAGNOSTIC
+static uint64_t fake_status;
+SYSCTL_U64(_hw_mca, OID_AUTO, fake_status, CTLFLAG_RW,
+ &fake_status, 0,
+ "Insert artificial MCA with given status (testing purpose only)");
+static int fake_bank;
+SYSCTL_INT(_hw_mca, OID_AUTO, fake_bank, CTLFLAG_RW,
+ &fake_bank, 0,
+ "Bank to use for artificial MCAs (testing purpose only)");
+#endif
+
+static bool mca_uselog = false;
+SYSCTL_BOOL(_hw_mca, OID_AUTO, uselog, CTLFLAG_RWTUN, &mca_uselog, 0,
+ "Should the system send non-fatal machine check errors to the log "
+ "(instead of the console)?");
+
static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
@@ -131,8 +149,44 @@ static STAILQ_HEAD(, mca_internal) mca_pending;
static int mca_ticks = 300;
static struct taskqueue *mca_tq;
static struct task mca_resize_task;
+static struct task mca_postscan_task;
static struct timeout_task mca_scan_task;
static struct mtx mca_lock;
+static bool mca_startup_done = false;
+
+/* Static buffer to compose messages while in an interrupt context. */
+static char mca_msg_buf[1024];
+static struct mtx mca_msg_buf_lock;
+
+/* Statistics on number of MCA events by type, updated with the mca_lock. */
+static uint64_t mca_stats[MCA_T_COUNT];
+SYSCTL_OPAQUE(_hw_mca, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_SKIP,
+ mca_stats, MCA_T_COUNT * sizeof(mca_stats[0]),
+ "S", "Array of MCA events by type");
+
+/* Variables to track and control message rate limiting. */
+static struct timeval mca_last_log_time;
+static struct timeval mca_log_interval;
+static int mca_log_skipped;
+
+static int
+sysctl_mca_log_interval(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ u_int val;
+
+ val = mca_log_interval.tv_sec;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ mca_log_interval.tv_sec = val;
+ return (0);
+}
+SYSCTL_PROC(_hw_mca, OID_AUTO, log_interval,
+ CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, &mca_log_interval, 0,
+ sysctl_mca_log_interval, "IU",
+ "Minimum number of seconds between logging correctable MCAs"
+ " (0 = no limit)");
static unsigned int
mca_ia32_ctl_reg(int bank)
@@ -356,21 +410,27 @@ mca_error_request(uint16_t mca_error)
}
static const char *
-mca_error_mmtype(uint16_t mca_error)
+mca_error_mmtype(uint16_t mca_error, enum mca_stat_types *event_type)
{
switch ((mca_error & 0x70) >> 4) {
case 0x0:
+ *event_type = MCA_T_MEMCONTROLLER_GEN;
return ("GEN");
case 0x1:
+ *event_type = MCA_T_MEMCONTROLLER_RD;
return ("RD");
case 0x2:
+ *event_type = MCA_T_MEMCONTROLLER_WR;
return ("WR");
case 0x3:
+ *event_type = MCA_T_MEMCONTROLLER_AC;
return ("AC");
case 0x4:
+ *event_type = MCA_T_MEMCONTROLLER_MS;
return ("MS");
}
+ *event_type = MCA_T_MEMCONTROLLER_OTHER;
return ("???");
}
@@ -423,87 +483,111 @@ mca_mute(const struct mca_record *rec)
/* Dump details about a single machine check. */
static void
-mca_log(const struct mca_record *rec)
+mca_log(enum scan_mode mode, const struct mca_record *rec, bool fatal)
{
+ int error, numskipped;
uint16_t mca_error;
+ enum mca_stat_types event_type;
+ struct sbuf sb;
+ bool uncor, using_shared_buf;
if (mca_mute(rec))
return;
- if (!log_corrected && (rec->mr_status & MC_STATUS_UC) == 0 &&
- (!tes_supported(rec->mr_mcg_cap) ||
+ uncor = (rec->mr_status & MC_STATUS_UC) != 0;
+
+ if (!log_corrected && !uncor && (!tes_supported(rec->mr_mcg_cap) ||
((rec->mr_status & MC_STATUS_TES_STATUS) >> 53) != 0x2))
return;
- printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
+ /* Try to use an allocated buffer when not in an interrupt context. */
+ if (mode == POLLED && sbuf_new(&sb, NULL, 512, SBUF_AUTOEXTEND) != NULL)
+ using_shared_buf = false;
+ else {
+ using_shared_buf = true;
+ mtx_lock_spin(&mca_msg_buf_lock);
+ sbuf_new(&sb, mca_msg_buf, sizeof(mca_msg_buf), SBUF_FIXEDLEN);
+ }
+
+ sbuf_printf(&sb, "MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
(long long)rec->mr_status);
- printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
+ sbuf_printf(&sb, "MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
(long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
- printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
- rec->mr_cpu_id, rec->mr_apic_id);
- printf("MCA: CPU %d ", rec->mr_cpu);
+ sbuf_printf(&sb, "MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n",
+ cpu_vendor, rec->mr_cpu_id, rec->mr_apic_id);
+ sbuf_printf(&sb, "MCA: CPU %d ", rec->mr_cpu);
if (rec->mr_status & MC_STATUS_UC)
- printf("UNCOR ");
+ sbuf_printf(&sb, "UNCOR ");
else {
- printf("COR ");
+ sbuf_printf(&sb, "COR ");
if (cmci_supported(rec->mr_mcg_cap))
- printf("(%lld) ", ((long long)rec->mr_status &
+ sbuf_printf(&sb, "(%lld) ", ((long long)rec->mr_status &
MC_STATUS_COR_COUNT) >> 38);
if (tes_supported(rec->mr_mcg_cap)) {
switch ((rec->mr_status & MC_STATUS_TES_STATUS) >> 53) {
case 0x1:
- printf("(Green) ");
+ sbuf_printf(&sb, "(Green) ");
break;
case 0x2:
- printf("(Yellow) ");
+ sbuf_printf(&sb, "(Yellow) ");
break;
}
}
}
if (rec->mr_status & MC_STATUS_EN)
- printf("EN ");
+ sbuf_printf(&sb, "EN ");
if (rec->mr_status & MC_STATUS_PCC)
- printf("PCC ");
+ sbuf_printf(&sb, "PCC ");
if (ser_supported(rec->mr_mcg_cap)) {
if (rec->mr_status & MC_STATUS_S)
- printf("S ");
+ sbuf_printf(&sb, "S ");
if (rec->mr_status & MC_STATUS_AR)
- printf("AR ");
+ sbuf_printf(&sb, "AR ");
}
if (rec->mr_status & MC_STATUS_OVER)
- printf("OVER ");
+ sbuf_printf(&sb, "OVER ");
mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
+ event_type = MCA_T_COUNT;
switch (mca_error) {
/* Simple error codes. */
case 0x0000:
- printf("no error");
+ sbuf_printf(&sb, "no error");
+ event_type = MCA_T_NONE;
break;
case 0x0001:
- printf("unclassified error");
+ sbuf_printf(&sb, "unclassified error");
+ event_type = MCA_T_UNCLASSIFIED;
break;
case 0x0002:
- printf("ucode ROM parity error");
+ sbuf_printf(&sb, "ucode ROM parity error");
+ event_type = MCA_T_UCODE_ROM_PARITY;
break;
case 0x0003:
- printf("external error");
+ sbuf_printf(&sb, "external error");
+ event_type = MCA_T_EXTERNAL;
break;
case 0x0004:
- printf("FRC error");
+ sbuf_printf(&sb, "FRC error");
+ event_type = MCA_T_FRC;
break;
case 0x0005:
- printf("internal parity error");
+ sbuf_printf(&sb, "internal parity error");
+ event_type = MCA_T_INTERNAL_PARITY;
break;
case 0x0006:
- printf("SMM handler code access violation");
+ sbuf_printf(&sb, "SMM handler code access violation");
+ event_type = MCA_T_SMM_HANDLER;
break;
case 0x0400:
- printf("internal timer error");
+ sbuf_printf(&sb, "internal timer error");
+ event_type = MCA_T_INTERNAL_TIMER;
break;
case 0x0e0b:
- printf("generic I/O error");
+ sbuf_printf(&sb, "generic I/O error");
+ event_type = MCA_T_GENERIC_IO;
if (rec->mr_cpu_vendor_id == CPU_VENDOR_INTEL &&
(rec->mr_status & MC_STATUS_MISCV)) {
- printf(" (pci%d:%d:%d:%d)",
+ sbuf_printf(&sb, " (pci%d:%d:%d:%d)",
(int)((rec->mr_misc & MC_MISC_PCIE_SEG) >> 32),
(int)((rec->mr_misc & MC_MISC_PCIE_BUS) >> 24),
(int)((rec->mr_misc & MC_MISC_PCIE_SLOT) >> 19),
@@ -512,7 +596,9 @@ mca_log(const struct mca_record *rec)
break;
default:
if ((mca_error & 0xfc00) == 0x0400) {
- printf("internal error %x", mca_error & 0x03ff);
+ sbuf_printf(&sb, "internal error %x",
+ mca_error & 0x03ff);
+ event_type = MCA_T_INTERNAL;
break;
}
@@ -520,101 +606,168 @@ mca_log(const struct mca_record *rec)
/* Memory hierarchy error. */
if ((mca_error & 0xeffc) == 0x000c) {
- printf("%s memory error", mca_error_level(mca_error));
+ sbuf_printf(&sb, "%s memory error",
+ mca_error_level(mca_error));
+ event_type = MCA_T_MEMORY;
break;
}
/* TLB error. */
if ((mca_error & 0xeff0) == 0x0010) {
- printf("%sTLB %s error", mca_error_ttype(mca_error),
+ sbuf_printf(&sb, "%sTLB %s error",
+ mca_error_ttype(mca_error),
mca_error_level(mca_error));
+ event_type = MCA_T_TLB;
break;
}
/* Memory controller error. */
if ((mca_error & 0xef80) == 0x0080) {
- printf("%s channel ", mca_error_mmtype(mca_error));
+ sbuf_printf(&sb, "%s channel ",
+ mca_error_mmtype(mca_error, &event_type));
if ((mca_error & 0x000f) != 0x000f)
- printf("%d", mca_error & 0x000f);
+ sbuf_printf(&sb, "%d", mca_error & 0x000f);
else
- printf("??");
- printf(" memory error");
+ sbuf_printf(&sb, "??");
+ sbuf_printf(&sb, " memory error");
break;
}
/* Cache error. */
if ((mca_error & 0xef00) == 0x0100) {
- printf("%sCACHE %s %s error",
+ sbuf_printf(&sb, "%sCACHE %s %s error",
mca_error_ttype(mca_error),
mca_error_level(mca_error),
mca_error_request(mca_error));
+ event_type = MCA_T_CACHE;
break;
}
/* Extended memory error. */
if ((mca_error & 0xef80) == 0x0280) {
- printf("%s channel ", mca_error_mmtype(mca_error));
+ sbuf_printf(&sb, "%s channel ",
+ mca_error_mmtype(mca_error, &event_type));
if ((mca_error & 0x000f) != 0x000f)
- printf("%d", mca_error & 0x000f);
+ sbuf_printf(&sb, "%d", mca_error & 0x000f);
else
- printf("??");
- printf(" extended memory error");
+ sbuf_printf(&sb, "??");
+ sbuf_printf(&sb, " extended memory error");
break;
}
/* Bus and/or Interconnect error. */
if ((mca_error & 0xe800) == 0x0800) {
- printf("BUS%s ", mca_error_level(mca_error));
+ sbuf_printf(&sb, "BUS%s ", mca_error_level(mca_error));
+ event_type = MCA_T_BUS;
switch ((mca_error & 0x0600) >> 9) {
case 0:
- printf("Source");
+ sbuf_printf(&sb, "Source");
break;
case 1:
- printf("Responder");
+ sbuf_printf(&sb, "Responder");
break;
case 2:
- printf("Observer");
+ sbuf_printf(&sb, "Observer");
break;
default:
- printf("???");
+ sbuf_printf(&sb, "???");
break;
}
- printf(" %s ", mca_error_request(mca_error));
+ sbuf_printf(&sb, " %s ", mca_error_request(mca_error));
switch ((mca_error & 0x000c) >> 2) {
case 0:
- printf("Memory");
+ sbuf_printf(&sb, "Memory");
break;
case 2:
- printf("I/O");
+ sbuf_printf(&sb, "I/O");
break;
case 3:
- printf("Other");
+ sbuf_printf(&sb, "Other");
break;
default:
- printf("???");
+ sbuf_printf(&sb, "???");
break;
}
if (mca_error & 0x0100)
- printf(" timed out");
+ sbuf_printf(&sb, " timed out");
break;
}
- printf("unknown error %x", mca_error);
+ sbuf_printf(&sb, "unknown error %x", mca_error);
+ event_type = MCA_T_UNKNOWN;
break;
}
- printf("\n");
+ sbuf_printf(&sb, "\n");
if (rec->mr_status & MC_STATUS_ADDRV) {
- printf("MCA: Address 0x%llx", (long long)rec->mr_addr);
+ sbuf_printf(&sb, "MCA: Address 0x%llx",
+ (long long)rec->mr_addr);
if (ser_supported(rec->mr_mcg_cap) &&
(rec->mr_status & MC_STATUS_MISCV)) {
- printf(" (Mode: %s, LSB: %d)",
+ sbuf_printf(&sb, " (Mode: %s, LSB: %d)",
mca_addres_mode(rec->mr_misc),
(int)(rec->mr_misc & MC_MISC_RA_LSB));
}
- printf("\n");
+ sbuf_printf(&sb, "\n");
}
if (rec->mr_status & MC_STATUS_MISCV)
- printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
+ sbuf_printf(&sb, "MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
+
+ if (event_type < 0 || event_type >= MCA_T_COUNT) {
+ KASSERT(0, ("%s: invalid event type (%d)", __func__,
+ event_type));
+ event_type = MCA_T_UNKNOWN;
+ }
+ numskipped = 0;
+ if (!fatal && !uncor) {
+ /*
+ * Update statistics and check the rate limit for
+ * correctable errors. The rate limit is only applied
+ * after the system records a reasonable number of errors
+ * of the same type. The goal is to reduce the impact of
+ * the system seeing and attempting to log a burst of
+ * similar errors, which (especially when printed to the
+ * console) can be expensive.
+ */
+ mtx_lock_spin(&mca_lock);
+ mca_stats[event_type]++;
+ if (mca_log_interval.tv_sec > 0 && mca_stats[event_type] > 50 &&
+ ratecheck(&mca_last_log_time, &mca_log_interval) == 0) {
+ mca_log_skipped++;
+ mtx_unlock_spin(&mca_lock);
+ goto done;
+ }
+ numskipped = mca_log_skipped;
+ mca_log_skipped = 0;
+ mtx_unlock_spin(&mca_lock);
+ }
+
+ error = sbuf_finish(&sb);
+ if (fatal || !mca_uselog) {
+ if (numskipped > 0)
+ printf("MCA: %d events skipped due to rate limit\n",
+ numskipped);
+ if (error)
+ printf("MCA: error logging message (sbuf error %d)\n",
+ error);
+ else
+ sbuf_putbuf(&sb);
+ } else {
+ if (numskipped > 0)
+ log(LOG_ERR,
+ "MCA: %d events skipped due to rate limit\n",
+ numskipped);
+ if (error)
+ log(LOG_ERR,
+ "MCA: error logging message (sbuf error %d)\n",
+ error);
+ else
+ log(uncor ? LOG_CRIT : LOG_ERR, "%s", sbuf_data(&sb));
+ }
+
+done:
+ sbuf_delete(&sb);
+ if (using_shared_buf)
+ mtx_unlock_spin(&mca_msg_buf_lock);
}
static bool
@@ -662,8 +815,24 @@ mca_check_status(enum scan_mode mode, uint64_t mcg_cap, int bank,
bool mce, recover;
status = rdmsr(mca_msr_ops.status(bank));
- if (!(status & MC_STATUS_VAL))
+ if (!(status & MC_STATUS_VAL)) {
+#ifdef DIAGNOSTIC
+ /*
+ * Check if we have a pending artificial event to generate.
+ * Note that this is potentially racy with the sysctl. The
+ * tradeoff is deemed acceptable given the test nature
+ * of the code.
+ */
+ if (fake_status && bank == fake_bank) {
+ status = fake_status;
+ fake_status = 0;
+ }
+ if (!(status & MC_STATUS_VAL))
+ return (0);
+#else
return (0);
+#endif
+ }
recover = *recoverablep;
mce = mca_is_mce(mcg_cap, status, &recover);
@@ -757,9 +926,9 @@ mca_record_entry(enum scan_mode mode, const struct mca_record *record)
mtx_lock_spin(&mca_lock);
rec = STAILQ_FIRST(&mca_freelist);
if (rec == NULL) {
- printf("MCA: Unable to allocate space for an event.\n");
- mca_log(record);
mtx_unlock_spin(&mca_lock);
+ printf("MCA: Unable to allocate space for an event.\n");
+ mca_log(mode, record, false);
return;
}
STAILQ_REMOVE_HEAD(&mca_freelist, link);
@@ -916,7 +1085,7 @@ mca_scan(enum scan_mode mode, bool *recoverablep)
if (*recoverablep)
mca_record_entry(mode, &rec);
else
- mca_log(&rec);
+ mca_log(mode, &rec, true);
}
#ifdef DEV_APIC
@@ -978,18 +1147,49 @@ static void
mca_process_records(enum scan_mode mode)
{
struct mca_internal *mca;
+ STAILQ_HEAD(, mca_internal) tmplist;
+
+ /*
+ * If in an interrupt context, defer the post-scan activities to a
+ * task queue.
+ */
+ if (mode != POLLED) {
+ if (mca_startup_done)
+ taskqueue_enqueue(mca_tq, &mca_postscan_task);
+ return;
+ }
+
+ /*
+ * Copy the pending list to the stack so we can drop the spin lock
+ * while we are emitting logs.
+ */
+ STAILQ_INIT(&tmplist);
+ mtx_lock_spin(&mca_lock);
+ STAILQ_SWAP(&mca_pending, &tmplist, mca_internal);
+ mtx_unlock_spin(&mca_lock);
+
+ STAILQ_FOREACH(mca, &tmplist, link)
+ mca_log(mode, &mca->rec, false);
mtx_lock_spin(&mca_lock);
- while ((mca = STAILQ_FIRST(&mca_pending)) != NULL) {
- STAILQ_REMOVE_HEAD(&mca_pending, link);
- mca_log(&mca->rec);
+ while ((mca = STAILQ_FIRST(&tmplist)) != NULL) {
+ STAILQ_REMOVE_HEAD(&tmplist, link);
mca_store_record(mca);
}
mtx_unlock_spin(&mca_lock);
- if (mode == POLLED)
- mca_resize_freelist();
- else if (!cold)
- taskqueue_enqueue(mca_tq, &mca_resize_task);
+ mca_resize_freelist();
+}
+
+/*
+ * Emit log entries and resize the free list. This is intended to be called
+ * from a task queue to handle work which does not need to be done (or cannot
+ * be done) in an interrupt context.
+ */
+static void
+mca_postscan(void *context __unused, int pending __unused)
+{
+
+ mca_process_records(POLLED);
}
/*
@@ -1060,7 +1260,7 @@ sysctl_mca_maxcount(SYSCTL_HANDLER_ARGS)
doresize = true;
}
mtx_unlock_spin(&mca_lock);
- if (doresize && !cold)
+ if (doresize && mca_startup_done)
taskqueue_enqueue(mca_tq, &mca_resize_task);
return (error);
}
@@ -1072,12 +1272,16 @@ mca_startup(void *dummy)
if (mca_banks <= 0)
return;
- /* CMCIs during boot may have claimed items from the freelist. */
- mca_resize_freelist();
-
taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");
taskqueue_enqueue_timeout_sbt(mca_tq, &mca_scan_task,
mca_ticks * SBT_1S, 0, C_PREL(1));
+ mca_startup_done = true;
+
+ /*
+ * CMCIs during boot may have recorded entries. Conduct the post-scan
+ * activities now.
+ */
+ mca_postscan(NULL, 0);
}
SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
@@ -1130,6 +1334,7 @@ mca_setup(uint64_t mcg_cap)
mca_banks = mcg_cap & MCG_CAP_COUNT;
mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
+ mtx_init(&mca_msg_buf_lock, "mca_msg_buf", NULL, MTX_SPIN);
STAILQ_INIT(&mca_records);
STAILQ_INIT(&mca_pending);
mca_tq = taskqueue_create_fast("mca", M_WAITOK,
@@ -1137,6 +1342,7 @@ mca_setup(uint64_t mcg_cap)
TIMEOUT_TASK_INIT(mca_tq, &mca_scan_task, 0, mca_scan_cpus, NULL);
STAILQ_INIT(&mca_freelist);
TASK_INIT(&mca_resize_task, 0, mca_resize, NULL);
+ TASK_INIT(&mca_postscan_task, 0, mca_postscan, NULL);
mca_resize_freelist();
SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
"count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
@@ -1540,6 +1746,9 @@ mca_intr(void)
panic("Unrecoverable machine check exception");
}
+ if (count)
+ mca_process_records(MCE);
+
/* Clear MCIP. */
wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}