Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/acpica/acpi_wakeup.c | 4
-rw-r--r--  sys/amd64/amd64/elf_machdep.c | 14
-rw-r--r--  sys/amd64/amd64/machdep.c | 33
-rw-r--r--  sys/amd64/amd64/support.S | 16
-rw-r--r--  sys/amd64/conf/GENERIC | 7
-rw-r--r--  sys/amd64/include/cpufunc.h | 11
-rw-r--r--  sys/amd64/include/md_var.h | 4
-rw-r--r--  sys/amd64/include/param.h | 11
-rw-r--r--  sys/amd64/include/vmm.h | 100
-rw-r--r--  sys/amd64/linux/linux_sysvec.c | 12
-rw-r--r--  sys/amd64/linux32/linux32_sysvec.c | 12
-rw-r--r--  sys/amd64/vmm/intel/vmx_support.S | 8
-rw-r--r--  sys/amd64/vmm/io/vioapic.c | 9
-rw-r--r--  sys/amd64/vmm/vmm.c | 47
-rw-r--r--  sys/amd64/vmm/vmm_dev_machdep.c | 1
-rw-r--r--  sys/amd64/vmm/vmm_lapic.c | 5
-rw-r--r--  sys/amd64/vmm/x86.c | 18
-rw-r--r--  sys/arm/allwinner/aw_gpio.c | 7
-rw-r--r--  sys/arm/allwinner/aw_sid.c | 2
-rw-r--r--  sys/arm/allwinner/axp209.c | 5
-rw-r--r--  sys/arm/allwinner/axp81x.c | 3
-rw-r--r--  sys/arm/arm/generic_timer.c | 54
-rw-r--r--  sys/arm/arm/pmap-v6.c | 2
-rw-r--r--  sys/arm/arm/pmu_fdt.c | 4
-rw-r--r--  sys/arm/arm/unwind.c | 4
-rw-r--r--  sys/arm/broadcom/bcm2835/bcm2835_gpio.c | 7
-rw-r--r--  sys/arm/broadcom/bcm2835/raspberrypi_gpio.c | 3
-rw-r--r--  sys/arm/conf/TEGRA124 | 2
-rw-r--r--  sys/arm/freescale/imx/imx_gpio.c | 7
-rw-r--r--  sys/arm/freescale/vybrid/vf_gpio.c | 3
-rw-r--r--  sys/arm/include/atomic.h | 8
-rw-r--r--  sys/arm/mv/a37x0_gpio.c | 3
-rw-r--r--  sys/arm/mv/gpio.c | 3
-rw-r--r--  sys/arm/mv/mvebu_gpio.c | 7
-rw-r--r--  sys/arm/nvidia/tegra_gpio.c | 7
-rw-r--r--  sys/arm/ti/ti_gpio.c | 7
-rw-r--r--  sys/arm/xilinx/zy7_gpio.c | 3
-rw-r--r--  sys/arm64/apple/apple_pinctrl.c | 3
-rw-r--r--  sys/arm64/arm64/cpu_errata.c | 96
-rw-r--r--  sys/arm64/arm64/cpu_feat.c | 51
-rw-r--r--  sys/arm64/arm64/efirt_machdep.c | 14
-rw-r--r--  sys/arm64/arm64/elf32_machdep.c | 4
-rw-r--r--  sys/arm64/arm64/elf_machdep.c | 7
-rw-r--r--  sys/arm64/arm64/exception.S | 9
-rw-r--r--  sys/arm64/arm64/identcpu.c | 55
-rw-r--r--  sys/arm64/arm64/locore.S | 45
-rw-r--r--  sys/arm64/arm64/machdep.c | 33
-rw-r--r--  sys/arm64/arm64/pmap.c | 140
-rw-r--r--  sys/arm64/arm64/ptrauth.c | 53
-rw-r--r--  sys/arm64/arm64/spec_workaround.c | 166
-rw-r--r--  sys/arm64/arm64/trap.c | 1
-rw-r--r--  sys/arm64/conf/std.arm64 | 1
-rw-r--r--  sys/arm64/coresight/coresight.c | 2
-rw-r--r--  sys/arm64/include/armreg.h | 39
-rw-r--r--  sys/arm64/include/cpu.h | 30
-rw-r--r--  sys/arm64/include/cpu_feat.h | 52
-rw-r--r--  sys/arm64/include/hypervisor.h | 175
-rw-r--r--  sys/arm64/include/pmap.h | 3
-rw-r--r--  sys/arm64/include/proc.h | 1
-rw-r--r--  sys/arm64/include/vmm.h | 33
-rw-r--r--  sys/arm64/linux/linux_sysvec.c | 10
-rw-r--r--  sys/arm64/rockchip/rk_gpio.c | 204
-rw-r--r--  sys/arm64/rockchip/rk_grf_gpio.c | 3
-rw-r--r--  sys/arm64/rockchip/rk_tsadc.c | 2
-rw-r--r--  sys/arm64/vmm/arm64.h | 41
-rw-r--r--  sys/arm64/vmm/hyp.h | 1
-rw-r--r--  sys/arm64/vmm/io/vtimer.c | 94
-rw-r--r--  sys/arm64/vmm/io/vtimer.h | 2
-rw-r--r--  sys/arm64/vmm/vmm.c | 77
-rw-r--r--  sys/arm64/vmm/vmm_arm64.c | 15
-rw-r--r--  sys/arm64/vmm/vmm_hyp.c | 103
-rw-r--r--  sys/arm64/vmm/vmm_reset.c | 7
-rw-r--r--  sys/cam/ata/ata_all.c | 4
-rw-r--r--  sys/cam/ata/ata_da.c | 9
-rw-r--r--  sys/cam/nvme/nvme_da.c | 5
-rw-r--r--  sys/cam/scsi/scsi_all.c | 6
-rw-r--r--  sys/cam/scsi/scsi_da.c | 4
-rw-r--r--  sys/cam/scsi/scsi_enc_ses.c | 6
-rw-r--r--  sys/cddl/boot/zfs/zfsimpl.h | 10
-rw-r--r--  sys/cddl/compat/opensolaris/kern/opensolaris.c | 2
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c | 36
-rw-r--r--  sys/cddl/dev/dtrace/aarch64/dtrace_subr.c | 27
-rw-r--r--  sys/cddl/dev/dtrace/amd64/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/arm/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/i386/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/powerpc/dtrace_subr.c | 25
-rw-r--r--  sys/cddl/dev/dtrace/riscv/dtrace_subr.c | 27
-rw-r--r--  sys/compat/freebsd32/freebsd32_syscall.h | 10
-rw-r--r--  sys/compat/freebsd32/freebsd32_syscalls.c | 8
-rw-r--r--  sys/compat/freebsd32/freebsd32_sysent.c | 8
-rw-r--r--  sys/compat/freebsd32/freebsd32_systrace_args.c | 148
-rw-r--r--  sys/compat/ia32/ia32_sysvec.c | 24
-rw-r--r--  sys/compat/lindebugfs/lindebugfs.c | 8
-rw-r--r--  sys/compat/linprocfs/linprocfs.c | 256
-rw-r--r--  sys/compat/linsysfs/linsysfs.c | 159
-rw-r--r--  sys/compat/linsysfs/linsysfs_net.c | 24
-rw-r--r--  sys/compat/linux/linux.c | 26
-rw-r--r--  sys/compat/linux/linux_common.h | 2
-rw-r--r--  sys/compat/linux/linux_futex.c | 2
-rw-r--r--  sys/compat/linux/linux_misc.c | 41
-rw-r--r--  sys/compat/linux/linux_netlink.c | 37
-rw-r--r--  sys/compat/linux/linux_socket.c | 44
-rw-r--r--  sys/compat/linux/linux_uid16.c | 39
-rw-r--r--  sys/compat/linuxkpi/common/include/acpi/acpi.h | 4
-rw-r--r--  sys/compat/linuxkpi/common/include/asm/topology.h | 54
-rw-r--r--  sys/compat/linuxkpi/common/include/kunit/static_stub.h | 15
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/bitops.h | 8
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/cleanup.h | 49
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/compiler.h | 6
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/device.h | 5
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/gfp.h | 1
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/idr.h | 7
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/ieee80211.h | 50
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/ioport.h | 1
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/math.h | 2
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/math64.h | 6
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/netdevice.h | 9
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/overflow.h | 180
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/pci.h | 23
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/printk.h | 6
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/rcupdate.h | 5
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/refcount.h | 1
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/seq_file.h | 15
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/skbuff.h | 18
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/slab.h | 4
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/string_choices.h | 71
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/string_helpers.h | 65
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/sysfs.h | 57
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/timer.h | 21
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/topology.h | 35
-rw-r--r--  sys/compat/linuxkpi/common/include/net/cfg80211.h | 49
-rw-r--r--  sys/compat/linuxkpi/common/include/net/mac80211.h | 26
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_80211.c | 985
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_80211.h | 39
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_80211_macops.c | 2
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_acpi.c | 33
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_compat.c | 11
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_devres.c | 26
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_pci.c | 190
-rw-r--r--  sys/compat/linuxkpi/common/src/linux_seq_file.c | 9
-rw-r--r--  sys/compat/linuxkpi/dummy/include/kunit/skbuff.h | 0
-rw-r--r--  sys/compat/linuxkpi/dummy/include/kunit/test-bug.h | 0
-rw-r--r--  sys/compat/linuxkpi/dummy/include/kunit/test.h | 0
-rw-r--r--  sys/conf/NOTES | 16
-rw-r--r--  sys/conf/dtb.build.mk | 4
-rw-r--r--  sys/conf/files | 51
-rw-r--r--  sys/conf/files.amd64 | 10
-rw-r--r--  sys/conf/files.arm | 2
-rw-r--r--  sys/conf/files.arm64 | 5
-rw-r--r--  sys/conf/files.powerpc | 3
-rw-r--r--  sys/conf/files.x86 | 1
-rw-r--r--  sys/conf/kern.opts.mk | 10
-rw-r--r--  sys/conf/kern.post.mk | 44
-rw-r--r--  sys/conf/kern.pre.mk | 7
-rw-r--r--  sys/conf/kmod.mk | 23
-rw-r--r--  sys/conf/newvers.sh | 12
-rw-r--r--  sys/conf/options | 9
-rw-r--r--  sys/conf/std.debug | 1
-rw-r--r--  sys/conf/std.nodebug | 1
-rw-r--r--  sys/contrib/dev/acpica/changes.txt | 28
-rw-r--r--  sys/contrib/dev/acpica/common/adisasm.c | 12
-rw-r--r--  sys/contrib/dev/acpica/common/ahtable.c | 1
-rw-r--r--  sys/contrib/dev/acpica/common/dmtable.c | 1
-rw-r--r--  sys/contrib/dev/acpica/common/dmtbdump2.c | 2
-rw-r--r--  sys/contrib/dev/acpica/common/dmtbinfo2.c | 2
-rw-r--r--  sys/contrib/dev/acpica/common/dmtbinfo3.c | 2
-rw-r--r--  sys/contrib/dev/acpica/compiler/aslanalyze.c | 16
-rw-r--r--  sys/contrib/dev/acpica/compiler/aslrestype2s.c | 2
-rw-r--r--  sys/contrib/dev/acpica/compiler/dttable2.c | 77
-rw-r--r--  sys/contrib/dev/acpica/compiler/dttemplate.c | 24
-rw-r--r--  sys/contrib/dev/acpica/compiler/dttemplate.h | 24
-rw-r--r--  sys/contrib/dev/acpica/compiler/dtutils.c | 1
-rw-r--r--  sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c | 2
-rw-r--r--  sys/contrib/dev/acpica/components/dispatcher/dsmethod.c | 29
-rw-r--r--  sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c | 1
-rw-r--r--  sys/contrib/dev/acpica/components/events/evglock.c | 5
-rw-r--r--  sys/contrib/dev/acpica/components/executer/extrace.c | 66
-rw-r--r--  sys/contrib/dev/acpica/components/parser/psopinfo.c | 8
-rw-r--r--  sys/contrib/dev/acpica/components/tables/tbprint.c | 8
-rw-r--r--  sys/contrib/dev/acpica/components/utilities/utnonansi.c | 2
-rw-r--r--  sys/contrib/dev/acpica/include/acdebug.h | 2
-rw-r--r--  sys/contrib/dev/acpica/include/acexcep.h | 9
-rw-r--r--  sys/contrib/dev/acpica/include/acinterp.h | 4
-rw-r--r--  sys/contrib/dev/acpica/include/acpixf.h | 8
-rw-r--r--  sys/contrib/dev/acpica/include/actbl.h | 2
-rw-r--r--  sys/contrib/dev/acpica/include/actbl1.h | 2
-rw-r--r--  sys/contrib/dev/acpica/include/actbl2.h | 25
-rw-r--r--  sys/contrib/dev/qat/qat_402xx.bin | bin 0 -> 665360 bytes
-rw-r--r--  sys/contrib/dev/qat/qat_402xx_mmp.bin | bin 0 -> 150084 bytes
-rw-r--r--  sys/contrib/dev/rtw88/main.c | 57
-rw-r--r--  sys/contrib/dev/rtw89/fw.c | 4
-rw-r--r--  sys/contrib/libnv/bsd_nvpair.c | 8
-rw-r--r--  sys/contrib/libnv/nvlist.c | 10
-rw-r--r--  sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md | 2
-rw-r--r--  sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md | 5
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py | 2
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh | 10
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh | 16
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh | 13
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh | 23
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh | 39
-rwxr-xr-x  sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh | 2
-rw-r--r--  sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml | 13
-rw-r--r--  sys/contrib/openzfs/.github/workflows/zfs-qemu.yml | 21
-rw-r--r--  sys/contrib/openzfs/.mailmap | 4
-rw-r--r--  sys/contrib/openzfs/AUTHORS | 13
-rw-r--r--  sys/contrib/openzfs/META | 4
-rw-r--r--  sys/contrib/openzfs/Makefile.am | 4
-rw-r--r--  sys/contrib/openzfs/cmd/Makefile.am | 11
-rwxr-xr-x  sys/contrib/openzfs/cmd/zarcstat.in (renamed from sys/contrib/openzfs/cmd/arcstat.in) | 22
-rwxr-xr-x  sys/contrib/openzfs/cmd/zarcsummary (renamed from sys/contrib/openzfs/cmd/arc_summary) | 6
-rw-r--r--  sys/contrib/openzfs/cmd/zdb/zdb.c | 195
-rw-r--r--  sys/contrib/openzfs/cmd/zdb/zdb.h | 2
-rw-r--r--  sys/contrib/openzfs/cmd/zdb/zdb_il.c | 2
-rw-r--r--  sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am | 24
-rwxr-xr-x  sys/contrib/openzfs/cmd/zed/zed.d/deadman-sync-slot_off.sh (renamed from sys/contrib/openzfs/cmd/zed/zed.d/deadman-slot_off.sh) | 0
l---------  sys/contrib/openzfs/cmd/zed/zed.d/pool_import-led.sh | 1
l---------  sys/contrib/openzfs/cmd/zed/zed.d/pool_import-sync-led.sh | 1
-rwxr-xr-x  sys/contrib/openzfs/cmd/zed/zed.d/statechange-sync-led.sh (renamed from sys/contrib/openzfs/cmd/zed/zed.d/statechange-led.sh) | 0
-rwxr-xr-x  sys/contrib/openzfs/cmd/zed/zed.d/statechange-sync-slot_off.sh (renamed from sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh) | 0
l---------  sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-led.sh | 1
l---------  sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-sync-led.sh | 1
l---------  sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-led.sh | 1
l---------  sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-sync-led.sh | 1
-rw-r--r--  sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh | 3
-rw-r--r--  sys/contrib/openzfs/cmd/zed/zed_exec.c | 111
-rw-r--r--  sys/contrib/openzfs/cmd/zfs/zfs_main.c | 54
-rw-r--r--  sys/contrib/openzfs/cmd/zhack.c | 345
-rwxr-xr-x  sys/contrib/openzfs/cmd/zilstat.in | 1
-rw-r--r--  sys/contrib/openzfs/cmd/zinject/zinject.c | 81
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/Makefile.am | 5
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4 | 48
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_iter.c | 118
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_main.c | 78
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_util.h | 3
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_vdev.c | 26
-rw-r--r--  sys/contrib/openzfs/cmd/zstream/Makefile.am | 5
-rw-r--r--  sys/contrib/openzfs/cmd/ztest.c | 29
-rw-r--r--  sys/contrib/openzfs/config/Shellcheck.am | 12
-rw-r--r--  sys/contrib/openzfs/config/always-arch.m4 | 18
-rw-r--r--  sys/contrib/openzfs/config/always-compiler-options.m4 | 83
-rw-r--r--  sys/contrib/openzfs/config/kernel-blkdev.m4 | 9
-rw-r--r--  sys/contrib/openzfs/config/kernel-dentry-operations.m4 | 37
-rw-r--r--  sys/contrib/openzfs/config/kernel-mkdir.m4 | 2
-rw-r--r--  sys/contrib/openzfs/config/kernel.m4 | 2
-rw-r--r--  sys/contrib/openzfs/config/toolchain-simd.m4 | 113
-rw-r--r--  sys/contrib/openzfs/config/user-statx.m4 | 6
-rw-r--r--  sys/contrib/openzfs/config/zfs-build.m4 | 3
-rw-r--r--  sys/contrib/openzfs/contrib/debian/control | 8
-rw-r--r--  sys/contrib/openzfs/contrib/debian/copyright | 4
-rw-r--r--  sys/contrib/openzfs/contrib/debian/not-installed | 4
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libnvpair3.install.in | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.install | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.postinst | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libuutil3.install.in | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libzfs-dev.install.in | 6
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libzfs6.install.in | 4
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libzfsbootenv1.install.in | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-libzpool6.install.in | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-zfs-zed.install | 2
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install | 72
-rw-r--r--  sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.links | 3
-rwxr-xr-x  sys/contrib/openzfs/contrib/debian/rules.in | 45
-rwxr-xr-x  sys/contrib/openzfs/contrib/debian/tree/zfs-initramfs/usr/share/initramfs-tools/hooks/zdev | 2
-rwxr-xr-x  sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in | 3
-rw-r--r--  sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE | 253
-rw-r--r--  sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README | 11
-rw-r--r--  sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S | 1328
-rw-r--r--  sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in | 9
-rw-r--r--  sys/contrib/openzfs/contrib/initramfs/scripts/zfs | 3
-rw-r--r--  sys/contrib/openzfs/contrib/intel_qat/readme.md | 2
-rw-r--r--  sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c | 4
-rw-r--r--  sys/contrib/openzfs/contrib/pyzfs/libzfs_core/exceptions.py | 1
-rw-r--r--  sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py | 2
-rw-r--r--  sys/contrib/openzfs/etc/init.d/README.md | 2
-rw-r--r--  sys/contrib/openzfs/include/libzfs.h | 2
-rw-r--r--  sys/contrib/openzfs/include/os/freebsd/spl/sys/debug.h | 4
-rw-r--r--  sys/contrib/openzfs/include/os/freebsd/spl/sys/proc.h | 4
-rw-r--r--  sys/contrib/openzfs/include/os/freebsd/spl/sys/time.h | 11
-rw-r--r--  sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h | 18
-rw-r--r--  sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h | 26
-rw-r--r--  sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h | 47
-rw-r--r--  sys/contrib/openzfs/include/os/linux/spl/sys/debug.h | 4
-rw-r--r--  sys/contrib/openzfs/include/os/linux/spl/sys/mutex.h | 2
-rw-r--r--  sys/contrib/openzfs/include/os/linux/spl/sys/rwlock.h | 2
-rw-r--r--  sys/contrib/openzfs/include/os/linux/spl/sys/stat.h | 2
-rw-r--r--  sys/contrib/openzfs/include/os/linux/spl/sys/time.h | 8
-rw-r--r--  sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h | 6
-rw-r--r--  sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h | 1
-rw-r--r--  sys/contrib/openzfs/include/sys/dmu.h | 10
-rw-r--r--  sys/contrib/openzfs/include/sys/dmu_impl.h | 4
-rw-r--r--  sys/contrib/openzfs/include/sys/dnode.h | 14
-rw-r--r--  sys/contrib/openzfs/include/sys/dsl_deleg.h | 1
-rw-r--r--  sys/contrib/openzfs/include/sys/fm/fs/zfs.h | 1
-rw-r--r--  sys/contrib/openzfs/include/sys/fs/zfs.h | 5
-rw-r--r--  sys/contrib/openzfs/include/sys/range_tree.h | 5
-rw-r--r--  sys/contrib/openzfs/include/sys/spa.h | 5
-rw-r--r--  sys/contrib/openzfs/include/sys/spa_impl.h | 1
-rw-r--r--  sys/contrib/openzfs/include/sys/vdev_impl.h | 6
-rw-r--r--  sys/contrib/openzfs/include/sys/vdev_raidz.h | 3
-rw-r--r--  sys/contrib/openzfs/include/sys/vdev_raidz_impl.h | 2
-rw-r--r--  sys/contrib/openzfs/include/sys/zfs_file.h | 2
-rw-r--r--  sys/contrib/openzfs/include/sys/zfs_ioctl.h | 1
-rw-r--r--  sys/contrib/openzfs/include/sys/zfs_znode.h | 2
-rw-r--r--  sys/contrib/openzfs/include/sys/zil.h | 33
-rw-r--r--  sys/contrib/openzfs/include/sys/zil_impl.h | 22
-rw-r--r--  sys/contrib/openzfs/include/sys/zio.h | 33
-rw-r--r--  sys/contrib/openzfs/include/sys/zvol.h | 2
-rw-r--r--  sys/contrib/openzfs/include/sys/zvol_impl.h | 5
-rw-r--r--  sys/contrib/openzfs/include/zfs_deleg.h | 1
-rw-r--r--  sys/contrib/openzfs/lib/libicp/Makefile.am | 1
-rw-r--r--  sys/contrib/openzfs/lib/libspl/Makefile.am | 3
-rw-r--r--  sys/contrib/openzfs/lib/libspl/include/os/linux/sys/stat.h | 2
-rw-r--r--  sys/contrib/openzfs/lib/libspl/include/sys/debug.h | 4
-rw-r--r--  sys/contrib/openzfs/lib/libspl/include/sys/simd.h | 28
-rw-r--r--  sys/contrib/openzfs/lib/libspl/include/sys/time.h | 9
-rw-r--r--  sys/contrib/openzfs/lib/libuutil/libuutil.abi | 105
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs.abi | 176
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_config.c | 17
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_diff.c | 4
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_import.c | 2
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_mount.c | 2
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_pool.c | 14
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c | 8
-rw-r--r--  sys/contrib/openzfs/lib/libzfs/libzfs_util.c | 5
-rw-r--r--  sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi | 105
-rw-r--r--  sys/contrib/openzfs/lib/libzpool/abd_os.c | 4
-rw-r--r--  sys/contrib/openzfs/lib/libzpool/kernel.c | 83
-rw-r--r--  sys/contrib/openzfs/lib/libzpool/util.c | 8
-rw-r--r--  sys/contrib/openzfs/lib/libzutil/zutil_import.c | 43
-rw-r--r--  sys/contrib/openzfs/man/Makefile.am | 19
-rw-r--r--  sys/contrib/openzfs/man/man1/cstyle.1 | 2
-rw-r--r--  sys/contrib/openzfs/man/man1/zarcstat.1 (renamed from sys/contrib/openzfs/man/man1/arcstat.1) | 6
-rw-r--r--  sys/contrib/openzfs/man/man1/zhack.1 | 22
-rw-r--r--  sys/contrib/openzfs/man/man1/ztest.1 | 2
-rw-r--r--  sys/contrib/openzfs/man/man4/spl.4 | 2
-rw-r--r--  sys/contrib/openzfs/man/man4/zfs.4 | 86
-rw-r--r--  sys/contrib/openzfs/man/man5/vdev_id.conf.5 | 2
-rw-r--r--  sys/contrib/openzfs/man/man7/dracut.zfs.7 | 2
-rw-r--r--  sys/contrib/openzfs/man/man7/vdevprops.7 | 41
-rw-r--r--  sys/contrib/openzfs/man/man7/zfsconcepts.7 | 2
-rw-r--r--  sys/contrib/openzfs/man/man7/zfsprops.7 | 24
-rw-r--r--  sys/contrib/openzfs/man/man7/zpool-features.7 | 2
-rw-r--r--  sys/contrib/openzfs/man/man7/zpoolconcepts.7 | 2
-rw-r--r--  sys/contrib/openzfs/man/man7/zpoolprops.7 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zdb.8 | 26
-rw-r--r--  sys/contrib/openzfs/man/man8/zed.8.in | 34
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-allow.8 | 5
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-bookmark.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-clone.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-create.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-destroy.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-diff.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-hold.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-jail.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-list.8 | 47
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-load-key.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-mount-generator.8.in | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-mount.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-project.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-promote.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-rename.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-rewrite.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-send.8 | 26
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-set.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-share.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-snapshot.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-upgrade.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-userspace.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-wait.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs-zone.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zfs_ids_to_path.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zgenhostid.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zinject.8 | 14
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-attach.8 | 22
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-checkpoint.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-clear.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-create.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-destroy.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-detach.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-events.8 | 10
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-export.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-get.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-history.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-import.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-initialize.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-iostat.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-labelclear.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-list.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-offline.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-reguid.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-remove.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-reopen.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-replace.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-resilver.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-scrub.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-split.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-status.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-sync.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-trim.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-upgrade.8 | 6
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool-wait.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zpool.8 | 2
-rw-r--r--  sys/contrib/openzfs/man/man8/zstream.8 | 2
-rw-r--r--  sys/contrib/openzfs/module/Kbuild.in | 3
-rw-r--r--  sys/contrib/openzfs/module/avl/avl.c | 16
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/modes/gcm.c | 371
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/modes/modes.c | 2
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c | 3
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c | 3
-rw-r--r--  sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl | 253
-rw-r--r--  sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip | 1
-rw-r--r--  sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S | 1323
-rw-r--r--  sys/contrib/openzfs/module/icp/core/kcf_sched.c | 2
-rw-r--r--  sys/contrib/openzfs/module/icp/include/modes/modes.h | 13
-rw-r--r--  sys/contrib/openzfs/module/icp/io/aes.c | 10
-rw-r--r--  sys/contrib/openzfs/module/nvpair/nvpair.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c | 7
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c | 41
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c | 3
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c | 11
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c | 117
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c | 24
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c | 226
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-generic.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c | 24
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-thread.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/abd_os.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c | 12
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c | 24
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c | 3
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c | 33
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c | 132
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c | 58
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c | 55
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c | 63
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c | 89
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c | 231
-rw-r--r--  sys/contrib/openzfs/module/zcommon/simd_stat.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zcommon/zfs_deleg.c | 1
-rw-r--r--  sys/contrib/openzfs/module/zcommon/zfs_prop.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zcommon/zpool_prop.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/abd.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/arc.c | 69
-rw-r--r--  sys/contrib/openzfs/module/zfs/bpobj.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/btree.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/dataset_kstats.c | 1
-rw-r--r--  sys/contrib/openzfs/module/zfs/dbuf.c | 102
-rw-r--r--  sys/contrib/openzfs/module/zfs/ddt.c | 16
-rw-r--r--  sys/contrib/openzfs/module/zfs/ddt_log.c | 38
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu.c | 18
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_direct.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_object.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_objset.c | 10
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_recv.c | 12
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_redact.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_send.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_tx.c | 18
-rw-r--r--  sys/contrib/openzfs/module/zfs/dnode.c | 188
-rw-r--r--  sys/contrib/openzfs/module/zfs/dnode_sync.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_bookmark.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_crypt.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_dataset.c | 10
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_deadlist.c | 5
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_deleg.c | 20
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_destroy.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_dir.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_pool.c | 10
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_prop.c | 31
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_scan.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_userhold.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/fm.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/metaslab.c | 34
-rw-r--r--  sys/contrib/openzfs/module/zfs/mmp.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/multilist.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/range_tree.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/rrwlock.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/sa.c | 24
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa.c | 66
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_config.c | 117
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_misc.c | 56
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_stats.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/space_map.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/space_reftree.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev.c | 178
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_draid.c | 44
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_file.c | 3
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_indirect.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_initialize.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_label.c | 16
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_queue.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_raidz.c | 353
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_rebuild.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_removal.c | 22
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_trim.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/zap.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/zap_micro.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/zcp.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfeature.c | 9
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_crrd.c | 7
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_fuid.c | 44
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_ioctl.c | 142
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_log.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_quota.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_rlock.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_sa.c | 15
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_vnops.c | 27
-rw-r--r--  sys/contrib/openzfs/module/zfs/zil.c | 780
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio.c | 148
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio_checksum.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio_compress.c | 15
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio_inject.c | 40
-rw-r--r--  sys/contrib/openzfs/module/zfs/zrlock.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zthr.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zvol.c | 419
-rw-r--r--  sys/contrib/openzfs/module/zstd/zfs_zstd.c | 66
-rw-r--r--  sys/contrib/openzfs/rpm/generic/zfs.spec.in | 7
-rwxr-xr-x  sys/contrib/openzfs/scripts/mancheck.sh | 17
-rwxr-xr-x  sys/contrib/openzfs/scripts/spdxcheck.pl | 5
-rwxr-xr-x  sys/contrib/openzfs/scripts/zfs-helpers.sh | 7
-rwxr-xr-x  sys/contrib/openzfs/scripts/zfs-tests.sh | 9
-rw-r--r--  sys/contrib/openzfs/tests/runfiles/common.run | 53
-rw-r--r--  sys/contrib/openzfs/tests/runfiles/linux.run | 6
-rw-r--r--  sys/contrib/openzfs/tests/runfiles/sanity.run | 6
-rwxr-xr-x  sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in | 61
-rwxr-xr-x  sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in | 2
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore | 1
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am | 2
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c | 4
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c | 5
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_write_sync.c | 84
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg | 7
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib | 56
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg | 4
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am | 42
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh | 75
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_tunables.ksh | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/cleanup.ksh | 43
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/setup.ksh | 50
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/zfs_send_test.ksh | 111
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/library.kshlib | 80
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_label_repair_001.ksh | 15
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_metaslab_leak.ksh | 70
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib | 42
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_create.ksh (renamed from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh) | 101
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh | 204
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh | 126
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh | 30
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh | 32
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib | 235
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh | 90
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh | 80
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_date_range_002.ksh | 76
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcstat_001_pos.ksh (renamed from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh) | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcsummary_001_pos.ksh (renamed from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh) | 10
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcsummary_002_neg.ksh (renamed from sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh) | 6
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/cleanup.ksh | 43
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/setup.ksh | 50
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/zfs_send_usertest.ksh | 145
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib | 6
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_degraded_sit_out.ksh | 106
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out.ksh | 102
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out_neg.ksh | 116
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/zed_synchronous_zedlet.ksh | 149
-rw-r--r--  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode.kshlib | 149
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_continue.ksh | 36
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_wait.ksh | 36
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_continue.ksh | 36
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_wait.ksh | 36
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_continue.ksh | 36
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_wait.ksh | 37
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_continue.ksh | 37
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_wait.ksh | 37
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_001_pos.ksh | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_002_pos.ksh | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/fault_limits.ksh | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/mount/mount_loopback.ksh | 111
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh | 46
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/attach_resilver_sit_out.ksh | 189
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/replace_resilver_sit_out.ksh | 199
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh | 2
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh | 14
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh | 53
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh | 40
-rwxr-xr-x  sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh | 66
-rw-r--r--  sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c | 3
-rw-r--r--  sys/crypto/ccp/ccp.c | 2
-rw-r--r--  sys/crypto/chacha20/chacha.c | 6
-rw-r--r--  sys/crypto/openssl/amd64/ossl_aes_gcm_avx512.c | 232
-rw-r--r--  sys/crypto/openssl/arm/ossl_aes_gcm_neon.c (renamed from sys/crypto/openssl/arm/ossl_aes_gcm.c) | 0
-rw-r--r--  sys/crypto/openssl/ossl_aes_gcm.c (renamed from sys/crypto/openssl/amd64/ossl_aes_gcm.c) | 397
-rw-r--r--  sys/crypto/openssl/ossl_ppc.c | 9
-rw-r--r--  sys/crypto/openssl/ossl_sha256.c | 4
-rw-r--r--  sys/crypto/openssl/ossl_x86.c | 6
-rw-r--r--  sys/ddb/db_ps.c | 11
-rw-r--r--  sys/dev/acpica/acpi.c | 479
-rw-r--r--  sys/dev/acpica/acpi_apei.c | 2
-rw-r--r--  sys/dev/acpica/acpi_lid.c | 4
-rw-r--r--  sys/dev/acpica/acpi_timer.c | 11
-rw-r--r--  sys/dev/acpica/acpivar.h | 16
-rw-r--r--  sys/dev/ahci/ahci_pci.c | 47
-rw-r--r--  sys/dev/aic7xxx/aic79xx.c | 4
-rw-r--r--  sys/dev/aic7xxx/aic7xxx.c | 4
-rw-r--r--  sys/dev/amdgpio/amdgpio.c | 139
-rw-r--r--  sys/dev/amdgpio/amdgpio.h | 9
-rw-r--r--  sys/dev/ath/ath_rate/sample/sample.c | 8
-rw-r--r--  sys/dev/ath/if_ath.c | 3
-rw-r--r--  sys/dev/ath/if_ath_tx.c | 24
-rw-r--r--  sys/dev/ath/if_ath_tx_ht.c | 6
-rw-r--r--  sys/dev/axgbe/if_axgbe_pci.c | 3
-rw-r--r--  sys/dev/bce/if_bce.c | 2
-rw-r--r--  sys/dev/bhnd/cores/chipc/chipc_gpio.c | 4
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_hwrm.c | 2
-rw-r--r--  sys/dev/bnxt/bnxt_en/if_bnxt.c | 1
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_res.c | 4
-rw-r--r--  sys/dev/bwi/if_bwi.c | 4
-rw-r--r--  sys/dev/bwn/if_bwn.c | 2
-rw-r--r--  sys/dev/cpuctl/cpuctl.c | 24
-rw-r--r--  sys/dev/cxgbe/adapter.h | 91
-rw-r--r--  sys/dev/cxgbe/common/common.h | 177
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c | 1967
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.h | 135
-rw-r--r--  sys/dev/cxgbe/common/t4_msg.h | 3011
-rw-r--r--  sys/dev/cxgbe/common/t4_regs.h | 27273
-rw-r--r--  sys/dev/cxgbe/common/t4_regs_values.h | 24
-rw-r--r--  sys/dev/cxgbe/common/t4_tcb.h | 182
-rw-r--r--  sys/dev/cxgbe/crypto/t4_crypto.c | 54
-rw-r--r--  sys/dev/cxgbe/crypto/t4_crypto.h | 1
-rw-r--r--  sys/dev/cxgbe/crypto/t4_keyctx.c | 30
-rw-r--r--  sys/dev/cxgbe/crypto/t6_kern_tls.c | 2
-rw-r--r--  sys/dev/cxgbe/crypto/t7_kern_tls.c | 2196
-rw-r--r--  sys/dev/cxgbe/cudbg/cudbg_flash_utils.c | 90
-rw-r--r--  sys/dev/cxgbe/cudbg/cudbg_lib.c | 11
-rw-r--r--  sys/dev/cxgbe/cudbg/cudbg_lib_common.h | 7
-rw-r--r--  sys/dev/cxgbe/cxgbei/icl_cxgbei.c | 96
-rw-r--r--  sys/dev/cxgbe/firmware/t4fw_interface.h | 1320
-rw-r--r--  sys/dev/cxgbe/firmware/t7fw_cfg.txt | 644
-rw-r--r--  sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt | 530
-rw-r--r--  sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt | 644
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/device.c | 20
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h | 5
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/mem.c | 103
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/qp.c | 2
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/resource.c | 38
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/t4.h | 1
-rw-r--r--  sys/dev/cxgbe/offload.h | 12
-rw-r--r--  sys/dev/cxgbe/t4_filter.c | 476
-rw-r--r--  sys/dev/cxgbe/t4_ioctl.h | 17
-rw-r--r--  sys/dev/cxgbe/t4_iov.c | 67
-rw-r--r--  sys/dev/cxgbe/t4_l2t.c | 14
-rw-r--r--  sys/dev/cxgbe/t4_l2t.h | 2
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 1577
-rw-r--r--  sys/dev/cxgbe/t4_mp_ring.c | 81
-rw-r--r--  sys/dev/cxgbe/t4_mp_ring.h | 1
-rw-r--r--  sys/dev/cxgbe/t4_netmap.c | 23
-rw-r--r--  sys/dev/cxgbe/t4_sched.c | 6
-rw-r--r--  sys/dev/cxgbe/t4_sge.c | 209
-rw-r--r--  sys/dev/cxgbe/t4_tpt.c | 193
-rw-r--r--  sys/dev/cxgbe/t4_tracer.c | 5
-rw-r--r--  sys/dev/cxgbe/t4_vf.c | 65
-rw-r--r--  sys/dev/cxgbe/tom/t4_connect.c | 43
-rw-r--r--  sys/dev/cxgbe/tom/t4_cpl_io.c | 151
-rw-r--r--  sys/dev/cxgbe/tom/t4_ddp.c | 22
-rw-r--r--  sys/dev/cxgbe/tom/t4_listen.c | 7
-rw-r--r--  sys/dev/cxgbe/tom/t4_tls.c | 357
-rw-r--r--  sys/dev/cxgbe/tom/t4_tls.h | 1
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.c | 143
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.h | 22
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom_l2t.c | 2
-rw-r--r--  sys/dev/cyapa/cyapa.c | 95
-rw-r--r--  sys/dev/e1000/e1000_phy.c | 5
-rw-r--r--  sys/dev/e1000/if_em.c | 398
-rw-r--r--  sys/dev/e1000/if_em.h | 19
-rw-r--r--  sys/dev/enetc/if_enetc.c | 9
-rw-r--r--  sys/dev/fdt/fdt_common.c | 2
-rw-r--r--  sys/dev/fdt/fdt_common.h | 7
-rw-r--r--  sys/dev/fdt/fdt_slicer.c | 6
-rw-r--r--  sys/dev/ftgpio/ftgpio.c | 3
-rw-r--r--  sys/dev/gpio/acpi_gpiobus.c | 168
-rw-r--r--  sys/dev/gpio/acpi_gpiobusvar.h | 6
-rw-r--r--  sys/dev/gpio/bytgpio.c | 3
-rw-r--r--  sys/dev/gpio/chvgpio.c | 3
-rw-r--r--  sys/dev/gpio/dwgpio/dwgpio.c | 3
-rw-r--r--  sys/dev/gpio/gpio_if.m | 26
-rw-r--r--  sys/dev/gpio/gpioaei.c | 204
-rw-r--r--  sys/dev/gpio/gpiobus.c | 138
-rw-r--r--  sys/dev/gpio/gpiobus_if.m | 30
-rw-r--r--  sys/dev/gpio/gpiobus_internal.h | 3
-rw-r--r--  sys/dev/gpio/gpiobusvar.h | 1
-rw-r--r--  sys/dev/gpio/gpioc.c | 188
-rw-r--r--  sys/dev/gpio/gpioled.c | 106
-rw-r--r--  sys/dev/gpio/ofw_gpiobus.c | 17
-rw-r--r--  sys/dev/gpio/pl061.c | 6
-rw-r--r--  sys/dev/gpio/qoriq_gpio.c | 3
-rw-r--r--  sys/dev/hid/hidbus.c | 41
-rw-r--r--  sys/dev/hid/hidquirk.h | 1
-rw-r--r--  sys/dev/hid/hidraw.c | 12
-rw-r--r--  sys/dev/hid/hkbd.c | 19
-rw-r--r--  sys/dev/hid/ietp.c | 86
-rw-r--r--  sys/dev/hid/u2f.c | 603
-rw-r--r--  sys/dev/hpt27xx/hptintf.h | 6
-rw-r--r--  sys/dev/hptmv/entry.c | 33
-rw-r--r--  sys/dev/hptmv/gui_lib.c | 12
-rw-r--r--  sys/dev/hptmv/hptproc.c | 2
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c | 21
-rw-r--r--  sys/dev/hwt/hwt_ioctl.c | 9
-rw-r--r--  sys/dev/ice/ice_bitops.h | 4
-rw-r--r--  sys/dev/ice/ice_common.c | 9
-rw-r--r--  sys/dev/ice/ice_devids.h | 18
-rw-r--r--  sys/dev/ice/ice_drv_info.h | 39
-rw-r--r--  sys/dev/ice/ice_fw_logging.c | 2
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h | 2
-rw-r--r--  sys/dev/ice/ice_lib.c | 6
-rw-r--r--  sys/dev/ice/ice_lib.h | 2
-rw-r--r--  sys/dev/ice/ice_protocol_type.h | 2
-rw-r--r--  sys/dev/ichsmb/ichsmb_pci.c | 3
-rw-r--r--  sys/dev/ichwd/i6300esbwd.c | 245
-rw-r--r--  sys/dev/ichwd/i6300esbwd.h | 46
-rw-r--r--  sys/dev/ichwd/ichwd.c | 2
-rw-r--r--  sys/dev/ichwd/ichwd.h | 3
-rw-r--r--  sys/dev/igc/if_igc.c | 4
-rw-r--r--  sys/dev/iicbus/gpio/pcf8574.c | 7
-rw-r--r--  sys/dev/iicbus/gpio/tca64xx.c | 7
-rw-r--r--  sys/dev/iicbus/iicbb.c | 7
-rw-r--r--  sys/dev/iicbus/iichid.c | 23
-rw-r--r--  sys/dev/iommu/busdma_iommu.c | 54
-rw-r--r--  sys/dev/iommu/iommu.h | 2
-rw-r--r--  sys/dev/iommu/iommu_gas.c | 2
-rw-r--r--  sys/dev/ipw/if_ipw.c | 3
-rw-r--r--  sys/dev/irdma/irdma_cm.c | 2
-rw-r--r--  sys/dev/irdma/irdma_utils.c | 4
-rw-r--r--  sys/dev/isci/scil/intel_sata.h | 2
-rw-r--r--  sys/dev/iwi/if_iwi.c | 4
-rw-r--r--  sys/dev/iwm/if_iwm.c | 7
-rw-r--r--  sys/dev/iwn/if_iwn.c | 12
-rw-r--r--  sys/dev/iwx/if_iwx.c | 197
-rw-r--r--  sys/dev/iwx/if_iwxreg.h | 4
-rw-r--r--  sys/dev/ixgbe/if_ix.c | 788
-rw-r--r--  sys/dev/ixgbe/if_ixv.c | 6
-rw-r--r--  sys/dev/ixgbe/ixgbe.h | 63
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c | 16
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h | 1
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c | 25
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.c | 5533
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.h | 224
-rw-r--r--  sys/dev/ixgbe/ixgbe_features.h | 1
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.c | 26
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h | 31
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h | 69
-rw-r--r--  sys/dev/ixgbe/ixgbe_type_e610.h | 2278
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c | 3
-rw-r--r--  sys/dev/ixl/if_ixl.c | 56
-rw-r--r--  sys/dev/ixl/ixl.h | 1
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c | 110
-rw-r--r--  sys/dev/malo/if_malo.c | 4
-rw-r--r--  sys/dev/mii/mv88e151x.c | 8
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c | 19
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c | 2
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c | 4
-rw-r--r--  sys/dev/mpi3mr/mpi3mr.c | 9
-rw-r--r--  sys/dev/mpi3mr/mpi3mr_cam.c | 5
-rw-r--r--  sys/dev/mpr/mpr.c | 10
-rw-r--r--  sys/dev/mpr/mpr_mapping.c | 18
-rw-r--r--  sys/dev/mpr/mprvar.h | 1
-rw-r--r--  sys/dev/mps/mps_sas.c | 4
-rw-r--r--  sys/dev/mpt/mpt_raid.c | 4
-rw-r--r--  sys/dev/mwl/if_mwl.c | 8
-rw-r--r--  sys/dev/nctgpio/nctgpio.c | 3
-rw-r--r--  sys/dev/netmap/if_ptnet.c | 6
-rw-r--r--  sys/dev/nfe/if_nfe.c | 4
-rw-r--r--  sys/dev/null/null.c | 48
-rw-r--r--  sys/dev/nvme/nvme.c | 5
-rw-r--r--  sys/dev/nvme/nvme.h | 8
-rw-r--r--  sys/dev/nvme/nvme_ahci.c | 1
-rw-r--r--  sys/dev/nvme/nvme_ctrlr.c | 117
-rw-r--r--  sys/dev/nvme/nvme_ctrlr_cmd.c | 3
-rw-r--r--  sys/dev/nvme/nvme_ns.c | 3
-rw-r--r--  sys/dev/nvme/nvme_pci.c | 1
-rw-r--r--  sys/dev/nvme/nvme_private.h | 9
-rw-r--r--  sys/dev/nvme/nvme_qpair.c | 3
-rw-r--r--  sys/dev/nvme/nvme_sim.c | 5
-rw-r--r--  sys/dev/nvme/nvme_sysctl.c | 1
-rw-r--r--  sys/dev/nvme/nvme_util.c | 23
-rw-r--r--  sys/dev/nvmf/nvmf_tcp.c | 2
-rw-r--r--  sys/dev/ocs_fc/ocs_mgmt.c | 14
-rw-r--r--  sys/dev/otus/if_otus.c | 11
-rw-r--r--  sys/dev/p2sb/lewisburg_gpio.c | 3
-rw-r--r--  sys/dev/pci/pci.c | 10
-rw-r--r--  sys/dev/pci/pci_user.c | 121
-rw-r--r--  sys/dev/ppc/ppc.c | 2
-rw-r--r--  sys/dev/psci/smccc_trng.c | 2
-rw-r--r--  sys/dev/puc/pucdata.c | 43
-rw-r--r--  sys/dev/qat/include/common/adf_accel_devices.h | 4
-rw-r--r--  sys/dev/qat/qat_api/include/icp_sal_versions.h | 2
-rw-r--r--  sys/dev/qat/qat_common/adf_gen4_timer.c | 2
-rw-r--r--  sys/dev/qat/qat_common/qat_uclo.c | 1
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c | 58
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h | 6
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c | 15
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c | 15
-rw-r--r--  sys/dev/qcom_rnd/qcom_rnd.c | 2
-rw-r--r--  sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c | 3
-rw-r--r--  sys/dev/qlnx/qlnxe/ecore_dev.c | 6
-rw-r--r--  sys/dev/qlnx/qlnxe/ecore_mcp.c | 2
-rw-r--r--  sys/dev/qlnx/qlnxe/qlnx_def.h | 16
-rw-r--r--  sys/dev/qlnx/qlnxe/qlnx_os.c | 25
-rw-r--r--  sys/dev/ral/rt2560.c | 4
-rw-r--r--  sys/dev/ral/rt2661.c | 4
-rw-r--r--  sys/dev/ral/rt2860.c | 3
-rw-r--r--  sys/dev/random/armv8rng.c | 2
-rw-r--r--  sys/dev/random/darn.c | 2
-rw-r--r--  sys/dev/random/fenestrasX/fx_pool.c | 3
-rw-r--r--  sys/dev/random/ivy.c | 2
-rw-r--r--  sys/dev/random/nehemiah.c | 2
-rw-r--r--  sys/dev/random/random_harvestq.c | 158
-rw-r--r--  sys/dev/random/randomdev.h | 7
-rw-r--r--  sys/dev/rccgpio/rccgpio.c | 3
-rw-r--r--  sys/dev/re/if_re.c | 5
-rw-r--r--  sys/dev/rtwn/if_rtwn.c | 8
-rw-r--r--  sys/dev/rtwn/if_rtwn_tx.c | 8
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_tx.c | 11
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_tx.c | 16
-rw-r--r--  sys/dev/smartpqi/smartpqi_event.c | 6
-rw-r--r--  sys/dev/smartpqi/smartpqi_queue.c | 4
-rw-r--r--  sys/dev/sound/pci/hda/hdaa.c | 53
-rw-r--r--  sys/dev/sound/pci/hda/hdaa_patches.c | 6
-rw-r--r--  sys/dev/sound/pci/hda/hdac.c | 11
-rw-r--r--  sys/dev/sound/pci/hda/hdac.h | 2
-rw-r--r--  sys/dev/sound/pcm/channel.h | 2
-rw-r--r--  sys/dev/sound/pcm/dsp.c | 123
-rw-r--r--  sys/dev/sym/sym_hipd.c | 12
-rw-r--r--  sys/dev/thunderbolt/hcm.c | 223
-rw-r--r--  sys/dev/thunderbolt/hcm_var.h | 47
-rw-r--r--  sys/dev/thunderbolt/nhi.c | 1170
-rw-r--r--  sys/dev/thunderbolt/nhi_pci.c | 529
-rw-r--r--  sys/dev/thunderbolt/nhi_reg.h | 332
-rw-r--r--  sys/dev/thunderbolt/nhi_var.h | 277
-rw-r--r--  sys/dev/thunderbolt/nhi_wmi.c | 198
-rw-r--r--  sys/dev/thunderbolt/router.c | 939
-rw-r--r--  sys/dev/thunderbolt/router_var.h | 242
-rw-r--r--  sys/dev/thunderbolt/tb_acpi_pcib.c | 181
-rw-r--r--  sys/dev/thunderbolt/tb_debug.c | 334
-rw-r--r--  sys/dev/thunderbolt/tb_debug.h | 93
-rw-r--r--  sys/dev/thunderbolt/tb_dev.c | 331
-rw-r--r--  sys/dev/thunderbolt/tb_dev.h | 41
-rw-r--r--  sys/dev/thunderbolt/tb_if.m (renamed from sys/dev/virtio/mmio/virtio_mmio_if.m) | 112
-rw-r--r--  sys/dev/thunderbolt/tb_ioctl.h | 52
-rw-r--r--  sys/dev/thunderbolt/tb_pcib.c | 614
-rw-r--r--  sys/dev/thunderbolt/tb_pcib.h | 93
-rw-r--r--  sys/dev/thunderbolt/tb_reg.h | 52
-rw-r--r--  sys/dev/thunderbolt/tb_var.h | 54
-rw-r--r--  sys/dev/thunderbolt/tbcfg_reg.h | 363
-rw-r--r--  sys/dev/tpm/tpm20.c | 13
-rw-r--r--  sys/dev/tpm/tpm_tis_core.c | 7
-rw-r--r--  sys/dev/tws/tws.c | 13
-rw-r--r--  sys/dev/tws/tws_services.c | 2
-rw-r--r--  sys/dev/uart/uart_bus_pci.c | 2
-rw-r--r--  sys/dev/uart/uart_cpu_acpi.c | 20
-rw-r--r--  sys/dev/uart/uart_cpu_acpi.h | 17
-rw-r--r--  sys/dev/uart/uart_dev_ns8250.c | 42
-rw-r--r--  sys/dev/uart/uart_dev_pl011.c | 16
-rw-r--r--  sys/dev/ufshci/ufshci.h | 173
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr.c | 431
-rw-r--r--  sys/dev/ufshci/ufshci_ctrlr_cmd.c | 26
-rw-r--r--  sys/dev/ufshci/ufshci_dev.c | 354
-rw-r--r--  sys/dev/ufshci/ufshci_pci.c | 6
-rw-r--r--  sys/dev/ufshci/ufshci_private.h | 90
-rw-r--r--  sys/dev/ufshci/ufshci_reg.h | 2
-rw-r--r--  sys/dev/ufshci/ufshci_req_queue.c | 401
-rw-r--r--  sys/dev/ufshci/ufshci_req_sdb.c | 210
-rw-r--r--  sys/dev/ufshci/ufshci_sim.c | 1
-rw-r--r--  sys/dev/ufshci/ufshci_sysctl.c | 20
-rw-r--r--  sys/dev/ufshci/ufshci_uic_cmd.c | 19
-rw-r--r--  sys/dev/usb/controller/xhci.c | 85
-rw-r--r--  sys/dev/usb/controller/xhci_pci.c | 2
-rw-r--r--  sys/dev/usb/controller/xhcireg.h | 5
-rw-r--r--  sys/dev/usb/input/uhid.c | 6
-rw-r--r--  sys/dev/usb/input/usbhid.c | 8
-rw-r--r--  sys/dev/usb/misc/cp2112.c | 5
-rw-r--r--  sys/dev/usb/net/if_ipheth.c | 218
-rw-r--r--  sys/dev/usb/net/if_iphethvar.h | 21
-rw-r--r--  sys/dev/usb/net/if_umb.c | 6
-rw-r--r--  sys/dev/usb/serial/udbc.c | 404
-rw-r--r--  sys/dev/usb/usb_device.c | 48
-rw-r--r--  sys/dev/usb/usb_generic.c | 37
-rw-r--r--  sys/dev/usb/usb_hub.c | 3
-rw-r--r--  sys/dev/usb/usbdi.h | 3
-rw-r--r--  sys/dev/usb/wlan/if_mtw.c | 5
-rw-r--r--  sys/dev/usb/wlan/if_rsu.c | 66
-rw-r--r--  sys/dev/usb/wlan/if_rsureg.h | 9
-rw-r--r--  sys/dev/usb/wlan/if_run.c | 14
-rw-r--r--  sys/dev/usb/wlan/if_uath.c | 4
-rw-r--r--  sys/dev/usb/wlan/if_upgt.c | 5
-rw-r--r--  sys/dev/usb/wlan/if_ural.c | 6
-rw-r--r--  sys/dev/usb/wlan/if_urtw.c | 6
-rw-r--r--  sys/dev/usb/wlan/if_zyd.c | 4
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio.c | 48
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio.h | 1
-rw-r--r--  sys/dev/virtio/mmio/virtio_mmio_fdt.c | 47
-rw-r--r--  sys/dev/virtio/network/if_vtnet.c | 498
-rw-r--r--  sys/dev/virtio/network/if_vtnetvar.h | 2
-rw-r--r--  sys/dev/virtio/random/virtio_random.c | 2
-rw-r--r--  sys/dev/virtio/virtio_bus_if.m | 4
-rw-r--r--  sys/dev/virtio/virtqueue.c | 2
-rw-r--r--  sys/dev/vmgenc/vmgenc_acpi.c | 8
-rw-r--r--  sys/dev/vmm/vmm_dev.c | 1
-rw-r--r--  sys/dev/vmm/vmm_mem.c | 31
-rw-r--r--  sys/dev/vmm/vmm_mem.h | 6
-rw-r--r--  sys/dev/vmware/vmxnet3/if_vmx.c | 7
-rw-r--r--  sys/dev/vt/vt_core.c | 8
-rw-r--r--  sys/dev/watchdog/watchdog.c | 214
-rw-r--r--  sys/dev/xdma/xdma.c | 2
-rw-r--r--  sys/dev/xen/bus/xen_intr.c | 6
-rw-r--r--  sys/dev/xen/control/control.c | 7
-rw-r--r--  sys/fs/cd9660/cd9660_lookup.c | 2
-rw-r--r--  sys/fs/cd9660/cd9660_vfsops.c | 8
-rw-r--r--  sys/fs/cd9660/cd9660_vnops.c | 14
-rw-r--r--  sys/fs/devfs/devfs_dir.c | 2
-rw-r--r--  sys/fs/devfs/devfs_vnops.c | 6
-rw-r--r--  sys/fs/ext2fs/ext2_lookup.c | 4
-rw-r--r--  sys/fs/ext2fs/ext2_vfsops.c | 4
-rw-r--r--  sys/fs/ext2fs/ext2_vnops.c | 20
-rw-r--r--  sys/fs/fdescfs/fdesc_vnops.c | 8
-rw-r--r--  sys/fs/fuse/fuse_ipc.c | 4
-rw-r--r--  sys/fs/fuse/fuse_vfsops.c | 4
-rw-r--r--  sys/fs/fuse/fuse_vnops.c | 18
-rw-r--r--  sys/fs/msdosfs/bootsect.h | 2
-rw-r--r--  sys/fs/msdosfs/msdosfs_lookup.c | 4
-rw-r--r--  sys/fs/msdosfs/msdosfs_vfsops.c | 2
-rw-r--r--  sys/fs/nfs/nfs_commonkrpc.c | 1
-rw-r--r--  sys/fs/nfs/nfs_commonport.c | 2
-rw-r--r--  sys/fs/nfs/nfs_commonsubs.c | 45
-rw-r--r--  sys/fs/nfs/nfs_var.h | 17
-rw-r--r--  sys/fs/nfs/nfsport.h | 21
-rw-r--r--  sys/fs/nfs/nfsproto.h | 13
-rw-r--r--  sys/fs/nfsclient/nfs_clcomsubs.c | 2
-rw-r--r--  sys/fs/nfsclient/nfs_clport.c | 26
-rw-r--r--  sys/fs/nfsclient/nfs_clrpcops.c | 245
-rw-r--r--  sys/fs/nfsclient/nfs_clstate.c | 2
-rw-r--r--  sys/fs/nfsclient/nfs_clvfsops.c | 10
-rw-r--r--  sys/fs/nfsclient/nfs_clvnops.c | 172
-rw-r--r--  sys/fs/nfsclient/nfsmount.h | 1
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdport.c | 60
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdserv.c | 220
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdsocket.c | 14
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdstate.c | 8
-rw-r--r--  sys/fs/nullfs/null.h | 23
-rw-r--r--  sys/fs/nullfs/null_subr.c | 102
-rw-r--r--  sys/fs/nullfs/null_vfsops.c | 20
-rw-r--r--  sys/fs/nullfs/null_vnops.c | 195
-rw-r--r--  sys/fs/p9fs/p9_transport.c | 3
-rw-r--r--  sys/fs/p9fs/p9fs_vfsops.c | 10
-rw-r--r--  sys/fs/p9fs/p9fs_vnops.c | 8
-rw-r--r--  sys/fs/procfs/procfs.c | 68
-rw-r--r--  sys/fs/procfs/procfs_status.c | 8
-rw-r--r--  sys/fs/pseudofs/pseudofs.c | 69
-rw-r--r--  sys/fs/pseudofs/pseudofs.h | 19
-rw-r--r--  sys/fs/pseudofs/pseudofs_vncache.c | 2
-rw-r--r--  sys/fs/pseudofs/pseudofs_vnops.c | 2
-rw-r--r--  sys/fs/smbfs/smbfs_vnops.c | 6
-rw-r--r--  sys/fs/tarfs/tarfs_vfsops.c | 6
-rw-r--r--  sys/fs/tarfs/tarfs_vnops.c | 15
-rw-r--r--  sys/fs/tmpfs/tmpfs_subr.c | 2
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c | 23
-rw-r--r--  sys/fs/udf/osta.c | 4
-rw-r--r--  sys/fs/udf/udf_vfsops.c | 4
-rw-r--r--  sys/fs/udf/udf_vnops.c | 2
-rw-r--r--  sys/fs/unionfs/union_subr.c | 108
-rw-r--r--  sys/fs/unionfs/union_vfsops.c | 6
-rw-r--r--  sys/fs/unionfs/union_vnops.c | 316
-rw-r--r--  sys/geom/cache/g_cache.c | 4
-rw-r--r--  sys/geom/concat/g_concat.c | 6
-rw-r--r--  sys/geom/eli/g_eli.c | 2
-rw-r--r--  sys/geom/gate/g_gate.c | 2
-rw-r--r--  sys/geom/geom.h | 5
-rw-r--r--  sys/geom/geom_dev.c | 2
-rw-r--r--  sys/geom/geom_event.c | 9
-rw-r--r--  sys/geom/geom_slice.c | 2
-rw-r--r--  sys/geom/geom_subr.c | 33
-rw-r--r--  sys/geom/journal/g_journal.c | 2
-rw-r--r--  sys/geom/label/g_label.c | 2
-rw-r--r--  sys/geom/linux_lvm/g_linux_lvm.c | 4
-rw-r--r--  sys/geom/mirror/g_mirror.c | 4
-rw-r--r--  sys/geom/mirror/g_mirror_ctl.c | 2
-rw-r--r--  sys/geom/mountver/g_mountver.c | 2
-rw-r--r--  sys/geom/multipath/g_multipath.c | 7
-rw-r--r--  sys/geom/nop/g_nop.c | 2
-rw-r--r--  sys/geom/part/g_part.c | 26
-rw-r--r--  sys/geom/raid/g_raid.c | 6
-rw-r--r--  sys/geom/raid3/g_raid3.c | 4
-rw-r--r--  sys/geom/raid3/g_raid3_ctl.c | 2
-rw-r--r--  sys/geom/shsec/g_shsec.c | 4
-rw-r--r--  sys/geom/stripe/g_stripe.c | 10
-rw-r--r--  sys/geom/union/g_union.c | 4
-rw-r--r--  sys/geom/virstor/g_virstor.c | 4
-rw-r--r--  sys/geom/zero/g_zero.c | 2
-rw-r--r--  sys/i386/acpica/acpi_wakeup.c | 4
-rw-r--r--  sys/i386/conf/GENERIC | 2
-rw-r--r--  sys/i386/i386/in_cksum_machdep.c | 2
-rw-r--r--  sys/i386/i386/machdep.c | 2
-rw-r--r--  sys/i386/i386/pmap.c | 2
-rw-r--r--  sys/i386/include/cpufunc.h | 2
-rw-r--r--  sys/isa/isa_common.c | 2
-rw-r--r--  sys/isa/isareg.h | 2
-rw-r--r--  sys/kern/imgact_elf.c | 94
-rw-r--r--  sys/kern/init_main.c | 9
-rw-r--r--  sys/kern/init_sysent.c | 8
-rw-r--r--  sys/kern/kern_boottrace.c | 2
-rw-r--r--  sys/kern/kern_descrip.c | 37
-rw-r--r--  sys/kern/kern_devctl.c | 2
-rw-r--r--  sys/kern/kern_environment.c | 32
-rw-r--r--  sys/kern/kern_event.c | 102
-rw-r--r--  sys/kern/kern_exec.c | 4
-rw-r--r--  sys/kern/kern_exit.c | 48
-rw-r--r--  sys/kern/kern_fork.c | 10
-rw-r--r--  sys/kern/kern_jail.c | 417
-rw-r--r--  sys/kern/kern_jaildesc.c | 412
-rw-r--r--  sys/kern/kern_jailmeta.c | 8
-rw-r--r--  sys/kern/kern_linker.c | 2
-rw-r--r--  sys/kern/kern_lock.c | 6
-rw-r--r--  sys/kern/kern_malloc.c | 15
-rw-r--r--  sys/kern/kern_mutex.c | 33
-rw-r--r--  sys/kern/kern_proc.c | 16
-rw-r--r--  sys/kern/kern_prot.c | 172
-rw-r--r--  sys/kern/kern_racct.c | 4
-rw-r--r--  sys/kern/kern_rangelock.c | 2
-rw-r--r--  sys/kern/kern_rctl.c | 4
-rw-r--r--  sys/kern/kern_rmlock.c | 4
-rw-r--r--  sys/kern/kern_rwlock.c | 4
-rw-r--r--  sys/kern/kern_sharedpage.c | 3
-rw-r--r--  sys/kern/kern_sig.c | 6
-rw-r--r--  sys/kern/kern_sx.c | 15
-rw-r--r--  sys/kern/kern_thr.c | 11
-rw-r--r--  sys/kern/kern_thread.c | 6
-rw-r--r--  sys/kern/kern_time.c | 4
-rw-r--r--  sys/kern/kern_tslog.c | 10
-rw-r--r--  sys/kern/link_elf.c | 6
-rw-r--r--  sys/kern/link_elf_obj.c | 8
-rw-r--r--  sys/kern/subr_asan.c | 1
-rw-r--r--  sys/kern/subr_bus.c | 13
-rw-r--r--  sys/kern/subr_devstat.c | 2
-rw-r--r--  sys/kern/subr_msan.c | 1
-rw-r--r--  sys/kern/subr_param.c | 13
-rw-r--r--  sys/kern/subr_pcpu.c | 2
-rw-r--r--  sys/kern/subr_power.c | 130
-rw-r--r--  sys/kern/subr_prf.c | 2
-rw-r--r--  sys/kern/subr_witness.c | 144
-rw-r--r--  sys/kern/sys_generic.c | 36
-rw-r--r--  sys/kern/sys_pipe.c | 27
-rw-r--r--  sys/kern/sys_procdesc.c | 2
-rw-r--r--  sys/kern/sys_socket.c | 2
-rw-r--r--  sys/kern/sys_timerfd.c | 1
-rw-r--r--  sys/kern/syscalls.c | 8
-rw-r--r--  sys/kern/syscalls.master | 26
-rw-r--r--  sys/kern/systrace_args.c | 148
-rw-r--r--  sys/kern/uipc_mqueue.c | 6
-rw-r--r--  sys/kern/uipc_shm.c | 3
-rw-r--r--  sys/kern/uipc_usrreq.c | 64
-rw-r--r--  sys/kern/vfs_bio.c | 2
-rw-r--r--  sys/kern/vfs_cache.c | 214
-rw-r--r--  sys/kern/vfs_default.c | 6
-rw-r--r--  sys/kern/vfs_init.c | 37
-rw-r--r--  sys/kern/vfs_inotify.c | 1
-rw-r--r--  sys/kern/vfs_lookup.c | 14
-rw-r--r--  sys/kern/vfs_mount.c | 7
-rw-r--r--  sys/kern/vfs_mountroot.c | 2
-rw-r--r--  sys/kern/vfs_subr.c | 75
-rw-r--r--  sys/kern/vfs_syscalls.c | 23
-rw-r--r--  sys/kern/vfs_vnops.c | 188
-rw-r--r--  sys/libkern/arc4random.c | 4
-rw-r--r--  sys/libkern/arm64/crc32c_armv8.S | 8
-rw-r--r--  sys/libkern/qsort.c | 14
-rw-r--r--  sys/libkern/x86/crc32_sse42.c | 4
-rw-r--r--  sys/modules/Makefile | 8
-rw-r--r--  sys/modules/aic7xxx/ahc/Makefile | 2
-rw-r--r--  sys/modules/cxgb/Makefile | 3
-rw-r--r--  sys/modules/cxgbe/Makefile | 4
-rw-r--r--  sys/modules/cxgbe/if_cxgbe/Makefile | 2
-rw-r--r--  sys/modules/cxgbe/t7_firmware/Makefile | 23
-rw-r--r--  sys/modules/dpdk_lpm4/Makefile | 3
-rw-r--r--  sys/modules/dpdk_lpm6/Makefile | 3
-rw-r--r--  sys/modules/dtb/rockchip/Makefile | 3
-rw-r--r--  sys/modules/dtrace/dtraceall/dtraceall.c | 6
-rw-r--r--  sys/modules/e6000sw/Makefile | 2
-rw-r--r--  sys/modules/etherswitch/Makefile | 2
-rw-r--r--  sys/modules/evdev/Makefile | 2
-rw-r--r--  sys/modules/fib_dxr/Makefile | 3
-rw-r--r--  sys/modules/gpio/gpioaei/Makefile | 2
-rw-r--r--  sys/modules/gve/Makefile | 2
-rw-r--r--  sys/modules/hid/Makefile | 1
-rw-r--r--  sys/modules/hid/u2f/Makefile | 8
-rw-r--r--  sys/modules/ichwd/Makefile | 2
-rw-r--r--  sys/modules/if_enc/Makefile | 2
-rw-r--r--  sys/modules/if_gif/Makefile | 2
-rw-r--r--  sys/modules/if_gre/Makefile | 1
-rw-r--r--  sys/modules/if_infiniband/Makefile | 3
-rw-r--r--  sys/modules/if_vlan/Makefile | 2
-rw-r--r--  sys/modules/irdma/Makefile | 6
-rw-r--r--  sys/modules/iser/Makefile | 2
-rw-r--r--  sys/modules/ix/Makefile | 2
-rw-r--r--  sys/modules/ixv/Makefile | 2
-rw-r--r--  sys/modules/ktest/Makefile | 6
-rw-r--r--  sys/modules/ktest/ktest/Makefile | 6
-rw-r--r--  sys/modules/ktest/ktest_example/Makefile | 5
-rw-r--r--  sys/modules/ktest/ktest_netlink_message_writer/Makefile | 2
-rw-r--r--  sys/modules/ktest/ktest_tcphpts/Makefile | 13
-rw-r--r--  sys/modules/linux64/Makefile | 1
-rw-r--r--  sys/modules/md/Makefile | 2
-rw-r--r--  sys/modules/miiproxy/Makefile | 2
-rw-r--r--  sys/modules/mlx5/Makefile | 2
-rw-r--r--  sys/modules/mlx5en/Makefile | 2
-rw-r--r--  sys/modules/netgraph/Makefile | 2
-rw-r--r--  sys/modules/netgraph/checksum/Makefile | 3
-rw-r--r--  sys/modules/netgraph/ksocket/Makefile | 2
-rw-r--r--  sys/modules/netmap/Makefile | 3
-rw-r--r--  sys/modules/nvmf/nvmf/Makefile | 3
-rw-r--r--  sys/modules/opensolaris/Makefile | 2
-rw-r--r--  sys/modules/ossl/Makefile | 8
-rw-r--r--  sys/modules/ow/Makefile | 3
-rw-r--r--  sys/modules/qatfw/qat_4xxx/Makefile | 5
-rw-r--r--  sys/modules/qlnx/Makefile | 3
-rw-r--r--  sys/modules/qlnx/qlnxev/Makefile | 1
-rw-r--r--  sys/modules/rtw88/Makefile | 1
-rw-r--r--  sys/modules/rtw89/Makefile | 1
-rw-r--r--  sys/modules/rtwn/Makefile | 4
-rw-r--r--  sys/modules/rtwn_pci/Makefile | 4
-rw-r--r--  sys/modules/rtwn_usb/Makefile | 4
-rw-r--r--  sys/modules/sound/driver/Makefile | 3
-rw-r--r--  sys/modules/sound/driver/hda/Makefile | 4
-rw-r--r--  sys/modules/sound/sound/Makefile | 1
-rw-r--r--  sys/modules/tests/fib_lookup/Makefile | 3
-rw-r--r--  sys/modules/thunderbolt/Makefile | 13
-rw-r--r--  sys/modules/uinput/Makefile | 2
-rw-r--r--  sys/modules/usb/Makefile | 7
-rw-r--r--  sys/modules/usb/udbc/Makefile | 9
-rw-r--r--  sys/modules/usb/usie/Makefile | 2
-rw-r--r--  sys/modules/usb/wmt/Makefile | 2
-rw-r--r--  sys/modules/vnic/Makefile | 3
-rw-r--r--  sys/modules/vnic/mrmlbus/Makefile | 3
-rw-r--r--  sys/modules/vnic/thunder_bgx/Makefile | 3
-rw-r--r--  sys/modules/vnic/thunder_mdio/Makefile | 3
-rw-r--r--  sys/modules/vnic/vnicpf/Makefile | 3
-rw-r--r--  sys/modules/vnic/vnicvf/Makefile | 3
-rw-r--r--  sys/modules/zfs/Makefile | 7
-rw-r--r--  sys/modules/zfs/zfs_config.h | 27
-rw-r--r--  sys/modules/zfs/zfs_gitrev.h | 2
-rw-r--r--  sys/net/if.c | 64
-rw-r--r--  sys/net/if_bridge.c | 84
-rw-r--r--  sys/net/if_bridgevar.h | 2
-rw-r--r--  sys/net/if_clone.h | 2
-rw-r--r--  sys/net/if_epair.c | 62
-rw-r--r--  sys/net/if_ethersubr.c | 2
-rw-r--r--  sys/net/if_ovpn.c | 6
-rw-r--r--  sys/net/if_pfsync.h | 11
-rw-r--r--  sys/net/if_tap.h | 2
-rw-r--r--  sys/net/if_tun.h | 2
-rw-r--r--  sys/net/if_tuntap.c | 88
-rw-r--r--  sys/net/if_var.h | 1
-rw-r--r--  sys/net/iflib.c | 339
-rw-r--r--  sys/net/iflib.h | 2
-rw-r--r--  sys/net/pfvar.h | 163
-rw-r--r--  sys/net/route.c | 2
-rw-r--r--  sys/net/route/route_tables.c | 2
-rw-r--r--  sys/net/rtsock.c | 2
-rw-r--r--  sys/net80211/ieee80211.c | 28
-rw-r--r--  sys/net80211/ieee80211_crypto.c | 87
-rw-r--r--  sys/net80211/ieee80211_ddb.c | 2
-rw-r--r--  sys/net80211/ieee80211_freebsd.h | 33
-rw-r--r--  sys/net80211/ieee80211_ht.c | 20
-rw-r--r--  sys/net80211/ieee80211_hwmp.c | 2
-rw-r--r--  sys/net80211/ieee80211_mesh.c | 2
-rw-r--r--  sys/net80211/ieee80211_node.c | 4
-rw-r--r--  sys/net80211/ieee80211_node.h | 34
-rw-r--r--  sys/net80211/ieee80211_output.c | 17
-rw-r--r--  sys/net80211/ieee80211_phy.c | 32
-rw-r--r--  sys/net80211/ieee80211_phy.h | 8
-rw-r--r--  sys/net80211/ieee80211_proto.c | 2
-rw-r--r--  sys/net80211/ieee80211_sta.c | 2
-rw-r--r--  sys/net80211/ieee80211_var.h | 5
-rw-r--r--  sys/net80211/ieee80211_vht.c | 22
-rw-r--r--  sys/net80211/ieee80211_vht.h | 4
-rw-r--r--  sys/netgraph/bluetooth/include/ng_hci.h | 2
-rw-r--r--  sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c | 4
-rw-r--r--  sys/netgraph/netflow/netflow.c | 6
-rw-r--r--  sys/netgraph/ng_device.c | 106
-rw-r--r--  sys/netgraph/ng_nat.c | 95
-rw-r--r--  sys/netgraph/ng_parse.c | 4
-rw-r--r--  sys/netgraph/ng_tty.c | 6
-rw-r--r--  sys/netinet/cc/cc.c | 4
-rw-r--r--  sys/netinet/icmp6.h | 3
-rw-r--r--  sys/netinet/icmp_var.h | 1
-rw-r--r--  sys/netinet/in.c | 15
-rw-r--r--  sys/netinet/in_fib_algo.c | 2
-rw-r--r--  sys/netinet/in_mcast.c | 113
-rw-r--r--  sys/netinet/ip_carp.c | 27
-rw-r--r--  sys/netinet/ip_icmp.c | 5
-rw-r--r--  sys/netinet/ip_output.c | 13
-rw-r--r--  sys/netinet/ip_var.h | 1
-rw-r--r--  sys/netinet/sctp_lock_bsd.h | 6
-rw-r--r--  sys/netinet/siftr.c | 2
-rw-r--r--  sys/netinet/tcp_hostcache.c | 6
-rw-r--r--  sys/netinet/tcp_hpts.c | 935
-rw-r--r--  sys/netinet/tcp_hpts.h | 50
-rw-r--r--  sys/netinet/tcp_hpts_internal.h | 184
-rw-r--r--  sys/netinet/tcp_hpts_test.c | 1682
-rw-r--r--  sys/netinet/tcp_input.c | 558
-rw-r--r--  sys/netinet/tcp_log_buf.c | 3
-rw-r--r--  sys/netinet/tcp_lro.c | 20
-rw-r--r--  sys/netinet/tcp_lro_hpts.c | 3
-rw-r--r--  sys/netinet/tcp_output.c | 2
-rw-r--r--  sys/netinet/tcp_sack.c | 21
-rw-r--r--sys/netinet/tcp_stacks/bbr.c159
-rw-r--r--sys/netinet/tcp_stacks/rack.c280
-rw-r--r--sys/netinet/tcp_stacks/rack_bbr_common.c12
-rw-r--r--sys/netinet/tcp_stacks/rack_bbr_common.h4
-rw-r--r--sys/netinet/tcp_stacks/rack_pcm.c2
-rw-r--r--sys/netinet/tcp_stacks/tailq_hash.c2
-rw-r--r--sys/netinet/tcp_subr.c58
-rw-r--r--sys/netinet/tcp_syncache.c186
-rw-r--r--sys/netinet/tcp_syncache.h6
-rw-r--r--sys/netinet/tcp_timewait.c2
-rw-r--r--sys/netinet/tcp_var.h4
-rw-r--r--sys/netinet/udp_usrreq.c53
-rw-r--r--sys/netinet6/icmp6.c38
-rw-r--r--sys/netinet6/in6.c35
-rw-r--r--sys/netinet6/in6.h7
-rw-r--r--sys/netinet6/in6_fib_algo.c2
-rw-r--r--sys/netinet6/in6_ifattach.c284
-rw-r--r--sys/netinet6/in6_ifattach.h3
-rw-r--r--sys/netinet6/in6_proto.c10
-rw-r--r--sys/netinet6/in6_src.c54
-rw-r--r--sys/netinet6/in6_var.h2
-rw-r--r--sys/netinet6/ip6_input.c54
-rw-r--r--sys/netinet6/ip6_output.c107
-rw-r--r--sys/netinet6/ip6_var.h14
-rw-r--r--sys/netinet6/nd6.c5
-rw-r--r--sys/netinet6/nd6.h5
-rw-r--r--sys/netinet6/nd6_nbr.c158
-rw-r--r--sys/netinet6/nd6_rtr.c121
-rw-r--r--sys/netinet6/udp6_usrreq.c2
-rw-r--r--sys/netipsec/xform_ipcomp.c4
-rw-r--r--sys/netlink/netlink_io.c13
-rw-r--r--sys/netlink/netlink_linux.h2
-rw-r--r--sys/netlink/netlink_message_writer.h6
-rw-r--r--sys/netlink/netlink_snl.h17
-rw-r--r--sys/netlink/route/iface.c1
-rw-r--r--sys/netlink/route/iface_drivers.c47
-rw-r--r--sys/netlink/route/route_var.h1
-rw-r--r--sys/netpfil/ipfw/ip_dn_io.c6
-rw-r--r--sys/netpfil/ipfw/ip_dummynet.c4
-rw-r--r--sys/netpfil/ipfw/ip_fw2.c16
-rw-r--r--sys/netpfil/ipfw/ip_fw_nat.c16
-rw-r--r--sys/netpfil/pf/if_pfsync.c247
-rw-r--r--sys/netpfil/pf/pf.c988
-rw-r--r--sys/netpfil/pf/pf.h9
-rw-r--r--sys/netpfil/pf/pf_if.c6
-rw-r--r--sys/netpfil/pf/pf_ioctl.c422
-rw-r--r--sys/netpfil/pf/pf_lb.c155
-rw-r--r--sys/netpfil/pf/pf_nl.c16
-rw-r--r--sys/netpfil/pf/pf_nl.h5
-rw-r--r--sys/netpfil/pf/pf_norm.c24
-rw-r--r--sys/netpfil/pf/pf_nv.c7
-rw-r--r--sys/netpfil/pf/pf_osfp.c2
-rw-r--r--sys/netpfil/pf/pf_ruleset.c10
-rw-r--r--sys/netpfil/pf/pf_syncookies.c8
-rw-r--r--sys/nfs/nfs_diskless.c2
-rw-r--r--sys/powerpc/aim/mmu_oea64.c4
-rw-r--r--sys/powerpc/conf/GENERIC642
-rw-r--r--sys/powerpc/conf/GENERIC64LE2
-rw-r--r--sys/powerpc/cpufreq/pmcr.c3
-rw-r--r--sys/powerpc/include/atomic.h33
-rw-r--r--sys/powerpc/mpc85xx/mpc85xx_gpio.c3
-rw-r--r--sys/powerpc/powerpc/busdma_machdep.c1
-rw-r--r--sys/riscv/include/atomic.h3
-rw-r--r--sys/riscv/include/ieeefp.h5
-rw-r--r--sys/riscv/include/vmm.h25
-rw-r--r--sys/riscv/riscv/busdma_bounce.c1
-rw-r--r--sys/riscv/sifive/sifive_gpio.c3
-rw-r--r--sys/riscv/starfive/jh7110_gpio.c3
-rw-r--r--sys/riscv/starfive/jh7110_pcie.c12
-rw-r--r--sys/riscv/vmm/riscv.h23
-rw-r--r--sys/riscv/vmm/vmm.c41
-rw-r--r--sys/rpc/auth.h4
-rw-r--r--sys/rpc/authunix_prot.c93
-rw-r--r--sys/rpc/svc_auth_unix.c94
-rw-r--r--sys/security/audit/audit.c2
-rw-r--r--sys/security/audit/audit_syscalls.c47
-rw-r--r--sys/security/mac/mac_framework.c4
-rw-r--r--sys/security/mac_bsdextended/mac_bsdextended.c4
-rw-r--r--sys/security/mac_do/mac_do.c56
-rw-r--r--sys/sys/_atomic_subword.h28
-rw-r--r--sys/sys/bus.h38
-rw-r--r--sys/sys/conf.h1
-rw-r--r--sys/sys/cpu.h22
-rw-r--r--sys/sys/efi.h67
-rw-r--r--sys/sys/event.h15
-rw-r--r--sys/sys/eventhandler.h3
-rw-r--r--sys/sys/exterrvar.h2
-rw-r--r--sys/sys/file.h11
-rw-r--r--sys/sys/imgact_elf.h8
-rw-r--r--sys/sys/jail.h23
-rw-r--r--sys/sys/jaildesc.h87
-rw-r--r--sys/sys/kernel.h27
-rw-r--r--sys/sys/mount.h1
-rw-r--r--sys/sys/mutex.h30
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/pciio.h5
-rw-r--r--sys/sys/power.h55
-rw-r--r--sys/sys/proc.h2
-rw-r--r--sys/sys/random.h3
-rw-r--r--sys/sys/rmlock.h2
-rw-r--r--sys/sys/rwlock.h2
-rw-r--r--sys/sys/sockbuf.h2
-rw-r--r--sys/sys/socket.h1
-rw-r--r--sys/sys/sockopt.h6
-rw-r--r--sys/sys/sx.h2
-rw-r--r--sys/sys/syscall.h10
-rw-r--r--sys/sys/syscall.mk10
-rw-r--r--sys/sys/sysent.h3
-rw-r--r--sys/sys/sysproto.h46
-rw-r--r--sys/sys/tree.h57
-rw-r--r--sys/sys/ttycom.h4
-rw-r--r--sys/sys/types.h2
-rw-r--r--sys/sys/ucred.h14
-rw-r--r--sys/sys/unistd.h9
-rw-r--r--sys/sys/user.h7
-rw-r--r--sys/sys/vnode.h36
-rw-r--r--sys/sys/watchdog.h25
-rw-r--r--sys/tests/ktest.h10
-rw-r--r--sys/tools/amd64_ia32_vdso.sh2
-rw-r--r--sys/tools/amd64_vdso.sh2
-rw-r--r--sys/tools/gdb/README.txt21
-rw-r--r--sys/tools/gdb/acttrace.py48
-rw-r--r--sys/tools/gdb/freebsd.py75
-rw-r--r--sys/tools/gdb/pcpu.py77
-rw-r--r--sys/tools/gdb/selftest.py31
-rw-r--r--sys/tools/gdb/selftest.sh23
-rw-r--r--sys/tools/gdb/vnet.py100
-rw-r--r--sys/tools/kernel-gdb.py15
-rw-r--r--sys/tools/makeobjops.awk4
-rw-r--r--sys/tools/vnode_if.awk13
-rw-r--r--sys/ufs/ffs/ffs_inode.c4
-rw-r--r--sys/ufs/ffs/ffs_rawread.c2
-rw-r--r--sys/ufs/ffs/ffs_softdep.c2
-rw-r--r--sys/ufs/ffs/ffs_vfsops.c2
-rw-r--r--sys/ufs/ffs/ffs_vnops.c12
-rw-r--r--sys/ufs/ufs/ufs_lookup.c4
-rw-r--r--sys/ufs/ufs/ufs_quota.c38
-rw-r--r--sys/ufs/ufs/ufs_vnops.c34
-rw-r--r--sys/vm/uma_core.c32
-rw-r--r--sys/vm/vm_domainset.c265
-rw-r--r--sys/vm/vm_domainset.h15
-rw-r--r--sys/vm/vm_extern.h2
-rw-r--r--sys/vm/vm_fault.c290
-rw-r--r--sys/vm/vm_glue.c2
-rw-r--r--sys/vm/vm_kern.c12
-rw-r--r--sys/vm/vm_meter.c2
-rw-r--r--sys/vm/vm_object.c12
-rw-r--r--sys/vm/vm_object.h2
-rw-r--r--sys/vm/vm_page.c21
-rw-r--r--sys/vm/vm_pageout.c4
-rw-r--r--sys/vm/vnode_pager.c3
-rw-r--r--sys/x86/acpica/acpi_apm.c25
-rw-r--r--sys/x86/include/apicreg.h2
-rw-r--r--sys/x86/include/mca.h25
-rw-r--r--sys/x86/include/ucode.h8
-rw-r--r--sys/x86/iommu/amd_intrmap.c14
-rw-r--r--sys/x86/iommu/intel_intrmap.c8
-rw-r--r--sys/x86/x86/busdma_bounce.c1
-rw-r--r--sys/x86/x86/identcpu.c4
-rw-r--r--sys/x86/x86/mca.c355
-rw-r--r--sys/x86/x86/tsc.c2
-rw-r--r--sys/x86/x86/ucode.c59
-rw-r--r--sys/x86/x86/ucode_subr.c10
-rw-r--r--sys/x86/xen/xen_apic.c2
1394 files changed, 89239 insertions, 15532 deletions
diff --git a/sys/amd64/acpica/acpi_wakeup.c b/sys/amd64/acpica/acpi_wakeup.c
index 99565fbb69ca..8cada2f4f911 100644
--- a/sys/amd64/acpica/acpi_wakeup.c
+++ b/sys/amd64/acpica/acpi_wakeup.c
@@ -74,7 +74,7 @@ extern int acpi_susp_bounce;
extern struct susppcb **susppcbs;
static cpuset_t suspcpus;
-static void acpi_stop_beep(void *);
+static void acpi_stop_beep(void *, enum power_stype);
static int acpi_wakeup_ap(struct acpi_softc *, int);
static void acpi_wakeup_cpus(struct acpi_softc *);
@@ -88,7 +88,7 @@ static void acpi_wakeup_cpus(struct acpi_softc *);
} while (0)
static void
-acpi_stop_beep(void *arg)
+acpi_stop_beep(void *arg, enum power_stype stype)
{
if (acpi_resume_beep != 0)
diff --git a/sys/amd64/amd64/elf_machdep.c b/sys/amd64/amd64/elf_machdep.c
index 6cc2d58bbbcc..933f1ac0051f 100644
--- a/sys/amd64/amd64/elf_machdep.c
+++ b/sys/amd64/amd64/elf_machdep.c
@@ -179,7 +179,7 @@ freebsd_brand_info_la57_img_compat(const struct image_params *imgp,
return (!prefer_uva_la48);
}
-static Elf64_Brandinfo freebsd_brand_info_la48 = {
+static const Elf64_Brandinfo freebsd_brand_info_la48 = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_X86_64,
.compat_3_brand = "FreeBSD",
@@ -190,7 +190,7 @@ static Elf64_Brandinfo freebsd_brand_info_la48 = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
};
-static Elf64_Brandinfo freebsd_brand_info_la57 = {
+static const Elf64_Brandinfo freebsd_brand_info_la57 = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_X86_64,
.compat_3_brand = "FreeBSD",
@@ -216,7 +216,7 @@ sysinit_register_elf64_brand_entries(void *arg __unused)
SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
sysinit_register_elf64_brand_entries, NULL);
-static Elf64_Brandinfo freebsd_brand_oinfo = {
+static const Elf64_Brandinfo freebsd_brand_oinfo = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_X86_64,
.compat_3_brand = "FreeBSD",
@@ -226,11 +226,10 @@ static Elf64_Brandinfo freebsd_brand_oinfo = {
.brand_note = &elf64_freebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-
-SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY,
+C_SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY,
(sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_oinfo);
-static Elf64_Brandinfo kfreebsd_brand_info = {
+static const Elf64_Brandinfo kfreebsd_brand_info = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_X86_64,
.compat_3_brand = "FreeBSD",
@@ -240,8 +239,7 @@ static Elf64_Brandinfo kfreebsd_brand_info = {
.brand_note = &elf64_kfreebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE_MANDATORY
};
-
-SYSINIT(kelf64, SI_SUB_EXEC, SI_ORDER_ANY,
+C_SYSINIT(kelf64, SI_SUB_EXEC, SI_ORDER_ANY,
(sysinit_cfunc_t)elf64_insert_brand_entry, &kfreebsd_brand_info);
void
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 9ff60439d1ec..2fce1a7e64b6 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1822,6 +1822,39 @@ clear_pcb_flags(struct pcb *pcb, const u_int flags)
: "cc", "memory");
}
+extern const char wrmsr_early_safe_gp_handler[];
+static struct region_descriptor wrmsr_early_safe_orig_efi_idt;
+
+void
+wrmsr_early_safe_start(void)
+{
+ struct region_descriptor efi_idt;
+ struct gate_descriptor *gpf_descr;
+
+ sidt(&wrmsr_early_safe_orig_efi_idt);
+ efi_idt.rd_limit = 32 * sizeof(idt0[0]);
+ efi_idt.rd_base = (uintptr_t)idt0;
+ lidt(&efi_idt);
+
+ gpf_descr = &idt0[IDT_GP];
+ gpf_descr->gd_looffset = (uintptr_t)wrmsr_early_safe_gp_handler;
+ gpf_descr->gd_hioffset = (uintptr_t)wrmsr_early_safe_gp_handler >> 16;
+ gpf_descr->gd_selector = rcs();
+ gpf_descr->gd_type = SDT_SYSTGT;
+ gpf_descr->gd_p = 1;
+}
+
+void
+wrmsr_early_safe_end(void)
+{
+ struct gate_descriptor *gpf_descr;
+
+ lidt(&wrmsr_early_safe_orig_efi_idt);
+
+ gpf_descr = &idt0[IDT_GP];
+ memset(gpf_descr, 0, sizeof(*gpf_descr));
+}
+
#ifdef KDB
/*
diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
index 870cd255abb7..27694a95653c 100644
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -1565,6 +1565,22 @@ msr_onfault:
POP_FRAME_POINTER
ret
+ENTRY(wrmsr_early_safe)
+ movl %edi,%ecx
+ movl %esi,%eax
+ sarq $32,%rsi
+ movl %esi,%edx
+ wrmsr
+ xorl %eax,%eax
+wrmsr_early_faulted:
+ ret
+
+ENTRY(wrmsr_early_safe_gp_handler)
+ addq $8,%rsp
+ movl $EFAULT,%eax
+ movq $wrmsr_early_faulted,(%rsp)
+ iretq
+
/*
* void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
* Invalidates address space addressed by ucr3, then returns to kcr3.
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 385eb9667652..2e41ed26403a 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -26,7 +26,7 @@ makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support
options SCHED_ULE # ULE scheduler
options NUMA # Non-Uniform Memory Architecture support
options PREEMPTION # Enable kernel thread preemption
-options BLOAT_KERNEL_WITH_EXTERR
+options EXTERR_STRINGS
options VIMAGE # Subsystem virtualization, e.g. VNET
options INET # InterNETworking
options INET6 # IPv6 communications protocols
@@ -184,6 +184,9 @@ device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s
device nvme # base NVMe driver
device nvd # expose NVMe namespaces as disks, depends on nvme
+# Universal Flash Storage Host Controller Interface support
+device ufshci # UFS host controller
+
# Intel Volume Management Device (VMD) support
device vmd
@@ -284,9 +287,9 @@ device wlan # 802.11 support
options IEEE80211_DEBUG # enable debug msgs
options IEEE80211_SUPPORT_MESH # enable 802.11s draft support
device wlan_wep # 802.11 WEP support
+device wlan_tkip # 802.11 TKIP support
device wlan_ccmp # 802.11 CCMP support
device wlan_gcmp # 802.11 GCMP support
-device wlan_tkip # 802.11 TKIP support
device wlan_amrr # AMRR transmit rate control algorithm
device ath # Atheros CardBus/PCI NICs
device ath_hal # Atheros CardBus/PCI chip support
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index d180f5c76afb..9a4c82275a99 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -76,7 +76,7 @@ static __inline void
clflushopt(u_long addr)
{
- __asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
+ __asm __volatile("clflushopt %0" : : "m" (*(char *)addr));
}
static __inline void
@@ -572,6 +572,15 @@ rss(void)
return (sel);
}
+static __inline u_short
+rcs(void)
+{
+ u_short sel;
+
+ __asm __volatile("movw %%cs,%0" : "=rm" (sel));
+ return (sel);
+}
+
static __inline void
load_ds(u_short sel)
{
diff --git a/sys/amd64/include/md_var.h b/sys/amd64/include/md_var.h
index b6ddc6eaaebe..b6d8c469cdf6 100644
--- a/sys/amd64/include/md_var.h
+++ b/sys/amd64/include/md_var.h
@@ -99,6 +99,10 @@ void get_fpcontext(struct thread *td, struct __mcontext *mcp,
int set_fpcontext(struct thread *td, struct __mcontext *mcp,
char *xfpustate, size_t xfpustate_len);
+void wrmsr_early_safe_start(void);
+void wrmsr_early_safe_end(void);
+int wrmsr_early_safe(u_int msr, uint64_t data);
+
#endif /* !_MACHINE_MD_VAR_H_ */
#endif /* __i386__ */
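
Taken together, the machdep.c IDT swap, the support.S stub with its #GP fixup, and the three declarations above form a bracketed probe: install the temporary handler, attempt the write, restore the saved IDT. A minimal sketch of a caller, using only these interfaces (probe_msr_early is a hypothetical name, not part of the commit):

	/* Assumes <machine/md_var.h>.  wrmsr_early_safe() returns 0 on
	 * success or EFAULT if the write raised #GP (e.g. the MSR is not
	 * implemented on this CPU). */
	static int
	probe_msr_early(u_int msr, uint64_t val)
	{
		int error;

		wrmsr_early_safe_start();	/* point #GP at the safe stub */
		error = wrmsr_early_safe(msr, val);
		wrmsr_early_safe_end();		/* restore the previous IDT */
		return (error);
	}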
diff --git a/sys/amd64/include/param.h b/sys/amd64/include/param.h
index 5a9c3162e14c..0654bb9de790 100644
--- a/sys/amd64/include/param.h
+++ b/sys/amd64/include/param.h
@@ -150,6 +150,15 @@
(((va) >= kva_layout.dmap_low && (va) < kva_layout.dmap_high) || \
((va) >= kva_layout.km_low && (va) < kva_layout.km_high))
-#define SC_TABLESIZE 1024 /* Must be power of 2. */
+/*
+ * Must be power of 2.
+ *
+ * Perhaps should be autosized on boot based on found ncpus.
+ */
+#if MAXCPU > 256
+#define SC_TABLESIZE 2048
+#else
+#define SC_TABLESIZE 1024
+#endif
#endif /* !_AMD64_INCLUDE_PARAM_H_ */
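
The power-of-two constraint lets the consumer reduce its hash to a mask rather than a modulo. Schematically, in the style of the sleepqueue chain lookup that this constant sizes (shift value illustrative; the real definitions live in kern/subr_sleepqueue.c):

	#define	SC_MASK		(SC_TABLESIZE - 1)
	#define	SC_SHIFT	8
	#define	SC_HASH(wc)	((((uintptr_t)(wc)) >> SC_SHIFT) & SC_MASK)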
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 0b3daed4f69e..66d8991d36e8 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -46,6 +46,7 @@ enum vm_suspend_how {
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
VM_SUSPEND_TRIPLEFAULT,
+ VM_SUSPEND_DESTROY,
VM_SUSPEND_LAST
};
@@ -169,55 +170,63 @@ struct vm_eventinfo {
int *iptr; /* reqidle cookie */
};
-typedef int (*vmm_init_func_t)(int ipinum);
-typedef int (*vmm_cleanup_func_t)(void);
-typedef void (*vmm_suspend_func_t)(void);
-typedef void (*vmm_resume_func_t)(void);
-typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
-typedef int (*vmi_run_func_t)(void *vcpui, register_t rip,
- struct pmap *pmap, struct vm_eventinfo *info);
-typedef void (*vmi_cleanup_func_t)(void *vmi);
-typedef void * (*vmi_vcpu_init_func_t)(void *vmi, struct vcpu *vcpu,
- int vcpu_id);
-typedef void (*vmi_vcpu_cleanup_func_t)(void *vcpui);
-typedef int (*vmi_get_register_t)(void *vcpui, int num, uint64_t *retval);
-typedef int (*vmi_set_register_t)(void *vcpui, int num, uint64_t val);
-typedef int (*vmi_get_desc_t)(void *vcpui, int num, struct seg_desc *desc);
-typedef int (*vmi_set_desc_t)(void *vcpui, int num, struct seg_desc *desc);
-typedef int (*vmi_get_cap_t)(void *vcpui, int num, int *retval);
-typedef int (*vmi_set_cap_t)(void *vcpui, int num, int val);
-typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
-typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
-typedef struct vlapic * (*vmi_vlapic_init)(void *vcpui);
-typedef void (*vmi_vlapic_cleanup)(struct vlapic *vlapic);
-typedef int (*vmi_snapshot_vcpu_t)(void *vcpui, struct vm_snapshot_meta *meta);
-typedef int (*vmi_restore_tsc_t)(void *vcpui, uint64_t now);
+#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
+ typedef ret_type (*vmmops_##opname##_t) args; \
+ ret_type vmmops_##opname args
+
+DECLARE_VMMOPS_FUNC(int, modinit, (int ipinum));
+DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
+DECLARE_VMMOPS_FUNC(void, modresume, (void));
+DECLARE_VMMOPS_FUNC(void, modsuspend, (void));
+DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
+DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc,
+ struct pmap *pmap, struct vm_eventinfo *info));
+DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
+DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+ int vcpu_id));
+DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
+DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
+DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
+DECLARE_VMMOPS_FUNC(int, getdesc, (void *vcpui, int num,
+ struct seg_desc *desc));
+DECLARE_VMMOPS_FUNC(int, setdesc, (void *vcpui, int num,
+ struct seg_desc *desc));
+DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
+DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
+DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc,
+ (vm_offset_t min, vm_offset_t max));
+DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
+DECLARE_VMMOPS_FUNC(struct vlapic *, vlapic_init, (void *vcpui));
+DECLARE_VMMOPS_FUNC(void, vlapic_cleanup, (struct vlapic *vlapic));
+DECLARE_VMMOPS_FUNC(int, vcpu_snapshot, (void *vcpui,
+ struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, restore_tsc, (void *vcpui, uint64_t now));
struct vmm_ops {
- vmm_init_func_t modinit; /* module wide initialization */
- vmm_cleanup_func_t modcleanup;
- vmm_resume_func_t modsuspend;
- vmm_resume_func_t modresume;
-
- vmi_init_func_t init; /* vm-specific initialization */
- vmi_run_func_t run;
- vmi_cleanup_func_t cleanup;
- vmi_vcpu_init_func_t vcpu_init;
- vmi_vcpu_cleanup_func_t vcpu_cleanup;
- vmi_get_register_t getreg;
- vmi_set_register_t setreg;
- vmi_get_desc_t getdesc;
- vmi_set_desc_t setdesc;
- vmi_get_cap_t getcap;
- vmi_set_cap_t setcap;
- vmi_vmspace_alloc vmspace_alloc;
- vmi_vmspace_free vmspace_free;
- vmi_vlapic_init vlapic_init;
- vmi_vlapic_cleanup vlapic_cleanup;
+ vmmops_modinit_t modinit; /* module wide initialization */
+ vmmops_modcleanup_t modcleanup;
+ vmmops_modresume_t modsuspend;
+ vmmops_modresume_t modresume;
+
+ vmmops_init_t init; /* vm-specific initialization */
+ vmmops_run_t run;
+ vmmops_cleanup_t cleanup;
+ vmmops_vcpu_init_t vcpu_init;
+ vmmops_vcpu_cleanup_t vcpu_cleanup;
+ vmmops_getreg_t getreg;
+ vmmops_setreg_t setreg;
+ vmmops_getdesc_t getdesc;
+ vmmops_setdesc_t setdesc;
+ vmmops_getcap_t getcap;
+ vmmops_setcap_t setcap;
+ vmmops_vmspace_alloc_t vmspace_alloc;
+ vmmops_vmspace_free_t vmspace_free;
+ vmmops_vlapic_init_t vlapic_init;
+ vmmops_vlapic_cleanup_t vlapic_cleanup;
/* checkpoint operations */
- vmi_snapshot_vcpu_t vcpu_snapshot;
- vmi_restore_tsc_t restore_tsc;
+ vmmops_vcpu_snapshot_t vcpu_snapshot;
+ vmmops_restore_tsc_t restore_tsc;
};
extern const struct vmm_ops vmm_ops_intel;
@@ -374,7 +383,6 @@ vcpu_should_yield(struct vcpu *vcpu)
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
-struct vmspace *vm_vmspace(struct vm *vm);
struct vm_mem *vm_mem(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
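
DECLARE_VMMOPS_FUNC collapses the old per-op typedef soup into one macro that emits both the function-pointer type and a callable prototype; the callable side is satisfied by the ifuncs in vmm.c (whose DEFINE_VMMOPS_IFUNC, in a hunk below, drops `static` to match). For one op the expansion is simply:

	/* DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval))
	 * yields: */
	typedef int (*vmmops_getcap_t)(void *vcpui, int num, int *retval);
	int vmmops_getcap(void *vcpui, int num, int *retval);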
diff --git a/sys/amd64/linux/linux_sysvec.c b/sys/amd64/linux/linux_sysvec.c
index c8579c5da4ad..890cf01c46a0 100644
--- a/sys/amd64/linux/linux_sysvec.c
+++ b/sys/amd64/linux/linux_sysvec.c
@@ -857,7 +857,7 @@ linux_vdso_reloc(char *mapping, Elf_Addr offset)
}
}
-static Elf_Brandnote linux64_brandnote = {
+static const Elf_Brandnote linux64_brandnote = {
.hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
.hdr.n_descsz = 16,
.hdr.n_type = 1,
@@ -866,7 +866,7 @@ static Elf_Brandnote linux64_brandnote = {
.trans_osrel = linux_trans_osrel
};
-static Elf64_Brandinfo linux_glibc2brand = {
+static const Elf64_Brandinfo linux_glibc2brand = {
.brand = ELFOSABI_LINUX,
.machine = EM_X86_64,
.compat_3_brand = "Linux",
@@ -877,7 +877,7 @@ static Elf64_Brandinfo linux_glibc2brand = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-static Elf64_Brandinfo linux_glibc2brandshort = {
+static const Elf64_Brandinfo linux_glibc2brandshort = {
.brand = ELFOSABI_LINUX,
.machine = EM_X86_64,
.compat_3_brand = "Linux",
@@ -888,7 +888,7 @@ static Elf64_Brandinfo linux_glibc2brandshort = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-static Elf64_Brandinfo linux_muslbrand = {
+static const Elf64_Brandinfo linux_muslbrand = {
.brand = ELFOSABI_LINUX,
.machine = EM_X86_64,
.compat_3_brand = "Linux",
@@ -900,7 +900,7 @@ static Elf64_Brandinfo linux_muslbrand = {
LINUX_BI_FUTEX_REQUEUE
};
-static Elf64_Brandinfo *linux_brandlist[] = {
+static const Elf64_Brandinfo *linux_brandlist[] = {
&linux_glibc2brand,
&linux_glibc2brandshort,
&linux_muslbrand,
@@ -910,7 +910,7 @@ static Elf64_Brandinfo *linux_brandlist[] = {
static int
linux64_elf_modevent(module_t mod, int type, void *data)
{
- Elf64_Brandinfo **brandinfo;
+ const Elf64_Brandinfo **brandinfo;
int error;
struct linux_ioctl_handler **lihp;
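
As with the SYSINIT to C_SYSINIT conversions in elf_machdep.c earlier, constifying these brand structures moves them into rodata; C_SYSINIT deconsts its ident argument internally, so no cast is needed at the registration site. A minimal sketch of the pattern (example_brand is hypothetical):

	static const Elf64_Brandinfo example_brand = {
		.brand = ELFOSABI_FREEBSD,
		.machine = EM_X86_64,
		/* ...remaining fields as in the real entries above... */
	};
	C_SYSINIT(example_brand_reg, SI_SUB_EXEC, SI_ORDER_ANY,
	    (sysinit_cfunc_t)elf64_insert_brand_entry, &example_brand);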
diff --git a/sys/amd64/linux32/linux32_sysvec.c b/sys/amd64/linux32/linux32_sysvec.c
index 8fac626f9053..735ebb151017 100644
--- a/sys/amd64/linux32/linux32_sysvec.c
+++ b/sys/amd64/linux32/linux32_sysvec.c
@@ -954,7 +954,7 @@ linux_vdso_reloc(char *mapping, Elf_Addr offset)
}
}
-static Elf_Brandnote linux32_brandnote = {
+static const Elf_Brandnote linux32_brandnote = {
.hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
.hdr.n_descsz = 16, /* XXX at least 16 */
.hdr.n_type = 1,
@@ -963,7 +963,7 @@ static Elf_Brandnote linux32_brandnote = {
.trans_osrel = linux_trans_osrel
};
-static Elf32_Brandinfo linux_brand = {
+static const Elf32_Brandinfo linux_brand = {
.brand = ELFOSABI_LINUX,
.machine = EM_386,
.compat_3_brand = "Linux",
@@ -974,7 +974,7 @@ static Elf32_Brandinfo linux_brand = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-static Elf32_Brandinfo linux_glibc2brand = {
+static const Elf32_Brandinfo linux_glibc2brand = {
.brand = ELFOSABI_LINUX,
.machine = EM_386,
.compat_3_brand = "Linux",
@@ -985,7 +985,7 @@ static Elf32_Brandinfo linux_glibc2brand = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-static Elf32_Brandinfo linux_muslbrand = {
+static const Elf32_Brandinfo linux_muslbrand = {
.brand = ELFOSABI_LINUX,
.machine = EM_386,
.compat_3_brand = "Linux",
@@ -997,7 +997,7 @@ static Elf32_Brandinfo linux_muslbrand = {
LINUX_BI_FUTEX_REQUEUE
};
-static Elf32_Brandinfo *linux_brandlist[] = {
+static const Elf32_Brandinfo *linux_brandlist[] = {
&linux_brand,
&linux_glibc2brand,
&linux_muslbrand,
@@ -1007,7 +1007,7 @@ static Elf32_Brandinfo *linux_brandlist[] = {
static int
linux_elf_modevent(module_t mod, int type, void *data)
{
- Elf32_Brandinfo **brandinfo;
+ const Elf32_Brandinfo **brandinfo;
int error;
struct linux_ioctl_handler **lihp;
diff --git a/sys/amd64/vmm/intel/vmx_support.S b/sys/amd64/vmm/intel/vmx_support.S
index 130130b64541..877e377f892d 100644
--- a/sys/amd64/vmm/intel/vmx_support.S
+++ b/sys/amd64/vmm/intel/vmx_support.S
@@ -171,13 +171,11 @@ do_launch:
*/
movq %rsp, %rdi /* point %rdi back to 'vmxctx' */
movl $VMX_VMLAUNCH_ERROR, %eax
- jmp decode_inst_error
-
+ /* FALLTHROUGH */
decode_inst_error:
movl $VM_FAIL_VALID, %r11d
- jz inst_error
- movl $VM_FAIL_INVALID, %r11d
-inst_error:
+ movl $VM_FAIL_INVALID, %esi
+ cmovnzl %esi, %r11d
movl %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)
/*
diff --git a/sys/amd64/vmm/io/vioapic.c b/sys/amd64/vmm/io/vioapic.c
index 8869dc1383e6..7df6193d6dc0 100644
--- a/sys/amd64/vmm/io/vioapic.c
+++ b/sys/amd64/vmm/io/vioapic.c
@@ -130,6 +130,15 @@ vioapic_send_intr(struct vioapic *vioapic, int pin)
vector = low & IOART_INTVEC;
dest = high >> APIC_ID_SHIFT;
+ /*
+ * Ideally we'd just call lapic_intr_msi() here with the
+ * constructed MSI instead of interpreting it for ourselves.
+ * But until/unless we support emulated IOMMUs with interrupt
+ * remapping, interpretation is simple. We just need to mask
+ * in the Extended Destination ID bits for the 15-bit
+ * enlightenment (http://david.woodhou.se/ExtDestId.pdf)
+ */
+ dest |= ((high & APIC_EXT_ID_MASK) >> APIC_EXT_ID_SHIFT) << 8;
vlapic_deliver_intr(vioapic->vm, level, dest, phys, delmode, vector);
}
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index c42da02d0bf6..2ac076551165 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -163,7 +163,6 @@ struct vm {
void *rendezvous_arg; /* (x) [r] rendezvous func/arg */
vm_rendezvous_func_t rendezvous_func;
struct mtx rendezvous_mtx; /* (o) rendezvous lock */
- struct vmspace *vmspace; /* (o) guest's address space */
struct vm_mem mem; /* (i) [m+v] guest memory */
char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */
struct vcpu **vcpu; /* (o) guest vcpus */
@@ -201,7 +200,7 @@ vmmops_panic(void)
}
#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
- DEFINE_IFUNC(static, ret_type, vmmops_##opname, args) \
+ DEFINE_IFUNC(, ret_type, vmmops_##opname, args) \
{ \
if (vmm_is_intel()) \
return (vmm_ops_intel.opname); \
@@ -499,7 +498,7 @@ MODULE_VERSION(vmm, 1);
static void
vm_init(struct vm *vm, bool create)
{
- vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+ vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
vm->iommu = NULL;
vm->vioapic = vioapic_init(vm);
vm->vhpet = vhpet_init(vm);
@@ -584,7 +583,7 @@ int
vm_create(const char *name, struct vm **retvm)
{
struct vm *vm;
- struct vmspace *vmspace;
+ int error;
/*
* If vmm.ko could not be successfully initialized then don't attempt
@@ -597,14 +596,13 @@ vm_create(const char *name, struct vm **retvm)
VM_MAX_NAMELEN + 1)
return (EINVAL);
- vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48);
- if (vmspace == NULL)
- return (ENOMEM);
-
vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
+ error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
+ if (error != 0) {
+ free(vm, M_VM);
+ return (error);
+ }
strcpy(vm->name, name);
- vm->vmspace = vmspace;
- vm_mem_init(&vm->mem);
mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
@@ -685,9 +683,6 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_mem_destroy(vm);
- vmmops_vmspace_free(vm->vmspace);
- vm->vmspace = NULL;
-
free(vm->vcpu, M_VM);
sx_destroy(&vm->vcpus_init_lock);
mtx_destroy(&vm->rendezvous_mtx);
@@ -731,7 +726,7 @@ vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
vm_object_t obj;
- if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
+ if ((obj = vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa)) == NULL)
return (ENOMEM);
else
return (0);
@@ -741,19 +736,21 @@ int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
- vmm_mmio_free(vm->vmspace, gpa, len);
+ vmm_mmio_free(vm_vmspace(vm), gpa, len);
return (0);
}
static int
vm_iommu_map(struct vm *vm)
{
+ pmap_t pmap;
vm_paddr_t gpa, hpa;
struct vm_mem_map *mm;
int error, i;
sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED);
+ pmap = vmspace_pmap(vm_vmspace(vm));
for (i = 0; i < VM_MAX_MEMMAPS; i++) {
if (!vm_memseg_sysmem(vm, i))
continue;
@@ -767,7 +764,7 @@ vm_iommu_map(struct vm *vm)
mm->flags |= VM_MEMMAP_F_IOMMU;
for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
- hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
+ hpa = pmap_extract(pmap, gpa);
/*
* All mappings in the vmm vmspace must be
@@ -816,7 +813,7 @@ vm_iommu_unmap(struct vm *vm)
for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
- vmspace_pmap(vm->vmspace), gpa))),
+ vmspace_pmap(vm_vmspace(vm)), gpa))),
("vm_iommu_unmap: vm %p gpa %jx not wired",
vm, (uintmax_t)gpa));
iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
@@ -1249,7 +1246,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
("vm_handle_paging: invalid fault_type %d", ftype));
if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
- rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
+ rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm_vmspace(vm)),
vme->u.paging.gpa, ftype);
if (rv == 0) {
VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
@@ -1259,7 +1256,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
}
}
- map = &vm->vmspace->vm_map;
+ map = &vm_vmspace(vm)->vm_map;
rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
@@ -1560,7 +1557,7 @@ vm_run(struct vcpu *vcpu)
if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
return (EINVAL);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
vme = &vcpu->exitinfo;
evinfo.rptr = &vm->rendezvous_req_cpus;
evinfo.sptr = &vm->suspend;
@@ -2302,12 +2299,6 @@ vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
vcpu_unlock(vcpu);
}
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
- return (vm->vmspace);
-}
-
struct vm_mem *
vm_mem(struct vm *vm)
{
@@ -2519,7 +2510,7 @@ vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
if (vcpu->vcpuid == 0) {
vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
- vmspace_resident_count(vcpu->vm->vmspace));
+ vmspace_resident_count(vm_vmspace(vcpu->vm)));
}
}
@@ -2529,7 +2520,7 @@ vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
if (vcpu->vcpuid == 0) {
vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
- pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
+ pmap_wired_count(vmspace_pmap(vm_vmspace(vcpu->vm))));
}
}
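
With the vmspace pointer gone from struct vm, vm_vmspace() has to come from the generic memory layer that now owns the address space (note vm_mem_init() above gaining the bounds arguments). A plausible shape for the relocated accessor, assuming the handle lives in struct vm_mem — the field name below is a guess, not taken from the commit:

	struct vmspace *
	vm_vmspace(struct vm *vm)
	{
		/* Assumed field name; see dev/vmm/vmm_mem.h for the real one. */
		return (vm_mem(vm)->mem_vmspace);
	}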
diff --git a/sys/amd64/vmm/vmm_dev_machdep.c b/sys/amd64/vmm/vmm_dev_machdep.c
index d8d2b460404c..dfebc9dcadbf 100644
--- a/sys/amd64/vmm/vmm_dev_machdep.c
+++ b/sys/amd64/vmm/vmm_dev_machdep.c
@@ -48,6 +48,7 @@
#include <x86/apicreg.h>
#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_mem.h>
#include <dev/vmm/vmm_stat.h>
#include "vmm_lapic.h"
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index fd511733492e..0cae01f172ec 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -115,6 +115,11 @@ lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
* physical otherwise.
*/
dest = (addr >> 12) & 0xff;
+ /*
+ * Extended Destination ID support uses bits 5-11 of the address:
+ * http://david.woodhou.se/ExtDestId.pdf
+ */
+ dest |= ((addr >> 5) & 0x7f) << 8;
phys = ((addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
(MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG));
delmode = msg & APIC_DELMODE_MASK;
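
Run in the encoding direction, the same bit layout is easier to see: a 15-bit APIC ID keeps its low byte in the classic destination field (address bits 19:12) and carries bits 14:8 in address bits 11:5. A worked example consistent with the decode above:

	/* Encode APIC ID 0x123 (> 255), then decode as lapic_intr_msi() does. */
	uint64_t addr = 0xfee00000;
	uint32_t id = 0x123;

	addr |= (uint64_t)(id & 0xff) << 12;	/* classic destination field */
	addr |= (uint64_t)(id >> 8) << 5;	/* extended dest ID bits 14:8 */

	uint32_t dest = (addr >> 12) & 0xff;	/* low byte: 0x23 */
	dest |= ((addr >> 5) & 0x7f) << 8;	/* dest == 0x123 */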
diff --git a/sys/amd64/vmm/x86.c b/sys/amd64/vmm/x86.c
index 366f1da9f850..2e2224595ab4 100644
--- a/sys/amd64/vmm/x86.c
+++ b/sys/amd64/vmm/x86.c
@@ -48,7 +48,12 @@ SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
NULL);
-#define CPUID_VM_HIGH 0x40000000
+#define CPUID_VM_SIGNATURE 0x40000000
+#define CPUID_BHYVE_FEATURES 0x40000001
+#define CPUID_VM_HIGH CPUID_BHYVE_FEATURES
+
+/* Features advertised in CPUID_BHYVE_FEATURES %eax */
+#define CPUID_BHYVE_FEAT_EXT_DEST_ID (1UL << 0) /* MSI Extended Dest ID */
static const char bhyve_id[12] = "bhyve bhyve ";
@@ -100,7 +105,7 @@ x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
if (cpu_exthigh != 0 && func >= 0x80000000) {
if (func > cpu_exthigh)
func = cpu_exthigh;
- } else if (func >= 0x40000000) {
+ } else if (func >= CPUID_VM_SIGNATURE) {
if (func > CPUID_VM_HIGH)
func = CPUID_VM_HIGH;
} else if (func > cpu_high) {
@@ -601,13 +606,20 @@ x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
regs[3] = 0;
break;
- case 0x40000000:
+ case CPUID_VM_SIGNATURE:
regs[0] = CPUID_VM_HIGH;
bcopy(bhyve_id, &regs[1], 4);
bcopy(bhyve_id + 4, &regs[2], 4);
bcopy(bhyve_id + 8, &regs[3], 4);
break;
+ case CPUID_BHYVE_FEATURES:
+ regs[0] = CPUID_BHYVE_FEAT_EXT_DEST_ID;
+ regs[1] = 0;
+ regs[2] = 0;
+ regs[3] = 0;
+ break;
+
default:
default_leaf:
/*
diff --git a/sys/arm/allwinner/aw_gpio.c b/sys/arm/allwinner/aw_gpio.c
index 2061e38a155f..c90d61f7b45e 100644
--- a/sys/arm/allwinner/aw_gpio.c
+++ b/sys/arm/allwinner/aw_gpio.c
@@ -1162,11 +1162,12 @@ aw_gpio_attach(device_t dev)
fdt_pinctrl_register(dev, "allwinner,pins");
fdt_pinctrl_configure_tree(dev);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
goto fail;
config_intrhook_oneshot(aw_gpio_enable_bank_supply, sc);
+ bus_attach_children(dev);
return (0);
@@ -1530,6 +1531,10 @@ static device_method_t aw_gpio_methods[] = {
DEVMETHOD(device_attach, aw_gpio_attach),
DEVMETHOD(device_detach, aw_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, aw_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, aw_gpio_pic_enable_intr),
diff --git a/sys/arm/allwinner/aw_sid.c b/sys/arm/allwinner/aw_sid.c
index ba5faca33c5e..932c2f189e51 100644
--- a/sys/arm/allwinner/aw_sid.c
+++ b/sys/arm/allwinner/aw_sid.c
@@ -297,7 +297,7 @@ aw_sid_attach(device_t dev)
/* Register ourself so device can resolve who we are */
OF_device_register_xref(OF_xref_from_node(node), dev);
- for (i = 0; i < sc->sid_conf->nfuses ;i++) {\
+ for (i = 0; i < sc->sid_conf->nfuses; i++) {
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, sc->sid_conf->efuses[i].name,
diff --git a/sys/arm/allwinner/axp209.c b/sys/arm/allwinner/axp209.c
index 239ead02d0e0..ff999a0f9b9b 100644
--- a/sys/arm/allwinner/axp209.c
+++ b/sys/arm/allwinner/axp209.c
@@ -1322,7 +1322,7 @@ axp2xx_attach(device_t dev)
case AXP209:
sc->pins = axp209_pins;
sc->npins = nitems(axp209_pins);
- sc->gpiodev = gpiobus_attach_bus(dev);
+ sc->gpiodev = gpiobus_add_bus(dev);
sc->sensors = axp209_sensors;
sc->nsensors = nitems(axp209_sensors);
@@ -1333,7 +1333,7 @@ axp2xx_attach(device_t dev)
case AXP221:
sc->pins = axp221_pins;
sc->npins = nitems(axp221_pins);
- sc->gpiodev = gpiobus_attach_bus(dev);
+ sc->gpiodev = gpiobus_add_bus(dev);
sc->sensors = axp221_sensors;
sc->nsensors = nitems(axp221_sensors);
@@ -1374,6 +1374,7 @@ axp2xx_attach(device_t dev)
}
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm/allwinner/axp81x.c b/sys/arm/allwinner/axp81x.c
index fc1a168595e5..71f0c8156a0d 100644
--- a/sys/arm/allwinner/axp81x.c
+++ b/sys/arm/allwinner/axp81x.c
@@ -1609,7 +1609,8 @@ axp8xx_attach(device_t dev)
EVENTHANDLER_REGISTER(shutdown_final, axp8xx_shutdown, dev,
SHUTDOWN_PRI_LAST);
- sc->gpiodev = gpiobus_attach_bus(dev);
+ sc->gpiodev = gpiobus_add_bus(dev);
+ bus_attach_children(dev);
return (0);
}
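
The same two-step conversion recurs across the GPIO and PMIC drivers in this commit (three instances above, many more below): gpiobus_add_bus() only adds the gpiobus child, and the driver attaches all of its children explicitly once nothing else can fail. In schematic form (foo_gpio is a stand-in name):

	static int
	foo_gpio_attach(device_t dev)
	{
		struct foo_gpio_softc *sc = device_get_softc(dev);

		/* ...resources, pins, interrupt controller setup... */

		sc->sc_busdev = gpiobus_add_bus(dev);	/* add, don't attach */
		if (sc->sc_busdev == NULL)
			return (ENXIO);

		bus_attach_children(dev);	/* attach children in one pass */
		return (0);
	}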
diff --git a/sys/arm/arm/generic_timer.c b/sys/arm/arm/generic_timer.c
index a8c779dcba6d..e3ba56a6f6ac 100644
--- a/sys/arm/arm/generic_timer.c
+++ b/sys/arm/arm/generic_timer.c
@@ -231,6 +231,25 @@ get_cntxct(bool physical)
return (val);
}
+#ifdef __aarch64__
+/*
+ * Read the self-synchronized counter. These cannot be read speculatively,
+ * so they don't need an isb before them.
+ */
+static uint64_t
+get_cntxctss(bool physical)
+{
+ uint64_t val;
+
+ if (physical)
+ val = READ_SPECIALREG(CNTPCTSS_EL0_REG);
+ else
+ val = READ_SPECIALREG(CNTVCTSS_EL0_REG);
+
+ return (val);
+}
+#endif
+
static int
set_ctrl(uint32_t val, bool physical)
{
@@ -631,6 +650,7 @@ arm_tmr_attach(device_t dev)
pcell_t clock;
#endif
#ifdef __aarch64__
+ uint64_t id_aa64mmfr0_el1;
int user_phys;
#endif
int error;
@@ -641,6 +661,11 @@ arm_tmr_attach(device_t dev)
return (ENXIO);
sc->get_cntxct = &get_cntxct;
+#ifdef __aarch64__
+ if (get_kernel_reg(ID_AA64MMFR0_EL1, &id_aa64mmfr0_el1) &&
+ ID_AA64MMFR0_ECV_VAL(id_aa64mmfr0_el1) >= ID_AA64MMFR0_ECV_IMPL)
+ sc->get_cntxct = &get_cntxctss;
+#endif
#ifdef FDT
/* Get the base clock frequency */
node = ofw_bus_get_node(dev);
@@ -882,32 +907,39 @@ DELAY(int usec)
TSEXIT();
}
-static bool
+static cpu_feat_en
wfxt_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64isar2;
if (!get_kernel_reg(ID_AA64ISAR2_EL1, &id_aa64isar2))
- return (false);
- return (ID_AA64ISAR2_WFxT_VAL(id_aa64isar2) != ID_AA64ISAR2_WFxT_NONE);
+ return (FEAT_ALWAYS_DISABLE);
+ if (ID_AA64ISAR2_WFxT_VAL(id_aa64isar2) >= ID_AA64ISAR2_WFxT_IMPL)
+ return (FEAT_DEFAULT_ENABLE);
+
+ return (FEAT_ALWAYS_DISABLE);
}
-static void
+static bool
wfxt_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
u_int errata_count __unused)
{
/* will be called if wfxt_check returns true */
enable_wfxt = true;
+ return (true);
}
-static struct cpu_feat feat_wfxt = {
- .feat_name = "FEAT_WFXT",
- .feat_check = wfxt_check,
- .feat_enable = wfxt_enable,
- .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM,
-};
-DATA_SET(cpu_feat_set, feat_wfxt);
+static void
+wfxt_disabled(const struct cpu_feat *feat __unused)
+{
+ if (PCPU_GET(cpuid) == 0)
+ update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFxT_MASK, 0);
+}
+
+CPU_FEAT(feat_wfxt, "WFE and WFI instructions with timeout",
+ wfxt_check, NULL, wfxt_enable, wfxt_disabled,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_SYSTEM);
#endif
static uint32_t
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 78883296c5b7..6a0ece1e4d98 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -1246,7 +1246,7 @@ pmap_bootstrap(vm_offset_t firstaddr)
}
static void
-pmap_init_reserved_pages(void)
+pmap_init_reserved_pages(void *dummy __unused)
{
struct pcpu *pc;
vm_offset_t pages;
diff --git a/sys/arm/arm/pmu_fdt.c b/sys/arm/arm/pmu_fdt.c
index 3e733f3e1b18..dd6087652e38 100644
--- a/sys/arm/arm/pmu_fdt.c
+++ b/sys/arm/arm/pmu_fdt.c
@@ -152,7 +152,7 @@ pmu_parse_intr(device_t dev, struct pmu_softc *sc)
if (intr_is_per_cpu(sc->irq[0].res)) {
if (has_affinity) {
device_printf(dev,
- "Per CPU interupt have declared affinity\n");
+ "Per CPU interrupt have declared affinity\n");
err = ENXIO;
goto done;
}
@@ -179,7 +179,7 @@ pmu_parse_intr(device_t dev, struct pmu_softc *sc)
if (intr_is_per_cpu(sc->irq[i].res))
{
- device_printf(dev, "Unexpected per CPU interupt\n");
+ device_printf(dev, "Unexpected per CPU interrupt\n");
err = ENXIO;
goto done;
}
diff --git a/sys/arm/arm/unwind.c b/sys/arm/arm/unwind.c
index 7ad91a3e01a5..0d77074fae34 100644
--- a/sys/arm/arm/unwind.c
+++ b/sys/arm/arm/unwind.c
@@ -278,7 +278,7 @@ unwind_module_unloaded(struct linker_file *lf)
* the unwind tables might be stripped, so instead we have to use the
* _exidx_start/end symbols created by ldscript.arm.
*/
-static int
+static void
module_info_init(void *arg __unused)
{
struct linker_file thekernel;
@@ -291,8 +291,6 @@ module_info_init(void *arg __unused)
thekernel.exidx_addr = CADDR(&_exidx_start);
thekernel.exidx_size = UADDR(&_exidx_end) - UADDR(&_exidx_start);
populate_module_info(create_module_info(), &thekernel);
-
- return (0);
}
SYSINIT(unwind_init, SI_SUB_KMEM, SI_ORDER_ANY, module_info_init, NULL);
diff --git a/sys/arm/broadcom/bcm2835/bcm2835_gpio.c b/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
index 48d1d2af5abc..ff5c4043dd86 100644
--- a/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
+++ b/sys/arm/broadcom/bcm2835/bcm2835_gpio.c
@@ -840,10 +840,11 @@ bcm_gpio_attach(device_t dev)
fdt_pinctrl_register(dev, "brcm,pins");
fdt_pinctrl_configure_tree(dev);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
goto fail;
+ bus_attach_children(dev);
return (0);
fail:
@@ -1320,6 +1321,10 @@ static device_method_t bcm_gpio_methods[] = {
DEVMETHOD(device_attach, bcm_gpio_attach),
DEVMETHOD(device_detach, bcm_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, bcm_gpio_get_bus),
DEVMETHOD(gpio_pin_max, bcm_gpio_pin_max),
diff --git a/sys/arm/broadcom/bcm2835/raspberrypi_gpio.c b/sys/arm/broadcom/bcm2835/raspberrypi_gpio.c
index 5a0f5cf2b1b3..b286654c6f18 100644
--- a/sys/arm/broadcom/bcm2835/raspberrypi_gpio.c
+++ b/sys/arm/broadcom/bcm2835/raspberrypi_gpio.c
@@ -404,10 +404,11 @@ rpi_fw_gpio_attach(device_t dev)
}
}
free(names, M_OFWPROP);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
goto fail;
+ bus_attach_children(dev);
return (0);
fail:
diff --git a/sys/arm/conf/TEGRA124 b/sys/arm/conf/TEGRA124
index ad5532427eda..ff23e63f77bd 100644
--- a/sys/arm/conf/TEGRA124
+++ b/sys/arm/conf/TEGRA124
@@ -107,9 +107,9 @@ device ums # USB mouse
# Wireless NIC cards
#device wlan # 802.11 support
#device wlan_wep # 802.11 WEP support
+#device wlan_tkip # 802.11 TKIP support
#device wlan_ccmp # 802.11 CCMP support
#device wlan_gcmp # 802.11 GCMP support
-#device wlan_tkip # 802.11 TKIP support
#device wlan_amrr # AMRR transmit rate control algorithm
# PCI
diff --git a/sys/arm/freescale/imx/imx_gpio.c b/sys/arm/freescale/imx/imx_gpio.c
index 7610d28af90e..60b8d79ab27e 100644
--- a/sys/arm/freescale/imx/imx_gpio.c
+++ b/sys/arm/freescale/imx/imx_gpio.c
@@ -861,13 +861,14 @@ imx51_gpio_attach(device_t dev)
gpio_pic_register_isrcs(sc);
intr_pic_register(dev, OF_xref_from_node(ofw_bus_get_node(dev)));
#endif
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
imx51_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -917,6 +918,10 @@ static device_method_t imx51_gpio_methods[] = {
DEVMETHOD(device_detach, imx51_gpio_detach),
#ifdef INTRNG
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, gpio_pic_enable_intr),
diff --git a/sys/arm/freescale/vybrid/vf_gpio.c b/sys/arm/freescale/vybrid/vf_gpio.c
index c81524a8a27e..b4e1ba9af586 100644
--- a/sys/arm/freescale/vybrid/vf_gpio.c
+++ b/sys/arm/freescale/vybrid/vf_gpio.c
@@ -147,13 +147,14 @@ vf_gpio_attach(device_t dev)
"vf_gpio%d.%d", device_get_unit(dev), i);
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
bus_release_resources(dev, vf_gpio_spec, sc->res);
mtx_destroy(&sc->sc_mtx);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
index f3313b136656..f66953710615 100644
--- a/sys/arm/include/atomic.h
+++ b/sys/arm/include/atomic.h
@@ -1103,11 +1103,9 @@ atomic_thread_fence_seq_cst(void)
#define atomic_store_rel_int atomic_store_rel_32
#define atomic_swap_int atomic_swap_32
-/*
- * For:
- * - atomic_load_acq_8
- * - atomic_load_acq_16
- */
#include <sys/_atomic_subword.h>
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+
#endif /* _MACHINE_ATOMIC_H_ */
diff --git a/sys/arm/mv/a37x0_gpio.c b/sys/arm/mv/a37x0_gpio.c
index 86110ff87ab1..754663d2991e 100644
--- a/sys/arm/mv/a37x0_gpio.c
+++ b/sys/arm/mv/a37x0_gpio.c
@@ -291,10 +291,11 @@ a37x0_gpio_attach(device_t dev)
if (sc->sc_npins > sc->sc_max_pins)
return (ENXIO);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
return (ENXIO);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm/mv/gpio.c b/sys/arm/mv/gpio.c
index 934c00236153..b3c2314fb2d6 100644
--- a/sys/arm/mv/gpio.c
+++ b/sys/arm/mv/gpio.c
@@ -340,7 +340,7 @@ mv_gpio_attach(device_t dev)
if (rv != 0)
return (rv);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
mtx_destroy(&sc->mutex);
bus_release_resource(dev, SYS_RES_IRQ,
@@ -348,6 +348,7 @@ mv_gpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm/mv/mvebu_gpio.c b/sys/arm/mv/mvebu_gpio.c
index 7acdfff539dc..c27d5a204052 100644
--- a/sys/arm/mv/mvebu_gpio.c
+++ b/sys/arm/mv/mvebu_gpio.c
@@ -804,12 +804,13 @@ mvebu_gpio_attach(device_t dev)
}
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
mvebu_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -838,6 +839,10 @@ static device_method_t mvebu_gpio_methods[] = {
DEVMETHOD(device_attach, mvebu_gpio_attach),
DEVMETHOD(device_detach, mvebu_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, mvebu_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, mvebu_gpio_pic_enable_intr),
diff --git a/sys/arm/nvidia/tegra_gpio.c b/sys/arm/nvidia/tegra_gpio.c
index e37fd69a121e..ce24fccd3a40 100644
--- a/sys/arm/nvidia/tegra_gpio.c
+++ b/sys/arm/nvidia/tegra_gpio.c
@@ -818,12 +818,13 @@ tegra_gpio_attach(device_t dev)
return (ENXIO);
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
tegra_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -852,6 +853,10 @@ static device_method_t tegra_gpio_methods[] = {
DEVMETHOD(device_attach, tegra_gpio_attach),
DEVMETHOD(device_detach, tegra_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, tegra_gpio_pic_disable_intr),
DEVMETHOD(pic_enable_intr, tegra_gpio_pic_enable_intr),
diff --git a/sys/arm/ti/ti_gpio.c b/sys/arm/ti/ti_gpio.c
index aceb3d63204e..b7e9909b8548 100644
--- a/sys/arm/ti/ti_gpio.c
+++ b/sys/arm/ti/ti_gpio.c
@@ -674,12 +674,13 @@ ti_gpio_attach(device_t dev)
}
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
ti_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -1047,6 +1048,10 @@ static device_method_t ti_gpio_methods[] = {
DEVMETHOD(device_attach, ti_gpio_attach),
DEVMETHOD(device_detach, ti_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, ti_gpio_get_bus),
DEVMETHOD(gpio_pin_max, ti_gpio_pin_max),
diff --git a/sys/arm/xilinx/zy7_gpio.c b/sys/arm/xilinx/zy7_gpio.c
index 71b6fc3c0586..2434e43bf27c 100644
--- a/sys/arm/xilinx/zy7_gpio.c
+++ b/sys/arm/xilinx/zy7_gpio.c
@@ -441,12 +441,13 @@ zy7_gpio_attach(device_t dev)
return (ENOMEM);
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
zy7_gpio_detach(dev);
return (ENOMEM);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm64/apple/apple_pinctrl.c b/sys/arm64/apple/apple_pinctrl.c
index ebaaccea1d99..c28b1c62d78c 100644
--- a/sys/arm64/apple/apple_pinctrl.c
+++ b/sys/arm64/apple/apple_pinctrl.c
@@ -171,12 +171,13 @@ apple_pinctrl_attach(device_t dev)
OF_xref_from_node(ofw_bus_get_node(dev)));
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "failed to attach gpiobus\n");
goto error;
}
+ bus_attach_children(dev);
return (0);
error:
mtx_destroy(&sc->sc_mtx);
diff --git a/sys/arm64/arm64/cpu_errata.c b/sys/arm64/arm64/cpu_errata.c
index 989924bc0567..b876703a2a15 100644
--- a/sys/arm64/arm64/cpu_errata.c
+++ b/sys/arm64/arm64/cpu_errata.c
@@ -52,56 +52,11 @@ struct cpu_quirks {
u_int flags;
};
-static enum {
- SSBD_FORCE_ON,
- SSBD_FORCE_OFF,
- SSBD_KERNEL,
-} ssbd_method = SSBD_KERNEL;
-
-static cpu_quirk_install install_psci_bp_hardening;
-static cpu_quirk_install install_ssbd_workaround;
static cpu_quirk_install install_thunderx_bcast_tlbi_workaround;
static struct cpu_quirks cpu_quirks[] = {
{
.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
- .midr_value =
- CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
- .quirk_install = install_psci_bp_hardening,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = 0,
- .midr_value = 0,
- .quirk_install = install_ssbd_workaround,
- .flags = CPU_QUIRK_POST_DEVICE,
- },
- {
- .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
.midr_value =
CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0),
.quirk_install = install_thunderx_bcast_tlbi_workaround,
@@ -114,57 +69,6 @@ static struct cpu_quirks cpu_quirks[] = {
},
};
-static void
-install_psci_bp_hardening(void)
-{
- /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
- if (!psci_present)
- return;
-
- if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) != SMCCC_RET_SUCCESS)
- return;
-
- PCPU_SET(bp_harden, smccc_arch_workaround_1);
-}
-
-static void
-install_ssbd_workaround(void)
-{
- char *env;
-
- if (PCPU_GET(cpuid) == 0) {
- env = kern_getenv("kern.cfg.ssbd");
- if (env != NULL) {
- if (strcmp(env, "force-on") == 0) {
- ssbd_method = SSBD_FORCE_ON;
- } else if (strcmp(env, "force-off") == 0) {
- ssbd_method = SSBD_FORCE_OFF;
- }
- }
- }
-
- /* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
- if (!psci_present)
- return;
-
- /* Enable the workaround on this CPU if it's enabled in the firmware */
- if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
- return;
-
- switch(ssbd_method) {
- case SSBD_FORCE_ON:
- smccc_arch_workaround_2(1);
- break;
- case SSBD_FORCE_OFF:
- smccc_arch_workaround_2(0);
- break;
- case SSBD_KERNEL:
- default:
- PCPU_SET(ssbd, smccc_arch_workaround_2);
- break;
- }
-}
-
/*
* Workaround Cavium erratum 27456.
*
diff --git a/sys/arm64/arm64/cpu_feat.c b/sys/arm64/arm64/cpu_feat.c
index cc262394913d..94114d47f846 100644
--- a/sys/arm64/arm64/cpu_feat.c
+++ b/sys/arm64/arm64/cpu_feat.c
@@ -32,16 +32,21 @@
#include <machine/cpu.h>
#include <machine/cpu_feat.h>
+SYSCTL_NODE(_hw, OID_AUTO, feat, CTLFLAG_RD, 0, "CPU features/errata");
+
/* TODO: Make this a list if we ever grow a callback other than smccc_errata */
static cpu_feat_errata_check_fn cpu_feat_check_cb = NULL;
void
enable_cpu_feat(uint32_t stage)
{
+ char tunable[32];
struct cpu_feat **featp, *feat;
uint32_t midr;
u_int errata_count, *errata_list;
cpu_feat_errata errata_status;
+ cpu_feat_en check_status;
+ bool val;
MPASS((stage & ~CPU_FEAT_STAGE_MASK) == 0);
@@ -49,6 +54,21 @@ enable_cpu_feat(uint32_t stage)
SET_FOREACH(featp, cpu_feat_set) {
feat = *featp;
+ /* Read any tunable the user may have set */
+ if (stage == CPU_FEAT_EARLY_BOOT && PCPU_GET(cpuid) == 0) {
+ snprintf(tunable, sizeof(tunable), "hw.feat.%s",
+ feat->feat_name);
+ if (TUNABLE_BOOL_FETCH(tunable, &val)) {
+ if (val) {
+ feat->feat_flags |=
+ CPU_FEAT_USER_ENABLED;
+ } else {
+ feat->feat_flags |=
+ CPU_FEAT_USER_DISABLED;
+ }
+ }
+ }
+
/* Run the enablement code at the correct stage of boot */
if ((feat->feat_flags & CPU_FEAT_STAGE_MASK) != stage)
continue;
@@ -58,8 +78,26 @@ enable_cpu_feat(uint32_t stage)
PCPU_GET(cpuid) != 0)
continue;
- if (feat->feat_check != NULL && !feat->feat_check(feat, midr))
- continue;
+ if (feat->feat_check != NULL) {
+ check_status = feat->feat_check(feat, midr);
+ } else {
+ check_status = FEAT_DEFAULT_ENABLE;
+ }
+ /* Ignore features that are not present */
+ if (check_status == FEAT_ALWAYS_DISABLE)
+ goto next;
+
+ /* The user disabled the feature */
+ if ((feat->feat_flags & CPU_FEAT_USER_DISABLED) != 0)
+ goto next;
+
+ /*
+ * The feature was disabled by default and the user
+ * didn't enable it, so skip it.
+ */
+ if (check_status == FEAT_DEFAULT_DISABLE &&
+ (feat->feat_flags & CPU_FEAT_USER_ENABLED) == 0)
+ goto next;
/*
* Check if the feature has any errata that may need a
@@ -97,8 +135,13 @@ enable_cpu_feat(uint32_t stage)
/* Shouldn't be possible */
MPASS(errata_status != ERRATA_UNKNOWN);
- feat->feat_enable(feat, errata_status, errata_list,
- errata_count);
+ if (feat->feat_enable(feat, errata_status, errata_list,
+ errata_count))
+ feat->feat_enabled = true;
+
+next:
+ if (!feat->feat_enabled && feat->feat_disabled != NULL)
+ feat->feat_disabled(feat);
}
}
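
The practical upshot is a per-feature boot-time switch: on CPU 0 at the early-boot stage, each feature fetches hw.feat.<feat_name> and records the user's choice, which can veto a default-enabled feature or opt in to a default-disabled one; a feature that ends up disabled gets its feat_disabled callback. For example, in /boot/loader.conf (assuming the registered name matches the CPU_FEAT() identifier, e.g. feat_wfxt from generic_timer.c above; the exact spelling depends on what CPU_FEAT() stores in feat_name):

	hw.feat.feat_wfxt="0"	# force the WFxT feature off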
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 0301eb91c9ef..bde0d4f784dc 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -106,7 +106,8 @@ efi_1t1_l3(vm_offset_t va)
if (*l0 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
+ *l0 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+ TATTR_AP_TABLE_NO_EL0 | L0_TABLE;
} else {
mphys = PTE_TO_PHYS(*l0);
}
@@ -117,7 +118,8 @@ efi_1t1_l3(vm_offset_t va)
if (*l1 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
+ *l1 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+ TATTR_AP_TABLE_NO_EL0 | L1_TABLE;
} else {
mphys = PTE_TO_PHYS(*l1);
}
@@ -128,7 +130,8 @@ efi_1t1_l3(vm_offset_t va)
if (*l2 == 0) {
m = efi_1t1_page();
mphys = VM_PAGE_TO_PHYS(m);
- *l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
+ *l2 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+ TATTR_AP_TABLE_NO_EL0 | L2_TABLE;
} else {
mphys = PTE_TO_PHYS(*l2);
}
@@ -218,8 +221,9 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
p->md_phys, mode, p->md_pages);
}
- l3_attr = ATTR_AF | pmap_sh_attr | ATTR_S1_IDX(mode) |
- ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
+ l3_attr = ATTR_S1_UXN | ATTR_AF | pmap_sh_attr |
+ ATTR_S1_IDX(mode) | ATTR_S1_AP(ATTR_S1_AP_RW) |
+ ATTR_S1_nG | L3_PAGE;
if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
l3_attr |= ATTR_S1_XN;
diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c
index 7cd5327b9f1b..8f8a934ad520 100644
--- a/sys/arm64/arm64/elf32_machdep.c
+++ b/sys/arm64/arm64/elf32_machdep.c
@@ -195,7 +195,7 @@ freebsd32_fetch_syscall_args(struct thread *td)
register_t *ap;
struct syscall_args *sa;
int error, i, nap, narg;
- unsigned int args[4];
+ unsigned int args[6];
nap = 4;
p = td->td_proc;
@@ -225,7 +225,7 @@ freebsd32_fetch_syscall_args(struct thread *td)
sa->args[i] = ap[i];
if (narg > nap) {
if (narg - nap > nitems(args))
- panic("Too many system call arguiments");
+ panic("Too many system call arguments");
error = copyin((void *)td->td_frame->tf_x[13], args,
(narg - nap) * sizeof(int));
if (error != 0)
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
index 13af5c5065d6..207b37180a26 100644
--- a/sys/arm64/arm64/elf_machdep.c
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -121,7 +121,7 @@ static struct sysentvec elf64_freebsd_sysvec = {
};
INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec);
-static Elf64_Brandinfo freebsd_brand_info = {
+static const Elf64_Brandinfo freebsd_brand_info = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_AARCH64,
.compat_3_brand = "FreeBSD",
@@ -131,8 +131,7 @@ static Elf64_Brandinfo freebsd_brand_info = {
.brand_note = &elf64_freebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-
-SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+C_SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
(sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info);
static bool
@@ -336,7 +335,7 @@ elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused)
return (0);
}
-static Elf_Note gnu_property_note = {
+static const Elf_Note gnu_property_note = {
.n_namesz = sizeof(GNU_ABI_VENDOR),
.n_descsz = 16,
.n_type = NT_GNU_PROPERTY_TYPE_0,
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
index 13095def8b00..5a4181348a54 100644
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -42,10 +42,9 @@
*/
.macro save_registers_head el
.if \el == 1
- mov x18, sp
- stp x0, x1, [sp, #(TF_X - TF_SIZE - 128)]!
+ stp x0, x1, [sp, #-(TF_SIZE - TF_X + 128)]!
.else
- stp x0, x1, [sp, #(TF_X - TF_SIZE)]!
+ stp x0, x1, [sp, #-(TF_SIZE - TF_X)]!
.endif
stp x2, x3, [sp, #(2 * 8)]
stp x4, x5, [sp, #(4 * 8)]
@@ -61,7 +60,9 @@
stp x24, x25, [sp, #(24 * 8)]
stp x26, x27, [sp, #(26 * 8)]
stp x28, x29, [sp, #(28 * 8)]
-.if \el == 0
+.if \el == 1
+ add x18, sp, #(TF_SIZE - TF_X + 128)
+.else
mrs x18, sp_el0
.endif
mrs x10, elr_el1
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index bcacea43ad2f..2d07420bcdb0 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -232,6 +232,10 @@ static const struct cpu_parts cpu_parts_arm[] = {
{ CPU_PART_CORTEX_X2, "Cortex-X2" },
{ CPU_PART_CORTEX_X3, "Cortex-X3" },
{ CPU_PART_CORTEX_X4, "Cortex-X4" },
+ { CPU_PART_C1_NANO, "C1-Nano" },
+ { CPU_PART_C1_PRO, "C1-Pro" },
+ { CPU_PART_C1_PREMIUM, "C1-Premium" },
+ { CPU_PART_C1_ULTRA, "C1-Ultra" },
{ CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
{ CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
@@ -2272,37 +2276,25 @@ static const struct mrs_user_reg user_regs[] = {
static bool
user_ctr_has_neoverse_n1_1542419(uint32_t midr, uint64_t ctr)
{
- /* Skip non-Neoverse-N1 */
- if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
- CPU_PART_NEOVERSE_N1, 0, 0))
- return (false);
-
- switch (CPU_VAR(midr)) {
- default:
- break;
- case 4:
- /* Fixed in r4p1 */
- if (CPU_REV(midr) > 0)
- break;
- /* FALLTHROUGH */
- case 3:
- /* If DIC is enabled (coherent icache) then we are affected */
- return (CTR_DIC_VAL(ctr) != 0);
- }
-
- return (false);
+ /*
+ * Neoverse-N1 erratum 1542419
+ * Present in r3p0 - r4p0
+ * Fixed in r4p1
+ */
+ return (midr_check_var_part_range(midr, CPU_IMPL_ARM,
+ CPU_PART_NEOVERSE_N1, 3, 0, 4, 0) && CTR_DIC_VAL(ctr) != 0);
}
-static bool
-user_ctr_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+static cpu_feat_en
+user_ctr_check(const struct cpu_feat *feat __unused, u_int midr)
{
if (emulate_ctr)
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
if (user_ctr_has_neoverse_n1_1542419(midr, READ_SPECIALREG(ctr_el0)))
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
- return (false);
+ return (FEAT_ALWAYS_DISABLE);
}
static bool
@@ -2320,7 +2312,7 @@ user_ctr_has_errata(const struct cpu_feat *feat __unused, u_int midr,
return (false);
}
-static void
+static bool
user_ctr_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status, u_int *errata_list, u_int errata_count)
{
@@ -2356,16 +2348,13 @@ user_ctr_enable(const struct cpu_feat *feat __unused,
WRITE_SPECIALREG(sctlr_el1,
READ_SPECIALREG(sctlr_el1) & ~SCTLR_UCT);
isb();
+
+ return (true);
}
-static struct cpu_feat user_ctr = {
- .feat_name = "Trap CTR_EL0",
- .feat_check = user_ctr_check,
- .feat_has_errata = user_ctr_has_errata,
- .feat_enable = user_ctr_enable,
- .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, user_ctr);
+CPU_FEAT(trap_ctr, "Trap CTR_EL0",
+ user_ctr_check, user_ctr_has_errata, user_ctr_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
static bool
user_ctr_handler(uint64_t esr, struct trapframe *frame)
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index 4a10a2b4f2d3..d35e334905a7 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -39,6 +39,23 @@
#define VIRT_BITS 48
+/*
+ * Loads a 64-bit value into reg using 1 to 4 mov/movk instructions.
+ * This can be used early on when we don't know the CPU's endianness.
+ */
+.macro mov_q reg, val
+ mov \reg, :abs_g0_nc:\val
+.if (\val >> 16) & 0xffff != 0
+ movk \reg, :abs_g1_nc:\val
+.endif
+.if (\val >> 32) & 0xffff != 0
+ movk \reg, :abs_g2_nc:\val
+.endif
+.if (\val >> 48) & 0xffff != 0
+ movk \reg, :abs_g3:\val
+.endif
+.endm
+
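With SCTLR_EL2_RES1 being 0x30c50830, for example, only the low two 16-bit fields of the constant are non-zero, so the macro assembles to two instructions (expansion shown for illustration):

	mov_q	x2, 0x30c50830
	/* assembles to: */
	movz	x2, #0x0830		/* :abs_g0_nc: bits [15:0] */
	movk	x2, #0x30c5, lsl #16	/* :abs_g1_nc: bits [31:16] */
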
#if PAGE_SIZE == PAGE_SIZE_16K
/*
* The number of level 3 tables to create. 32 will allow for 1G of address
@@ -324,15 +341,23 @@ LENTRY(enter_kernel_el)
cmp x23, #(CURRENTEL_EL_EL2)
b.eq 1f
- ldr x2, =SCTLR_MMU_OFF
+ /*
+ * Ensure there are no memory operations here. If the boot loader
+ * enters the kernel in big-endian mode then loading sctlr will
+	 * be incorrect. As instruction encodings are the same in either
+	 * endianness it is safe to use mov instructions.
+ */
+ mov_q x2, SCTLR_MMU_OFF
msr sctlr_el1, x2
- /* SCTLR_EOS is set so eret is a context synchronizing event so we
+ /*
+ * SCTLR_EOS is set to make eret a context synchronizing event. We
* need an isb here to ensure it's observed by later instructions,
* but don't need it in the eret below.
*/
isb
- /* Ensure SPSR_EL1 and pstate are in sync. The only wat to set the
+ /*
+ * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
* latter is to set the former and return from an exception with eret.
*/
mov x2, #(PSR_DAIF | PSR_M_EL1h)
@@ -346,11 +371,19 @@ LENTRY(enter_kernel_el)
* Set just the reserved bits in sctlr_el2. This will disable the
* MMU which may have broken the kernel if we enter the kernel in
* EL2, e.g. when using VHE.
+ *
+	 * As with sctlr_el1 above, use mov instructions to ensure there are
+ * no memory operations.
*/
- ldr x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
+ mov_q x2, (SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
msr sctlr_el2, x2
isb
+ /*
+ * The hardware is now in little-endian mode so memory operations
+ * are safe.
+ */
+
/* Configure the Hypervisor */
ldr x2, =(HCR_RW | HCR_APK | HCR_API)
msr hcr_el2, x2
@@ -385,7 +418,7 @@ LENTRY(enter_kernel_el)
msr SCTLR_EL12_REG, x2
mov x2, xzr /* CPTR_EL2 is managed by vfp.c */
- ldr x3, =(CNTHCTL_E2H_EL1PCTEN | CNTHCTL_E2H_EL1PTEN)
+ ldr x3, =(CNTHCTL_E2H_EL1PCTEN_NOTRAP | CNTHCTL_E2H_EL1PTEN_NOTRAP)
ldr x5, =(PSR_DAIF | PSR_M_EL2h)
b .Ldone_vhe
@@ -396,7 +429,7 @@ LENTRY(enter_kernel_el)
msr vbar_el2, x2
ldr x2, =(CPTR_RES1)
- ldr x3, =(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
+ ldr x3, =(CNTHCTL_EL1PCTEN_NOTRAP | CNTHCTL_EL1PCEN_NOTRAP)
ldr x5, =(PSR_DAIF | PSR_M_EL1h)
.Ldone_vhe:
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 53856dd90cae..322bad273a08 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -173,16 +173,20 @@ SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
#endif
-static bool
+static cpu_feat_en
pan_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64mfr1;
- id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- return (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE);
+ if (!get_kernel_reg(ID_AA64MMFR1_EL1, &id_aa64mfr1))
+ return (FEAT_ALWAYS_DISABLE);
+ if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) == ID_AA64MMFR1_PAN_NONE)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
}
-static void
+static bool
pan_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
u_int errata_count __unused)
@@ -200,15 +204,20 @@ pan_enable(const struct cpu_feat *feat __unused,
".arch_extension pan \n"
"msr pan, #1 \n"
".arch_extension nopan \n");
+
+ return (true);
}
-static struct cpu_feat feat_pan = {
- .feat_name = "FEAT_PAN",
- .feat_check = pan_check,
- .feat_enable = pan_enable,
- .feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, feat_pan);
+static void
+pan_disabled(const struct cpu_feat *feat __unused)
+{
+ if (PCPU_GET(cpuid) == 0)
+ update_special_reg(ID_AA64MMFR1_EL1, ID_AA64MMFR1_PAN_MASK, 0);
+}
+
+CPU_FEAT(feat_pan, "Privileged access never",
+ pan_check, NULL, pan_enable, pan_disabled,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
bool
has_hyp(void)
@@ -857,7 +866,7 @@ initarm(struct arm64_bootparams *abp)
cninit();
set_ttbr0(abp->kern_ttbr0);
- cpu_tlb_flushID();
+ pmap_s1_invalidate_all_kernel();
if (!valid)
panic("Invalid bus configuration: %s",
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index ec89c4573799..dbf5c820d20b 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -190,6 +190,8 @@ pt_entry_t __read_mostly pmap_gp_attr;
#define PMAP_SAN_PTE_BITS (ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+static bool __read_mostly pmap_multiple_tlbi = false;
+
struct pmap_large_md_page {
struct rwlock pv_lock;
struct md_page pv_page;
@@ -1297,7 +1299,7 @@ pmap_bootstrap_dmap(vm_size_t kernlen)
}
}
- cpu_tlb_flushID();
+ pmap_s1_invalidate_all_kernel();
bs_state.dmap_valid = true;
@@ -1399,7 +1401,7 @@ pmap_bootstrap(void)
/* And the l3 tables for the early devmap */
pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
- cpu_tlb_flushID();
+ pmap_s1_invalidate_all_kernel();
#define alloc_pages(var, np) \
(var) = bs_state.freemempos; \
@@ -1656,14 +1658,17 @@ pmap_init_pv_table(void)
}
}
-static bool
+static cpu_feat_en
pmap_dbm_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t id_aa64mmfr1;
id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- return (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
- ID_AA64MMFR1_HAFDBS_AF_DBS);
+ if (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
+ ID_AA64MMFR1_HAFDBS_AF_DBS)
+ return (FEAT_DEFAULT_ENABLE);
+
+ return (FEAT_ALWAYS_DISABLE);
}
static bool
@@ -1671,8 +1676,8 @@ pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
u_int **errata_list, u_int *errata_count)
{
/* Disable on Cortex-A55 for erratum 1024718 - all revisions */
- if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
- CPU_PART_CORTEX_A55, 0, 0)) {
+ if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
+ CPU_PART(midr) == CPU_PART_CORTEX_A55) {
static u_int errata_id = 1024718;
*errata_list = &errata_id;
@@ -1681,21 +1686,19 @@ pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
}
/* Disable on Cortex-A510 for erratum 2051678 - r0p0 to r0p2 */
- if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_VAR_MASK,
- CPU_IMPL_ARM, CPU_PART_CORTEX_A510, 0, 0)) {
- if (CPU_REV(PCPU_GET(midr)) < 3) {
- static u_int errata_id = 2051678;
+ if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
+ 0, 0, 0, 2)) {
+ static u_int errata_id = 2051678;
- *errata_list = &errata_id;
- *errata_count = 1;
- return (true);
- }
+ *errata_list = &errata_id;
+ *errata_count = 1;
+ return (true);
}
return (false);
}
-static void
+static bool
pmap_dbm_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status, u_int *errata_list __unused,
u_int errata_count)
@@ -1704,7 +1707,7 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
/* Skip if there is an erratum affecting DBM */
if (errata_status != ERRATA_NONE)
- return;
+ return (false);
tcr = READ_SPECIALREG(tcr_el1) | TCR_HD;
WRITE_SPECIALREG(tcr_el1, tcr);
@@ -1714,16 +1717,58 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
__asm __volatile("tlbi vmalle1");
dsb(nsh);
isb();
+
+ return (true);
}
-static struct cpu_feat feat_dbm = {
- .feat_name = "FEAT_HAFDBS (DBM)",
- .feat_check = pmap_dbm_check,
- .feat_has_errata = pmap_dbm_has_errata,
- .feat_enable = pmap_dbm_enable,
- .feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, feat_dbm);
+CPU_FEAT(feat_hafdbs, "Hardware management of the Access flag and dirty state",
+ pmap_dbm_check, pmap_dbm_has_errata, pmap_dbm_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+
+static cpu_feat_en
+pmap_multiple_tlbi_check(const struct cpu_feat *feat __unused, u_int midr)
+{
+ /*
+ * Cortex-A55 erratum 2441007 (Cat B rare)
+ * Present in all revisions
+ */
+ if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
+ CPU_PART(midr) == CPU_PART_CORTEX_A55)
+ return (FEAT_DEFAULT_DISABLE);
+
+ /*
+ * Cortex-A76 erratum 1286807 (Cat B rare)
+ * Present in r0p0 - r3p0
+ * Fixed in r3p1
+ */
+ if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A76,
+ 0, 0, 3, 0))
+ return (FEAT_DEFAULT_DISABLE);
+
+ /*
+ * Cortex-A510 erratum 2441009 (Cat B rare)
+ * Present in r0p0 - r1p1
+ * Fixed in r1p2
+ */
+ if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
+ 0, 0, 1, 1))
+ return (FEAT_DEFAULT_DISABLE);
+
+ return (FEAT_ALWAYS_DISABLE);
+}
+
+static bool
+pmap_multiple_tlbi_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ pmap_multiple_tlbi = true;
+ return (true);
+}
+
+CPU_FEAT(errata_multi_tlbi, "Multiple TLBI errata",
+ pmap_multiple_tlbi_check, NULL, pmap_multiple_tlbi_enable, NULL,
+ CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
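
All three errata above are Arm Category B (rare), so the check reports FEAT_DEFAULT_DISABLE rather than FEAT_DEFAULT_ENABLE: the double-TLBI workaround stays off unless the user opts in on an affected part, e.g. with a loader.conf line (hypothetical setting):

	hw.feat.errata_multi_tlbi="1"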
/*
* Initialize the pmap module.
@@ -1878,9 +1923,17 @@ pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
r = TLBI_VA(va);
if (pmap == kernel_pmap) {
pmap_s1_invalidate_kernel(r, final_only);
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ pmap_s1_invalidate_kernel(r, final_only);
+ }
} else {
r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
pmap_s1_invalidate_user(r, final_only);
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ pmap_s1_invalidate_user(r, final_only);
+ }
}
dsb(ish);
isb();
@@ -1922,12 +1975,24 @@ pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
end = TLBI_VA(eva);
for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_kernel(r, final_only);
+
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ for (r = start; r < end; r += TLBI_VA(stride))
+ pmap_s1_invalidate_kernel(r, final_only);
+ }
} else {
start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
start |= TLBI_VA(sva);
end |= TLBI_VA(eva);
for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_user(r, final_only);
+
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ for (r = start; r < end; r += TLBI_VA(stride))
+ pmap_s1_invalidate_user(r, final_only);
+ }
}
dsb(ish);
isb();
@@ -1963,6 +2028,19 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pmap_s2_invalidate_range(pmap, sva, eva, final_only);
}
+void
+pmap_s1_invalidate_all_kernel(void)
+{
+ dsb(ishst);
+ __asm __volatile("tlbi vmalle1is");
+ dsb(ish);
+ if (pmap_multiple_tlbi) {
+ __asm __volatile("tlbi vmalle1is");
+ dsb(ish);
+ }
+ isb();
+}
+
/*
* Invalidates all cached intermediate- and final-level TLB entries for the
* given virtual address space.
@@ -1977,9 +2055,17 @@ pmap_s1_invalidate_all(pmap_t pmap)
dsb(ishst);
if (pmap == kernel_pmap) {
__asm __volatile("tlbi vmalle1is");
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi vmalle1is");
+ }
} else {
r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
__asm __volatile("tlbi aside1is, %0" : : "r" (r));
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi aside1is, %0" : : "r" (r));
+ }
}
dsb(ish);
isb();
@@ -7967,7 +8053,7 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
pa += L2_SIZE;
}
if ((old_l2e & ATTR_DESCR_VALID) != 0)
- pmap_s1_invalidate_all(kernel_pmap);
+ pmap_s1_invalidate_all_kernel();
else {
/*
* Because the old entries were invalid and the new
@@ -8058,7 +8144,7 @@ pmap_unmapbios(void *p, vm_size_t size)
}
}
if (preinit_map) {
- pmap_s1_invalidate_all(kernel_pmap);
+ pmap_s1_invalidate_all_kernel();
return;
}
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
index dbe0c69b8d60..ab40b72887e9 100644
--- a/sys/arm64/arm64/ptrauth.c
+++ b/sys/arm64/arm64/ptrauth.c
@@ -82,7 +82,7 @@ ptrauth_disable(void)
return (false);
}
-static bool
+static cpu_feat_en
ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
uint64_t isar;
@@ -97,11 +97,11 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
if (!pac_enable) {
if (boothowto & RB_VERBOSE)
printf("Pointer authentication is disabled\n");
- goto out;
+ return (FEAT_ALWAYS_DISABLE);
}
if (ptrauth_disable())
- goto out;
+ return (FEAT_ALWAYS_DISABLE);
/*
* This assumes if there is pointer authentication on the boot CPU
@@ -116,32 +116,21 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
if (get_kernel_reg(ID_AA64ISAR1_EL1, &isar)) {
if (ID_AA64ISAR1_APA_VAL(isar) > 0 ||
ID_AA64ISAR1_API_VAL(isar) > 0) {
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
}
}
/* The QARMA3 algorithm is reported in ID_AA64ISAR2_EL1. */
if (get_kernel_reg(ID_AA64ISAR2_EL1, &isar)) {
if (ID_AA64ISAR2_APA3_VAL(isar) > 0) {
- return (true);
+ return (FEAT_DEFAULT_ENABLE);
}
}
-out:
- /*
- * Pointer authentication may be disabled, mask out the ID fields we
- * expose to userspace and the rest of the kernel so they don't try
- * to use it.
- */
- update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
- ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
- ID_AA64ISAR1_GPI_MASK, 0);
- update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
-
- return (false);
+ return (FEAT_ALWAYS_DISABLE);
}
-static void
+static bool
ptrauth_enable(const struct cpu_feat *feat __unused,
cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
u_int errata_count __unused)
@@ -153,16 +142,30 @@ ptrauth_enable(const struct cpu_feat *feat __unused,
elf64_addr_mask_14.code |= PAC_ADDR_MASK_14;
elf64_addr_mask_14.data |= PAC_ADDR_MASK_14;
#endif
+
+ return (true);
}
+static void
+ptrauth_disabled(const struct cpu_feat *feat __unused)
+{
+ /*
+ * Pointer authentication may be disabled, mask out the ID fields we
+ * expose to userspace and the rest of the kernel so they don't try
+ * to use it.
+ */
+ if (PCPU_GET(cpuid) == 0) {
+ update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
+ ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
+ ID_AA64ISAR1_GPI_MASK, 0);
+ update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
+ }
+}
-static struct cpu_feat feat_pauth = {
- .feat_name = "FEAT_PAuth",
- .feat_check = ptrauth_check,
- .feat_enable = ptrauth_enable,
- .feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM,
-};
-DATA_SET(cpu_feat_set, feat_pauth);
+CPU_FEAT(feat_pauth, "Pointer Authentication",
+ ptrauth_check, NULL, ptrauth_enable, ptrauth_disabled,
+ CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM);
/* Copy the keys when forking a new process */
void
diff --git a/sys/arm64/arm64/spec_workaround.c b/sys/arm64/arm64/spec_workaround.c
new file mode 100644
index 000000000000..7f4f86cdb48c
--- /dev/null
+++ b/sys/arm64/arm64/spec_workaround.c
@@ -0,0 +1,166 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Arm Ltd
+ * Copyright (c) 2018 Andrew Turner
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cpu_feat.h>
+
+#include <dev/psci/psci.h>
+#include <dev/psci/smccc.h>
+
+static enum {
+ SSBD_FORCE_ON,
+ SSBD_FORCE_OFF,
+ SSBD_KERNEL,
+} ssbd_method = SSBD_KERNEL;
+
+struct psci_bp_hardening_impl {
+ u_int midr_mask;
+ u_int midr_value;
+};
+
+static struct psci_bp_hardening_impl psci_bp_hardening_impl[] = {
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
+ },
+ {
+ .midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
+ .midr_value =
+ CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
+ }
+};
+
+static cpu_feat_en
+psci_bp_hardening_check(const struct cpu_feat *feat __unused, u_int midr)
+{
+ size_t i;
+
+ for (i = 0; i < nitems(psci_bp_hardening_impl); i++) {
+ if ((midr & psci_bp_hardening_impl[i].midr_mask) ==
+ psci_bp_hardening_impl[i].midr_value) {
+			/* SMCCC depends on PSCI. If PSCI is missing, so is SMCCC */
+ if (!psci_present)
+ return (FEAT_ALWAYS_DISABLE);
+
+ if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) !=
+ SMCCC_RET_SUCCESS)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
+ }
+ }
+
+ return (FEAT_ALWAYS_DISABLE);
+}
+
+static bool
+psci_bp_hardening_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+ PCPU_SET(bp_harden, smccc_arch_workaround_1);
+
+ return (true);
+}
+
+CPU_FEAT(feat_csv2_missing, "Branch Predictor Hardening",
+ psci_bp_hardening_check, NULL, psci_bp_hardening_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+
+static cpu_feat_en
+ssbd_workaround_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+{
+ char *env;
+
+ if (PCPU_GET(cpuid) == 0) {
+ env = kern_getenv("kern.cfg.ssbd");
+ if (env != NULL) {
+ if (strcmp(env, "force-on") == 0) {
+ ssbd_method = SSBD_FORCE_ON;
+ } else if (strcmp(env, "force-off") == 0) {
+ ssbd_method = SSBD_FORCE_OFF;
+ }
+ }
+ }
+
+	/* SMCCC depends on PSCI. If PSCI is missing, so is SMCCC */
+ if (!psci_present)
+ return (FEAT_ALWAYS_DISABLE);
+
+ /* Enable the workaround on this CPU if it's enabled in the firmware */
+ if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
+ return (FEAT_ALWAYS_DISABLE);
+
+ return (FEAT_DEFAULT_ENABLE);
+}
+
+static bool
+ssbd_workaround_enable(const struct cpu_feat *feat __unused,
+ cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
+ u_int errata_count __unused)
+{
+	switch (ssbd_method) {
+ case SSBD_FORCE_ON:
+ smccc_arch_workaround_2(1);
+ break;
+ case SSBD_FORCE_OFF:
+ smccc_arch_workaround_2(0);
+ break;
+ case SSBD_KERNEL:
+ default:
+ PCPU_SET(ssbd, smccc_arch_workaround_2);
+ break;
+ }
+
+ return (true);
+}
+
+CPU_FEAT(feat_ssbs_missing, "Speculative Store Bypass Disable Workaround",
+ ssbd_workaround_check, NULL, ssbd_workaround_enable, NULL,
+ CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
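
The ssbd_method selection is driven by the kern.cfg.ssbd kernel environment variable read in the check above; any value other than the two recognised strings leaves the default SSBD_KERNEL behaviour, where the kernel toggles the mitigation as needed through the per-CPU ssbd function pointer. For example:

	# /boot/loader.conf
	kern.cfg.ssbd="force-on"	# or "force-off"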
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
index bed58095201a..75c9b5f87892 100644
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -246,6 +246,7 @@ external_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
print_registers(frame);
print_gp_register("far", far);
+ printf(" esr: 0x%.16lx\n", esr);
panic("Unhandled external data abort");
}
diff --git a/sys/arm64/conf/std.arm64 b/sys/arm64/conf/std.arm64
index c83e98c17a33..a0568466cfaf 100644
--- a/sys/arm64/conf/std.arm64
+++ b/sys/arm64/conf/std.arm64
@@ -7,6 +7,7 @@ makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support
options SCHED_ULE # ULE scheduler
options NUMA # Non-Uniform Memory Architecture support
options PREEMPTION # Enable kernel thread preemption
+options EXTERR_STRINGS
options VIMAGE # Subsystem virtualization, e.g. VNET
options INET # InterNETworking
options INET6 # IPv6 communications protocols
diff --git a/sys/arm64/coresight/coresight.c b/sys/arm64/coresight/coresight.c
index 5928c153f4ae..9b9d3c65ecc9 100644
--- a/sys/arm64/coresight/coresight.c
+++ b/sys/arm64/coresight/coresight.c
@@ -113,7 +113,7 @@ coresight_get_output_device(struct endpoint *endp, struct endpoint **out_endp)
}
static void
-coresight_init(void)
+coresight_init(void *dummy __unused)
{
mtx_init(&cs_mtx, "ARM Coresight", NULL, MTX_DEF);
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 500f35c48787..393d6d89da0c 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -232,6 +232,14 @@
#define CNTP_CTL_IMASK (1 << 1)
#define CNTP_CTL_ISTATUS (1 << 2)
+/* CNTP_CTL_EL02 - Counter-timer Physical Timer Control register */
+#define CNTP_CTL_EL02_REG MRS_REG_ALT_NAME(CNTP_CTL_EL02)
+#define CNTP_CTL_EL02_op0 3
+#define CNTP_CTL_EL02_op1 5
+#define CNTP_CTL_EL02_CRn 14
+#define CNTP_CTL_EL02_CRm 2
+#define CNTP_CTL_EL02_op2 1
+
/* CNTP_CVAL_EL0 - Counter-timer Physical Timer CompareValue register */
#define CNTP_CVAL_EL0_op0 3
#define CNTP_CVAL_EL0_op1 3
@@ -239,6 +247,14 @@
#define CNTP_CVAL_EL0_CRm 2
#define CNTP_CVAL_EL0_op2 2
+/* CNTP_CVAL_EL02 - Counter-timer Physical Timer CompareValue register */
+#define CNTP_CVAL_EL02_REG MRS_REG_ALT_NAME(CNTP_CVAL_EL02)
+#define CNTP_CVAL_EL02_op0 3
+#define CNTP_CVAL_EL02_op1 5
+#define CNTP_CVAL_EL02_CRn 14
+#define CNTP_CVAL_EL02_CRm 2
+#define CNTP_CVAL_EL02_op2 2
+
/* CNTP_TVAL_EL0 - Counter-timer Physical Timer TimerValue register */
#define CNTP_TVAL_EL0_op0 3
#define CNTP_TVAL_EL0_op1 3
@@ -254,6 +270,14 @@
#define CNTPCT_EL0_CRm 0
#define CNTPCT_EL0_op2 1
+/* CNTPCTSS_EL0 - Counter-timer Self-Synchronized Physical Count register */
+#define CNTPCTSS_EL0_REG MRS_REG_ALT_NAME(CNTPCTSS_EL0)
+#define CNTPCTSS_EL0_op0 3
+#define CNTPCTSS_EL0_op1 3
+#define CNTPCTSS_EL0_CRn 14
+#define CNTPCTSS_EL0_CRm 0
+#define CNTPCTSS_EL0_op2 5
+
/* CNTV_CTL_EL0 - Counter-timer Virtual Timer Control register */
#define CNTV_CTL_EL0_op0 3
#define CNTV_CTL_EL0_op1 3
@@ -282,6 +306,14 @@
#define CNTV_CVAL_EL02_CRm 3
#define CNTV_CVAL_EL02_op2 2
+/* CNTVCTSS_EL0 - Counter-timer Self-Synchronized Virtual Count register */
+#define CNTVCTSS_EL0_REG MRS_REG_ALT_NAME(CNTVCTSS_EL0)
+#define CNTVCTSS_EL0_op0 3
+#define CNTVCTSS_EL0_op1 3
+#define CNTVCTSS_EL0_CRn 14
+#define CNTVCTSS_EL0_CRm 0
+#define CNTVCTSS_EL0_op2 6
+
/* CONTEXTIDR_EL1 - Context ID register */
#define CONTEXTIDR_EL1_REG MRS_REG_ALT_NAME(CONTEXTIDR_EL1)
#define CONTEXTIDR_EL1_op0 3
@@ -2148,6 +2180,7 @@
#define OSLAR_EL1_CRn 1
#define OSLAR_EL1_CRm 0
#define OSLAR_EL1_op2 4
+#define OSLAR_OSLK (0x1ul << 0)
/* OSLSR_EL1 */
#define OSLSR_EL1_op0 2
@@ -2155,6 +2188,10 @@
#define OSLSR_EL1_CRn 1
#define OSLSR_EL1_CRm 1
#define OSLSR_EL1_op2 4
+#define OSLSR_OSLM_1 (0x1ul << 3)
+#define OSLSR_nTT (0x1ul << 2)
+#define OSLSR_OSLK (0x1ul << 1)
+#define OSLSR_OSLM_0 (0x1ul << 0)
/* PAR_EL1 - Physical Address Register */
#define PAR_F_SHIFT 0
@@ -2612,10 +2649,12 @@
(SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
#define SCTLR_MMU_ON \
(SCTLR_MMU_OFF | \
+ SCTLR_EPAN | \
SCTLR_BT1 | \
SCTLR_BT0 | \
SCTLR_UCI | \
SCTLR_SPAN | \
+ SCTLR_IESB | \
SCTLR_nTWE | \
SCTLR_nTWI | \
SCTLR_UCT | \
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 59cda36f275e..124da8c215ed 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -125,7 +125,11 @@
#define CPU_PART_NEOVERSE_V3 0xD84
#define CPU_PART_CORTEX_X925 0xD85
#define CPU_PART_CORTEX_A725 0xD87
+#define CPU_PART_C1_NANO 0xD8A
+#define CPU_PART_C1_PRO 0xD8B
+#define CPU_PART_C1_ULTRA 0xD8C
#define CPU_PART_NEOVERSE_N3 0xD8E
+#define CPU_PART_C1_PREMIUM 0xD90
/* Cavium Part numbers */
#define CPU_PART_THUNDERX 0x0A1
@@ -193,8 +197,30 @@
(((mask) & PCPU_GET(midr)) == \
((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
-#define CPU_MATCH_RAW(mask, devid) \
- (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+#if !defined(__ASSEMBLER__)
+static inline bool
+midr_check_var_part_range(u_int midr, u_int impl, u_int part, u_int var_low,
+    u_int rev_low, u_int var_high, u_int rev_high)
+{
+	/* Check for the correct part */
+	if (CPU_IMPL(midr) != impl || CPU_PART(midr) != part)
+		return (false);
+
+	/* Check if the variant is between var_low and var_high inclusive */
+	if (CPU_VAR(midr) < var_low || CPU_VAR(midr) > var_high)
+		return (false);
+
+	/* If the variant is the low value, check the revision is high enough */
+	if (CPU_VAR(midr) == var_low && CPU_REV(midr) < rev_low)
+		return (false);
+
+	/* If the variant is the high value, check the revision is low enough */
+	if (CPU_VAR(midr) == var_high && CPU_REV(midr) > rev_high)
+		return (false);
+
+ return (true);
+}
+#endif
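
The bounds are inclusive variant/revision pairs in the usual rXpY notation. For example, the Neoverse-N1 1542419 check in identcpu.c above matches r3p0 through r4p0 with:

	if (midr_check_var_part_range(midr, CPU_IMPL_ARM,
	    CPU_PART_NEOVERSE_N1, 3, 0, 4, 0))
		/* erratum present */;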
/*
* Chip-specific errata. This defines are intended to be
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
index 9fe6a9dd95d9..6a311d4000bb 100644
--- a/sys/arm64/include/cpu_feat.h
+++ b/sys/arm64/include/cpu_feat.h
@@ -29,6 +29,7 @@
#define _MACHINE_CPU_FEAT_H_
#include <sys/linker_set.h>
+#include <sys/sysctl.h>
typedef enum {
ERRATA_UNKNOWN, /* Unknown erratum */
@@ -39,6 +40,31 @@ typedef enum {
/* kernel component. */
} cpu_feat_errata;
+typedef enum {
+ /*
+	 * Don't implement the feature or erratum workaround,
+	 * e.g. the feature is not implemented or the erratum
+	 * applies to another CPU.
+ */
+ FEAT_ALWAYS_DISABLE,
+
+ /*
+ * Disable by default, but allow the user to enable,
+ * e.g. For a rare erratum with a workaround, Arm
+	 * e.g. for a rare erratum with a workaround, Arm
+ */
+ FEAT_DEFAULT_DISABLE,
+
+ /*
+	 * Enable by default, but allow the user to disable,
+	 * e.g. for a common erratum with a workaround, Arm
+ * Category A or B or similar.
+ */
+ FEAT_DEFAULT_ENABLE,
+
+	/* We could add FEAT_ALWAYS_ENABLE should the need arise. */
+} cpu_feat_en;
+
#define CPU_FEAT_STAGE_MASK 0x00000001
#define CPU_FEAT_EARLY_BOOT 0x00000000
#define CPU_FEAT_AFTER_DEV 0x00000001
@@ -47,23 +73,45 @@ typedef enum {
#define CPU_FEAT_PER_CPU 0x00000000
#define CPU_FEAT_SYSTEM 0x00000010
+#define CPU_FEAT_USER_ENABLED 0x40000000
+#define CPU_FEAT_USER_DISABLED 0x80000000
+
struct cpu_feat;
-typedef bool (cpu_feat_check)(const struct cpu_feat *, u_int);
+typedef cpu_feat_en (cpu_feat_check)(const struct cpu_feat *, u_int);
typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
u_int **, u_int *);
-typedef void (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
+typedef bool (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
u_int *, u_int);
+typedef void (cpu_feat_disabled)(const struct cpu_feat *);
struct cpu_feat {
const char *feat_name;
cpu_feat_check *feat_check;
cpu_feat_has_errata *feat_has_errata;
cpu_feat_enable *feat_enable;
+ cpu_feat_disabled *feat_disabled;
uint32_t feat_flags;
+ bool feat_enabled;
};
SET_DECLARE(cpu_feat_set, struct cpu_feat);
+SYSCTL_DECL(_hw_feat);
+
+#define CPU_FEAT(name, descr, check, has_errata, enable, disabled, flags) \
+static struct cpu_feat name = { \
+ .feat_name = #name, \
+ .feat_check = check, \
+ .feat_has_errata = has_errata, \
+ .feat_enable = enable, \
+ .feat_disabled = disabled, \
+ .feat_flags = flags, \
+ .feat_enabled = false, \
+}; \
+DATA_SET(cpu_feat_set, name); \
+SYSCTL_BOOL(_hw_feat, OID_AUTO, name, CTLFLAG_RD, &name.feat_enabled, \
+ 0, descr)
+
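Putting the pieces together, a new feature needs a check and an enable callback, plus optional errata and disabled callbacks; a minimal sketch with a hypothetical feature name:

	static cpu_feat_en
	widget_check(const struct cpu_feat *feat __unused, u_int midr __unused)
	{
		/* Probe an ID register; return FEAT_ALWAYS_DISABLE if absent */
		return (FEAT_DEFAULT_ENABLE);
	}

	static bool
	widget_enable(const struct cpu_feat *feat __unused,
	    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
	    u_int errata_count __unused)
	{
		/* Set the relevant control register bit, then report success */
		return (true);
	}

	CPU_FEAT(feat_widget, "Example widget feature",
	    widget_check, NULL, widget_enable, NULL,
	    CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);

This registers the linker-set entry and the read-only hw.feat.feat_widget sysctl; the tunable of the same name is picked up by enable_cpu_feat() via feat_name.
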
/*
* Allow drivers to mark an erratum as worked around, e.g. the Errata
* Management ABI may know the workaround isn't needed on a given system.
diff --git a/sys/arm64/include/hypervisor.h b/sys/arm64/include/hypervisor.h
index e3a880afbe9c..8feabd2b981b 100644
--- a/sys/arm64/include/hypervisor.h
+++ b/sys/arm64/include/hypervisor.h
@@ -36,20 +36,77 @@
*/
/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
-#define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */
/* Valid if HCR_EL2.E2H == 0 */
-#define CNTHCTL_EL1PCTEN (1 << 0) /* Allow physical counter access */
-#define CNTHCTL_EL1PCEN (1 << 1) /* Allow physical timer access */
+#define CNTHCTL_EL1PCTEN_SHIFT 0
+#define	CNTHCTL_EL1PCTEN_MASK		(0x1ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define	CNTHCTL_EL1PCTEN_TRAP		(0x0ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCTEN_NOTRAP (0x1ul << CNTHCTL_EL1PCTEN_SHIFT)
+#define CNTHCTL_EL1PCEN_SHIFT 1
+#define CNTHCTL_EL1PCEN_MASK (0x1ul << CNTHCTL_EL1PCEN_SHIFT)
+#define CNTHCTL_EL1PCEN_TRAP (0x0ul << CNTHCTL_EL1PCEN_SHIFT)
+#define CNTHCTL_EL1PCEN_NOTRAP (0x1ul << CNTHCTL_EL1PCEN_SHIFT)
/* Valid if HCR_EL2.E2H == 1 */
-#define CNTHCTL_E2H_EL0PCTEN (1 << 0) /* Allow EL0 physical counter access */
-#define CNTHCTL_E2H_EL0VCTEN (1 << 1) /* Allow EL0 virtual counter access */
-#define CNTHCTL_E2H_EL0VTEN (1 << 8)
-#define CNTHCTL_E2H_EL0PTEN (1 << 9)
-#define CNTHCTL_E2H_EL1PCTEN (1 << 10) /* Allow physical counter access */
-#define CNTHCTL_E2H_EL1PTEN (1 << 11) /* Allow physical timer access */
+#define CNTHCTL_E2H_EL0PCTEN_SHIFT 0
+#define CNTHCTL_E2H_EL0PCTEN_MASK (0x1ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_SHIFT 1
+#define CNTHCTL_E2H_EL0VCTEN_MASK (0x1ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0VCTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_SHIFT 8
+#define CNTHCTL_E2H_EL0VTEN_MASK (0x1ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0VTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0VTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_SHIFT 9
+#define CNTHCTL_E2H_EL0PTEN_MASK (0x1ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_TRAP (0x0ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL0PTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL0PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_SHIFT 10
+#define CNTHCTL_E2H_EL1PCTEN_MASK (0x1ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_TRAP (0x0ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PCTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL1PCTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_SHIFT 11
+#define CNTHCTL_E2H_EL1PTEN_MASK (0x1ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_TRAP (0x0ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
+#define CNTHCTL_E2H_EL1PTEN_NOTRAP (0x1ul << CNTHCTL_E2H_EL1PTEN_SHIFT)
/* Unconditionally valid */
-#define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */
-#define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */
+#define CNTHCTL_EVNTEN_SHIFT 2
+#define CNTHCTL_EVNTEN_MASK (0x1ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTEN_DIS (0x0ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTEN_EN (0x1ul << CNTHCTL_EVNTEN_SHIFT)
+#define CNTHCTL_EVNTDIR_SHIFT 3
+#define CNTHCTL_EVNTDIR_MASK (0x1ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTDIR_HIGH (0x0ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTDIR_LOW (0x1ul << CNTHCTL_EVNTDIR_SHIFT)
+#define CNTHCTL_EVNTI_SHIFT 4
+#define CNTHCTL_EVNTI_MASK (0xful << CNTHCTL_EVNTI_SHIFT)
+#define CNTHCTL_ECV_SHIFT 12
+#define CNTHCTL_ECV_MASK (0x1ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_ECV_DIS (0x0ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_ECV_EN (0x1ul << CNTHCTL_ECV_SHIFT)
+#define CNTHCTL_EL1TVT_SHIFT 13
+#define CNTHCTL_EL1TVT_MASK (0x1ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVT_NOTRAP (0x0ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVT_TRAP (0x1ul << CNTHCTL_EL1TVT_SHIFT)
+#define CNTHCTL_EL1TVCT_SHIFT 14
+#define CNTHCTL_EL1TVCT_MASK (0x1ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1TVCT_NOTRAP (0x0ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1TVCT_TRAP (0x1ul << CNTHCTL_EL1TVCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_SHIFT 15
+#define CNTHCTL_EL1NVPCT_MASK (0x1ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_NOTRAP (0x0ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVPCT_TRAP (0x1ul << CNTHCTL_EL1NVPCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_SHIFT 16
+#define CNTHCTL_EL1NVVCT_MASK (0x1ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_NOTRAP (0x0ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EL1NVVCT_TRAP (0x1ul << CNTHCTL_EL1NVVCT_SHIFT)
+#define CNTHCTL_EVNTIS_SHIFT 17
+#define CNTHCTL_EVNTIS_MASK (0x1ul << CNTHCTL_EVNTIS_SHIFT)
+#define CNTHCTL_CNTVMASK_SHIFT 18
+#define CNTHCTL_CNTVMASK_MASK (0x1ul << CNTHCTL_CNTVMASK_SHIFT)
+#define CNTHCTL_CNTPMASK_SHIFT 19
+#define CNTHCTL_CNTPMASK_MASK (0x1ul << CNTHCTL_CNTPMASK_SHIFT)
/* CNTPOFF_EL2 - Counter-timer Physical Offset Register */
#define CNTPOFF_EL2_REG MRS_REG_ALT_NAME(CNTPOFF_EL2)
@@ -190,6 +247,54 @@
#define ICC_SRE_EL2_SRE (1UL << 0)
#define ICC_SRE_EL2_EN (1UL << 3)
+/* MDCR_EL2 - Hyp Debug Control Register */
+#define MDCR_EL2_HPMN_MASK 0x1f
+#define MDCR_EL2_HPMN_SHIFT 0
+#define MDCR_EL2_TPMCR_SHIFT 5
+#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
+#define MDCR_EL2_TPM_SHIFT 6
+#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
+#define MDCR_EL2_HPME_SHIFT 7
+#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
+#define MDCR_EL2_TDE_SHIFT 8
+#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
+#define MDCR_EL2_TDA_SHIFT 9
+#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
+#define MDCR_EL2_TDOSA_SHIFT 10
+#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
+#define MDCR_EL2_TDRA_SHIFT 11
+#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
+#define MDCR_EL2_E2PB_SHIFT 12
+#define MDCR_EL2_E2PB_MASK (0x3UL << MDCR_EL2_E2PB_SHIFT)
+#define MDCR_EL2_TPMS_SHIFT 14
+#define MDCR_EL2_TPMS (0x1UL << MDCR_EL2_TPMS_SHIFT)
+#define MDCR_EL2_EnSPM_SHIFT 15
+#define MDCR_EL2_EnSPM (0x1UL << MDCR_EL2_EnSPM_SHIFT)
+#define MDCR_EL2_HPMD_SHIFT 17
+#define MDCR_EL2_HPMD (0x1UL << MDCR_EL2_HPMD_SHIFT)
+#define MDCR_EL2_TTRF_SHIFT 19
+#define MDCR_EL2_TTRF (0x1UL << MDCR_EL2_TTRF_SHIFT)
+#define MDCR_EL2_HCCD_SHIFT 23
+#define MDCR_EL2_HCCD (0x1UL << MDCR_EL2_HCCD_SHIFT)
+#define MDCR_EL2_E2TB_SHIFT 24
+#define MDCR_EL2_E2TB_MASK (0x3UL << MDCR_EL2_E2TB_SHIFT)
+#define MDCR_EL2_HLP_SHIFT 26
+#define MDCR_EL2_HLP (0x1UL << MDCR_EL2_HLP_SHIFT)
+#define MDCR_EL2_TDCC_SHIFT 27
+#define MDCR_EL2_TDCC (0x1UL << MDCR_EL2_TDCC_SHIFT)
+#define MDCR_EL2_MTPME_SHIFT 28
+#define MDCR_EL2_MTPME (0x1UL << MDCR_EL2_MTPME_SHIFT)
+#define MDCR_EL2_HPMFZO_SHIFT 29
+#define MDCR_EL2_HPMFZO (0x1UL << MDCR_EL2_HPMFZO_SHIFT)
+#define MDCR_EL2_PMSSE_SHIFT 30
+#define MDCR_EL2_PMSSE_MASK (0x3UL << MDCR_EL2_PMSSE_SHIFT)
+#define MDCR_EL2_HPMFZS_SHIFT 36
+#define MDCR_EL2_HPMFZS (0x1UL << MDCR_EL2_HPMFZS_SHIFT)
+#define MDCR_EL2_PMEE_SHIFT 40
+#define MDCR_EL2_PMEE_MASK (0x3UL << MDCR_EL2_PMEE_SHIFT)
+#define MDCR_EL2_EBWE_SHIFT 43
+#define MDCR_EL2_EBWE (0x1UL << MDCR_EL2_EBWE_SHIFT)
+
/* SCTLR_EL2 - System Control Register */
#define SCTLR_EL2_RES1 0x30c50830
#define SCTLR_EL2_M_SHIFT 0
@@ -299,52 +404,4 @@
/* Assumed to be 0 by locore.S */
#define VTTBR_HOST 0x0000000000000000
-/* MDCR_EL2 - Hyp Debug Control Register */
-#define MDCR_EL2_HPMN_MASK 0x1f
-#define MDCR_EL2_HPMN_SHIFT 0
-#define MDCR_EL2_TPMCR_SHIFT 5
-#define MDCR_EL2_TPMCR (0x1UL << MDCR_EL2_TPMCR_SHIFT)
-#define MDCR_EL2_TPM_SHIFT 6
-#define MDCR_EL2_TPM (0x1UL << MDCR_EL2_TPM_SHIFT)
-#define MDCR_EL2_HPME_SHIFT 7
-#define MDCR_EL2_HPME (0x1UL << MDCR_EL2_HPME_SHIFT)
-#define MDCR_EL2_TDE_SHIFT 8
-#define MDCR_EL2_TDE (0x1UL << MDCR_EL2_TDE_SHIFT)
-#define MDCR_EL2_TDA_SHIFT 9
-#define MDCR_EL2_TDA (0x1UL << MDCR_EL2_TDA_SHIFT)
-#define MDCR_EL2_TDOSA_SHIFT 10
-#define MDCR_EL2_TDOSA (0x1UL << MDCR_EL2_TDOSA_SHIFT)
-#define MDCR_EL2_TDRA_SHIFT 11
-#define MDCR_EL2_TDRA (0x1UL << MDCR_EL2_TDRA_SHIFT)
-#define MDCR_E2PB_SHIFT 12
-#define MDCR_E2PB_MASK (0x3UL << MDCR_E2PB_SHIFT)
-#define MDCR_TPMS_SHIFT 14
-#define MDCR_TPMS (0x1UL << MDCR_TPMS_SHIFT)
-#define MDCR_EnSPM_SHIFT 15
-#define MDCR_EnSPM (0x1UL << MDCR_EnSPM_SHIFT)
-#define MDCR_HPMD_SHIFT 17
-#define MDCR_HPMD (0x1UL << MDCR_HPMD_SHIFT)
-#define MDCR_TTRF_SHIFT 19
-#define MDCR_TTRF (0x1UL << MDCR_TTRF_SHIFT)
-#define MDCR_HCCD_SHIFT 23
-#define MDCR_HCCD (0x1UL << MDCR_HCCD_SHIFT)
-#define MDCR_E2TB_SHIFT 24
-#define MDCR_E2TB_MASK (0x3UL << MDCR_E2TB_SHIFT)
-#define MDCR_HLP_SHIFT 26
-#define MDCR_HLP (0x1UL << MDCR_HLP_SHIFT)
-#define MDCR_TDCC_SHIFT 27
-#define MDCR_TDCC (0x1UL << MDCR_TDCC_SHIFT)
-#define MDCR_MTPME_SHIFT 28
-#define MDCR_MTPME (0x1UL << MDCR_MTPME_SHIFT)
-#define MDCR_HPMFZO_SHIFT 29
-#define MDCR_HPMFZO (0x1UL << MDCR_HPMFZO_SHIFT)
-#define MDCR_PMSSE_SHIFT 30
-#define MDCR_PMSSE_MASK (0x3UL << MDCR_PMSSE_SHIFT)
-#define MDCR_HPMFZS_SHIFT 36
-#define MDCR_HPMFZS (0x1UL << MDCR_HPMFZS_SHIFT)
-#define MDCR_PMEE_SHIFT 40
-#define MDCR_PMEE_MASK (0x3UL << MDCR_PMEE_SHIFT)
-#define MDCR_EBWE_SHIFT 43
-#define MDCR_EBWE (0x1UL << MDCR_EBWE_SHIFT)
-
#endif /* !_MACHINE_HYPERVISOR_H_ */
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 0f23f200f0f6..406b6e2c5e0a 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -69,6 +69,7 @@ struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
int pv_gen;
vm_memattr_t pv_memattr;
+ uint8_t pv_reserve[3];
};
enum pmap_stage {
@@ -174,6 +175,8 @@ int pmap_fault(pmap_t, uint64_t, uint64_t);
struct pcb *pmap_switch(struct thread *);
+void pmap_s1_invalidate_all_kernel(void);
+
extern void (*pmap_clean_stage2_tlbi)(void);
extern void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t, vm_offset_t,
bool);
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
index 184743d4cc80..b40990e89385 100644
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -75,6 +75,7 @@ struct mdthread {
struct mdproc {
uint64_t md_tcr; /* TCR_EL1 fields to update */
+ uint64_t md_reserved[2];
};
#endif /* !LOCORE */
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 73b5b4a09591..84b286a60b38 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -42,6 +42,7 @@ enum vm_suspend_how {
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
+ VM_SUSPEND_DESTROY,
VM_SUSPEND_LAST
};
@@ -142,6 +143,37 @@ struct vm_eventinfo {
int *iptr; /* reqidle cookie */
};
+#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
+ ret_type vmmops_##opname args
+
+DECLARE_VMMOPS_FUNC(int, modinit, (int ipinum));
+DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
+DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
+DECLARE_VMMOPS_FUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
+ uint64_t gla, int prot, uint64_t *gpa, int *is_fault));
+DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
+ struct vm_eventinfo *info));
+DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
+DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+ int vcpu_id));
+DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
+DECLARE_VMMOPS_FUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far));
+DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
+DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
+DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
+DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
+DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
+ vm_offset_t max));
+DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
+#ifdef notyet
+#ifdef BHYVE_SNAPSHOT
+DECLARE_VMMOPS_FUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, vcpu_snapshot, (void *vcpui,
+ struct vm_snapshot_meta *meta));
+DECLARE_VMMOPS_FUNC(int, restore_tsc, (void *vcpui, uint64_t now));
+#endif
+#endif
+
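Each DECLARE_VMMOPS_FUNC() line is a prototype generator for the backend entry points; for example, the getreg entry expands to:

	int vmmops_getreg(void *vcpui, int num, uint64_t *retval);
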
int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
@@ -231,7 +263,6 @@ vcpu_should_yield(struct vcpu *vcpu)
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
struct vm_mem *vm_mem(struct vm *vm);
enum vm_reg_name vm_segment_name(int seg_encoding);
diff --git a/sys/arm64/linux/linux_sysvec.c b/sys/arm64/linux/linux_sysvec.c
index 084b7a11b01f..ac05820f89bc 100644
--- a/sys/arm64/linux/linux_sysvec.c
+++ b/sys/arm64/linux/linux_sysvec.c
@@ -584,7 +584,7 @@ linux_vdso_reloc(char *mapping, Elf_Addr offset)
}
}
-static Elf_Brandnote linux64_brandnote = {
+static const Elf_Brandnote linux64_brandnote = {
.hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
.hdr.n_descsz = 16,
.hdr.n_type = 1,
@@ -593,7 +593,7 @@ static Elf_Brandnote linux64_brandnote = {
.trans_osrel = linux_trans_osrel
};
-static Elf64_Brandinfo linux_glibc2brand = {
+static const Elf64_Brandinfo linux_glibc2brand = {
.brand = ELFOSABI_LINUX,
.machine = EM_AARCH64,
.compat_3_brand = "Linux",
@@ -604,7 +604,7 @@ static Elf64_Brandinfo linux_glibc2brand = {
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
-Elf64_Brandinfo *linux_brandlist[] = {
+const Elf64_Brandinfo *linux_brandlist[] = {
&linux_glibc2brand,
NULL
};
@@ -612,8 +612,8 @@ Elf64_Brandinfo *linux_brandlist[] = {
static int
linux64_elf_modevent(module_t mod, int type, void *data)
{
- Elf64_Brandinfo **brandinfo;
- struct linux_ioctl_handler**lihp;
+ const Elf64_Brandinfo **brandinfo;
+ struct linux_ioctl_handler **lihp;
int error;
error = 0;
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
index 847bc7394dd0..8da37d516802 100644
--- a/sys/arm64/rockchip/rk_gpio.c
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -90,6 +90,11 @@ struct rk_pin_irqsrc {
uint32_t mode;
};
+struct rk_gpio_reg {
+ uint8_t single;
+ uint8_t offset;
+};
+
struct rk_gpio_softc {
device_t sc_dev;
device_t sc_busdev;
@@ -103,7 +108,7 @@ struct rk_gpio_softc {
uint32_t swporta_ddr;
uint32_t version;
struct pin_cached pin_cached[RK_GPIO_MAX_PINS];
- uint8_t regs[RK_GPIO_REGNUM];
+ struct rk_gpio_reg regs[RK_GPIO_REGNUM];
void *ihandle;
struct rk_pin_irqsrc isrcs[RK_GPIO_MAX_PINS];
};
@@ -138,14 +143,15 @@ static int rk_gpio_detach(device_t dev);
static int
rk_gpio_read_bit(struct rk_gpio_softc *sc, int reg, int bit)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
uint32_t value;
- if (sc->version == RK_GPIO_TYPE_V1) {
- value = RK_GPIO_READ(sc, offset);
+ if (rk_reg->single) {
+ value = RK_GPIO_READ(sc, rk_reg->offset);
value >>= bit;
} else {
- value = RK_GPIO_READ(sc, bit > 15 ? offset + 4 : offset);
+ value = RK_GPIO_READ(sc, bit > 15 ?
+ rk_reg->offset + 4 : rk_reg->offset);
value >>= (bit % 16);
}
return (value & 1);
@@ -154,50 +160,53 @@ rk_gpio_read_bit(struct rk_gpio_softc *sc, int reg, int bit)
static void
rk_gpio_write_bit(struct rk_gpio_softc *sc, int reg, int bit, int data)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
uint32_t value;
- if (sc->version == RK_GPIO_TYPE_V1) {
- value = RK_GPIO_READ(sc, offset);
+ if (rk_reg->single) {
+ value = RK_GPIO_READ(sc, rk_reg->offset);
if (data)
value |= (1 << bit);
else
value &= ~(1 << bit);
- RK_GPIO_WRITE(sc, offset, value);
+ RK_GPIO_WRITE(sc, rk_reg->offset, value);
} else {
if (data)
value = (1 << (bit % 16));
else
value = 0;
value |= (1 << ((bit % 16) + 16));
- RK_GPIO_WRITE(sc, bit > 15 ? offset + 4 : offset, value);
+ RK_GPIO_WRITE(sc, bit > 15 ?
+ rk_reg->offset + 4 : rk_reg->offset, value);
}
}
static uint32_t
rk_gpio_read_4(struct rk_gpio_softc *sc, int reg)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
uint32_t value;
- if (sc->version == RK_GPIO_TYPE_V1)
- value = RK_GPIO_READ(sc, offset);
+ if (rk_reg->single)
+ value = RK_GPIO_READ(sc, rk_reg->offset);
else
- value = (RK_GPIO_READ(sc, offset) & 0xffff) |
- (RK_GPIO_READ(sc, offset + 4) << 16);
+ value = (RK_GPIO_READ(sc, rk_reg->offset) & 0xffff) |
+ (RK_GPIO_READ(sc, rk_reg->offset + 4) << 16);
return (value);
}
static void
rk_gpio_write_4(struct rk_gpio_softc *sc, int reg, uint32_t value)
{
- int offset = sc->regs[reg];
+ struct rk_gpio_reg *rk_reg = &sc->regs[reg];
- if (sc->version == RK_GPIO_TYPE_V1)
- RK_GPIO_WRITE(sc, offset, value);
+ if (rk_reg->single)
+ RK_GPIO_WRITE(sc, rk_reg->offset, value);
else {
- RK_GPIO_WRITE(sc, offset, (value & 0xffff) | 0xffff0000);
- RK_GPIO_WRITE(sc, offset + 4, (value >> 16) | 0xffff0000);
+ RK_GPIO_WRITE(sc, rk_reg->offset,
+ (value & 0xffff) | 0xffff0000);
+ RK_GPIO_WRITE(sc, rk_reg->offset + 4,
+ (value >> 16) | 0xffff0000);
}
}
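
On v2 controllers a logical 32-bit register is split across two words whose upper 16 bits are a write-enable mask, which is why the writers above pair each data bit with its mask bit. Worked through rk_gpio_write_bit() (values follow directly from the code):

	/* rk_gpio_write_bit(sc, reg, 3, 1) on v2: */
	value = (1 << 3) | (1 << (3 + 16));	/* 0x00080008 */
	RK_GPIO_WRITE(sc, rk_reg->offset, value);

	/* rk_gpio_write_bit(sc, reg, 17, 1): bits above 15 use the next word */
	value = (1 << 1) | (1 << (1 + 16));	/* 0x00020002 */
	RK_GPIO_WRITE(sc, rk_reg->offset + 4, value);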
@@ -313,31 +322,31 @@ rk_gpio_attach(device_t dev)
switch (sc->version) {
case RK_GPIO_TYPE_V1:
- sc->regs[RK_GPIO_SWPORTA_DR] = 0x00;
- sc->regs[RK_GPIO_SWPORTA_DDR] = 0x04;
- sc->regs[RK_GPIO_INTEN] = 0x30;
- sc->regs[RK_GPIO_INTMASK] = 0x34;
- sc->regs[RK_GPIO_INTTYPE_LEVEL] = 0x38;
- sc->regs[RK_GPIO_INT_POLARITY] = 0x3c;
- sc->regs[RK_GPIO_INT_STATUS] = 0x40;
- sc->regs[RK_GPIO_INT_RAWSTATUS] = 0x44;
- sc->regs[RK_GPIO_DEBOUNCE] = 0x48;
- sc->regs[RK_GPIO_PORTA_EOI] = 0x4c;
- sc->regs[RK_GPIO_EXT_PORTA] = 0x50;
+ sc->regs[RK_GPIO_SWPORTA_DR] = (struct rk_gpio_reg){ 1, 0x00 };
+ sc->regs[RK_GPIO_SWPORTA_DDR] = (struct rk_gpio_reg){ 1, 0x04 };
+ sc->regs[RK_GPIO_INTEN] = (struct rk_gpio_reg){ 1, 0x30 };
+ sc->regs[RK_GPIO_INTMASK] = (struct rk_gpio_reg){ 1, 0x34 };
+ sc->regs[RK_GPIO_INTTYPE_LEVEL] = (struct rk_gpio_reg){ 1, 0x38 };
+ sc->regs[RK_GPIO_INT_POLARITY] = (struct rk_gpio_reg){ 1, 0x3c };
+ sc->regs[RK_GPIO_INT_STATUS] = (struct rk_gpio_reg){ 1, 0x40 };
+ sc->regs[RK_GPIO_INT_RAWSTATUS] = (struct rk_gpio_reg){ 1, 0x44 };
+ sc->regs[RK_GPIO_DEBOUNCE] = (struct rk_gpio_reg){ 1, 0x48 };
+ sc->regs[RK_GPIO_PORTA_EOI] = (struct rk_gpio_reg){ 1, 0x4c };
+ sc->regs[RK_GPIO_EXT_PORTA] = (struct rk_gpio_reg){ 1, 0x50 };
break;
case RK_GPIO_TYPE_V2:
- sc->regs[RK_GPIO_SWPORTA_DR] = 0x00;
- sc->regs[RK_GPIO_SWPORTA_DDR] = 0x08;
- sc->regs[RK_GPIO_INTEN] = 0x10;
- sc->regs[RK_GPIO_INTMASK] = 0x18;
- sc->regs[RK_GPIO_INTTYPE_LEVEL] = 0x20;
- sc->regs[RK_GPIO_INTTYPE_BOTH] = 0x30;
- sc->regs[RK_GPIO_INT_POLARITY] = 0x28;
- sc->regs[RK_GPIO_INT_STATUS] = 0x50;
- sc->regs[RK_GPIO_INT_RAWSTATUS] = 0x58;
- sc->regs[RK_GPIO_DEBOUNCE] = 0x38;
- sc->regs[RK_GPIO_PORTA_EOI] = 0x60;
- sc->regs[RK_GPIO_EXT_PORTA] = 0x70;
+ sc->regs[RK_GPIO_SWPORTA_DR] = (struct rk_gpio_reg){ 0, 0x00 };
+ sc->regs[RK_GPIO_SWPORTA_DDR] = (struct rk_gpio_reg){ 0, 0x08 };
+ sc->regs[RK_GPIO_INTEN] = (struct rk_gpio_reg){ 0, 0x10 };
+ sc->regs[RK_GPIO_INTMASK] = (struct rk_gpio_reg){ 0, 0x18 };
+ sc->regs[RK_GPIO_INTTYPE_LEVEL] = (struct rk_gpio_reg){ 0, 0x20 };
+ sc->regs[RK_GPIO_INTTYPE_BOTH] = (struct rk_gpio_reg){ 0, 0x30 };
+ sc->regs[RK_GPIO_INT_POLARITY] = (struct rk_gpio_reg){ 0, 0x28 };
+ sc->regs[RK_GPIO_INT_STATUS] = (struct rk_gpio_reg){ 1, 0x50 };
+ sc->regs[RK_GPIO_INT_RAWSTATUS] = (struct rk_gpio_reg){ 1, 0x58 };
+ sc->regs[RK_GPIO_DEBOUNCE] = (struct rk_gpio_reg){ 0, 0x38 };
+ sc->regs[RK_GPIO_PORTA_EOI] = (struct rk_gpio_reg){ 0, 0x60 };
+ sc->regs[RK_GPIO_EXT_PORTA] = (struct rk_gpio_reg){ 1, 0x70 };
break;
default:
device_printf(dev, "Unknown gpio version %08x\n", sc->version);
@@ -371,12 +380,13 @@ rk_gpio_attach(device_t dev)
sc->swporta_ddr = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DDR);
RK_GPIO_UNLOCK(sc);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
rk_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -393,7 +403,7 @@ rk_gpio_detach(device_t dev)
mtx_destroy(&sc->sc_mtx);
clk_disable(sc->clk);
- return(0);
+ return (0);
}
static device_t
@@ -470,7 +480,7 @@ rk_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps)
{
if (pin >= RK_GPIO_MAX_PINS)
- return EINVAL;
+ return (EINVAL);
*caps = RK_GPIO_DEFAULT_CAPS;
return (0);
@@ -653,46 +663,108 @@ rk_gpio_get_node(device_t bus, device_t dev)
}
static int
-rk_pic_map_intr(device_t dev, struct intr_map_data *data,
- struct intr_irqsrc **isrcp)
+rk_gpio_pic_map_fdt(struct rk_gpio_softc *sc,
+ struct intr_map_data_fdt *daf,
+ u_int *irqp, uint32_t *modep)
{
- struct rk_gpio_softc *sc = device_get_softc(dev);
- struct intr_map_data_gpio *gdata;
uint32_t irq;
+ uint32_t mode;
- if (data->type != INTR_MAP_DATA_GPIO) {
- device_printf(dev, "Wrong type\n");
- return (ENOTSUP);
- }
- gdata = (struct intr_map_data_gpio *)data;
- irq = gdata->gpio_pin_num;
+ if (daf->ncells != 2)
+ return (EINVAL);
+
+ irq = daf->cells[0];
+ if (irq >= RK_GPIO_MAX_PINS)
+ return (EINVAL);
+
+ /* Only reasonable modes are supported. */
+ if (daf->cells[1] == 1)
+ mode = GPIO_INTR_EDGE_RISING;
+ else if (daf->cells[1] == 2)
+ mode = GPIO_INTR_EDGE_FALLING;
+ else if (daf->cells[1] == 3)
+ mode = GPIO_INTR_EDGE_BOTH;
+ else if (daf->cells[1] == 4)
+ mode = GPIO_INTR_LEVEL_HIGH;
+ else if (daf->cells[1] == 8)
+ mode = GPIO_INTR_LEVEL_LOW;
+ else
+ return (EINVAL);
+
+ *irqp = irq;
+ if (modep != NULL)
+ *modep = mode;
+ return (0);
+}
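
The two cells are the pin number and a trigger type matching the generic devicetree interrupt flags (1 rising, 2 falling, 3 both edges, 4 level-high, 8 level-low). A hypothetical consumer node requesting pin 5 on a falling edge would carry:

	interrupts-extended = <&gpio0 5 2>;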
+
+static int
+rk_gpio_pic_map_gpio(struct rk_gpio_softc *sc,
+ struct intr_map_data_gpio *dag,
+ u_int *irqp, uint32_t *modep)
+{
+ uint32_t irq;
+ irq = dag->gpio_pin_num;
if (irq >= RK_GPIO_MAX_PINS) {
- device_printf(dev, "Invalid interrupt %u\n", irq);
+ device_printf(sc->sc_dev, "Invalid interrupt %u\n",
+ irq);
return (EINVAL);
}
- *isrcp = RK_GPIO_ISRC(sc, irq);
+
+ *irqp = irq;
+ if (modep != NULL)
+ *modep = dag->gpio_intr_mode;
return (0);
}
static int
+rk_gpio_pic_map(struct rk_gpio_softc *sc, struct intr_map_data *data,
+ u_int *irqp, uint32_t *modep)
+{
+ switch (data->type) {
+ case INTR_MAP_DATA_FDT:
+ return (rk_gpio_pic_map_fdt(sc,
+ (struct intr_map_data_fdt *)data, irqp, modep));
+ case INTR_MAP_DATA_GPIO:
+ return (rk_gpio_pic_map_gpio(sc,
+ (struct intr_map_data_gpio *)data, irqp, modep));
+ default:
+ device_printf(sc->sc_dev, "Wrong type\n");
+ return (ENOTSUP);
+ }
+}
+
+static int
+rk_pic_map_intr(device_t dev, struct intr_map_data *data,
+ struct intr_irqsrc **isrcp)
+{
+ int error;
+ struct rk_gpio_softc *sc = device_get_softc(dev);
+ uint32_t irq;
+
+ error = rk_gpio_pic_map(sc, data, &irq, NULL);
+ if (error == 0)
+ *isrcp = RK_GPIO_ISRC(sc, irq);
+ return (error);
+}
+
+static int
rk_pic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
struct resource *res, struct intr_map_data *data)
{
struct rk_gpio_softc *sc = device_get_softc(dev);
struct rk_pin_irqsrc *rkisrc = (struct rk_pin_irqsrc *)isrc;
- struct intr_map_data_gpio *gdata;
uint32_t mode;
- uint8_t pin;
+ uint32_t pin;
if (!data) {
device_printf(dev, "No map data\n");
return (ENOTSUP);
}
- gdata = (struct intr_map_data_gpio *)data;
- mode = gdata->gpio_intr_mode;
- pin = gdata->gpio_pin_num;
- if (rkisrc->irq != gdata->gpio_pin_num) {
+ if (rk_gpio_pic_map(sc, data, &pin, &mode) != 0)
+ return (EINVAL);
+
+ if (rkisrc->irq != pin) {
device_printf(dev, "Interrupts don't match\n");
return (EINVAL);
}
@@ -779,6 +851,10 @@ static device_method_t rk_gpio_methods[] = {
DEVMETHOD(device_attach, rk_gpio_attach),
DEVMETHOD(device_detach, rk_gpio_detach),
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, rk_gpio_get_bus),
DEVMETHOD(gpio_pin_max, rk_gpio_pin_max),
diff --git a/sys/arm64/rockchip/rk_grf_gpio.c b/sys/arm64/rockchip/rk_grf_gpio.c
index 6818bd85bb95..6ac419889614 100644
--- a/sys/arm64/rockchip/rk_grf_gpio.c
+++ b/sys/arm64/rockchip/rk_grf_gpio.c
@@ -181,11 +181,12 @@ rk_grf_gpio_attach(device_t dev)
return (ENXIO);
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c
index e6cbad36f697..d83b09480a0c 100644
--- a/sys/arm64/rockchip/rk_tsadc.c
+++ b/sys/arm64/rockchip/rk_tsadc.c
@@ -484,7 +484,7 @@ tsadc_init_tsensor(struct tsadc_softc *sc, struct tsensor *sensor)
WR4(sc, TSADC_INT_EN, val);
/* Shutdown temperature */
- val = tsadc_raw_to_temp(sc, sc->shutdown_temp);
+ val = tsadc_temp_to_raw(sc, sc->shutdown_temp);
WR4(sc, TSADC_COMP_SHUT(sensor->channel), val);
val = RD4(sc, TSADC_AUTO_CON);
val |= TSADC_AUTO_SRC_EN(sensor->channel);
diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
index 6a0c4c78e568..f530dab05331 100644
--- a/sys/arm64/vmm/arm64.h
+++ b/sys/arm64/vmm/arm64.h
@@ -78,14 +78,16 @@ struct hypctx {
uint64_t pmcr_el0; /* Performance Monitors Control Register */
uint64_t pmccntr_el0;
uint64_t pmccfiltr_el0;
+ uint64_t pmuserenr_el0;
+ uint64_t pmselr_el0;
+ uint64_t pmxevcntr_el0;
uint64_t pmcntenset_el0;
uint64_t pmintenset_el1;
uint64_t pmovsset_el0;
- uint64_t pmselr_el0;
- uint64_t pmuserenr_el0;
uint64_t pmevcntr_el0[31];
uint64_t pmevtyper_el0[31];
+ uint64_t dbgclaimset_el1;
uint64_t dbgbcr_el1[16]; /* Debug Breakpoint Control Registers */
uint64_t dbgbvr_el1[16]; /* Debug Breakpoint Value Registers */
uint64_t dbgwcr_el1[16]; /* Debug Watchpoint Control Registers */
@@ -117,6 +119,7 @@ struct hypctx {
struct vgic_v3_regs vgic_v3_regs;
struct vgic_v3_cpu *vgic_cpu;
bool has_exception;
+ bool dbg_oslock;
};
struct hyp {
@@ -125,42 +128,14 @@ struct hyp {
uint64_t vmid_generation;
uint64_t vttbr_el2;
uint64_t el2_addr; /* The address of this in el2 space */
+ uint64_t feats; /* Which features are enabled */
+#define HYP_FEAT_HCX (0x1ul << 0)
+#define HYP_FEAT_ECV_POFF (0x1ul << 1)
bool vgic_attached;
struct vgic_v3 *vgic;
struct hypctx *ctx[];
};
-#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
- ret_type vmmops_##opname args;
-
-DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
-DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
-DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
- uint64_t gla, int prot, uint64_t *gpa, int *is_fault))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
- struct vm_eventinfo *info))
-DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
-DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
- int vcpu_id))
-DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
-DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
-DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
- vm_offset_t max))
-DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-#ifdef notyet
-#ifdef BHYVE_SNAPSHOT
-DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
-DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
- struct vm_snapshot_meta *meta))
-DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
-#endif
-#endif
-
uint64_t vmm_call_hyp(uint64_t, ...);
#if 0
diff --git a/sys/arm64/vmm/hyp.h b/sys/arm64/vmm/hyp.h
index 0b2977c73960..0c8d0fb28b18 100644
--- a/sys/arm64/vmm/hyp.h
+++ b/sys/arm64/vmm/hyp.h
@@ -80,7 +80,6 @@
#define HYP_ENTER_GUEST 0x00000002
#define HYP_READ_REGISTER 0x00000003
#define HYP_REG_ICH_VTR 0x1
-#define HYP_REG_CNTHCTL 0x2
#define HYP_CLEAN_S2_TLBI 0x00000004
#define HYP_DC_CIVAC 0x00000005
#define HYP_EL2_TLBI 0x00000006
diff --git a/sys/arm64/vmm/io/vtimer.c b/sys/arm64/vmm/io/vtimer.c
index f59d7ebc1ad4..da0f0d96c431 100644
--- a/sys/arm64/vmm/io/vtimer.c
+++ b/sys/arm64/vmm/io/vtimer.c
@@ -36,6 +36,7 @@
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
+#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
@@ -55,11 +56,18 @@
#define timer_enabled(ctl) \
(!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))
-static uint64_t cnthctl_el2_reg;
static uint32_t tmr_frq;
#define timer_condition_met(ctl) ((ctl) & CNTP_CTL_ISTATUS)
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, vtimer, CTLFLAG_RW, NULL, NULL);
+
+static bool allow_ecv_phys = false;
+SYSCTL_BOOL(_hw_vmm_vtimer, OID_AUTO, allow_ecv_phys, CTLFLAG_RW,
+ &allow_ecv_phys, 0,
+ "Enable hardware access to the physical timer if FEAT_ECV_POFF is supported");
+
static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);
static int
@@ -111,9 +119,8 @@ out:
}
int
-vtimer_init(uint64_t cnthctl_el2)
+vtimer_init(void)
{
- cnthctl_el2_reg = cnthctl_el2;
/*
* The guest *MUST* use the same timer frequency as the host. The
* register CNTFRQ_EL0 is accessible to the guest and a different value
@@ -128,8 +135,12 @@ void
vtimer_vminit(struct hyp *hyp)
{
uint64_t now;
+ bool ecv_poff;
- hyp->vtimer.cnthctl_el2 = cnthctl_el2_reg;
+ ecv_poff = false;
+
+ if (allow_ecv_phys && (hyp->feats & HYP_FEAT_ECV_POFF) != 0)
+ ecv_poff = true;
/*
* Configure the Counter-timer Hypervisor Control Register for the VM.
@@ -137,35 +148,58 @@ vtimer_vminit(struct hyp *hyp)
if (in_vhe()) {
/*
* CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
- * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
- * CNTV{CT,CTSS}_EL0
+ * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
+ * CNTV{CT,CTSS}_EL0
* CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
* CNTV_{CTL,CVAL,TVAL}_EL0
* CNTHCTL_E2H_EL0PTEN: trap EL0 access to
* CNTP_{CTL,CVAL,TVAL}_EL0
- * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
- CNTP_{CTL,CVAL,TVAL}_EL0
* CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
+ * CNTHCTL_E2H_EL1PTEN: trap access to
+ * CNTP_{CTL,CVAL,TVAL}_EL0
+ * CNTHCTL_E2H_EL1VCTEN: don't trap EL0 access to
+ * CNTV{CT,CTSS}_EL0
+ * CNTHCTL_E2H_EL1PCEN: trap EL1 access to
+ * CNTP_{CTL,CVAL,TVAL}_EL0
*
* TODO: Don't trap when FEAT_ECV is present
*/
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PCTEN;
- hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VCTEN;
- hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VTEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PTEN;
-
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PTEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PCTEN;
+ hyp->vtimer.cnthctl_el2 =
+ CNTHCTL_E2H_EL0VCTEN_NOTRAP |
+ CNTHCTL_E2H_EL0VTEN_NOTRAP;
+ if (ecv_poff) {
+ hyp->vtimer.cnthctl_el2 |=
+ CNTHCTL_E2H_EL0PCTEN_NOTRAP |
+ CNTHCTL_E2H_EL0PTEN_NOTRAP |
+ CNTHCTL_E2H_EL1PCTEN_NOTRAP |
+ CNTHCTL_E2H_EL1PTEN_NOTRAP;
+ } else {
+ hyp->vtimer.cnthctl_el2 |=
+ CNTHCTL_E2H_EL0PCTEN_TRAP |
+ CNTHCTL_E2H_EL0PTEN_TRAP |
+ CNTHCTL_E2H_EL1PCTEN_TRAP |
+ CNTHCTL_E2H_EL1PTEN_TRAP;
+ }
} else {
/*
* CNTHCTL_EL1PCEN: trap access to CNTP_{CTL, CVAL, TVAL}_EL0
* from EL1
* CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
*/
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCEN;
- hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCTEN;
+ if (ecv_poff) {
+ hyp->vtimer.cnthctl_el2 =
+ CNTHCTL_EL1PCTEN_NOTRAP |
+ CNTHCTL_EL1PCEN_NOTRAP;
+ } else {
+ hyp->vtimer.cnthctl_el2 =
+ CNTHCTL_EL1PCTEN_TRAP |
+ CNTHCTL_EL1PCEN_TRAP;
+ }
}
+ if (ecv_poff)
+ hyp->vtimer.cnthctl_el2 |= CNTHCTL_ECV_EN;
+
now = READ_SPECIALREG(cntpct_el0);
hyp->vtimer.cntvoff_el2 = now;
@@ -231,15 +265,10 @@ vtimer_cleanup(void)
{
}
-void
-vtimer_sync_hwstate(struct hypctx *hypctx)
+static void
+vtime_sync_timer(struct hypctx *hypctx, struct vtimer_timer *timer,
+ uint64_t cntpct_el0)
{
- struct vtimer_timer *timer;
- uint64_t cntpct_el0;
-
- timer = &hypctx->vtimer_cpu.virt_timer;
- cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
- hypctx->hyp->vtimer.cntvoff_el2;
if (!timer_enabled(timer->cntx_ctl_el0)) {
vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
timer->irqid, false);
@@ -253,6 +282,21 @@ vtimer_sync_hwstate(struct hypctx *hypctx)
}
}
+void
+vtimer_sync_hwstate(struct hypctx *hypctx)
+{
+ uint64_t cntpct_el0;
+
+ cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
+ hypctx->hyp->vtimer.cntvoff_el2;
+ vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.virt_timer, cntpct_el0);
+ /* If FEAT_ECV_POFF is in use then we need to sync the physical timer */
+ if ((hypctx->hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0) {
+ vtime_sync_timer(hypctx, &hypctx->vtimer_cpu.phys_timer,
+ cntpct_el0);
+ }
+}
+
static void
vtimer_inject_irq_callout_phys(void *context)
{
diff --git a/sys/arm64/vmm/io/vtimer.h b/sys/arm64/vmm/io/vtimer.h
index 71a20344d05e..92ce025968d2 100644
--- a/sys/arm64/vmm/io/vtimer.h
+++ b/sys/arm64/vmm/io/vtimer.h
@@ -66,7 +66,7 @@ struct vtimer_cpu {
uint32_t cntkctl_el1;
};
-int vtimer_init(uint64_t cnthctl_el2);
+int vtimer_init(void);
void vtimer_vminit(struct hyp *);
void vtimer_cpuinit(struct hypctx *);
void vtimer_cpucleanup(struct hypctx *);
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index 3082d2941221..aeda689f3b1a 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -88,7 +88,6 @@ struct vcpu {
struct vfpstate *guestfpu; /* (a,i) guest fpu state */
};
-#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
@@ -126,7 +125,6 @@ struct vm {
bool dying; /* (o) is dying */
volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- struct vmspace *vmspace; /* (o) guest's address space */
struct vm_mem mem; /* (i) guest memory */
char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
struct vcpu **vcpu; /* (i) guest vcpus */
@@ -274,6 +272,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy)
vmm_stat_free(vcpu->stats);
fpu_save_area_free(vcpu->guestfpu);
vcpu_lock_destroy(vcpu);
+ free(vcpu, M_VMM);
}
}
@@ -407,7 +406,7 @@ vm_init(struct vm *vm, bool create)
{
int i;
- vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+ vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
MPASS(vm->cookie != NULL);
CPU_ZERO(&vm->active_cpus);
@@ -485,7 +484,7 @@ int
vm_create(const char *name, struct vm **retvm)
{
struct vm *vm;
- struct vmspace *vmspace;
+ int error;
/*
* If vmm.ko could not be successfully initialized then don't attempt
@@ -497,14 +496,13 @@ vm_create(const char *name, struct vm **retvm)
if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
return (EINVAL);
- vmspace = vmmops_vmspace_alloc(0, 1ul << 39);
- if (vmspace == NULL)
- return (ENOMEM);
-
vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
+ error = vm_mem_init(&vm->mem, 0, 1ul << 39);
+ if (error != 0) {
+ free(vm, M_VMM);
+ return (error);
+ }
strcpy(vm->name, name);
- vm->vmspace = vmspace;
- vm_mem_init(&vm->mem);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->sockets = 1;
@@ -558,7 +556,7 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_xlock_memsegs(vm);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
sched_pin();
PCPU_SET(curvmpmap, NULL);
sched_unpin();
@@ -582,11 +580,6 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_mem_destroy(vm);
- vmmops_vmspace_free(vm->vmspace);
- vm->vmspace = NULL;
-
- for (i = 0; i < vm->maxcpus; i++)
- free(vm->vcpu[i], M_VMM);
free(vm->vcpu, M_VMM);
sx_destroy(&vm->vcpus_init_lock);
}
@@ -651,6 +644,33 @@ vmm_reg_wi(struct vcpu *vcpu, uint64_t wval, void *arg)
return (0);
}
+static int
+vmm_write_oslar_el1(struct vcpu *vcpu, uint64_t wval, void *arg)
+{
+ struct hypctx *hypctx;
+
+ hypctx = vcpu_get_cookie(vcpu);
+ /* All other fields are RES0 and we don't do anything with this */
+ /* TODO: Disable access to other debug state when locked */
+ hypctx->dbg_oslock = (wval & OSLAR_OSLK) == OSLAR_OSLK;
+ return (0);
+}
+
+static int
+vmm_read_oslsr_el1(struct vcpu *vcpu, uint64_t *rval, void *arg)
+{
+ struct hypctx *hypctx;
+ uint64_t val;
+
+ hypctx = vcpu_get_cookie(vcpu);
+ val = OSLSR_OSLM_1;
+ if (hypctx->dbg_oslock)
+ val |= OSLSR_OSLK;
+ *rval = val;
+
+ return (0);
+}
+
static const struct vmm_special_reg vmm_special_regs[] = {
#define SPECIAL_REG(_reg, _read, _write) \
{ \
@@ -707,6 +727,13 @@ static const struct vmm_special_reg vmm_special_regs[] = {
SPECIAL_REG(CNTP_TVAL_EL0, vtimer_phys_tval_read,
vtimer_phys_tval_write),
SPECIAL_REG(CNTPCT_EL0, vtimer_phys_cnt_read, vtimer_phys_cnt_write),
+
+ /* Debug registers */
+ SPECIAL_REG(DBGPRCR_EL1, vmm_reg_raz, vmm_reg_wi),
+ SPECIAL_REG(OSDLR_EL1, vmm_reg_raz, vmm_reg_wi),
+ /* TODO: Exceptions on invalid access */
+ SPECIAL_REG(OSLAR_EL1, vmm_reg_raz, vmm_write_oslar_el1),
+ SPECIAL_REG(OSLSR_EL1, vmm_read_oslsr_el1, vmm_reg_wi),
#undef SPECIAL_REG
};
@@ -1056,12 +1083,6 @@ vcpu_notify_event(struct vcpu *vcpu)
vcpu_unlock(vcpu);
}
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
- return (vm->vmspace);
-}
-
struct vm_mem *
vm_mem(struct vm *vm)
{
@@ -1342,8 +1363,14 @@ vm_handle_smccc_call(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
static int
vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
{
+ struct vm *vm;
+
+ vm = vcpu->vm;
vcpu_lock(vcpu);
while (1) {
+ if (vm->suspend)
+ break;
+
if (vgic_has_pending_irq(vcpu->cookie))
break;
@@ -1376,7 +1403,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
vme = &vcpu->exitinfo;
- pmap = vmspace_pmap(vcpu->vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vcpu->vm));
addr = vme->u.paging.gpa;
esr = vme->u.paging.esr;
@@ -1393,7 +1420,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
panic("%s: Invalid exception (esr = %lx)", __func__, esr);
}
- map = &vm->vmspace->vm_map;
+ map = &vm_vmspace(vm)->vm_map;
rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
if (rv != KERN_SUCCESS)
return (EFAULT);
@@ -1467,7 +1494,7 @@ vm_run(struct vcpu *vcpu)
if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
return (EINVAL);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
vme = &vcpu->exitinfo;
evinfo.rptr = NULL;
evinfo.sptr = &vm->suspend;
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index e293c99a6646..618f4afaf8ee 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -238,7 +238,6 @@ vmmops_modinit(int ipinum)
vm_offset_t next_hyp_va;
vm_paddr_t vmm_base;
uint64_t id_aa64mmfr0_el1, pa_range_bits, pa_range_field;
- uint64_t cnthctl_el2;
int cpu, i;
bool rv __diagused;
@@ -444,10 +443,9 @@ vmmops_modinit(int ipinum)
vmem_add(el2_mem_alloc, next_hyp_va,
HYP_VM_MAX_ADDRESS - next_hyp_va, M_WAITOK);
}
- cnthctl_el2 = vmm_read_reg(HYP_REG_CNTHCTL);
vgic_init();
- vtimer_init(cnthctl_el2);
+ vtimer_init();
return (0);
}
@@ -517,6 +515,7 @@ vmmops_init(struct vm *vm, pmap_t pmap)
{
struct hyp *hyp;
vm_size_t size;
+ uint64_t idreg;
size = el2_hyp_size(vm);
hyp = malloc_aligned(size, PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO);
@@ -524,6 +523,16 @@ vmmops_init(struct vm *vm, pmap_t pmap)
hyp->vm = vm;
hyp->vgic_attached = false;
+ if (get_kernel_reg(ID_AA64MMFR0_EL1, &idreg)) {
+ if (ID_AA64MMFR0_ECV_VAL(idreg) >= ID_AA64MMFR0_ECV_POFF)
+ hyp->feats |= HYP_FEAT_ECV_POFF;
+ }
+
+ if (get_kernel_reg(ID_AA64MMFR1_EL1, &idreg)) {
+ if (ID_AA64MMFR1_HCX_VAL(idreg) >= ID_AA64MMFR1_HCX_IMPL)
+ hyp->feats |= HYP_FEAT_HCX;
+ }
+
vtimer_vminit(hyp);
vgic_vminit(hyp);
diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index d61885c15871..b8c6d2ab7a9a 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -42,11 +42,11 @@ struct hypctx;
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
static void
-vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
+vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
+ bool ecv_poff)
{
uint64_t dfr0;
- /* Store the guest VFP registers */
if (guest) {
/* Store the timer registers */
hypctx->vtimer_cpu.cntkctl_el1 =
@@ -55,7 +55,20 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
READ_SPECIALREG(EL0_REG(CNTV_CVAL));
hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
READ_SPECIALREG(EL0_REG(CNTV_CTL));
+ }
+ if (guest_or_nonvhe(guest) && ecv_poff) {
+ /*
+ * If we have ECV then the guest could modify these registers.
+ * If VHE is enabled then the kernel will see a different view
+ * of the registers, so it doesn't need to handle them.
+ */
+ hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
+ READ_SPECIALREG(EL0_REG(CNTP_CVAL));
+ hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
+ READ_SPECIALREG(EL0_REG(CNTP_CTL));
+ }
+ if (guest) {
/* Store the GICv3 registers */
hypctx->vgic_v3_regs.ich_eisr_el2 =
READ_SPECIALREG(ich_eisr_el2);
@@ -108,6 +121,8 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
}
}
+ hypctx->dbgclaimset_el1 = READ_SPECIALREG(dbgclaimset_el1);
+
dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define STORE_DBG_BRP(x) \
@@ -167,10 +182,13 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
+ hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
+ hypctx->pmselr_el0 = READ_SPECIALREG(pmselr_el0);
+ hypctx->pmxevcntr_el0 = READ_SPECIALREG(pmxevcntr_el0);
hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
- hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
+
switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define STORE_PMU(x) \
case (x + 1): \
@@ -259,29 +277,20 @@ vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
-
-#ifndef VMM_VHE
- /* hcrx_el2 depends on feat_hcx */
- uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
- hypctx->hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
- }
-#endif
}
static void
-vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
+vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
+ bool ecv_poff)
{
uint64_t dfr0;
/* Restore the special registers */
WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
- if (guest_or_nonvhe(guest)) {
- uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
- if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
- WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hypctx->hcrx_el2);
- }
+ if (guest) {
+ if ((hyp->feats & HYP_FEAT_HCX) != 0)
+ WRITE_SPECIALREG(HCRX_EL2_REG, hypctx->hcrx_el2);
}
isb();
@@ -333,12 +342,15 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
+ WRITE_SPECIALREG(pmuserenr_el0, hypctx->pmuserenr_el0);
+ WRITE_SPECIALREG(pmselr_el0, hypctx->pmselr_el0);
+ WRITE_SPECIALREG(pmxevcntr_el0, hypctx->pmxevcntr_el0);
/* Clear all events/interrupts then enable them */
- WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
+ WRITE_SPECIALREG(pmcntenclr_el0, ~0ul);
WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
- WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
+ WRITE_SPECIALREG(pmintenclr_el1, ~0ul);
WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
- WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
+ WRITE_SPECIALREG(pmovsclr_el0, ~0ul);
WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);
switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
@@ -384,6 +396,9 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
#undef LOAD_PMU
}
+ WRITE_SPECIALREG(dbgclaimclr_el1, ~0ul);
+ WRITE_SPECIALREG(dbgclaimset_el1, hypctx->dbgclaimset_el1);
+
dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define LOAD_DBG_BRP(x) \
@@ -450,6 +465,29 @@ vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);
+ if (ecv_poff) {
+ /*
+ * Load the same offset as the virtual timer
+ * to keep in sync.
+ */
+ WRITE_SPECIALREG(CNTPOFF_EL2_REG,
+ hyp->vtimer.cntvoff_el2);
+ isb();
+ }
+ }
+ if (guest_or_nonvhe(guest) && ecv_poff) {
+ /*
+ * If we have ECV then the guest could modify these registers.
+ * If VHE is enabled then the kernel will see a different view
+ * of the registers, so it doesn't need to handle them.
+ */
+ WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
+ hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
+ WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
+ hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
+ }
+
+ if (guest) {
/* Load the GICv3 registers */
WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
WRITE_SPECIALREG(ich_vmcr_el2,
@@ -502,11 +540,19 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
struct hypctx host_hypctx;
uint64_t cntvoff_el2;
uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
+#ifndef VMM_VHE
+ uint64_t hcrx_el2;
+#endif
uint64_t ret;
uint64_t s1e1r, hpfar_el2;
- bool hpfar_valid;
+ bool ecv_poff, hpfar_valid;
- vmm_hyp_reg_store(&host_hypctx, NULL, false);
+ ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
+ vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
+#ifndef VMM_VHE
+ if ((hyp->feats & HYP_FEAT_HCX) != 0)
+ hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
+#endif
/* Save the host special registers */
cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
@@ -516,7 +562,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);
- vmm_hyp_reg_restore(hypctx, hyp, true);
+ vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);
/* Load the common hypervisor registers */
WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);
@@ -532,7 +578,7 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
/* Store the exit info */
hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
- vmm_hyp_reg_store(hypctx, hyp, true);
+ vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);
hpfar_valid = true;
if (ret == EXCP_TYPE_EL1_SYNC) {
@@ -582,7 +628,12 @@ vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
}
}
- vmm_hyp_reg_restore(&host_hypctx, NULL, false);
+ vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);
+
+#ifndef VMM_VHE
+ if ((hyp->feats & HYP_FEAT_HCX) != 0)
+ WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hcrx_el2);
+#endif
/* Restore the host special registers */
WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
@@ -613,8 +664,6 @@ VMM_HYP_FUNC(read_reg)(uint64_t reg)
switch (reg) {
case HYP_REG_ICH_VTR:
return (READ_SPECIALREG(ich_vtr_el2));
- case HYP_REG_CNTHCTL:
- return (READ_SPECIALREG(cnthctl_el2));
}
return (0);
diff --git a/sys/arm64/vmm/vmm_reset.c b/sys/arm64/vmm/vmm_reset.c
index 79d022cf33e8..1240c3ed16ec 100644
--- a/sys/arm64/vmm/vmm_reset.c
+++ b/sys/arm64/vmm/vmm_reset.c
@@ -100,10 +100,12 @@ reset_vm_el01_regs(void *vcpu)
el2ctx->pmcr_el0 |= PMCR_LC;
set_arch_unknown(el2ctx->pmccntr_el0);
set_arch_unknown(el2ctx->pmccfiltr_el0);
+ set_arch_unknown(el2ctx->pmuserenr_el0);
+ set_arch_unknown(el2ctx->pmselr_el0);
+ set_arch_unknown(el2ctx->pmxevcntr_el0);
set_arch_unknown(el2ctx->pmcntenset_el0);
set_arch_unknown(el2ctx->pmintenset_el1);
set_arch_unknown(el2ctx->pmovsset_el0);
- set_arch_unknown(el2ctx->pmuserenr_el0);
memset(el2ctx->pmevcntr_el0, 0, sizeof(el2ctx->pmevcntr_el0));
memset(el2ctx->pmevtyper_el0, 0, sizeof(el2ctx->pmevtyper_el0));
}
@@ -143,7 +145,8 @@ reset_vm_el2_regs(void *vcpu)
/* Set the Extended Hypervisor Configuration Register */
el2ctx->hcrx_el2 = 0;
/* TODO: Trap all extensions we don't support */
- el2ctx->mdcr_el2 = 0;
+ el2ctx->mdcr_el2 = MDCR_EL2_TDOSA | MDCR_EL2_TDRA | MDCR_EL2_TPMS |
+ MDCR_EL2_TTRF;
/* PMCR_EL0.N is read from MDCR_EL2.HPMN */
el2ctx->mdcr_el2 |= (el2ctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT;
diff --git a/sys/cam/ata/ata_all.c b/sys/cam/ata/ata_all.c
index f9a2b86f0c06..7cd768a9811a 100644
--- a/sys/cam/ata/ata_all.c
+++ b/sys/cam/ata/ata_all.c
@@ -1151,7 +1151,7 @@ ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries,
/*
* For SEND FPDMA QUEUED, the transfer length is
* encoded in the FEATURE register, and 0 means
- * that 65536 512 byte blocks are to be tranferred.
+ * that 65536 512 byte blocks are to be transferred.
* In practice, it seems unlikely that we'll see
* a transfer that large.
*/
@@ -1220,7 +1220,7 @@ ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries,
/*
* For RECEIVE FPDMA QUEUED, the transfer length is
* encoded in the FEATURE register, and 0 means
- * that 65536 512 byte blocks are to be tranferred.
+ * that 65536 512 byte blocks are to be transferred.
* In practice, it is unlikely we will see a transfer that
* large.
*/
diff --git a/sys/cam/ata/ata_da.c b/sys/cam/ata/ata_da.c
index 1facab47473c..0d844a6fbf9e 100644
--- a/sys/cam/ata/ata_da.c
+++ b/sys/cam/ata/ata_da.c
@@ -44,6 +44,7 @@
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/cons.h>
+#include <sys/power.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
@@ -878,8 +879,8 @@ static int adaerror(union ccb *ccb, uint32_t cam_flags,
uint32_t sense_flags);
static callout_func_t adasendorderedtag;
static void adashutdown(void *arg, int howto);
-static void adasuspend(void *arg);
-static void adaresume(void *arg);
+static void adasuspend(void *arg, enum power_stype stype);
+static void adaresume(void *arg, enum power_stype stype);
#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
@@ -3747,7 +3748,7 @@ adashutdown(void *arg, int howto)
}
static void
-adasuspend(void *arg)
+adasuspend(void *arg, enum power_stype stype)
{
adaflush();
@@ -3760,7 +3761,7 @@ adasuspend(void *arg)
}
static void
-adaresume(void *arg)
+adaresume(void *arg, enum power_stype stype)
{
struct cam_periph *periph;
struct ada_softc *softc;
diff --git a/sys/cam/nvme/nvme_da.c b/sys/cam/nvme/nvme_da.c
index 1c0d5e8381d8..9c4707da482c 100644
--- a/sys/cam/nvme/nvme_da.c
+++ b/sys/cam/nvme/nvme_da.c
@@ -43,6 +43,7 @@
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
+#include <sys/power.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
@@ -159,7 +160,7 @@ static void ndadone(struct cam_periph *periph,
static int ndaerror(union ccb *ccb, uint32_t cam_flags,
uint32_t sense_flags);
static void ndashutdown(void *arg, int howto);
-static void ndasuspend(void *arg);
+static void ndasuspend(void *arg, enum power_stype stype);
#ifndef NDA_DEFAULT_SEND_ORDERED
#define NDA_DEFAULT_SEND_ORDERED 1
@@ -1365,7 +1366,7 @@ ndashutdown(void *arg, int howto)
}
static void
-ndasuspend(void *arg)
+ndasuspend(void *arg, enum power_stype stype)
{
ndaflush();
diff --git a/sys/cam/scsi/scsi_all.c b/sys/cam/scsi/scsi_all.c
index b518f84454ad..fd128e69f1f1 100644
--- a/sys/cam/scsi/scsi_all.c
+++ b/sys/cam/scsi/scsi_all.c
@@ -112,7 +112,7 @@ static void fetchtableentries(int sense_key, int asc, int ascq,
const struct asc_table_entry **);
#ifdef _KERNEL
-static void init_scsi_delay(void);
+static void init_scsi_delay(void *);
static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS);
static int set_scsi_delay(int delay);
#endif
@@ -686,7 +686,7 @@ scsi_op_desc(uint16_t opcode, struct scsi_inquiry_data *inq_data)
opmask = 1 << pd_type;
for (j = 0; j < num_tables; j++) {
- for (i = 0;i < num_ops[j] && table[j][i].opcode <= opcode; i++){
+ for (i = 0; i < num_ops[j] && table[j][i].opcode <= opcode; i++) {
if ((table[j][i].opcode == opcode)
&& ((table[j][i].opmask & opmask) != 0))
return(table[j][i].desc);
@@ -9379,7 +9379,7 @@ scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id)
}
static void
-init_scsi_delay(void)
+init_scsi_delay(void *dummy __unused)
{
int delay;
diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c
index d02750aaacaf..fc8c0413448d 100644
--- a/sys/cam/scsi/scsi_da.c
+++ b/sys/cam/scsi/scsi_da.c
@@ -6830,7 +6830,7 @@ scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
/*
* For SEND FPDMA QUEUED, the transfer length is
* encoded in the FEATURE register, and 0 means
- * that 65536 512 byte blocks are to be tranferred.
+ * that 65536 512 byte blocks are to be transferred.
* In practice, it seems unlikely that we'll see
 * a transfer that large, and it may confuse
 * the SAT layer, because generally that means that
@@ -6916,7 +6916,7 @@ scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
/*
* For RECEIVE FPDMA QUEUED, the transfer length is
* encoded in the FEATURE register, and 0 means
- * that 65536 512 byte blocks are to be tranferred.
+ * that 65536 512 byte blocks are to be transferred.
* In practice, it seems unlikely that we'll see
 * a transfer that large, and it may confuse
 * the SAT layer, because generally that means that
diff --git a/sys/cam/scsi/scsi_enc_ses.c b/sys/cam/scsi/scsi_enc_ses.c
index 435874a9874a..3a362eaf11a4 100644
--- a/sys/cam/scsi/scsi_enc_ses.c
+++ b/sys/cam/scsi/scsi_enc_ses.c
@@ -2302,7 +2302,7 @@ ses_print_addl_data_sas_type0(char *sesname, struct sbuf *sbp,
sbuf_putc(sbp, '\n');
if (addl->proto_data.sasdev_phys == NULL)
return;
- for (i = 0;i < addl->proto_hdr.sas->base_hdr.num_phys;i++) {
+ for (i = 0; i < addl->proto_hdr.sas->base_hdr.num_phys; i++) {
phy = &addl->proto_data.sasdev_phys[i];
sbuf_printf(sbp, "%s: phy %d:", sesname, i);
if (ses_elm_sas_dev_phy_sata_dev(phy))
@@ -2349,7 +2349,7 @@ ses_print_addl_data_sas_type1(char *sesname, struct sbuf *sbp,
sbuf_printf(sbp, "Expander: %d phys", num_phys);
if (addl->proto_data.sasexp_phys == NULL)
return;
- for (i = 0;i < num_phys;i++) {
+ for (i = 0; i < num_phys; i++) {
exp_phy = &addl->proto_data.sasexp_phys[i];
sbuf_printf(sbp, "%s: phy %d: connector %d other %d\n",
sesname, i, exp_phy->connector_index,
@@ -2360,7 +2360,7 @@ ses_print_addl_data_sas_type1(char *sesname, struct sbuf *sbp,
sbuf_printf(sbp, "Port: %d phys", num_phys);
if (addl->proto_data.sasport_phys == NULL)
return;
- for (i = 0;i < num_phys;i++) {
+ for (i = 0; i < num_phys; i++) {
port_phy = &addl->proto_data.sasport_phys[i];
sbuf_printf(sbp,
"%s: phy %d: id %d connector %d other %d\n",
diff --git a/sys/cddl/boot/zfs/zfsimpl.h b/sys/cddl/boot/zfs/zfsimpl.h
index 83d964360343..c9de1fe4c391 100644
--- a/sys/cddl/boot/zfs/zfsimpl.h
+++ b/sys/cddl/boot/zfs/zfsimpl.h
@@ -536,6 +536,12 @@ typedef struct zio_gbh {
offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
+#define ASHIFT_UBERBLOCK_SHIFT(ashift) \
+ MIN(MAX(ashift, UBERBLOCK_SHIFT), \
+ MAX_UBERBLOCK_SHIFT)
+#define ASHIFT_UBERBLOCK_SIZE(ashift) \
+ (1ULL << ASHIFT_UBERBLOCK_SHIFT(ashift))
+
typedef struct vdev_phys {
char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
zio_eck_t vp_zbt;
@@ -2015,11 +2021,11 @@ typedef struct vdev_indirect_config {
typedef struct vdev {
STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */
- STAILQ_ENTRY(vdev) v_alllink; /* link in global vdev list */
vdev_list_t v_children; /* children of this vdev */
const char *v_name; /* vdev name */
uint64_t v_guid; /* vdev guid */
- uint64_t v_txg; /* most recent transaction */
+ uint64_t v_label; /* label instantiated from (top vdev) */
+ uint64_t v_txg; /* most recent transaction (top vdev) */
uint64_t v_id; /* index in parent */
uint64_t v_psize; /* physical device capacity */
int v_ashift; /* offset to block shift */
diff --git a/sys/cddl/compat/opensolaris/kern/opensolaris.c b/sys/cddl/compat/opensolaris/kern/opensolaris.c
index 10924977c20d..898b2ea49f96 100644
--- a/sys/cddl/compat/opensolaris/kern/opensolaris.c
+++ b/sys/cddl/compat/opensolaris/kern/opensolaris.c
@@ -67,7 +67,7 @@ opensolaris_load(void *dummy)
SYSINIT(opensolaris_register, SI_SUB_OPENSOLARIS, SI_ORDER_FIRST, opensolaris_load, NULL);
static void
-opensolaris_unload(void)
+opensolaris_unload(void *dummy __unused)
{
mutex_destroy(&cpu_lock);
}
diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
index 7192df200ae2..853cfb845878 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
@@ -707,6 +707,31 @@ dtrace_error(uint32_t *counter)
} while (dtrace_cas32(counter, oval, nval) != oval);
}
+void
+dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
+{
+ cpuset_t cpus;
+
+ if (cpu == DTRACE_CPUALL)
+ cpus = all_cpus;
+ else
+ CPU_SETOF(cpu, &cpus);
+
+ smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
+ smp_no_rendezvous_barrier, arg);
+}
+
+static void
+dtrace_sync_func(void)
+{
+}
+
+void
+dtrace_sync(void)
+{
+ dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
+}
+
/*
* Use the DTRACE_LOADFUNC macro to define functions for each of loading a
* uint8_t, a uint16_t, a uint32_t and a uint64_t.
@@ -7761,7 +7786,8 @@ dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
if (!DTRACE_INSCRATCHPTR(&mstate,
- (uintptr_t)memref, 2 * sizeof(uintptr_t))) {
+ (uintptr_t) memref,
+ sizeof (uintptr_t) + sizeof (size_t))) {
*flags |= CPU_DTRACE_BADADDR;
continue;
}
@@ -7773,21 +7799,21 @@ dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
* Check if the size exceeds the allocated
* buffer size.
*/
- if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
+ if (size + sizeof (size_t) >
+ dp->dtdo_rtype.dtdt_size) {
/* Flag a drop! */
*flags |= CPU_DTRACE_DROP;
continue;
}
/* Store the size in the buffer first. */
- DTRACE_STORE(uintptr_t, tomax,
- valoffs, size);
+ DTRACE_STORE(size_t, tomax, valoffs, size);
/*
* Offset the buffer address to the start
* of the data.
*/
- valoffs += sizeof(uintptr_t);
+ valoffs += sizeof(size_t);
/*
* Reset to the memory address rather than
diff --git a/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c b/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c
index 32e84d8fbfe9..ff880e804770 100644
--- a/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/aarch64/dtrace_subr.c
@@ -124,33 +124,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-
-}
-
-void
-dtrace_sync(void)
-{
-
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
static uint64_t nsec_scale;
#define SCALE_SHIFT 25
diff --git a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
index 81aa53d00bd8..877d52fe18a7 100644
--- a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
@@ -142,31 +142,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, la57 ? (uintptr_t)addr_P5Tmap : (uintptr_t)addr_P4Tmap);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
diff --git a/sys/cddl/dev/dtrace/arm/dtrace_subr.c b/sys/cddl/dev/dtrace/arm/dtrace_subr.c
index bb42044aa477..10e9281709b6 100644
--- a/sys/cddl/dev/dtrace/arm/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/arm/dtrace_subr.c
@@ -138,31 +138,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
*/
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
/*
* DTrace needs a high resolution time function which can
* be called from a probe context and guaranteed not to have
diff --git a/sys/cddl/dev/dtrace/i386/dtrace_subr.c b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
index 026581f5a899..ebe2194a4b2f 100644
--- a/sys/cddl/dev/dtrace/i386/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
@@ -139,31 +139,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, kernelbase);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
diff --git a/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c b/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
index 5dd083310e6f..ee8be8da642f 100644
--- a/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
@@ -123,31 +123,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
*/
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-}
-
-void
-dtrace_sync(void)
-{
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
static int64_t tgt_cpu_tsc;
static int64_t hst_cpu_tsc;
static int64_t timebase_skew[MAXCPU];
diff --git a/sys/cddl/dev/dtrace/riscv/dtrace_subr.c b/sys/cddl/dev/dtrace/riscv/dtrace_subr.c
index 3a6aacd86fcd..ed2c0bdba7e2 100644
--- a/sys/cddl/dev/dtrace/riscv/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/riscv/dtrace_subr.c
@@ -127,33 +127,6 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
}
-void
-dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
-{
- cpuset_t cpus;
-
- if (cpu == DTRACE_CPUALL)
- cpus = all_cpus;
- else
- CPU_SETOF(cpu, &cpus);
-
- smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
- smp_no_rendezvous_barrier, arg);
-}
-
-static void
-dtrace_sync_func(void)
-{
-
-}
-
-void
-dtrace_sync(void)
-{
-
- dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
-}
-
/*
* DTrace needs a high resolution time function which can
* be called from a probe context and guaranteed not to have
diff --git a/sys/compat/freebsd32/freebsd32_syscall.h b/sys/compat/freebsd32/freebsd32_syscall.h
index 971510ebb6b6..54063150eef9 100644
--- a/sys/compat/freebsd32/freebsd32_syscall.h
+++ b/sys/compat/freebsd32/freebsd32_syscall.h
@@ -83,8 +83,8 @@
/* 76 is obsolete vhangup */
/* 77 is obsolete vlimit */
#define FREEBSD32_SYS_mincore 78
-#define FREEBSD32_SYS_getgroups 79
-#define FREEBSD32_SYS_setgroups 80
+#define FREEBSD32_SYS_freebsd14_getgroups 79
+#define FREEBSD32_SYS_freebsd14_setgroups 80
#define FREEBSD32_SYS_getpgrp 81
#define FREEBSD32_SYS_setpgid 82
#define FREEBSD32_SYS_freebsd32_setitimer 83
@@ -513,4 +513,8 @@
#define FREEBSD32_SYS_exterrctl 592
#define FREEBSD32_SYS_inotify_add_watch_at 593
#define FREEBSD32_SYS_inotify_rm_watch 594
-#define FREEBSD32_SYS_MAXSYSCALL 595
+#define FREEBSD32_SYS_getgroups 595
+#define FREEBSD32_SYS_setgroups 596
+#define FREEBSD32_SYS_jail_attach_jd 597
+#define FREEBSD32_SYS_jail_remove_jd 598
+#define FREEBSD32_SYS_MAXSYSCALL 599
diff --git a/sys/compat/freebsd32/freebsd32_syscalls.c b/sys/compat/freebsd32/freebsd32_syscalls.c
index 79c414b675b1..f7cc4c284e4d 100644
--- a/sys/compat/freebsd32/freebsd32_syscalls.c
+++ b/sys/compat/freebsd32/freebsd32_syscalls.c
@@ -84,8 +84,8 @@ const char *freebsd32_syscallnames[] = {
"obs_vhangup", /* 76 = obsolete vhangup */
"obs_vlimit", /* 77 = obsolete vlimit */
"mincore", /* 78 = mincore */
- "getgroups", /* 79 = getgroups */
- "setgroups", /* 80 = setgroups */
+ "compat14.getgroups", /* 79 = freebsd14 getgroups */
+ "compat14.setgroups", /* 80 = freebsd14 setgroups */
"getpgrp", /* 81 = getpgrp */
"setpgid", /* 82 = setpgid */
"freebsd32_setitimer", /* 83 = freebsd32_setitimer */
@@ -600,4 +600,8 @@ const char *freebsd32_syscallnames[] = {
"exterrctl", /* 592 = exterrctl */
"inotify_add_watch_at", /* 593 = inotify_add_watch_at */
"inotify_rm_watch", /* 594 = inotify_rm_watch */
+ "getgroups", /* 595 = getgroups */
+ "setgroups", /* 596 = setgroups */
+ "jail_attach_jd", /* 597 = jail_attach_jd */
+ "jail_remove_jd", /* 598 = jail_remove_jd */
};
diff --git a/sys/compat/freebsd32/freebsd32_sysent.c b/sys/compat/freebsd32/freebsd32_sysent.c
index 1a4b0d87722c..18f809ef04e3 100644
--- a/sys/compat/freebsd32/freebsd32_sysent.c
+++ b/sys/compat/freebsd32/freebsd32_sysent.c
@@ -146,8 +146,8 @@ struct sysent freebsd32_sysent[] = {
{ .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 76 = obsolete vhangup */
{ .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 77 = obsolete vlimit */
{ .sy_narg = AS(mincore_args), .sy_call = (sy_call_t *)sys_mincore, .sy_auevent = AUE_MINCORE, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 78 = mincore */
- { .sy_narg = AS(getgroups_args), .sy_call = (sy_call_t *)sys_getgroups, .sy_auevent = AUE_GETGROUPS, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 79 = getgroups */
- { .sy_narg = AS(setgroups_args), .sy_call = (sy_call_t *)sys_setgroups, .sy_auevent = AUE_SETGROUPS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 80 = setgroups */
+ { compat14(AS(freebsd14_getgroups_args),getgroups), .sy_auevent = AUE_GETGROUPS, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 79 = freebsd14 getgroups */
+ { compat14(AS(freebsd14_setgroups_args),setgroups), .sy_auevent = AUE_SETGROUPS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 80 = freebsd14 setgroups */
{ .sy_narg = 0, .sy_call = (sy_call_t *)sys_getpgrp, .sy_auevent = AUE_GETPGRP, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 81 = getpgrp */
{ .sy_narg = AS(setpgid_args), .sy_call = (sy_call_t *)sys_setpgid, .sy_auevent = AUE_SETPGRP, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 82 = setpgid */
{ .sy_narg = AS(freebsd32_setitimer_args), .sy_call = (sy_call_t *)freebsd32_setitimer, .sy_auevent = AUE_SETITIMER, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 83 = freebsd32_setitimer */
@@ -662,4 +662,8 @@ struct sysent freebsd32_sysent[] = {
{ .sy_narg = AS(exterrctl_args), .sy_call = (sy_call_t *)sys_exterrctl, .sy_auevent = AUE_NULL, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 592 = exterrctl */
{ .sy_narg = AS(inotify_add_watch_at_args), .sy_call = (sy_call_t *)sys_inotify_add_watch_at, .sy_auevent = AUE_INOTIFY, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 593 = inotify_add_watch_at */
{ .sy_narg = AS(inotify_rm_watch_args), .sy_call = (sy_call_t *)sys_inotify_rm_watch, .sy_auevent = AUE_INOTIFY, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 594 = inotify_rm_watch */
+ { .sy_narg = AS(getgroups_args), .sy_call = (sy_call_t *)sys_getgroups, .sy_auevent = AUE_GETGROUPS, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 595 = getgroups */
+ { .sy_narg = AS(setgroups_args), .sy_call = (sy_call_t *)sys_setgroups, .sy_auevent = AUE_SETGROUPS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 596 = setgroups */
+ { .sy_narg = AS(jail_attach_jd_args), .sy_call = (sy_call_t *)sys_jail_attach_jd, .sy_auevent = AUE_JAIL_ATTACH, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 597 = jail_attach_jd */
+ { .sy_narg = AS(jail_remove_jd_args), .sy_call = (sy_call_t *)sys_jail_remove_jd, .sy_auevent = AUE_JAIL_REMOVE, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 598 = jail_remove_jd */
};
diff --git a/sys/compat/freebsd32/freebsd32_systrace_args.c b/sys/compat/freebsd32/freebsd32_systrace_args.c
index f9dc514bee7d..29a5497e9efa 100644
--- a/sys/compat/freebsd32/freebsd32_systrace_args.c
+++ b/sys/compat/freebsd32/freebsd32_systrace_args.c
@@ -457,22 +457,6 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 3;
break;
}
- /* getgroups */
- case 79: {
- struct getgroups_args *p = params;
- iarg[a++] = p->gidsetsize; /* int */
- uarg[a++] = (intptr_t)p->gidset; /* gid_t * */
- *n_args = 2;
- break;
- }
- /* setgroups */
- case 80: {
- struct setgroups_args *p = params;
- iarg[a++] = p->gidsetsize; /* int */
- uarg[a++] = (intptr_t)p->gidset; /* const gid_t * */
- *n_args = 2;
- break;
- }
/* getpgrp */
case 81: {
*n_args = 0;
@@ -3413,6 +3397,36 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 2;
break;
}
+ /* getgroups */
+ case 595: {
+ struct getgroups_args *p = params;
+ iarg[a++] = p->gidsetsize; /* int */
+ uarg[a++] = (intptr_t)p->gidset; /* gid_t * */
+ *n_args = 2;
+ break;
+ }
+ /* setgroups */
+ case 596: {
+ struct setgroups_args *p = params;
+ iarg[a++] = p->gidsetsize; /* int */
+ uarg[a++] = (intptr_t)p->gidset; /* const gid_t * */
+ *n_args = 2;
+ break;
+ }
+ /* jail_attach_jd */
+ case 597: {
+ struct jail_attach_jd_args *p = params;
+ iarg[a++] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* jail_remove_jd */
+ case 598: {
+ struct jail_remove_jd_args *p = params;
+ iarg[a++] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
default:
*n_args = 0;
break;
@@ -4112,32 +4126,6 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
- /* getgroups */
- case 79:
- switch (ndx) {
- case 0:
- p = "int";
- break;
- case 1:
- p = "userland gid_t *";
- break;
- default:
- break;
- };
- break;
- /* setgroups */
- case 80:
- switch (ndx) {
- case 0:
- p = "int";
- break;
- case 1:
- p = "userland const gid_t *";
- break;
- default:
- break;
- };
- break;
/* getpgrp */
case 81:
break;
@@ -9222,6 +9210,52 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
+ /* getgroups */
+ case 595:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setgroups */
+ case 596:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland const gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* jail_attach_jd */
+ case 597:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* jail_remove_jd */
+ case 598:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
default:
break;
};
@@ -9488,16 +9522,6 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
- /* getgroups */
- case 79:
- if (ndx == 0 || ndx == 1)
- p = "int";
- break;
- /* setgroups */
- case 80:
- if (ndx == 0 || ndx == 1)
- p = "int";
- break;
/* getpgrp */
case 81:
/* setpgid */
@@ -11130,6 +11154,26 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
+ /* getgroups */
+ case 595:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setgroups */
+ case 596:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* jail_attach_jd */
+ case 597:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* jail_remove_jd */
+ case 598:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
default:
break;
};
diff --git a/sys/compat/ia32/ia32_sysvec.c b/sys/compat/ia32/ia32_sysvec.c
index 0ea7d072e911..b9dada4eee7b 100644
--- a/sys/compat/ia32/ia32_sysvec.c
+++ b/sys/compat/ia32/ia32_sysvec.c
@@ -145,7 +145,7 @@ struct sysentvec ia32_freebsd_sysvec = {
};
INIT_SYSENTVEC(elf_ia32_sysvec, &ia32_freebsd_sysvec);
-static Elf32_Brandinfo ia32_brand_info = {
+static const Elf32_Brandinfo ia32_brand_info = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_386,
.compat_3_brand = "FreeBSD",
@@ -155,12 +155,10 @@ static Elf32_Brandinfo ia32_brand_info = {
.brand_note = &elf32_freebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
+C_SYSINIT(ia32, SI_SUB_EXEC, SI_ORDER_MIDDLE,
+ (sysinit_cfunc_t)elf32_insert_brand_entry, &ia32_brand_info);
-SYSINIT(ia32, SI_SUB_EXEC, SI_ORDER_MIDDLE,
- (sysinit_cfunc_t) elf32_insert_brand_entry,
- &ia32_brand_info);
-
-static Elf32_Brandinfo ia32_brand_oinfo = {
+static const Elf32_Brandinfo ia32_brand_oinfo = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_386,
.compat_3_brand = "FreeBSD",
@@ -170,12 +168,10 @@ static Elf32_Brandinfo ia32_brand_oinfo = {
.brand_note = &elf32_freebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
+C_SYSINIT(oia32, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t)elf32_insert_brand_entry, &ia32_brand_oinfo);
-SYSINIT(oia32, SI_SUB_EXEC, SI_ORDER_ANY,
- (sysinit_cfunc_t) elf32_insert_brand_entry,
- &ia32_brand_oinfo);
-
-static Elf32_Brandinfo kia32_brand_info = {
+static const Elf32_Brandinfo kia32_brand_info = {
.brand = ELFOSABI_FREEBSD,
.machine = EM_386,
.compat_3_brand = "FreeBSD",
@@ -184,10 +180,8 @@ static Elf32_Brandinfo kia32_brand_info = {
.brand_note = &elf32_kfreebsd_brandnote,
.flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE_MANDATORY
};
-
-SYSINIT(kia32, SI_SUB_EXEC, SI_ORDER_ANY,
- (sysinit_cfunc_t) elf32_insert_brand_entry,
- &kia32_brand_info);
+C_SYSINIT(kia32, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t)elf32_insert_brand_entry, &kia32_brand_info);
void
elf32_dump_thread(struct thread *td, void *dst, size_t *off)
diff --git a/sys/compat/lindebugfs/lindebugfs.c b/sys/compat/lindebugfs/lindebugfs.c
index 50f9377ffec3..8cddc6f390bc 100644
--- a/sys/compat/lindebugfs/lindebugfs.c
+++ b/sys/compat/lindebugfs/lindebugfs.c
@@ -206,7 +206,7 @@ debugfs_create_file(const char *name, umode_t mode,
pnode = debugfs_root;
flags = fops->write ? PFS_RDWR : PFS_RD;
- dnode->d_pfs_node = pfs_create_file(pnode, name, debugfs_fill,
+ pfs_create_file(pnode, &dnode->d_pfs_node, name, debugfs_fill,
debugfs_attr, NULL, debugfs_destroy, flags | PFS_NOWAIT);
if (dnode->d_pfs_node == NULL) {
free(dm, M_DFSINT);
@@ -283,7 +283,8 @@ debugfs_create_dir(const char *name, struct dentry *parent)
else
pnode = debugfs_root;
- dnode->d_pfs_node = pfs_create_dir(pnode, name, debugfs_attr, NULL, debugfs_destroy, PFS_RD | PFS_NOWAIT);
+ pfs_create_dir(pnode, &dnode->d_pfs_node, name, debugfs_attr, NULL,
+ debugfs_destroy, PFS_RD | PFS_NOWAIT);
if (dnode->d_pfs_node == NULL) {
free(dm, M_DFSINT);
return (NULL);
@@ -316,7 +317,8 @@ debugfs_create_symlink(const char *name, struct dentry *parent,
else
pnode = debugfs_root;
- dnode->d_pfs_node = pfs_create_link(pnode, name, &debugfs_fill_data, NULL, NULL, NULL, PFS_NOWAIT);
+ pfs_create_link(pnode, &dnode->d_pfs_node, name, &debugfs_fill_data,
+ NULL, NULL, NULL, PFS_NOWAIT);
if (dnode->d_pfs_node == NULL)
goto fail;
dnode->d_pfs_node->pn_data = dm;
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index 1c6d64d6b8bc..95b212be1306 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -2320,165 +2320,165 @@ linprocfs_init(PFS_INIT_ARGS)
root = pi->pi_root;
/* /proc/... */
- pfs_create_file(root, "cmdline", &linprocfs_docmdline,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "cpuinfo", &linprocfs_docpuinfo,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "devices", &linprocfs_dodevices,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "filesystems", &linprocfs_dofilesystems,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "loadavg", &linprocfs_doloadavg,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "meminfo", &linprocfs_domeminfo,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "modules", &linprocfs_domodules,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "mounts", &linprocfs_domtab,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "mtab", &linprocfs_domtab,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "partitions", &linprocfs_dopartitions,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_link(root, "self", &procfs_docurproc,
- NULL, NULL, NULL, 0);
- pfs_create_file(root, "stat", &linprocfs_dostat,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "swaps", &linprocfs_doswaps,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "uptime", &linprocfs_douptime,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(root, "version", &linprocfs_doversion,
+ pfs_create_file(root, NULL, "cmdline", &linprocfs_docmdline, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "cpuinfo", &linprocfs_docpuinfo, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "devices", &linprocfs_dodevices, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "filesystems", &linprocfs_dofilesystems,
NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(root, NULL, "loadavg", &linprocfs_doloadavg, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "meminfo", &linprocfs_domeminfo, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "modules", &linprocfs_domodules, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "mounts", &linprocfs_domtab, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "mtab", &linprocfs_domtab, NULL, NULL, NULL,
+ PFS_RD);
+ pfs_create_file(root, NULL, "partitions", &linprocfs_dopartitions, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_link(root, NULL, "self", &procfs_docurproc, NULL, NULL, NULL,
+ 0);
+ pfs_create_file(root, NULL, "stat", &linprocfs_dostat, NULL, NULL, NULL,
+ PFS_RD);
+ pfs_create_file(root, NULL, "swaps", &linprocfs_doswaps, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "uptime", &linprocfs_douptime, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(root, NULL, "version", &linprocfs_doversion, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/bus/... */
- dir = pfs_create_dir(root, "bus", NULL, NULL, NULL, 0);
- dir = pfs_create_dir(dir, "pci", NULL, NULL, NULL, 0);
- dir = pfs_create_dir(dir, "devices", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &dir, "bus", NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, &dir, "pci", NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, &dir, "devices", NULL, NULL, NULL, 0);
/* /proc/net/... */
- dir = pfs_create_dir(root, "net", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "dev", &linprocfs_donetdev,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "route", &linprocfs_donetroute,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_dir(root, &dir, "net", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "dev", &linprocfs_donetdev, NULL, NULL, NULL,
+ PFS_RD);
+ pfs_create_file(dir, NULL, "route", &linprocfs_donetroute, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/<pid>/... */
- dir = pfs_create_dir(root, "pid", NULL, NULL, NULL, PFS_PROCDEP);
- pfs_create_file(dir, "cmdline", &linprocfs_doproccmdline,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_link(dir, "cwd", &linprocfs_doproccwd,
- NULL, NULL, NULL, 0);
- pfs_create_file(dir, "environ", &linprocfs_doprocenviron,
- NULL, &procfs_candebug, NULL, PFS_RD);
- pfs_create_link(dir, "exe", &procfs_doprocfile,
- NULL, &procfs_notsystem, NULL, 0);
- pfs_create_file(dir, "maps", &linprocfs_doprocmaps,
- NULL, NULL, NULL, PFS_RD | PFS_AUTODRAIN);
- pfs_create_file(dir, "mem", &linprocfs_doprocmem,
- procfs_attr_rw, &procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
- pfs_create_file(dir, "mountinfo", &linprocfs_doprocmountinfo,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "mounts", &linprocfs_domtab,
+ pfs_create_dir(root, &dir, "pid", NULL, NULL, NULL, PFS_PROCDEP);
+ pfs_create_file(dir, NULL, "cmdline", &linprocfs_doproccmdline, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_link(dir, NULL, "cwd", &linprocfs_doproccwd, NULL, NULL,
+ NULL, 0);
+ pfs_create_file(dir, NULL, "environ", &linprocfs_doprocenviron, NULL,
+ &procfs_candebug, NULL, PFS_RD);
+ pfs_create_link(dir, NULL, "exe", &procfs_doprocfile, NULL,
+ &procfs_notsystem, NULL, 0);
+ pfs_create_file(dir, NULL, "maps", &linprocfs_doprocmaps, NULL, NULL,
+ NULL, PFS_RD | PFS_AUTODRAIN);
+ pfs_create_file(dir, NULL, "mem", &linprocfs_doprocmem, procfs_attr_rw,
+ &procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
+ pfs_create_file(dir, NULL, "mountinfo", &linprocfs_doprocmountinfo,
NULL, NULL, NULL, PFS_RD);
- pfs_create_link(dir, "root", &linprocfs_doprocroot,
- NULL, NULL, NULL, 0);
- pfs_create_file(dir, "stat", &linprocfs_doprocstat,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "statm", &linprocfs_doprocstatm,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "status", &linprocfs_doprocstatus,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_link(dir, "fd", &linprocfs_dofdescfs,
- NULL, NULL, NULL, 0);
- pfs_create_file(dir, "auxv", &linprocfs_doauxv,
- NULL, &procfs_candebug, NULL, PFS_RD|PFS_RAWRD);
- pfs_create_file(dir, "limits", &linprocfs_doproclimits,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "oom_score_adj", &linprocfs_do_oom_score_adj,
+ pfs_create_file(dir, NULL, "mounts", &linprocfs_domtab, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_link(dir, NULL, "root", &linprocfs_doprocroot, NULL, NULL,
+ NULL, 0);
+ pfs_create_file(dir, NULL, "stat", &linprocfs_doprocstat, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "statm", &linprocfs_doprocstatm, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "status", &linprocfs_doprocstatus, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_link(dir, NULL, "fd", &linprocfs_dofdescfs, NULL, NULL, NULL,
+ 0);
+ pfs_create_file(dir, NULL, "auxv", &linprocfs_doauxv, NULL,
+ &procfs_candebug, NULL, PFS_RD | PFS_RAWRD);
+ pfs_create_file(dir, NULL, "limits", &linprocfs_doproclimits, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "oom_score_adj", &linprocfs_do_oom_score_adj,
procfs_attr_rw, &procfs_candebug, NULL, PFS_RDWR);
/* /proc/<pid>/task/... */
- dir = pfs_create_dir(dir, "task", linprocfs_dotaskattr, NULL, NULL, 0);
- pfs_create_file(dir, ".dummy", &linprocfs_dotaskdummy,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_dir(dir, &dir, "task", linprocfs_dotaskattr, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, ".dummy", &linprocfs_dotaskdummy, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/scsi/... */
- dir = pfs_create_dir(root, "scsi", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "device_info", &linprocfs_doscsidevinfo,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "scsi", &linprocfs_doscsiscsi,
+ pfs_create_dir(root, &dir, "scsi", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "device_info", &linprocfs_doscsidevinfo,
NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "scsi", &linprocfs_doscsiscsi, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/sys/... */
- sys = pfs_create_dir(root, "sys", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &sys, "sys", NULL, NULL, NULL, 0);
/* /proc/sys/kernel/... */
- dir = pfs_create_dir(sys, "kernel", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "osrelease", &linprocfs_doosrelease,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "ostype", &linprocfs_doostype,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "version", &linprocfs_doosbuild,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "msgmax", &linprocfs_domsgmax,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "msgmni", &linprocfs_domsgmni,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "msgmnb", &linprocfs_domsgmnb,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "ngroups_max", &linprocfs_dongroups_max,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "pid_max", &linprocfs_dopid_max,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "sem", &linprocfs_dosem,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "shmall", &linprocfs_doshmall,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "shmmax", &linprocfs_doshmmax,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "shmmni", &linprocfs_doshmmni,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "tainted", &linprocfs_dotainted,
+ pfs_create_dir(sys, &dir, "kernel", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "osrelease", &linprocfs_doosrelease, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "ostype", &linprocfs_doostype, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "version", &linprocfs_doosbuild, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "msgmax", &linprocfs_domsgmax, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "msgmni", &linprocfs_domsgmni, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "msgmnb", &linprocfs_domsgmnb, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "ngroups_max", &linprocfs_dongroups_max,
NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "pid_max", &linprocfs_dopid_max, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "sem", &linprocfs_dosem, NULL, NULL, NULL,
+ PFS_RD);
+ pfs_create_file(dir, NULL, "shmall", &linprocfs_doshmall, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "shmmax", &linprocfs_doshmmax, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "shmmni", &linprocfs_doshmmni, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "tainted", &linprocfs_dotainted, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/sys/kernel/random/... */
- dir = pfs_create_dir(dir, "random", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "uuid", &linprocfs_douuid,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "boot_id", &linprocfs_doboot_id,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_dir(dir, &dir, "random", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "uuid", &linprocfs_douuid, NULL, NULL, NULL,
+ PFS_RD);
+ pfs_create_file(dir, NULL, "boot_id", &linprocfs_doboot_id, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/sys/vm/.... */
- dir = pfs_create_dir(sys, "vm", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "min_free_kbytes", &linprocfs_dominfree,
+ pfs_create_dir(sys, &dir, "vm", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "min_free_kbytes", &linprocfs_dominfree,
NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "max_map_count", &linprocfs_domax_map_cnt,
+ pfs_create_file(dir, NULL, "max_map_count", &linprocfs_domax_map_cnt,
NULL, NULL, NULL, PFS_RD);
/* /proc/sysvipc/... */
- dir = pfs_create_dir(root, "sysvipc", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "msg", &linprocfs_dosysvipc_msg,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "sem", &linprocfs_dosysvipc_sem,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "shm", &linprocfs_dosysvipc_shm,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_dir(root, &dir, "sysvipc", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "msg", &linprocfs_dosysvipc_msg, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "sem", &linprocfs_dosysvipc_sem, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "shm", &linprocfs_dosysvipc_shm, NULL, NULL,
+ NULL, PFS_RD);
/* /proc/sys/fs/... */
- dir = pfs_create_dir(sys, "fs", NULL, NULL, NULL, 0);
+ pfs_create_dir(sys, &dir, "fs", NULL, NULL, NULL, 0);
/* /proc/sys/fs/mqueue/... */
- dir = pfs_create_dir(dir, "mqueue", NULL, NULL, NULL, 0);
- pfs_create_file(dir, "msg_default", &linprocfs_domqueue_msg_default,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "msgsize_default", &linprocfs_domqueue_msgsize_default,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "msg_max", &linprocfs_domqueue_msg_max,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "msgsize_max", &linprocfs_domqueue_msgsize_max,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "queues_max", &linprocfs_domqueue_queues_max,
+ pfs_create_dir(dir, &dir, "mqueue", NULL, NULL, NULL, 0);
+ pfs_create_file(dir, NULL, "msg_default",
+ &linprocfs_domqueue_msg_default, NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "msgsize_default",
+ &linprocfs_domqueue_msgsize_default, NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "msg_max", &linprocfs_domqueue_msg_max, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "msgsize_max",
+ &linprocfs_domqueue_msgsize_max, NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "queues_max", &linprocfs_domqueue_queues_max,
NULL, NULL, NULL, PFS_RD);
return (0);
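
The hunk above tracks a pseudofs API change: pfs_create_file(), pfs_create_dir(), and pfs_create_link() no longer return the new node directly, but return an error and optionally hand the node back through a second out-pointer argument. A minimal before/after sketch, based only on the call sites visible in this diff (the error handling shown is illustrative):

	/* Previously: dir = pfs_create_dir(root, "net", NULL, NULL, NULL, 0); */
	struct pfs_node *dir;
	int error;

	error = pfs_create_dir(root, &dir, "net", NULL, NULL, NULL, 0);
	if (error != 0)
		return (error);
	/* Callers that do not need the created node pass NULL. */
	pfs_create_file(dir, NULL, "dev", &linprocfs_donetdev, NULL, NULL,
	    NULL, PFS_RD);
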
diff --git a/sys/compat/linsysfs/linsysfs.c b/sys/compat/linsysfs/linsysfs.c
index 7f70221b420d..5a41c5193415 100644
--- a/sys/compat/linsysfs/linsysfs.c
+++ b/sys/compat/linsysfs/linsysfs.c
@@ -267,6 +267,8 @@ linsysfs_run_bus(device_t dev, struct pfs_node *dir, struct pfs_node *scsi,
struct pci_devinfo *dinfo;
char *device, *host, *new_path, *devname;
+ children = NULL;
+ device = host = NULL;
new_path = path;
devname = malloc(16, M_TEMP, M_WAITOK);
@@ -292,39 +294,43 @@ linsysfs_run_bus(device_t dev, struct pfs_node *dir, struct pfs_node *scsi,
dinfo->cfg.func);
strcat(new_path, "/");
strcat(new_path, device);
- dir = pfs_create_dir(dir, device,
+ error = pfs_create_dir(dir, &dir, device,
NULL, NULL, NULL, 0);
- cur_file = pfs_create_file(dir, "vendor",
+ if (error != 0)
+ goto out;
+ pfs_create_file(dir, &cur_file, "vendor",
&linsysfs_fill_vendor, NULL, NULL, NULL,
PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_file(dir, "device",
+ pfs_create_file(dir, &cur_file, "device",
&linsysfs_fill_device, NULL, NULL, NULL,
PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_file(dir,
+ pfs_create_file(dir, &cur_file,
"subsystem_vendor",
&linsysfs_fill_subvendor, NULL, NULL, NULL,
PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_file(dir,
+ pfs_create_file(dir, &cur_file,
"subsystem_device",
&linsysfs_fill_subdevice, NULL, NULL, NULL,
PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_file(dir, "revision",
+ pfs_create_file(dir, &cur_file, "revision",
&linsysfs_fill_revid, NULL, NULL, NULL,
PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_file(dir, "config",
+ pfs_create_file(dir, &cur_file, "config",
&linsysfs_fill_config, NULL, NULL, NULL,
PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_file(dir, "uevent",
- &linsysfs_fill_uevent_pci, NULL, NULL,
- NULL, PFS_RD);
+ pfs_create_file(dir, &cur_file, "uevent",
+ &linsysfs_fill_uevent_pci, NULL, NULL, NULL,
+ PFS_RD);
cur_file->pn_data = (void*)dev;
- cur_file = pfs_create_link(dir, "subsystem",
+ pfs_create_link(dir, &cur_file, "subsystem",
&linsysfs_fill_data, NULL, NULL, NULL, 0);
/* libdrm just checks that the link ends in "/pci" */
cur_file->pn_data = "/sys/bus/pci";
@@ -334,34 +340,32 @@ linsysfs_run_bus(device_t dev, struct pfs_node *dir, struct pfs_node *scsi,
sprintf(host, "host%d", host_number++);
strcat(new_path, "/");
strcat(new_path, host);
- pfs_create_dir(dir, host,
- NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, NULL, host, NULL,
+ NULL, NULL, 0);
scsi_host = malloc(sizeof(
struct scsi_host_queue),
- M_DEVBUF, M_NOWAIT);
+ M_DEVBUF, M_WAITOK);
scsi_host->path = malloc(
strlen(new_path) + 1,
- M_DEVBUF, M_NOWAIT);
+ M_DEVBUF, M_WAITOK);
scsi_host->path[0] = '\000';
bcopy(new_path, scsi_host->path,
strlen(new_path) + 1);
scsi_host->name = "unknown";
- sub_dir = pfs_create_dir(scsi, host,
+ pfs_create_dir(scsi, &sub_dir, host,
NULL, NULL, NULL, 0);
- pfs_create_link(sub_dir, "device",
- &linsysfs_link_scsi_host,
- NULL, NULL, NULL, 0);
- pfs_create_file(sub_dir, "proc_name",
- &linsysfs_scsiname,
+ pfs_create_link(sub_dir, NULL, "device",
+ &linsysfs_link_scsi_host, NULL,
+ NULL, NULL, 0);
+ pfs_create_file(sub_dir, NULL,
+ "proc_name", &linsysfs_scsiname,
NULL, NULL, NULL, PFS_RD);
scsi_host->name
= linux_driver_get_name_dev(dev);
TAILQ_INSERT_TAIL(&scsi_host_q,
scsi_host, scsi_host_next);
}
- free(device, M_TEMP);
- free(host, M_TEMP);
}
}
@@ -374,26 +378,27 @@ linsysfs_run_bus(device_t dev, struct pfs_node *dir, struct pfs_node *scsi,
device_get_unit(dev) >= 0) {
dinfo = device_get_ivars(parent);
if (dinfo != NULL && dinfo->cfg.baseclass == PCIC_DISPLAY) {
- pfs_create_dir(dir, "drm", NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, NULL, "drm", NULL, NULL,
+ NULL, 0);
sprintf(devname, "226:%d",
device_get_unit(dev));
- sub_dir = pfs_create_dir(chardev,
- devname, NULL, NULL, NULL, 0);
- cur_file = pfs_create_link(sub_dir,
- "device", &linsysfs_fill_vgapci, NULL,
- NULL, NULL, PFS_RD);
+ pfs_create_dir(chardev, &sub_dir, devname, NULL,
+ NULL, NULL, 0);
+ pfs_create_link(sub_dir, &cur_file, "device",
+ &linsysfs_fill_vgapci, NULL, NULL, NULL,
+ PFS_RD);
cur_file->pn_data = (void*)dir;
- cur_file = pfs_create_file(sub_dir,
- "uevent", &linsysfs_fill_uevent_drm, NULL,
- NULL, NULL, PFS_RD);
+ pfs_create_file(sub_dir, &cur_file, "uevent",
+ &linsysfs_fill_uevent_drm, NULL, NULL, NULL,
+ PFS_RD);
cur_file->pn_data = (void*)dev;
sprintf(devname, "card%d",
device_get_unit(dev));
- sub_dir = pfs_create_dir(drm,
- devname, NULL, NULL, NULL, 0);
- cur_file = pfs_create_link(sub_dir,
- "device", &linsysfs_fill_vgapci, NULL,
- NULL, NULL, PFS_RD);
+ pfs_create_dir(drm, &sub_dir, devname, NULL,
+ NULL, NULL, 0);
+ pfs_create_link(sub_dir, &cur_file, "device",
+ &linsysfs_fill_vgapci, NULL, NULL, NULL,
+ PFS_RD);
cur_file->pn_data = (void*)dir;
}
}
@@ -401,17 +406,37 @@ linsysfs_run_bus(device_t dev, struct pfs_node *dir, struct pfs_node *scsi,
error = device_get_children(dev, &children, &nchildren);
if (error == 0) {
- for (i = 0; i < nchildren; i++)
- if (children[i])
- linsysfs_run_bus(children[i], dir, scsi,
+ for (i = 0; i < nchildren; i++) {
+ if (children[i]) {
+ error = linsysfs_run_bus(children[i], dir, scsi,
chardev, drm, new_path, prefix);
- free(children, M_TEMP);
+ if (error != 0) {
+ printf(
+ "linsysfs_run_bus: %s omitted from sysfs tree, error %d\n",
+ device_get_nameunit(children[i]),
+ error);
+ }
+ }
+ }
+
+ /*
+	 * We override the error to avoid cascading failures; the
+	 * innermost device that failed in a tree is probably the most
+	 * significant one for diagnostics, and its parents would just be noise.
+ */
+ error = 0;
}
+
+out:
+ free(host, M_TEMP);
+ free(device, M_TEMP);
+ if (children != NULL)
+ free(children, M_TEMP);
if (new_path != path)
free(new_path, M_TEMP);
free(devname, M_TEMP);
- return (1);
+ return (error);
}
/*
@@ -455,10 +480,10 @@ linsysfs_listcpus(struct pfs_node *dir)
for (i = 0; i < mp_ncpus; ++i) {
/* /sys/devices/system/cpu/cpuX */
sprintf(name, "cpu%d", i);
- cpu = pfs_create_dir(dir, name, NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, &cpu, name, NULL, NULL, NULL, 0);
- pfs_create_file(cpu, "online", &linsysfs_cpuxonline,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(cpu, NULL, "online", &linsysfs_cpuxonline, NULL,
+ NULL, NULL, PFS_RD);
}
free(name, M_TEMP);
}
@@ -485,52 +510,56 @@ linsysfs_init(PFS_INIT_ARGS)
root = pi->pi_root;
/* /sys/bus/... */
- dir = pfs_create_dir(root, "bus", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &dir, "bus", NULL, NULL, NULL, 0);
/* /sys/class/... */
- class = pfs_create_dir(root, "class", NULL, NULL, NULL, 0);
- scsi = pfs_create_dir(class, "scsi_host", NULL, NULL, NULL, 0);
- drm = pfs_create_dir(class, "drm", NULL, NULL, NULL, 0);
- pfs_create_dir(class, "power_supply", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &class, "class", NULL, NULL, NULL, 0);
+ pfs_create_dir(class, &scsi, "scsi_host", NULL, NULL, NULL, 0);
+ pfs_create_dir(class, &drm, "drm", NULL, NULL, NULL, 0);
+ pfs_create_dir(class, NULL, "power_supply", NULL, NULL, NULL, 0);
/* /sys/class/net/.. */
- net = pfs_create_dir(class, "net", NULL, NULL, NULL, 0);
+ pfs_create_dir(class, &net, "net", NULL, NULL, NULL, 0);
/* /sys/dev/... */
- devdir = pfs_create_dir(root, "dev", NULL, NULL, NULL, 0);
- chardev = pfs_create_dir(devdir, "char", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &devdir, "dev", NULL, NULL, NULL, 0);
+ pfs_create_dir(devdir, &chardev, "char", NULL, NULL, NULL, 0);
/* /sys/devices/... */
- dir = pfs_create_dir(root, "devices", NULL, NULL, NULL, 0);
- pci = pfs_create_dir(dir, "pci0000:00", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &dir, "devices", NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, &pci, "pci0000:00", NULL, NULL, NULL, 0);
devclass = devclass_find("root");
if (devclass == NULL) {
return (0);
}
+ /*
+ * This assumes that the root node is unlikely to error out in
+ * linsysfs_run_bus, which may or may not be true.
+ */
dev = devclass_get_device(devclass, 0);
linsysfs_run_bus(dev, pci, scsi, chardev, drm, "/pci0000:00", "0000");
/* /sys/devices/system */
- sys = pfs_create_dir(dir, "system", NULL, NULL, NULL, 0);
+ pfs_create_dir(dir, &sys, "system", NULL, NULL, NULL, 0);
/* /sys/devices/system/cpu */
- cpu = pfs_create_dir(sys, "cpu", NULL, NULL, NULL, 0);
+ pfs_create_dir(sys, &cpu, "cpu", NULL, NULL, NULL, 0);
- pfs_create_file(cpu, "online", &linsysfs_cpuonline,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(cpu, "possible", &linsysfs_cpuonline,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(cpu, "present", &linsysfs_cpuonline,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(cpu, NULL, "online", &linsysfs_cpuonline, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(cpu, NULL, "possible", &linsysfs_cpuonline, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(cpu, NULL, "present", &linsysfs_cpuonline, NULL, NULL,
+ NULL, PFS_RD);
linsysfs_listcpus(cpu);
/* /sys/kernel */
- kernel = pfs_create_dir(root, "kernel", NULL, NULL, NULL, 0);
+ pfs_create_dir(root, &kernel, "kernel", NULL, NULL, NULL, 0);
/* /sys/kernel/debug, mountpoint for lindebugfs. */
- pfs_create_dir(kernel, "debug", NULL, NULL, NULL, 0);
+ pfs_create_dir(kernel, NULL, "debug", NULL, NULL, NULL, 0);
linsysfs_net_init();
diff --git a/sys/compat/linsysfs/linsysfs_net.c b/sys/compat/linsysfs/linsysfs_net.c
index 73602b0132a4..751dbb5b3713 100644
--- a/sys/compat/linsysfs/linsysfs_net.c
+++ b/sys/compat/linsysfs/linsysfs_net.c
@@ -237,22 +237,22 @@ linsysfs_net_addif(if_t ifp, void *arg)
nic = pfs_find_node(dir, ifname);
if (nic == NULL) {
- nic = pfs_create_dir(dir, ifname, NULL, linsysfs_if_visible,
+ pfs_create_dir(dir, &nic, ifname, NULL, linsysfs_if_visible,
NULL, 0);
- pfs_create_file(nic, "address", &linsysfs_if_addr,
+ pfs_create_file(nic, NULL, "address", &linsysfs_if_addr, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_file(nic, NULL, "addr_len", &linsysfs_if_addrlen,
NULL, NULL, NULL, PFS_RD);
- pfs_create_file(nic, "addr_len", &linsysfs_if_addrlen,
+ pfs_create_file(nic, NULL, "flags", &linsysfs_if_flags, NULL,
+ NULL, NULL, PFS_RD);
+ pfs_create_file(nic, NULL, "ifindex", &linsysfs_if_ifindex,
NULL, NULL, NULL, PFS_RD);
- pfs_create_file(nic, "flags", &linsysfs_if_flags,
+ pfs_create_file(nic, NULL, "mtu", &linsysfs_if_mtu, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(nic, NULL, "tx_queue_len", &linsysfs_if_txq_len,
NULL, NULL, NULL, PFS_RD);
- pfs_create_file(nic, "ifindex", &linsysfs_if_ifindex,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(nic, "mtu", &linsysfs_if_mtu,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(nic, "tx_queue_len", &linsysfs_if_txq_len,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(nic, "type", &linsysfs_if_type,
- NULL, NULL, NULL, PFS_RD);
+ pfs_create_file(nic, NULL, "type", &linsysfs_if_type, NULL,
+ NULL, NULL, PFS_RD);
}
/*
* There is a small window between registering the if_arrival
diff --git a/sys/compat/linux/linux.c b/sys/compat/linux/linux.c
index 61b207070963..a40f110634f7 100644
--- a/sys/compat/linux/linux.c
+++ b/sys/compat/linux/linux.c
@@ -578,8 +578,13 @@ bsd_to_linux_sockaddr(const struct sockaddr *sa, struct l_sockaddr **lsa,
return (0);
}
+/*
+ * If sap is NULL, then osa points at an already copied-in Linux sockaddr
+ * that should be edited in place.  Otherwise memory is allocated, the
+ * sockaddr is copied in, and the result is returned in *sap.
+ */
int
-linux_to_bsd_sockaddr(const struct l_sockaddr *osa, struct sockaddr **sap,
+linux_to_bsd_sockaddr(struct l_sockaddr *osa, struct sockaddr **sap,
socklen_t *len)
{
struct sockaddr *sa;
@@ -609,10 +614,12 @@ linux_to_bsd_sockaddr(const struct l_sockaddr *osa, struct sockaddr **sap,
}
#endif
- kosa = malloc(salen, M_SONAME, M_WAITOK);
-
- if ((error = copyin(osa, kosa, *len)))
- goto out;
+ if (sap != NULL) {
+ kosa = malloc(salen, M_SONAME, M_WAITOK);
+ if ((error = copyin(osa, kosa, *len)))
+ goto out;
+ } else
+ kosa = osa;
bdom = linux_to_bsd_domain(kosa->sa_family);
if (bdom == AF_UNKNOWN) {
@@ -686,12 +693,15 @@ linux_to_bsd_sockaddr(const struct l_sockaddr *osa, struct sockaddr **sap,
sa->sa_family = bdom;
sa->sa_len = salen;
- *sap = sa;
- *len = salen;
+ if (sap != NULL) {
+ *sap = sa;
+ *len = salen;
+ }
return (0);
out:
- free(kosa, M_SONAME);
+ if (sap != NULL)
+ free(kosa, M_SONAME);
return (error);
}
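
After this change linux_to_bsd_sockaddr() has two modes, as the new comment describes. A sketch of both call patterns, following the call sites in the surrounding hunks (variable names as used there):

	struct sockaddr *sa;
	socklen_t len;
	int error;

	/* sap != NULL: osa is a user pointer; copyin plus malloc, and the
	 * caller eventually frees *sap with free(sa, M_SONAME). */
	len = args->optlen;
	error = linux_to_bsd_sockaddr(PTRIN(args->optval), &sa, &len);

	/* sap == NULL: osa already lives in kernel memory and is converted
	 * in place; nothing is allocated. */
	len = sizeof(struct sockaddr_storage);
	error = linux_to_bsd_sockaddr((struct l_sockaddr *)&req.gsr_group,
	    NULL, &len);
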
diff --git a/sys/compat/linux/linux_common.h b/sys/compat/linux/linux_common.h
index 97f5a259f300..814c183b338a 100644
--- a/sys/compat/linux/linux_common.h
+++ b/sys/compat/linux/linux_common.h
@@ -43,7 +43,7 @@ sa_family_t bsd_to_linux_domain(sa_family_t domain);
#define AF_UNKNOWN UINT8_MAX
int bsd_to_linux_sockaddr(const struct sockaddr *sa,
struct l_sockaddr **lsa, socklen_t len);
-int linux_to_bsd_sockaddr(const struct l_sockaddr *lsa,
+int linux_to_bsd_sockaddr(struct l_sockaddr *lsa,
struct sockaddr **sap, socklen_t *len);
void linux_to_bsd_poll_events(struct thread *td, int fd,
short lev, short *bev);
diff --git a/sys/compat/linux/linux_futex.c b/sys/compat/linux/linux_futex.c
index 37d0142bae8b..0586eb55a8f3 100644
--- a/sys/compat/linux/linux_futex.c
+++ b/sys/compat/linux/linux_futex.c
@@ -251,7 +251,7 @@ linux_futex(struct thread *td, struct linux_futex_args *args)
* set LINUX_BI_FUTEX_REQUEUE bit of Brandinfo flags.
*/
p = td->td_proc;
- Elf_Brandinfo *bi = p->p_elf_brandinfo;
+ const Elf_Brandinfo *bi = p->p_elf_brandinfo;
if (bi == NULL || ((bi->flags & LINUX_BI_FUTEX_REQUEUE)) == 0)
return (EINVAL);
args->val3_compare = false;
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index 5e32353c6b8e..0925ffb64480 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -1028,24 +1028,24 @@ linux_nice(struct thread *td, struct linux_nice_args *args)
int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
+ const int ngrp = args->gidsetsize;
struct ucred *newcred, *oldcred;
l_gid_t *linux_gidset;
- int ngrp, error;
+ int error;
struct proc *p;
- ngrp = args->gidsetsize;
- if (ngrp < 0 || ngrp >= ngroups_max)
+ if (ngrp < 0 || ngrp > ngroups_max)
return (EINVAL);
linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
if (error)
goto out;
+
newcred = crget();
crextend(newcred, ngrp);
p = td->td_proc;
PROC_LOCK(p);
- oldcred = p->p_ucred;
- crcopy(newcred, oldcred);
+ oldcred = crcopysafe(p, newcred);
if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
PROC_UNLOCK(p);
@@ -1071,34 +1071,29 @@ out:
int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
- struct ucred *cred;
+ const struct ucred *const cred = td->td_ucred;
l_gid_t *linux_gidset;
- gid_t *bsd_gidset;
- int bsd_gidsetsz, ngrp, error;
+ int ngrp, error;
- cred = td->td_ucred;
- bsd_gidset = cred->cr_groups;
- bsd_gidsetsz = cred->cr_ngroups;
+ ngrp = args->gidsetsize;
- if ((ngrp = args->gidsetsize) == 0) {
- td->td_retval[0] = bsd_gidsetsz;
+ if (ngrp == 0) {
+ td->td_retval[0] = cred->cr_ngroups;
return (0);
}
-
- if (ngrp < bsd_gidsetsz)
+ if (ngrp < cred->cr_ngroups)
return (EINVAL);
- ngrp = 0;
- linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
- M_LINUX, M_WAITOK);
- while (ngrp < bsd_gidsetsz) {
- linux_gidset[ngrp] = bsd_gidset[ngrp];
- ngrp++;
- }
+ ngrp = cred->cr_ngroups;
+
+ linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
+ for (int i = 0; i < ngrp; ++i)
+ linux_gidset[i] = cred->cr_groups[i];
error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
free(linux_gidset, M_LINUX);
- if (error)
+
+ if (error != 0)
return (error);
td->td_retval[0] = ngrp;
diff --git a/sys/compat/linux/linux_netlink.c b/sys/compat/linux/linux_netlink.c
index f51838ee00d7..6aeafe84adc6 100644
--- a/sys/compat/linux/linux_netlink.c
+++ b/sys/compat/linux/linux_netlink.c
@@ -563,22 +563,15 @@ nlmsg_to_linux(struct nlmsghdr *hdr, struct nlpcb *nlp, struct nl_writer *nw)
}
}
-static bool
-nlmsgs_to_linux(struct nl_writer *nw, struct nlpcb *nlp)
+static struct nl_buf *
+nlmsgs_to_linux(struct nl_buf *orig, struct nlpcb *nlp)
{
- struct nl_buf *nb, *orig;
- u_int offset, msglen, orig_messages;
-
- RT_LOG(LOG_DEBUG3, "%p: in %u bytes %u messages", __func__,
- nw->buf->datalen, nw->num_messages);
+ struct nl_writer nw;
+ u_int offset, msglen;
- orig = nw->buf;
- nb = nl_buf_alloc(orig->datalen + SCRATCH_BUFFER_SIZE, M_NOWAIT);
- if (__predict_false(nb == NULL))
- return (false);
- nw->buf = nb;
- orig_messages = nw->num_messages;
- nw->num_messages = 0;
+ if (__predict_false(!nl_writer_unicast(&nw,
+ orig->datalen + SCRATCH_BUFFER_SIZE, nlp, false)))
+ return (NULL);
/* Assume correct headers. Buffer IS mutable */
for (offset = 0;
@@ -587,22 +580,18 @@ nlmsgs_to_linux(struct nl_writer *nw, struct nlpcb *nlp)
struct nlmsghdr *hdr = (struct nlmsghdr *)&orig->data[offset];
msglen = NLMSG_ALIGN(hdr->nlmsg_len);
- if (!nlmsg_to_linux(hdr, nlp, nw)) {
+ if (!nlmsg_to_linux(hdr, nlp, &nw)) {
RT_LOG(LOG_DEBUG, "failed to process msg type %d",
hdr->nlmsg_type);
- nl_buf_free(nb);
- nw->buf = orig;
- nw->num_messages = orig_messages;
- return (false);
+ nl_buf_free(nw.buf);
+ return (NULL);
}
}
- MPASS(nw->num_messages == orig_messages);
- MPASS(nw->buf == nb);
- nl_buf_free(orig);
- RT_LOG(LOG_DEBUG3, "%p: out %u bytes", __func__, offset);
+	RT_LOG(LOG_DEBUG3, "%s: out %u bytes %u messages", __func__,
+	    nw.buf->datalen, nw.num_messages);
- return (true);
+ return (nw.buf);
}
static struct linux_netlink_provider linux_netlink_v1 = {
diff --git a/sys/compat/linux/linux_socket.c b/sys/compat/linux/linux_socket.c
index 539d153431c4..b1a483ce611c 100644
--- a/sys/compat/linux/linux_socket.c
+++ b/sys/compat/linux/linux_socket.c
@@ -2146,7 +2146,8 @@ linux_setsockopt(struct thread *td, struct linux_setsockopt_args *args)
return (ENOPROTOOPT);
}
- if (name == IPV6_NEXTHOP) {
+ switch (name) {
+ case IPV6_NEXTHOP: {
len = args->optlen;
error = linux_to_bsd_sockaddr(PTRIN(args->optval), &sa, &len);
if (error != 0)
@@ -2155,7 +2156,34 @@ linux_setsockopt(struct thread *td, struct linux_setsockopt_args *args)
error = kern_setsockopt(td, args->s, level,
name, sa, UIO_SYSSPACE, len);
free(sa, M_SONAME);
- } else {
+ break;
+ }
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP: {
+ struct group_source_req req;
+ size_t size;
+
+ size = (name == MCAST_JOIN_SOURCE_GROUP ||
+ name == MCAST_LEAVE_SOURCE_GROUP) ?
+ sizeof(struct group_source_req) : sizeof(struct group_req);
+
+ if ((error = copyin(PTRIN(args->optval), &req, size)))
+ return (error);
+ len = sizeof(struct sockaddr_storage);
+ if ((error = linux_to_bsd_sockaddr(
+ (struct l_sockaddr *)&req.gsr_group, NULL, &len)))
+ return (error);
+ if (size == sizeof(struct group_source_req) &&
+ (error = linux_to_bsd_sockaddr(
+ (struct l_sockaddr *)&req.gsr_source, NULL, &len)))
+ return (error);
+ error = kern_setsockopt(td, args->s, level, name, &req,
+ UIO_SYSSPACE, size);
+ break;
+ }
+ default:
error = kern_setsockopt(td, args->s, level,
name, PTRIN(args->optval), UIO_USERSPACE, args->optlen);
}
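
The new switch cases cover the protocol-independent multicast options, whose option value embeds sockaddrs inside struct group_req/group_source_req rather than pointing at one; that is why the addresses must be rewritten in place before kern_setsockopt() sees them. For reference, a sketch of the Linux userland side (illustrative code, not part of this commit; the interface name and group address are made up):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <net/if.h>

	/* s is an open UDP socket. */
	struct group_req greq = { 0 };
	struct sockaddr_in *sin = (struct sockaddr_in *)&greq.gr_group;

	greq.gr_interface = if_nametoindex("eth0");	/* hypothetical */
	sin->sin_family = AF_INET;			/* Linux AF value */
	sin->sin_addr.s_addr = inet_addr("239.1.1.1");	/* hypothetical */
	setsockopt(s, IPPROTO_IP, MCAST_JOIN_GROUP, &greq, sizeof(greq));
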
@@ -2179,6 +2207,7 @@ static int
linux_getsockopt_so_peergroups(struct thread *td,
struct linux_getsockopt_args *args)
{
+ l_gid_t *out = PTRIN(args->optval);
struct xucred xu;
socklen_t xulen, len;
int error, i;
@@ -2197,13 +2226,12 @@ linux_getsockopt_so_peergroups(struct thread *td,
return (error);
}
- /*
- * "- 1" to skip the primary group.
- */
+ /* "- 1" to skip the primary group. */
for (i = 0; i < xu.cr_ngroups - 1; i++) {
- error = copyout(xu.cr_groups + i + 1,
- (void *)(args->optval + i * sizeof(l_gid_t)),
- sizeof(l_gid_t));
+ /* Copy to cope with a possible type discrepancy. */
+ const l_gid_t g = xu.cr_groups[i + 1];
+
+ error = copyout(&g, out + i, sizeof(l_gid_t));
if (error != 0)
return (error);
}
diff --git a/sys/compat/linux/linux_uid16.c b/sys/compat/linux/linux_uid16.c
index 1d9a19916412..8ac093e004d0 100644
--- a/sys/compat/linux/linux_uid16.c
+++ b/sys/compat/linux/linux_uid16.c
@@ -85,13 +85,13 @@ linux_lchown16(struct thread *td, struct linux_lchown16_args *args)
int
linux_setgroups16(struct thread *td, struct linux_setgroups16_args *args)
{
+ const int ngrp = args->gidsetsize;
struct ucred *newcred, *oldcred;
l_gid16_t *linux_gidset;
- int ngrp, error;
+ int error;
struct proc *p;
- ngrp = args->gidsetsize;
- if (ngrp < 0 || ngrp >= ngroups_max)
+ if (ngrp < 0 || ngrp > ngroups_max)
return (EINVAL);
linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
error = copyin(args->gidset, linux_gidset, ngrp * sizeof(l_gid16_t));
@@ -100,7 +100,9 @@ linux_setgroups16(struct thread *td, struct linux_setgroups16_args *args)
free(linux_gidset, M_LINUX);
return (error);
}
+
newcred = crget();
+ crextend(newcred, ngrp);
p = td->td_proc;
PROC_LOCK(p);
oldcred = crcopysafe(p, newcred);
@@ -133,34 +135,29 @@ out:
int
linux_getgroups16(struct thread *td, struct linux_getgroups16_args *args)
{
- struct ucred *cred;
+ const struct ucred *const cred = td->td_ucred;
l_gid16_t *linux_gidset;
- gid_t *bsd_gidset;
- int bsd_gidsetsz, ngrp, error;
+ int ngrp, error;
- cred = td->td_ucred;
- bsd_gidset = cred->cr_groups;
- bsd_gidsetsz = cred->cr_ngroups;
+ ngrp = args->gidsetsize;
- if ((ngrp = args->gidsetsize) == 0) {
- td->td_retval[0] = bsd_gidsetsz;
+ if (ngrp == 0) {
+ td->td_retval[0] = cred->cr_ngroups;
return (0);
}
-
- if (ngrp < bsd_gidsetsz)
+ if (ngrp < cred->cr_ngroups)
return (EINVAL);
- ngrp = 0;
- linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
- M_LINUX, M_WAITOK);
- while (ngrp < bsd_gidsetsz) {
- linux_gidset[ngrp] = bsd_gidset[ngrp];
- ngrp++;
- }
+ ngrp = cred->cr_ngroups;
+
+ linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
+ for (int i = 0; i < ngrp; ++i)
+ linux_gidset[i] = cred->cr_groups[i];
error = copyout(linux_gidset, args->gidset, ngrp * sizeof(l_gid16_t));
free(linux_gidset, M_LINUX);
- if (error) {
+
+ if (error != 0) {
LIN_SDT_PROBE1(uid16, linux_getgroups16, copyout_error, error);
return (error);
}
diff --git a/sys/compat/linuxkpi/common/include/acpi/acpi.h b/sys/compat/linuxkpi/common/include/acpi/acpi.h
index 1e398d05ba20..9bb435591daa 100644
--- a/sys/compat/linuxkpi/common/include/acpi/acpi.h
+++ b/sys/compat/linuxkpi/common/include/acpi/acpi.h
@@ -37,7 +37,7 @@
/*
* LINUXKPI_WANT_LINUX_ACPI is a temporary workaround to allow drm-kmod
* to update all needed branches without breaking builds.
- * Once that happened and checks are implemented based on __FreeBSD_verison
+ * Once that has happened and checks are implemented based on __FreeBSD_version
* we will remove these conditions again.
*/
@@ -131,7 +131,7 @@ acpi_format_exception(ACPI_STATUS Exception)
}
static inline ACPI_STATUS
-acpi_get_handle(ACPI_HANDLE Parent, ACPI_STRING Pathname,
+acpi_get_handle(ACPI_HANDLE Parent, const char *Pathname,
ACPI_HANDLE *RetHandle)
{
return (AcpiGetHandle(Parent, Pathname, RetHandle));
diff --git a/sys/compat/linuxkpi/common/include/asm/topology.h b/sys/compat/linuxkpi/common/include/asm/topology.h
new file mode 100644
index 000000000000..f334d3253cfb
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/asm/topology.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_ASM_TOPOLOGY_H_
+#define _LINUXKPI_ASM_TOPOLOGY_H_
+
+#if defined(__i386__) || defined(__amd64__)
+#include <sys/smp.h>
+
+/*
+ * The following functions are defined in `arch/x86/include/asm/topology.h`
+ * and thus are specific to i386 and amd64.
+ */
+
+static inline unsigned int
+topology_num_cores_per_package(void)
+{
+ return (mp_ncores);
+}
+
+static inline unsigned int
+topology_num_threads_per_package(void)
+{
+ return (mp_ncpus);
+}
+#endif
+
+#endif /* _LINUXKPI_ASM_TOPOLOGY_H_ */
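
A short sketch of how a LinuxKPI consumer might use these new helpers (the caller is hypothetical, for illustration only):

	#include <asm/topology.h>

	/* Hypothetical: derive SMT threads per core on x86. */
	static inline unsigned int
	example_threads_per_core(void)
	{
		return (topology_num_threads_per_package() /
		    topology_num_cores_per_package());
	}
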
diff --git a/sys/compat/linuxkpi/common/include/kunit/static_stub.h b/sys/compat/linuxkpi/common/include/kunit/static_stub.h
new file mode 100644
index 000000000000..9d425d46dbb0
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/kunit/static_stub.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _LINUXKPI_KUNIT_STATIC_STUB_H
+#define _LINUXKPI_KUNIT_STATIC_STUB_H
+
+#define KUNIT_STATIC_STUB_REDIRECT(_fn, ...) do { } while(0)
+
+#endif /* _LINUXKPI_KUNIT_STATIC_STUB_H */
diff --git a/sys/compat/linuxkpi/common/include/linux/bitops.h b/sys/compat/linuxkpi/common/include/linux/bitops.h
index bc776a0db9c4..00dd1f9a1ec0 100644
--- a/sys/compat/linuxkpi/common/include/linux/bitops.h
+++ b/sys/compat/linuxkpi/common/include/linux/bitops.h
@@ -62,10 +62,10 @@
#define hweight64(x) bitcount64(x)
#define hweight_long(x) bitcountl(x)
-#define HWEIGHT8(x) (bitcount8((uint8_t)(x)))
-#define HWEIGHT16(x) (bitcount16(x))
-#define HWEIGHT32(x) (bitcount32(x))
-#define HWEIGHT64(x) (bitcount64(x))
+#define HWEIGHT8(x) (__builtin_popcountg((uint8_t)(x)))
+#define HWEIGHT16(x) (__builtin_popcountg((uint16_t)(x)))
+#define HWEIGHT32(x) (__builtin_popcountg((uint32_t)(x)))
+#define HWEIGHT64(x) (__builtin_popcountg((uint64_t)(x)))
static inline int
__ffs(int mask)
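
The HWEIGHT* macros now use __builtin_popcountg, the type-generic population-count builtin available in recent GCC and Clang; the casts pin the operand width so truncation happens before counting. For example (values illustrative):

	/* The cast truncates 0x1ff to 0xff, so only 8 bits are counted. */
	unsigned int w8 = HWEIGHT8(0x1ffU);		/* == 8 */
	unsigned int w32 = HWEIGHT32(0x80000001U);	/* == 2 */
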
diff --git a/sys/compat/linuxkpi/common/include/linux/cleanup.h b/sys/compat/linuxkpi/common/include/linux/cleanup.h
index 01f234f0cbe7..5bb146f082ed 100644
--- a/sys/compat/linuxkpi/common/include/linux/cleanup.h
+++ b/sys/compat/linuxkpi/common/include/linux/cleanup.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2024 The FreeBSD Foundation
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
*
* This software was developed by Björn Zeeb under sponsorship from
* the FreeBSD Foundation.
@@ -43,4 +43,51 @@
guard_ ## _n ## _t guard_ ## _n ## _ ## __COUNTER__ \
__cleanup(guard_ ## _n ## _destroy) = guard_ ## _n ## _create
+#define DEFINE_FREE(_n, _t, _f) \
+ static inline void \
+ __free_ ## _n(void *p) \
+ { \
+ _t _T; \
+ \
+ _T = *(_t *)p; \
+ _f; \
+ }
+
+#define __free(_n) __cleanup(__free_##_n)
+
+/*
+ * Given this is a _0 version it should likely be broken up into parts.
+ * But we have no idea what a _1, _2, ... version would do differently
+ * until we see a call.
+ * This is used for a not-real-type (rcu).  We use a bool to "simulate"
+ * the lock being held.  Also, _T is still special and may not always be
+ * used, so tag it with __unused (or better, the LinuxKPI __maybe_unused).
+ */
+#define DEFINE_LOCK_GUARD_0(_n, _lock, _unlock, ...) \
+ \
+ typedef struct { \
+ bool lock; \
+ __VA_ARGS__; \
+ } guard_ ## _n ## _t; \
+ \
+ static inline void \
+ guard_ ## _n ## _destroy(guard_ ## _n ## _t *_T) \
+ { \
+ if (_T->lock) { \
+ _unlock; \
+ } \
+ } \
+ \
+ static inline guard_ ## _n ## _t \
+ guard_ ## _n ## _create(void) \
+ { \
+ guard_ ## _n ## _t _tmp; \
+ guard_ ## _n ## _t *_T __maybe_unused; \
+ \
+ _tmp.lock = true; \
+ _T = &_tmp; \
+ _lock; \
+ return (_tmp); \
+ }
+
#endif /* _LINUXKPI_LINUX_CLEANUP_H */
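
Both additions mirror the Linux cleanup API. DEFINE_FREE() generates a destructor that the __free() attribute attaches to a variable, so it runs on every scope exit. A minimal sketch assuming a kfree-based free function (in Linux the kfree variant is declared in slab.h; it is defined here only for illustration):

	DEFINE_FREE(kfree, void *, if (_T) kfree(_T))

	static int
	example(void)			/* hypothetical function */
	{
		void *buf __free(kfree) = kmalloc(64, GFP_KERNEL);

		if (buf == NULL)
			return (-ENOMEM);
		/* buf is kfree()d automatically on every return path. */
		return (0);
	}
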
diff --git a/sys/compat/linuxkpi/common/include/linux/compiler.h b/sys/compat/linuxkpi/common/include/linux/compiler.h
index fb5ad3bf4fe4..948396144ad6 100644
--- a/sys/compat/linuxkpi/common/include/linux/compiler.h
+++ b/sys/compat/linuxkpi/common/include/linux/compiler.h
@@ -130,4 +130,10 @@
#define is_signed_type(t) ((t)-1 < (t)1)
#define is_unsigned_type(t) ((t)-1 > (t)1)
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(_s) __builtin_dynamic_object_size(_s, 0)
+#else
+#define __struct_size(_s) __builtin_object_size(_s, 0)
+#endif
+
#endif /* _LINUXKPI_LINUX_COMPILER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/device.h b/sys/compat/linuxkpi/common/include/linux/device.h
index 2556b0c45e49..7dd6340746d2 100644
--- a/sys/compat/linuxkpi/common/include/linux/device.h
+++ b/sys/compat/linuxkpi/common/include/linux/device.h
@@ -4,7 +4,7 @@
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
- * Copyright (c) 2021-2022 The FreeBSD Foundation
+ * Copyright (c) 2021-2025 The FreeBSD Foundation
*
* Portions of this software were developed by Björn Zeeb
* under sponsorship from the FreeBSD Foundation.
@@ -284,7 +284,8 @@ int lkpi_devres_destroy(struct device *, void(*release)(struct device *, void *)
void lkpi_devres_release_free_list(struct device *);
void lkpi_devres_unlink(struct device *, void *);
void lkpi_devm_kmalloc_release(struct device *, void *);
-#define devm_kfree(_d, _p) lkpi_devm_kmalloc_release(_d, _p)
+void lkpi_devm_kfree(struct device *, const void *);
+#define devm_kfree(_d, _p) lkpi_devm_kfree(_d, _p)
static inline const char *
dev_driver_string(const struct device *dev)
diff --git a/sys/compat/linuxkpi/common/include/linux/gfp.h b/sys/compat/linuxkpi/common/include/linux/gfp.h
index 4c4caa621789..7a32e7862338 100644
--- a/sys/compat/linuxkpi/common/include/linux/gfp.h
+++ b/sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -34,6 +34,7 @@
#include <sys/malloc.h>
#include <linux/page.h>
+#include <linux/topology.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
diff --git a/sys/compat/linuxkpi/common/include/linux/idr.h b/sys/compat/linuxkpi/common/include/linux/idr.h
index 535d8ce07fb4..06850c94a5e9 100644
--- a/sys/compat/linuxkpi/common/include/linux/idr.h
+++ b/sys/compat/linuxkpi/common/include/linux/idr.h
@@ -147,6 +147,13 @@ ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
return (ida_simple_get(ida, 0, max, gfp));
}
+static inline int
+ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, gfp_t gfp)
+{
+
+ return (ida_simple_get(ida, min, max, gfp));
+}
+
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
return (ida_alloc_max(ida, ~0u, gfp));
diff --git a/sys/compat/linuxkpi/common/include/linux/ieee80211.h b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
index 3644ef80861b..17041bb03ce8 100644
--- a/sys/compat/linuxkpi/common/include/linux/ieee80211.h
+++ b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
@@ -35,6 +35,7 @@
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
/* linux_80211.c */
@@ -121,7 +122,20 @@ enum ieee80211_rate_control_changed_flags {
/* 802.11-2016, 9.4.2.158.3 Supported VHT-MCS and NSS Set field. */
#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) /* part of tx_highest */
-#define IEEE80211_VHT_MAX_AMPDU_1024K 7 /* 9.4.2.56.3 A-MPDU Parameters field, Table 9-163 */
+/*
+ * 802.11-2020, 9.4.2.157.2 VHT Capabilities Information field,
+ * Table 9-271-Subfields of the VHT Capabilities Information field (continued).
+ */
+enum ieee80211_vht_max_ampdu_len_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7,
+};
#define IEEE80211_WEP_IV_LEN 3 /* net80211: IEEE80211_WEP_IVLEN */
#define IEEE80211_WEP_ICV_LEN 4
@@ -133,9 +147,9 @@ enum ieee80211_rate_control_changed_flags {
enum wlan_ht_cap_sm_ps {
WLAN_HT_CAP_SM_PS_STATIC = 0,
- WLAN_HT_CAP_SM_PS_DYNAMIC,
- WLAN_HT_CAP_SM_PS_INVALID,
- WLAN_HT_CAP_SM_PS_DISABLED,
+ WLAN_HT_CAP_SM_PS_DYNAMIC = 1,
+ WLAN_HT_CAP_SM_PS_INVALID = 2,
+ WLAN_HT_CAP_SM_PS_DISABLED = 3
};
#define WLAN_MAX_KEY_LEN 32
@@ -394,6 +408,14 @@ enum ieee80211_sta_state {
IEEE80211_STA_AUTHORIZED = 4, /* 802.1x */
};
+enum ieee80211_sta_rx_bandwidth {
+ IEEE80211_STA_RX_BW_20 = 0,
+ IEEE80211_STA_RX_BW_40,
+ IEEE80211_STA_RX_BW_80,
+ IEEE80211_STA_RX_BW_160,
+ IEEE80211_STA_RX_BW_320,
+};
+
enum ieee80211_tx_info_flags {
/* XXX TODO .. right shift numbers - not sure where that came from? */
IEEE80211_TX_CTL_AMPDU = BIT(0),
@@ -510,24 +532,24 @@ struct ieee80211_mgmt {
uint16_t beacon_int;
uint16_t capab_info;
uint8_t variable[0];
- } beacon;
+ } __packed beacon;
/* 9.3.3.5 Association Request frame format */
struct {
uint16_t capab_info;
uint16_t listen_interval;
uint8_t variable[0];
- } assoc_req;
+ } __packed assoc_req;
/* 9.3.3.10 Probe Request frame format */
struct {
uint8_t variable[0];
- } probe_req;
+ } __packed probe_req;
/* 9.3.3.11 Probe Response frame format */
struct {
uint64_t timestamp;
uint16_t beacon_int;
uint16_t capab_info;
uint8_t variable[0];
- } probe_resp;
+ } __packed probe_resp;
/* 9.3.3.14 Action frame format */
struct {
/* 9.4.1.11 Action field */
@@ -543,7 +565,7 @@ struct ieee80211_mgmt {
uint8_t tpc_elem_length;
uint8_t tpc_elem_tx_power;
uint8_t tpc_elem_link_margin;
- } tpc_report;
+ } __packed tpc_report;
/* 9.6.8.33 Fine Timing Measurement frame format */
struct {
uint8_t dialog_token;
@@ -553,7 +575,7 @@ struct ieee80211_mgmt {
uint16_t tod_error;
uint16_t toa_error;
uint8_t variable[0];
- } ftm;
+ } __packed ftm;
/* 802.11-2016, 9.6.5.2 ADDBA Request frame format */
struct {
uint8_t action_code;
@@ -563,16 +585,16 @@ struct ieee80211_mgmt {
uint16_t start_seq_num;
/* Optional follows... */
uint8_t variable[0];
- } addba_req;
+ } __packed addba_req;
/* XXX */
struct {
uint8_t dialog_token;
- } wnm_timing_msr;
+ } __packed wnm_timing_msr;
} u;
- } action;
+ } __packed action;
DECLARE_FLEX_ARRAY(uint8_t, body);
} u;
-};
+} __packed __aligned(2);
struct ieee80211_cts { /* net80211::ieee80211_frame_cts */
__le16 frame_control;
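
Marking the nested frame-format structs __packed keeps the C layout byte-identical to the 802.11 wire format; without it the compiler may insert padding, for example before the uint64_t timestamp in beacon and probe_resp. Packing the outer struct would otherwise drop its natural alignment, so __aligned(2) restores the 2-byte alignment the header actually has. A sketch of the kind of invariant this preserves (the assertion is added here for illustration):

	/* 2 (fc) + 2 (dur) + 6 + 6 + 6 (addrs) + 2 (seq) = 24 bytes. */
	_Static_assert(offsetof(struct ieee80211_mgmt, u) == 24,
	    "management header must match the 802.11 wire layout");
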
diff --git a/sys/compat/linuxkpi/common/include/linux/ioport.h b/sys/compat/linuxkpi/common/include/linux/ioport.h
index 444f3ad94602..763af2de7c4f 100644
--- a/sys/compat/linuxkpi/common/include/linux/ioport.h
+++ b/sys/compat/linuxkpi/common/include/linux/ioport.h
@@ -40,6 +40,7 @@
struct resource {
resource_size_t start;
resource_size_t end;
+ const char *name;
};
static inline resource_size_t
diff --git a/sys/compat/linuxkpi/common/include/linux/math.h b/sys/compat/linuxkpi/common/include/linux/math.h
index 5a348a57747b..1d50e011f66d 100644
--- a/sys/compat/linuxkpi/common/include/linux/math.h
+++ b/sys/compat/linuxkpi/common/include/linux/math.h
@@ -56,7 +56,7 @@
__ret; \
})
-#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 60600
+#if !defined(LINUXKPI_VERSION) || (LINUXKPI_VERSION >= 60600)
#define abs_diff(x, y) ({ \
__typeof(x) _x = (x); \
__typeof(y) _y = (y); \
diff --git a/sys/compat/linuxkpi/common/include/linux/math64.h b/sys/compat/linuxkpi/common/include/linux/math64.h
index a216d350570f..25ca9da1b622 100644
--- a/sys/compat/linuxkpi/common/include/linux/math64.h
+++ b/sys/compat/linuxkpi/common/include/linux/math64.h
@@ -98,6 +98,12 @@ div64_u64_round_up(uint64_t dividend, uint64_t divisor)
return ((dividend + divisor - 1) / divisor);
}
+static inline uint64_t
+roundup_u64(uint64_t x1, uint32_t x2)
+{
+ return (div_u64(x1 + x2 - 1, x2) * x2);
+}
+
#define DIV64_U64_ROUND_UP(...) \
div64_u64_round_up(__VA_ARGS__)
diff --git a/sys/compat/linuxkpi/common/include/linux/netdevice.h b/sys/compat/linuxkpi/common/include/linux/netdevice.h
index cd7d23077a62..3b808a4a1749 100644
--- a/sys/compat/linuxkpi/common/include/linux/netdevice.h
+++ b/sys/compat/linuxkpi/common/include/linux/netdevice.h
@@ -4,7 +4,7 @@
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013-2019 Mellanox Technologies, Ltd.
* All rights reserved.
- * Copyright (c) 2020-2021 The FreeBSD Foundation
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
* Copyright (c) 2020-2022 Bjoern A. Zeeb
*
* Portions of this software were developed by Björn Zeeb
@@ -302,6 +302,13 @@ netdev_rss_key_fill(uint32_t *buf, size_t len)
get_random_bytes(buf, len);
}
+static inline void
+__hw_addr_init(struct netdev_hw_addr_list *list)
+{
+ list->count = 0;
+ INIT_LIST_HEAD(&list->addr_list);
+}
+
static inline int
netdev_hw_addr_list_count(struct netdev_hw_addr_list *list)
{
diff --git a/sys/compat/linuxkpi/common/include/linux/overflow.h b/sys/compat/linuxkpi/common/include/linux/overflow.h
index 9ba9b9500f11..e811037b8ecc 100644
--- a/sys/compat/linuxkpi/common/include/linux/overflow.h
+++ b/sys/compat/linuxkpi/common/include/linux/overflow.h
@@ -33,8 +33,10 @@
* credit to Christian Biere.
*/
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
-#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
-#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define __type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_max(t) __type_max(typeof(t))
+#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define type_min(t) __type_min(typeof(t))
/*
* Avoids triggering -Wtype-limits compilation warning,
@@ -59,46 +61,123 @@ static inline bool __must_check __must_check_overflow(bool overflow)
* @b: second addend
* @d: pointer to store sum
*
- * Returns 0 on success.
+ * Returns true on wrap-around, false otherwise.
*
- * *@d holds the results of the attempted addition, but is not considered
- * "safe for use" on a non-zero return value, which indicates that the
- * sum has overflowed or been truncated.
+ * *@d holds the results of the attempted addition, regardless of whether
+ * wrap-around occurred.
*/
#define check_add_overflow(a, b, d) \
__must_check_overflow(__builtin_add_overflow(a, b, d))
/**
+ * wrapping_add() - Intentionally perform a wrapping addition
+ * @type: type for result of calculation
+ * @a: first addend
+ * @b: second addend
+ *
+ * Return the potentially wrapped-around addition without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_add(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_add_overflow(a, b, &__val); \
+ __val; \
+ })
+
+/**
+ * wrapping_assign_add() - Intentionally perform a wrapping increment assignment
+ * @var: variable to be incremented
+ * @offset: amount to add
+ *
+ * Increments @var by @offset with wrap-around. Returns the resulting
+ * value of @var. Will not trip any wrap-around sanitizers.
+ *
+ * Returns the new value of @var.
+ */
+#define wrapping_assign_add(var, offset) \
+ ({ \
+ typeof(var) *__ptr = &(var); \
+ *__ptr = wrapping_add(typeof(var), *__ptr, offset); \
+ })
+
+/**
* check_sub_overflow() - Calculate subtraction with overflow checking
* @a: minuend; value to subtract from
* @b: subtrahend; value to subtract from @a
* @d: pointer to store difference
*
- * Returns 0 on success.
+ * Returns true on wrap-around, false otherwise.
*
- * *@d holds the results of the attempted subtraction, but is not considered
- * "safe for use" on a non-zero return value, which indicates that the
- * difference has underflowed or been truncated.
+ * *@d holds the results of the attempted subtraction, regardless of whether
+ * wrap-around occurred.
*/
#define check_sub_overflow(a, b, d) \
__must_check_overflow(__builtin_sub_overflow(a, b, d))
/**
+ * wrapping_sub() - Intentionally perform a wrapping subtraction
+ * @type: type for result of calculation
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ *
+ * Return the potentially wrapped-around subtraction without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_sub(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_sub_overflow(a, b, &__val); \
+ __val; \
+ })
+
+/**
+ * wrapping_assign_sub() - Intentionally perform a wrapping decrement assign
+ * @var: variable to be decremented
+ * @offset: amount to subtract
+ *
+ * Decrements @var by @offset with wrap-around. Returns the resulting
+ * value of @var. Will not trip any wrap-around sanitizers.
+ *
+ * Returns the new value of @var.
+ */
+#define wrapping_assign_sub(var, offset) \
+ ({ \
+ typeof(var) *__ptr = &(var); \
+ *__ptr = wrapping_sub(typeof(var), *__ptr, offset); \
+ })
+
+/**
* check_mul_overflow() - Calculate multiplication with overflow checking
* @a: first factor
* @b: second factor
* @d: pointer to store product
*
- * Returns 0 on success.
+ * Returns true on wrap-around, false otherwise.
*
- * *@d holds the results of the attempted multiplication, but is not
- * considered "safe for use" on a non-zero return value, which indicates
- * that the product has overflowed or been truncated.
+ * *@d holds the results of the attempted multiplication, regardless of whether
+ * wrap-around occurred.
*/
#define check_mul_overflow(a, b, d) \
__must_check_overflow(__builtin_mul_overflow(a, b, d))
/**
+ * wrapping_mul() - Intentionally perform a wrapping multiplication
+ * @type: type for result of calculation
+ * @a: first factor
+ * @b: second factor
+ *
+ * Return the potentially wrapped-around multiplication without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_mul(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_mul_overflow(a, b, &__val); \
+ __val; \
+ })
+
+/**
* check_shl_overflow() - Calculate a left-shifted value and check overflow
* @a: Value to be shifted
* @s: How many bits left to shift
@@ -122,7 +201,7 @@ static inline bool __must_check __must_check_overflow(bool overflow)
typeof(a) _a = a; \
typeof(s) _s = s; \
typeof(d) _d = d; \
- u64 _a_full = _a; \
+ unsigned long long _a_full = _a; \
unsigned int _to_shift = \
is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
@@ -132,10 +211,10 @@ static inline bool __must_check __must_check_overflow(bool overflow)
#define __overflows_type_constexpr(x, T) ( \
is_unsigned_type(typeof(x)) ? \
- (x) > type_max(typeof(T)) : \
+ (x) > type_max(T) : \
is_unsigned_type(typeof(T)) ? \
- (x) < 0 || (x) > type_max(typeof(T)) : \
- (x) < type_min(typeof(T)) || (x) > type_max(typeof(T)))
+ (x) < 0 || (x) > type_max(T) : \
+ (x) < type_min(T) || (x) > type_max(T))
#define __overflows_type(x, T) ({ \
typeof(T) v = 0; \
@@ -312,27 +391,40 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
struct_size((type *)NULL, member, count)
/**
- * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
- * Enables caller macro to pass (different) initializer.
+ * __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass arbitrary trailing expressions
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
- * @initializer: initializer expression (could be empty for no init).
+ * @trailer: Trailing expressions for attributes and/or initializers.
*/
-#define _DEFINE_FLEX(type, name, member, count, initializer) \
+#define __DEFINE_FLEX(type, name, member, count, trailer...) \
_Static_assert(__builtin_constant_p(count), \
"onstack flex array members require compile-time const count"); \
union { \
u8 bytes[struct_size_t(type, member, count)]; \
type obj; \
- } name##_u initializer; \
+ } name##_u trailer; \
type *name = (type *)&name##_u
/**
- * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
- * flexible array member.
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: Initializer expression (e.g., pass `= { }` at minimum).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer...) \
+ __DEFINE_FLEX(type, name, member, count, = { .obj initializer })
+
+/**
+ * DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member, when it does not have a __counted_by annotation.
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
@@ -342,8 +434,42 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @type structure with a trailing
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
+ * Use __member_size(@name->member) to get compile-time size of @name members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
+ */
+#define DEFINE_RAW_FLEX(type, name, member, count) \
+ __DEFINE_FLEX(type, name, member, count, = { })
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @TYPE: structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @MEMBER: Name of the array member.
+ * @COUNTER: Name of the __counted_by member.
+ * @COUNT: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack, instance of @TYPE structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ * Use __member_size(@NAME->member) to get compile-time size of @NAME members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
+ */
+#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
+ _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
+
+/**
+ * STACK_FLEX_ARRAY_SIZE() - helper macro for DEFINE_FLEX() family.
+ * Returns the number of elements in @array.
+ *
+ * @name: Name for a variable defined in DEFINE_RAW_FLEX()/DEFINE_FLEX().
+ * @array: Name of the array member.
*/
-#define DEFINE_FLEX(type, name, member, count) \
- _DEFINE_FLEX(type, name, member, count, = {})
+#define STACK_FLEX_ARRAY_SIZE(name, array) \
+ (__member_size((name)->array) / sizeof(*(name)->array) + \
+ __must_be_array((name)->array))
#endif /* _LINUXKPI_LINUX_OVERFLOW_H */
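
Two brief usage sketches for the families added above (the struct type and values are illustrative). wrapping_add() performs modular arithmetic without tripping wrap-around sanitizers, and DEFINE_FLEX() builds an on-stack object whose __counted_by field is pre-initialized:

	/* Hypothetical flexible-array type for the DEFINE_FLEX() example. */
	struct report {
		uint8_t count;
		uint8_t data[] __counted_by(count);
	};

	static void
	example(void)			/* hypothetical function */
	{
		uint8_t seq = 250;

		/* Wraps mod 256 without sanitizer noise: seq == 4. */
		seq = wrapping_add(uint8_t, seq, 10);

		/* On-stack object with count pre-set to 8. */
		DEFINE_FLEX(struct report, r, data, count, 8);
		memset(r->data, 0, r->count);
	}
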
diff --git a/sys/compat/linuxkpi/common/include/linux/pci.h b/sys/compat/linuxkpi/common/include/linux/pci.h
index 3fd4191b9917..ffc2be600c22 100644
--- a/sys/compat/linuxkpi/common/include/linux/pci.h
+++ b/sys/compat/linuxkpi/common/include/linux/pci.h
@@ -355,7 +355,6 @@ struct pci_dev {
TAILQ_HEAD(, pci_mmio_region) mmio;
};
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name);
int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
unsigned int flags);
bool pci_device_is_present(struct pci_dev *pdev);
@@ -365,10 +364,13 @@ void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev);
void *linuxkpi_pci_iomap_range(struct pci_dev *, int,
unsigned long, unsigned long);
void *linuxkpi_pci_iomap(struct pci_dev *, int, unsigned long);
+void *linuxkpi_pcim_iomap(struct pci_dev *, int, unsigned long);
void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res);
int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask,
const char *name);
+int linuxkpi_pci_request_region(struct pci_dev *, int, const char *);
int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name);
+int linuxkpi_pcim_request_all_regions(struct pci_dev *, const char *);
void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar);
void linuxkpi_pci_release_regions(struct pci_dev *pdev);
int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
@@ -561,12 +563,16 @@ done:
return (pdev->bus->self);
}
+#define pci_request_region(pdev, bar, res_name) \
+ linuxkpi_pci_request_region(pdev, bar, res_name)
#define pci_release_region(pdev, bar) \
linuxkpi_pci_release_region(pdev, bar)
-#define pci_release_regions(pdev) \
- linuxkpi_pci_release_regions(pdev)
#define pci_request_regions(pdev, res_name) \
linuxkpi_pci_request_regions(pdev, res_name)
+#define pci_release_regions(pdev) \
+ linuxkpi_pci_release_regions(pdev)
+#define pcim_request_all_regions(pdev, name) \
+ linuxkpi_pcim_request_all_regions(pdev, name)
static inline void
lkpi_pci_disable_msix(struct pci_dev *pdev)
@@ -803,6 +809,8 @@ static inline void pci_disable_sriov(struct pci_dev *dev)
linuxkpi_pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size)
#define pci_iomap(pdev, mmio_bar, mmio_size) \
linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size)
+#define pcim_iomap(pdev, bar, maxlen) \
+ linuxkpi_pcim_iomap(pdev, bar, maxlen)
#define pci_iounmap(pdev, res) \
linuxkpi_pci_iounmap(pdev, res)
@@ -1324,6 +1332,12 @@ struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain,
#define pci_get_domain_bus_and_slot(domain, bus, devfn) \
lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn)
+struct pci_dev *lkpi_pci_get_slot(struct pci_bus *, unsigned int);
+#ifndef WANT_NATIVE_PCI_GET_SLOT
+#define pci_get_slot(_pbus, _devfn) \
+ lkpi_pci_get_slot(_pbus, _devfn)
+#endif
+
static inline int
pci_domain_nr(struct pci_bus *pbus)
{
@@ -1445,6 +1459,9 @@ linuxkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)
return (lkpi_pci_get_device(vendor, device, odev));
}
+#define for_each_pci_dev(_pdev) \
+ while ((_pdev = linuxkpi_pci_get_device(PCI_ANY_ID, PCI_ANY_ID, _pdev)) != NULL)
+
/* This is a FreeBSD extension so we can use bus_*(). */
static inline void
linuxkpi_pcim_want_to_use_bus_functions(struct pci_dev *pdev)
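A sketch of how a driver might walk all PCI devices with the new for_each_pci_dev() helper; the printout is illustrative only:

	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev)
		pr_info("found PCI device %04x:%04x\n",
		    pdev->vendor, pdev->device);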
diff --git a/sys/compat/linuxkpi/common/include/linux/printk.h b/sys/compat/linuxkpi/common/include/linux/printk.h
index da9d45122d4d..d2d197682782 100644
--- a/sys/compat/linuxkpi/common/include/linux/printk.h
+++ b/sys/compat/linuxkpi/common/include/linux/printk.h
@@ -94,4 +94,10 @@ print_hex_dump_bytes(const char *prefix_str, const int prefix_type,
0; \
})
+#define FW_BUG "[Firmware Bug]: "
+#define FW_WARN "[Firmware Warn]: "
+#define FW_INFO "[Firmware Info]: "
+#define HW_ERR "[Hardware Error]: "
+#define DEPRECATED "[Deprecated]: "
+
#endif /* _LINUXKPI_LINUX_PRINTK_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/rcupdate.h b/sys/compat/linuxkpi/common/include/linux/rcupdate.h
index 85d766c8dbc9..4aceb7296cd6 100644
--- a/sys/compat/linuxkpi/common/include/linux/rcupdate.h
+++ b/sys/compat/linuxkpi/common/include/linux/rcupdate.h
@@ -1,7 +1,7 @@
/*-
* Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
* All rights reserved.
- * Copyright (c) 2024 The FreeBSD Foundation
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
*
* Portions of this software were developed by Björn Zeeb
* under sponsorship from the FreeBSD Foundation.
@@ -35,6 +35,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/cleanup.h>
#include <machine/atomic.h>
@@ -162,4 +163,6 @@ void linux_synchronize_rcu(unsigned type);
#define init_rcu_head_on_stack(...)
#define destroy_rcu_head_on_stack(...)
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
#endif /* _LINUXKPI_LINUX_RCUPDATE_H_ */
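With the new lock guard a read-side critical section becomes scope-based; a minimal sketch (struct foo and foo_list are hypothetical):

	static bool
	foo_present(int key)
	{
		struct foo *f;

		guard(rcu)();			/* rcu_read_lock() here ... */
		list_for_each_entry_rcu(f, &foo_list, entry)
			if (f->key == key)
				return (true);	/* ... unlock on any return */
		return (false);
	}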
diff --git a/sys/compat/linuxkpi/common/include/linux/refcount.h b/sys/compat/linuxkpi/common/include/linux/refcount.h
index 02a7eda3f4a9..46e501a65396 100644
--- a/sys/compat/linuxkpi/common/include/linux/refcount.h
+++ b/sys/compat/linuxkpi/common/include/linux/refcount.h
@@ -30,6 +30,7 @@
#define _LINUXKPI_LINUX_REFCOUNT_H
#include <linux/atomic.h>
+#include <linux/spinlock.h>
typedef atomic_t refcount_t;
diff --git a/sys/compat/linuxkpi/common/include/linux/seq_file.h b/sys/compat/linuxkpi/common/include/linux/seq_file.h
index 876ef9e8dfe5..47da16ab8688 100644
--- a/sys/compat/linuxkpi/common/include/linux/seq_file.h
+++ b/sys/compat/linuxkpi/common/include/linux/seq_file.h
@@ -55,6 +55,21 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
+#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct linux_file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
struct seq_file {
struct sbuf *buf;
size_t size;
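The new store variant expects a matching <name>_show/<name>_write pair; an illustrative (hypothetical) consumer:

	static int
	foo_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", foo_value);	/* foo_value is hypothetical */
		return (0);
	}

	static ssize_t
	foo_write(struct linux_file *file, const char __user *ubuf,
	    size_t count, loff_t *ppos)
	{
		/* Parse ubuf and update foo_value here. */
		return (count);
	}

	DEFINE_SHOW_STORE_ATTRIBUTE(foo);	/* emits foo_open and foo_fops */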
diff --git a/sys/compat/linuxkpi/common/include/linux/skbuff.h b/sys/compat/linuxkpi/common/include/linux/skbuff.h
index c8ad90281e34..6e41c368a8b8 100644
--- a/sys/compat/linuxkpi/common/include/linux/skbuff.h
+++ b/sys/compat/linuxkpi/common/include/linux/skbuff.h
@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2020-2025 The FreeBSD Foundation
- * Copyright (c) 2021-2023 Bjoern A. Zeeb
+ * Copyright (c) 2021-2025 Bjoern A. Zeeb
*
* This software was developed by Björn Zeeb under sponsorship from
* the FreeBSD Foundation.
@@ -47,13 +47,11 @@
#include <linux/ktime.h>
#include <linux/compiler.h>
-#include "opt_wlan.h"
-
-/* Currently this is only used for wlan so we can depend on that. */
-#if defined(IEEE80211_DEBUG) && !defined(SKB_DEBUG)
-#define SKB_DEBUG
-#endif
-
+/*
+ * At least the net/intel-irdma-kmod port pulls this header in; likely through
+ * if_ether.h (see PR289268). This means we can no longer rely on
+ * IEEE80211_DEBUG (opt_wlan.h) to automatically set SKB_DEBUG.
+ */
/* #define SKB_DEBUG */
#ifdef SKB_DEBUG
@@ -120,7 +118,7 @@ enum sk_checksum_flags {
CHECKSUM_NONE = 0x00,
CHECKSUM_UNNECESSARY = 0x01,
CHECKSUM_PARTIAL = 0x02,
- CHECKSUM_COMPLETE = 0x04,
+ CHECKSUM_COMPLETE = 0x03,
};
struct skb_frag {
@@ -170,7 +168,7 @@ struct sk_buff {
};
};
uint16_t protocol;
- uint8_t ip_summed;
+ uint8_t ip_summed; /* 2 bits only. */
/* uint8_t */
/* "Scratch" area for layers to store metadata. */
diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
index 47e3d133eb6c..0e649e1e3c4a 100644
--- a/sys/compat/linuxkpi/common/include/linux/slab.h
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -40,8 +40,10 @@
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/gfp.h>
+#include <linux/err.h>
#include <linux/llist.h>
#include <linux/overflow.h>
+#include <linux/cleanup.h>
MALLOC_DECLARE(M_KMALLOC);
@@ -153,6 +155,8 @@ kfree(const void *ptr)
lkpi_kfree(ptr);
}
+DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+
/*
 * Other k*alloc() functions using the above as the underlying allocator.
*/
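The cleanup.h attribute pairs with this definition; a minimal sketch of scope-based freeing (the caller is hypothetical):

	static int
	consume(size_t len)
	{
		void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

		if (buf == NULL)
			return (-ENOMEM);
		/* Use buf; kfree(buf) runs automatically on scope exit. */
		return (0);
	}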
diff --git a/sys/compat/linuxkpi/common/include/linux/string_choices.h b/sys/compat/linuxkpi/common/include/linux/string_choices.h
new file mode 100644
index 000000000000..74aa3fd019b2
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/string_choices.h
@@ -0,0 +1,71 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_STRING_CHOICES_H_
+#define _LINUXKPI_LINUX_STRING_CHOICES_H_
+
+#include <sys/types.h>
+
+static inline const char *
+str_yes_no(bool value)
+{
+ if (value)
+ return "yes";
+ else
+ return "no";
+}
+
+static inline const char *
+str_on_off(bool value)
+{
+ if (value)
+ return "on";
+ else
+ return "off";
+}
+
+static inline const char *
+str_enabled_disabled(bool value)
+{
+ if (value)
+ return "enabled";
+ else
+ return "disabled";
+}
+
+static inline const char *
+str_enable_disable(bool value)
+{
+ if (value)
+ return "enable";
+ else
+ return "disable";
+}
+
+#define str_disable_enable(_v) str_enable_disable(!(_v))
+
+#endif /* _LINUXKPI_LINUX_STRING_CHOICES_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/string_helpers.h b/sys/compat/linuxkpi/common/include/linux/string_helpers.h
index 1bdff2730361..07d113c0cb21 100644
--- a/sys/compat/linuxkpi/common/include/linux/string_helpers.h
+++ b/sys/compat/linuxkpi/common/include/linux/string_helpers.h
@@ -1,69 +1,12 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2023 Jean-Sébastien Pédron <dumbbell@FreeBSD.org>
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-2-Clause
*/
#ifndef _LINUXKPI_LINUX_STRING_HELPERS_H_
#define _LINUXKPI_LINUX_STRING_HELPERS_H_
-#include <sys/types.h>
-
-static inline const char *
-str_yes_no(bool value)
-{
- if (value)
- return "yes";
- else
- return "no";
-}
-
-static inline const char *
-str_on_off(bool value)
-{
- if (value)
- return "on";
- else
- return "off";
-}
-
-static inline const char *
-str_enabled_disabled(bool value)
-{
- if (value)
- return "enabled";
- else
- return "disabled";
-}
-
-static inline const char *
-str_enable_disable(bool value)
-{
- if (value)
- return "enable";
- else
- return "disable";
-}
+#include <linux/string_choices.h>
#endif
diff --git a/sys/compat/linuxkpi/common/include/linux/sysfs.h b/sys/compat/linuxkpi/common/include/linux/sysfs.h
index 65e023031bb2..470c224a9778 100644
--- a/sys/compat/linuxkpi/common/include/linux/sysfs.h
+++ b/sys/compat/linuxkpi/common/include/linux/sysfs.h
@@ -189,6 +189,50 @@ sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
return (0);
}
+static inline struct kobject *
+__sysfs_lookup_group(struct kobject *kobj, const char *group)
+{
+ int found;
+ struct sysctl_oid *group_oidp;
+ struct kobject *group_kobj;
+
+ found = 0;
+ if (group != NULL) {
+ SYSCTL_FOREACH(group_oidp, SYSCTL_CHILDREN(kobj->oidp)) {
+ if (strcmp(group_oidp->oid_name, group) != 0)
+ continue;
+ found = 1;
+ break;
+ }
+ } else {
+ found = 1;
+ group_oidp = kobj->oidp;
+ }
+
+ if (!found)
+ return (NULL);
+
+ group_kobj = group_oidp->oid_arg1;
+
+ return (group_kobj);
+}
+
+static inline int
+sysfs_add_file_to_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group)
+{
+ int ret;
+ struct kobject *group_kobj;
+
+ group_kobj = __sysfs_lookup_group(kobj, group);
+ if (group_kobj == NULL)
+ return (-ENOENT);
+
+ ret = sysfs_create_file(group_kobj, attr);
+
+ return (ret);
+}
+
static inline void
sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
{
@@ -197,6 +241,19 @@ sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
sysctl_remove_name(kobj->oidp, attr->name, 1, 1);
}
+static inline void
+sysfs_remove_file_from_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group)
+{
+ struct kobject *group_kobj;
+
+ group_kobj = __sysfs_lookup_group(kobj, group);
+ if (group_kobj == NULL)
+ return;
+
+ sysfs_remove_file(group_kobj, attr);
+}
+
static inline int
sysctl_handle_bin_attr(SYSCTL_HANDLER_ARGS)
{
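An illustrative pairing of the new group helpers (attribute and group name are hypothetical); the group must already exist as a sysctl child node of the kobject:

	static struct attribute foo_attr = { .name = "foo", .mode = 0444 };

	error = sysfs_add_file_to_group(kobj, &foo_attr, "stats");
	if (error != 0)
		return (error);
	/* ... */
	sysfs_remove_file_from_group(kobj, &foo_attr, "stats");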
diff --git a/sys/compat/linuxkpi/common/include/linux/timer.h b/sys/compat/linuxkpi/common/include/linux/timer.h
index a635f0faea59..d48939e28a02 100644
--- a/sys/compat/linuxkpi/common/include/linux/timer.h
+++ b/sys/compat/linuxkpi/common/include/linux/timer.h
@@ -49,8 +49,13 @@ extern unsigned long linux_timer_hz_mask;
#define TIMER_IRQSAFE 0x0001
+#if defined(LINUXKPI_VERSION) && (LINUXKPI_VERSION < 61600)
#define from_timer(var, arg, field) \
container_of(arg, typeof(*(var)), field)
+#else
+#define timer_container_of(var, arg, field) \
+ container_of(arg, typeof(*(var)), field)
+#endif
#define timer_setup(timer, func, flags) do { \
CTASSERT(((flags) & ~TIMER_IRQSAFE) == 0); \
@@ -79,11 +84,23 @@ extern unsigned long linux_timer_hz_mask;
extern int mod_timer(struct timer_list *, unsigned long);
extern void add_timer(struct timer_list *);
extern void add_timer_on(struct timer_list *, int cpu);
-extern int del_timer(struct timer_list *);
-extern int del_timer_sync(struct timer_list *);
+
+extern int timer_delete(struct timer_list *);
extern int timer_delete_sync(struct timer_list *);
extern int timer_shutdown_sync(struct timer_list *);
+static inline int
+del_timer(struct timer_list *tl)
+{
+ return (timer_delete(tl));
+}
+
+static inline int
+del_timer_sync(struct timer_list *tl)
+{
+ return (timer_delete_sync(tl));
+}
+
#define timer_pending(timer) callout_pending(&(timer)->callout)
#define round_jiffies(j) \
((unsigned long)(((j) + linux_timer_hz_mask) & ~linux_timer_hz_mask))
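Taken together with the timer_container_of() change above, the expected migration pattern for drivers looks roughly like this (softc and callback are hypothetical):

	struct foo_softc {
		struct timer_list timer;
	};

	static void
	foo_timeout(struct timer_list *t)
	{
		struct foo_softc *sc = timer_container_of(sc, t, timer);

		/* Handle the timeout using sc. */
	}

	/* Setup and teardown: */
	timer_setup(&sc->timer, foo_timeout, 0);
	/* ... */
	timer_delete_sync(&sc->timer);	/* preferred over the del_timer_sync() shim */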
diff --git a/sys/compat/linuxkpi/common/include/linux/topology.h b/sys/compat/linuxkpi/common/include/linux/topology.h
new file mode 100644
index 000000000000..16baffc024d1
--- /dev/null
+++ b/sys/compat/linuxkpi/common/include/linux/topology.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2025 The FreeBSD Foundation
+ * Copyright (c) 2025 Jean-Sébastien Pédron
+ *
+ * This software was developed by Jean-Sébastien Pédron under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUXKPI_LINUX_TOPOLOGY_H_
+#define _LINUXKPI_LINUX_TOPOLOGY_H_
+
+#include <asm/topology.h>
+
+#endif /* _LINUXKPI_LINUX_TOPOLOGY_H_ */
diff --git a/sys/compat/linuxkpi/common/include/net/cfg80211.h b/sys/compat/linuxkpi/common/include/net/cfg80211.h
index 18b34f0e90ec..239b4a5ae7b8 100644
--- a/sys/compat/linuxkpi/common/include/net/cfg80211.h
+++ b/sys/compat/linuxkpi/common/include/net/cfg80211.h
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/random.h>
@@ -56,8 +57,8 @@ extern int linuxkpi_debug_80211;
#endif
#define TODO(fmt, ...) if (linuxkpi_debug_80211 & D80211_TODO) \
printf("%s:%d: XXX LKPI80211 TODO " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
-#define IMPROVE(...) if (linuxkpi_debug_80211 & D80211_IMPROVE) \
- printf("%s:%d: XXX LKPI80211 IMPROVE\n", __func__, __LINE__)
+#define IMPROVE(fmt, ...) if (linuxkpi_debug_80211 & D80211_IMPROVE) \
+ printf("%s:%d: XXX LKPI80211 IMPROVE " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
enum rfkill_hard_block_reasons {
RFKILL_HARD_BLOCK_NOT_OWNER = BIT(0),
@@ -127,19 +128,24 @@ struct ieee80211_txrx_stypes {
uint16_t rx;
};
-/* XXX net80211 has an ieee80211_channel as well. */
+/*
+ * net80211 has an ieee80211_channel as well; we use the linuxkpi_ version
+ * internally in LinuxKPI and re-define ieee80211_channel for the drivers
+ * at the end of the file.
+ */
struct linuxkpi_ieee80211_channel {
- /* TODO FIXME */
- uint32_t hw_value; /* ic_ieee */
- uint32_t center_freq; /* ic_freq */
- enum ieee80211_channel_flags flags; /* ic_flags */
+ uint32_t center_freq;
+ uint16_t hw_value;
+ enum ieee80211_channel_flags flags;
enum nl80211_band band;
- int8_t max_power; /* ic_maxpower */
bool beacon_found;
- int max_antenna_gain, max_reg_power;
- int orig_flags;
- int dfs_cac_ms, dfs_state;
- int orig_mpwr;
+ enum nl80211_dfs_state dfs_state;
+ unsigned int dfs_cac_ms;
+ int max_antenna_gain;
+ int max_power;
+ int max_reg_power;
+ uint32_t orig_flags;
+ int orig_mpwr;
};
struct cfg80211_bitrate_mask {
@@ -722,8 +728,10 @@ struct linuxkpi_ieee80211_regdomain {
#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 0x04
#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x08
#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 0x10
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 0x10
#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x20
#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 0x40
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 0x40
#define VENDOR_CMD_RAW_DATA (void *)(uintptr_t)(-ENOENT)
@@ -1296,10 +1304,9 @@ reg_query_regdb_wmm(uint8_t *alpha2, uint32_t center_freq,
struct ieee80211_reg_rule *rule)
{
- /* ETSI has special rules. FreeBSD regdb needs to learn about them. */
- TODO();
+ IMPROVE("regdomain.xml needs to grow wmm information for at least ETSI");
- return (-ENXIO);
+ return (-ENODATA);
}
static __inline const u8 *
@@ -2065,6 +2072,18 @@ nl80211_chan_width_to_mhz(enum nl80211_chan_width width)
}
static __inline ssize_t
+wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize, const char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *, struct file *, char *, size_t, void *),
+ void *data)
+{
+ TODO();
+ return (-ENXIO);
+}
+
+static __inline ssize_t
wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
char *buf, size_t bufsize, const char __user *userbuf, size_t count,
ssize_t (*handler)(struct wiphy *, struct file *, char *, size_t, void *),
diff --git a/sys/compat/linuxkpi/common/include/net/mac80211.h b/sys/compat/linuxkpi/common/include/net/mac80211.h
index af3199c38939..8de03410c6b6 100644
--- a/sys/compat/linuxkpi/common/include/net/mac80211.h
+++ b/sys/compat/linuxkpi/common/include/net/mac80211.h
@@ -87,6 +87,9 @@ enum mcast_filter_flags {
FIF_PSPOLL = BIT(5),
FIF_CONTROL = BIT(6),
FIF_MCAST_ACTION = BIT(7),
+
+ /* Must stay last. */
+ FIF_FLAGS_MASK = BIT(8)-1,
};
enum ieee80211_bss_changed {
@@ -734,7 +737,7 @@ struct ieee80211_link_sta {
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
uint8_t rx_nss;
- enum ieee80211_sta_rx_bw bandwidth;
+ enum ieee80211_sta_rx_bandwidth bandwidth;
enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_agg agg;
struct ieee80211_sta_txpwr txpwr;
@@ -1135,7 +1138,7 @@ extern const struct cfg80211_ops linuxkpi_mac80211cfgops;
struct ieee80211_hw *linuxkpi_ieee80211_alloc_hw(size_t,
const struct ieee80211_ops *);
void linuxkpi_ieee80211_iffree(struct ieee80211_hw *);
-void linuxkpi_set_ieee80211_dev(struct ieee80211_hw *, char *);
+void linuxkpi_set_ieee80211_dev(struct ieee80211_hw *);
int linuxkpi_ieee80211_ifattach(struct ieee80211_hw *);
void linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *);
void linuxkpi_ieee80211_unregister_hw(struct ieee80211_hw *);
@@ -1184,7 +1187,7 @@ struct wireless_dev *linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *);
void linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *);
void linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *);
struct sk_buff *linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *,
- uint8_t *, uint8_t *, size_t, size_t);
+ const uint8_t *, const uint8_t *, size_t, size_t);
void linuxkpi_ieee80211_tx_status(struct ieee80211_hw *, struct sk_buff *);
void linuxkpi_ieee80211_tx_status_ext(struct ieee80211_hw *,
struct ieee80211_tx_status *);
@@ -1255,7 +1258,7 @@ SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev)
{
set_wiphy_dev(hw->wiphy, dev);
- linuxkpi_set_ieee80211_dev(hw, dev_name(dev));
+ linuxkpi_set_ieee80211_dev(hw);
IMPROVE();
}
@@ -1741,12 +1744,15 @@ ieee80211_request_smps(struct ieee80211_vif *vif, u_int link_id,
"SMPS_STATIC",
"SMPS_DYNAMIC",
"SMPS_AUTOMATIC",
- "SMPS_NUM_MODES"
};
- if (linuxkpi_debug_80211 & D80211_TODO)
- printf("%s:%d: XXX LKPI80211 TODO smps %d %s\n",
- __func__, __LINE__, smps, smps_mode_name[smps]);
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (smps >= nitems(smps_mode_name))
+ panic("%s: unsupported smps value: %d\n", __func__, smps);
+
+ IMPROVE("XXX LKPI80211 TODO smps %d %s\n", smps, smps_mode_name[smps]);
}
static __inline void
@@ -2161,8 +2167,8 @@ ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static __inline struct sk_buff *
-ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr,
- uint8_t *ssid, size_t ssid_len, size_t tailroom)
+ieee80211_probereq_get(struct ieee80211_hw *hw, const uint8_t *addr,
+ const uint8_t *ssid, size_t ssid_len, size_t tailroom)
{
return (linuxkpi_ieee80211_probereq_get(hw, addr, ssid, ssid_len,
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c
index 1d00e8da8f9a..bc4b334de28e 100644
--- a/sys/compat/linuxkpi/common/src/linux_80211.c
+++ b/sys/compat/linuxkpi/common/src/linux_80211.c
@@ -77,6 +77,8 @@
#include <linux/rculist.h>
#include "linux_80211.h"
+/* #define LKPI_80211_USE_SCANLIST */
+/* #define LKPI_80211_BGSCAN */
#define LKPI_80211_WME
#define LKPI_80211_HW_CRYPTO
#define LKPI_80211_HT
@@ -103,6 +105,10 @@ SYSCTL_DECL(_compat_linuxkpi);
SYSCTL_NODE(_compat_linuxkpi, OID_AUTO, 80211, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"LinuxKPI 802.11 compatibility layer");
+static bool lkpi_order_scanlist = false;
+SYSCTL_BOOL(_compat_linuxkpi_80211, OID_AUTO, order_scanlist, CTLFLAG_RW,
+ &lkpi_order_scanlist, 0, "Enable LinuxKPI 802.11 scan list shuffeling");
+
#if defined(LKPI_80211_HW_CRYPTO)
static bool lkpi_hwcrypto = false;
SYSCTL_BOOL(_compat_linuxkpi_80211, OID_AUTO, hw_crypto, CTLFLAG_RDTUN,
@@ -167,6 +173,7 @@ const struct cfg80211_ops linuxkpi_mac80211cfgops = {
static struct lkpi_sta *lkpi_find_lsta_by_ni(struct lkpi_vif *,
struct ieee80211_node *);
#endif
+static void lkpi_sw_scan_task(void *, int);
static void lkpi_80211_txq_tx_one(struct lkpi_sta *, struct mbuf *);
static void lkpi_80211_txq_task(void *, int);
static void lkpi_80211_lhw_rxq_task(void *, int);
@@ -274,48 +281,40 @@ lkpi_nl80211_sta_info_to_str(struct sbuf *s, const char *prefix,
sbuf_printf(s, "\n");
}
-static int
-lkpi_80211_dump_stas(SYSCTL_HANDLER_ARGS)
+static void
+lkpi_80211_dump_lvif_stas(struct lkpi_vif *lvif, struct sbuf *s)
{
struct lkpi_hw *lhw;
struct ieee80211_hw *hw;
struct ieee80211vap *vap;
- struct lkpi_vif *lvif;
struct ieee80211_vif *vif;
struct lkpi_sta *lsta;
struct ieee80211_sta *sta;
struct station_info sinfo;
- struct sbuf s;
int error;
- if (req->newptr)
- return (EPERM);
-
- lvif = (struct lkpi_vif *)arg1;
vif = LVIF_TO_VIF(lvif);
vap = LVIF_TO_VAP(lvif);
lhw = vap->iv_ic->ic_softc;
hw = LHW_TO_HW(lhw);
- sbuf_new_for_sysctl(&s, NULL, 1024, req);
-
wiphy_lock(hw->wiphy);
list_for_each_entry(lsta, &lvif->lsta_list, lsta_list) {
sta = LSTA_TO_STA(lsta);
- sbuf_putc(&s, '\n');
- sbuf_printf(&s, "lsta %p sta %p added_to_drv %d\n", lsta, sta, lsta->added_to_drv);
+ sbuf_putc(s, '\n');
+ sbuf_printf(s, "lsta %p sta %p added_to_drv %d\n", lsta, sta, lsta->added_to_drv);
memset(&sinfo, 0, sizeof(sinfo));
error = lkpi_80211_mo_sta_statistics(hw, vif, sta, &sinfo);
if (error == EEXIST) /* Not added to driver. */
continue;
if (error == ENOTSUPP) {
- sbuf_printf(&s, " sta_statistics not supported\n");
+ sbuf_printf(s, " sta_statistics not supported\n");
continue;
}
if (error != 0) {
- sbuf_printf(&s, " sta_statistics failed: %d\n", error);
+ sbuf_printf(s, " sta_statistics failed: %d\n", error);
continue;
}
@@ -325,51 +324,76 @@ lkpi_80211_dump_stas(SYSCTL_HANDLER_ARGS)
memcpy(&sinfo.rxrate, &lsta->sinfo.rxrate, sizeof(sinfo.rxrate));
sinfo.filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
+ /* If no CHAIN_SIGNAL is reported, try to fill it in from the lsta sinfo. */
+ if ((sinfo.filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) == 0 &&
+ (lsta->sinfo.filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) != 0) {
+ sinfo.chains = lsta->sinfo.chains;
+ memcpy(sinfo.chain_signal, lsta->sinfo.chain_signal,
+ sizeof(sinfo.chain_signal));
+ sinfo.filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
- lkpi_nl80211_sta_info_to_str(&s, " nl80211_sta_info (valid fields)", sinfo.filled);
- sbuf_printf(&s, " connected_time %u inactive_time %u\n",
+ lkpi_nl80211_sta_info_to_str(s, " nl80211_sta_info (valid fields)", sinfo.filled);
+ sbuf_printf(s, " connected_time %u inactive_time %u\n",
sinfo.connected_time, sinfo.inactive_time);
- sbuf_printf(&s, " rx_bytes %ju rx_packets %u rx_dropped_misc %u\n",
+ sbuf_printf(s, " rx_bytes %ju rx_packets %u rx_dropped_misc %u\n",
(uintmax_t)sinfo.rx_bytes, sinfo.rx_packets, sinfo.rx_dropped_misc);
- sbuf_printf(&s, " rx_duration %ju rx_beacon %u rx_beacon_signal_avg %d\n",
+ sbuf_printf(s, " rx_duration %ju rx_beacon %u rx_beacon_signal_avg %d\n",
(uintmax_t)sinfo.rx_duration, sinfo.rx_beacon, (int8_t)sinfo.rx_beacon_signal_avg);
- sbuf_printf(&s, " tx_bytes %ju tx_packets %u tx_failed %u\n",
+ sbuf_printf(s, " tx_bytes %ju tx_packets %u tx_failed %u\n",
(uintmax_t)sinfo.tx_bytes, sinfo.tx_packets, sinfo.tx_failed);
- sbuf_printf(&s, " tx_duration %ju tx_retries %u\n",
+ sbuf_printf(s, " tx_duration %ju tx_retries %u\n",
(uintmax_t)sinfo.tx_duration, sinfo.tx_retries);
- sbuf_printf(&s, " signal %d signal_avg %d ack_signal %d avg_ack_signal %d\n",
+ sbuf_printf(s, " signal %d signal_avg %d ack_signal %d avg_ack_signal %d\n",
sinfo.signal, sinfo.signal_avg, sinfo.ack_signal, sinfo.avg_ack_signal);
-
- sbuf_printf(&s, " generation %d assoc_req_ies_len %zu chains %d\n",
+ sbuf_printf(s, " generation %d assoc_req_ies_len %zu chains %#04x\n",
sinfo.generation, sinfo.assoc_req_ies_len, sinfo.chains);
- for (int i = 0; i < sinfo.chains && i < IEEE80211_MAX_CHAINS; i++) {
- sbuf_printf(&s, " chain[%d] signal %d signal_avg %d\n",
+ for (int i = 0; i < nitems(sinfo.chain_signal) && i < IEEE80211_MAX_CHAINS; i++) {
+ if (!(sinfo.chains & BIT(i)))
+ continue;
+ sbuf_printf(s, " chain[%d] signal %d signal_avg %d\n",
i, (int8_t)sinfo.chain_signal[i], (int8_t)sinfo.chain_signal_avg[i]);
}
/* assoc_req_ies, bss_param, sta_flags */
- sbuf_printf(&s, " rxrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n",
+ sbuf_printf(s, " rxrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n",
sinfo.rxrate.flags, CFG80211_RATE_INFO_FLAGS_BITS,
sinfo.rxrate.bw, lkpi_rate_info_bw_to_str(sinfo.rxrate.bw),
sinfo.rxrate.legacy * 100,
sinfo.rxrate.mcs, sinfo.rxrate.nss);
- sbuf_printf(&s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n",
+ sbuf_printf(s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n",
sinfo.rxrate.he_dcm, sinfo.rxrate.he_gi, sinfo.rxrate.he_ru_alloc,
sinfo.rxrate.eht_gi);
- sbuf_printf(&s, " txrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n",
+ sbuf_printf(s, " txrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n",
sinfo.txrate.flags, CFG80211_RATE_INFO_FLAGS_BITS,
sinfo.txrate.bw, lkpi_rate_info_bw_to_str(sinfo.txrate.bw),
sinfo.txrate.legacy * 100,
sinfo.txrate.mcs, sinfo.txrate.nss);
- sbuf_printf(&s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n",
+ sbuf_printf(s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n",
sinfo.txrate.he_dcm, sinfo.txrate.he_gi, sinfo.txrate.he_ru_alloc,
sinfo.txrate.eht_gi);
}
wiphy_unlock(hw->wiphy);
+}
+
+static int
+lkpi_80211_dump_stas(SYSCTL_HANDLER_ARGS)
+{
+ struct lkpi_vif *lvif;
+ struct sbuf s;
+
+ if (req->newptr)
+ return (EPERM);
+
+ lvif = (struct lkpi_vif *)arg1;
+
+ sbuf_new_for_sysctl(&s, NULL, 1024, req);
+
+ lkpi_80211_dump_lvif_stas(lvif, &s);
sbuf_finish(&s);
sbuf_delete(&s);
@@ -377,7 +401,7 @@ lkpi_80211_dump_stas(SYSCTL_HANDLER_ARGS)
return (0);
}
-static enum ieee80211_sta_rx_bw
+static enum ieee80211_sta_rx_bandwidth
lkpi_cw_to_rx_bw(enum nl80211_chan_width cw)
{
switch (cw) {
@@ -401,7 +425,7 @@ lkpi_cw_to_rx_bw(enum nl80211_chan_width cw)
}
static enum nl80211_chan_width
-lkpi_rx_bw_to_cw(enum ieee80211_sta_rx_bw rx_bw)
+lkpi_rx_bw_to_cw(enum ieee80211_sta_rx_bandwidth rx_bw)
{
switch (rx_bw) {
case IEEE80211_STA_RX_BW_20:
@@ -422,7 +446,7 @@ lkpi_sync_chanctx_cw_from_rx_bw(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_sta_rx_bw old_bw;
+ enum ieee80211_sta_rx_bandwidth old_bw;
uint32_t changed;
chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf,
@@ -527,7 +551,7 @@ static void
lkpi_sta_sync_vht_from_ni(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_node *ni)
{
- enum ieee80211_sta_rx_bw bw;
+ enum ieee80211_sta_rx_bandwidth bw;
uint32_t width;
int rx_nss;
uint16_t rx_mcs_map;
@@ -938,6 +962,30 @@ lkpi_nl80211_band_to_net80211_band(enum nl80211_band band)
return (0x00);
}
+#ifdef LINUXKPI_DEBUG_80211
+static const char *
+lkpi_nl80211_band_name(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return "2GHz";
+ case NL80211_BAND_5GHZ:
+ return "5GHz";
+ case NL80211_BAND_60GHZ:
+ return "60GHz";
+ case NL80211_BAND_6GHZ:
+ return "6GHz";
+ default:
+ panic("%s: unsupported band %u\n", __func__, band);
+ }
+}
+#endif
+
#if 0
static enum ieee80211_ac_numbers
lkpi_ac_net_to_l80211(int ac)
@@ -1162,7 +1210,7 @@ lkpi_find_lkpi80211_chan(struct lkpi_hw *lhw,
channels = hw->wiphy->bands[band]->channels;
for (i = 0; i < nchans; i++) {
- if (channels[i].hw_value == c->ic_ieee)
+ if (channels[i].center_freq == c->ic_freq)
return (&channels[i]);
}
@@ -1302,6 +1350,7 @@ lkpi_iv_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
lhw = ic->ic_softc;
hw = LHW_TO_HW(lhw);
lvif = VAP_TO_LVIF(vap);
+ vif = LVIF_TO_VIF(lvif);
/*
* Make sure we do not make it here without going through
@@ -1309,6 +1358,23 @@ lkpi_iv_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
*/
lockdep_assert_wiphy(hw->wiphy);
+ /*
+ * While we are assoc we may still send packets. We cannot delete the
+ * keys as otherwise packets could go out unencrypted. Some firmware
+ * does not like this and will fire an assert.
+ * net80211 needs to drive this better, but given we want the disassoc
+ * frame to go out and have to unlock, we are currently open to a race.
+ * This check should prevent problems.
+ * How to test: run 800 Mbit/s UDP traffic and restart your supplicant
+ * while it runs; you want to survive that.
+ */
+ if (vif->cfg.assoc) {
+ if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
+ ic_printf(ic, "%d %lu %s: vif still assoc; not deleting keys\n",
+ curthread->td_tid, jiffies, __func__);
+ return (0);
+ }
+
if (IEEE80211_KEY_UNDEFINED(k)) {
ic_printf(ic, "%s: vap %p key %p is undefined: %p %u\n",
__func__, vap, k, k->wk_cipher, k->wk_keyix);
@@ -1353,7 +1419,6 @@ lkpi_iv_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS);
#endif
- vif = LVIF_TO_VIF(lvif);
error = lkpi_80211_mo_set_key(hw, DISABLE_KEY, vif, sta, kc);
if (error != 0) {
ic_printf(ic, "%d %lu %s: set_key cmd %d(%s) for sta %6D failed: %d\n",
@@ -1700,6 +1765,24 @@ lkpi_iv_key_update_end(struct ieee80211vap *vap)
}
#endif
+static void
+lkpi_cleanup_mcast_list_locked(struct lkpi_hw *lhw)
+{
+ struct list_head *le, *next;
+ struct netdev_hw_addr *addr;
+
+ if (lhw->mc_list.count != 0) {
+ list_for_each_safe(le, next, &lhw->mc_list.addr_list) {
+ addr = list_entry(le, struct netdev_hw_addr, addr_list);
+ list_del(le);
+ lhw->mc_list.count--;
+ free(addr, M_LKPI80211);
+ }
+ }
+ KASSERT(lhw->mc_list.count == 0, ("%s: mc_list %p count %d != 0\n",
+ __func__, &lhw->mc_list, lhw->mc_list.count));
+}
+
static u_int
lkpi_ic_update_mcast_copy(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
@@ -1736,16 +1819,13 @@ lkpi_ic_update_mcast_copy(void *arg, struct sockaddr_dl *sdl, u_int cnt)
}
static void
-lkpi_update_mcast_filter(struct ieee80211com *ic, bool force)
+lkpi_update_mcast_filter(struct ieee80211com *ic)
{
struct lkpi_hw *lhw;
struct ieee80211_hw *hw;
- struct netdev_hw_addr_list mc_list;
- struct list_head *le, *next;
- struct netdev_hw_addr *addr;
- struct ieee80211vap *vap;
u64 mc;
- unsigned int changed_flags, total_flags;
+ unsigned int changed_flags, flags;
+ bool scanning;
lhw = ic->ic_softc;
@@ -1753,44 +1833,32 @@ lkpi_update_mcast_filter(struct ieee80211com *ic, bool force)
lhw->ops->configure_filter == NULL)
return;
- if (!lhw->update_mc && !force)
- return;
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ scanning = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
- changed_flags = total_flags = 0;
- mc_list.count = 0;
- INIT_LIST_HEAD(&mc_list.addr_list);
- if (ic->ic_allmulti == 0) {
- TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
- if_foreach_llmaddr(vap->iv_ifp,
- lkpi_ic_update_mcast_copy, &mc_list);
- } else {
- changed_flags |= FIF_ALLMULTI;
- }
+ LKPI_80211_LHW_MC_LOCK(lhw);
+
+ flags = 0;
+ if (scanning)
+ flags |= FIF_BCN_PRBRESP_PROMISC;
+ if (lhw->mc_all_multi)
+ flags |= FIF_ALLMULTI;
hw = LHW_TO_HW(lhw);
- mc = lkpi_80211_mo_prepare_multicast(hw, &mc_list);
- /*
- * XXX-BZ make sure to get this sorted what is a change,
- * what gets all set; what was already set?
- */
- total_flags = changed_flags;
- lkpi_80211_mo_configure_filter(hw, changed_flags, &total_flags, mc);
+ mc = lkpi_80211_mo_prepare_multicast(hw, &lhw->mc_list);
+
+ changed_flags = (lhw->mc_flags ^ flags) & FIF_FLAGS_MASK;
+ lkpi_80211_mo_configure_filter(hw, changed_flags, &flags, mc);
+ lhw->mc_flags = flags;
#ifdef LINUXKPI_DEBUG_80211
if (linuxkpi_debug_80211 & D80211_TRACE)
- printf("%s: changed_flags %#06x count %d total_flags %#010x\n",
- __func__, changed_flags, mc_list.count, total_flags);
+ printf("%s: changed_flags %#06x count %d mc_flags %#010x\n",
+ __func__, changed_flags, lhw->mc_list.count, lhw->mc_flags);
#endif
- if (mc_list.count != 0) {
- list_for_each_safe(le, next, &mc_list.addr_list) {
- addr = list_entry(le, struct netdev_hw_addr, addr_list);
- free(addr, M_LKPI80211);
- mc_list.count--;
- }
- }
- KASSERT(mc_list.count == 0, ("%s: mc_list %p count %d != 0\n",
- __func__, &mc_list, mc_list.count));
+ LKPI_80211_LHW_MC_UNLOCK(lhw);
}
static enum ieee80211_bss_changed
@@ -1822,13 +1890,31 @@ lkpi_update_dtim_tsf(struct ieee80211_vif *vif, struct ieee80211_node *ni,
vif->bss_conf.beacon_int = 16;
bss_changed |= BSS_CHANGED_BEACON_INT;
}
- if (vif->bss_conf.dtim_period != vap->iv_dtim_period &&
- vap->iv_dtim_period > 0) {
- vif->bss_conf.dtim_period = vap->iv_dtim_period;
+
+ /*
+ * lkpi_iv_sta_recv_mgmt() will directly call into this function.
+ * iwlwifi(4) in iwl_mvm_bss_info_changed_station_common() will
+ * stop session protection the moment it sees
+ * BSS_CHANGED_BEACON_INFO (with the expectation that it was
+ * "a beacon from the associated AP"). It will also update
+ * the beacon filter in that case. This is the only place
+ * we set BSS_CHANGED_BEACON_INFO on the non-teardown
+ * path, so make sure we only run this check once we are
+ * assoc. (*iv_recv_mgmt)() will be called before we enter
+ * here so the ni will be updated with information from the
+ * beacon via net80211::sta_recv_mgmt(). We also need to
+ * make sure we do not do it on every beacon we may still
+ * get, so only do it if something changed. vif->bss_conf.dtim_period
+ * should be 0 as we start up (we also reset it on teardown).
+ */
+ if (vif->cfg.assoc &&
+ vif->bss_conf.dtim_period != ni->ni_dtim_period &&
+ ni->ni_dtim_period > 0) {
+ vif->bss_conf.dtim_period = ni->ni_dtim_period;
bss_changed |= BSS_CHANGED_BEACON_INFO;
}
- vif->bss_conf.sync_dtim_count = vap->iv_dtim_count;
+ vif->bss_conf.sync_dtim_count = ni->ni_dtim_count;
vif->bss_conf.sync_tsf = le64toh(ni->ni_tstamp.tsf);
/* vif->bss_conf.sync_device_ts = set in linuxkpi_ieee80211_rx. */
@@ -1856,6 +1942,8 @@ lkpi_stop_hw_scan(struct lkpi_hw *lhw, struct ieee80211_vif *vif)
int error;
bool cancel;
+ TRACE_SCAN(lhw->ic, "scan_flags %b", lhw->scan_flags, LKPI_LHW_SCAN_BITS);
+
LKPI_80211_LHW_SCAN_LOCK(lhw);
cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
@@ -1909,19 +1997,19 @@ lkpi_disassoc(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
struct lkpi_hw *lhw)
{
enum ieee80211_bss_changed changed;
+ struct lkpi_vif *lvif;
changed = 0;
sta->aid = 0;
if (vif->cfg.assoc) {
- lhw->update_mc = true;
- lkpi_update_mcast_filter(lhw->ic, true);
-
vif->cfg.assoc = false;
vif->cfg.aid = 0;
changed |= BSS_CHANGED_ASSOC;
IMPROVE();
+ lkpi_update_mcast_filter(lhw->ic);
+
/*
* Executing the bss_info_changed(BSS_CHANGED_ASSOC) with
* assoc = false right away here will remove the sta from
@@ -1932,6 +2020,9 @@ lkpi_disassoc(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
* bss_info_changed() update.
* See lkpi_sta_run_to_init() for more detailed comment.
*/
+
+ lvif = VIF_TO_LVIF(vif);
+ lvif->beacons = 0;
}
return (changed);
@@ -2202,6 +2293,7 @@ lkpi_sta_scan_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int
/* vif->bss_conf.basic_rates ? Where exactly? */
+ lvif->beacons = 0;
/* Should almost assert it is this. */
vif->cfg.assoc = false;
vif->cfg.aid = 0;
@@ -2391,6 +2483,7 @@ lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int
struct lkpi_sta *lsta;
struct ieee80211_sta *sta;
struct ieee80211_prep_tx_info prep_tx_info;
+ enum ieee80211_bss_changed bss_changed;
int error;
lhw = vap->iv_ic->ic_softc;
@@ -2462,6 +2555,11 @@ lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int
lsta->added_to_drv = false; /* mo manages. */
#endif
+ bss_changed = 0;
+ vif->bss_conf.dtim_period = 0; /* go back to 0. */
+ bss_changed |= BSS_CHANGED_BEACON_INFO;
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+
lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
LKPI_80211_LVIF_LOCK(lvif);
@@ -2470,12 +2568,6 @@ lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int
lvif->lvif_bss_synched = false;
LKPI_80211_LVIF_UNLOCK(lvif);
lkpi_lsta_remove(lsta, lvif);
- /*
- * The very last release the reference on the ni for the ni/lsta on
- * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
- * and potentially freed.
- */
- ieee80211_free_node(ni);
/* conf_tx */
@@ -2484,6 +2576,18 @@ lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int
out:
wiphy_unlock(hw->wiphy);
IEEE80211_LOCK(vap->iv_ic);
+ if (error == 0) {
+ /*
+ * We do this outside the wiphy lock as net80211::node_free() may call
+ * into crypto code to delete keys and we would otherwise panic with
+ * "recursed on non-recursive sx". Also only do this if we get here
+ * without error.
+ *
+ * This releases the very last reference on the ni for the ni/lsta on
+ * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
+ * and potentially freed.
+ */
+ ieee80211_free_node(ni);
+ }
return (error);
}
@@ -2768,6 +2872,14 @@ _lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, i
bss_changed = 0;
bss_changed |= lkpi_disassoc(sta, vif, lhw);
+#ifdef LKPI_80211_HW_CRYPTO
+ /*
+ * In theory we remove keys here but there must not exist any for this
+ * state change until we clean them up again into small steps and no
+ * code duplication.
+ */
+#endif
+
lkpi_lsta_dump(lsta, ni, __func__, __LINE__);
/* Adjust sta and change state (from NONE) to NOTEXIST. */
@@ -2790,6 +2902,8 @@ _lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, i
vif->cfg.ssid_len = 0;
memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid));
bss_changed |= BSS_CHANGED_BSSID;
+ vif->bss_conf.dtim_period = 0; /* go back to 0. */
+ bss_changed |= BSS_CHANGED_BEACON_INFO;
lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
LKPI_80211_LVIF_LOCK(lvif);
@@ -2798,12 +2912,6 @@ _lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, i
lvif->lvif_bss_synched = false;
LKPI_80211_LVIF_UNLOCK(lvif);
lkpi_lsta_remove(lsta, lvif);
- /*
- * The very last release the reference on the ni for the ni/lsta on
- * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
- * and potentially freed.
- */
- ieee80211_free_node(ni);
/* conf_tx */
@@ -2813,6 +2921,18 @@ _lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, i
out:
wiphy_unlock(hw->wiphy);
IEEE80211_LOCK(vap->iv_ic);
+ if (error == EALREADY) {
+ /*
+ * We do this outside the wiphy lock as net80211::node_free() may call
+ * into crypto code to delete keys and we would otherwise panic with
+ * "recursed on non-recursive sx". Also only do this if we get here
+ * without error.
+ *
+ * This releases the very last reference on the ni for the ni/lsta on
+ * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
+ * and potentially freed.
+ */
+ ieee80211_free_node(ni);
+ }
outni:
return (error);
}
@@ -2922,6 +3042,7 @@ lkpi_sta_assoc_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int
bss_changed |= lkpi_wme_update(lhw, vap, true);
#endif
if (!vif->cfg.assoc || vif->cfg.aid != IEEE80211_NODE_AID(ni)) {
+ lvif->beacons = 0;
vif->cfg.assoc = true;
vif->cfg.aid = IEEE80211_NODE_AID(ni);
bss_changed |= BSS_CHANGED_ASSOC;
@@ -2970,9 +3091,6 @@ lkpi_sta_assoc_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int
* - set_key (?)
* - ipv6_addr_change (?)
*/
- /* Prepare_multicast && configure_filter. */
- lhw->update_mc = true;
- lkpi_update_mcast_filter(vap->iv_ic, true);
if (!ieee80211_node_is_authorized(ni)) {
IMPROVE("net80211 does not consider node authorized");
@@ -3011,6 +3129,9 @@ lkpi_sta_assoc_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int
bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);
lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+ /* Prepare_multicast && configure_filter. */
+ lkpi_update_mcast_filter(vap->iv_ic);
+
out:
wiphy_unlock(hw->wiphy);
IEEE80211_LOCK(vap->iv_ic);
@@ -3300,6 +3421,16 @@ lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int
#ifdef LKPI_80211_HW_CRYPTO
if (lkpi_hwcrypto) {
+ /*
+ * In theory we only need to do this if we changed assoc.
+ * If we were not assoc, there should be no keys and we
+ * should not be here.
+ */
+#ifdef notyet
+ KASSERT((bss_changed & BSS_CHANGED_ASSOC) != 0, ("%s: "
+ "trying to remove keys but were not assoc: %#010jx, lvif %p\n",
+ __func__, (uintmax_t)bss_changed, lvif));
+#endif
error = lkpi_sta_del_keys(hw, vif, lsta);
if (error != 0) {
ic_printf(vap->iv_ic, "%s:%d: lkpi_sta_del_keys "
@@ -3361,6 +3492,9 @@ lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int
* 4) call unassign_vif_chanctx
* 5) call lkpi_hw_conf_idle
* 6) call remove_chanctx
+ *
+ * Note: vif->driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC
+ * might change this.
*/
bss_changed |= lkpi_disassoc(sta, vif, lhw);
@@ -3391,6 +3525,8 @@ lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int
vif->bss_conf.use_short_preamble = false;
vif->bss_conf.qos = false;
/* XXX BSS_CHANGED_???? */
+ vif->bss_conf.dtim_period = 0; /* go back to 0. */
+ bss_changed |= BSS_CHANGED_BEACON_INFO;
lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
LKPI_80211_LVIF_LOCK(lvif);
@@ -3398,12 +3534,6 @@ lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int
lvif->lvif_bss = NULL;
lvif->lvif_bss_synched = false;
LKPI_80211_LVIF_UNLOCK(lvif);
- /*
- * The very last release the reference on the ni for the ni/lsta on
- * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
- * and potentially freed.
- */
- ieee80211_free_node(ni);
/* conf_tx */
@@ -3413,6 +3543,18 @@ lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int
out:
wiphy_unlock(hw->wiphy);
IEEE80211_LOCK(vap->iv_ic);
+ if (error == EALREADY) {
+ /*
+ * We do this outside the wiphy lock as net80211::node_free() may call
+ * into crypto code to delete keys and we would otherwise panic with
+ * "recursed on non-recursive sx". Also only do this if we get here
+ * without error.
+ *
+ * This releases the very last reference on the ni for the ni/lsta on
+ * lvif->lvif_bss. Upon return from this both ni and lsta are invalid
+ * and potentially freed.
+ */
+ ieee80211_free_node(ni);
+ }
outni:
return (error);
}
@@ -3510,7 +3652,7 @@ lkpi_iv_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
vif = LVIF_TO_VIF(lvif);
/* No need to replicate this in most state handlers. */
- if (ostate == IEEE80211_S_SCAN && nstate != IEEE80211_S_SCAN)
+ if (nstate > IEEE80211_S_SCAN)
lkpi_stop_hw_scan(lhw, vif);
s = sta_state_fsm;
@@ -3693,6 +3835,48 @@ lkpi_ic_wme_update(struct ieee80211com *ic)
return (0); /* unused */
}
+static void
+lkpi_iv_sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf)
+{
+ struct lkpi_hw *lhw;
+ struct ieee80211_hw *hw;
+ struct lkpi_vif *lvif;
+ struct ieee80211_vif *vif;
+ enum ieee80211_bss_changed bss_changed;
+
+ lvif = VAP_TO_LVIF(ni->ni_vap);
+ vif = LVIF_TO_VIF(lvif);
+
+ lvif->iv_recv_mgmt(ni, m0, subtype, rxs, rssi, nf);
+
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ break;
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ /*
+ * Only count beacons when assoc. SCAN has its own logging.
+ * This is mostly for debugging connection/beacon loss and session
+ * protection when trying to get into a stable RUN state.
+ */
+ if (vif->cfg.assoc)
+ lvif->beacons++;
+ break;
+ default:
+ return;
+ }
+
+ lhw = ni->ni_ic->ic_softc;
+ hw = LHW_TO_HW(lhw);
+
+ /*
+ * If this direct call to mo_bss_info_changed will not work due to
+ * locking, see if queue_work() is fast enough.
+ */
+ bss_changed = lkpi_update_dtim_tsf(vif, ni, ni->ni_vap, __func__, __LINE__);
+ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);
+}
+
/*
* Change link-layer address on the vif (if the vap is not started/"UP").
* This can happen if a user changes 'ether' using ifconfig.
@@ -3753,6 +3937,7 @@ lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
lvif = malloc(len, M_80211_VAP, M_WAITOK | M_ZERO);
mtx_init(&lvif->mtx, "lvif", NULL, MTX_DEF);
+ TASK_INIT(&lvif->sw_scan_task, 0, lkpi_sw_scan_task, lvif);
INIT_LIST_HEAD(&lvif->lsta_list);
lvif->lvif_bss = NULL;
refcount_init(&lvif->nt_unlocked, 0);
@@ -3826,8 +4011,10 @@ lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
IMPROVE();
+ wiphy_lock(hw->wiphy);
error = lkpi_80211_mo_start(hw);
if (error != 0) {
+ wiphy_unlock(hw->wiphy);
ic_printf(ic, "%s: failed to start hw: %d\n", __func__, error);
mtx_destroy(&lvif->mtx);
free(lvif, M_80211_VAP);
@@ -3837,11 +4024,13 @@ lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
error = lkpi_80211_mo_add_interface(hw, vif);
if (error != 0) {
IMPROVE(); /* XXX-BZ mo_stop()? */
+ wiphy_unlock(hw->wiphy);
ic_printf(ic, "%s: failed to add interface: %d\n", __func__, error);
mtx_destroy(&lvif->mtx);
free(lvif, M_80211_VAP);
return (NULL);
}
+ wiphy_unlock(hw->wiphy);
LKPI_80211_LHW_LVIF_LOCK(lhw);
TAILQ_INSERT_TAIL(&lhw->lvif_head, lvif, lvif_entry);
@@ -3871,17 +4060,21 @@ lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed);
/* Force MC init. */
- lkpi_update_mcast_filter(ic, true);
-
- IMPROVE();
+ lkpi_update_mcast_filter(ic);
ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
+ /* Now we have a valid vap->iv_ifp. Any checksum offloading goes below. */
+
+ IMPROVE();
+
/* Override with LinuxKPI method so we can drive mac80211/cfg80211. */
lvif->iv_newstate = vap->iv_newstate;
vap->iv_newstate = lkpi_iv_newstate;
lvif->iv_update_bss = vap->iv_update_bss;
vap->iv_update_bss = lkpi_iv_update_bss;
+ lvif->iv_recv_mgmt = vap->iv_recv_mgmt;
+ vap->iv_recv_mgmt = lkpi_iv_sta_recv_mgmt;
#ifdef LKPI_80211_HW_CRYPTO
/* Key management. */
@@ -3908,13 +4101,9 @@ lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
* Modern chipset/fw/drv will do A-MPDU in drv/fw and fail
* to do so if they cannot do the crypto too.
*/
- if (!lkpi_hwcrypto && ieee80211_hw_check(hw, AMPDU_AGGREGATION))
+ if (!lkpi_hwcrypto && IEEE80211_CONF_AMPDU_OFFLOAD(ic))
vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX;
#endif
-#if defined(LKPI_80211_HT)
- /* 20250125-BZ Keep A-MPDU TX cleared until we sorted out AddBA for all drivers. */
- vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX;
-#endif
if (hw->max_listen_interval == 0)
hw->max_listen_interval = 7 * (ic->ic_lintval / ic->ic_bintval);
@@ -3983,6 +4172,8 @@ lkpi_ic_vap_delete(struct ieee80211vap *vap)
/* Clear up per-VIF/VAP sysctls. */
sysctl_ctx_free(&lvif->sysctl_ctx);
+ ieee80211_draintask(ic, &lvif->sw_scan_task);
+
LKPI_80211_LHW_LVIF_LOCK(lhw);
TAILQ_REMOVE(&lhw->lvif_head, lvif, lvif_entry);
LKPI_80211_LHW_LVIF_UNLOCK(lhw);
@@ -4004,8 +4195,30 @@ lkpi_ic_vap_delete(struct ieee80211vap *vap)
static void
lkpi_ic_update_mcast(struct ieee80211com *ic)
{
+ struct ieee80211vap *vap;
+ struct lkpi_hw *lhw;
+
+ lhw = ic->ic_softc;
+ if (lhw->ops->prepare_multicast == NULL ||
+ lhw->ops->configure_filter == NULL)
+ return;
+
+ LKPI_80211_LHW_MC_LOCK(lhw);
+ /* Cleanup anything on the current list. */
+ lkpi_cleanup_mcast_list_locked(lhw);
+
+ /* Build up the new list (or allmulti). */
+ if (ic->ic_allmulti == 0) {
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if_foreach_llmaddr(vap->iv_ifp,
+ lkpi_ic_update_mcast_copy, &lhw->mc_list);
+ lhw->mc_all_multi = false;
+ } else {
+ lhw->mc_all_multi = true;
+ }
+ LKPI_80211_LHW_MC_UNLOCK(lhw);
- lkpi_update_mcast_filter(ic, false);
+ lkpi_update_mcast_filter(ic);
TRACEOK();
}
@@ -4202,6 +4415,113 @@ lkpi_scan_ies_add(uint8_t *p, struct ieee80211_scan_ies *scan_ies,
}
static void
+lkpi_enable_hw_scan(struct lkpi_hw *lhw)
+{
+
+ if (lhw->ops->hw_scan) {
+ /*
+ * Advertise full-offload scanning.
+ *
+ * Not limiting to SINGLE_SCAN_ON_ALL_BANDS here as otherwise
+ * we essentially disable hw_scan for all drivers not setting
+ * the flag.
+ */
+ lhw->ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ lhw->scan_flags |= LKPI_LHW_SCAN_HW;
+ }
+}
+
+#ifndef LKPI_80211_USE_SCANLIST
+static const uint32_t chan_pri[] = {
+ 5180, 5500, 5745,
+ 5260, 5580, 5660, 5825,
+ 5220, 5300, 5540, 5620, 5700, 5785, 5865,
+ 2437, 2412, 2422, 2462, 2472, 2432, 2452
+};
+
+static int
+lkpi_scan_chan_list_idx(const struct linuxkpi_ieee80211_channel *lc)
+{
+ int i;
+
+ for (i = 0; i < nitems(chan_pri); i++) {
+ if (lc->center_freq == chan_pri[i])
+ return (i);
+ }
+
+ return (-1);
+}
+
+static int
+lkpi_scan_chan_list_comp(const struct linuxkpi_ieee80211_channel *lc1,
+ const struct linuxkpi_ieee80211_channel *lc2)
+{
+ int idx1, idx2;
+
+ /* Find index in list. */
+ idx1 = lkpi_scan_chan_list_idx(lc1);
+ idx2 = lkpi_scan_chan_list_idx(lc2);
+
+ if (idx1 == -1 && idx2 != -1)
+ return (1);
+ if (idx1 != -1 && idx2 == -1)
+ return (-1);
+
+ /* Neither on the list, use center_freq. */
+ if (idx1 == -1 && idx2 == -1)
+ return (lc1->center_freq - lc2->center_freq);
+
+ /* Whichever is first in the list. */
+ return (idx1 - idx2);
+}
+
+static void
+lkpi_scan_chan_list_resort(struct linuxkpi_ieee80211_channel **cpp, size_t nchan)
+{
+ struct linuxkpi_ieee80211_channel *lc, *nc;
+ size_t i, j;
+ int rc;
+
+ for (i = (nchan - 1); i > 0; i--) {
+ for (j = i; j > 0 ; j--) {
+ lc = *(cpp + j);
+ nc = *(cpp + j - 1);
+ rc = lkpi_scan_chan_list_comp(lc, nc);
+ if (rc < 0) {
+ *(cpp + j) = nc;
+ *(cpp + j - 1) = lc;
+ }
+ }
+ }
+}
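For illustration, assuming a hypothetical candidate set of 2412, 2437, 5180, 5500 and 5975 MHz, the comparator sorts priority-list entries first (in chan_pri order) and unlisted channels last by ascending frequency:

	/* input:  2412, 2437, 5180, 5500, 5975
	 * output: 5180, 5500, 2437, 2412, 5975
	 * (5180/5500/2437/2412 follow their chan_pri positions; 5975 is
	 * not on the list and therefore sorts last)
	 */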
+
+static bool
+lkpi_scan_chan(struct linuxkpi_ieee80211_channel *c,
+ struct ieee80211com *ic, bool log)
+{
+
+ if ((c->flags & IEEE80211_CHAN_DISABLED) != 0) {
+ if (log)
+ TRACE_SCAN(ic, "Skipping disabled chan "
+ "on band %s [%#x/%u/%#x]",
+ lkpi_nl80211_band_name(c->band), c->hw_value,
+ c->center_freq, c->flags);
+ return (false);
+ }
+ if (isclr(ic->ic_chan_active, ieee80211_mhz2ieee(c->center_freq,
+ lkpi_nl80211_band_to_net80211_band(c->band)))) {
+ if (log)
+ TRACE_SCAN(ic, "Skipping !active chan "
+ "on band %s [%#x/%u/%#x]",
+ lkpi_nl80211_band_name(c->band), c->hw_value,
+ c->center_freq, c->flags);
+ return (false);
+ }
+ return (true);
+}
+#endif
+
+static void
lkpi_ic_scan_start(struct ieee80211com *ic)
{
struct lkpi_hw *lhw;
@@ -4214,33 +4534,52 @@ lkpi_ic_scan_start(struct ieee80211com *ic)
bool is_hw_scan;
lhw = ic->ic_softc;
+ ss = ic->ic_scan;
+ vap = ss->ss_vap;
+ TRACE_SCAN(ic, "scan_flags %b", lhw->scan_flags, LKPI_LHW_SCAN_BITS);
+
LKPI_80211_LHW_SCAN_LOCK(lhw);
if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) {
/* A scan is still running. */
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ TRACE_SCAN(ic, "Trying to start new scan while still running; "
+ "cancelling new net80211 scan; scan_flags %b",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS);
+ ieee80211_cancel_scan(vap);
return;
}
is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
- ss = ic->ic_scan;
- vap = ss->ss_vap;
+#if 0
if (vap->iv_state != IEEE80211_S_SCAN) {
- IMPROVE("We need to be able to scan if not in S_SCAN");
+ TODO("We need to be able to scan if not in S_SCAN");
+ TRACE_SCAN(ic, "scan_flags %b iv_state %d",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS, vap->iv_state);
+ ieee80211_cancel_scan(vap);
return;
}
+#endif
hw = LHW_TO_HW(lhw);
if (!is_hw_scan) {
/* If hw_scan is cleared clear FEXT_SCAN_OFFLOAD too. */
vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD;
-sw_scan:
+
lvif = VAP_TO_LVIF(vap);
vif = LVIF_TO_VIF(lvif);
if (vap->iv_state == IEEE80211_S_SCAN)
lkpi_hw_conf_idle(hw, false);
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ lhw->scan_flags |= LKPI_LHW_SCAN_RUNNING;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ lkpi_update_mcast_filter(ic);
+
+ TRACE_SCAN(vap->iv_ic, "Starting SW_SCAN: scan_flags %b",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS);
lkpi_80211_mo_sw_scan_start(hw, vif, vif->addr);
/* net80211::scan_start() handled PS for us. */
IMPROVE();
@@ -4255,6 +4594,9 @@ sw_scan:
struct cfg80211_scan_6ghz_params *s6gp;
size_t chan_len, nchan, ssids_len, s6ghzlen;
int band, i, ssid_count, common_ie_len;
+#ifndef LKPI_80211_USE_SCANLIST
+ int n;
+#endif
uint32_t band_mask;
uint8_t *ie, *ieend;
bool running;
@@ -4266,7 +4608,8 @@ sw_scan:
band_mask = 0;
nchan = 0;
if (ieee80211_hw_check(hw, SINGLE_SCAN_ON_ALL_BANDS)) {
-#if 0 /* Avoid net80211 scan lists until it has proper scan offload support. */
+#ifdef LKPI_80211_USE_SCANLIST
+ /* Avoid net80211 scan lists until it has proper scan offload support. */
for (i = ss->ss_next; i < ss->ss_last; i++) {
nchan++;
band = lkpi_net80211_chan_to_nl80211_band(
@@ -4284,8 +4627,17 @@ sw_scan:
continue;
}
if (hw->wiphy->bands[band] != NULL) {
- nchan += hw->wiphy->bands[band]->n_channels;
+ struct linuxkpi_ieee80211_channel *channels;
+ int n;
+
band_mask |= (1 << band);
+
+ channels = hw->wiphy->bands[band]->channels;
+ n = hw->wiphy->bands[band]->n_channels;
+ for (i = 0; i < n; i++) {
+ if (lkpi_scan_chan(&channels[i], ic, true))
+ nchan++;
+ }
}
}
#endif
@@ -4324,11 +4676,32 @@ sw_scan:
/* hw_req->req.wdev */
hw_req->req.wiphy = hw->wiphy;
hw_req->req.no_cck = false; /* XXX */
-#if 0
- /* This seems to pessimise default scanning behaviour. */
- hw_req->req.duration_mandatory = TICKS_2_USEC(ss->ss_mindwell);
- hw_req->req.duration = TICKS_2_USEC(ss->ss_maxdwell);
-#endif
+
+ /*
+ * In general setting duration[_mandatory] seems to pessimise
+ * default scanning behaviour. We only use it for BGSCANning
+ * to keep the dwell times small.
+ * Setting duration_mandatory makes this the maximum dwell
+ * time (otherwise it may be shorter). Duration is in TU.
+ */
+ if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) != 0) {
+ unsigned long dwell;
+
+ if ((ic->ic_caps & IEEE80211_C_BGSCAN) == 0 ||
+ (vap->iv_flags & IEEE80211_F_BGSCAN) == 0)
+ ic_printf(ic, "BGSCAN despite off: %b, %b, %b\n",
+ ic->ic_flags_ext, IEEE80211_FEXT_BITS,
+ vap->iv_flags, IEEE80211_F_BITS,
+ ic->ic_caps, IEEE80211_C_BITS);
+
+ dwell = ss->ss_mindwell;
+ if (dwell == 0)
+ dwell = msecs_to_ticks(20);
+
+ hw_req->req.duration_mandatory = true;
+ hw_req->req.duration = TICKS_2_USEC(dwell) / 1024;
+ }
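One TU is 1024 us, hence the final division; with the 20 ms fallback mindwell the numbers work out as follows (assuming the tick conversion is exact for the configured hz):

    dwell = msecs_to_ticks(20);		/* 20 ms worth of ticks */
    /* TICKS_2_USEC(dwell) == 20000 us */
    /* hw_req->req.duration == 20000 / 1024 == 19 TU */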
+
#ifdef __notyet__
hw_req->req.flags |= NL80211_SCAN_FLAG_RANDOM_ADDR;
memcpy(hw_req->req.mac_addr, xxx, IEEE80211_ADDR_LEN);
@@ -4339,16 +4712,16 @@ sw_scan:
hw_req->req.n_channels = nchan;
cpp = (struct linuxkpi_ieee80211_channel **)(hw_req + 1);
lc = (struct linuxkpi_ieee80211_channel *)(cpp + nchan);
+#ifdef LKPI_80211_USE_SCANLIST
for (i = 0; i < nchan; i++) {
*(cpp + i) =
(struct linuxkpi_ieee80211_channel *)(lc + i);
}
-#if 0 /* Avoid net80211 scan lists until it has proper scan offload support. */
+ /* Avoid net80211 scan lists until it has proper scan offload support. */
for (i = 0; i < nchan; i++) {
struct ieee80211_channel *c;
c = ss->ss_chans[ss->ss_next + i];
- lc->hw_value = c->ic_ieee;
lc->center_freq = c->ic_freq; /* XXX */
/* lc->flags */
lc->band = lkpi_net80211_chan_to_nl80211_band(c);
@@ -4357,7 +4730,9 @@ sw_scan:
lc++;
}
#else
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ /* Add bands in reverse order for scanning. */
+ n = 0;
+ for (band = NUM_NL80211_BANDS - 1; band >= 0; band--) {
struct ieee80211_supported_band *supband;
struct linuxkpi_ieee80211_channel *channels;
@@ -4372,9 +4747,27 @@ sw_scan:
channels = supband->channels;
for (i = 0; i < supband->n_channels; i++) {
- *lc = channels[i];
- lc++;
+ if (lkpi_scan_chan(&channels[i], ic, false))
+ *(cpp + n++) = &channels[i];
+ }
+ }
+ if (lkpi_order_scanlist)
+ lkpi_scan_chan_list_resort(cpp, nchan);
+
+ if ((linuxkpi_debug_80211 & D80211_SCAN) != 0) {
+ printf("%s:%d: %s SCAN Channel List (nchan=%zu): ",
+ __func__, __LINE__, ic->ic_name, nchan);
+ for (i = 0; i < nchan; i++) {
+ struct linuxkpi_ieee80211_channel *xc;
+
+ xc = *(cpp + i);
+ printf(" %d(%d)",
+ ieee80211_mhz2ieee(xc->center_freq,
+ lkpi_nl80211_band_to_net80211_band(
+ xc->band)),
+ xc->center_freq);
}
+ printf("\n");
}
#endif
@@ -4404,6 +4797,7 @@ sw_scan:
ieend = lkpi_scan_ies_add(ie, &hw_req->ies, band_mask, vap, hw);
hw_req->req.ie = ie;
hw_req->req.ie_len = ieend - ie;
+ hw_req->req.scan_start = jiffies;
lvif = VAP_TO_LVIF(vap);
vif = LVIF_TO_VIF(lvif);
@@ -4421,11 +4815,30 @@ sw_scan:
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
if (running) {
free(hw_req, M_LKPI80211);
+ TRACE_SCAN(ic, "Trying to start new scan while still "
+ "running (2); cancelling new net80211 scan; "
+ "scan_flags %b",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS);
+ ieee80211_cancel_scan(vap);
return;
}
+ lkpi_update_mcast_filter(ic);
+ TRACE_SCAN(ic, "Starting HW_SCAN: scan_flags %b, "
+ "ie_len %d, n_ssids %d, n_chan %d, common_ie_len %d [%d, %d]",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS, hw_req->req.ie_len,
+ hw_req->req.n_ssids, hw_req->req.n_channels,
+ hw_req->ies.common_ie_len,
+ hw_req->ies.len[NL80211_BAND_2GHZ],
+ hw_req->ies.len[NL80211_BAND_5GHZ]);
+
error = lkpi_80211_mo_hw_scan(hw, vif, hw_req);
if (error != 0) {
+ bool scan_done;
+ int e;
+
+ TRACE_SCAN(ic, "hw_scan failed; scan_flags %b, error %d",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS, error);
ieee80211_cancel_scan(vap);
/*
@@ -4442,13 +4855,35 @@ sw_scan:
* So we cannot rely on that behaviour and have to check
* and balance between both code paths.
*/
+ e = 0;
+ scan_done = true;
LKPI_80211_LHW_SCAN_LOCK(lhw);
if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) {
free(lhw->hw_req, M_LKPI80211);
lhw->hw_req = NULL;
+ /*
+ * The ieee80211_cancel_scan() above runs in a
+ * taskq and it may take ages for the previous
+ * scan to clear; if we start a new one right away
+ * we run into the problem that the old one is
+ * still active.
+ */
+ e = msleep(lhw, &lhw->scan_mtx, 0, "lhwscanstop", hz);
+ scan_done = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) == 0;
+
+ /*
+ * Now we can clear running if no one else did.
+ */
lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING;
}
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+ lkpi_update_mcast_filter(ic);
+ if (!scan_done) {
+ ic_printf(ic, "ERROR: %s: timeout/error to wait "
+ "for ieee80211_cancel_scan: %d\n", __func__, e);
+ return;
+ }
/*
* XXX-SIGH magic number.
@@ -4456,24 +4891,15 @@ sw_scan:
* not possible. Fall back to sw scan in that case.
*/
if (error == 1) {
- LKPI_80211_LHW_SCAN_LOCK(lhw);
- lhw->scan_flags &= ~LKPI_LHW_SCAN_HW;
- LKPI_80211_LHW_SCAN_UNLOCK(lhw);
/*
- * XXX If we clear this now and later a driver
- * thinks it * can do a hw_scan again, we will
- * currently not re-enable it?
+ * We need to put this into some deferred context:
+ * the net80211 scan may not be done yet
+ * (ic_flags & IEEE80211_F_SCAN) and we cannot
+ * wait here; if we do, scan_curchan_task always
+ * runs after our timeout to finalize the scan.
*/
- vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD;
- ieee80211_start_scan(vap,
- IEEE80211_SCAN_ACTIVE |
- IEEE80211_SCAN_NOPICK |
- IEEE80211_SCAN_ONCE,
- IEEE80211_SCAN_FOREVER,
- ss->ss_mindwell ? ss->ss_mindwell : msecs_to_ticks(20),
- ss->ss_maxdwell ? ss->ss_maxdwell : msecs_to_ticks(200),
- vap->iv_des_nssid, vap->iv_des_ssid);
- goto sw_scan;
+ ieee80211_runtask(ic, &lvif->sw_scan_task);
+ return;
}
ic_printf(ic, "ERROR: %s: hw_scan returned %d\n",
@@ -4483,12 +4909,50 @@ sw_scan:
}
static void
+lkpi_sw_scan_task(void *arg, int pending __unused)
+{
+ struct lkpi_hw *lhw;
+ struct lkpi_vif *lvif;
+ struct ieee80211vap *vap;
+ struct ieee80211_scan_state *ss;
+
+ lvif = arg;
+ vap = LVIF_TO_VAP(lvif);
+ lhw = vap->iv_ic->ic_softc;
+ ss = vap->iv_ic->ic_scan;
+
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ /*
+ * We will re-enable this at scan_end by calling lkpi_enable_hw_scan().
+ * IEEE80211_FEXT_SCAN_OFFLOAD will be cleared by lkpi_ic_scan_start.
+ */
+ lhw->scan_flags &= ~LKPI_LHW_SCAN_HW;
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
+
+ TRACE_SCAN(vap->iv_ic, "Triggering SW_SCAN: pending %d, scan_flags %b",
+ pending, lhw->scan_flags, LKPI_LHW_SCAN_BITS);
+
+ /*
+ * This will call ic_scan_start() and we will get into the right path
+ * unless other scans started in between.
+ */
+ ieee80211_start_scan(vap,
+ IEEE80211_SCAN_ONCE,
+ msecs_to_ticks(10000), /* 10000 ms (=~ 50 chan * 200 ms) */
+ ss->ss_mindwell ? ss->ss_mindwell : msecs_to_ticks(20),
+ ss->ss_maxdwell ? ss->ss_maxdwell : msecs_to_ticks(200),
+ vap->iv_des_nssid, vap->iv_des_ssid);
+}
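The sw_scan_task queued via ieee80211_runtask() above needs the usual taskqueue(9) pairing over the vif life cycle; a minimal sketch (the exact placement in vap create/delete is assumed, as it is outside this hunk):

    /* At vap creation: */
    TASK_INIT(&lvif->sw_scan_task, 0, lkpi_sw_scan_task, lvif);

    /* At vap deletion, before lvif is freed: */
    ieee80211_draintask(vap->iv_ic, &lvif->sw_scan_task);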
+
+static void
lkpi_ic_scan_end(struct ieee80211com *ic)
{
struct lkpi_hw *lhw;
bool is_hw_scan;
lhw = ic->ic_softc;
+ TRACE_SCAN(ic, "scan_flags %b", lhw->scan_flags, LKPI_LHW_SCAN_BITS);
+
LKPI_80211_LHW_SCAN_LOCK(lhw);
if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) == 0) {
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
@@ -4517,6 +4981,16 @@ lkpi_ic_scan_end(struct ieee80211com *ic)
if (vap->iv_state == IEEE80211_S_SCAN)
lkpi_hw_conf_idle(hw, true);
}
+
+ /*
+ * In case we disabled the hw_scan in lkpi_ic_scan_start() and
+ * switched to swscan, re-enable hw_scan if available.
+ */
+ lkpi_enable_hw_scan(lhw);
+
+ LKPI_80211_LHW_SCAN_LOCK(lhw);
+ wakeup(lhw);
+ LKPI_80211_LHW_SCAN_UNLOCK(lhw);
}
static void
@@ -4527,6 +5001,10 @@ lkpi_ic_scan_curchan(struct ieee80211_scan_state *ss,
bool is_hw_scan;
lhw = ss->ss_ic->ic_softc;
+ TRACE_SCAN(ss->ss_ic, "scan_flags %b chan %d maxdwell %lu",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS,
+ ss->ss_ic->ic_curchan->ic_ieee, maxdwell);
+
LKPI_80211_LHW_SCAN_LOCK(lhw);
is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
@@ -4541,6 +5019,10 @@ lkpi_ic_scan_mindwell(struct ieee80211_scan_state *ss)
bool is_hw_scan;
lhw = ss->ss_ic->ic_softc;
+ TRACE_SCAN(ss->ss_ic, "scan_flags %b chan %d mindwell %lu",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS,
+ ss->ss_ic->ic_curchan->ic_ieee, ss->ss_mindwell);
+
LKPI_80211_LHW_SCAN_LOCK(lhw);
is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0;
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
@@ -5571,6 +6053,12 @@ lkpi_ic_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap
return (-ENXIO);
}
+ if (lsta->state != IEEE80211_STA_AUTHORIZED) {
+ ic_printf(ic, "%s: lsta %p ni %p vap %p, sta %p state %d not AUTHORIZED\n",
+ __func__, lsta, ni, vap, sta, lsta->state);
+ return (-ENXIO);
+ }
+
params.sta = sta;
params.action = IEEE80211_AMPDU_RX_START;
params.buf_size = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
@@ -5647,13 +6135,35 @@ lkpi_ic_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
lvif = VAP_TO_LVIF(vap);
vif = LVIF_TO_VIF(lvif);
lsta = ni->ni_drv_data;
+ if (lsta == NULL) {
+ ic_printf(ic, "%s: lsta %p ni %p vap %p, lsta is NULL\n",
+ __func__, lsta, ni, vap);
+ goto net80211_only;
+ }
sta = LSTA_TO_STA(lsta);
+ if (!lsta->added_to_drv) {
+ ic_printf(ic, "%s: lsta %p ni %p vap %p, sta %p not added to firmware\n",
+ __func__, lsta, ni, vap, sta);
+ goto net80211_only;
+ }
+
+ if (lsta->state != IEEE80211_STA_AUTHORIZED) {
+ ic_printf(ic, "%s: lsta %p ni %p vap %p, sta %p state %d not AUTHORIZED\n",
+ __func__, lsta, ni, vap, sta, lsta->state);
+ goto net80211_only;
+ }
+
IMPROVE_HT("This really should be passed from ht_recv_action_ba_delba.");
for (tid = 0; tid < WME_NUM_TID; tid++) {
if (&ni->ni_rx_ampdu[tid] == rap)
break;
}
+ if (tid == WME_NUM_TID) {
+ ic_printf(ic, "%s: lsta %p ni %p vap %p, sta %p TID not found\n",
+ __func__, lsta, ni, vap, sta);
+ goto net80211_only;
+ }
params.sta = sta;
params.action = IEEE80211_AMPDU_RX_STOP;
@@ -5788,8 +6298,9 @@ lkpi_ic_getradiocaps(struct ieee80211com *ic, int maxchan,
cflags &= ~NET80211_CBW_FLAG_HT40;
error = ieee80211_add_channel_cbw(c, maxchan, n,
- channels[i].hw_value, channels[i].center_freq,
- channels[i].max_power,
+ ieee80211_mhz2ieee(channels[i].center_freq,
+ lkpi_nl80211_band_to_net80211_band(channels[i].band)),
+ channels[i].center_freq, channels[i].max_power,
nflags, bands, cflags);
/* net80211::ENOBUFS: *n >= maxchans */
if (error != 0 && error != ENOBUFS)
@@ -5860,8 +6371,9 @@ lkpi_ic_getradiocaps(struct ieee80211com *ic, int maxchan,
cflags &= ~NET80211_CBW_FLAG_HT40;
error = ieee80211_add_channel_cbw(c, maxchan, n,
- channels[i].hw_value, channels[i].center_freq,
- channels[i].max_power,
+ ieee80211_mhz2ieee(channels[i].center_freq,
+ lkpi_nl80211_band_to_net80211_band(channels[i].band)),
+ channels[i].center_freq, channels[i].max_power,
nflags, bands, cflags);
/* net80211::ENOBUFS: *n >= maxchans */
if (error != 0 && error != ENOBUFS)
@@ -5908,8 +6420,11 @@ linuxkpi_ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops)
LKPI_80211_LHW_SCAN_LOCK_INIT(lhw);
LKPI_80211_LHW_TXQ_LOCK_INIT(lhw);
+ spin_lock_init(&lhw->txq_lock);
sx_init_flags(&lhw->lvif_sx, "lhw-lvif", SX_RECURSE | SX_DUPOK);
+ LKPI_80211_LHW_MC_LOCK_INIT(lhw);
TAILQ_INIT(&lhw->lvif_head);
+ __hw_addr_init(&lhw->mc_list);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
lhw->txq_generation[ac] = 1;
TAILQ_INIT(&lhw->scheduled_txqs[ac]);
@@ -6006,25 +6521,44 @@ linuxkpi_ieee80211_iffree(struct ieee80211_hw *hw)
}
}
+ LKPI_80211_LHW_MC_LOCK(lhw);
+ lkpi_cleanup_mcast_list_locked(lhw);
+ LKPI_80211_LHW_MC_UNLOCK(lhw);
+
/* Cleanup more of lhw here or in wiphy_free()? */
+ spin_lock_destroy(&lhw->txq_lock);
LKPI_80211_LHW_TXQ_LOCK_DESTROY(lhw);
LKPI_80211_LHW_SCAN_LOCK_DESTROY(lhw);
sx_destroy(&lhw->lvif_sx);
+ LKPI_80211_LHW_MC_LOCK_DESTROY(lhw);
IMPROVE();
}
void
-linuxkpi_set_ieee80211_dev(struct ieee80211_hw *hw, char *name)
+linuxkpi_set_ieee80211_dev(struct ieee80211_hw *hw)
{
struct lkpi_hw *lhw;
struct ieee80211com *ic;
+ struct device *dev;
lhw = HW_TO_LHW(hw);
ic = lhw->ic;
- /* Now set a proper name before ieee80211_ifattach(). */
+ /* Save the backpointer from net80211 to LinuxKPI. */
ic->ic_softc = lhw;
- ic->ic_name = name;
+
+ /*
+ * Set a proper name before ieee80211_ifattach() if dev is set.
+ * The ath1xk drivers also unset the dev, so we need to check.
+ */
+ dev = wiphy_dev(hw->wiphy);
+ if (dev != NULL) {
+ ic->ic_name = dev_name(dev);
+ } else {
+ TODO("adjust arguments to still have the old dev or go through "
+ "the hoops of getting the bsddev from hw and detach; "
+ "or do in XXX; check ath1kx drivers");
+ }
/* XXX-BZ do we also need to set wiphy name? */
}
@@ -6101,26 +6635,26 @@ linuxkpi_ieee80211_ifattach(struct ieee80211_hw *hw)
IEEE80211_C_SHSLOT | /* short slot time supported */
IEEE80211_C_SHPREAMBLE /* short preamble supported */
;
-#if 0
- /* Scanning is a different kind of beast to re-work. */
- ic->ic_caps |= IEEE80211_C_BGSCAN;
+
+#ifdef LKPI_80211_BGSCAN
+ if (lhw->ops->hw_scan)
+ ic->ic_caps |= IEEE80211_C_BGSCAN;
#endif
- if (lhw->ops->hw_scan) {
- /*
- * Advertise full-offload scanning.
- *
- * Not limiting to SINGLE_SCAN_ON_ALL_BANDS here as otherwise
- * we essentially disable hw_scan for all drivers not setting
- * the flag.
- */
- ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
- lhw->scan_flags |= LKPI_LHW_SCAN_HW;
- }
+
+ lkpi_enable_hw_scan(lhw);
/* Does HW support Fragmentation offload? */
if (ieee80211_hw_check(hw, SUPPORTS_TX_FRAG))
ic->ic_flags_ext |= IEEE80211_FEXT_FRAG_OFFLOAD;
+ /* Does HW support full AMPDU[-TX] offload? */
+ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION))
+ ic->ic_flags_ext |= IEEE80211_FEXT_AMPDU_OFFLOAD;
+#ifdef __notyet__
+ if (ieee80211_hw_check(hw, TX_AMSDU))
+ if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU))
+#endif
+
/*
* The wiphy variables report bitmasks of avail antennas.
* (*get_antenna) get the current bitmask sets which can be
@@ -6332,8 +6866,10 @@ linuxkpi_ieee80211_ifattach(struct ieee80211_hw *hw)
hw->wiphy->max_scan_ie_len -= lhw->scan_ie_len;
}
- if (bootverbose)
+ if (bootverbose) {
+ ic_printf(ic, "netdev_features %b\n", hw->netdev_features, NETIF_F_BITS);
ieee80211_announce(ic);
+ }
return (0);
err:
@@ -6570,13 +7106,19 @@ linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *hw,
ic = lhw->ic;
ss = ic->ic_scan;
+ TRACE_SCAN(ic, "scan_flags %b info { %ju, %6D, aborted %d }",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS,
+ (uintmax_t)info->scan_start_tsf, info->tsf_bssid, ":",
+ info->aborted);
+
ieee80211_scan_done(ss->ss_vap);
LKPI_80211_LHW_SCAN_LOCK(lhw);
free(lhw->hw_req, M_LKPI80211);
lhw->hw_req = NULL;
lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING;
- wakeup(lhw);
+ /* The wakeup(lhw) will be called from lkpi_ic_scan_end(). */
+ /* wakeup(lhw); */
LKPI_80211_LHW_SCAN_UNLOCK(lhw);
return;
@@ -6832,11 +7374,76 @@ lkpi_convert_rx_status(struct ieee80211_hw *hw, struct lkpi_sta *lsta,
rx_stats->c_pktflags |= IEEE80211_RX_F_FAIL_FCSCRC;
#endif
+ /* Fill in some sinfo fields to cover gaps not reported by the driver. */
if (lsta != NULL) {
memcpy(&lsta->sinfo.rxrate, &rxrate, sizeof(rxrate));
lsta->sinfo.filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+
+ if (rx_status->chains != 0) {
+ lsta->sinfo.chains = rx_status->chains;
+ memcpy(lsta->sinfo.chain_signal, rx_status->chain_signal,
+ sizeof(lsta->sinfo.chain_signal));
+ lsta->sinfo.filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
+ }
+}
+
+#ifdef LINUXKPI_DEBUG_80211
+static void
+lkpi_rx_log_beacon(struct mbuf *m, struct lkpi_hw *lhw,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_mgmt *f;
+ uint8_t *e;
+ char ssid[IEEE80211_NWID_LEN * 4 + 1];
+
+ memset(ssid, '\0', sizeof(ssid));
+
+ f = mtod(m, struct ieee80211_mgmt *);
+ e = f->u.beacon.variable;
+ /*
+ * Usually the SSID is right after the fixed part, and for debugging
+ * we will be fine should we miss it when it is not.
+ */
+ while ((e - (uint8_t *)f) + 1 < m->m_len) {
+ if (*e == IEEE80211_ELEMID_SSID)
+ break;
+ e += (2 + *(e + 1));
+ }
+ if ((e - (uint8_t *)f) + 1 < m->m_len && *e == IEEE80211_ELEMID_SSID) {
+ int i, len;
+ char *p;
+
+ p = ssid;
+ len = m->m_len - ((e + 2) - (uint8_t *)f);
+ if (len > *(e + 1))
+ len = *(e + 1);
+ e += 2;
+ for (i = 0; i < len; i++) {
+ /* Printable character? */
+ if (*e >= 0x20 && *e < 0x7f) {
+ *p++ = *e++;
+ } else {
+ snprintf(p, 5, "%#04x", *e++);
+ p += 4;
+ }
+ }
+ *p = '\0';
}
+
+ /* We print skb, skb->data, m as we are seeing 'ghost beacons'. */
+ TRACE_SCAN_BEACON(lhw->ic, "Beacon: scan_flags %b, band %s freq %u chan %-4d "
+ "len %d { %#06x %#06x %6D %6D %6D %#06x %ju %u %#06x SSID '%s' }",
+ lhw->scan_flags, LKPI_LHW_SCAN_BITS,
+ lkpi_nl80211_band_name(rx_status->band), rx_status->freq,
+ linuxkpi_ieee80211_frequency_to_channel(rx_status->freq, 0),
+ m->m_pkthdr.len, f->frame_control, f->duration_id,
+ f->da, ":", f->sa, ":", f->bssid, ":", f->seq_ctrl,
+ (uintmax_t)le64_to_cpu(f->u.beacon.timestamp),
+ le16_to_cpu(f->u.beacon.beacon_int),
+ le16_to_cpu(f->u.beacon.capab_info), ssid);
}
+#endif
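Each non-printable byte expands to a four-character 0xNN token, which is why the ssid buffer above is sized IEEE80211_NWID_LEN * 4 + 1. For example:

    /* SSID bytes 'a' 'b' 0x01 'c' are logged as: ab0x01c */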
/* For %list see comment towards the end of the function. */
void
@@ -6894,7 +7501,15 @@ linuxkpi_ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
is_beacon = ieee80211_is_beacon(hdr->frame_control);
#ifdef LINUXKPI_DEBUG_80211
- if (is_beacon && (linuxkpi_debug_80211 & D80211_TRACE_RX_BEACONS) == 0)
+ /*
+ * We use the mbuf here as otherwise the variable part might
+ * be in skb frags.
+ */
+ if (is_beacon && ((linuxkpi_debug_80211 & D80211_SCAN_BEACON) != 0))
+ lkpi_rx_log_beacon(m, lhw, rx_status);
+
+ if (is_beacon && (linuxkpi_debug_80211 & D80211_TRACE_RX_BEACONS) == 0 &&
+ (linuxkpi_debug_80211 & D80211_SCAN_BEACON) == 0)
goto no_trace_beacons;
if (linuxkpi_debug_80211 & D80211_TRACE_RX)
@@ -6909,7 +7524,8 @@ linuxkpi_ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
hexdump(mtod(m, const void *), m->m_len, "RX (raw) ", 0);
/* Implement a dump_rxcb() !!! */
- if (linuxkpi_debug_80211 & D80211_TRACE_RX)
+ if ((linuxkpi_debug_80211 & D80211_TRACE_RX) != 0 ||
+ (linuxkpi_debug_80211 & D80211_SCAN_BEACON) != 0)
printf("TRACE-RX: %s: RXCB: %ju %ju %u, %b, %u, %#0x, %#0x, "
"%u band %u, %u { %d %d %d %d }, %d, %#x %#x %#x %#x %u %u %u\n",
__func__,
@@ -7216,7 +7832,7 @@ lkpi_wiphy_delayed_work_timer(struct timer_list *tl)
{
struct wiphy_delayed_work *wdwk;
- wdwk = from_timer(wdwk, tl, timer);
+ wdwk = timer_container_of(wdwk, tl, timer);
wiphy_work_queue(wdwk->wiphy, &wdwk->work);
}
@@ -7668,8 +8284,8 @@ linuxkpi_ieee80211_queue_work(struct ieee80211_hw *hw,
}
struct sk_buff *
-linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr,
- uint8_t *ssid, size_t ssid_len, size_t tailroom)
+linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, const uint8_t *addr,
+ const uint8_t *ssid, size_t ssid_len, size_t tailroom)
{
struct sk_buff *skb;
struct ieee80211_frame *wh;
@@ -7791,8 +8407,11 @@ linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *vif)
nstate = IEEE80211_S_INIT;
arg = 0; /* Not a valid reason. */
- ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__,
- vif, vap, ieee80211_state_name[vap->iv_state]);
+ ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s (synched %d, assoc %d "
+ "beacons %d dtim_period %d)\n", __func__, vif, vap,
+ ieee80211_state_name[vap->iv_state],
+ lvif->lvif_bss_synched, vif->cfg.assoc, lvif->beacons,
+ vif->bss_conf.dtim_period);
ieee80211_new_state(vap, nstate, arg);
}
@@ -7805,8 +8424,11 @@ linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *vif)
lvif = VIF_TO_LVIF(vif);
vap = LVIF_TO_VAP(lvif);
- ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__,
- vif, vap, ieee80211_state_name[vap->iv_state]);
+ ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s (synched %d, assoc %d "
+ "beacons %d dtim_period %d)\n", __func__, vif, vap,
+ ieee80211_state_name[vap->iv_state],
+ lvif->lvif_bss_synched, vif->cfg.assoc, lvif->beacons,
+ vif->bss_conf.dtim_period);
ieee80211_beacon_miss(vap->iv_ic);
}
@@ -7954,21 +8576,30 @@ lkpi_ieee80211_wake_queues_locked(struct ieee80211_hw *hw)
void
linuxkpi_ieee80211_wake_queues(struct ieee80211_hw *hw)
{
- wiphy_lock(hw->wiphy);
+ struct lkpi_hw *lhw;
+ unsigned long flags;
+
+ lhw = HW_TO_LHW(hw);
+
+ spin_lock_irqsave(&lhw->txq_lock, flags);
lkpi_ieee80211_wake_queues_locked(hw);
- wiphy_unlock(hw->wiphy);
+ spin_unlock_irqrestore(&lhw->txq_lock, flags);
}
void
linuxkpi_ieee80211_wake_queue(struct ieee80211_hw *hw, int qnum)
{
+ struct lkpi_hw *lhw;
+ unsigned long flags;
KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n",
__func__, qnum, hw->queues, hw));
- wiphy_lock(hw->wiphy);
+ lhw = HW_TO_LHW(hw);
+
+ spin_lock_irqsave(&lhw->txq_lock, flags);
lkpi_ieee80211_wake_queues(hw, qnum);
- wiphy_unlock(hw->wiphy);
+ spin_unlock_irqrestore(&lhw->txq_lock, flags);
}
/* This is just hardware queues. */
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.h b/sys/compat/linuxkpi/common/src/linux_80211.h
index 89afec1235bd..0dfcd7646c34 100644
--- a/sys/compat/linuxkpi/common/src/linux_80211.h
+++ b/sys/compat/linuxkpi/common/src/linux_80211.h
@@ -59,6 +59,8 @@
#define D80211_IMPROVE_TXQ 0x00000004
#define D80211_TRACE 0x00000010
#define D80211_TRACEOK 0x00000020
+#define D80211_SCAN 0x00000040
+#define D80211_SCAN_BEACON 0x00000080
#define D80211_TRACE_TX 0x00000100
#define D80211_TRACE_TX_DUMP 0x00000200
#define D80211_TRACE_RX 0x00001000
@@ -75,6 +77,20 @@
#define D80211_TRACE_MODE_HE 0x04000000
#define D80211_TRACE_MODE_EHT 0x08000000
+#ifdef LINUXKPI_DEBUG_80211
+#define TRACE_SCAN(ic, fmt, ...) \
+ if (linuxkpi_debug_80211 & D80211_SCAN) \
+ printf("%s:%d: %s SCAN " fmt "\n", \
+ __func__, __LINE__, ic->ic_name, ##__VA_ARGS__)
+#define TRACE_SCAN_BEACON(ic, fmt, ...) \
+ if (linuxkpi_debug_80211 & D80211_SCAN_BEACON) \
+ printf("%s:%d: %s SCAN " fmt "\n", \
+ __func__, __LINE__, ic->ic_name, ##__VA_ARGS__)
+#else
+#define TRACE_SCAN(...) do {} while (0)
+#define TRACE_SCAN_BEACON(...) do {} while (0)
+#endif
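Usage follows the existing IMPROVE_TXQ-style helpers; with D80211_SCAN set in linuxkpi_debug_80211, a call such as (output values illustrative):

    TRACE_SCAN(ic, "scan_flags %b", lhw->scan_flags, LKPI_LHW_SCAN_BITS);

prints a line of the form:

    lkpi_ic_scan_start:4534: iwm0 SCAN scan_flags 3<RUNNING,HW>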
+
#define IMPROVE_TXQ(...) \
if (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) \
printf("%s:%d: XXX LKPI80211 IMPROVE_TXQ\n", __func__, __LINE__)
@@ -187,6 +203,12 @@ struct lkpi_vif {
enum ieee80211_state, int);
struct ieee80211_node * (*iv_update_bss)(struct ieee80211vap *,
struct ieee80211_node *);
+ void (*iv_recv_mgmt)(struct ieee80211_node *,
+ struct mbuf *, int,
+ const struct ieee80211_rx_stats *,
+ int, int);
+ struct task sw_scan_task;
+
struct list_head lsta_list;
struct lkpi_sta *lvif_bss;
@@ -194,6 +216,7 @@ struct lkpi_vif {
struct ieee80211_node *key_update_iv_bss;
int ic_unlocked; /* Count of ic unlocks pending (*mo_set_key) */
int nt_unlocked; /* Count of nt unlocks pending (*mo_set_key) */
+ int beacons; /* # of beacons since assoc */
bool lvif_bss_synched;
bool added_to_drv; /* Driver knows; i.e. we called add_interface(). */
@@ -223,10 +246,14 @@ struct lkpi_hw { /* name it mac80211_sc? */
struct sx lvif_sx;
struct list_head lchanctx_list;
+ struct netdev_hw_addr_list mc_list;
+ unsigned int mc_flags;
+ struct sx mc_sx;
struct mtx txq_mtx;
uint32_t txq_generation[IEEE80211_NUM_ACS];
TAILQ_HEAD(, lkpi_txq) scheduled_txqs[IEEE80211_NUM_ACS];
+ spinlock_t txq_lock;
/* Deferred RX path. */
struct task rxq_task;
@@ -279,7 +306,7 @@ struct lkpi_hw { /* name it mac80211_sc? */
int max_rates; /* Maximum number of bitrates supported in any channel. */
int scan_ie_len; /* Length of common per-band scan IEs. */
- bool update_mc;
+ bool mc_all_multi;
bool update_wme;
bool rxq_stopped;
@@ -289,6 +316,9 @@ struct lkpi_hw { /* name it mac80211_sc? */
#define LHW_TO_HW(_lhw) (&(_lhw)->hw)
#define HW_TO_LHW(_hw) container_of(_hw, struct lkpi_hw, hw)
+#define LKPI_LHW_SCAN_BITS \
+ "\010\1RUNING\2HW"
+
struct lkpi_chanctx {
struct list_head entry;
@@ -369,6 +399,13 @@ struct lkpi_wiphy {
#define LKPI_80211_LHW_LVIF_LOCK(_lhw) sx_xlock(&(_lhw)->lvif_sx)
#define LKPI_80211_LHW_LVIF_UNLOCK(_lhw) sx_xunlock(&(_lhw)->lvif_sx)
+#define LKPI_80211_LHW_MC_LOCK_INIT(_lhw) \
+ sx_init_flags(&(_lhw)->mc_sx, "lhw-mc", 0)
+#define LKPI_80211_LHW_MC_LOCK_DESTROY(_lhw) \
+ sx_destroy(&(_lhw)->mc_sx)
+#define LKPI_80211_LHW_MC_LOCK(_lhw) sx_xlock(&(_lhw)->mc_sx)
+#define LKPI_80211_LHW_MC_UNLOCK(_lhw) sx_xunlock(&(_lhw)->mc_sx)
+
#define LKPI_80211_LVIF_LOCK(_lvif) mtx_lock(&(_lvif)->mtx)
#define LKPI_80211_LVIF_UNLOCK(_lvif) mtx_unlock(&(_lvif)->mtx)
diff --git a/sys/compat/linuxkpi/common/src/linux_80211_macops.c b/sys/compat/linuxkpi/common/src/linux_80211_macops.c
index 78b2120f2d8c..1046b753574f 100644
--- a/sys/compat/linuxkpi/common/src/linux_80211_macops.c
+++ b/sys/compat/linuxkpi/common/src/linux_80211_macops.c
@@ -53,6 +53,8 @@ lkpi_80211_mo_start(struct ieee80211_hw *hw)
struct lkpi_hw *lhw;
int error;
+ lockdep_assert_wiphy(hw->wiphy);
+
lhw = HW_TO_LHW(hw);
if (lhw->ops->start == NULL) {
error = EOPNOTSUPP;
diff --git a/sys/compat/linuxkpi/common/src/linux_acpi.c b/sys/compat/linuxkpi/common/src/linux_acpi.c
index 43783bb8727b..c7d62c745c7e 100644
--- a/sys/compat/linuxkpi/common/src/linux_acpi.c
+++ b/sys/compat/linuxkpi/common/src/linux_acpi.c
@@ -33,6 +33,7 @@
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
+#include <sys/power.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
@@ -118,20 +119,32 @@ acpi_evaluate_dsm(ACPI_HANDLE ObjHandle, const guid_t *guid,
}
static void
-linux_handle_power_suspend_event(void *arg __unused)
+linux_handle_power_suspend_event(void *arg __unused, enum power_stype stype)
{
- /*
- * Only support S3 for now.
- * acpi_sleep_event isn't always called so we use power_suspend_early
- * instead which means we don't know what state we're switching to.
- * TODO: Make acpi_sleep_event consistent
- */
- linux_acpi_target_sleep_state = ACPI_STATE_S3;
- pm_suspend_target_state = PM_SUSPEND_MEM;
+ switch (stype) {
+ case POWER_STYPE_SUSPEND_TO_IDLE:
+ /*
+ * XXX: obiwac Not 100% sure this is correct, but
+ * acpi_target_sleep_state does seem to be set to
+ * ACPI_STATE_S3 during s2idle on Linux.
+ */
+ linux_acpi_target_sleep_state = ACPI_STATE_S3;
+ pm_suspend_target_state = PM_SUSPEND_TO_IDLE;
+ break;
+ case POWER_STYPE_SUSPEND_TO_MEM:
+ linux_acpi_target_sleep_state = ACPI_STATE_S3;
+ pm_suspend_target_state = PM_SUSPEND_MEM;
+ break;
+ default:
+ printf("%s: sleep type %d not yet supported\n",
+ __func__, stype);
+ break;
+ }
}
static void
-linux_handle_power_resume_event(void *arg __unused)
+linux_handle_power_resume_event(void *arg __unused,
+ enum power_stype stype __unused)
{
linux_acpi_target_sleep_state = ACPI_STATE_S0;
pm_suspend_target_state = PM_SUSPEND_ON;
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
index dcdec0dfcc78..458744a9fec6 100644
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -2120,7 +2120,7 @@ add_timer_on(struct timer_list *timer, int cpu)
}
int
-del_timer(struct timer_list *timer)
+timer_delete(struct timer_list *timer)
{
if (callout_stop(&(timer)->callout) == -1)
@@ -2129,7 +2129,7 @@ del_timer(struct timer_list *timer)
}
int
-del_timer_sync(struct timer_list *timer)
+timer_delete_sync(struct timer_list *timer)
{
if (callout_drain(&(timer)->callout) == -1)
@@ -2138,13 +2138,6 @@ del_timer_sync(struct timer_list *timer)
}
int
-timer_delete_sync(struct timer_list *timer)
-{
-
- return (del_timer_sync(timer));
-}
-
-int
timer_shutdown_sync(struct timer_list *timer)
{
diff --git a/sys/compat/linuxkpi/common/src/linux_devres.c b/sys/compat/linuxkpi/common/src/linux_devres.c
index 84f03ba0dd7d..23c91cb5ab2f 100644
--- a/sys/compat/linuxkpi/common/src/linux_devres.c
+++ b/sys/compat/linuxkpi/common/src/linux_devres.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2021 The FreeBSD Foundation
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
*
* This software was developed by Bj\xc3\xb6rn Zeeb under sponsorship from
* the FreeBSD Foundation.
@@ -223,6 +223,30 @@ lkpi_devm_kmalloc_release(struct device *dev __unused, void *p __unused)
/* Nothing to do. Freed with the devres. */
}
+static int
+lkpi_devm_kmalloc_match(struct device *dev __unused, void *p, void *mp)
+{
+ return (p == mp);
+}
+
+void
+lkpi_devm_kfree(struct device *dev, const void *p)
+{
+ void *mp;
+ int error;
+
+ if (p == NULL)
+ return;
+
+ /* I assume Linux simply casts the const away... */
+ mp = __DECONST(void *, p);
+ error = lkpi_devres_destroy(dev, lkpi_devm_kmalloc_release,
+ lkpi_devm_kmalloc_match, mp);
+ if (error != 0)
+ dev_warn(dev, "%s: lkpi_devres_destroy failed with %d\n",
+ __func__, error);
+}
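Hypothetical driver-side pairing: devm_kmalloc() ties the allocation to the device's devres group, and the explicit free is optional:

    buf = devm_kmalloc(dev, 256, GFP_KERNEL);
    if (buf == NULL)
    	return (-ENOMEM);
    /* ... use buf ... */
    devm_kfree(dev, buf);	/* otherwise released automatically on detach */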
+
struct devres_action {
void *data;
void (*action)(void *);
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
index d5bbbea1eb2c..8507a59a8df3 100644
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -67,6 +67,7 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
+#define WANT_NATIVE_PCI_GET_SLOT
#include <linux/pci.h>
#include <linux/compat.h>
@@ -111,6 +112,9 @@ static device_method_t pci_methods[] = {
DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
+ /* Bus interface. */
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+
/* backlight interface */
DEVMETHOD(backlight_update_status, linux_backlight_update_status),
DEVMETHOD(backlight_get_status, linux_backlight_get_status),
@@ -145,6 +149,23 @@ struct linux_dma_priv {
#define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)
+static void
+lkpi_set_pcim_iomap_devres(struct pcim_iomap_devres *dr, int bar,
+ void *res)
+{
+ dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
+ dr->res_table[bar] = res;
+}
+
+static bool
+lkpi_pci_bar_id_valid(int bar)
+{
+ if (bar < 0 || bar > PCIR_MAX_BAR_0)
+ return (false);
+
+ return (true);
+}
+
static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
@@ -289,12 +310,18 @@ lkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)
{
struct pci_dev *pdev, *found;
- KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));
-
found = NULL;
spin_lock(&pci_lock);
list_for_each_entry(pdev, &pci_devices, links) {
- if (pdev->vendor == vendor && pdev->device == device) {
+ /* Walk until we find odev. */
+ if (odev != NULL) {
+ if (pdev == odev)
+ odev = NULL;
+ continue;
+ }
+
+ if ((pdev->vendor == vendor || vendor == PCI_ANY_ID) &&
+ (pdev->device == device || device == PCI_ANY_ID)) {
found = pdev;
break;
}
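With odev honoured, the usual Linux cursor idiom now works: the previous result is passed back in to continue the walk (vendor ID below is just an example):

    struct pci_dev *pdev = NULL;

    while ((pdev = pci_get_device(0x8086, PCI_ANY_ID, pdev)) != NULL) {
    	/* inspect pdev */
    }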
@@ -316,6 +343,7 @@ lkpi_pci_dev_release(struct device *dev)
static int
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{
+ struct pci_devinfo *dinfo;
int error;
error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,
@@ -336,15 +364,24 @@ lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
pci_get_function(dev));
+
pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
- /*
- * This should be the upstream bridge; pci_upstream_bridge()
- * handles that case on demand as otherwise we'll shadow the
- * entire PCI hierarchy.
- */
- pdev->bus->self = pdev;
pdev->bus->number = pci_get_bus(dev);
pdev->bus->domain = pci_get_domain(dev);
+
+ /* Check if we have reached the root to satisfy pci_is_root_bus() */
+ dinfo = device_get_ivars(dev);
+ if (dinfo->cfg.pcie.pcie_location != 0 &&
+ dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {
+ pdev->bus->self = NULL;
+ } else {
+ /*
+ * This should be the upstream bridge; pci_upstream_bridge()
+ * handles that case on demand as otherwise we'll shadow the
+ * entire PCI hierarchy.
+ */
+ pdev->bus->self = pdev;
+ }
pdev->dev.bsddev = dev;
pdev->dev.parent = &linux_root_device;
pdev->dev.release = lkpi_pci_dev_release;
@@ -369,7 +406,7 @@ lkpinew_pci_dev_release(struct device *dev)
pdev = to_pci_dev(dev);
if (pdev->root != NULL)
pci_dev_put(pdev->root);
- if (pdev->bus->self != pdev)
+ if (pdev->bus->self != pdev && pdev->bus->self != NULL)
pci_dev_put(pdev->bus->self);
free(pdev->bus, M_DEVBUF);
if (pdev->msi_desc != NULL) {
@@ -449,6 +486,20 @@ lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
return (pdev);
}
+struct pci_dev *
+lkpi_pci_get_slot(struct pci_bus *pbus, unsigned int devfn)
+{
+ device_t dev;
+ struct pci_dev *pdev;
+
+ dev = pci_find_bsf(pbus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ if (dev == NULL)
+ return (NULL);
+
+ pdev = lkpinew_pci_dev(dev);
+ return (pdev);
+}
+
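devfn packs the slot into the upper five bits and the function into the lower three, e.g.:

    unsigned int devfn = PCI_DEVFN(3, 1);	/* (3 << 3) | 1 == 0x19 */
    /* PCI_SLOT(0x19) == 3, PCI_FUNC(0x19) == 1 */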
static int
linux_pci_probe(device_t dev)
{
@@ -525,6 +576,7 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
{
struct resource_list_entry *rle;
device_t parent;
+ struct pci_dev *pbus, *ppbus;
uintptr_t rid;
int error;
bool isdrm;
@@ -568,6 +620,27 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
list_add(&pdev->links, &pci_devices);
spin_unlock(&pci_lock);
+ /*
+ * Create the hierarchy now as we cannot do it on demand later.
+ * Take special care of DRM as there is a non-PCI device in the chain.
+ */
+ pbus = pdev;
+ if (isdrm) {
+ pbus = lkpinew_pci_dev(parent);
+ if (pbus == NULL) {
+ error = ENXIO;
+ goto out_dma_init;
+ }
+ }
+ pcie_find_root_port(pbus);
+ if (isdrm)
+ pdev->root = pbus->root;
+ ppbus = pci_upstream_bridge(pbus);
+ while (ppbus != NULL && ppbus != pbus) {
+ pbus = ppbus;
+ ppbus = pci_upstream_bridge(pbus);
+ }
+
if (pdrv != NULL) {
error = pdrv->probe(pdev, id);
if (error)
@@ -575,6 +648,7 @@ linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
}
return (0);
+/* XXX the cleanup does not match the allocation up there. */
out_probe:
free(pdev->bus, M_DEVBUF);
spin_lock_destroy(&pdev->pcie_cap_lock);
@@ -757,6 +831,9 @@ _lkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen __unused)
struct pci_mmio_region *mmio, *p;
int type;
+ if (!lkpi_pci_bar_id_valid(bar))
+ return (NULL);
+
type = pci_resource_type(pdev, bar);
if (type < 0) {
device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
@@ -797,6 +874,9 @@ linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar,
{
struct resource *res;
+ if (!lkpi_pci_bar_id_valid(bar))
+ return (NULL);
+
res = _lkpi_pci_iomap(pdev, bar, maxlen);
if (res == NULL)
return (NULL);
@@ -810,9 +890,41 @@ linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar,
void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
+ if (!lkpi_pci_bar_id_valid(bar))
+ return (NULL);
+
return (linuxkpi_pci_iomap_range(pdev, bar, 0, maxlen));
}
+void *
+linuxkpi_pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ struct pcim_iomap_devres *dr;
+ void *res;
+
+ if (!lkpi_pci_bar_id_valid(bar))
+ return (NULL);
+
+ dr = lkpi_pcim_iomap_devres_find(pdev);
+ if (dr == NULL)
+ return (NULL);
+
+ if (dr->res_table[bar] != NULL)
+ return (dr->res_table[bar]);
+
+ res = linuxkpi_pci_iomap(pdev, bar, maxlen);
+ if (res == NULL) {
+ /*
+ * Do not free the devres in case there already
+ * were other valid mappings.
+ */
+ return (NULL);
+ }
+ lkpi_set_pcim_iomap_devres(dr, bar, res);
+
+ return (res);
+}
+
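From the caller's side, repeat calls for the same BAR return the cached mapping and devres handles the teardown; a hypothetical use:

    void __iomem *regs;

    regs = pcim_iomap(pdev, 0, 0);	/* BAR 0, whole length */
    if (regs == NULL)
    	return (-ENOMEM);
    /* no pci_iounmap() needed; released when the device detaches */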
void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
@@ -864,8 +976,7 @@ linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *nam
res = _lkpi_pci_iomap(pdev, bar, 0);
if (res == NULL)
goto err;
- dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
- dr->res_table[bar] = res;
+ lkpi_set_pcim_iomap_devres(dr, bar, res);
mappings |= (1 << bar);
}
@@ -1099,8 +1210,9 @@ pci_resource_len(struct pci_dev *pdev, int bar)
return (rle->count);
}
-int
-pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+static int
+lkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
+ bool managed)
{
struct resource *res;
struct pci_devres *dr;
@@ -1108,9 +1220,20 @@ pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
int rid;
int type;
+ if (!lkpi_pci_bar_id_valid(bar))
+ return (-EINVAL);
+
+ /*
+ * If the BAR is unused (zero length), return success without adding
+ * it; otherwise linuxkpi_pcim_request_all_regions() would error out.
+ */
+ if (pci_resource_len(pdev, bar) == 0)
+ return (0);
+ /* Likewise if it is neither IO nor MEM, nothing to do for us. */
type = pci_resource_type(pdev, bar);
if (type < 0)
- return (-ENODEV);
+ return (0);
+
rid = PCIR_BAR(bar);
res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
RF_ACTIVE|RF_SHAREABLE);
@@ -1123,11 +1246,16 @@ pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
/*
* It seems there is an implicit devres tracking on these if the device
- * is managed; otherwise the resources are not automatiaclly freed on
- * FreeBSD/LinuxKPI tough they should be/are expected to be by Linux
- * drivers.
+ * is managed (lkpi_pci_devres_find() case); otherwise the resources are
+ * not automatically freed on FreeBSD/LinuxKPI though they should be/are
+ * expected to be by Linux drivers.
+ * If, however, we are called from a pcim-function with the managed
+ * argument set, we need to track devres independently of pdev->managed.
*/
- dr = lkpi_pci_devres_find(pdev);
+ if (managed)
+ dr = lkpi_pci_devres_get_alloc(pdev);
+ else
+ dr = lkpi_pci_devres_find(pdev);
if (dr != NULL) {
dr->region_mask |= (1 << bar);
dr->region_table[bar] = res;
@@ -1144,6 +1272,12 @@ pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
}
int
+linuxkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+{
+ return (lkpi_pci_request_region(pdev, bar, res_name, false));
+}
+
+int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
int error;
@@ -1159,6 +1293,24 @@ linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
return (0);
}
+int
+linuxkpi_pcim_request_all_regions(struct pci_dev *pdev, const char *res_name)
+{
+ int bar, error;
+
+ for (bar = 0; bar <= PCIR_MAX_BAR_0; bar++) {
+ error = lkpi_pci_request_region(pdev, bar, res_name, true);
+ if (error != 0) {
+ device_printf(pdev->dev.bsddev, "%s: bar %d res_name '%s': "
+ "lkpi_pci_request_region returned %d\n", __func__,
+ bar, res_name, error);
+ pci_release_regions(pdev);
+ return (error);
+ }
+ }
+ return (0);
+}
+
void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
diff --git a/sys/compat/linuxkpi/common/src/linux_seq_file.c b/sys/compat/linuxkpi/common/src/linux_seq_file.c
index 8b426825cc78..9c06fe27bebe 100644
--- a/sys/compat/linuxkpi/common/src/linux_seq_file.c
+++ b/sys/compat/linuxkpi/common/src/linux_seq_file.c
@@ -64,13 +64,10 @@ seq_read(struct linux_file *f, char *ubuf, size_t size, off_t *ppos)
return (-EINVAL);
size = min(rc - *ppos, size);
- rc = strscpy(ubuf, sbuf_data(sbuf) + *ppos, size + 1);
+ memcpy(ubuf, sbuf_data(sbuf) + *ppos, size);
+ *ppos += size;
- /* add 1 for null terminator */
- if (rc > 0)
- rc += 1;
-
- return (rc);
+ return (size);
}
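With the copy honouring *ppos, successive reads now walk the buffer as expected: a 10-byte seq file read in 4-byte chunks returns 4, 4, 2, and finally 0 at EOF.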
int
diff --git a/sys/compat/linuxkpi/dummy/include/kunit/skbuff.h b/sys/compat/linuxkpi/dummy/include/kunit/skbuff.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/sys/compat/linuxkpi/dummy/include/kunit/skbuff.h
diff --git a/sys/compat/linuxkpi/dummy/include/kunit/test-bug.h b/sys/compat/linuxkpi/dummy/include/kunit/test-bug.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/sys/compat/linuxkpi/dummy/include/kunit/test-bug.h
diff --git a/sys/compat/linuxkpi/dummy/include/kunit/test.h b/sys/compat/linuxkpi/dummy/include/kunit/test.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/sys/compat/linuxkpi/dummy/include/kunit/test.h
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 2458756ae350..a25ee8f6e1af 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -679,6 +679,7 @@ options TCP_OFFLOAD # TCP offload support.
options TCP_RFC7413 # TCP Fast Open
options TCPHPTS
+#options TCP_HPTS_KTEST # Add KTEST support for HPTS
# In order to enable IPSEC you MUST also add device crypto to
# your kernel configuration
@@ -888,13 +889,13 @@ options IEEE80211_DEBUG_REFCNT
options IEEE80211_SUPPORT_MESH #enable 802.11s D3.0 support
options IEEE80211_SUPPORT_TDMA #enable TDMA support
-# The `wlan_wep', `wlan_tkip', and `wlan_ccmp' devices provide
-# support for WEP, TKIP, AES-CCMP and AES-GCMP crypto protocols optionally
-# used with 802.11 devices that depend on the `wlan' module.
+# The `wlan_wep', `wlan_tkip', `wlan_ccmp', and `wlan_gcmp' devices provide
+# support for WEP, TKIP, AES-CCMP and AES-GCMP crypto protocols optionally used
+# with 802.11 devices that depend on the `wlan' module.
device wlan_wep
+device wlan_tkip
device wlan_ccmp
device wlan_gcmp
-device wlan_tkip
# The `wlan_xauth' device provides support for external (i.e. user-mode)
# authenticators for use with 802.11 drivers that use the `wlan'
@@ -1249,7 +1250,7 @@ options MAC
options MAC_BIBA
options MAC_BSDEXTENDED
options MAC_DDB
-options MAC_DO
+options MAC_DO
options MAC_IFOFF
options MAC_IPACL
options MAC_LOMAC
@@ -2436,7 +2437,7 @@ options HID_DEBUG # enable debug msgs
device hidbus # HID bus
device hidmap # HID to evdev mapping
device hidraw # Raw access driver
-options HIDRAW_MAKE_UHID_ALIAS # install /dev/uhid alias
+options HIDRAW_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/hidraw
device hconf # Multitouch configuration TLC
device hcons # Consumer controls
device hgame # Generic game controllers
@@ -2446,6 +2447,9 @@ device hmt # HID multitouch (MS-compatible)
device hpen # Generic pen driver
device hsctrl # System controls
device ps4dshock # Sony PS4 DualShock 4 gamepad driver
+device u2f # FIDO/U2F authenticator
+options U2F_DROP_UHID_ALIAS # Do not install /dev/uhid alias for
+ # /dev/u2f/ and rename driver from uhid to u2f
device xb360gp # XBox 360 gamepad driver
#####################################################################
diff --git a/sys/conf/dtb.build.mk b/sys/conf/dtb.build.mk
index 327d69106244..7eb0db5e8b80 100644
--- a/sys/conf/dtb.build.mk
+++ b/sys/conf/dtb.build.mk
@@ -1,7 +1,3 @@
-
-.include <bsd.init.mk>
-# Grab all the options for a kernel build. For backwards compat, we need to
-# do this after bsd.own.mk.
.include "kern.opts.mk"
DTC?= dtc
diff --git a/sys/conf/files b/sys/conf/files
index be65ed20d6aa..d9730e6bf55b 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1393,6 +1393,8 @@ dev/cxgbe/t4_smt.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_l2t.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/t4_tpt.c optional cxgbe pci \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_tracer.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_vf.c optional cxgbev pci \
@@ -1403,6 +1405,8 @@ dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t6_kern_tls.c optional cxgbe pci kern_tls \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/crypto/t7_kern_tls.c optional cxgbe pci kern_tls \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t4_keyctx.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_common.c optional cxgbe \
@@ -1519,6 +1523,30 @@ t6fw.fw optional cxgbe \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw.fw"
+t7fw_cfg.c optional cxgbe \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk t7fw_cfg.fw:t7fw_cfg t7fw_cfg_uwire.fw:t7fw_cfg_uwire -mt7fw_cfg -c${.TARGET}" \
+ no-ctfconvert no-implicit-rule before-depend local \
+ clean "t7fw_cfg.c"
+t7fw_cfg.fwo optional cxgbe \
+ dependency "t7fw_cfg.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "t7fw_cfg.fwo"
+t7fw_cfg.fw optional cxgbe \
+ dependency "$S/dev/cxgbe/firmware/t7fw_cfg.txt" \
+ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
+ no-obj no-implicit-rule \
+ clean "t7fw_cfg.fw"
+t7fw_cfg_uwire.fwo optional cxgbe \
+ dependency "t7fw_cfg_uwire.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "t7fw_cfg_uwire.fwo"
+t7fw_cfg_uwire.fw optional cxgbe \
+ dependency "$S/dev/cxgbe/firmware/t7fw_cfg_uwire.txt" \
+ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
+ no-obj no-implicit-rule \
+ clean "t7fw_cfg_uwire.fw"
dev/cxgbe/crypto/t4_crypto.c optional ccr \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cyapa/cyapa.c optional cyapa iicbus
@@ -1750,6 +1778,7 @@ dev/hid/hpen.c optional hpen
dev/hid/hsctrl.c optional hsctrl
dev/hid/ietp.c optional ietp
dev/hid/ps4dshock.c optional ps4dshock
+dev/hid/u2f.c optional u2f
dev/hid/xb360gp.c optional xb360gp
dev/hifn/hifn7751.c optional hifn
dev/hptiop/hptiop.c optional hptiop scbus
@@ -2280,6 +2309,8 @@ dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/ixgbe_e610.c optional ix inet | ixv inet \
+ compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \
@@ -3448,7 +3479,6 @@ dev/virtio/mmio/virtio_mmio.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_acpi.c optional virtio_mmio acpi
dev/virtio/mmio/virtio_mmio_cmdline.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_fdt.c optional virtio_mmio fdt
-dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/block/virtio_blk.c optional virtio_blk
@@ -3806,6 +3836,7 @@ kern/kern_hhook.c standard
kern/kern_idle.c standard
kern/kern_intr.c standard
kern/kern_jail.c standard
+kern/kern_jaildesc.c standard
kern/kern_jailmeta.c standard
kern/kern_kcov.c optional kcov \
compile-with "${NOSAN_C} ${MSAN_CFLAGS}"
@@ -4366,15 +4397,23 @@ netinet/cc/cc.c optional cc_newreno inet | cc_vegas inet | \
cc_chd inet | cc_cdg inet | cc_newreno inet6 | cc_vegas inet6 | \
cc_htcp inet6 | cc_hd inet6 |cc_dctcp inet6 | cc_cubic inet6 | \
cc_chd inet6 | cc_cdg inet6
-netinet/cc/cc_cdg.c optional inet cc_cdg tcp_hhook
-netinet/cc/cc_chd.c optional inet cc_chd tcp_hhook
+netinet/cc/cc_cdg.c optional inet cc_cdg tcp_hhook | \
+ inet6 cc_cdg tcp_hhook
+netinet/cc/cc_chd.c optional inet cc_chd tcp_hhook | \
+ inet6 cc_chd tcp_hhook
netinet/cc/cc_cubic.c optional inet cc_cubic | inet6 cc_cubic
netinet/cc/cc_dctcp.c optional inet cc_dctcp | inet6 cc_dctcp
-netinet/cc/cc_hd.c optional inet cc_hd tcp_hhook
+netinet/cc/cc_hd.c optional inet cc_hd tcp_hhook | \
+ inet6 cc_hd tcp_hhook
netinet/cc/cc_htcp.c optional inet cc_htcp | inet6 cc_htcp
netinet/cc/cc_newreno.c optional inet cc_newreno | inet6 cc_newreno
-netinet/cc/cc_vegas.c optional inet cc_vegas tcp_hhook
-netinet/khelp/h_ertt.c optional inet tcp_hhook
+netinet/cc/cc_vegas.c optional inet cc_vegas tcp_hhook | \
+ inet6 cc_vegas tcp_hhook
+netinet/khelp/h_ertt.c optional inet tcp_hhook cc_cdg | \
+ inet tcp_hhook cc_chd | inet tcp_hhook cc_hd | \
+ inet tcp_hhook cc_vegas | inet6 tcp_hhook cc_cdg | \
+ inet6 tcp_hhook cc_chd | inet6 tcp_hhook cc_hd | \
+ inet6 tcp_hhook cc_vegas
netinet/sctp_asconf.c optional inet sctp | inet6 sctp
netinet/sctp_auth.c optional inet sctp | inet6 sctp
netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 80548320c3fc..a342242ac66e 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -13,14 +13,14 @@ include "conf/files.x86"
#
elf-vdso.so.o standard \
dependency "$S/amd64/amd64/sigtramp.S assym.inc $S/conf/vdso_amd64.ldscript $S/tools/amd64_vdso.sh" \
- compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_vdso.sh" \
+ compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_vdso.sh" \
no-ctfconvert \
no-implicit-rule before-depend \
clean "elf-vdso.so.o elf-vdso.so.1 vdso_offsets.h sigtramp.pico"
#
elf-vdso32.so.o optional compat_freebsd32 \
dependency "$S/amd64/ia32/ia32_sigtramp.S ia32_assym.h $S/conf/vdso_amd64_ia32.ldscript $S/tools/amd64_ia32_vdso.sh" \
- compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_ia32_vdso.sh" \
+ compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_ia32_vdso.sh" \
no-ctfconvert \
no-implicit-rule before-depend \
clean "elf-vdso32.so.o elf-vdso32.so.1 vdso_ia32_offsets.h ia32_sigtramp.pico"
@@ -107,7 +107,8 @@ crypto/openssl/amd64/poly1305-x86_64.S optional ossl
crypto/openssl/amd64/sha1-x86_64.S optional ossl
crypto/openssl/amd64/sha256-x86_64.S optional ossl
crypto/openssl/amd64/sha512-x86_64.S optional ossl
-crypto/openssl/amd64/ossl_aes_gcm.c optional ossl
+crypto/openssl/amd64/ossl_aes_gcm_avx512.c optional ossl
+crypto/openssl/ossl_aes_gcm.c optional ossl
dev/amdgpio/amdgpio.c optional amdgpio
dev/axgbe/if_axgbe_pci.c optional axp
dev/axgbe/xgbe-desc.c optional axp
@@ -419,6 +420,9 @@ contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_avx512.S optional zfs com
contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_sse2.S optional zfs compile-with "${ZFS_S}"
contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_sse41.S optional zfs compile-with "${ZFS_S}"
+# zfs AVX2 implementation of aes-gcm from BoringSSL
+contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S optional zfs compile-with "${ZFS_S}"
+
# zfs sha2 hash support
zfs-sha256-x86_64.o optional zfs \
dependency "$S/contrib/openzfs/module/icp/asm-x86_64/sha2/sha256-x86_64.S" \
diff --git a/sys/conf/files.arm b/sys/conf/files.arm
index 91b01845519e..880e804b6c95 100644
--- a/sys/conf/files.arm
+++ b/sys/conf/files.arm
@@ -132,7 +132,7 @@ libkern/udivdi3.c standard
libkern/umoddi3.c standard
crypto/openssl/ossl_arm.c optional ossl
-crypto/openssl/arm/ossl_aes_gcm.c optional ossl
+crypto/openssl/arm/ossl_aes_gcm_neon.c optional ossl
crypto/openssl/arm/aes-armv4.S optional ossl \
compile-with "${NORMAL_C} -I${SRCTOP}/sys/crypto/openssl"
crypto/openssl/arm/bsaes-armv7.S optional ossl \
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 641001efab5e..2f412fa3cb1b 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -73,6 +73,7 @@ arm64/arm64/pmap.c standard
arm64/arm64/ptrace_machdep.c standard
arm64/arm64/sdt_machdep.c optional kdtrace_hooks
arm64/arm64/sigtramp.S standard
+arm64/arm64/spec_workaround.c standard
arm64/arm64/stack_machdep.c optional ddb | stack
arm64/arm64/strcmp.S standard
arm64/arm64/strncmp.S standard
@@ -127,9 +128,11 @@ arm64/vmm/vmm_reset.c optional vmm
arm64/vmm/vmm_handlers.c optional vmm
arm64/vmm/vmm_call.S optional vmm
arm64/vmm/vmm_nvhe_exception.S optional vmm \
+ dependency "$S/arm64/vmm/vmm_hyp_exception.S" \
compile-with "${NOSAN_C} -fpie" \
no-obj
arm64/vmm/vmm_nvhe.c optional vmm \
+ dependency "$S/arm64/vmm/vmm_hyp.c" \
compile-with "${NOSAN_C} -fpie" \
no-obj
vmm_hyp_blob.elf.full optional vmm \
@@ -178,6 +181,8 @@ crypto/des/des_enc.c optional netsmb
crypto/openssl/ossl_aarch64.c optional ossl
crypto/openssl/aarch64/chacha-armv8.S optional ossl \
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}"
+crypto/openssl/aarch64/chacha-armv8-sve.S optional ossl \
+ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}"
crypto/openssl/aarch64/poly1305-armv8.S optional ossl \
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}"
crypto/openssl/aarch64/sha1-armv8.S optional ossl \
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index d2c3aa260cd9..0deada385f31 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -31,9 +31,11 @@ zfs-sha512-ppc.o optional zfs \
# openssl ppc common files
crypto/openssl/ossl_ppc.c optional ossl powerpc64 | ossl powerpc64le
+crypto/openssl/ossl_aes_gcm.c optional ossl powerpc64 | ossl powerpc64le
# openssl assembly files (powerpc64le)
crypto/openssl/powerpc64le/aes-ppc.S optional ossl powerpc64le
+crypto/openssl/powerpc64le/aes-gcm-ppc.S optional ossl powerpc64le
crypto/openssl/powerpc64le/aesp8-ppc.S optional ossl powerpc64le
crypto/openssl/powerpc64le/chacha-ppc.S optional ossl powerpc64le
crypto/openssl/powerpc64le/ecp_nistz256-ppc64.S optional ossl powerpc64le
@@ -54,6 +56,7 @@ crypto/openssl/powerpc64le/x25519-ppc64.S optional ossl powerpc64le
# openssl assembly files (powerpc64)
crypto/openssl/powerpc64/aes-ppc.S optional ossl powerpc64
+crypto/openssl/powerpc64/aes-gcm-ppc.S optional ossl powerpc64
crypto/openssl/powerpc64/aesp8-ppc.S optional ossl powerpc64
crypto/openssl/powerpc64/chacha-ppc.S optional ossl powerpc64
crypto/openssl/powerpc64/ecp_nistz256-ppc64.S optional ossl powerpc64
diff --git a/sys/conf/files.x86 b/sys/conf/files.x86
index 9976e9cfec5d..953da7dd1284 100644
--- a/sys/conf/files.x86
+++ b/sys/conf/files.x86
@@ -146,6 +146,7 @@ dev/hyperv/vmbus/vmbus_et.c optional hyperv
dev/hyperv/vmbus/vmbus_if.m optional hyperv
dev/hyperv/vmbus/vmbus_res.c optional hyperv
dev/hyperv/vmbus/vmbus_xact.c optional hyperv
+dev/ichwd/i6300esbwd.c optional ichwd
dev/ichwd/ichwd.c optional ichwd
dev/imcsmb/imcsmb.c optional imcsmb
dev/imcsmb/imcsmb_pci.c optional imcsmb pci
diff --git a/sys/conf/kern.opts.mk b/sys/conf/kern.opts.mk
index 045e55d1b19a..cef4dd11ba58 100644
--- a/sys/conf/kern.opts.mk
+++ b/sys/conf/kern.opts.mk
@@ -4,6 +4,7 @@
# parts to omit (eg CDDL or SOURCELESS_HOST). Some of these will cause
# config.mk to define symbols in various opt_*.h files.
+
#
# Define MK_* variables (which are either "yes" or "no") for users
# to set via WITH_*/WITHOUT_* in /etc/src.conf and override in the
@@ -13,17 +14,12 @@
# that haven't been converted over.
#
-# Note: bsd.own.mk must be included before the rest of kern.opts.mk to make
-# building on 10.x and earlier work. This should be removed when that's no
-# longer supported since it confounds the defaults (since it uses the host's
-# notion of defaults rather than what's default in current when building
-# within sys/modules).
-.include <bsd.own.mk>
-
# These options are used by the kernel build process (kern.mk and kmod.mk)
# They have to be listed here so we can build modules outside of the
# src tree.
+.include <bsd.init.mk>
+
KLDXREF_CMD?= kldxref
__DEFAULT_YES_OPTIONS = \
diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk
index 59e51c394a35..7cdfd17778db 100644
--- a/sys/conf/kern.post.mk
+++ b/sys/conf/kern.post.mk
@@ -124,7 +124,10 @@ PORTSMODULESENV=\
all:
.for __i in ${PORTS_MODULES}
@${ECHO} "===> Ports module ${__i} (all)"
- cd ${PORTSDIR:U/usr/ports}/${__i}; ${PORTSMODULESENV} ${MAKE} -B clean build
+ port=${__i}; flavor=$${port#*@}; port=$${port%@*}; flavor=$${flavor%$${port}}; \
+ cd ${PORTSDIR:U/usr/ports}/$${port}; \
+ ${PORTSMODULESENV} ${MAKE} -B $${flavor:+FLAVOR=}$${flavor} \
+ clean build
.endfor
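Worked example with a hypothetical entry: PORTS_MODULES=x11/nvidia-driver@470 splits into port=x11/nvidia-driver and flavor=470, so make runs with FLAVOR=470; for a plain category/port entry the final ${flavor%$port} expansion strips the copied value, flavor ends up empty, and no FLAVOR= is passed.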
.for __target in install reinstall clean
@@ -132,7 +135,10 @@ ${__target}: ports-${__target}
ports-${__target}:
.for __i in ${PORTS_MODULES}
@${ECHO} "===> Ports module ${__i} (${__target})"
- cd ${PORTSDIR:U/usr/ports}/${__i}; ${PORTSMODULESENV} ${MAKE} -B ${__target:C/(re)?install/deinstall reinstall/}
+ port=${__i}; flavor=$${port#*@}; port=$${port%@*}; flavor=$${flavor%$${port}}; \
+ cd ${PORTSDIR:U/usr/ports}/$${port}; \
+ ${PORTSMODULESENV} ${MAKE} -B $${flavor:+FLAVOR=}$${flavor} \
+ ${__target:C/(re)?install/deinstall reinstall/}
.endfor
.endfor
.endif
@@ -366,6 +372,19 @@ _ILINKS+= x86
_ILINKS+= i386
.endif
+.if ${MK_REPRODUCIBLE_BUILD} != "no"
+PREFIX_SYSDIR=/usr/src/sys
+PREFIX_OBJDIR=/usr/obj/usr/src/${MACHINE}.${MACHINE_CPUARCH}/sys/${KERN_IDENT}
+CFLAGS+= -ffile-prefix-map=${SYSDIR}=${PREFIX_SYSDIR}
+CFLAGS+= -ffile-prefix-map=${.OBJDIR}=${PREFIX_OBJDIR}
+.if defined(SYSROOT)
+CFLAGS+= -ffile-prefix-map=${SYSROOT}=/sysroot
+.endif
+.else
+PREFIX_SYSDIR=${SYSDIR}
+PREFIX_OBJDIR=${.OBJDIR}
+.endif
+
# Ensure that the link exists without depending on it when it exists.
# Ensure that debug info references the path in the source tree.
.for _link in ${_ILINKS}
@@ -373,12 +392,20 @@ _ILINKS+= i386
${SRCS} ${DEPENDOBJS}: ${_link}
.endif
.if ${_link} == "machine"
-CFLAGS+= -fdebug-prefix-map=./machine=${SYSDIR}/${MACHINE}/include
+CFLAGS+= -fdebug-prefix-map=./machine=${PREFIX_SYSDIR}/${MACHINE}/include
.else
-CFLAGS+= -fdebug-prefix-map=./${_link}=${SYSDIR}/${_link}/include
+CFLAGS+= -fdebug-prefix-map=./${_link}=${PREFIX_SYSDIR}/${_link}/include
.endif
.endfor
+# Install GDB plugins that are useful for kernel debugging. See the
+# README in sys/tools/gdb for more information.
+GDB_FILES= acttrace.py \
+ freebsd.py \
+ pcpu.py \
+ selftest.py \
+ vnet.py
+
${_ILINKS}:
@case ${.TARGET} in \
machine) \
@@ -428,6 +455,13 @@ kernel-install: .PHONY
.if defined(DEBUG) && !defined(INSTALL_NODEBUG) && ${MK_KERNEL_SYMBOLS} != "no"
mkdir -p ${DESTDIR}${KERN_DEBUGDIR}${KODIR}
${INSTALL} -p -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_KO}.debug ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/
+ ${INSTALL} -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} \
+ $S/tools/kernel-gdb.py ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/${KERNEL_KO}-gdb.py
+ mkdir -p ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/gdb
+.for file in ${GDB_FILES}
+ ${INSTALL} -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} \
+ $S/tools/gdb/${file} ${DESTDIR}${KERN_DEBUGDIR}${KODIR}/gdb/${file}
+.endfor
.endif
.if defined(KERNEL_EXTRA_INSTALL)
${INSTALL} -p -m ${KMODMODE} -o ${KMODOWN} -g ${KMODGRP} ${KERNEL_EXTRA_INSTALL} ${DESTDIR}${KODIR}/
@@ -448,7 +482,7 @@ config.o env.o hints.o vers.o vnode_if.o:
NEWVERS_ENV+= MAKE="${MAKE}"
.if ${MK_REPRODUCIBLE_BUILD} != "no"
-NEWVERS_ARGS+= -R
+NEWVERS_ARGS+= -R -d ${PREFIX_OBJDIR}
.endif
vers.c: .NOMETA_CMP $S/conf/newvers.sh $S/sys/param.h ${SYSTEM_DEP:Nvers.*}
${NEWVERS_ENV} sh $S/conf/newvers.sh ${NEWVERS_ARGS} ${KERN_IDENT}
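
For reference, the reproducible-build knobs above work by path canonicalization: -ffile-prefix-map rewrites the checkout- and objdir-specific prefixes the compiler would otherwise embed in macros and DWARF, and newvers.sh now records the canonical object directory instead of $(pwd). A minimal sketch of the compiler side (hypothetical file name; any gcc or clang that accepts -ffile-prefix-map):

#include <stdio.h>

/*
 * Hypothetical demo: built as
 *	cc -ffile-prefix-map=$(pwd)=/usr/src/sys -o prefix $(pwd)/prefix.c
 * __FILE__ (and the DWARF paths) record /usr/src/sys/prefix.c no matter
 * where the tree was really checked out, which is what lets two builds
 * from different directories compare bit-for-bit.
 */
int
main(void)
{
	printf("compiled from: %s\n", __FILE__);
	return (0);
}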
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
index 78178065e15b..0251486247da 100644
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -8,7 +8,11 @@
# the rest of /usr/src, but they still always process SRCCONF even though
# the normal mechanisms to prevent that (compiling out of tree) won't
# work. To ensure they do work, we have to duplicate these few lines here.
+.if exists(${SRCTOP}/src.conf)
+SRCCONF?= ${SRCTOP}/src.conf
+.else
SRCCONF?= /etc/src.conf
+.endif
.if (exists(${SRCCONF}) || ${SRCCONF} != "/etc/src.conf") && !target(_srcconf_included_)
.include "${SRCCONF}"
_srcconf_included_:
@@ -214,7 +218,8 @@ ZFS_CFLAGS+= -I$S/contrib/openzfs/module/icp/include \
.if ${MACHINE_ARCH} == "amd64"
ZFS_CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
- -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
+ -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW \
+ -DHAVE_VAES -DHAVE_VPCLMULQDQ
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
diff --git a/sys/conf/kmod.mk b/sys/conf/kmod.mk
index 645c04cdd135..0fd2d4050cf1 100644
--- a/sys/conf/kmod.mk
+++ b/sys/conf/kmod.mk
@@ -303,6 +303,25 @@ all: ${PROG}
beforedepend: ${_ILINKS}
beforebuild: ${_ILINKS}
+.if ${MK_REPRODUCIBLE_BUILD} != "no"
+PREFIX_SYSDIR=/usr/src/sys
+CFLAGS+= -ffile-prefix-map=${SYSDIR}=${PREFIX_SYSDIR}
+.if defined(KERNBUILDDIR)
+PREFIX_KERNBUILDDIR=/usr/obj/usr/src/${MACHINE}.${MACHINE_CPUARCH}/sys/${KERNBUILDDIR:T}
+PREFIX_OBJDIR=${PREFIX_KERNBUILDDIR}/modules/usr/src/sys/modules/${.OBJDIR:T}
+CFLAGS+= -ffile-prefix-map=${KERNBUILDDIR}=${PREFIX_KERNBUILDDIR}
+.else
+PREFIX_OBJDIR=/usr/obj/usr/src/${MACHINE}.${MACHINE_CPUARCH}/sys/modules/${.OBJDIR:T}
+.endif
+CFLAGS+= -ffile-prefix-map=${.OBJDIR}=${PREFIX_OBJDIR}
+.if defined(SYSROOT)
+CFLAGS+= -ffile-prefix-map=${SYSROOT}=/sysroot
+.endif
+.else
+PREFIX_SYSDIR=${SYSDIR}
+PREFIX_OBJDIR=${.OBJDIR}
+.endif
+
# Ensure that the links exist without depending on it when it exists which
# causes all the modules to be rebuilt when the directory pointed to changes.
# Ensure that debug info references the path in the source tree.
@@ -311,9 +330,9 @@ beforebuild: ${_ILINKS}
OBJS_DEPEND_GUESS+= ${_link}
.endif
.if ${_link} == "machine"
-CFLAGS+= -fdebug-prefix-map=./machine=${SYSDIR}/${MACHINE}/include
+CFLAGS+= -fdebug-prefix-map=./machine=${PREFIX_SYSDIR}/${MACHINE}/include
.else
-CFLAGS+= -fdebug-prefix-map=./${_link}=${SYSDIR}/${_link}/include
+CFLAGS+= -fdebug-prefix-map=./${_link}=${PREFIX_SYSDIR}/${_link}/include
.endif
.endfor
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 66926805052c..145377c1e75e 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -50,8 +50,8 @@
#
TYPE="FreeBSD"
-REVISION="15.0"
-BRANCH="PRERELEASE"
+REVISION="16.0"
+BRANCH="CURRENT"
if [ -n "${BRANCH_OVERRIDE}" ]; then
BRANCH=${BRANCH_OVERRIDE}
fi
@@ -110,14 +110,18 @@ COPYRIGHT="$COPYRIGHT
# We expand include_metadata later since we may set it to the
# future value of modified.
+builddir=$(pwd)
include_metadata=yes
modified=no
-while getopts crRvV: opt; do
+while getopts cd:rRvV: opt; do
case "$opt" in
c)
echo "$COPYRIGHT"
exit 0
;;
+ d)
+ builddir=$OPTARG
+ ;;
r)
include_metadata=no
;;
@@ -187,7 +191,7 @@ fi
touch version
v=$(cat version)
u=${USER:-root}
-d=$(pwd)
+d=$builddir
h=${HOSTNAME:-$(hostname)}
if [ -n "$SOURCE_DATE_EPOCH" ]; then
if ! t=$(date -ur $SOURCE_DATE_EPOCH 2>/dev/null); then
diff --git a/sys/conf/options b/sys/conf/options
index a637b0b74a77..0b795a8d28fb 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -53,7 +53,7 @@ DDB_CAPTURE_MAXBUFSIZE opt_ddb.h
DDB_CTF opt_ddb.h
DDB_NUMSYM opt_ddb.h
EARLY_PRINTF opt_global.h
-BLOAT_KERNEL_WITH_EXTERR opt_global.h
+EXTERR_STRINGS opt_global.h
FULL_BUF_TRACKING opt_global.h
GDB
KDB opt_global.h
@@ -231,15 +231,11 @@ SYSVSEM opt_sysvipc.h
SYSVSHM opt_sysvipc.h
SW_WATCHDOG opt_watchdog.h
TCPHPTS
+TCP_HPTS_KTEST opt_inet.h
TCP_REQUEST_TRK opt_global.h
TCP_ACCOUNTING opt_global.h
TCP_BBR opt_inet.h
TCP_RACK opt_inet.h
-#
-# TCP SaD Detection is an experimental Sack attack Detection (SaD)
-# algorithm that uses "normal" behaviour with SACK's to detect
-# a possible attack. It is strictly experimental at this point.
-#
TURNSTILE_PROFILING
UMTX_PROFILING
UMTX_CHAINS opt_global.h
@@ -1009,6 +1005,7 @@ IICHID_DEBUG opt_hid.h
IICHID_SAMPLING opt_hid.h
HKBD_DFLT_KEYMAP opt_hkbd.h
HIDRAW_MAKE_UHID_ALIAS opt_hid.h
+U2F_DROP_UHID_ALIAS opt_hid.h
# kenv options
# The early kernel environment (loader environment, config(8)-provided static)
diff --git a/sys/conf/std.debug b/sys/conf/std.debug
index f5ed5582c78d..0149779b3e5c 100644
--- a/sys/conf/std.debug
+++ b/sys/conf/std.debug
@@ -16,3 +16,4 @@ options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default
options ALT_BREAK_TO_DEBUGGER # Enter debugger on keyboard escape sequence
options KDTRACE_MIB_SDT # Add SDT probes to network counters
+options TCP_HPTS_KTEST # Add KTEST support for HPTS
diff --git a/sys/conf/std.nodebug b/sys/conf/std.nodebug
index 4035e28d2a62..79676a1d618f 100644
--- a/sys/conf/std.nodebug
+++ b/sys/conf/std.nodebug
@@ -16,6 +16,7 @@ nooptions KCOV
nooptions MALLOC_DEBUG_MAXZONES
nooptions QUEUE_MACRO_DEBUG_TRASH
nooptions KDTRACE_MIB_SDT
+nooptions TCP_HPTS_KTEST
# Net80211 debugging
nooptions IEEE80211_DEBUG
diff --git a/sys/contrib/dev/acpica/changes.txt b/sys/contrib/dev/acpica/changes.txt
index 435540b254f1..4e3cf4f2f41c 100644
--- a/sys/contrib/dev/acpica/changes.txt
+++ b/sys/contrib/dev/acpica/changes.txt
@@ -1,11 +1,29 @@
----------------------------------------
+7 August 2025. Summary of changes for version 20250807:
+
+Major changes:
+
+Added option to skip the global lock for SMM - Huacai Chen
+
+Fixed non-NUL terminated string implementations - Ahmed Salem
+
+Fixed CCEL and CDAT templates - Ahmed Salem
+
+Fixed a major Linux kernel bug (UAF) that was triggered by an unequal number of method parameters (definition) vs. arguments (invocation) in different places - Peter Williams, Hans de Goede, Rafael Wysocki
+
+Define distinct D3 states (D3Hot and D3Cold) that help clarify the device behavior support - Aymeric Wibo
+
+A few cleanups, improvements to existing table support, small fixes, spelling corrections, etc.
+
+
+----------------------------------------
4 April 2025. Summary of changes for version 20250404:
Major changes:
Update all the copyright continuation year to 2025 in the license header of all files
-Add complete support for 3 new ACPI tables ? MRRM,ERDT and RIMT (Tony Luck & V L Sunil)
+Add complete support for 3 new ACPI tables - MRRM,ERDT and RIMT (Tony Luck & V L Sunil)
Add a license file to the project which is a great improvement (Dionna Glaze)
@@ -21,11 +39,11 @@ Major changes:
Fix 2 critical CVE addressing memory leaks - Seunghun Han
-EINJ V2 updates ? Zaid Alali (Ampere Computing)
+EINJ V2 updates - Zaid Alali (Ampere Computing)
-CDAT updates ? Ira Weiny (Intel Corporation)
+CDAT updates - Ira Weiny (Intel Corporation)
-Fix mutex handling, don?t release ones that were never acquired ? Daniil Tatianin
+Fix mutex handling, do not release ones that were never acquired - Daniil Tatianin
Experiment with new tag name format Ryyyy_mm_dd to solve chronological sorting problems
@@ -39,7 +57,7 @@ Fix the acpixf.h file which caused issues for the last release (before this) 202
Fix the pointer offset for the SLIC table
-Verify the local environment and GitHub commits are all in sync which was a problem with the second from last release (before this)20240322 (aka 20240323 – date issue)
+Verify the local environment and GitHub commits are all in sync which was a problem with the second from last release (before this)20240322 (aka 20240323 - date issue)
diff --git a/sys/contrib/dev/acpica/common/adisasm.c b/sys/contrib/dev/acpica/common/adisasm.c
index 96cd6c7f5d3c..83125098cbd1 100644
--- a/sys/contrib/dev/acpica/common/adisasm.c
+++ b/sys/contrib/dev/acpica/common/adisasm.c
@@ -481,12 +481,12 @@ AdDisassembleOneTable (
"FieldName : FieldValue (in hex)\n */\n\n");
AcpiDmDumpDataTable (Table);
- fprintf (stderr, "Acpi Data Table [%4.4s] decoded\n",
+ fprintf (stdout, "Acpi Data Table [%4.4s] decoded\n",
AcpiGbl_CDAT ? (char *) AcpiGbl_CDAT : Table->Signature);
if (File)
{
- fprintf (stderr, "Formatted output: %s - %u bytes\n",
+ fprintf (stdout, "Formatted output: %s - %u bytes\n",
DisasmFilename, CmGetFileSize (File));
}
@@ -584,16 +584,16 @@ AdDisassembleOneTable (
AcpiDmDumpDataTable (Table);
- fprintf (stderr, "Disassembly completed\n");
+ fprintf (stdout, "Disassembly completed\n");
if (File)
{
- fprintf (stderr, "ASL Output: %s - %u bytes\n",
+ fprintf (stdout, "ASL Output: %s - %u bytes\n",
DisasmFilename, CmGetFileSize (File));
}
if (AslGbl_MapfileFlag)
{
- fprintf (stderr, "%14s %s - %u bytes\n",
+ fprintf (stdout, "%14s %s - %u bytes\n",
AslGbl_FileDescs[ASL_FILE_MAP_OUTPUT].ShortDescription,
AslGbl_Files[ASL_FILE_MAP_OUTPUT].Filename,
FlGetFileSize (ASL_FILE_MAP_OUTPUT));
@@ -630,7 +630,7 @@ AdReparseOneTable (
ACPI_COMMENT_ADDR_NODE *AddrListHead;
- fprintf (stderr,
+ fprintf (stdout,
"\nFound %u external control methods, "
"reparsing with new information\n",
AcpiDmGetUnresolvedExternalMethodCount ());
diff --git a/sys/contrib/dev/acpica/common/ahtable.c b/sys/contrib/dev/acpica/common/ahtable.c
index 898b2d09f609..587bf61016f0 100644
--- a/sys/contrib/dev/acpica/common/ahtable.c
+++ b/sys/contrib/dev/acpica/common/ahtable.c
@@ -265,6 +265,7 @@ const AH_TABLE AcpiGbl_SupportedTables[] =
{ACPI_SIG_SSDT, "Secondary System Description Table (AML table)"},
{ACPI_SIG_STAO, "Status Override Table"},
{ACPI_SIG_SVKL, "Storage Volume Key Location Table"},
+ {ACPI_SIG_SWFT, "SoundWire File Table"},
{ACPI_SIG_TCPA, "Trusted Computing Platform Alliance Table"},
{ACPI_SIG_TDEL, "TD-Event Log Table"},
{ACPI_SIG_TPM2, "Trusted Platform Module hardware interface Table"},
diff --git a/sys/contrib/dev/acpica/common/dmtable.c b/sys/contrib/dev/acpica/common/dmtable.c
index fcff97a304ae..702f4f7965e4 100644
--- a/sys/contrib/dev/acpica/common/dmtable.c
+++ b/sys/contrib/dev/acpica/common/dmtable.c
@@ -721,6 +721,7 @@ const ACPI_DMTABLE_DATA AcpiDmTableData[] =
{ACPI_SIG_SRAT, NULL, AcpiDmDumpSrat, DtCompileSrat, TemplateSrat},
{ACPI_SIG_STAO, NULL, AcpiDmDumpStao, DtCompileStao, TemplateStao},
{ACPI_SIG_SVKL, AcpiDmTableInfoSvkl, AcpiDmDumpSvkl, DtCompileSvkl, TemplateSvkl},
+ {ACPI_SIG_SWFT, NULL, NULL, NULL, NULL},
{ACPI_SIG_TCPA, NULL, AcpiDmDumpTcpa, DtCompileTcpa, TemplateTcpa},
{ACPI_SIG_TDEL, AcpiDmTableInfoTdel, NULL, NULL, TemplateTdel},
{ACPI_SIG_TPM2, AcpiDmTableInfoTpm2, AcpiDmDumpTpm2, DtCompileTpm2, TemplateTpm2},
diff --git a/sys/contrib/dev/acpica/common/dmtbdump2.c b/sys/contrib/dev/acpica/common/dmtbdump2.c
index 822920d2ea94..d29a60be0f67 100644
--- a/sys/contrib/dev/acpica/common/dmtbdump2.c
+++ b/sys/contrib/dev/acpica/common/dmtbdump2.c
@@ -2637,7 +2637,7 @@ AcpiDmDumpRhct (
RhctIsaString, RhctIsaString->IsaLength, AcpiDmTableInfoRhctIsa1);
if (Subtable->Length > IsaPadOffset)
{
- Status = AcpiDmDumpTable (Table->Length, Offset + SubtableOffset,
+ Status = AcpiDmDumpTable (Table->Length, Offset + IsaPadOffset,
ACPI_ADD_PTR (UINT8, Subtable, IsaPadOffset),
(Subtable->Length - IsaPadOffset), AcpiDmTableInfoRhctIsaPad);
}
diff --git a/sys/contrib/dev/acpica/common/dmtbinfo2.c b/sys/contrib/dev/acpica/common/dmtbinfo2.c
index 9ecf877fcfb0..b7c6d3b8d536 100644
--- a/sys/contrib/dev/acpica/common/dmtbinfo2.c
+++ b/sys/contrib/dev/acpica/common/dmtbinfo2.c
@@ -2180,7 +2180,7 @@ ACPI_DMTABLE_INFO AcpiDmTableInfoRhct[] =
ACPI_DMTABLE_INFO AcpiDmTableInfoRhctNodeHdr[] =
{
{ACPI_DMT_RHCT, ACPI_RHCTH_OFFSET (Type), "Subtable Type", 0},
- {ACPI_DMT_UINT16, ACPI_RHCTH_OFFSET (Length), "Length", 0},
+ {ACPI_DMT_UINT16, ACPI_RHCTH_OFFSET (Length), "Length", DT_LENGTH},
{ACPI_DMT_UINT16, ACPI_RHCTH_OFFSET (Revision), "Revision", 0},
ACPI_DMT_TERMINATOR
};
diff --git a/sys/contrib/dev/acpica/common/dmtbinfo3.c b/sys/contrib/dev/acpica/common/dmtbinfo3.c
index 75b580e0d890..0935fc86aff9 100644
--- a/sys/contrib/dev/acpica/common/dmtbinfo3.c
+++ b/sys/contrib/dev/acpica/common/dmtbinfo3.c
@@ -200,7 +200,7 @@ ACPI_DMTABLE_INFO AcpiDmTableInfoCcel[] =
{
{ACPI_DMT_UINT8, ACPI_CCEL_OFFSET (CCType), "CC Type", 0},
{ACPI_DMT_UINT8, ACPI_CCEL_OFFSET (CCSubType), "CC Sub Type", 0},
- {ACPI_DMT_UINT32, ACPI_CCEL_OFFSET (Reserved), "Reserved", 0},
+ {ACPI_DMT_UINT16, ACPI_CCEL_OFFSET (Reserved), "Reserved", 0},
{ACPI_DMT_UINT64, ACPI_CCEL_OFFSET (LogAreaMinimumLength), "Log Area Minimum Length", 0},
{ACPI_DMT_UINT64, ACPI_CCEL_OFFSET (LogAreaStartAddress), "Log Area Start Address", 0},
ACPI_DMT_TERMINATOR
diff --git a/sys/contrib/dev/acpica/compiler/aslanalyze.c b/sys/contrib/dev/acpica/compiler/aslanalyze.c
index 17e2674817a9..625611a630de 100644
--- a/sys/contrib/dev/acpica/compiler/aslanalyze.c
+++ b/sys/contrib/dev/acpica/compiler/aslanalyze.c
@@ -572,10 +572,22 @@ ApCheckForGpeNameConflict (
ACPI_PARSE_OBJECT *NextOp;
UINT32 GpeNumber;
char Name[ACPI_NAMESEG_SIZE + 1];
- char Target[ACPI_NAMESEG_SIZE];
+ char Target[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
- /* Need a null-terminated string version of NameSeg */
+ /**
+ * Need a null-terminated string version of NameSeg
+ *
+ * NOTE: during a review on Name[ACPI_NAMESEG_SIZE + 1] having an extra
+ * byte[1], compiler testing exhibited a difference in behavior between
+ * GCC and Clang[2] (at least; MSVC may also exhibit the same) in
+ * how optimization is done. The extra byte is needed to ensure
+ * the signature does not get mangled, subsequently avoiding
+ * GpeNumber being a completely different return value from strtoul.
+ *
+ * [1] https://github.com/acpica/acpica/pull/1019#discussion_r2058687704
+ * [2] https://github.com/acpica/acpica/pull/1019#discussion_r2061953039
+ */
ACPI_MOVE_32_TO_32 (Name, Op->Asl.NameSeg);
Name[ACPI_NAMESEG_SIZE] = 0;
diff --git a/sys/contrib/dev/acpica/compiler/aslrestype2s.c b/sys/contrib/dev/acpica/compiler/aslrestype2s.c
index 096862290384..f47402d4e025 100644
--- a/sys/contrib/dev/acpica/compiler/aslrestype2s.c
+++ b/sys/contrib/dev/acpica/compiler/aslrestype2s.c
@@ -1469,7 +1469,7 @@ RsDoCsi2SerialBusDescriptor (
case 2: /* Local Port Instance [Integer] (_PRT) */
- RsSetFlagBits16 ((UINT16 *) &Descriptor->Csi2SerialBus.TypeSpecificFlags, InitializerOp, 0, 0);
+ RsSetFlagBits16 ((UINT16 *) &Descriptor->Csi2SerialBus.TypeSpecificFlags, InitializerOp, 2, 0);
RsCreateMultiBitField (InitializerOp, ACPI_RESTAG_LOCALPORT,
CurrentByteOffset + ASL_RESDESC_OFFSET (Csi2SerialBus.TypeSpecificFlags), 2, 6);
break;
diff --git a/sys/contrib/dev/acpica/compiler/dttable2.c b/sys/contrib/dev/acpica/compiler/dttable2.c
index 6203a382ad62..754880346299 100644
--- a/sys/contrib/dev/acpica/compiler/dttable2.c
+++ b/sys/contrib/dev/acpica/compiler/dttable2.c
@@ -1929,24 +1929,30 @@ DtCompileRhct (
{
ACPI_STATUS Status;
ACPI_RHCT_NODE_HEADER *RhctHeader;
- ACPI_RHCT_HART_INFO *RhctHartInfo = NULL;
+ ACPI_RHCT_HART_INFO *RhctHartInfo;
DT_SUBTABLE *Subtable;
DT_SUBTABLE *ParentTable;
ACPI_DMTABLE_INFO *InfoTable;
DT_FIELD **PFieldList = (DT_FIELD **) List;
DT_FIELD *SubtableStart;
+ ACPI_TABLE_RHCT *Table;
+ BOOLEAN FirstNode = TRUE;
/* Compile the main table */
+ ParentTable = DtPeekSubtable ();
Status = DtCompileTable (PFieldList, AcpiDmTableInfoRhct,
&Subtable);
if (ACPI_FAILURE (Status))
{
return (Status);
}
+ DtInsertSubtable (ParentTable, Subtable);
+ Table = ACPI_CAST_PTR (ACPI_TABLE_RHCT, ParentTable->Buffer);
+ Table->NodeCount = 0;
+ Table->NodeOffset = sizeof (ACPI_TABLE_RHCT);
- ParentTable = DtPeekSubtable ();
while (*PFieldList)
{
SubtableStart = *PFieldList;
@@ -1961,7 +1967,10 @@ DtCompileRhct (
}
DtInsertSubtable (ParentTable, Subtable);
RhctHeader = ACPI_CAST_PTR (ACPI_RHCT_NODE_HEADER, Subtable->Buffer);
- RhctHeader->Length = (UINT16)(Subtable->Length);
+
+ DtPushSubtable (Subtable);
+ ParentTable = DtPeekSubtable ();
+ Table->NodeCount++;
switch (RhctHeader->Type)
{
@@ -1999,37 +2008,54 @@ DtCompileRhct (
return (Status);
}
DtInsertSubtable (ParentTable, Subtable);
- RhctHeader->Length += (UINT16)(Subtable->Length);
+ if (FirstNode)
+ {
+ Table->NodeOffset = ACPI_PTR_DIFF(ParentTable->Buffer, Table);
+ FirstNode = FALSE;
+ }
/* Compile RHCT subtable additionals */
switch (RhctHeader->Type)
{
- case ACPI_RHCT_NODE_TYPE_HART_INFO:
+ case ACPI_RHCT_NODE_TYPE_ISA_STRING:
- RhctHartInfo = ACPI_SUB_PTR (ACPI_RHCT_HART_INFO,
- Subtable->Buffer, sizeof (ACPI_RHCT_NODE_HEADER));
- if (RhctHartInfo)
+ /*
+ * Padding - Variable-length data
+ * Optionally allows the padding of the ISA string to be used
+ * for filling this field.
+ */
+ Status = DtCompileTable (PFieldList, AcpiDmTableInfoRhctIsaPad,
+ &Subtable);
+ if (ACPI_FAILURE (Status))
+ {
+ return (Status);
+ }
+ if (Subtable)
{
+ DtInsertSubtable (ParentTable, Subtable);
+ }
+ break;
- RhctHartInfo->NumOffsets = 0;
- while (*PFieldList)
- {
- Status = DtCompileTable (PFieldList,
- AcpiDmTableInfoRhctHartInfo2, &Subtable);
- if (ACPI_FAILURE (Status))
- {
- return (Status);
- }
- if (!Subtable)
- {
- break;
- }
+ case ACPI_RHCT_NODE_TYPE_HART_INFO:
- DtInsertSubtable (ParentTable, Subtable);
- RhctHeader->Length += (UINT16)(Subtable->Length);
- RhctHartInfo->NumOffsets++;
+ RhctHartInfo = ACPI_CAST_PTR (ACPI_RHCT_HART_INFO,
+ Subtable->Buffer);
+ RhctHartInfo->NumOffsets = 0;
+ while (*PFieldList)
+ {
+ Status = DtCompileTable (PFieldList,
+ AcpiDmTableInfoRhctHartInfo2, &Subtable);
+ if (ACPI_FAILURE (Status))
+ {
+ return (Status);
}
+ if (!Subtable)
+ {
+ break;
+ }
+ DtInsertSubtable (ParentTable, Subtable);
+ RhctHartInfo->NumOffsets++;
}
break;
@@ -2037,6 +2063,9 @@ DtCompileRhct (
break;
}
+
+ DtPopSubtable ();
+ ParentTable = DtPeekSubtable ();
}
return (AE_OK);
diff --git a/sys/contrib/dev/acpica/compiler/dttemplate.c b/sys/contrib/dev/acpica/compiler/dttemplate.c
index 67b13bb82d1b..d7140712d4e6 100644
--- a/sys/contrib/dev/acpica/compiler/dttemplate.c
+++ b/sys/contrib/dev/acpica/compiler/dttemplate.c
@@ -255,7 +255,7 @@ DtCreateTemplates (
if (AcpiGbl_Optind < 3)
{
- fprintf (stderr, "Creating default template: [DSDT]\n");
+ fprintf (stdout, "Creating default template: [DSDT]\n");
Status = DtCreateOneTemplateFile (ACPI_SIG_DSDT, 0);
goto Exit;
}
@@ -411,7 +411,7 @@ DtCreateAllTemplates (
ACPI_STATUS Status;
- fprintf (stderr, "Creating all supported Template files\n");
+ fprintf (stdout, "Creating all supported Template files\n");
/* Walk entire ACPI table data structure */
@@ -421,8 +421,13 @@ DtCreateAllTemplates (
if (TableData->Template)
{
- Status = DtCreateOneTemplate (TableData->Signature,
- 0, TableData);
+ if (ACPI_COMPARE_NAMESEG (TableData->Signature, ACPI_SIG_CDAT))
+ /* Special handling of CDAT */
+ Status = DtCreateOneTemplate (TableData->Signature,
+ 0, NULL);
+ else
+ Status = DtCreateOneTemplate (TableData->Signature,
+ 0, TableData);
if (ACPI_FAILURE (Status))
{
return (Status);
@@ -563,7 +568,7 @@ DtCreateOneTemplate (
}
else
{
- /* Special ACPI tables - DSDT, SSDT, OSDT, FACS, RSDP */
+ /* Special ACPI tables - DSDT, SSDT, OSDT, FACS, RSDP, CDAT */
AcpiOsPrintf (" (AML byte code table)\n");
AcpiOsPrintf (" */\n");
@@ -621,6 +626,11 @@ DtCreateOneTemplate (
AcpiDmDumpDataTable (ACPI_CAST_PTR (ACPI_TABLE_HEADER,
TemplateRsdp));
}
+ else if (ACPI_COMPARE_NAMESEG (Signature, ACPI_SIG_CDAT))
+ {
+ AcpiDmDumpCdat (ACPI_CAST_PTR (ACPI_TABLE_HEADER,
+ TemplateCdat));
+ }
else
{
fprintf (stderr,
@@ -632,14 +642,14 @@ DtCreateOneTemplate (
if (TableCount == 0)
{
- fprintf (stderr,
+ fprintf (stdout,
"Created ACPI table template for [%4.4s], "
"written to \"%s\"\n",
Signature, DisasmFilename);
}
else
{
- fprintf (stderr,
+ fprintf (stdout,
"Created ACPI table templates for [%4.4s] "
"and %u [SSDT] in same file, written to \"%s\"\n",
Signature, TableCount, DisasmFilename);
diff --git a/sys/contrib/dev/acpica/compiler/dttemplate.h b/sys/contrib/dev/acpica/compiler/dttemplate.h
index 0fdd90f73a23..51a34be5c36b 100644
--- a/sys/contrib/dev/acpica/compiler/dttemplate.h
+++ b/sys/contrib/dev/acpica/compiler/dttemplate.h
@@ -389,7 +389,7 @@ const unsigned char TemplateBoot[] =
const unsigned char TemplateCcel[] =
{
0x43,0x43,0x45,0x4C,0x38,0x00,0x00,0x00, /* 00000000 "CCEL8..." */
- 0x04,0x1C,0x49,0x4E,0x54,0x45,0x4C,0x20, /* 00000008 "..INTEL " */
+ 0x04,0x2E,0x49,0x4E,0x54,0x45,0x4C,0x20, /* 00000008 "..INTEL " */
0x54,0x65,0x6D,0x70,0x6C,0x61,0x74,0x65, /* 00000010 "Template" */
0x00,0x00,0x00,0x00,0x49,0x4E,0x54,0x4C, /* 00000018 "....INTL" */
0x30,0x09,0x21,0x20,0x00,0x00,0x00,0x00, /* 00000020 "0.! ...." */
@@ -1951,25 +1951,25 @@ const unsigned char TemplateRgrt[] =
const unsigned char TemplateRhct[] =
{
- 0x52,0x48,0x43,0x54,0x96,0x00,0x00,0x00, /* 00000000 "RHCT|..." */
- 0x01,0x24,0x4F,0x45,0x4D,0x43,0x41,0x00, /* 00000008 "..OEMCA." */
+ 0x52,0x48,0x43,0x54,0x96,0x00,0x00,0x00, /* 00000000 "RHCT...." */
+ 0x01,0x6D,0x4F,0x45,0x4D,0x43,0x41,0x00, /* 00000008 ".mOEMCA." */
0x54,0x45,0x4D,0x50,0x4C,0x41,0x54,0x45, /* 00000010 "TEMPLATE" */
0x01,0x00,0x00,0x00,0x49,0x4E,0x54,0x4C, /* 00000018 "....INTL" */
- 0x28,0x09,0x22,0x20,0x00,0x00,0x00,0x00, /* 00000020 "... ...." */
+ 0x04,0x04,0x25,0x20,0x00,0x00,0x00,0x00, /* 00000020 "..% ...." */
0x80,0x96,0x98,0x00,0x00,0x00,0x00,0x00, /* 00000028 "........" */
- 0x02,0x00,0x00,0x00,0x38,0x00,0x00,0x00, /* 00000030 "....8..." */
- 0x00,0x00,0x34,0x00,0x01,0x00,0x2B,0x00, /* 00000038 "..4...*." */
+ 0x04,0x00,0x00,0x00,0x38,0x00,0x00,0x00, /* 00000030 "....8..." */
+ 0x00,0x00,0x34,0x00,0x01,0x00,0x2B,0x00, /* 00000038 "..4...+." */
0x72,0x76,0x36,0x34,0x69,0x6D,0x61,0x66, /* 00000040 "rv64imaf" */
0x64,0x63,0x68,0x5F,0x7A,0x69,0x63,0x73, /* 00000048 "dch_zics" */
0x72,0x5F,0x7A,0x69,0x66,0x65,0x6E,0x63, /* 00000050 "r_zifenc" */
0x65,0x69,0x5F,0x7A,0x62,0x61,0x5F,0x7A, /* 00000058 "ei_zba_z" */
0x62,0x62,0x5F,0x7A,0x62,0x63,0x5F,0x7A, /* 00000060 "bb_zbc_z" */
- 0x62,0x73,0x00,0x00,0xFF,0xFF,0x18,0x00, /* 00000068 "bs......" */
- 0x01,0x00,0x03,0x00,0x00,0x00,0x00,0x00, /* 00000070 "........" */
- 0x38,0x00,0x00,0x00,0x7c,0x00,0x00,0x00, /* 00000078 "........" */
- 0x8E,0x00,0x00,0x00,0x01,0x00,0x0A,0x00, /* 00000080 "........" */
- 0x01,0x00,0x00,0x06,0x06,0x06,0x02,0x00, /* 00000088 "........" */
- 0x08,0x00,0x01,0x00,0x00,0x02 /* 00000090 "........" */
+ 0x62,0x73,0x00,0x00,0x01,0x00,0x0A,0x00, /* 00000068 "bs......" */
+ 0x01,0x00,0x00,0x06,0x06,0x06,0x02,0x00, /* 00000070 "........" */
+ 0x08,0x00,0x01,0x00,0x00,0x02,0xFF,0xFF, /* 00000078 "........" */
+ 0x18,0x00,0x01,0x00,0x03,0x00,0x00,0x00, /* 00000080 "........" */
+ 0x00,0x00,0x3B,0x00,0x00,0x00,0x6C,0x00, /* 00000088 "..;...l." */
+ 0x00,0x00,0x76,0x00,0x00,0x00 /* 00000090 "..v..." */
};
const unsigned char TemplateRimt[] =
diff --git a/sys/contrib/dev/acpica/compiler/dtutils.c b/sys/contrib/dev/acpica/compiler/dtutils.c
index f2463f74b8fc..18ea18cefdd6 100644
--- a/sys/contrib/dev/acpica/compiler/dtutils.c
+++ b/sys/contrib/dev/acpica/compiler/dtutils.c
@@ -623,6 +623,7 @@ DtGetFieldLength (
case ACPI_DMT_NFIT:
case ACPI_DMT_PCI_PATH:
case ACPI_DMT_PHAT:
+ case ACPI_DMT_RHCT:
ByteLength = 2;
break;
diff --git a/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c b/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c
index dd8cf4889885..551cf8178d94 100644
--- a/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c
+++ b/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c
@@ -778,7 +778,7 @@ AcpiDmCsi2SerialBusDescriptor (
AcpiOsPrintf (" 0x%2.2X, 0x%2.2X,\n",
Resource->Csi2SerialBus.TypeSpecificFlags & 0x03,
- Resource->Csi2SerialBus.TypeSpecificFlags & 0xFC);
+ (Resource->Csi2SerialBus.TypeSpecificFlags & 0xFC) >> 2);
/* ResourceSource is a required field */
diff --git a/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c b/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c
index 8b6efc070b1b..becdb95f8b83 100644
--- a/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c
+++ b/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c
@@ -646,8 +646,6 @@ AcpiDsCallControlMethod (
ACPI_WALK_STATE *NextWalkState = NULL;
ACPI_OPERAND_OBJECT *ObjDesc;
ACPI_EVALUATE_INFO *Info;
- UINT32 i;
-
ACPI_FUNCTION_TRACE_PTR (DsCallControlMethod, ThisWalkState);
@@ -670,6 +668,23 @@ AcpiDsCallControlMethod (
return_ACPI_STATUS (AE_NULL_OBJECT);
}
+ if (ThisWalkState->NumOperands < ObjDesc->Method.ParamCount)
+ {
+ ACPI_ERROR ((AE_INFO, "Missing argument(s) for method [%4.4s]",
+ AcpiUtGetNodeName (MethodNode)));
+
+ return_ACPI_STATUS (AE_AML_TOO_FEW_ARGUMENTS);
+ }
+
+ else if (ThisWalkState->NumOperands > ObjDesc->Method.ParamCount)
+ {
+ ACPI_ERROR ((AE_INFO, "Too many arguments for method [%4.4s]",
+ AcpiUtGetNodeName (MethodNode)));
+
+ return_ACPI_STATUS (AE_AML_TOO_MANY_ARGUMENTS);
+ }
+
+
/* Init for new method, possibly wait on method mutex */
Status = AcpiDsBeginMethodExecution (
@@ -726,15 +741,7 @@ AcpiDsCallControlMethod (
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < ObjDesc->Method.ParamCount; i++)
- {
- AcpiUtRemoveReference (ThisWalkState->Operands [i]);
- ThisWalkState->Operands [i] = NULL;
- }
-
- /* Clear the operand stack */
-
- ThisWalkState->NumOperands = 0;
+ AcpiDsClearOperands (ThisWalkState);
ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c b/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c
index 42e1aa505d02..2c45e8c91f57 100644
--- a/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c
+++ b/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c
@@ -357,6 +357,7 @@ AcpiDsMethodDataInitArgs (
Index++;
}
+ AcpiExTraceArgs(Params, Index);
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%u args passed to method\n", Index));
return_ACPI_STATUS (AE_OK);
diff --git a/sys/contrib/dev/acpica/components/events/evglock.c b/sys/contrib/dev/acpica/components/events/evglock.c
index 872e7b499a8f..395ca14fb315 100644
--- a/sys/contrib/dev/acpica/components/events/evglock.c
+++ b/sys/contrib/dev/acpica/components/events/evglock.c
@@ -195,6 +195,11 @@ AcpiEvInitGlobalLockHandler (
return_ACPI_STATUS (AE_OK);
}
+ if (!AcpiGbl_UseGlobalLock)
+ {
+ return_ACPI_STATUS (AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
Status = AcpiInstallFixedEventHandler (ACPI_EVENT_GLOBAL,
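
The early return above is keyed by the new AcpiGbl_UseGlobalLock global, declared further down in acpixf.h. A hedged sketch of how an SMM-less host might consume it before event initialization; platform_acpi_early_init and have_smm are hypothetical names, not ACPICA interfaces:

#include <contrib/dev/acpica/include/acpi.h>

/* Hypothetical host hook, run before ACPICA installs event handlers. */
void
platform_acpi_early_init(BOOLEAN have_smm)
{
	if (!have_smm) {
		/* No SMM means no firmware side to arbitrate with. */
		AcpiGbl_UseGlobalLock = FALSE;
	}
}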
diff --git a/sys/contrib/dev/acpica/components/executer/extrace.c b/sys/contrib/dev/acpica/components/executer/extrace.c
index 0eceb0ffccb1..b48a5fcb289b 100644
--- a/sys/contrib/dev/acpica/components/executer/extrace.c
+++ b/sys/contrib/dev/acpica/components/executer/extrace.c
@@ -269,6 +269,68 @@ AcpiExGetTraceEventName (
#endif
+/*******************************************************************************
+ *
+ * FUNCTION: AcpiExTraceArgs
+ *
+ * PARAMETERS: Params - AML method arguments
+ * Count - numer of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace any arguments
+ *
+ ******************************************************************************/
+
+void
+AcpiExTraceArgs(ACPI_OPERAND_OBJECT **Params, UINT32 Count)
+{
+ UINT32 i;
+
+ ACPI_FUNCTION_NAME(ExTraceArgs);
+
+ for (i = 0; i < Count; i++)
+ {
+ ACPI_OPERAND_OBJECT *obj_desc = Params[i];
+
+ if (!i)
+ {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->Common.Type)
+ {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%jx", (uintmax_t)obj_desc->Integer.Value));
+ break;
+
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->String.Length)
+ {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ break;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ {
+ AcpiUtPrintString(obj_desc->String.Pointer, ACPI_UINT8_MAX);
+ }
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+
+ if ((i + 1) == Count)
+ {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ }
+ else
+ {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
/*******************************************************************************
*
@@ -299,9 +361,9 @@ AcpiExTracePoint (
if (Pathname)
{
ACPI_DEBUG_PRINT ((ACPI_DB_TRACE_POINT,
- "%s %s [0x%p:%s] execution.\n",
+ "%s %s [%s] execution.\n",
AcpiExGetTraceEventName (Type), Begin ? "Begin" : "End",
- Aml, Pathname));
+ Pathname));
}
else
{
diff --git a/sys/contrib/dev/acpica/components/parser/psopinfo.c b/sys/contrib/dev/acpica/components/parser/psopinfo.c
index 21c2b831ef24..1db32f4e8246 100644
--- a/sys/contrib/dev/acpica/components/parser/psopinfo.c
+++ b/sys/contrib/dev/acpica/components/parser/psopinfo.c
@@ -180,8 +180,8 @@ const ACPI_OPCODE_INFO *
AcpiPsGetOpcodeInfo (
UINT16 Opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
- const char *OpcodeName = "Unknown AML opcode";
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
+ const char *OpcodeName = "Unknown AML opcode";
#endif
ACPI_FUNCTION_NAME (PsGetOpcodeInfo);
@@ -207,7 +207,7 @@ AcpiPsGetOpcodeInfo (
#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
#include <contrib/dev/acpica/compiler/asldefine.h>
-
+
switch (Opcode)
{
case AML_RAW_DATA_BYTE:
@@ -249,12 +249,12 @@ AcpiPsGetOpcodeInfo (
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"%s [%4.4X]\n", OpcodeName, Opcode));
+#endif
return (&AcpiGbl_AmlOpInfo [_UNK]);
}
diff --git a/sys/contrib/dev/acpica/components/tables/tbprint.c b/sys/contrib/dev/acpica/components/tables/tbprint.c
index 7211673c42a2..8b812a890a07 100644
--- a/sys/contrib/dev/acpica/components/tables/tbprint.c
+++ b/sys/contrib/dev/acpica/components/tables/tbprint.c
@@ -279,6 +279,14 @@ AcpiTbPrintTableHeader (
ACPI_CAST_PTR (ACPI_TABLE_RSDP, Header)->Revision,
LocalHeader.OemId));
}
+ else if (AcpiGbl_CDAT && !AcpiUtValidNameseg (Header->Signature))
+ {
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO (("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64 (Address),
+ ACPI_CAST_PTR (ACPI_TABLE_CDAT, Header)->Length));
+ }
else
{
/* Standard ACPI table with full common header */
diff --git a/sys/contrib/dev/acpica/components/utilities/utnonansi.c b/sys/contrib/dev/acpica/components/utilities/utnonansi.c
index bfbe1194ceae..f8b3a29e3283 100644
--- a/sys/contrib/dev/acpica/components/utilities/utnonansi.c
+++ b/sys/contrib/dev/acpica/components/utilities/utnonansi.c
@@ -353,7 +353,7 @@ AcpiUtSafeStrncpy (
{
/* Always terminate destination string */
- memcpy (Dest, Source, DestSize);
+ strncpy (Dest, Source, DestSize);
Dest[DestSize - 1] = 0;
}
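
The memcpy -> strncpy swap matters because memcpy unconditionally copies DestSize bytes and may read past the end of a shorter Source string, while strncpy stops at the terminator and zero-fills the remainder. A stand-alone sketch of the resulting pattern (not the ACPICA routine itself):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char src[] = "abc";	/* 4 bytes including the NUL */
	char dst[8];

	/* Reads at most 4 bytes of src; memcpy would have read all 8. */
	strncpy(dst, src, sizeof(dst));
	dst[sizeof(dst) - 1] = '\0';	/* always terminate, as above */
	printf("%s\n", dst);
	return (0);
}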
diff --git a/sys/contrib/dev/acpica/include/acdebug.h b/sys/contrib/dev/acpica/include/acdebug.h
index e335752148b9..63f39051a8ac 100644
--- a/sys/contrib/dev/acpica/include/acdebug.h
+++ b/sys/contrib/dev/acpica/include/acdebug.h
@@ -187,7 +187,7 @@ typedef struct acpi_db_execute_walk
{
UINT32 Count;
UINT32 MaxCount;
- char NameSeg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
+ char NameSeg[ACPI_NAMESEG_SIZE + 1];
} ACPI_DB_EXECUTE_WALK;
diff --git a/sys/contrib/dev/acpica/include/acexcep.h b/sys/contrib/dev/acpica/include/acexcep.h
index 57f98ab4540f..7216e0d49148 100644
--- a/sys/contrib/dev/acpica/include/acexcep.h
+++ b/sys/contrib/dev/acpica/include/acexcep.h
@@ -322,8 +322,11 @@ typedef struct acpi_exception_info
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
+#define AE_AML_TOO_FEW_ARGUMENTS EXCEP_AML (0x0026)
+#define AE_AML_TOO_MANY_ARGUMENTS EXCEP_AML (0x0027)
-#define AE_CODE_AML_MAX 0x0025
+
+#define AE_CODE_AML_MAX 0x0027
/*
@@ -456,7 +459,9 @@ static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Aml[] =
EXCEP_TXT ("AE_AML_UNINITIALIZED_NODE", "A namespace node is uninitialized or unresolved"),
EXCEP_TXT ("AE_AML_TARGET_TYPE", "A target operand of an incorrect type was encountered"),
EXCEP_TXT ("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
- EXCEP_TXT ("AE_AML_BUFFER_LENGTH", "The length of the buffer is invalid/incorrect")
+ EXCEP_TXT ("AE_AML_BUFFER_LENGTH", "The length of the buffer is invalid/incorrect"),
+ EXCEP_TXT ("AE_AML_TOO_FEW_ARGUMENTS", "There are fewer than expected method arguments"),
+ EXCEP_TXT ("AE_AML_TOO_MANY_ARGUMENTS", "There are too many arguments for this method")
};
static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Ctrl[] =
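
Both new codes resolve through the table above via the existing public formatter, AcpiFormatException(). A hedged userland-style sketch, assuming a build against the ACPICA headers:

#include <stdio.h>
#include <contrib/dev/acpica/include/acpi.h>

int
main(void)
{
	/* AcpiFormatException() maps a status code to its symbolic name. */
	printf("%s\n", AcpiFormatException(AE_AML_TOO_FEW_ARGUMENTS));
	printf("%s\n", AcpiFormatException(AE_AML_TOO_MANY_ARGUMENTS));
	return (0);
}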
diff --git a/sys/contrib/dev/acpica/include/acinterp.h b/sys/contrib/dev/acpica/include/acinterp.h
index 74166384f172..b7f9e8f615e4 100644
--- a/sys/contrib/dev/acpica/include/acinterp.h
+++ b/sys/contrib/dev/acpica/include/acinterp.h
@@ -280,6 +280,10 @@ AcpiExTracePoint (
UINT8 *Aml,
char *Pathname);
+void
+AcpiExTraceArgs(
+ ACPI_OPERAND_OBJECT **Params,
+ UINT32 Count);
/*
* exfield - ACPI AML (p-code) execution - field manipulation
diff --git a/sys/contrib/dev/acpica/include/acpixf.h b/sys/contrib/dev/acpica/include/acpixf.h
index 193b0e6a70dc..b5961e21bb9b 100644
--- a/sys/contrib/dev/acpica/include/acpixf.h
+++ b/sys/contrib/dev/acpica/include/acpixf.h
@@ -154,7 +154,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20250404
+#define ACPI_CA_VERSION 0x20250807
#include <contrib/dev/acpica/include/acconfig.h>
#include <contrib/dev/acpica/include/actypes.h>
@@ -358,6 +358,12 @@ ACPI_INIT_GLOBAL (UINT8, AcpiGbl_OsiData, 0);
ACPI_INIT_GLOBAL (BOOLEAN, AcpiGbl_ReducedHardware, FALSE);
/*
+ * ACPI Global Lock is mainly used for systems with SMM, so no-SMM systems
+ * (such as LoongArch) may not have, and will not use, the Global Lock.
+ */
+ACPI_INIT_GLOBAL (BOOLEAN, AcpiGbl_UseGlobalLock, TRUE);
+
+/*
* Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
diff --git a/sys/contrib/dev/acpica/include/actbl.h b/sys/contrib/dev/acpica/include/actbl.h
index eafd5d8a0f8b..ae52bd452c90 100644
--- a/sys/contrib/dev/acpica/include/actbl.h
+++ b/sys/contrib/dev/acpica/include/actbl.h
@@ -220,7 +220,7 @@ typedef struct acpi_table_header
char OemId[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
char OemTableId[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
UINT32 OemRevision; /* OEM revision number */
- char AslCompilerId[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+ char AslCompilerId[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII ASL compiler vendor ID */
UINT32 AslCompilerRevision; /* ASL compiler version */
} ACPI_TABLE_HEADER;
diff --git a/sys/contrib/dev/acpica/include/actbl1.h b/sys/contrib/dev/acpica/include/actbl1.h
index 876b721068c6..ec04f0a0ab9f 100644
--- a/sys/contrib/dev/acpica/include/actbl1.h
+++ b/sys/contrib/dev/acpica/include/actbl1.h
@@ -262,7 +262,7 @@ typedef struct acpi_whea_header
/* Larger subtable header (when Length can exceed 255) */
-typedef struct acpi_subtable_header_16
+typedef struct acpi_subtbl_hdr_16
{
UINT16 Type;
UINT16 Length;
diff --git a/sys/contrib/dev/acpica/include/actbl2.h b/sys/contrib/dev/acpica/include/actbl2.h
index 4899929b2d45..a74b6d555a3a 100644
--- a/sys/contrib/dev/acpica/include/actbl2.h
+++ b/sys/contrib/dev/acpica/include/actbl2.h
@@ -201,6 +201,7 @@
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_SWFT "SWFT" /* SoundWire File Table */
#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
@@ -4094,6 +4095,30 @@ enum acpi_svkl_format
ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
};
+/*******************************************************************************
+ *
+ * SWFT - SoundWire File Table
+ * as described in Discovery and Configuration (DisCo) Specification
+ * for SoundWire®
+ * Version 1
+ *
+ ******************************************************************************/
+
+typedef struct acpi_table_swft
+{
+ ACPI_TABLE_HEADER Header; /* Common ACPI table header */
+
+} ACPI_TABLE_SWFT;
+
+typedef struct acpi_swft_file
+{
+ UINT16 VendorID;
+ UINT32 FileID;
+ UINT16 FileVersion;
+ UINT16 FileLength;
+ UINT8 FileData[];
+
+} ACPI_SWFT_FILE;
/*******************************************************************************
*
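
A hedged sketch of walking the variable-length records that would follow an ACPI_TABLE_SWFT header. The stride is an assumption: the declarations above do not say whether FileLength counts only the FileData bytes, so the fixed-fields-plus-FileLength advance below is illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the declarations above (illustration only). */
struct swft_file {
	uint16_t VendorID;
	uint32_t FileID;
	uint16_t FileVersion;
	uint16_t FileLength;	/* assumed: number of FileData bytes */
	uint8_t FileData[];
} __attribute__((packed));

static void
walk_swft_files(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct swft_file) <= len) {
		const struct swft_file *f =
		    (const struct swft_file *)(buf + off);
		size_t stride = sizeof(struct swft_file) + f->FileLength;

		if (off + stride > len)
			break;		/* truncated record */
		printf("vendor 0x%04x file 0x%08x (%u bytes)\n",
		    (unsigned)f->VendorID, (unsigned)f->FileID,
		    (unsigned)f->FileLength);
		off += stride;
	}
}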
diff --git a/sys/contrib/dev/qat/qat_402xx.bin b/sys/contrib/dev/qat/qat_402xx.bin
new file mode 100644
index 000000000000..74151547edce
--- /dev/null
+++ b/sys/contrib/dev/qat/qat_402xx.bin
Binary files differ
diff --git a/sys/contrib/dev/qat/qat_402xx_mmp.bin b/sys/contrib/dev/qat/qat_402xx_mmp.bin
new file mode 100644
index 000000000000..6404eb009d2f
--- /dev/null
+++ b/sys/contrib/dev/qat/qat_402xx_mmp.bin
Binary files differ
diff --git a/sys/contrib/dev/rtw88/main.c b/sys/contrib/dev/rtw88/main.c
index 021d076808e0..963b73f35350 100644
--- a/sys/contrib/dev/rtw88/main.c
+++ b/sys/contrib/dev/rtw88/main.c
@@ -57,6 +57,62 @@ module_param_named(support_vht, rtw_vht_support, bool, 0644);
MODULE_PARM_DESC(support_vht, "Set to Y to enable VHT support");
#endif
+#if defined(__FreeBSD__)
+/* Macros based on rtw89::core.c. */
+#define RTW88_DEF_CHAN(_freq, _hw_val, _flags, _band) \
+ { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
+#define RTW88_DEF_CHAN_2G(_freq, _hw_val) \
+ RTW88_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
+#define RTW88_DEF_CHAN_5G(_freq, _hw_val) \
+ RTW88_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
+#define RTW88_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
+ RTW88_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
+
+static struct ieee80211_channel rtw_channeltable_2g[] = {
+ RTW88_DEF_CHAN_2G(2412, 1),
+ RTW88_DEF_CHAN_2G(2417, 2),
+ RTW88_DEF_CHAN_2G(2422, 3),
+ RTW88_DEF_CHAN_2G(2427, 4),
+ RTW88_DEF_CHAN_2G(2432, 5),
+ RTW88_DEF_CHAN_2G(2437, 6),
+ RTW88_DEF_CHAN_2G(2442, 7),
+ RTW88_DEF_CHAN_2G(2447, 8),
+ RTW88_DEF_CHAN_2G(2452, 9),
+ RTW88_DEF_CHAN_2G(2457, 10),
+ RTW88_DEF_CHAN_2G(2462, 11),
+ RTW88_DEF_CHAN_2G(2467, 12),
+ RTW88_DEF_CHAN_2G(2472, 13),
+ RTW88_DEF_CHAN_2G(2484, 14),
+};
+
+static struct ieee80211_channel rtw_channeltable_5g[] = {
+ RTW88_DEF_CHAN_5G(5180, 36),
+ RTW88_DEF_CHAN_5G(5200, 40),
+ RTW88_DEF_CHAN_5G(5220, 44),
+ RTW88_DEF_CHAN_5G(5240, 48),
+ RTW88_DEF_CHAN_5G(5260, 52),
+ RTW88_DEF_CHAN_5G(5280, 56),
+ RTW88_DEF_CHAN_5G(5300, 60),
+ RTW88_DEF_CHAN_5G(5320, 64),
+ RTW88_DEF_CHAN_5G(5500, 100),
+ RTW88_DEF_CHAN_5G(5520, 104),
+ RTW88_DEF_CHAN_5G(5540, 108),
+ RTW88_DEF_CHAN_5G(5560, 112),
+ RTW88_DEF_CHAN_5G(5580, 116),
+ RTW88_DEF_CHAN_5G(5600, 120),
+ RTW88_DEF_CHAN_5G(5620, 124),
+ RTW88_DEF_CHAN_5G(5640, 128),
+ RTW88_DEF_CHAN_5G(5660, 132),
+ RTW88_DEF_CHAN_5G(5680, 136),
+ RTW88_DEF_CHAN_5G(5700, 140),
+ RTW88_DEF_CHAN_5G(5720, 144),
+ RTW88_DEF_CHAN_5G(5745, 149),
+ RTW88_DEF_CHAN_5G(5765, 153),
+ RTW88_DEF_CHAN_5G(5785, 157),
+ RTW88_DEF_CHAN_5G(5805, 161),
+ RTW88_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
+};
+#elif defined(__linux__)
static struct ieee80211_channel rtw_channeltable_2g[] = {
{.center_freq = 2412, .hw_value = 1,},
{.center_freq = 2417, .hw_value = 2,},
@@ -102,6 +158,7 @@ static struct ieee80211_channel rtw_channeltable_5g[] = {
{.center_freq = 5825, .hw_value = 165,
.flags = IEEE80211_CHAN_NO_HT40MINUS},
};
+#endif
static struct ieee80211_rate rtw_ratetable[] = {
{.bitrate = 10, .hw_value = 0x00,},
diff --git a/sys/contrib/dev/rtw89/fw.c b/sys/contrib/dev/rtw89/fw.c
index e360f27c2ade..b4c0f864bc75 100644
--- a/sys/contrib/dev/rtw89/fw.c
+++ b/sys/contrib/dev/rtw89/fw.c
@@ -908,11 +908,7 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
case RTW89_FW_ELEMENT_ID_RADIO_B:
case RTW89_FW_ELEMENT_ID_RADIO_C:
case RTW89_FW_ELEMENT_ID_RADIO_D:
-#if defined(__linux__)
rf_path = arg.rf_path;
-#elif defined(__FreeBSD__)
- rf_path = __DECONST(enum rtw89_rf_path, arg.rf_path);
-#endif
idx = elm->u.reg2.idx;
elm_info->rf_radio[idx] = tbl;
diff --git a/sys/contrib/libnv/bsd_nvpair.c b/sys/contrib/libnv/bsd_nvpair.c
index c73bc2189121..b884dd260b84 100644
--- a/sys/contrib/libnv/bsd_nvpair.c
+++ b/sys/contrib/libnv/bsd_nvpair.c
@@ -985,13 +985,13 @@ nvpair_unpack_string_array(bool isbe __unused, nvpair_t *nvp,
size = nvp->nvp_datasize;
tmp = (const char *)ptr;
for (ii = 0; ii < nvp->nvp_nitems; ii++) {
- len = strnlen(tmp, size - 1) + 1;
- size -= len;
- if (tmp[len - 1] != '\0') {
+ if (size <= 0) {
ERRNO_SET(EINVAL);
return (NULL);
}
- if (size < 0) {
+ len = strnlen(tmp, size - 1) + 1;
+ size -= len;
+ if (tmp[len - 1] != '\0') {
ERRNO_SET(EINVAL);
return (NULL);
}
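
The reordering above is the whole fix: size is a signed running byte count, and once it reaches zero the old code still evaluated strnlen(tmp, size - 1), whose second argument converts to an enormous size_t and permits a far out-of-bounds read. A simplified stand-alone sketch of the corrected order:

#include <sys/types.h>
#include <stdbool.h>
#include <string.h>

/* Consume one NUL-terminated string from the remaining "size" bytes. */
static bool
next_string(const char **tmpp, ssize_t *sizep, size_t *lenp)
{
	size_t len;

	if (*sizep <= 0)		/* validate before computing size - 1 */
		return (false);
	len = strnlen(*tmpp, *sizep - 1) + 1;
	if ((*tmpp)[len - 1] != '\0')	/* no terminator inside the buffer */
		return (false);
	*sizep -= len;
	*tmpp += len;
	*lenp = len;
	return (true);
}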
diff --git a/sys/contrib/libnv/nvlist.c b/sys/contrib/libnv/nvlist.c
index 41edc72322c3..73226ee51a78 100644
--- a/sys/contrib/libnv/nvlist.c
+++ b/sys/contrib/libnv/nvlist.c
@@ -478,7 +478,7 @@ nvlist_dump_error_check(const nvlist_t *nvl, int fd, int level)
void
nvlist_dump(const nvlist_t *nvl, int fd)
{
- const nvlist_t *tmpnvl;
+ const nvlist_t *tmpnvl, *top;
nvpair_t *nvp, *tmpnvp;
void *cookie;
int level;
@@ -487,6 +487,7 @@ nvlist_dump(const nvlist_t *nvl, int fd)
if (nvlist_dump_error_check(nvl, fd, level))
return;
+ top = nvl;
nvp = nvlist_first_nvpair(nvl);
while (nvp != NULL) {
dprintf(fd, "%*s%s (%s):", level * 4, "", nvpair_name(nvp),
@@ -645,6 +646,8 @@ nvlist_dump(const nvlist_t *nvl, int fd)
while ((nvp = nvlist_next_nvpair(nvl, nvp)) == NULL) {
do {
+ if (nvl == top)
+ return;
cookie = NULL;
if (nvlist_in_array(nvl))
dprintf(fd, "%*s,\n", level * 4, "");
@@ -847,7 +850,7 @@ nvlist_xpack(const nvlist_t *nvl, int64_t *fdidxp, size_t *sizep)
{
unsigned char *buf, *ptr;
size_t left, size;
- const nvlist_t *tmpnvl;
+ const nvlist_t *tmpnvl, *top;
nvpair_t *nvp, *tmpnvp;
void *cookie;
@@ -868,6 +871,7 @@ nvlist_xpack(const nvlist_t *nvl, int64_t *fdidxp, size_t *sizep)
ptr = nvlist_pack_header(nvl, ptr, &left);
+ top = nvl;
nvp = nvlist_first_nvpair(nvl);
while (nvp != NULL) {
NVPAIR_ASSERT(nvp);
@@ -958,6 +962,8 @@ nvlist_xpack(const nvlist_t *nvl, int64_t *fdidxp, size_t *sizep)
goto fail;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) == NULL) {
do {
+ if (nvl == top)
+ goto out;
cookie = NULL;
if (nvlist_in_array(nvl)) {
ptr = nvpair_pack_nvlist_array_next(ptr,
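
In both walkers the new top sentinel stops the unwind at the nvlist the caller passed in rather than climbing into an enclosing list. A hedged usage sketch against FreeBSD's public libnv API (<sys/nv.h>, link with -lnv):

#include <sys/nv.h>
#include <unistd.h>

int
main(void)
{
	nvlist_t *parent, *child;

	parent = nvlist_create(0);
	child = nvlist_create(0);
	nvlist_add_string(child, "name", "inner");
	nvlist_move_nvlist(parent, "child", child);	/* parent now owns child */

	/*
	 * Dump only the nested list; with the top sentinel the walk ends
	 * here instead of continuing through parent's remaining pairs.
	 */
	nvlist_dump(nvlist_get_nvlist(parent, "child"), STDOUT_FILENO);

	nvlist_destroy(parent);
	return (0);
}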
diff --git a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
index 9b50a4a3d96e..f3d4316f6f67 100644
--- a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
@@ -14,7 +14,7 @@ Please check our issue tracker before opening a new feature request.
Filling out the following template will help other contributors better understand your proposed feature.
-->
-### Describe the feature would like to see added to OpenZFS
+### Describe the feature you would like to see added to OpenZFS
<!--
Provide a clear and concise description of the feature.
diff --git a/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md b/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md
index 79809179cf13..47edc8174603 100644
--- a/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md
+++ b/sys/contrib/openzfs/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,11 +2,6 @@
<!--- Provide a general summary of your changes in the Title above -->
-<!---
-Documentation on ZFS Buildbot options can be found at
-https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html
--->
-
### Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py b/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py
index b49255e8381d..08021aabcb61 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py
+++ b/sys/contrib/openzfs/.github/workflows/scripts/generate-ci-type.py
@@ -65,7 +65,7 @@ if __name__ == '__main__':
# check last (HEAD) commit message
last_commit_message_raw = subprocess.run([
- 'git', 'show', '-s', '--format=%B', 'HEAD'
+ 'git', 'show', '-s', '--format=%B', head
], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in last_commit_message_raw.stdout.decode().splitlines():
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh
index de29ad1f57b6..0278264d9279 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-1-setup.sh
@@ -6,6 +6,13 @@
set -eu
+# We've been seeing this script take over 15min to run. This may or
+# may not be normal. Just to get a little more insight, print out
+# a message to stdout with the top running process, and do this every
+# 30 seconds. We can delete this watchdog later once we get a better
+# handle on what the timeout value should be.
+(while [ 1 ] ; do sleep 30 && echo "[watchdog: $(ps -eo cmd --sort=-pcpu | head -n 2 | tail -n 1)]"; done) &
+
# install needed packages
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get -y update
@@ -65,3 +72,6 @@ sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
for i in /sys/block/s*/queue/scheduler; do
echo "none" | sudo tee $i
done
+
+# Kill off our watchdog
+kill $(jobs -p)
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
index 885a64037f89..1c608348ffcd 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
@@ -25,6 +25,10 @@ UBMIRROR="https://cloud-images.ubuntu.com"
# default nic model for vm's
NIC="virtio"
+# additional options for virt-install
+OPTS[0]=""
+OPTS[1]=""
+
case "$OS" in
almalinux8)
OSNAME="AlmaLinux 8"
@@ -61,6 +65,14 @@ case "$OS" in
OSNAME="Debian 12"
URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
;;
+ debian13)
+ OSNAME="Debian 13"
+ # TODO: keep OSv at debian12 for virt-install until debian13 is added to osinfo
+ OSv="debian12"
+ URL="https://cloud.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2"
+ OPTS[0]="--boot"
+ OPTS[1]="uefi=on"
+ ;;
fedora41)
OSNAME="Fedora 41"
OSv="fedora-unknown"
@@ -109,7 +121,7 @@ case "$OS" in
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
;;
freebsd15-0c)
- FreeBSD="15.0-CURRENT"
+ FreeBSD="15.0-ALPHA3"
OSNAME="FreeBSD $FreeBSD"
OSv="freebsd14.0"
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
@@ -242,7 +254,7 @@ sudo virt-install \
--network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
--cloud-init user-data=/tmp/user-data \
--disk $DISK,bus=virtio,cache=none,format=raw,driver.discard=unmap \
- --import --noautoconsole >/dev/null
+ --import --noautoconsole ${OPTS[0]} ${OPTS[1]} >/dev/null
# Give the VMs hostnames so we don't have to refer to them with
# hardcoded IP addresses.
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
index c41ecd09d52e..f67bb2f68e94 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-3-deps-vm.sh
@@ -20,7 +20,7 @@ function archlinux() {
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
- samba sysstat rng-tools rsync wget xxhash
+ samba strace sysstat rng-tools rsync wget xxhash
echo "##[endgroup]"
}
@@ -41,9 +41,10 @@ function debian() {
libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
- python3-cffi python3-dev python3-distlib python3-packaging \
+ python3-cffi python3-dev python3-distlib python3-packaging libtirpc-dev \
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
- rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev
+ rsync samba strace sysstat uuid-dev watchdog wget xfslibs-dev xxhash \
+ zlib1g-dev
echo "##[endgroup]"
}
@@ -87,8 +88,8 @@ function rhel() {
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
parted perf python3 python3-cffi python3-devel python3-packaging \
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
- rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \
- zlib-devel
+ rpm-build rsync samba strace sysstat systemd watchdog wget xfsprogs-devel \
+ xxhash zlib-devel
echo "##[endgroup]"
}
@@ -104,7 +105,7 @@ function install_fedora_experimental_kernel {
our_version="$1"
sudo dnf -y copr enable @kernel-vanilla/stable
sudo dnf -y copr enable @kernel-vanilla/mainline
- all="$(sudo dnf list --showduplicates kernel-*)"
+ all="$(sudo dnf list --showduplicates kernel-* python3-perf* perf* bpftool*)"
echo "Available versions:"
echo "$all"
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
index 17e976ebcc39..2807d9e77127 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
@@ -5,12 +5,13 @@
#
# Usage:
#
-# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--poweroff]
-# [--release][--repo][--tarball]
+# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--patch-level NUM]
+# [--poweroff][--release][--repo][--tarball]
#
# OS: OS name like 'fedora41'
# --enable-debug: Build RPMs with '--enable-debug' (for testing)
# --dkms: Build DKMS RPMs as well
+# --patch-level NUM: Use a custom patch level number for packages.
# --poweroff: Power-off the VM after building
# --release Build zfs-release*.rpm as well
# --repo After building everything, copy RPMs into /tmp/repo
@@ -21,6 +22,7 @@
ENABLE_DEBUG=""
DKMS=""
+PATCH_LEVEL=""
POWEROFF=""
RELEASE=""
REPO=""
@@ -35,6 +37,11 @@ while [[ $# -gt 0 ]]; do
DKMS=1
shift
;;
+ --patch-level)
+ PATCH_LEVEL=$2
+ shift
+ shift
+ ;;
--poweroff)
POWEROFF=1
shift
@@ -215,6 +222,10 @@ function rpm_build_and_install() {
run ./autogen.sh
echo "##[endgroup]"
+ if [ -n "$PATCH_LEVEL" ] ; then
+ sed -i -E 's/(Release:\s+)1/\1'$PATCH_LEVEL'/g' META
+ fi
+
echo "##[group]Configure"
run ./configure --enable-debuginfo $extra
echo "##[endgroup]"
@@ -328,7 +339,13 @@ fi
# almalinux9.5
# fedora42
source /etc/os-release
-sudo hostname "$ID$VERSION_ID"
+if which hostnamectl &> /dev/null ; then
+ # Fedora 42+ use hostnamectl
+ sudo hostnamectl set-hostname "$ID$VERSION_ID"
+ sudo hostnamectl set-hostname --pretty "$ID$VERSION_ID"
+else
+ sudo hostname "$ID$VERSION_ID"
+fi
# save some sysinfo
uname -a > /var/tmp/uname.txt
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
index 6bf10024a1a6..4869c1003e48 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-5-setup.sh
@@ -12,16 +12,26 @@ source /var/tmp/env.txt
# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
-sudo virsh undefine openzfs
+sudo virsh undefine --nvram openzfs
# cpu pinning
CPUSET=("0,1" "2,3")
+# additional options for virt-install
+OPTS[0]=""
+OPTS[1]=""
+
case "$OS" in
freebsd*)
# FreeBSD needs only 6GiB
RAM=6
;;
+ debian13)
+ RAM=8
+ # Boot Debian 13 with uefi=on and secureboot=off (ZFS Kernel Module not signed)
+ OPTS[0]="--boot"
+ OPTS[1]="firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
+ ;;
*)
# Linux needs more memory, but can be optimized to share it via KSM
RAM=8
@@ -79,7 +89,7 @@ EOF
--network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
--disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
--disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
- --import --noautoconsole >/dev/null
+ --import --noautoconsole ${OPTS[0]} ${OPTS[1]}
done
# generate some memory stats
@@ -98,19 +108,30 @@ echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt
-# check if the machines are okay
-echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
-for ((i=1; i<=VMs; i++)); do
- .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
-done
-echo "All $VMs VMs are up now."
-
# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
for ((i=1; i<=VMs; i++)); do
mkdir -p $RESPATH/vm$i
read "pty" <<< $(sudo virsh ttyconsole vm$i)
+
+ # Create the file so we can tail it, even if there's no output.
+ touch $RESPATH/vm$i/console.txt
+
sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
+
+ # Write all VM boot lines to the console to aid in debugging failed boots.
+ # The boot lines from all the VMs will be munged together, so prepend each
+ # line with the vm hostname (like 'vm1:').
+ (while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &
+
done
echo "Console logging for ${VMs}x $OS started."
+
+
+# check if the machines are okay
+echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
+for ((i=1; i<=VMs; i++)); do
+ .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
+done
+echo "All $VMs VMs are up now."
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
index 5ab822f4f076..ca6ac77f146d 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-6-tests.sh
@@ -111,7 +111,7 @@ fi
sudo dmesg -c > dmesg-prerun.txt
mount > mount.txt
df -h > df-prerun.txt
-$TDIR/zfs-tests.sh -vK -s 3GB -T $TAGS
+$TDIR/zfs-tests.sh -vKO -s 3GB -T $TAGS
RV=$?
df -h > df-postrun.txt
echo $RV > tests-exitcode.txt
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml b/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml
index 5b5afe746859..d8a95954fe1a 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml
@@ -32,6 +32,11 @@ on:
options:
- "Build RPMs"
- "Test repo"
+ patch_level:
+ type: string
+ required: false
+ default: ""
+ description: "(optional) patch level number"
repo_url:
type: string
required: false
@@ -78,7 +83,13 @@ jobs:
mkdir -p /tmp/repo
ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
else
- .github/workflows/scripts/qemu-4-build.sh --repo --release --dkms --tarball ${{ matrix.os }}
+ EXTRA=""
+ if [ -n "${{ github.event.inputs.patch_level }}" ] ; then
+ EXTRA="--patch-level ${{ github.event.inputs.patch_level }}"
+ fi
+
+ .github/workflows/scripts/qemu-4-build.sh $EXTRA \
+ --repo --release --dkms --tarball ${{ matrix.os }}
fi
- name: Prepare artifacts
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-qemu.yml b/sys/contrib/openzfs/.github/workflows/zfs-qemu.yml
index cda620313189..69349678d84c 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-qemu.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-qemu.yml
@@ -29,7 +29,7 @@ jobs:
- name: Generate OS config and CI type
id: os
run: |
- FULL_OS='["almalinux8", "almalinux9", "almalinux10", "centos-stream9", "centos-stream10", "debian11", "debian12", "fedora41", "fedora42", "freebsd13-5r", "freebsd14-3s", "freebsd15-0c", "ubuntu22", "ubuntu24"]'
+ FULL_OS='["almalinux8", "almalinux9", "almalinux10", "centos-stream9", "centos-stream10", "debian12", "debian13", "fedora41", "fedora42", "freebsd13-5r", "freebsd14-3s", "freebsd15-0c", "ubuntu22", "ubuntu24"]'
QUICK_OS='["almalinux8", "almalinux9", "almalinux10", "debian12", "fedora42", "freebsd14-3s", "ubuntu24"]'
# determine CI type when running on PR
ci_type="full"
@@ -44,7 +44,7 @@ jobs:
os_selection="$FULL_OS"
fi
- if [ ${{ github.event.inputs.fedora_kernel_ver }} != "" ] ; then
+ if ${{ github.event.inputs.fedora_kernel_ver != '' }}; then
# They specified a custom kernel version for Fedora. Use only
# Fedora runners.
os_json=$(echo ${os_selection} | jq -c '[.[] | select(startswith("fedora"))]')
@@ -53,9 +53,8 @@ jobs:
os_json=$(echo ${os_selection} | jq -c)
fi
- echo $os_json
- echo "os=$os_json" >> $GITHUB_OUTPUT
- echo "ci_type=$ci_type" >> $GITHUB_OUTPUT
+ echo "os=$os_json" | tee -a $GITHUB_OUTPUT
+ echo "ci_type=$ci_type" | tee -a $GITHUB_OUTPUT
qemu-vm:
name: qemu-x86
@@ -63,8 +62,8 @@ jobs:
strategy:
fail-fast: false
matrix:
- # rhl: almalinux8, almalinux9, centos-stream9, fedora41
- # debian: debian11, debian12, ubuntu22, ubuntu24
+ # rhel: almalinux8, almalinux9, centos-stream9, fedora4x
+ # debian: debian12, debian13, ubuntu22, ubuntu24
# misc: archlinux, tumbleweed
# FreeBSD variants of 2025-06:
# FreeBSD Release: freebsd13-5r, freebsd14-2r, freebsd14-3r
@@ -78,8 +77,12 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
- name: Setup QEMU
- timeout-minutes: 10
- run: .github/workflows/scripts/qemu-1-setup.sh
+ timeout-minutes: 20
+ run: |
+ # Add a timestamp to each line to debug timeouts
+ while IFS=$'\n' read -r line; do
+ echo "$(date +'%H:%M:%S') $line"
+ done < <(.github/workflows/scripts/qemu-1-setup.sh)
- name: Start build machine
timeout-minutes: 10
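The reworked Setup QEMU step wraps the script in a timestamping loop so the 20-minute timeout can be localized to a specific step inside qemu-1-setup.sh. The loop is a generic line filter; purely for illustration (this program is not part of the patch), the same filter in C:

#include <stdio.h>
#include <time.h>

int
main(void)
{
	char line[4096];

	while (fgets(line, sizeof (line), stdin) != NULL) {
		char stamp[16];
		time_t now = time(NULL);

		/* same format as the workflow's date +'%H:%M:%S' */
		(void) strftime(stamp, sizeof (stamp), "%H:%M:%S",
		    localtime(&now));
		(void) printf("%s %s", stamp, line);
	}
	return (0);
}

Piping any long-running command through such a filter gives each output line a wall-clock prefix, which is all the workflow change does.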
diff --git a/sys/contrib/openzfs/.mailmap b/sys/contrib/openzfs/.mailmap
index b6d942c000b8..e6f09c6c9d43 100644
--- a/sys/contrib/openzfs/.mailmap
+++ b/sys/contrib/openzfs/.mailmap
@@ -23,6 +23,7 @@
# These maps are making names consistent where they have varied but the email
# address has never changed. In most cases, the full name is in the
# Signed-off-by of a commit with a matching author.
+Achill Gilgenast <achill@achill.org>
Ahelenia Ziemiańska <nabijaczleweli@gmail.com>
Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Alex John <alex@stty.io>
@@ -37,6 +38,7 @@ Crag Wang <crag0715@gmail.com>
Damian Szuberski <szuberskidamian@gmail.com>
Daniel Kolesa <daniel@octaforge.org>
Debabrata Banerjee <dbavatar@gmail.com>
+Diwakar Kristappagari <diwakar-k@hpe.com>
Finix Yan <yanchongwen@hotmail.com>
Gaurav Kumar <gauravk.18@gmail.com>
Gionatan Danti <g.danti@assyoma.it>
@@ -145,6 +147,7 @@ Gaurav Kumar <gauravk.18@gmail.com> <gaurkuma@users.noreply.github.com>
George Gaydarov <git@gg7.io> <gg7@users.noreply.github.com>
Georgy Yakovlev <gyakovlev@gentoo.org> <168902+gyakovlev@users.noreply.github.com>
Gerardwx <gerardw@alum.mit.edu> <Gerardwx@users.noreply.github.com>
+Germano Massullo <germano.massullo@gmail.com> <Germano0@users.noreply.github.com>
Gian-Carlo DeFazio <defazio1@llnl.gov> <defaziogiancarlo@users.noreply.github.com>
Giuseppe Di Natale <dinatale2@llnl.gov> <dinatale2@users.noreply.github.com>
Hajo Möller <dasjoe@gmail.com> <dasjoe@users.noreply.github.com>
@@ -164,6 +167,7 @@ John Ramsden <johnramsden@riseup.net> <johnramsden@users.noreply.github.com>
Jonathon Fernyhough <jonathon@m2x.dev> <559369+jonathonf@users.noreply.github.com>
Jose Luis Duran <jlduran@gmail.com> <jlduran@users.noreply.github.com>
Justin Hibbits <chmeeedalf@gmail.com> <chmeeedalf@users.noreply.github.com>
+Kaitlin Hoang <kthoang@amazon.com> <khoang98@users.noreply.github.com>
Kevin Greene <kevin.greene@delphix.com> <104801862+kxgreene@users.noreply.github.com>
Kevin Jin <lostking2008@hotmail.com> <33590050+jxdking@users.noreply.github.com>
Kevin P. Fleming <kevin@km6g.us> <kpfleming@users.noreply.github.com>
diff --git a/sys/contrib/openzfs/AUTHORS b/sys/contrib/openzfs/AUTHORS
index a9d249a66f1e..6c34c07f39ef 100644
--- a/sys/contrib/openzfs/AUTHORS
+++ b/sys/contrib/openzfs/AUTHORS
@@ -10,6 +10,7 @@ PAST MAINTAINERS:
CONTRIBUTORS:
Aaron Fineman <abyxcos@gmail.com>
+ Achill Gilgenast <achill@achill.org>
Adam D. Moss <c@yotes.com>
Adam Leventhal <ahl@delphix.com>
Adam Stevko <adam.stevko@gmail.com>
@@ -59,6 +60,7 @@ CONTRIBUTORS:
Andreas Buschmann <andreas.buschmann@tech.net.de>
Andreas Dilger <adilger@intel.com>
Andreas Vögele <andreas@andreasvoegele.com>
+ Andres <a-d-j-i@users.noreply.github.com>
Andrew Barnes <barnes333@gmail.com>
Andrew Hamilton <ahamilto@tjhsst.edu>
Andrew Innes <andrew.c12@gmail.com>
@@ -72,6 +74,7 @@ CONTRIBUTORS:
Andrey Prokopenko <job@terem.fr>
Andrey Vesnovaty <andrey.vesnovaty@gmail.com>
Andriy Gapon <avg@freebsd.org>
+ Andriy Tkachuk <andriy.tkachuk@seagate.com>
Andy Bakun <github@thwartedefforts.org>
Andy Fiddaman <omnios@citrus-it.co.uk>
Aniruddha Shankar <k@191a.net>
@@ -120,6 +123,7 @@ CONTRIBUTORS:
Caleb James DeLisle <calebdelisle@lavabit.com>
Cameron Harr <harr1@llnl.gov>
Cao Xuewen <cao.xuewen@zte.com.cn>
+ Carl George <carlwgeorge@gmail.com>
Carlo Landmeter <clandmeter@gmail.com>
Carlos Alberto Lopez Perez <clopez@igalia.com>
Cedric Maunoury <cedric.maunoury@gmail.com>
@@ -200,6 +204,7 @@ CONTRIBUTORS:
Dimitri John Ledkov <xnox@ubuntu.com>
Dimitry Andric <dimitry@andric.com>
Dirkjan Bussink <d.bussink@gmail.com>
+ Diwakar Kristappagari <diwakar-k@hpe.com>
Dmitry Khasanov <pik4ez@gmail.com>
Dominic Pearson <dsp@technoanimal.net>
Dominik Hassler <hadfl@omniosce.org>
@@ -250,6 +255,7 @@ CONTRIBUTORS:
George Wilson <gwilson@delphix.com>
Georgy Yakovlev <ya@sysdump.net>
Gerardwx <gerardw@alum.mit.edu>
+ Germano Massullo <germano.massullo@gmail.com>
Gian-Carlo DeFazio <defazio1@llnl.gov>
Gionatan Danti <g.danti@assyoma.it>
Giuseppe Di Natale <guss80@gmail.com>
@@ -287,6 +293,7 @@ CONTRIBUTORS:
Igor K <igor@dilos.org>
Igor Kozhukhov <ikozhukhov@gmail.com>
Igor Lvovsky <ilvovsky@gmail.com>
+ Igor Ostapenko <pm@igoro.pro>
ilbsmart <wgqimut@gmail.com>
Ilkka Sovanto <github@ilkka.kapsi.fi>
illiliti <illiliti@protonmail.com>
@@ -326,6 +333,7 @@ CONTRIBUTORS:
Jinshan Xiong <jinshan.xiong@intel.com>
Jitendra Patidar <jitendra.patidar@nutanix.com>
JK Dingwall <james@dingwall.me.uk>
+ Joel Low <joel@joelsplace.sg>
Joe Stein <joe.stein@delphix.com>
John-Mark Gurney <jmg@funkthat.com>
John Albietz <inthecloud247@gmail.com>
@@ -374,6 +382,7 @@ CONTRIBUTORS:
Kevin Jin <lostking2008@hotmail.com>
Kevin P. Fleming <kevin@km6g.us>
Kevin Tanguy <kevin.tanguy@ovh.net>
+ khoang98 <khoang98@users.noreply.github.com>
KireinaHoro <i@jsteward.moe>
Kjeld Schouten-Lebbing <kjeld@schouten-lebbing.nl>
Kleber Tarcísio <klebertarcisio@yahoo.com.br>
@@ -447,6 +456,7 @@ CONTRIBUTORS:
Max Zettlmeißl <max@zettlmeissl.de>
Md Islam <mdnahian@outlook.com>
megari <megari@iki.fi>
+ Meriel Luna Mittelbach <lunarlambda@gmail.com>
Michael D Labriola <michael.d.labriola@gmail.com>
Michael Franzl <michael@franzl.name>
Michael Gebetsroither <michael@mgeb.org>
@@ -494,6 +504,7 @@ CONTRIBUTORS:
Orivej Desh <orivej@gmx.fr>
Pablo Correa Gómez <ablocorrea@hotmail.com>
Palash Gandhi <pbg4930@rit.edu>
+ Patrick Fasano <patrick@patrickfasano.com>
Patrick Mooney <pmooney@pfmooney.com>
Patrik Greco <sikevux@sikevux.se>
Paul B. Henson <henson@acm.org>
@@ -535,6 +546,7 @@ CONTRIBUTORS:
Remy Blank <remy.blank@pobox.com>
renelson <bnelson@nelsonbe.com>
Reno Reckling <e-github@wthack.de>
+ René Wirnata <rene.wirnata@pandascience.net>
Ricardo M. Correia <ricardo.correia@oracle.com>
Riccardo Schirone <rschirone91@gmail.com>
Richard Allen <belperite@gmail.com>
@@ -640,6 +652,7 @@ CONTRIBUTORS:
tleydxdy <shironeko.github@tesaguri.club>
Tobin Harding <me@tobin.cc>
Todd Seidelmann <seidelma@users.noreply.github.com>
+ Todd Zullinger <tmz@pobox.com>
Tom Caputi <tcaputi@datto.com>
Tom Matthews <tom@axiom-partners.com>
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 47f0795bfa11..bdb7aee48041 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
-Version: 2.3.99
+Version: 2.4.99
Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 6.15
+Linux-Maximum: 6.17
Linux-Minimum: 4.18
diff --git a/sys/contrib/openzfs/Makefile.am b/sys/contrib/openzfs/Makefile.am
index 5f09d170e730..30f78e490b78 100644
--- a/sys/contrib/openzfs/Makefile.am
+++ b/sys/contrib/openzfs/Makefile.am
@@ -1,6 +1,7 @@
CLEANFILES =
dist_noinst_DATA =
INSTALL_DATA_HOOKS =
+INSTALL_EXEC_HOOKS =
ALL_LOCAL =
CLEAN_LOCAL =
CHECKS = shellcheck checkbashisms
@@ -71,6 +72,9 @@ all: gitrev
PHONY += install-data-hook $(INSTALL_DATA_HOOKS)
install-data-hook: $(INSTALL_DATA_HOOKS)
+PHONY += install-exec-hook $(INSTALL_EXEC_HOOKS)
+install-exec-hook: $(INSTALL_EXEC_HOOKS)
+
PHONY += maintainer-clean-local
maintainer-clean-local:
-$(RM) $(GITREV)
diff --git a/sys/contrib/openzfs/cmd/Makefile.am b/sys/contrib/openzfs/cmd/Makefile.am
index 96040976e53e..ca94f6b77e06 100644
--- a/sys/contrib/openzfs/cmd/Makefile.am
+++ b/sys/contrib/openzfs/cmd/Makefile.am
@@ -98,17 +98,16 @@ endif
if USING_PYTHON
-bin_SCRIPTS += arc_summary arcstat dbufstat zilstat
-CLEANFILES += arc_summary arcstat dbufstat zilstat
-dist_noinst_DATA += %D%/arc_summary %D%/arcstat.in %D%/dbufstat.in %D%/zilstat.in
+bin_SCRIPTS += zarcsummary zarcstat dbufstat zilstat
+CLEANFILES += zarcsummary zarcstat dbufstat zilstat
+dist_noinst_DATA += %D%/zarcsummary %D%/zarcstat.in %D%/dbufstat.in %D%/zilstat.in
-$(call SUBST,arcstat,%D%/)
+$(call SUBST,zarcstat,%D%/)
$(call SUBST,dbufstat,%D%/)
$(call SUBST,zilstat,%D%/)
-arc_summary: %D%/arc_summary
+zarcsummary: %D%/zarcsummary
$(AM_V_at)cp $< $@
endif
-
PHONY += cmd
cmd: $(bin_SCRIPTS) $(bin_PROGRAMS) $(sbin_SCRIPTS) $(sbin_PROGRAMS) $(dist_bin_SCRIPTS) $(zfsexec_PROGRAMS) $(mounthelper_PROGRAMS)
diff --git a/sys/contrib/openzfs/cmd/arcstat.in b/sys/contrib/openzfs/cmd/zarcstat.in
index 6f9abb39c3fb..8ffd20481166 100755
--- a/sys/contrib/openzfs/cmd/arcstat.in
+++ b/sys/contrib/openzfs/cmd/zarcstat.in
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: CDDL-1.0
#
# Print out ZFS ARC Statistics exported via kstat(1)
-# For a definition of fields, or usage, use arcstat -v
+# For a definition of fields, or usage, use zarcstat -v
#
# This script was originally a fork of the original arcstat.pl (0.1)
# by Neelakanth Nadgir, originally published on his Sun blog on
@@ -56,6 +56,7 @@ import time
import getopt
import re
import copy
+import os
from signal import signal, SIGINT, SIGWINCH, SIG_DFL
@@ -171,7 +172,7 @@ cols = {
"zactive": [7, 1000, "zfetch prefetches active per second"],
}
-# ARC structural breakdown from arc_summary
+# ARC structural breakdown from zarcsummary
structfields = {
"cmp": ["compressed", "Compressed"],
"ovh": ["overhead", "Overhead"],
@@ -187,7 +188,7 @@ structstats = { # size stats
"sz": ["_size", "size"],
}
-# ARC types breakdown from arc_summary
+# ARC types breakdown from zarcsummary
typefields = {
"data": ["data", "ARC data"],
"meta": ["metadata", "ARC metadata"],
@@ -198,7 +199,7 @@ typestats = { # size stats
"sz": ["_size", "size"],
}
-# ARC states breakdown from arc_summary
+# ARC states breakdown from zarcsummary
statefields = {
"ano": ["anon", "Anonymous"],
"mfu": ["mfu", "MFU"],
@@ -261,7 +262,7 @@ hdr_intr = 20 # Print header every 20 lines of output
opfile = None
sep = " " # Default separator is 2 spaces
l2exist = False
-cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
+cmd = ("Usage: zarcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
"[count]]\n")
cur = {}
d = {}
@@ -348,10 +349,10 @@ def usage():
"character or string\n")
sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n")
sys.stderr.write("\nExamples:\n")
- sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n")
- sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n")
- sys.stderr.write("\tarcstat -v\n")
- sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n")
+ sys.stderr.write("\tzarcstat -o /tmp/a.log 2 10\n")
+ sys.stderr.write("\tzarcstat -s \",\" -o /tmp/a.log 2 10\n")
+ sys.stderr.write("\tzarcstat -v\n")
+ sys.stderr.write("\tzarcstat -f time,hit%,dh%,ph%,mh% 1\n")
sys.stderr.write("\n")
sys.exit(1)
@@ -366,7 +367,7 @@ def snap_stats():
cur = kstat
- # fill in additional values from arc_summary
+ # fill in additional values from zarcsummary
cur["caches_size"] = caches_size = cur["anon_data"]+cur["anon_metadata"]+\
cur["mfu_data"]+cur["mfu_metadata"]+cur["mru_data"]+cur["mru_metadata"]+\
cur["uncached_data"]+cur["uncached_metadata"]
@@ -766,6 +767,7 @@ def calculate():
def main():
+
global sint
global count
global hdr_intr
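On Linux, the renamed zarcstat script reads its counters from the arcstats kstat. Assuming the usual Linux OpenZFS path /proc/spl/kstat/zfs/arcstats and its "name type data" row layout (both are background assumptions, not taken from this diff), a minimal C reader for a single counter might look like:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	FILE *f = fopen("/proc/spl/kstat/zfs/arcstats", "r");
	char line[256], name[64];
	unsigned long long val;

	if (f == NULL) {
		perror("arcstats");
		return (1);
	}
	while (fgets(line, sizeof (line), f) != NULL) {
		/* data rows are "name type value"; header rows fail the
		 * scan and are skipped */
		if (sscanf(line, "%63s %*u %llu", name, &val) == 2 &&
		    strcmp(name, "hits") == 0)
			(void) printf("ARC hits: %llu\n", val);
	}
	(void) fclose(f);
	return (0);
}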
diff --git a/sys/contrib/openzfs/cmd/arc_summary b/sys/contrib/openzfs/cmd/zarcsummary
index e60c6b64e8a1..24a129d9ca70 100755
--- a/sys/contrib/openzfs/cmd/arc_summary
+++ b/sys/contrib/openzfs/cmd/zarcsummary
@@ -34,7 +34,7 @@ Provides basic information on the ARC, its efficiency, the L2ARC (if present),
the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
the in-source documentation and code at
https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details.
-The original introduction to arc_summary can be found at
+The original introduction to zarcsummary can be found at
http://cuddletech.com/?p=454
"""
@@ -161,7 +161,7 @@ elif sys.platform.startswith('linux'):
return get_params(TUNABLES_PATH)
def get_version_impl(request):
- # The original arc_summary called /sbin/modinfo/{spl,zfs} to get
+ # The original zarcsummary called /sbin/modinfo/{spl,zfs} to get
# the version information. We switch to /sys/module/{spl,zfs}/version
# to make sure we get what is really loaded in the kernel
try:
@@ -439,7 +439,7 @@ def print_header():
"""
# datetime is now recommended over time but we keep the exact formatting
- # from the older version of arc_summary in case there are scripts
+ # from the older version of zarcsummary in case there are scripts
# that expect it in this way
daydate = time.strftime(DATE_FORMAT)
spc_date = LINE_LENGTH-len(daydate)
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index bf44d9c322b4..70a4ed46f263 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -107,7 +107,9 @@ extern uint_t zfs_reconstruct_indirect_combinations_max;
extern uint_t zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
-uint8_t dump_opt[256];
+uint8_t dump_opt[512];
+
+#define ALLOCATED_OPT 256
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
@@ -127,6 +129,7 @@ static zfs_range_tree_t *mos_refd_objs;
static spa_t *spa;
static objset_t *os;
static boolean_t kernel_init_done;
+static boolean_t corruption_found = B_FALSE;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
@@ -176,7 +179,7 @@ static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
- ASSERT3P(tx, ==, NULL);
+ ASSERT0P(tx);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
@@ -250,6 +253,7 @@ sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
+ corruption_found = B_TRUE;
}
zfs_btree_destroy(&sv->sv_pair);
@@ -381,7 +385,7 @@ verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
sublivelist_verify_block_t svb = {{{0}}};
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
- DVA_SET_ASIZE(&svb.svb_dva, size);
+ DVA_SET_ASIZE(&svb.svb_dva, 0);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
@@ -405,6 +409,7 @@ verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
+ corruption_found = B_TRUE;
}
}
}
@@ -426,6 +431,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
+ corruption_found = B_TRUE;
} else {
zfs_range_tree_add(mv->mv_allocated,
offset, size);
@@ -439,6 +445,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
+ corruption_found = B_TRUE;
} else {
zfs_range_tree_remove(mv->mv_allocated,
offset, size);
@@ -526,6 +533,7 @@ mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
+ corruption_found = B_TRUE;
continue;
}
@@ -542,6 +550,7 @@ mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
+ corruption_found = B_TRUE;
continue;
}
@@ -655,6 +664,7 @@ livelist_metaslab_validate(spa_t *spa)
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
+ corruption_found = B_TRUE;
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
@@ -827,7 +837,7 @@ usage(void)
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
- zdb_exit(1);
+ zdb_exit(2);
}
static void
@@ -892,9 +902,9 @@ dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
- VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
+ VERIFY0(dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
- VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
+ VERIFY0(nvlist_unpack(packed, nvsize, &nv, 0));
umem_free(packed, nvsize);
@@ -1455,8 +1465,8 @@ get_obsolete_refcount(vdev_t *vd)
refcount++;
}
} else {
- ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
- ASSERT3U(obsolete_sm_object, ==, 0);
+ ASSERT0P(vd->vdev_obsolete_sm);
+ ASSERT0(obsolete_sm_object);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
@@ -1578,9 +1588,8 @@ dump_spacemap(objset_t *os, space_map_t *sm)
continue;
}
- uint8_t words;
char entry_type;
- uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
+ uint64_t entry_off, entry_run, entry_vdev;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
@@ -1588,35 +1597,43 @@ dump_spacemap(objset_t *os, space_map_t *sm)
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
- words = 1;
+
+ (void) printf("\t [%6llu] %c "
+ "range: %012llx-%012llx size: %08llx\n",
+ (u_longlong_t)entry_id, entry_type,
+ (u_longlong_t)entry_off,
+ (u_longlong_t)(entry_off + entry_run - 1),
+ (u_longlong_t)entry_run);
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
+ ASSERT3U(offset, <, space_map_length(sm));
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
- ASSERT3U(offset, <=, space_map_length(sm));
-
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
- words = 2;
- }
- (void) printf("\t [%6llu] %c range:"
- " %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
- (u_longlong_t)entry_id,
- entry_type, (u_longlong_t)entry_off,
- (u_longlong_t)(entry_off + entry_run),
- (u_longlong_t)entry_run,
- (u_longlong_t)entry_vdev, words);
+ if (zopt_metaslab_args == 0 ||
+ zopt_metaslab[0] == entry_vdev) {
+ (void) printf("\t [%6llu] %c "
+ "range: %012llx-%012llx size: %08llx "
+ "vdev: %llu\n",
+ (u_longlong_t)entry_id, entry_type,
+ (u_longlong_t)entry_off,
+ (u_longlong_t)(entry_off + entry_run - 1),
+ (u_longlong_t)entry_run,
+ (u_longlong_t)entry_vdev);
+ }
+ }
if (entry_type == 'A')
alloc += entry_run;
@@ -1652,6 +1669,16 @@ dump_metaslab_stats(metaslab_t *msp)
}
static void
+dump_allocated(void *arg, uint64_t start, uint64_t size)
+{
+ uint64_t *off = arg;
+ if (*off != start)
+ (void) printf("ALLOC: %"PRIu64" %"PRIu64"\n", *off,
+ start - *off);
+ *off = start + size;
+}
+
+static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
@@ -1667,13 +1694,24 @@ dump_metaslab(metaslab_t *msp)
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
- if (dump_opt['m'] > 2 && !dump_opt['L']) {
+ if (dump_opt[ALLOCATED_OPT] ||
+ (dump_opt['m'] > 2 && !dump_opt['L'])) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
+ }
+
+ if (dump_opt['m'] > 2 && !dump_opt['L']) {
zfs_range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
- metaslab_unload(msp);
- mutex_exit(&msp->ms_lock);
+ }
+
+ if (dump_opt[ALLOCATED_OPT]) {
+ uint64_t off = msp->ms_start;
+ zfs_range_tree_walk(msp->ms_allocatable, dump_allocated,
+ &off);
+ if (off != msp->ms_start + msp->ms_size)
+ (void) printf("ALLOC: %"PRIu64" %"PRIu64"\n", off,
+ msp->ms_size - off);
}
if (dump_opt['m'] > 1 && sm != NULL &&
@@ -1688,6 +1726,12 @@ dump_metaslab(metaslab_t *msp)
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
+ if (dump_opt[ALLOCATED_OPT] ||
+ (dump_opt['m'] > 2 && !dump_opt['L'])) {
+ metaslab_unload(msp);
+ mutex_exit(&msp->ms_lock);
+ }
+
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
@@ -1724,8 +1768,9 @@ print_vdev_metaslab_header(vdev_t *vd)
}
}
- (void) printf("\tvdev %10llu %s",
- (u_longlong_t)vd->vdev_id, bias_str);
+ (void) printf("\tvdev %10llu\t%s metaslab shift %4llu",
+ (u_longlong_t)vd->vdev_id, bias_str,
+ (u_longlong_t)vd->vdev_ms_shift);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
@@ -1792,7 +1837,7 @@ print_vdev_indirect(vdev_t *vd)
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
- ASSERT3P(vib, ==, NULL);
+ ASSERT0P(vib);
return;
}
@@ -1865,7 +1910,7 @@ dump_metaslabs(spa_t *spa)
(void) printf("\nMetaslabs:\n");
- if (!dump_opt['d'] && zopt_metaslab_args > 0) {
+ if (zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
@@ -2043,10 +2088,10 @@ dump_ddt_object(ddt_t *ddt, ddt_type_t type, ddt_class_t class)
if (error == ENOENT)
return;
- ASSERT(error == 0);
+ ASSERT0(error);
error = ddt_object_count(ddt, type, class, &count);
- ASSERT(error == 0);
+ ASSERT0(error);
if (count == 0)
return;
@@ -2583,19 +2628,17 @@ snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
}
}
-static void
+static u_longlong_t
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
+ u_longlong_t offset;
int l;
- if (!BP_IS_EMBEDDED(bp)) {
- ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
- ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
- }
+ offset = (u_longlong_t)blkid2offset(dnp, bp, zb);
- (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
+ (void) printf("%16llx ", offset);
ASSERT(zb->zb_level >= 0);
@@ -2610,19 +2653,38 @@ print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
- (void) printf("%s\n", blkbuf);
+ (void) printf("%s", blkbuf);
+
+ if (!BP_IS_EMBEDDED(bp)) {
+ if (BP_GET_TYPE(bp) != dnp->dn_type) {
+ (void) printf(" (ERROR: Block pointer type "
+ "(%llu) does not match dnode type (%hhu))",
+ (u_longlong_t)BP_GET_TYPE(bp), dnp->dn_type);
+ corruption_found = B_TRUE;
+ }
+ if (BP_GET_LEVEL(bp) != zb->zb_level) {
+ (void) printf(" (ERROR: Block pointer level "
+ "(%llu) does not match bookmark level (%lld))",
+ (u_longlong_t)BP_GET_LEVEL(bp), (longlong_t)zb->zb_level);
+ corruption_found = B_TRUE;
+ }
+ }
+ (void) printf("\n");
+
+ return (offset);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
+ u_longlong_t offset;
int err = 0;
if (BP_GET_BIRTH(bp) == 0)
return (0);
- print_indirect(spa, bp, zb, dnp);
+ offset = print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
@@ -2652,8 +2714,15 @@ visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
break;
fill += BP_GET_FILL(cbp);
}
- if (!err)
- ASSERT3U(fill, ==, BP_GET_FILL(bp));
+ if (!err) {
+ if (fill != BP_GET_FILL(bp)) {
+ (void) printf("%16llx: Block pointer "
+ "fill (%llu) does not match calculated "
+ "value (%llu)\n", offset, BP_GET_FILL(bp),
+ (u_longlong_t)fill);
+ corruption_found = B_TRUE;
+ }
+ }
arc_buf_destroy(buf, &buf);
}
@@ -2909,6 +2978,7 @@ dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
+ corruption_found = B_TRUE;
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
@@ -3088,6 +3158,7 @@ bpobj_count_refd(bpobj_t *bpo)
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
+ corruption_found = B_TRUE;
continue;
}
bpobj_count_refd(&subbpo);
@@ -3109,7 +3180,7 @@ dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
- ASSERT(arg == NULL);
+ ASSERT0P(arg);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
@@ -3230,6 +3301,7 @@ zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
uint64_t keyformat, salt, iters;
int i;
unsigned char c;
+ FILE *f;
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), sizeof (uint64_t),
@@ -3262,6 +3334,25 @@ zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
break;
+ case ZFS_KEYFORMAT_RAW:
+ if ((f = fopen(key_material, "r")) == NULL)
+ return (B_FALSE);
+
+ if (fread(key_out, 1, WRAPPING_KEY_LEN, f) !=
+ WRAPPING_KEY_LEN) {
+ (void) fclose(f);
+ return (B_FALSE);
+ }
+
+ /* Check the key length */
+ if (fgetc(f) != EOF) {
+ (void) fclose(f);
+ return (B_FALSE);
+ }
+
+ (void) fclose(f);
+ break;
+
default:
fatal("no support for key format %u\n",
(unsigned int) keyformat);
@@ -3347,7 +3438,7 @@ open_objset(const char *path, const void *tag, objset_t **osp)
uint64_t sa_attrs = 0;
uint64_t version = 0;
- VERIFY3P(sa_os, ==, NULL);
+ VERIFY0P(sa_os);
/*
* We can't own an objset if it's redacted. Therefore, we do this
@@ -3520,8 +3611,8 @@ dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
- VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
- 8, 1, &fuid_obj) == 0);
+ VERIFY0(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
+ 8, 1, &fuid_obj));
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
@@ -7016,7 +7107,7 @@ deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
- ASSERT3P(arg, ==, NULL);
+ ASSERT0P(arg);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
@@ -7913,7 +8004,7 @@ verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
- VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
+ VERIFY0P(current_vd->vdev_checkpoint_sm);
}
}
@@ -9334,6 +9425,8 @@ main(int argc, char **argv)
{"all-reconstruction", no_argument, NULL, 'Y'},
{"livelist", no_argument, NULL, 'y'},
{"zstd-headers", no_argument, NULL, 'Z'},
+ {"allocated-map", no_argument, NULL,
+ ALLOCATED_OPT},
{0, 0, 0, 0}
};
@@ -9364,6 +9457,7 @@ main(int argc, char **argv)
case 'u':
case 'y':
case 'Z':
+ case ALLOCATED_OPT:
dump_opt[c]++;
dump_all = 0;
break;
@@ -9634,7 +9728,7 @@ main(int argc, char **argv)
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
- error = 1;
+ error = 2;
goto fini;
}
} else {
@@ -9743,7 +9837,7 @@ main(int argc, char **argv)
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
- ASSERT(checkpoint_target == NULL);
+ ASSERT0P(checkpoint_target);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
@@ -9936,5 +10030,8 @@ fini:
if (kernel_init_done)
kernel_fini();
+ if (corruption_found && error == 0)
+ error = 3;
+
return (error);
}
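The --allocated-map plumbing above uses a common getopt_long() idiom: a long-only option is assigned a value past the single-character range (here ALLOCATED_OPT = 256), which is also why dump_opt[] grows from 256 to 512 entries, so the new option can be counted in the same table as the single-letter flags. A minimal self-contained sketch of the idiom, with names borrowed from the patch but none of zdb itself:

#include <getopt.h>
#include <stdio.h>

#define	ALLOCATED_OPT	256	/* first value past any plain-char option */

static unsigned char opt_count[512];	/* indexed by option value */

int
main(int argc, char **argv)
{
	static const struct option longopts[] = {
		{ "allocated-map", no_argument, NULL, ALLOCATED_OPT },
		{ 0, 0, 0, 0 }
	};
	int c;

	while ((c = getopt_long(argc, argv, "m", longopts, NULL)) != -1) {
		switch (c) {
		case 'm':
		case ALLOCATED_OPT:
			/* short and long-only options share one table */
			opt_count[c]++;
			break;
		default:
			return (2);
		}
	}
	(void) printf("--allocated-map given %u time(s)\n",
	    opt_count[ALLOCATED_OPT]);
	return (0);
}

Keeping the numbering contiguous lets the existing dump_opt[c]++ dispatch in main() stay unchanged; only the array bound had to grow.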
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.h b/sys/contrib/openzfs/cmd/zdb/zdb.h
index 6b6c9169816b..48b561eb202c 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.h
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.h
@@ -29,6 +29,6 @@
#define _ZDB_H
void dump_intent_log(zilog_t *);
-extern uint8_t dump_opt[256];
+extern uint8_t dump_opt[512];
#endif /* _ZDB_H */
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb_il.c b/sys/contrib/openzfs/cmd/zdb/zdb_il.c
index 62e290cd122c..3d91fb28a4c7 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb_il.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb_il.c
@@ -48,8 +48,6 @@
#include "zdb.h"
-extern uint8_t dump_opt[256];
-
static char tab_prefix[4] = "\t\t\t";
static void
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am b/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am
index 093a04c4636a..c0b161ecf248 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/Makefile.am
@@ -9,18 +9,18 @@ dist_zedexec_SCRIPTS = \
%D%/all-debug.sh \
%D%/all-syslog.sh \
%D%/data-notify.sh \
- %D%/deadman-slot_off.sh \
+ %D%/deadman-sync-slot_off.sh \
%D%/generic-notify.sh \
- %D%/pool_import-led.sh \
+ %D%/pool_import-sync-led.sh \
%D%/resilver_finish-notify.sh \
%D%/resilver_finish-start-scrub.sh \
%D%/scrub_finish-notify.sh \
- %D%/statechange-led.sh \
+ %D%/statechange-sync-led.sh \
%D%/statechange-notify.sh \
- %D%/statechange-slot_off.sh \
+ %D%/statechange-sync-slot_off.sh \
%D%/trim_finish-notify.sh \
- %D%/vdev_attach-led.sh \
- %D%/vdev_clear-led.sh
+ %D%/vdev_attach-sync-led.sh \
+ %D%/vdev_clear-sync-led.sh
nodist_zedexec_SCRIPTS = \
%D%/history_event-zfs-list-cacher.sh
@@ -30,17 +30,17 @@ SUBSTFILES += $(nodist_zedexec_SCRIPTS)
zedconfdefaults = \
all-syslog.sh \
data-notify.sh \
- deadman-slot_off.sh \
+ deadman-sync-slot_off.sh \
history_event-zfs-list-cacher.sh \
- pool_import-led.sh \
+ pool_import-sync-led.sh \
resilver_finish-notify.sh \
resilver_finish-start-scrub.sh \
scrub_finish-notify.sh \
- statechange-led.sh \
+ statechange-sync-led.sh \
statechange-notify.sh \
- statechange-slot_off.sh \
- vdev_attach-led.sh \
- vdev_clear-led.sh
+ statechange-sync-slot_off.sh \
+ vdev_attach-sync-led.sh \
+ vdev_clear-sync-led.sh
dist_noinst_DATA += %D%/README
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/deadman-slot_off.sh b/sys/contrib/openzfs/cmd/zed/zed.d/deadman-sync-slot_off.sh
index 7b339b3add01..7b339b3add01 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/deadman-slot_off.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/deadman-sync-slot_off.sh
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/pool_import-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/pool_import-led.sh
deleted file mode 120000
index 7d7404398a4a..000000000000
--- a/sys/contrib/openzfs/cmd/zed/zed.d/pool_import-led.sh
+++ /dev/null
@@ -1 +0,0 @@
-statechange-led.sh \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/pool_import-sync-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/pool_import-sync-led.sh
new file mode 120000
index 000000000000..8b9c10c11ebb
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/pool_import-sync-led.sh
@@ -0,0 +1 @@
+statechange-sync-led.sh \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-sync-led.sh
index 40cb61f17307..40cb61f17307 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-led.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-sync-led.sh
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-sync-slot_off.sh
index 06acce93b8aa..06acce93b8aa 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-slot_off.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-sync-slot_off.sh
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-led.sh
deleted file mode 120000
index 7d7404398a4a..000000000000
--- a/sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-led.sh
+++ /dev/null
@@ -1 +0,0 @@
-statechange-led.sh \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-sync-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-sync-led.sh
new file mode 120000
index 000000000000..8b9c10c11ebb
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/vdev_attach-sync-led.sh
@@ -0,0 +1 @@
+statechange-sync-led.sh \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-led.sh
deleted file mode 120000
index 7d7404398a4a..000000000000
--- a/sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-led.sh
+++ /dev/null
@@ -1 +0,0 @@
-statechange-led.sh \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-sync-led.sh b/sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-sync-led.sh
new file mode 120000
index 000000000000..8b9c10c11ebb
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/vdev_clear-sync-led.sh
@@ -0,0 +1 @@
+statechange-sync-led.sh \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh b/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
index 6e00f153be1c..78d8f658ddd8 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
@@ -441,8 +441,9 @@ zed_notify_slack_webhook()
"${pathname}")"
# Construct the JSON message for posting.
+ # shellcheck disable=SC2016
#
- msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )"
+ msg_json="$(printf '{"text": "*%s*\\n```%s```"}' "${subject}" "${msg_body}" )"
# Send the POST request and check for errors.
#
diff --git a/sys/contrib/openzfs/cmd/zed/zed_exec.c b/sys/contrib/openzfs/cmd/zed/zed_exec.c
index 036081decd64..a14af4f20a85 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_exec.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_exec.c
@@ -196,37 +196,29 @@ _nop(int sig)
(void) sig;
}
-static void *
-_reap_children(void *arg)
+static void
+wait_for_children(boolean_t do_pause, boolean_t wait)
{
- (void) arg;
- struct launched_process_node node, *pnode;
pid_t pid;
- int status;
struct rusage usage;
- struct sigaction sa = {};
-
- (void) sigfillset(&sa.sa_mask);
- (void) sigdelset(&sa.sa_mask, SIGCHLD);
- (void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
-
- (void) sigemptyset(&sa.sa_mask);
- sa.sa_handler = _nop;
- sa.sa_flags = SA_NOCLDSTOP;
- (void) sigaction(SIGCHLD, &sa, NULL);
+ int status;
+ struct launched_process_node node, *pnode;
for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
(void) pthread_mutex_lock(&_launched_processes_lock);
- pid = wait4(0, &status, WNOHANG, &usage);
-
+ pid = wait4(0, &status, wait ? 0 : WNOHANG, &usage);
if (pid == 0 || pid == (pid_t)-1) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
- if (pid == 0 || errno == ECHILD)
- pause();
- else if (errno != EINTR)
+ if ((pid == 0) || (errno == ECHILD)) {
+ if (do_pause)
+ pause();
+ } else if (errno != EINTR)
zed_log_msg(LOG_WARNING,
"Failed to wait for children: %s",
strerror(errno));
+ if (!do_pause)
+ return;
+
} else {
memset(&node, 0, sizeof (node));
node.pid = pid;
@@ -278,6 +270,25 @@ _reap_children(void *arg)
}
}
+}
+
+static void *
+_reap_children(void *arg)
+{
+ (void) arg;
+ struct sigaction sa = {};
+
+ (void) sigfillset(&sa.sa_mask);
+ (void) sigdelset(&sa.sa_mask, SIGCHLD);
+ (void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
+
+ (void) sigemptyset(&sa.sa_mask);
+ sa.sa_handler = _nop;
+ sa.sa_flags = SA_NOCLDSTOP;
+ (void) sigaction(SIGCHLD, &sa, NULL);
+
+ wait_for_children(B_TRUE, B_FALSE);
+
return (NULL);
}
@@ -307,6 +318,45 @@ zed_exec_fini(void)
}
/*
+ * Check whether the zedlet name marks it as a synchronous zedlet
+ *
+ * Synchronous zedlets have a "-sync-" immediately following the event name in
+ * their zedlet filename, like:
+ *
+ * EVENT_NAME-sync-ZEDLETNAME.sh
+ *
+ * For example, if you wanted a synchronous statechange script:
+ *
+ * statechange-sync-myzedlet.sh
+ *
+ * Synchronous zedlets are guaranteed to be the only zedlet running. No other
+ * zedlets may run in parallel with a synchronous zedlet. A synchronous
+ * zedlet will wait for all previously spawned zedlets to finish before running.
+ * Users should be careful to only use synchronous zedlets when needed, since
+ * they decrease parallelism.
+ */
+static boolean_t
+zedlet_is_sync(const char *zedlet, const char *event)
+{
+ const char *sync_str = "-sync-";
+ size_t sync_str_len;
+ size_t zedlet_len;
+ size_t event_len;
+
+ sync_str_len = strlen(sync_str);
+ zedlet_len = strlen(zedlet);
+ event_len = strlen(event);
+
+ if (event_len + sync_str_len >= zedlet_len)
+ return (B_FALSE);
+
+ if (strncmp(&zedlet[event_len], sync_str, sync_str_len) == 0)
+ return (B_TRUE);
+
+ return (B_FALSE);
+}
+
+/*
* Process the event [eid] by synchronously invoking all zedlets with a
* matching class prefix.
*
@@ -368,9 +418,28 @@ zed_exec_process(uint64_t eid, const char *class, const char *subclass,
z = zed_strings_next(zcp->zedlets)) {
for (csp = class_strings; *csp; csp++) {
n = strlen(*csp);
- if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
+ if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n])) {
+ boolean_t is_sync = zedlet_is_sync(z, *csp);
+
+ if (is_sync) {
+ /*
+ * Wait for previous zedlets to
+ * finish
+ */
+ wait_for_children(B_FALSE, B_TRUE);
+ }
+
_zed_exec_fork_child(eid, zcp->zedlet_dir,
z, e, zcp->zevent_fd, zcp->do_foreground);
+
+ if (is_sync) {
+ /*
+ * Wait for sync zedlet we just launched
+ * to finish.
+ */
+ wait_for_children(B_FALSE, B_TRUE);
+ }
+ }
}
}
free(e);
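zedlet_is_sync() above is a pure filename test: the event class, then the literal "-sync-", then the rest of the zedlet name. To see the convention in isolation, here is a hypothetical standalone demo of the same prefix check (is_sync is an illustrative stand-in, not the zed function):

#include <stdio.h>
#include <string.h>

static int
is_sync(const char *zedlet, const char *event)
{
	const char *tag = "-sync-";
	size_t evlen = strlen(event);

	/* the tag must sit immediately after the event name, and the
	 * zedlet name must continue past it */
	if (evlen + strlen(tag) >= strlen(zedlet))
		return (0);
	return (strncmp(zedlet + evlen, tag, strlen(tag)) == 0);
}

int
main(void)
{
	(void) printf("%d\n", is_sync("statechange-sync-led.sh",
	    "statechange"));	/* 1: serialized against other zedlets */
	(void) printf("%d\n", is_sync("statechange-led.sh",
	    "statechange"));	/* 0: runs in parallel as before */
	return (0);
}

This is also why the zed.d scripts and symlinks in this commit gain a -sync- segment after the event name: the LED and slot-off zedlets now serialize by name alone, with no configuration change.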
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index 363bb6da74ec..ccdd5ffef8e6 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -914,7 +914,11 @@ zfs_do_clone(int argc, char **argv)
log_history = B_FALSE;
}
- ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
+ /*
+ * Dataset cloned successfully, mount/share failures are
+ * non-fatal.
+ */
+ (void) zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
@@ -923,26 +927,22 @@ zfs_do_clone(int argc, char **argv)
return (!!ret);
usage:
- ASSERT3P(zhp, ==, NULL);
+ ASSERT0P(zhp);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
- * Return a default volblocksize for the pool which always uses more than
- * half of the data sectors. This primarily applies to dRAID which always
- * writes full stripe widths.
+ * Calculate the minimum allocation size based on the top-level vdevs.
*/
static uint64_t
-default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
+calculate_volblocksize(nvlist_t *config)
{
- uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
+ uint64_t asize = SPA_MINBLOCKSIZE;
nvlist_t *tree, **vdevs;
uint_t nvdevs;
- nvlist_t *config = zpool_get_config(zhp, NULL);
-
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
@@ -973,6 +973,24 @@ default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
}
}
+ return (asize);
+}
+
+/*
+ * Return a default volblocksize for the pool which always uses more than
+ * half of the data sectors. This primarily applies to dRAID which always
+ * writes full stripe widths.
+ */
+static uint64_t
+default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
+{
+ uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
+
+ nvlist_t *config = zpool_get_config(zhp, NULL);
+
+ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_MAX_ALLOC, &asize) != 0)
+ asize = calculate_volblocksize(config);
+
/*
* Calculate the target volblocksize such that more than half
* of the asize is used. The following table is for 4k sectors.
@@ -1319,7 +1337,9 @@ zfs_do_create(int argc, char **argv)
goto error;
}
- ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
+ /* Dataset created successfully, mount/share failures are non-fatal */
+ ret = 0;
+ (void) zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
@@ -5303,6 +5323,7 @@ zfs_do_receive(int argc, char **argv)
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
+#define ZFS_DELEG_PERM_SEND_RAW "send:raw"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_RECEIVE_APPEND "receive:append"
#define ZFS_DELEG_PERM_ALLOW "allow"
@@ -5345,6 +5366,7 @@ static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
+ { ZFS_DELEG_PERM_SEND_RAW, ZFS_DELEG_NOTE_SEND_RAW },
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
@@ -5929,6 +5951,10 @@ deleg_perm_comment(zfs_deleg_note_t note)
case ZFS_DELEG_NOTE_SEND:
str = gettext("");
break;
+ case ZFS_DELEG_NOTE_SEND_RAW:
+ str = gettext("Allow sending ONLY encrypted (raw) replication"
+ "\n\t\t\t\tstreams");
+ break;
case ZFS_DELEG_NOTE_SHARE:
str = gettext("Allows sharing file systems over NFS or SMB"
"\n\t\t\t\tprotocols");
@@ -6858,17 +6884,17 @@ print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl,
if (scripted) {
if (parsable) {
- (void) printf("%s\t%s\t%ld\n", zname,
- tagname, (unsigned long)time);
+ (void) printf("%s\t%s\t%lld\n", zname,
+ tagname, (long long)time);
} else {
(void) printf("%s\t%s\t%s\n", zname,
tagname, tsbuf);
}
} else {
if (parsable) {
- (void) printf("%-*s %-*s %ld\n",
+ (void) printf("%-*s %-*s %lld\n",
nwidth, zname, tagwidth,
- tagname, (unsigned long)time);
+ tagname, (long long)time);
} else {
(void) printf("%-*s %-*s %s\n",
nwidth, zname, tagwidth,
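The print_holds() hunk above replaces the (unsigned long)/%ld pair with (long long)/%lld because the hold timestamp does not fit an unsigned long on every platform; casting to long long prints it correctly everywhere. A minimal sketch of the portable pattern, using only libc:

#include <stdio.h>
#include <time.h>

int
main(void)
{
	time_t now = time(NULL);

	/*
	 * time_t may be 32 or 64 bits wide depending on platform and ABI;
	 * (long long) with %lld is wide enough in every case, while an
	 * (unsigned long) cast truncates 64-bit values on ILP32 targets.
	 */
	(void) printf("%lld\n", (long long)now);
	return (0);
}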
diff --git a/sys/contrib/openzfs/cmd/zhack.c b/sys/contrib/openzfs/cmd/zhack.c
index 8244bc83fa0d..8ffbf91ffb30 100644
--- a/sys/contrib/openzfs/cmd/zhack.c
+++ b/sys/contrib/openzfs/cmd/zhack.c
@@ -54,6 +54,7 @@
#include <sys/dmu_tx.h>
#include <zfeature_common.h>
#include <libzutil.h>
+#include <sys/metaslab_impl.h>
static importargs_t g_importargs;
static char *g_pool;
@@ -69,7 +70,8 @@ static __attribute__((noreturn)) void
usage(void)
{
(void) fprintf(stderr,
- "Usage: zhack [-c cachefile] [-d dir] <subcommand> <args> ...\n"
+ "Usage: zhack [-o tunable] [-c cachefile] [-d dir] <subcommand> "
+ "<args> ...\n"
"where <subcommand> <args> is one of the following:\n"
"\n");
@@ -93,7 +95,10 @@ usage(void)
" -c repair corrupted label checksums\n"
" -u restore the label on a detached device\n"
"\n"
- " <device> : path to vdev\n");
+ " <device> : path to vdev\n"
+ "\n"
+ " metaslab leak <pool>\n"
+ " apply allocation map from zdb to specified pool\n");
exit(1);
}
@@ -162,9 +167,9 @@ zhack_import(char *target, boolean_t readonly)
props = NULL;
if (readonly) {
- VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
- VERIFY(nvlist_add_uint64(props,
- zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
+ VERIFY0(nvlist_alloc(&props, NV_UNIQUE_NAME, 0));
+ VERIFY0(nvlist_add_uint64(props,
+ zpool_prop_to_name(ZPOOL_PROP_READONLY), 1));
}
zfeature_checks_disable = B_TRUE;
@@ -218,8 +223,8 @@ dump_obj(objset_t *os, uint64_t obj, const char *name)
} else {
ASSERT(za->za_integer_length == 1);
char val[1024];
- VERIFY(zap_lookup(os, obj, za->za_name,
- 1, sizeof (val), val) == 0);
+ VERIFY0(zap_lookup(os, obj, za->za_name,
+ 1, sizeof (val), val));
(void) printf("\t%s = %s\n", za->za_name, val);
}
}
@@ -363,10 +368,12 @@ feature_incr_sync(void *arg, dmu_tx_t *tx)
zfeature_info_t *feature = arg;
uint64_t refcount;
+ mutex_enter(&spa->spa_feat_stats_lock);
VERIFY0(feature_get_refcount_from_disk(spa, feature, &refcount));
feature_sync(spa, feature, refcount + 1, tx);
spa_history_log_internal(spa, "zhack feature incr", tx,
"name=%s", feature->fi_guid);
+ mutex_exit(&spa->spa_feat_stats_lock);
}
static void
@@ -376,10 +383,12 @@ feature_decr_sync(void *arg, dmu_tx_t *tx)
zfeature_info_t *feature = arg;
uint64_t refcount;
+ mutex_enter(&spa->spa_feat_stats_lock);
VERIFY0(feature_get_refcount_from_disk(spa, feature, &refcount));
feature_sync(spa, feature, refcount - 1, tx);
spa_history_log_internal(spa, "zhack feature decr", tx,
"name=%s", feature->fi_guid);
+ mutex_exit(&spa->spa_feat_stats_lock);
}
static void
@@ -496,6 +505,186 @@ zhack_do_feature(int argc, char **argv)
return (0);
}
+static boolean_t
+strstarts(const char *a, const char *b)
+{
+ return (strncmp(a, b, strlen(b)) == 0);
+}
+
+static void
+metaslab_force_alloc(metaslab_t *msp, uint64_t start, uint64_t size,
+ dmu_tx_t *tx)
+{
+ ASSERT(msp->ms_disabled);
+ ASSERT(MUTEX_HELD(&msp->ms_lock));
+ uint64_t txg = dmu_tx_get_txg(tx);
+
+ uint64_t off = start;
+ while (off < start + size) {
+ uint64_t ostart, osize;
+ boolean_t found = zfs_range_tree_find_in(msp->ms_allocatable,
+ off, start + size - off, &ostart, &osize);
+ if (!found)
+ break;
+ zfs_range_tree_remove(msp->ms_allocatable, ostart, osize);
+
+ if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
+ vdev_dirty(msp->ms_group->mg_vd, VDD_METASLAB, msp,
+ txg);
+
+ zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], ostart,
+ osize);
+ msp->ms_allocating_total += osize;
+ off = ostart + osize;
+ }
+}
+
+static void
+zhack_do_metaslab_leak(int argc, char **argv)
+{
+ int c;
+ char *target;
+ spa_t *spa;
+
+ optind = 1;
+ boolean_t force = B_FALSE;
+ while ((c = getopt(argc, argv, "f")) != -1) {
+ switch (c) {
+ case 'f':
+ force = B_TRUE;
+ break;
+ default:
+ usage();
+ break;
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc < 1) {
+ (void) fprintf(stderr, "error: missing pool name\n");
+ usage();
+ }
+ target = argv[0];
+
+ zhack_spa_open(target, B_FALSE, FTAG, &spa);
+ spa_config_enter(spa, SCL_VDEV | SCL_ALLOC, FTAG, RW_READER);
+
+ char *line = NULL;
+ size_t cap = 0;
+
+ vdev_t *vd = NULL;
+ metaslab_t *prev = NULL;
+ dmu_tx_t *tx = NULL;
+ while (getline(&line, &cap, stdin) > 0) {
+ if (strstarts(line, "\tvdev ")) {
+ uint64_t vdev_id, ms_shift;
+ if (sscanf(line,
+ "\tvdev %10"PRIu64"\t%*s metaslab shift %4"PRIu64,
+ &vdev_id, &ms_shift) == 1) {
+ VERIFY3U(sscanf(line, "\tvdev %"PRIu64
+ "\t metaslab shift %4"PRIu64,
+ &vdev_id, &ms_shift), ==, 2);
+ }
+ vd = vdev_lookup_top(spa, vdev_id);
+ if (vd == NULL) {
+ fprintf(stderr, "error: no such vdev with "
+ "id %"PRIu64"\n", vdev_id);
+ break;
+ }
+ if (tx) {
+ dmu_tx_commit(tx);
+ mutex_exit(&prev->ms_lock);
+ metaslab_enable(prev, B_FALSE, B_FALSE);
+ tx = NULL;
+ prev = NULL;
+ }
+ if (vd->vdev_ms_shift != ms_shift) {
+ fprintf(stderr, "error: ms_shift mismatch: %"
+ PRIu64" != %"PRIu64"\n", vd->vdev_ms_shift,
+ ms_shift);
+ break;
+ }
+ } else if (strstarts(line, "\tmetaslabs ")) {
+ uint64_t ms_count;
+ VERIFY3U(sscanf(line, "\tmetaslabs %"PRIu64, &ms_count),
+ ==, 1);
+ ASSERT(vd);
+ if (!force && vd->vdev_ms_count != ms_count) {
+ fprintf(stderr, "error: ms_count mismatch: %"
+ PRIu64" != %"PRIu64"\n", vd->vdev_ms_count,
+ ms_count);
+ break;
+ }
+ } else if (strstarts(line, "ALLOC:")) {
+ uint64_t start, size;
+ VERIFY3U(sscanf(line, "ALLOC: %"PRIu64" %"PRIu64"\n",
+ &start, &size), ==, 2);
+
+ ASSERT(vd);
+ metaslab_t *cur =
+ vd->vdev_ms[start >> vd->vdev_ms_shift];
+ if (prev != cur) {
+ if (prev) {
+ dmu_tx_commit(tx);
+ mutex_exit(&prev->ms_lock);
+ metaslab_enable(prev, B_FALSE, B_FALSE);
+ }
+ ASSERT(cur);
+ metaslab_disable(cur);
+ mutex_enter(&cur->ms_lock);
+ metaslab_load(cur);
+ prev = cur;
+ tx = dmu_tx_create_dd(
+ spa_get_dsl(vd->vdev_spa)->dp_root_dir);
+ dmu_tx_assign(tx, DMU_TX_WAIT);
+ }
+
+ metaslab_force_alloc(cur, start, size, tx);
+ } else {
+ continue;
+ }
+ }
+ if (tx) {
+ dmu_tx_commit(tx);
+ mutex_exit(&prev->ms_lock);
+ metaslab_enable(prev, B_FALSE, B_FALSE);
+ tx = NULL;
+ prev = NULL;
+ }
+ if (line)
+ free(line);
+
+ spa_config_exit(spa, SCL_VDEV | SCL_ALLOC, FTAG);
+ spa_close(spa, FTAG);
+}
+
+static int
+zhack_do_metaslab(int argc, char **argv)
+{
+ char *subcommand;
+
+ argc--;
+ argv++;
+ if (argc == 0) {
+ (void) fprintf(stderr,
+ "error: no metaslab operation specified\n");
+ usage();
+ }
+
+ subcommand = argv[0];
+ if (strcmp(subcommand, "leak") == 0) {
+ zhack_do_metaslab_leak(argc, argv);
+ } else {
+ (void) fprintf(stderr, "error: unknown subcommand: %s\n",
+ subcommand);
+ usage();
+ }
+
+ return (0);
+}
+
#define ASHIFT_UBERBLOCK_SHIFT(ashift) \
MIN(MAX(ashift, UBERBLOCK_SHIFT), \
MAX_UBERBLOCK_SHIFT)
@@ -525,6 +714,23 @@ zhack_repair_read_label(const int fd, vdev_label_t *vl,
return (0);
}
+static int
+zhack_repair_get_byteswap(const zio_eck_t *vdev_eck, const int l, int *byteswap)
+{
+ if (vdev_eck->zec_magic == ZEC_MAGIC) {
+ *byteswap = B_FALSE;
+ } else if (vdev_eck->zec_magic == BSWAP_64((uint64_t)ZEC_MAGIC)) {
+ *byteswap = B_TRUE;
+ } else {
+ (void) fprintf(stderr, "error: label %d: "
+ "Expected the nvlist checksum magic number but instead got "
+ "0x%" PRIx64 "\n",
+ l, vdev_eck->zec_magic);
+ return (1);
+ }
+ return (0);
+}
+
static void
zhack_repair_calc_cksum(const int byteswap, void *data, const uint64_t offset,
const uint64_t abdsize, zio_eck_t *eck, zio_cksum_t *cksum)
@@ -551,33 +757,10 @@ zhack_repair_calc_cksum(const int byteswap, void *data, const uint64_t offset,
}
static int
-zhack_repair_check_label(uberblock_t *ub, const int l, const char **cfg_keys,
- const size_t cfg_keys_len, nvlist_t *cfg, nvlist_t *vdev_tree_cfg,
- uint64_t *ashift)
+zhack_repair_get_ashift(nvlist_t *cfg, const int l, uint64_t *ashift)
{
int err;
-
- if (ub->ub_txg != 0) {
- (void) fprintf(stderr,
- "error: label %d: UB TXG of 0 expected, but got %"
- PRIu64 "\n",
- l, ub->ub_txg);
- (void) fprintf(stderr, "It would appear the device was not "
- "properly removed.\n");
- return (1);
- }
-
- for (int i = 0; i < cfg_keys_len; i++) {
- uint64_t val;
- err = nvlist_lookup_uint64(cfg, cfg_keys[i], &val);
- if (err) {
- (void) fprintf(stderr,
- "error: label %d, %d: "
- "cannot find nvlist key %s\n",
- l, i, cfg_keys[i]);
- return (err);
- }
- }
+ nvlist_t *vdev_tree_cfg;
err = nvlist_lookup_nvlist(cfg,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree_cfg);
@@ -601,7 +784,7 @@ zhack_repair_check_label(uberblock_t *ub, const int l, const char **cfg_keys,
(void) fprintf(stderr,
"error: label %d: nvlist key %s is zero\n",
l, ZPOOL_CONFIG_ASHIFT);
- return (err);
+ return (1);
}
return (0);
@@ -616,30 +799,35 @@ zhack_repair_undetach(uberblock_t *ub, nvlist_t *cfg, const int l)
*/
if (BP_GET_LOGICAL_BIRTH(&ub->ub_rootbp) != 0) {
const uint64_t txg = BP_GET_LOGICAL_BIRTH(&ub->ub_rootbp);
+ int err;
+
ub->ub_txg = txg;
- if (nvlist_remove_all(cfg, ZPOOL_CONFIG_CREATE_TXG) != 0) {
+ err = nvlist_remove_all(cfg, ZPOOL_CONFIG_CREATE_TXG);
+ if (err) {
(void) fprintf(stderr,
"error: label %d: "
"Failed to remove pool creation TXG\n",
l);
- return (1);
+ return (err);
}
- if (nvlist_remove_all(cfg, ZPOOL_CONFIG_POOL_TXG) != 0) {
+ err = nvlist_remove_all(cfg, ZPOOL_CONFIG_POOL_TXG);
+ if (err) {
(void) fprintf(stderr,
"error: label %d: Failed to remove pool TXG to "
"be replaced.\n",
l);
- return (1);
+ return (err);
}
- if (nvlist_add_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, txg) != 0) {
+ err = nvlist_add_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, txg);
+ if (err) {
(void) fprintf(stderr,
"error: label %d: "
"Failed to add pool TXG of %" PRIu64 "\n",
l, txg);
- return (1);
+ return (err);
}
}
@@ -733,6 +921,7 @@ zhack_repair_test_cksum(const int byteswap, void *vdev_data,
BSWAP_64(ZEC_MAGIC) : ZEC_MAGIC;
const uint64_t actual_magic = vdev_eck->zec_magic;
int err = 0;
+
if (actual_magic != expected_magic) {
(void) fprintf(stderr, "error: label %d: "
"Expected "
@@ -754,6 +943,36 @@ zhack_repair_test_cksum(const int byteswap, void *vdev_data,
return (err);
}
+static int
+zhack_repair_unpack_cfg(vdev_label_t *vl, const int l, nvlist_t **cfg)
+{
+ const char *cfg_keys[] = { ZPOOL_CONFIG_VERSION,
+ ZPOOL_CONFIG_POOL_STATE, ZPOOL_CONFIG_GUID };
+ int err;
+
+ err = nvlist_unpack(vl->vl_vdev_phys.vp_nvlist,
+ VDEV_PHYS_SIZE - sizeof (zio_eck_t), cfg, 0);
+ if (err) {
+ (void) fprintf(stderr,
+ "error: cannot unpack nvlist label %d\n", l);
+ return (err);
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(cfg_keys); i++) {
+ uint64_t val;
+ err = nvlist_lookup_uint64(*cfg, cfg_keys[i], &val);
+ if (err) {
+ (void) fprintf(stderr,
+ "error: label %d, %d: "
+ "cannot find nvlist key %s\n",
+ l, i, cfg_keys[i]);
+ return (err);
+ }
+ }
+
+ return (0);
+}
+
static void
zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
vdev_label_t *vl, const uint64_t label_offset, const int l,
@@ -767,10 +986,7 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
(zio_eck_t *)((char *)(vdev_data) + VDEV_PHYS_SIZE) - 1;
const uint64_t vdev_phys_offset =
label_offset + offsetof(vdev_label_t, vl_vdev_phys);
- const char *cfg_keys[] = { ZPOOL_CONFIG_VERSION,
- ZPOOL_CONFIG_POOL_STATE, ZPOOL_CONFIG_GUID };
nvlist_t *cfg;
- nvlist_t *vdev_tree_cfg = NULL;
uint64_t ashift;
int byteswap;
@@ -778,18 +994,9 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
if (err)
return;
- if (vdev_eck->zec_magic == 0) {
- (void) fprintf(stderr, "error: label %d: "
- "Expected the nvlist checksum magic number to not be zero"
- "\n",
- l);
- (void) fprintf(stderr, "There should already be a checksum "
- "for the label.\n");
+ err = zhack_repair_get_byteswap(vdev_eck, l, &byteswap);
+ if (err)
return;
- }
-
- byteswap =
- (vdev_eck->zec_magic == BSWAP_64((uint64_t)ZEC_MAGIC));
if (byteswap) {
byteswap_uint64_array(&vdev_eck->zec_cksum,
@@ -805,16 +1012,7 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
return;
}
- err = nvlist_unpack(vl->vl_vdev_phys.vp_nvlist,
- VDEV_PHYS_SIZE - sizeof (zio_eck_t), &cfg, 0);
- if (err) {
- (void) fprintf(stderr,
- "error: cannot unpack nvlist label %d\n", l);
- return;
- }
-
- err = zhack_repair_check_label(ub,
- l, cfg_keys, ARRAY_SIZE(cfg_keys), cfg, vdev_tree_cfg, &ashift);
+ err = zhack_repair_unpack_cfg(vl, l, &cfg);
if (err)
return;
@@ -822,6 +1020,19 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
char *buf;
size_t buflen;
+ if (ub->ub_txg != 0) {
+ (void) fprintf(stderr,
+ "error: label %d: UB TXG of 0 expected, but got %"
+ PRIu64 "\n", l, ub->ub_txg);
+ (void) fprintf(stderr, "It would appear the device was "
+ "not properly detached.\n");
+ return;
+ }
+
+ err = zhack_repair_get_ashift(cfg, l, &ashift);
+ if (err)
+ return;
+
err = zhack_repair_undetach(ub, cfg, l);
if (err)
return;
@@ -981,7 +1192,7 @@ main(int argc, char **argv)
dprintf_setup(&argc, argv);
zfs_prop_init();
- while ((c = getopt(argc, argv, "+c:d:")) != -1) {
+ while ((c = getopt(argc, argv, "+c:d:o:")) != -1) {
switch (c) {
case 'c':
g_importargs.cachefile = optarg;
@@ -990,6 +1201,10 @@ main(int argc, char **argv)
assert(g_importargs.paths < MAX_NUM_PATHS);
g_importargs.path[g_importargs.paths++] = optarg;
break;
+ case 'o':
+ if (handle_tunable_option(optarg, B_FALSE) != 0)
+ exit(1);
+ break;
default:
usage();
break;
@@ -1011,6 +1226,8 @@ main(int argc, char **argv)
rv = zhack_do_feature(argc, argv);
} else if (strcmp(subcommand, "label") == 0) {
return (zhack_do_label(argc, argv));
+ } else if (strcmp(subcommand, "metaslab") == 0) {
+ rv = zhack_do_metaslab(argc, argv);
} else {
(void) fprintf(stderr, "error: unknown subcommand: %s\n",
subcommand);
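A usage sketch for the new zhack plumbing above: -o is parsed ahead of subcommand dispatch via handle_tunable_option(), so a tunable applies to whichever subcommand follows. The name=value form and the tunable shown are assumptions (neither appears in this hunk); treat this as illustrative only:

    # Hypothetical: set a module tunable for this invocation only,
    # then run an existing subcommand with it in effect.
    zhack -o zfs_flags=512 feature stat tank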
diff --git a/sys/contrib/openzfs/cmd/zilstat.in b/sys/contrib/openzfs/cmd/zilstat.in
index 4140398bf4a3..d01db9b0914b 100755
--- a/sys/contrib/openzfs/cmd/zilstat.in
+++ b/sys/contrib/openzfs/cmd/zilstat.in
@@ -47,6 +47,7 @@ cols = {
"cec": [5, 1000, "zil_commit_error_count"],
"csc": [5, 1000, "zil_commit_stall_count"],
"cSc": [5, 1000, "zil_commit_suspend_count"],
+ "cCc": [5, 1000, "zil_commit_crash_count"],
"ic": [5, 1000, "zil_itx_count"],
"iic": [5, 1000, "zil_itx_indirect_count"],
"iib": [5, 1024, "zil_itx_indirect_bytes"],
diff --git a/sys/contrib/openzfs/cmd/zinject/zinject.c b/sys/contrib/openzfs/cmd/zinject/zinject.c
index 113797c878b9..c2f646f2567d 100644
--- a/sys/contrib/openzfs/cmd/zinject/zinject.c
+++ b/sys/contrib/openzfs/cmd/zinject/zinject.c
@@ -107,6 +107,8 @@
* zinject
* zinject <-a | -u pool>
* zinject -c <id|all>
+ * zinject -E <delay> [-a] [-m] [-f freq] [-l level] [-r range]
+ * [-T iotype] [-t type object | -b bookmark pool]
* zinject [-q] <-t type> [-f freq] [-u] [-a] [-m] [-e errno] [-l level]
* [-r range] <object>
* zinject [-f freq] [-a] [-m] [-u] -b objset:object:level:start:end pool
@@ -132,14 +134,18 @@
* The '-f' flag controls the frequency of errors injected, expressed as a
* real number percentage between 0.0001 and 100. The default is 100.
*
- * The this form is responsible for actually injecting the handler into the
+ * The <object> form is responsible for actually injecting the handler into the
* framework. It takes the arguments described above, translates them to the
* internal tuple using libzpool, and then issues an ioctl() to register the
* handler.
*
- * The final form can target a specific bookmark, regardless of whether a
+ * The '-b' option can target a specific bookmark, regardless of whether a
* human-readable interface has been designed. It allows developers to specify
* a particular block by number.
+ *
+ * The '-E' option injects pipeline ready stage delays for the given object or
+ * bookmark. The delay is specified in milliseconds, and it supports I/O type
+ * and range filters.
*/
#include <errno.h>
@@ -346,6 +352,13 @@ usage(void)
"\t\tsuch that the operation takes a minimum of supplied seconds\n"
"\t\tto complete.\n"
"\n"
+ "\tzinject -E <delay> [-a] [-m] [-f freq] [-l level] [-r range]\n"
+ "\t\t[-T iotype] [-t type object | -b bookmark pool]\n"
+ "\n"
+ "\t\tInject pipeline ready stage delays for the given object path\n"
+ "\t\t(data or dnode) or raw bookmark. The delay is specified in\n"
+ "\t\tmilliseconds.\n"
+ "\n"
"\tzinject -I [-s <seconds> | -g <txgs>] pool\n"
"\t\tCause the pool to stop writing blocks yet not\n"
"\t\treport errors for a duration. Simulates buggy hardware\n"
@@ -724,12 +737,15 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
if (quiet) {
(void) printf("%llu\n", (u_longlong_t)zc.zc_guid);
} else {
+ boolean_t show_object = B_FALSE;
+ boolean_t show_iotype = B_FALSE;
(void) printf("Added handler %llu with the following "
"properties:\n", (u_longlong_t)zc.zc_guid);
(void) printf(" pool: %s\n", pool);
if (record->zi_guid) {
(void) printf(" vdev: %llx\n",
(u_longlong_t)record->zi_guid);
+ show_iotype = B_TRUE;
} else if (record->zi_func[0] != '\0') {
(void) printf(" panic function: %s\n",
record->zi_func);
@@ -742,7 +758,18 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
} else if (record->zi_timer > 0) {
(void) printf(" timer: %lld ms\n",
(u_longlong_t)NSEC2MSEC(record->zi_timer));
+ if (record->zi_cmd == ZINJECT_DELAY_READY) {
+ show_object = B_TRUE;
+ show_iotype = B_TRUE;
+ }
} else {
+ show_object = B_TRUE;
+ }
+ if (show_iotype) {
+ (void) printf("iotype: %s\n",
+ iotype_to_str(record->zi_iotype));
+ }
+ if (show_object) {
(void) printf("objset: %llu\n",
(u_longlong_t)record->zi_objset);
(void) printf("object: %llu\n",
@@ -910,6 +937,7 @@ main(int argc, char **argv)
int ret;
int flags = 0;
uint32_t dvas = 0;
+ hrtime_t ready_delay = -1;
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
@@ -940,7 +968,7 @@ main(int argc, char **argv)
}
while ((c = getopt(argc, argv,
- ":aA:b:C:d:D:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:P:")) != -1) {
+ ":aA:b:C:d:D:E:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:P:")) != -1) {
switch (c) {
case 'a':
flags |= ZINJECT_FLUSH_ARC;
@@ -1113,6 +1141,18 @@ main(int argc, char **argv)
case 'u':
flags |= ZINJECT_UNLOAD_SPA;
break;
+ case 'E':
+ ready_delay = MSEC2NSEC(strtol(optarg, &end, 10));
+ if (ready_delay <= 0 || *end != '\0') {
+ (void) fprintf(stderr, "invalid delay '%s': "
+ "must be a positive duration\n", optarg);
+ usage();
+ libzfs_fini(g_zfs);
+ return (1);
+ }
+ record.zi_cmd = ZINJECT_DELAY_READY;
+ record.zi_timer = ready_delay;
+ break;
case 'L':
if ((label = name_to_type(optarg)) == TYPE_INVAL &&
!LABEL_TYPE(type)) {
@@ -1150,7 +1190,7 @@ main(int argc, char **argv)
*/
if (raw != NULL || range != NULL || type != TYPE_INVAL ||
level != 0 || record.zi_cmd != ZINJECT_UNINITIALIZED ||
- record.zi_freq > 0 || dvas != 0) {
+ record.zi_freq > 0 || dvas != 0 || ready_delay >= 0) {
(void) fprintf(stderr, "cancel (-c) incompatible with "
"any other options\n");
usage();
@@ -1186,7 +1226,7 @@ main(int argc, char **argv)
*/
if (raw != NULL || range != NULL || type != TYPE_INVAL ||
level != 0 || record.zi_cmd != ZINJECT_UNINITIALIZED ||
- dvas != 0) {
+ dvas != 0 || ready_delay >= 0) {
(void) fprintf(stderr, "device (-d) incompatible with "
"data error injection\n");
usage();
@@ -1276,13 +1316,23 @@ main(int argc, char **argv)
return (1);
}
- record.zi_cmd = ZINJECT_DATA_FAULT;
+ if (record.zi_cmd == ZINJECT_UNINITIALIZED) {
+ record.zi_cmd = ZINJECT_DATA_FAULT;
+ if (!error)
+ error = EIO;
+ } else if (error != 0) {
+ (void) fprintf(stderr, "error type -e incompatible "
+ "with delay injection\n");
+ libzfs_fini(g_zfs);
+ return (1);
+ } else {
+ record.zi_iotype = io_type;
+ }
+
if (translate_raw(raw, &record) != 0) {
libzfs_fini(g_zfs);
return (1);
}
- if (!error)
- error = EIO;
} else if (record.zi_cmd == ZINJECT_PANIC) {
if (raw != NULL || range != NULL || type != TYPE_INVAL ||
level != 0 || device != NULL || record.zi_freq > 0 ||
@@ -1410,6 +1460,13 @@ main(int argc, char **argv)
record.zi_dvas = dvas;
}
+ if (record.zi_cmd != ZINJECT_UNINITIALIZED && error != 0) {
+ (void) fprintf(stderr, "error type -e incompatible "
+ "with delay injection\n");
+ libzfs_fini(g_zfs);
+ return (1);
+ }
+
if (error == EACCES) {
if (type != TYPE_DATA) {
(void) fprintf(stderr, "decryption errors "
@@ -1425,8 +1482,12 @@ main(int argc, char **argv)
* not found.
*/
error = ECKSUM;
- } else {
+ } else if (record.zi_cmd == ZINJECT_UNINITIALIZED) {
record.zi_cmd = ZINJECT_DATA_FAULT;
+ if (!error)
+ error = EIO;
+ } else {
+ record.zi_iotype = io_type;
}
if (translate_record(type, argv[0], range, level, &record, pool,
@@ -1434,8 +1495,6 @@ main(int argc, char **argv)
libzfs_fini(g_zfs);
return (1);
}
- if (!error)
- error = EIO;
}
/*
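A usage sketch for the new -E delay injector, following the usage text added above; the 500 ms value and file path are illustrative, and the object form reuses the existing -t type object syntax:

    # Delay the ZIO ready stage by 500 ms for writes to one file.
    zinject -E 500 -T write -t data /tank/fs/bigfile
    # List active handlers, then clear them all when done.
    zinject
    zinject -c all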
diff --git a/sys/contrib/openzfs/cmd/zpool/Makefile.am b/sys/contrib/openzfs/cmd/zpool/Makefile.am
index 2f962408e5a3..5bb6d8160b18 100644
--- a/sys/contrib/openzfs/cmd/zpool/Makefile.am
+++ b/sys/contrib/openzfs/cmd/zpool/Makefile.am
@@ -148,6 +148,7 @@ dist_zpoolcompat_DATA = \
%D%/compatibility.d/openzfs-2.1-linux \
%D%/compatibility.d/openzfs-2.2 \
%D%/compatibility.d/openzfs-2.3 \
+ %D%/compatibility.d/openzfs-2.4 \
%D%/compatibility.d/openzfsonosx-1.7.0 \
%D%/compatibility.d/openzfsonosx-1.8.1 \
%D%/compatibility.d/openzfsonosx-1.9.3 \
@@ -187,7 +188,9 @@ zpoolcompatlinks = \
"openzfs-2.2 openzfs-2.2-linux" \
"openzfs-2.2 openzfs-2.2-freebsd" \
"openzfs-2.3 openzfs-2.3-linux" \
- "openzfs-2.3 openzfs-2.3-freebsd"
+ "openzfs-2.3 openzfs-2.3-freebsd" \
+ "openzfs-2.4 openzfs-2.4-linux" \
+ "openzfs-2.4 openzfs-2.4-freebsd"
zpoolconfdir = $(sysconfdir)/zfs/zpool.d
INSTALL_DATA_HOOKS += zpool-install-data-hook
diff --git a/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4 b/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4
new file mode 100644
index 000000000000..3fbd91014c95
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4
@@ -0,0 +1,48 @@
+# Features supported by OpenZFS 2.4 on Linux and FreeBSD
+allocation_classes
+async_destroy
+blake3
+block_cloning
+block_cloning_endian
+bookmark_v2
+bookmark_written
+bookmarks
+device_rebuild
+device_removal
+draid
+dynamic_gang_header
+edonr
+embedded_data
+empty_bpobj
+enabled_txg
+encryption
+extensible_dataset
+fast_dedup
+filesystem_limits
+head_errlog
+hole_birth
+large_blocks
+large_dnode
+large_microzap
+livelist
+log_spacemap
+longname
+lz4_compress
+multi_vdev_crash_dump
+obsolete_counts
+physical_rewrite
+project_quota
+raidz_expansion
+redacted_datasets
+redaction_bookmarks
+redaction_list_spill
+resilver_defer
+sha512
+skein
+spacemap_histogram
+spacemap_v2
+userobj_accounting
+vdev_zaps_v2
+zilsaxattr
+zpool_checkpoint
+zstd_compress
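The new feature-set file is consumed through the existing compatibility pool property, for example:

    # Restrict a new pool to the OpenZFS 2.4 feature set...
    zpool create -o compatibility=openzfs-2.4 tank mirror sda sdb
    # ...or apply it to an existing pool.
    zpool set compatibility=openzfs-2.4 tank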
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
index 2eec9a95e24c..fef602736705 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
@@ -26,6 +26,7 @@
/*
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <libintl.h>
@@ -52,7 +53,7 @@
typedef struct zpool_node {
zpool_handle_t *zn_handle;
uu_avl_node_t zn_avlnode;
- int zn_mark;
+ hrtime_t zn_last_refresh;
} zpool_node_t;
struct zpool_list {
@@ -62,6 +63,7 @@ struct zpool_list {
uu_avl_pool_t *zl_pool;
zprop_list_t **zl_proplist;
zfs_type_t zl_type;
+ hrtime_t zl_last_refresh;
};
static int
@@ -81,26 +83,30 @@ zpool_compare(const void *larg, const void *rarg, void *unused)
* of known pools.
*/
static int
-add_pool(zpool_handle_t *zhp, void *data)
+add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
{
- zpool_list_t *zlp = data;
- zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
+ zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
uu_avl_index_t idx;
- node->zn_handle = zhp;
- uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
- if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
+ new->zn_handle = zhp;
+ uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);
+
+ node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
+ if (node == NULL) {
if (zlp->zl_proplist &&
zpool_expand_proplist(zhp, zlp->zl_proplist,
zlp->zl_type, zlp->zl_literal) != 0) {
zpool_close(zhp);
- free(node);
+ free(new);
return (-1);
}
- uu_avl_insert(zlp->zl_avl, node, idx);
+ new->zn_last_refresh = zlp->zl_last_refresh;
+ uu_avl_insert(zlp->zl_avl, new, idx);
} else {
+ zpool_refresh_stats_from_handle(node->zn_handle, zhp);
+ node->zn_last_refresh = zlp->zl_last_refresh;
zpool_close(zhp);
- free(node);
+ free(new);
return (-1);
}
@@ -108,6 +114,18 @@ add_pool(zpool_handle_t *zhp, void *data)
}
/*
+ * add_pool(), but always returns 0. This allows zpool_iter() to continue
+ * even if a pool exists in the tree, or we fail to get the properties for
+ * a new one.
+ */
+static int
+add_pool_cb(zpool_handle_t *zhp, void *data)
+{
+ (void) add_pool(zhp, data);
+ return (0);
+}
+
+/*
* Create a list of pools based on the given arguments. If we're given no
* arguments, then iterate over all pools in the system and add them to the AVL
 * tree. Otherwise, add only those pools explicitly specified on the command
@@ -135,9 +153,10 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
zlp->zl_type = type;
zlp->zl_literal = literal;
+ zlp->zl_last_refresh = gethrtime();
if (argc == 0) {
- (void) zpool_iter(g_zfs, add_pool, zlp);
+ (void) zpool_iter(g_zfs, add_pool_cb, zlp);
zlp->zl_findall = B_TRUE;
} else {
int i;
@@ -159,15 +178,61 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
}
/*
- * Search for any new pools, adding them to the list. We only add pools when no
- * options were given on the command line. Otherwise, we keep the list fixed as
- * those that were explicitly specified.
+ * Refresh the state of all pools on the list. Additionally, if no options were
+ * given on the command line, add any new pools and remove any that are no
+ * longer available.
*/
-void
-pool_list_update(zpool_list_t *zlp)
+int
+pool_list_refresh(zpool_list_t *zlp)
{
- if (zlp->zl_findall)
- (void) zpool_iter(g_zfs, add_pool, zlp);
+ zlp->zl_last_refresh = gethrtime();
+
+ if (!zlp->zl_findall) {
+ /*
+ * This list is a fixed list of pools, so we must not add
+ * or remove any. Just walk over them and refresh their
+ * state.
+ */
+ int navail = 0;
+ for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
+ node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
+ boolean_t missing;
+ zpool_refresh_stats(node->zn_handle, &missing);
+ navail += !missing;
+ node->zn_last_refresh = zlp->zl_last_refresh;
+ }
+ return (navail);
+ }
+
+ /* Search for any new pools and add them to the list. */
+ (void) zpool_iter(g_zfs, add_pool_cb, zlp);
+
+ /* Walk the list of existing pools, and update or remove them. */
+ zpool_node_t *node, *next;
+ for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
+ next = uu_avl_next(zlp->zl_avl, node);
+
+ /*
+ * Skip any that were refreshed and are online; they were added
+ * by zpool_iter() and are already up to date.
+ */
+ if (node->zn_last_refresh == zlp->zl_last_refresh &&
+ zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL)
+ continue;
+
+ /* Refresh and remove if necessary. */
+ boolean_t missing;
+ zpool_refresh_stats(node->zn_handle, &missing);
+ if (missing) {
+ uu_avl_remove(zlp->zl_avl, node);
+ zpool_close(node->zn_handle);
+ free(node);
+ } else {
+ node->zn_last_refresh = zlp->zl_last_refresh;
+ }
+ }
+
+ return (uu_avl_numnodes(zlp->zl_avl));
}
/*
@@ -191,23 +256,6 @@ pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
}
/*
- * Remove the given pool from the list. When running iostat, we want to remove
- * those pools that no longer exist.
- */
-void
-pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
-{
- zpool_node_t search, *node;
-
- search.zn_handle = zhp;
- if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
- uu_avl_remove(zlp->zl_avl, node);
- zpool_close(node->zn_handle);
- free(node);
- }
-}
-
-/*
* Free all the handles associated with this list.
*/
void
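The effect of the pool_list_refresh() rework is easiest to see from a long-running iostat; a rough behavioral sketch under the new semantics, not a literal test:

    zpool iostat 5 &                # findall mode (no pools named)
    zpool create scratch /dev/vdb   # added on the next refresh
    zpool destroy scratch           # removed once zpool_refresh_stats()
                                    # reports it missing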
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index d401e087916b..1feec55c0e8b 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -33,7 +33,7 @@
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
- * Copyright (c) 2021, 2023, Klara Inc.
+ * Copyright (c) 2021, 2023, 2025, Klara, Inc.
* Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
*/
@@ -456,7 +456,7 @@ get_usage(zpool_help_t idx)
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
- "<pool> <device> <new-device>\n"));
+ "<pool> <vdev> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
case HELP_CREATE:
@@ -752,10 +752,11 @@ usage(boolean_t requested)
}
/*
- * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
+ * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
+ * -a Use all pools.
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -u Uninitialize. Clears initialization state.
@@ -776,7 +777,7 @@ zpool_do_initialize(int argc, char **argv)
{"suspend", no_argument, NULL, 's'},
{"uninit", no_argument, NULL, 'u'},
{"wait", no_argument, NULL, 'w'},
- {"all", no_argument, NULL, 'a'},
+ {"all", no_argument, NULL, 'a'},
{0, 0, 0, 0}
};
@@ -5760,24 +5761,6 @@ children:
return (ret);
}
-static int
-refresh_iostat(zpool_handle_t *zhp, void *data)
-{
- iostat_cbdata_t *cb = data;
- boolean_t missing;
-
- /*
- * If the pool has disappeared, remove it from the list and continue.
- */
- if (zpool_refresh_stats(zhp, &missing) != 0)
- return (-1);
-
- if (missing)
- pool_list_remove(cb->cb_list, zhp);
-
- return (0);
-}
-
/*
* Callback to print out the iostats for the given pool.
*/
@@ -6358,15 +6341,14 @@ get_namewidth_iostat(zpool_handle_t *zhp, void *data)
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
- * on pool_list_update() to detect the addition of new pools. Configuration
- * changes are all handled within libzfs.
+ * on pool_list_refresh() to detect the addition and removal of pools.
+ * Configuration changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
- int npools;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
@@ -6617,10 +6599,24 @@ zpool_do_iostat(int argc, char **argv)
return (1);
}
+ int last_npools = 0;
for (;;) {
- if ((npools = pool_list_count(list)) == 0)
+ /*
+ * Refresh all pools in list, adding or removing pools as
+ * necessary.
+ */
+ int npools = pool_list_refresh(list);
+ if (npools == 0) {
(void) fprintf(stderr, gettext("no pools available\n"));
- else {
+ } else {
+ /*
+ * If the list of pools has changed since last time
+ * around, reset the iteration count to force the
+ * header to be redisplayed.
+ */
+ if (last_npools != npools)
+ cb.cb_iteration = 0;
+
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
@@ -6629,15 +6625,6 @@ zpool_do_iostat(int argc, char **argv)
cb.cb_iteration == 0);
/*
- * Refresh all statistics. This is done as an
- * explicit step before calculating the maximum name
- * width, so that any * configuration changes are
- * properly accounted for.
- */
- (void) pool_list_iter(list, B_FALSE, refresh_iostat,
- &cb);
-
- /*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
@@ -6690,6 +6677,7 @@ zpool_do_iostat(int argc, char **argv)
if (skip) {
(void) fflush(stdout);
(void) fsleep(interval);
+ last_npools = npools;
continue;
}
@@ -6727,6 +6715,8 @@ zpool_do_iostat(int argc, char **argv)
(void) fflush(stdout);
(void) fsleep(interval);
+
+ last_npools = npools;
}
pool_list_free(list);
@@ -7643,7 +7633,7 @@ zpool_do_replace(int argc, char **argv)
}
/*
- * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
+ * zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
@@ -7651,9 +7641,9 @@ zpool_do_replace(int argc, char **argv)
* -w Wait for resilvering (mirror) or expansion (raidz) to complete
* before returning.
*
- * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
- * mirror or raidz. If <device> is not part of a mirror, then <device> will
- * be transformed into a mirror of <device> and <new_device>. When a mirror
+ * Attach <new_device> to a <vdev>, where the vdev can be of type
+ * device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
+ * be transformed into a mirror of <vdev> and <new_device>. When a mirror
* is involved, <new_device> will begin life with a DTL of [0, now], and will
 * immediately begin to resilver itself. For the raidz case, an expansion will
* commence and reflow the raidz data across all the disks including the
@@ -8446,8 +8436,9 @@ date_string_to_sec(const char *timestr, boolean_t rounding)
}
/*
- * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] <pool> ...
+ * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]
*
+ * -a Scrub all pools.
* -e Only scrub blocks in the error log.
* -E End date of scrub.
* -S Start date of scrub.
@@ -8621,8 +8612,9 @@ zpool_do_resilver(int argc, char **argv)
}
/*
- * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
+ * zpool trim [-d] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]
*
+ * -a Trim all pools.
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
@@ -12374,7 +12366,7 @@ zpool_do_events_next(ev_opts_t *opts)
nvlist_free(nvl);
}
- VERIFY(0 == close(zevent_fd));
+ VERIFY0(close(zevent_fd));
return (ret);
}
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_util.h b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
index 5ab7cb9750f1..3af23c52bd45 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_util.h
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
@@ -76,11 +76,10 @@ typedef struct zpool_list zpool_list_t;
zpool_list_t *pool_list_get(int, char **, zprop_list_t **, zfs_type_t,
boolean_t, int *);
-void pool_list_update(zpool_list_t *);
+int pool_list_refresh(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
int pool_list_count(zpool_list_t *);
-void pool_list_remove(zpool_list_t *, zpool_handle_t *);
extern libzfs_handle_t *g_zfs;
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
index 684b46a2d673..088c0108e911 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
@@ -609,22 +609,28 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
ZPOOL_CONFIG_PATH, &path) == 0);
/*
+ * Skip active spares; they should never cause
+ * the pool to be evaluated as inconsistent.
+ */
+ if (is_spare(NULL, path))
+ continue;
+
+ /*
* If we have a raidz/mirror that combines disks
- * with files, report it as an error.
+ * with files, only report it as an error when
+ * fatal is set to ensure all the replication
+ * checks aren't skipped in check_replication().
*/
- if (!dontreport && type != NULL &&
+ if (fatal && !dontreport && type != NULL &&
strcmp(type, childtype) != 0) {
if (ret != NULL)
free(ret);
ret = NULL;
- if (fatal)
- vdev_error(gettext(
- "mismatched replication "
- "level: %s contains both "
- "files and devices\n"),
- rep.zprl_type);
- else
- return (NULL);
+ vdev_error(gettext(
+ "mismatched replication "
+ "level: %s contains both "
+ "files and devices\n"),
+ rep.zprl_type);
dontreport = B_TRUE;
}
diff --git a/sys/contrib/openzfs/cmd/zstream/Makefile.am b/sys/contrib/openzfs/cmd/zstream/Makefile.am
index be3539fe905d..80ef1ea7ca11 100644
--- a/sys/contrib/openzfs/cmd/zstream/Makefile.am
+++ b/sys/contrib/openzfs/cmd/zstream/Makefile.am
@@ -18,6 +18,7 @@ zstream_LDADD = \
libzpool.la \
libnvpair.la
-PHONY += install-exec-hook
-install-exec-hook:
+cmd-zstream-install-exec-hook:
cd $(DESTDIR)$(sbindir) && $(LN_S) -f zstream zstreamdump
+
+INSTALL_EXEC_HOOKS += cmd-zstream-install-exec-hook
diff --git a/sys/contrib/openzfs/cmd/ztest.c b/sys/contrib/openzfs/cmd/ztest.c
index 2e88ae3e7994..89752dcb0f0f 100644
--- a/sys/contrib/openzfs/cmd/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest.c
@@ -273,7 +273,6 @@ extern int zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern uint_t dmu_object_alloc_chunk_shift;
extern boolean_t zfs_force_some_double_word_sm_entries;
-extern unsigned long zio_decompress_fail_fraction;
extern unsigned long zfs_reconstruct_indirect_damage_fraction;
extern uint64_t raidz_expand_max_reflow_bytes;
extern uint_t raidz_expand_pause_point;
@@ -829,8 +828,8 @@ static char *short_opts = NULL;
static void
init_options(void)
{
- ASSERT3P(long_opts, ==, NULL);
- ASSERT3P(short_opts, ==, NULL);
+ ASSERT0P(long_opts);
+ ASSERT0P(short_opts);
int count = sizeof (option_table) / sizeof (option_table[0]);
long_opts = umem_alloc(sizeof (struct option) * count, UMEM_NOFAIL);
@@ -1686,7 +1685,7 @@ ztest_rll_init(rll_t *rll)
static void
ztest_rll_destroy(rll_t *rll)
{
- ASSERT3P(rll->rll_writer, ==, NULL);
+ ASSERT0P(rll->rll_writer);
ASSERT0(rll->rll_readers);
mutex_destroy(&rll->rll_lock);
cv_destroy(&rll->rll_cv);
@@ -1720,7 +1719,7 @@ ztest_rll_unlock(rll_t *rll)
rll->rll_writer = NULL;
} else {
ASSERT3S(rll->rll_readers, >, 0);
- ASSERT3P(rll->rll_writer, ==, NULL);
+ ASSERT0P(rll->rll_writer);
rll->rll_readers--;
}
@@ -1996,7 +1995,7 @@ ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH |
DMU_KEEP_CACHING) != 0) {
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
write_state = WR_NEED_COPY;
}
@@ -2278,8 +2277,8 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
ztest_block_tag_t rbt;
- VERIFY(dmu_read(os, lr->lr_foid, offset,
- sizeof (rbt), &rbt, flags) == 0);
+ VERIFY0(dmu_read(os, lr->lr_foid, offset,
+ sizeof (rbt), &rbt, flags));
if (rbt.bt_magic == BT_MAGIC) {
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);
@@ -2966,7 +2965,7 @@ ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
(void) pthread_rwlock_rdlock(&zd->zd_zilog_lock);
- zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
+ VERIFY0(zil_commit(zilog, ztest_random(ZTEST_OBJECTS)));
/*
* Remember the committed values in zd, which is in parent/child
@@ -4007,7 +4006,7 @@ raidz_scratch_verify(void)
* requested by user, but scratch object was not created.
*/
case RRSS_SCRATCH_NOT_IN_USE:
- ASSERT3U(offset, ==, 0);
+ ASSERT0(offset);
break;
/*
@@ -5537,8 +5536,8 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
}
if (i == 1) {
- VERIFY(dmu_buf_hold(os, bigobj, off,
- FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
+ VERIFY0(dmu_buf_hold(os, bigobj, off,
+ FTAG, &dbt, DMU_READ_NO_PREFETCH));
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
@@ -7937,7 +7936,7 @@ ztest_freeze(void)
*/
while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
ztest_dmu_object_alloc_free(zd, 0);
- zil_commit(zd->zd_zilog, 0);
+ VERIFY0(zil_commit(zd->zd_zilog, 0));
}
txg_wait_synced(spa_get_dsl(spa), 0);
@@ -7979,7 +7978,7 @@ ztest_freeze(void)
/*
* Commit all of the changes we just generated.
*/
- zil_commit(zd->zd_zilog, 0);
+ VERIFY0(zil_commit(zd->zd_zilog, 0));
txg_wait_synced(spa_get_dsl(spa), 0);
/*
@@ -8979,7 +8978,7 @@ main(int argc, char **argv)
exit(EXIT_FAILURE);
} else {
/* children should not be spawned if setting gvars fails */
- VERIFY3S(err, ==, 0);
+ VERIFY0(err);
}
/* Override location of zpool.cache */
diff --git a/sys/contrib/openzfs/config/Shellcheck.am b/sys/contrib/openzfs/config/Shellcheck.am
index 1ab13516066c..87e6494056cf 100644
--- a/sys/contrib/openzfs/config/Shellcheck.am
+++ b/sys/contrib/openzfs/config/Shellcheck.am
@@ -16,10 +16,14 @@ SHELLCHECK_OPTS = $(call JUST_SHELLCHECK_OPTS,$(1)) $(call JUST_CHECKBAS
PHONY += shellcheck
+shellcheck_verbose = $(shellcheck_verbose_@AM_V@)
+shellcheck_verbose_ = $(shellcheck_verbose_@AM_DEFAULT_V@)
+shellcheck_verbose_0 = @echo SHELLCHECK $(_STGT);
+
_STGT = $(subst ^,/,$(subst shellcheck-here-,,$@))
shellcheck-here-%:
if HAVE_SHELLCHECK
- shellcheck --format=gcc --enable=all --exclude=SC1090,SC1091,SC2039,SC2250,SC2312,SC2317,SC3043 $$([ -n "$(SHELLCHECK_SHELL)" ] && echo "--shell=$(SHELLCHECK_SHELL)") "$$([ -e "$(_STGT)" ] || echo "$(srcdir)/")$(_STGT)"
+ $(shellcheck_verbose)shellcheck --format=gcc --enable=all --exclude=SC1090,SC1091,SC2039,SC2250,SC2312,SC2317,SC3043 $$([ -n "$(SHELLCHECK_SHELL)" ] && echo "--shell=$(SHELLCHECK_SHELL)") "$$([ -e "$(_STGT)" ] || echo "$(srcdir)/")$(_STGT)"
else
@echo "skipping shellcheck of" $(_STGT) "because shellcheck is not installed"
endif
@@ -29,11 +33,15 @@ shellcheck: $(SHELLCHECKSCRIPTS) $(call JUST_SHELLCHECK_OPTS,$(SHELLCHECKSCRIPTS
PHONY += checkbashisms
+checkbashisms_verbose = $(checkbashisms_verbose_@AM_V@)
+checkbashisms_verbose_ = $(checkbashisms_verbose_@AM_DEFAULT_V@)
+checkbashisms_verbose_0 = @echo CHECKBASHISMS $(_BTGT);
+
# command -v *is* specified by POSIX and every shell in existence supports it
_BTGT = $(subst ^,/,$(subst checkbashisms-here-,,$@))
checkbashisms-here-%:
if HAVE_CHECKBASHISMS
- ! { [ -n "$(SHELLCHECK_SHELL)" ] && echo '#!/bin/$(SHELLCHECK_SHELL)'; cat "$$([ -e "$(_BTGT)" ] || echo "$(srcdir)/")$(_BTGT)"; } | \
+ $(checkbashisms_verbose)! { [ -n "$(SHELLCHECK_SHELL)" ] && echo '#!/bin/$(SHELLCHECK_SHELL)'; cat "$$([ -e "$(_BTGT)" ] || echo "$(srcdir)/")$(_BTGT)"; } | \
checkbashisms -npx 2>&1 | grep -vFe "'command' with option other than -p" -e 'command -v' -e 'any possible bashisms' $(CHECKBASHISMS_IGNORE) >&2
else
@echo "skipping checkbashisms of" $(_BTGT) "because checkbashisms is not installed"
diff --git a/sys/contrib/openzfs/config/always-arch.m4 b/sys/contrib/openzfs/config/always-arch.m4
index 9f413eeddf95..d73b878916cb 100644
--- a/sys/contrib/openzfs/config/always-arch.m4
+++ b/sys/contrib/openzfs/config/always-arch.m4
@@ -34,8 +34,26 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_ARCH], [
esac
AM_CONDITIONAL([TARGET_CPU_AARCH64], test $TARGET_CPU = aarch64)
+ AM_CONDITIONAL([TARGET_CPU_I386], test $TARGET_CPU = i386)
AM_CONDITIONAL([TARGET_CPU_X86_64], test $TARGET_CPU = x86_64)
AM_CONDITIONAL([TARGET_CPU_POWERPC], test $TARGET_CPU = powerpc)
AM_CONDITIONAL([TARGET_CPU_SPARC64], test $TARGET_CPU = sparc64)
AM_CONDITIONAL([TARGET_CPU_ARM], test $TARGET_CPU = arm)
])
+dnl #
+dnl # Check for conflicting environment variables
+dnl #
+dnl # If the ARCH environment variable is set, the kernel Makefile in
+dnl # /usr/src/kernel can misbehave during the zfs ./configure module
+dnl # compilation tests.
+AC_DEFUN([ZFS_AC_CONFIG_CHECK_ARCH_VAR], [
+ AC_MSG_CHECKING([for conflicting environment variables])
+ if test -n "$ARCH"; then
+ AC_MSG_RESULT([warning])
+ AC_MSG_WARN(m4_normalize([ARCH environment variable is set to "$ARCH".
+ This can cause the kernel module build support check to fail.
+ Please unset it.]))
+ else
+ AC_MSG_RESULT([done])
+ fi
+])
+
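The check only warns; the remedy it suggests amounts to the following (configure flag shown for a kernel-module build, as an example):

    # A leftover ARCH export (e.g. from a kernel cross-build) can
    # break the module probes; clear it and re-run configure.
    unset ARCH
    ./configure --with-config=kernel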
diff --git a/sys/contrib/openzfs/config/always-compiler-options.m4 b/sys/contrib/openzfs/config/always-compiler-options.m4
index 6383b12506ee..0e96435e3713 100644
--- a/sys/contrib/openzfs/config/always-compiler-options.m4
+++ b/sys/contrib/openzfs/config/always-compiler-options.m4
@@ -156,6 +156,34 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_ZERO_LENGTH], [
])
dnl #
+dnl # Check if kernel cc supports -Wno-format-zero-length option.
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_KERNEL_CC_NO_FORMAT_ZERO_LENGTH], [
+ saved_cc="$CC"
+ AS_IF(
+ [ test -n "$KERNEL_CC" ], [ CC="$KERNEL_CC" ],
+ [ test -n "$KERNEL_LLVM" ], [ CC="clang" ],
+ [ CC="gcc" ]
+ )
+ AC_MSG_CHECKING([whether $CC supports -Wno-format-zero-length])
+
+ saved_flags="$CFLAGS"
+ CFLAGS="$CFLAGS -Werror -Wno-format-zero-length"
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
+ KERNEL_NO_FORMAT_ZERO_LENGTH=-Wno-format-zero-length
+ AC_MSG_RESULT([yes])
+ ], [
+ KERNEL_NO_FORMAT_ZERO_LENGTH=
+ AC_MSG_RESULT([no])
+ ])
+
+ CC="$saved_cc"
+ CFLAGS="$saved_flags"
+ AC_SUBST([KERNEL_NO_FORMAT_ZERO_LENGTH])
+])
+
+dnl #
dnl # Check if cc supports -Wno-clobbered option.
dnl #
dnl # We actually invoke it with the -Wclobbered option
@@ -182,6 +210,27 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_CLOBBERED], [
])
dnl #
+dnl # Check if cc supports -Wno-atomic-alignment option.
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_ATOMIC_ALIGNMENT], [
+ AC_MSG_CHECKING([whether $CC supports -Wno-atomic-alignment])
+
+ saved_flags="$CFLAGS"
+ CFLAGS="$CFLAGS -Werror -Wno-atomic-alignment"
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
+ NO_ATOMIC_ALIGNMENT=-Wno-atomic-alignment
+ AC_MSG_RESULT([yes])
+ ], [
+ NO_ATOMIC_ALIGNMENT=
+ AC_MSG_RESULT([no])
+ ])
+
+ CFLAGS="$saved_flags"
+ AC_SUBST([NO_ATOMIC_ALIGNMENT])
+])
+
+dnl #
dnl # Check if cc supports -Wimplicit-fallthrough option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_IMPLICIT_FALLTHROUGH], [
@@ -231,20 +280,17 @@ dnl #
dnl # Check if kernel cc supports -Winfinite-recursion option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_KERNEL_CC_INFINITE_RECURSION], [
- AC_MSG_CHECKING([whether $KERNEL_CC supports -Winfinite-recursion])
-
saved_cc="$CC"
+ AS_IF(
+ [ test -n "$KERNEL_CC" ], [ CC="$KERNEL_CC" ],
+ [ test -n "$KERNEL_LLVM" ], [ CC="clang" ],
+ [ CC="gcc" ]
+ )
+ AC_MSG_CHECKING([whether $CC supports -Winfinite-recursion])
+
saved_flags="$CFLAGS"
- CC="gcc"
CFLAGS="$CFLAGS -Werror -Winfinite-recursion"
- AS_IF([ test -n "$KERNEL_CC" ], [
- CC="$KERNEL_CC"
- ])
- AS_IF([ test -n "$KERNEL_LLVM" ], [
- CC="clang"
- ])
-
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
KERNEL_INFINITE_RECURSION=-Winfinite-recursion
AC_DEFINE([HAVE_KERNEL_INFINITE_RECURSION], 1,
@@ -329,20 +375,17 @@ dnl #
dnl # Check if kernel cc supports -fno-ipa-sra option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_KERNEL_CC_NO_IPA_SRA], [
- AC_MSG_CHECKING([whether $KERNEL_CC supports -fno-ipa-sra])
-
saved_cc="$CC"
+ AS_IF(
+ [ test -n "$KERNEL_CC" ], [ CC="$KERNEL_CC" ],
+ [ test -n "$KERNEL_LLVM" ], [ CC="clang" ],
+ [ CC="gcc" ]
+ )
+ AC_MSG_CHECKING([whether $CC supports -fno-ipa-sra])
+
saved_flags="$CFLAGS"
- CC="gcc"
CFLAGS="$CFLAGS -Werror -fno-ipa-sra"
- AS_IF([ test -n "$KERNEL_CC" ], [
- CC="$KERNEL_CC"
- ])
- AS_IF([ test -n "$KERNEL_LLVM" ], [
- CC="clang"
- ])
-
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
KERNEL_NO_IPA_SRA=-fno-ipa-sra
AC_MSG_RESULT([yes])
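All three kernel-cc probes now share the same compiler-selection order; what each effectively runs reduces to this shell sketch (shown for -Wno-format-zero-length):

    # Explicit KERNEL_CC wins, then clang for LLVM builds, else gcc;
    # -Werror makes an unsupported flag fail the probe.
    cc=${KERNEL_CC:-${KERNEL_LLVM:+clang}}
    cc=${cc:-gcc}
    echo 'int main(void) { return 0; }' | \
        "$cc" -Werror -Wno-format-zero-length -x c -o /dev/null -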
diff --git a/sys/contrib/openzfs/config/kernel-blkdev.m4 b/sys/contrib/openzfs/config/kernel-blkdev.m4
index 83190c6fbe3f..02011bf39fb2 100644
--- a/sys/contrib/openzfs/config/kernel-blkdev.m4
+++ b/sys/contrib/openzfs/config/kernel-blkdev.m4
@@ -29,9 +29,8 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH_4ARG], [
const char *path = "path";
fmode_t mode = 0;
void *holder = NULL;
- struct blk_holder_ops h;
- bdev = blkdev_get_by_path(path, mode, holder, &h);
+ bdev = blkdev_get_by_path(path, mode, holder, NULL);
])
])
@@ -48,9 +47,8 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_OPEN_BY_PATH], [
const char *path = "path";
fmode_t mode = 0;
void *holder = NULL;
- struct blk_holder_ops h;
- bdh = bdev_open_by_path(path, mode, holder, &h);
+ bdh = bdev_open_by_path(path, mode, holder, NULL);
])
])
@@ -68,9 +66,8 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BDEV_FILE_OPEN_BY_PATH], [
const char *path = "path";
fmode_t mode = 0;
void *holder = NULL;
- struct blk_holder_ops h;
- file = bdev_file_open_by_path(path, mode, holder, &h);
+ file = bdev_file_open_by_path(path, mode, holder, NULL);
])
])
diff --git a/sys/contrib/openzfs/config/kernel-dentry-operations.m4 b/sys/contrib/openzfs/config/kernel-dentry-operations.m4
index aa5a9f2aff39..ce0e6e5be959 100644
--- a/sys/contrib/openzfs/config/kernel-dentry-operations.m4
+++ b/sys/contrib/openzfs/config/kernel-dentry-operations.m4
@@ -24,6 +24,9 @@ dnl #
dnl # 2.6.38 API change
dnl # Added d_set_d_op() helper function.
dnl #
+dnl # 6.17 API change
+dnl # d_set_d_op() removed. No direct replacement.
+dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_D_SET_D_OP], [
ZFS_LINUX_TEST_SRC([d_set_d_op], [
#include <linux/dcache.h>
@@ -34,22 +37,46 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_D_SET_D_OP], [
AC_DEFUN([ZFS_AC_KERNEL_D_SET_D_OP], [
AC_MSG_CHECKING([whether d_set_d_op() is available])
- ZFS_LINUX_TEST_RESULT_SYMBOL([d_set_d_op],
- [d_set_d_op], [fs/dcache.c], [
+ ZFS_LINUX_TEST_RESULT([d_set_d_op], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_D_SET_D_OP, 1,
+ [Define if d_set_d_op() is available])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
+
+dnl #
+dnl # 6.17 API change
+dnl # sb->s_d_op removed; set_default_d_op(sb, dop) added
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_SET_DEFAULT_D_OP], [
+ ZFS_LINUX_TEST_SRC([set_default_d_op], [
+ #include <linux/dcache.h>
+ ], [
+ set_default_d_op(NULL, NULL);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SET_DEFAULT_D_OP], [
+ AC_MSG_CHECKING([whether set_default_d_op() is available])
+ ZFS_LINUX_TEST_RESULT([set_default_d_op], [
AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SET_DEFAULT_D_OP, 1,
+ [Define if set_default_d_op() is available])
], [
- ZFS_LINUX_TEST_ERROR([d_set_d_op])
+ AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_DENTRY], [
ZFS_AC_KERNEL_SRC_D_OBTAIN_ALIAS
ZFS_AC_KERNEL_SRC_D_SET_D_OP
- ZFS_AC_KERNEL_SRC_S_D_OP
+ ZFS_AC_KERNEL_SRC_SET_DEFAULT_D_OP
])
AC_DEFUN([ZFS_AC_KERNEL_DENTRY], [
ZFS_AC_KERNEL_D_OBTAIN_ALIAS
ZFS_AC_KERNEL_D_SET_D_OP
- ZFS_AC_KERNEL_S_D_OP
+ ZFS_AC_KERNEL_SET_DEFAULT_D_OP
])
diff --git a/sys/contrib/openzfs/config/kernel-mkdir.m4 b/sys/contrib/openzfs/config/kernel-mkdir.m4
index c1aebc387abe..78b32447c593 100644
--- a/sys/contrib/openzfs/config/kernel-mkdir.m4
+++ b/sys/contrib/openzfs/config/kernel-mkdir.m4
@@ -84,6 +84,8 @@ AC_DEFUN([ZFS_AC_KERNEL_MKDIR], [
AC_DEFINE(HAVE_IOPS_MKDIR_DENTRY, 1,
[iops->mkdir() returns struct dentry*])
],[
+ AC_MSG_RESULT(no)
+
dnl #
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index e3e7625db7d8..35819e4d68c5 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -70,6 +70,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
+ ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
@@ -188,6 +189,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
+ ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
diff --git a/sys/contrib/openzfs/config/toolchain-simd.m4 b/sys/contrib/openzfs/config/toolchain-simd.m4
index 061576fd94e3..f18c91007cde 100644
--- a/sys/contrib/openzfs/config/toolchain-simd.m4
+++ b/sys/contrib/openzfs/config/toolchain-simd.m4
@@ -24,6 +24,8 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD], [
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AES
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_PCLMULQDQ
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VAES
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VPCLMULQDQ
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVEOPT
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVES
@@ -38,9 +40,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE], [
AC_MSG_CHECKING([whether host toolchain supports SSE])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
__asm__ __volatile__("xorps %xmm0, %xmm1");
+ return (0);
}
]])], [
AC_DEFINE([HAVE_SSE], 1, [Define if host toolchain supports SSE])
@@ -57,9 +60,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE2], [
AC_MSG_CHECKING([whether host toolchain supports SSE2])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
__asm__ __volatile__("pxor %xmm0, %xmm1");
+ return (0);
}
]])], [
AC_DEFINE([HAVE_SSE2], 1, [Define if host toolchain supports SSE2])
@@ -76,10 +80,11 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE3], [
AC_MSG_CHECKING([whether host toolchain supports SSE3])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
char v[16];
__asm__ __volatile__("lddqu %0,%%xmm0" :: "m"(v[0]));
+ return (0);
}
]])], [
AC_DEFINE([HAVE_SSE3], 1, [Define if host toolchain supports SSE3])
@@ -96,9 +101,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSSE3], [
AC_MSG_CHECKING([whether host toolchain supports SSSE3])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
__asm__ __volatile__("pshufb %xmm0,%xmm1");
+ return (0);
}
]])], [
AC_DEFINE([HAVE_SSSE3], 1, [Define if host toolchain supports SSSE3])
@@ -115,9 +121,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_1], [
AC_MSG_CHECKING([whether host toolchain supports SSE4.1])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
__asm__ __volatile__("pmaxsb %xmm0,%xmm1");
+ return (0);
}
]])], [
AC_DEFINE([HAVE_SSE4_1], 1, [Define if host toolchain supports SSE4.1])
@@ -134,9 +141,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_2], [
AC_MSG_CHECKING([whether host toolchain supports SSE4.2])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
__asm__ __volatile__("pcmpgtq %xmm0, %xmm1");
+ return (0);
}
]])], [
AC_DEFINE([HAVE_SSE4_2], 1, [Define if host toolchain supports SSE4.2])
@@ -153,10 +161,11 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX], [
AC_MSG_CHECKING([whether host toolchain supports AVX])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
- void main()
+ int main()
{
char v[32];
__asm__ __volatile__("vmovdqa %0,%%ymm0" :: "m"(v[0]));
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -174,9 +183,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX2], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vpshufb %ymm0,%ymm1,%ymm2");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -194,9 +204,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512F], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vpandd %zmm0,%zmm1,%zmm2");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -214,9 +225,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512CD], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vplzcntd %zmm0,%zmm1");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -234,9 +246,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512DQ], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vandpd %zmm0,%zmm1,%zmm2");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -254,9 +267,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512BW], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vpshufb %zmm0,%zmm1,%zmm2");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -274,9 +288,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512IFMA], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vpmadd52luq %zmm0,%zmm1,%zmm2");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -294,9 +309,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VBMI], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vpermb %zmm0,%zmm1,%zmm2");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -314,9 +330,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512PF], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vgatherpf0dps (%rsi,%zmm0,4){%k1}");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -334,9 +351,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512ER], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vexp2pd %zmm0,%zmm1");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -354,9 +372,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VL], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("vpabsq %zmm0,%zmm1");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -374,9 +393,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AES], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("aesenc %xmm0, %xmm1");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -394,9 +414,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_PCLMULQDQ], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("pclmulqdq %0, %%xmm0, %%xmm1" :: "i"(0));
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -414,9 +435,10 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
__asm__ __volatile__("movbe 0(%eax), %eax");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -427,6 +449,48 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE], [
])
dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VAES
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VAES], [
+ AC_MSG_CHECKING([whether host toolchain supports VAES])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ int main()
+ {
+ __asm__ __volatile__("vaesenc %ymm0, %ymm1, %ymm0");
+ return (0);
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_VAES], 1, [Define if host toolchain supports VAES])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VPCLMULQDQ
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VPCLMULQDQ], [
+ AC_MSG_CHECKING([whether host toolchain supports VPCLMULQDQ])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ int main()
+ {
+ __asm__ __volatile__("vpclmulqdq %0, %%ymm4, %%ymm3, %%ymm5" :: "i"(0));
+ return (0);
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_VPCLMULQDQ], 1, [Define if host toolchain supports VPCLMULQDQ])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE], [
@@ -434,10 +498,11 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
char b[4096] __attribute__ ((aligned (64)));
__asm__ __volatile__("xsave %[b]\n" : : [b] "m" (*b) : "memory");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -455,10 +520,11 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVEOPT], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
char b[4096] __attribute__ ((aligned (64)));
__asm__ __volatile__("xsaveopt %[b]\n" : : [b] "m" (*b) : "memory");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
@@ -476,10 +542,11 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVES], [
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
- void main()
+ int main()
{
char b[4096] __attribute__ ((aligned (64)));
__asm__ __volatile__("xsaves %[b]\n" : : [b] "m" (*b) : "memory");
+ return (0);
}
]])], [
AC_MSG_RESULT([yes])
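The blanket void main() to int main() conversion matters because AC_LINK_IFELSE treats any compile or link failure as "no": a host compiler that promotes -Wmain to an error would have failed every SIMD probe spuriously. A quick way to see the difference (gcc shown; diagnostics vary by compiler):

    echo 'void main(){}'         | gcc -Werror=main -x c -o /dev/null -  # fails
    echo 'int main(){return 0;}' | gcc -Werror=main -x c -o /dev/null -  # links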
diff --git a/sys/contrib/openzfs/config/user-statx.m4 b/sys/contrib/openzfs/config/user-statx.m4
index 0315f93e0c20..1ba74a40e9b8 100644
--- a/sys/contrib/openzfs/config/user-statx.m4
+++ b/sys/contrib/openzfs/config/user-statx.m4
@@ -2,7 +2,7 @@ dnl #
dnl # Check for statx() function and STATX_MNT_ID availability
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_STATX], [
- AC_CHECK_HEADERS([linux/stat.h],
+ AC_CHECK_HEADERS([sys/stat.h],
[have_stat_headers=yes],
[have_stat_headers=no])
@@ -14,7 +14,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_STATX], [
AC_MSG_CHECKING([for STATX_MNT_ID])
AC_COMPILE_IFELSE([
AC_LANG_PROGRAM([[
- #include <linux/stat.h>
+ #include <sys/stat.h>
]], [[
struct statx stx;
int mask = STATX_MNT_ID;
@@ -29,6 +29,6 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_STATX], [
])
])
], [
- AC_MSG_WARN([linux/stat.h not found; skipping statx support])
+ AC_MSG_WARN([sys/stat.h not found; skipping statx support])
])
]) dnl end AC_DEFUN
diff --git a/sys/contrib/openzfs/config/zfs-build.m4 b/sys/contrib/openzfs/config/zfs-build.m4
index 7cf1b02d8757..161d390466db 100644
--- a/sys/contrib/openzfs/config/zfs-build.m4
+++ b/sys/contrib/openzfs/config/zfs-build.m4
@@ -252,10 +252,12 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
ZFS_AC_CONFIG_ALWAYS_CC_NO_CLOBBERED
ZFS_AC_CONFIG_ALWAYS_CC_INFINITE_RECURSION
ZFS_AC_CONFIG_ALWAYS_KERNEL_CC_INFINITE_RECURSION
+ ZFS_AC_CONFIG_ALWAYS_CC_NO_ATOMIC_ALIGNMENT
ZFS_AC_CONFIG_ALWAYS_CC_IMPLICIT_FALLTHROUGH
ZFS_AC_CONFIG_ALWAYS_CC_FRAME_LARGER_THAN
ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_TRUNCATION
ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_ZERO_LENGTH
+ ZFS_AC_CONFIG_ALWAYS_KERNEL_CC_NO_FORMAT_ZERO_LENGTH
ZFS_AC_CONFIG_ALWAYS_CC_FORMAT_OVERFLOW
ZFS_AC_CONFIG_ALWAYS_CC_NO_OMIT_FRAME_POINTER
ZFS_AC_CONFIG_ALWAYS_CC_NO_IPA_SRA
@@ -265,6 +267,7 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD
ZFS_AC_CONFIG_ALWAYS_SYSTEM
ZFS_AC_CONFIG_ALWAYS_ARCH
+ ZFS_AC_CONFIG_CHECK_ARCH_VAR
ZFS_AC_CONFIG_ALWAYS_PYTHON
ZFS_AC_CONFIG_ALWAYS_PYZFS
ZFS_AC_CONFIG_ALWAYS_SED
diff --git a/sys/contrib/openzfs/contrib/debian/control b/sys/contrib/openzfs/contrib/debian/control
index 96a2bdd88665..c5358dedc0fd 100644
--- a/sys/contrib/openzfs/contrib/debian/control
+++ b/sys/contrib/openzfs/contrib/debian/control
@@ -100,8 +100,8 @@ Depends: ${misc:Depends}, ${shlibs:Depends}
# The libcurl4 is loaded through dlopen("libcurl.so.4").
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=988521
Recommends: libcurl4
-Breaks: libzfs2, libzfs4, libzfs4linux, libzfs6linux
-Replaces: libzfs2, libzfs4, libzfs4linux, libzfs6linux
+Breaks: libzfs2, libzfs4, libzfs4linux, libzfs6linux, openzfs-libzfs4
+Replaces: libzfs2, libzfs4, libzfs4linux, libzfs6linux, openzfs-libzfs4
Conflicts: libzfs6linux
Description: OpenZFS filesystem library for Linux - general support
OpenZFS is a storage platform that encompasses the functionality of
@@ -128,8 +128,8 @@ Package: openzfs-libzpool6
Section: contrib/libs
Architecture: linux-any
Depends: ${misc:Depends}, ${shlibs:Depends}
-Breaks: libzpool2, libzpool5, libzpool5linux, libzpool6linux
-Replaces: libzpool2, libzpool5, libzpool5linux, libzpool6linux
+Breaks: libzpool2, libzpool5, libzpool6linux
+Replaces: libzpool2, libzpool5, libzpool6linux
Conflicts: libzpool6linux
Description: OpenZFS pool library for Linux
OpenZFS is a storage platform that encompasses the functionality of
diff --git a/sys/contrib/openzfs/contrib/debian/copyright b/sys/contrib/openzfs/contrib/debian/copyright
index 65c7d209d8eb..006f32fdf924 100644
--- a/sys/contrib/openzfs/contrib/debian/copyright
+++ b/sys/contrib/openzfs/contrib/debian/copyright
@@ -4,7 +4,7 @@ The detailed contributor information can be found in [2][3].
Files: contrib/debian/*
Copyright:
- 2013-2016, Aron Xu <aron@debian.org>
+ 2013-2025, Aron Xu <aron@debian.org>
2016, Petter Reinholdtsen <pere@hungry.com>
2013, Carlos Alberto Lopez Perez <clopez@igalia.com>
2013, Turbo Fredriksson <turbo@bayour.com>
@@ -12,6 +12,8 @@ Copyright:
2011-2013, Darik Horn <dajhorn@vanadac.com>
2018-2019, Mo Zhou <cdluminate@gmail.com>
2018-2020, Mo Zhou <lumin@debian.org>
+ 2023-2024, Shengqi Chen <harry-chen@outlook.com>
+ 2024-2025, Shengqi Chen <harry@debian.org>
License: GPL-2+
[1] https://tracker.debian.org/pkg/zfs-linux
diff --git a/sys/contrib/openzfs/contrib/debian/not-installed b/sys/contrib/openzfs/contrib/debian/not-installed
index 88557f76fcae..9c08da5a6a7b 100644
--- a/sys/contrib/openzfs/contrib/debian/not-installed
+++ b/sys/contrib/openzfs/contrib/debian/not-installed
@@ -1,4 +1,4 @@
-usr/bin/arc_summary.py
+usr/bin/zarcsummary.py
usr/share/zfs/zfs-helpers.sh
etc/default/zfs
etc/init.d
@@ -9,4 +9,4 @@ etc/zfs/vdev_id.conf.sas_direct.example
etc/zfs/vdev_id.conf.sas_switch.example
etc/zfs/vdev_id.conf.scsi.example
etc/zfs/zfs-functions
-lib/systemd/system/zfs-import.service
+usr/lib/systemd/system/zfs-import.service
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libnvpair3.install.in b/sys/contrib/openzfs/contrib/debian/openzfs-libnvpair3.install.in
index ed7b541e3607..fce542270dd8 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libnvpair3.install.in
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libnvpair3.install.in
@@ -1 +1 @@
-lib/@DEB_HOST_MULTIARCH@/libnvpair.so.*
+usr/lib/@DEB_HOST_MULTIARCH@/libnvpair.so.*
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.install b/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.install
index c33123f69a8d..bafdebe9bb91 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.install
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.install
@@ -1,2 +1,2 @@
-lib/*/security/pam_zfs_key.so
+usr/lib/*/security/pam_zfs_key.so
usr/share/pam-configs/zfs_key
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.postinst b/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.postinst
index 03893454eee9..db4db73d6d5a 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.postinst
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libpam-zfs.postinst
@@ -1,7 +1,7 @@
#!/bin/sh
set -e
-if ! $(ldd "/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)/security/pam_zfs_key.so" | grep -q "libasan") ; then
+if ! $(ldd "/usr/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)/security/pam_zfs_key.so" | grep -q "libasan") ; then
pam-auth-update --package
fi
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libuutil3.install.in b/sys/contrib/openzfs/contrib/debian/openzfs-libuutil3.install.in
index a197d030d743..bb33386791e1 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libuutil3.install.in
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libuutil3.install.in
@@ -1 +1 @@
-lib/@DEB_HOST_MULTIARCH@/libuutil.so.*
+usr/lib/@DEB_HOST_MULTIARCH@/libuutil.so.*
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libzfs-dev.install.in b/sys/contrib/openzfs/contrib/debian/openzfs-libzfs-dev.install.in
index eaa8c3925e24..5673e2661c6a 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libzfs-dev.install.in
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libzfs-dev.install.in
@@ -1,3 +1,5 @@
-lib/@DEB_HOST_MULTIARCH@/*.a usr/lib/@DEB_HOST_MULTIARCH@
+usr/lib/@DEB_HOST_MULTIARCH@/*.a
+usr/lib/@DEB_HOST_MULTIARCH@/*.so
+usr/lib/@DEB_HOST_MULTIARCH@/pkgconfig
usr/include
-usr/lib/@DEB_HOST_MULTIARCH@
+
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libzfs6.install.in b/sys/contrib/openzfs/contrib/debian/openzfs-libzfs6.install.in
index 6765aaee59cc..a9054c14cc73 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libzfs6.install.in
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libzfs6.install.in
@@ -1,2 +1,2 @@
-lib/@DEB_HOST_MULTIARCH@/libzfs.so.*
-lib/@DEB_HOST_MULTIARCH@/libzfs_core.so.*
+usr/lib/@DEB_HOST_MULTIARCH@/libzfs.so.*
+usr/lib/@DEB_HOST_MULTIARCH@/libzfs_core.so.*
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libzfsbootenv1.install.in b/sys/contrib/openzfs/contrib/debian/openzfs-libzfsbootenv1.install.in
index 49216742433f..b61b8ab63265 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libzfsbootenv1.install.in
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libzfsbootenv1.install.in
@@ -1 +1 @@
-lib/@DEB_HOST_MULTIARCH@/libzfsbootenv.so.*
+usr/lib/@DEB_HOST_MULTIARCH@/libzfsbootenv.so.*
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-libzpool6.install.in b/sys/contrib/openzfs/contrib/debian/openzfs-libzpool6.install.in
index b9e872df9ba8..0e087a2709b3 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-libzpool6.install.in
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-libzpool6.install.in
@@ -1 +1 @@
-lib/@DEB_HOST_MULTIARCH@/libzpool.so.*
+usr/lib/@DEB_HOST_MULTIARCH@/libzpool.so.*
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install b/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install
index b3afef50dbd4..496cab2ad5e4 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-zfs-test.install
@@ -1,4 +1,4 @@
-sbin/ztest
+usr/sbin/ztest
usr/bin/raidz_test
usr/share/man/man1/raidz_test.1
usr/share/man/man1/test-runner.1
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-zfs-zed.install b/sys/contrib/openzfs/contrib/debian/openzfs-zfs-zed.install
index a348ba828ee5..30699a8a98da 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-zfs-zed.install
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-zfs-zed.install
@@ -1,5 +1,5 @@
etc/zfs/zed.d/*
-lib/systemd/system/zfs-zed.service
+usr/lib/systemd/system/zfs-zed.service
usr/lib/zfs-linux/zed.d/*
usr/sbin/zed
usr/share/man/man8/zed.8
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install b/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install
index 37284a78ad18..6810108f2c5d 100644
--- a/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.install
@@ -1,48 +1,48 @@
etc/default/zfs
etc/zfs/zfs-functions
etc/zfs/zpool.d/
-lib/systemd/system-generators/
-lib/systemd/system-preset/
-lib/systemd/system/zfs-import-cache.service
-lib/systemd/system/zfs-import-scan.service
-lib/systemd/system/zfs-import.target
-lib/systemd/system/zfs-load-key.service
-lib/systemd/system/zfs-mount.service
-lib/systemd/system/zfs-mount@.service
-lib/systemd/system/zfs-scrub-monthly@.timer
-lib/systemd/system/zfs-scrub-weekly@.timer
-lib/systemd/system/zfs-scrub@.service
-lib/systemd/system/zfs-trim-monthly@.timer
-lib/systemd/system/zfs-trim-weekly@.timer
-lib/systemd/system/zfs-trim@.service
-lib/systemd/system/zfs-share.service
-lib/systemd/system/zfs-volume-wait.service
-lib/systemd/system/zfs-volumes.target
-lib/systemd/system/zfs.target
-lib/udev/
-sbin/fsck.zfs
-sbin/mount.zfs
-sbin/zdb
-sbin/zfs
-sbin/zfs_ids_to_path
-sbin/zgenhostid
-sbin/zhack
-sbin/zinject
-sbin/zpool
-sbin/zstream
-sbin/zstreamdump
+usr/lib/systemd/system-generators/
+usr/lib/systemd/system-preset/
+usr/lib/systemd/system/zfs-import-cache.service
+usr/lib/systemd/system/zfs-import-scan.service
+usr/lib/systemd/system/zfs-import.target
+usr/lib/systemd/system/zfs-load-key.service
+usr/lib/systemd/system/zfs-mount.service
+usr/lib/systemd/system/zfs-mount@.service
+usr/lib/systemd/system/zfs-scrub-monthly@.timer
+usr/lib/systemd/system/zfs-scrub-weekly@.timer
+usr/lib/systemd/system/zfs-scrub@.service
+usr/lib/systemd/system/zfs-trim-monthly@.timer
+usr/lib/systemd/system/zfs-trim-weekly@.timer
+usr/lib/systemd/system/zfs-trim@.service
+usr/lib/systemd/system/zfs-share.service
+usr/lib/systemd/system/zfs-volume-wait.service
+usr/lib/systemd/system/zfs-volumes.target
+usr/lib/systemd/system/zfs.target
+usr/lib/udev/
+usr/sbin/fsck.zfs
+usr/sbin/mount.zfs
+usr/sbin/zdb
+usr/sbin/zfs
+usr/sbin/zfs_ids_to_path
+usr/sbin/zgenhostid
+usr/sbin/zhack
+usr/sbin/zinject
+usr/sbin/zpool
+usr/sbin/zstream
+usr/sbin/zstreamdump
usr/bin/zvol_wait
-usr/lib/modules-load.d/ lib/
+usr/lib/modules-load.d/
usr/lib/zfs-linux/zpool.d/
usr/lib/zfs-linux/zpool_influxdb
usr/lib/zfs-linux/zfs_prepare_disk
-usr/sbin/arc_summary
-usr/sbin/arcstat
-usr/sbin/dbufstat
-usr/sbin/zilstat
+usr/bin/zarcsummary
+usr/bin/zarcstat
+usr/bin/dbufstat usr/sbin
+usr/bin/zilstat
usr/share/zfs/compatibility.d/
usr/share/bash-completion/completions
-usr/share/man/man1/arcstat.1
+usr/share/man/man1/zarcstat.1
usr/share/man/man1/zhack.1
usr/share/man/man1/zvol_wait.1
usr/share/man/man5/
diff --git a/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.links b/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.links
new file mode 100644
index 000000000000..54099e6573b0
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/debian/openzfs-zfsutils.links
@@ -0,0 +1,3 @@
+usr/sbin/zfs usr/bin/zfs
+usr/sbin/zpool usr/bin/zpool
+usr/lib/zfs-linux/zpool_influxdb usr/bin/zpool_influxdb
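The new .links file is consumed by debhelper's dh_link, which turns each
"source link-name" pair into a policy-compliant relative symlink in the built
package, so the historical /usr/bin names keep resolving after the binaries
moved to /usr/sbin. A quick check on an installed system:

    ls -l /usr/bin/zfs /usr/bin/zpool /usr/bin/zpool_influxdb
    # each should be a symlink into /usr/sbin or /usr/lib/zfs-linux
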
diff --git a/sys/contrib/openzfs/contrib/debian/rules.in b/sys/contrib/openzfs/contrib/debian/rules.in
index 3226d604546c..5087a7e18e16 100755
--- a/sys/contrib/openzfs/contrib/debian/rules.in
+++ b/sys/contrib/openzfs/contrib/debian/rules.in
@@ -37,18 +37,19 @@ override_dh_auto_configure:
@# Build the userland, but don't build the kernel modules.
dh_auto_configure -- @CFGOPTS@ \
--bindir=/usr/bin \
- --sbindir=/sbin \
- --libdir=/lib/"$(DEB_HOST_MULTIARCH)" \
- --with-udevdir=/lib/udev \
+ --sbindir=/usr/sbin \
+ --with-mounthelperdir=/usr/sbin \
+ --libdir=/usr/lib/"$(DEB_HOST_MULTIARCH)" \
+ --with-udevdir=/usr/lib/udev \
--with-zfsexecdir=/usr/lib/zfs-linux \
--enable-systemd \
--enable-pyzfs \
--with-python=python3 \
- --with-pammoduledir='/lib/$(DEB_HOST_MULTIARCH)/security' \
+ --with-pammoduledir='/usr/lib/$(DEB_HOST_MULTIARCH)/security' \
--with-pkgconfigdir='/usr/lib/$(DEB_HOST_MULTIARCH)/pkgconfig' \
- --with-systemdunitdir=/lib/systemd/system \
- --with-systemdpresetdir=/lib/systemd/system-preset \
- --with-systemdgeneratordir=/lib/systemd/system-generators \
+ --with-systemdunitdir=/usr/lib/systemd/system \
+ --with-systemdpresetdir=/usr/lib/systemd/system-preset \
+ --with-systemdgeneratordir=/usr/lib/systemd/system-generators \
--with-config=user
for i in $(wildcard $(CURDIR)/debian/*.install.in) ; do \
@@ -77,23 +78,10 @@ override_dh_auto_install:
@# Install the utilities.
$(MAKE) install DESTDIR='$(CURDIR)/debian/tmp'
- # Move from bin_dir to /usr/sbin
- # Remove suffix (.py) as per policy 10.4 - Scripts
- # https://www.debian.org/doc/debian-policy/ch-files.html#s-scripts
- mkdir -p '$(CURDIR)/debian/tmp/usr/sbin/'
- mv '$(CURDIR)/debian/tmp/usr/bin/arc_summary' '$(CURDIR)/debian/tmp/usr/sbin/arc_summary'
- mv '$(CURDIR)/debian/tmp/usr/bin/arcstat' '$(CURDIR)/debian/tmp/usr/sbin/arcstat'
- mv '$(CURDIR)/debian/tmp/usr/bin/dbufstat' '$(CURDIR)/debian/tmp/usr/sbin/dbufstat'
- mv '$(CURDIR)/debian/tmp/usr/bin/zilstat' '$(CURDIR)/debian/tmp/usr/sbin/zilstat'
-
- @# Zed has dependencies outside of the system root.
- mv '$(CURDIR)/debian/tmp/sbin/zed' '$(CURDIR)/debian/tmp/usr/sbin/zed'
- sed -i 's|ExecStart=/sbin/|ExecStart=/usr/sbin/|g' '$(CURDIR)/debian/tmp/lib/systemd/system/zfs-zed.service'
-
@# Install the DKMS source.
@# We only want the files needed to build the modules
install -D -t '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/scripts' \
- '$(CURDIR)/scripts/dkms.postbuild'
+ '$(CURDIR)/scripts/dkms.postbuild' '$(CURDIR)/scripts/objtool-wrapper.in'
$(foreach file,$(DKMSFILES),mv '$(CURDIR)/$(NAME)-$(DEB_VERSION_UPSTREAM)/$(file)' '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)' || exit 1;)
@# Only ever build Linux modules
@@ -108,8 +96,8 @@ override_dh_auto_install:
@# - zfs.release$
@# * Takes care of spaces and tabs
@# * Remove reference to ZFS_AC_PACKAGE
- awk '/^AC_CONFIG_FILES\(\[/,/^\]\)/ {\
- if ($$0 !~ /^(AC_CONFIG_FILES\(\[([ \t]+)?$$|\]\)([ \t]+)?$$|([ \t]+)?(include\/(Makefile|sys|os\/(Makefile|linux))|module\/|Makefile([ \t]+)?$$|zfs\.release([ \t]+)?$$))/) \
+ awk '/^AC_CONFIG_FILES\(\[/,/\]\)/ {\
+ if ($$0 !~ /^(AC_CONFIG_FILES\(\[([ \t]+)?$$|\]\)([ \t]+)?$$|([ \t]+)?(include\/(Makefile|sys|os\/(Makefile|linux))|module\/|Makefile([ \t]+)?$$|zfs\.release([ \t]+)?$$))|scripts\/objtool-wrapper.*\]\)$$/) \
{next} } {print}' \
'$(CURDIR)/$(NAME)-$(DEB_VERSION_UPSTREAM)/configure.ac' | sed '/ZFS_AC_PACKAGE/d' > '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/configure.ac'
@# Set "SUBDIRS = module include" for CONFIG_KERNEL and remove SUBDIRS for all other configs.
@@ -131,11 +119,6 @@ override_dh_auto_install:
cd '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)'; ./autogen.sh
rm -fr '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/autom4te.cache'
- for i in `ls $(CURDIR)/debian/tmp/lib/$(DEB_HOST_MULTIARCH)/*.so`; do \
- ln -s '/lib/$(DEB_HOST_MULTIARCH)/'`readlink $${i}` '$(CURDIR)/debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)/'`basename $${i}`; \
- rm $${i}; \
- done
-
chmod a-x '$(CURDIR)/debian/tmp/etc/zfs/zfs-functions'
chmod a-x '$(CURDIR)/debian/tmp/etc/default/zfs'
@@ -159,7 +142,7 @@ override_dh_auto_clean:
@if test -e META.orig; then mv META.orig META; fi
override_dh_install:
- find debian/tmp/lib -name '*.la' -delete
+ find debian/tmp/usr/lib -name '*.la' -delete
dh_install
override_dh_missing:
@@ -173,8 +156,8 @@ override_dh_installinit:
dh_installinit -R --name zfs-zed
override_dh_installsystemd:
- mkdir -p debian/openzfs-zfsutils/lib/systemd/system
- ln -sr /dev/null debian/openzfs-zfsutils/lib/systemd/system/zfs-import.service
+ mkdir -p debian/openzfs-zfsutils/usr/lib/systemd/system
+ ln -sr /dev/null debian/openzfs-zfsutils/usr/lib/systemd/system/zfs-import.service
dh_installsystemd --no-stop-on-upgrade -X zfs-zed.service
dh_installsystemd --name zfs-zed
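Taken together, the rules.in changes above drop the manual /sbin-to-/usr/sbin
moves and let configure install everything under /usr directly (the
merged-/usr layout). A quick sanity check over a freshly built package (the
filename pattern is illustrative):

    # nothing should ship under top-level /lib or /sbin any more
    dpkg -c ../openzfs-zfsutils_*.deb | awk '{print $6}' \
        | grep -E '^\./(lib|sbin)/' && echo 'stray non-/usr path found'
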
diff --git a/sys/contrib/openzfs/contrib/debian/tree/zfs-initramfs/usr/share/initramfs-tools/hooks/zdev b/sys/contrib/openzfs/contrib/debian/tree/zfs-initramfs/usr/share/initramfs-tools/hooks/zdev
index 0cf21a4211a8..d4f968aed8f2 100755
--- a/sys/contrib/openzfs/contrib/debian/tree/zfs-initramfs/usr/share/initramfs-tools/hooks/zdev
+++ b/sys/contrib/openzfs/contrib/debian/tree/zfs-initramfs/usr/share/initramfs-tools/hooks/zdev
@@ -5,7 +5,7 @@
PREREQ="udev"
PREREQ_UDEV_RULES="60-zvol.rules 69-vdev.rules"
-COPY_EXEC_LIST="/lib/udev/zvol_id /lib/udev/vdev_id"
+COPY_EXEC_LIST="/usr/lib/udev/zvol_id /usr/lib/udev/vdev_id"
# Generic result code.
RC=0
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
index acad468edfd1..130d94c70707 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
@@ -16,7 +16,8 @@ depends() {
}
installkernel() {
- instmods -c zfs
+ hostonly='' instmods -c zfs
+ instmods mpt3sas virtio_blk
}
install() {
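dracut's instmods normally honours hostonly mode and skips modules that are
not loaded on the build host; prefixing the call with hostonly='' forces zfs
and its dependencies into the image regardless, and the second instmods pulls
in two common boot-critical storage drivers. One way to verify the resulting
image, assuming dracut's lsinitrd is available:

    dracut --force /tmp/zfs-test.img "$(uname -r)"
    lsinitrd /tmp/zfs-test.img | grep -E 'zfs|mpt3sas|virtio_blk'
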
diff --git a/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE
new file mode 100644
index 000000000000..04c03a37e0cb
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE
@@ -0,0 +1,253 @@
+BoringSSL is a fork of OpenSSL. As such, large parts of it fall under OpenSSL
+licensing. Files that are completely new have a Google copyright and an ISC
+license. This license is reproduced at the bottom of this file.
+
+Contributors to BoringSSL are required to follow the CLA rules for Chromium:
+https://cla.developers.google.com/clas
+
+Files in third_party/ have their own licenses, as described therein. The MIT
+license, for third_party/fiat, which, unlike other third_party directories, is
+compiled into non-test libraries, is included below.
+
+The OpenSSL toolkit stays under a dual license, i.e. both the conditions of the
+OpenSSL License and the original SSLeay license apply to the toolkit. See below
+for the actual license texts. Actually both licenses are BSD-style Open Source
+licenses. In case of any license issues related to OpenSSL please contact
+openssl-core@openssl.org.
+
+The following are Google-internal bug numbers where explicit permission from
+some authors is recorded for use of their work. (This is purely for our own
+record keeping.)
+ 27287199
+ 27287880
+ 27287883
+ 263291445
+
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+
+ISC license used for completely new code in BoringSSL:
+
+/* Copyright 2015 The BoringSSL Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+
+The code in third_party/fiat carries the MIT license:
+
+Copyright (c) 2015-2016 the fiat-crypto authors (see
+https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Licenses for support code
+-------------------------
+
+Parts of the TLS test suite are under the Go license. This code is not included
+in BoringSSL (i.e. libcrypto and libssl) when compiled, however, so
+distributing code linked against BoringSSL does not trigger this license:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+BoringSSL uses the Chromium test infrastructure to run a continuous build,
+trybots etc. The scripts which manage this, and the script for generating build
+metadata, are under the Chromium license. Distributing code linked against
+BoringSSL does not trigger this license.
+
+Copyright 2015 The Chromium Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README
new file mode 100644
index 000000000000..aa6fb6d477fa
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README
@@ -0,0 +1,11 @@
+This directory contains the original BoringSSL [1] GCM x86-64 assembly
+files [2].
+
+The assembler files were then further modified to fit the ICP conventions.
+
+The main purpose of including these files (and the original ones) here is to
+serve as a reference when upstream changes need to be applied to the files
+included and modified in the ICP.
+
+[1] https://github.com/google/boringssl
+[2] https://github.com/google/boringssl/blob/d5440dd2c2c500ac2d3bba4afec47a054b4d99ae/gen/bcm/aes-gcm-avx2-x86_64-linux.S
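With the pristine upstream copy kept in-tree, picking up a future BoringSSL
change becomes a matter of diffing the two copies and replaying the delta onto
the ICP-adapted file. A sketch only; the adapted file's path below is
illustrative, not taken from this diff:

    cd sys/contrib/openzfs
    diff -u contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S \
        module/icp/asm-x86_64/modes/aes-gcm-avx2-x86_64.S   # hypothetical path
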
diff --git a/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S
new file mode 100644
index 000000000000..e7327c9de872
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: Apache-2.0
+// This file is generated from a similarly-named Perl script in the BoringSSL
+// source tree. Do not edit by hand.
+
+#include <openssl/asm_base.h>
+
+#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
+.section .rodata
+.align 16
+
+
+.Lbswap_mask:
+.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
+
+
+
+
+
+
+
+
+.Lgfpoly:
+.quad 1, 0xc200000000000000
+
+
+.Lgfpoly_and_internal_carrybit:
+.quad 1, 0xc200000000000001
+
+.align 32
+
+.Lctr_pattern:
+.quad 0, 0
+.quad 1, 0
+.Linc_2blocks:
+.quad 2, 0
+.quad 2, 0
+
+.text
+.globl gcm_init_vpclmulqdq_avx2
+.hidden gcm_init_vpclmulqdq_avx2
+.type gcm_init_vpclmulqdq_avx2,@function
+.align 32
+gcm_init_vpclmulqdq_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+
+
+
+
+
+ vpshufd $0x4e,(%rsi),%xmm3
+
+
+
+
+
+ vpshufd $0xd3,%xmm3,%xmm0
+ vpsrad $31,%xmm0,%xmm0
+ vpaddq %xmm3,%xmm3,%xmm3
+ vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm6
+
+
+ vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
+ vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1
+ vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2
+ vpshufd $0x4e,%xmm0,%xmm0
+ vpxor %xmm0,%xmm1,%xmm1
+ vpxor %xmm2,%xmm1,%xmm1
+ vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
+ vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm5,%xmm5
+ vpxor %xmm0,%xmm5,%xmm5
+
+
+
+ vinserti128 $1,%xmm3,%ymm5,%ymm3
+ vinserti128 $1,%xmm5,%ymm5,%ymm5
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+
+ vmovdqu %ymm3,96(%rdi)
+ vmovdqu %ymm4,64(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128+32(%rdi)
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm3,%ymm3
+ vpxor %ymm0,%ymm3,%ymm3
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu %ymm3,32(%rdi)
+ vmovdqu %ymm4,0(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128(%rdi)
+
+ vzeroupper
+ ret
+
+.cfi_endproc
+.size gcm_init_vpclmulqdq_avx2, . - gcm_init_vpclmulqdq_avx2
+.globl gcm_gmult_vpclmulqdq_avx2
+.hidden gcm_gmult_vpclmulqdq_avx2
+.type gcm_gmult_vpclmulqdq_avx2,@function
+.align 32
+gcm_gmult_vpclmulqdq_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+
+
+
+ vmovdqu (%rdi),%xmm0
+ vmovdqu .Lbswap_mask(%rip),%xmm1
+ vmovdqu 128-16(%rsi),%xmm2
+ vmovdqu .Lgfpoly(%rip),%xmm3
+ vpshufb %xmm1,%xmm0,%xmm0
+
+ vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4
+ vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5
+ vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6
+ vpshufd $0x4e,%xmm4,%xmm4
+ vpxor %xmm4,%xmm5,%xmm5
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0
+ vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4
+ vpshufd $0x4e,%xmm5,%xmm5
+ vpxor %xmm5,%xmm0,%xmm0
+ vpxor %xmm4,%xmm0,%xmm0
+
+
+ vpshufb %xmm1,%xmm0,%xmm0
+ vmovdqu %xmm0,(%rdi)
+ ret
+
+.cfi_endproc
+.size gcm_gmult_vpclmulqdq_avx2, . - gcm_gmult_vpclmulqdq_avx2
+.globl gcm_ghash_vpclmulqdq_avx2
+.hidden gcm_ghash_vpclmulqdq_avx2
+.type gcm_ghash_vpclmulqdq_avx2,@function
+.align 32
+gcm_ghash_vpclmulqdq_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+
+
+
+
+
+
+ vmovdqu .Lbswap_mask(%rip),%xmm6
+ vmovdqu .Lgfpoly(%rip),%xmm7
+
+
+ vmovdqu (%rdi),%xmm5
+ vpshufb %xmm6,%xmm5,%xmm5
+
+
+ cmpq $32,%rcx
+ jb .Lghash_lastblock
+
+
+
+ vinserti128 $1,%xmm6,%ymm6,%ymm6
+ vinserti128 $1,%xmm7,%ymm7,%ymm7
+
+ cmpq $127,%rcx
+ jbe .Lghash_loop_1x
+
+
+ vmovdqu 128(%rsi),%ymm8
+ vmovdqu 128+32(%rsi),%ymm9
+.Lghash_loop_4x:
+
+ vmovdqu 0(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 0(%rsi),%ymm2
+ vpxor %ymm5,%ymm1,%ymm1
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4
+
+ vmovdqu 32(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 32(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu 64(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 64(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+ vmovdqu 96(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 96(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm5,%ymm4,%ymm4
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm3,%ymm3
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm4,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpxor %ymm0,%ymm5,%ymm5
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+
+ subq $-128,%rdx
+ addq $-128,%rcx
+ cmpq $127,%rcx
+ ja .Lghash_loop_4x
+
+
+ cmpq $32,%rcx
+ jb .Lghash_loop_1x_done
+.Lghash_loop_1x:
+ vmovdqu (%rdx),%ymm0
+ vpshufb %ymm6,%ymm0,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vmovdqu 128-32(%rsi),%ymm0
+ vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2
+ vpclmulqdq $0x10,%ymm0,%ymm5,%ymm3
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm2,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpxor %ymm1,%ymm5,%ymm5
+
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ addq $32,%rdx
+ subq $32,%rcx
+ cmpq $32,%rcx
+ jae .Lghash_loop_1x
+.Lghash_loop_1x_done:
+
+
+.Lghash_lastblock:
+ testq %rcx,%rcx
+ jz .Lghash_done
+ vmovdqu (%rdx),%xmm0
+ vpshufb %xmm6,%xmm0,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ vmovdqu 128-16(%rsi),%xmm0
+ vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
+ vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm2,%xmm2
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
+ vpshufd $0x4e,%xmm2,%xmm2
+ vpxor %xmm2,%xmm5,%xmm5
+ vpxor %xmm1,%xmm5,%xmm5
+
+
+.Lghash_done:
+
+ vpshufb %xmm6,%xmm5,%xmm5
+ vmovdqu %xmm5,(%rdi)
+
+ vzeroupper
+ ret
+
+.cfi_endproc
+.size gcm_ghash_vpclmulqdq_avx2, . - gcm_ghash_vpclmulqdq_avx2
+.globl aes_gcm_enc_update_vaes_avx2
+.hidden aes_gcm_enc_update_vaes_avx2
+.type aes_gcm_enc_update_vaes_avx2,@function
+.align 32
+aes_gcm_enc_update_vaes_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+#ifdef BORINGSSL_DISPATCH_TEST
+.extern BORINGSSL_function_hit
+.hidden BORINGSSL_function_hit
+ movb $1,BORINGSSL_function_hit+8(%rip)
+#endif
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 240(%rcx),%r10d
+ leal -20(,%r10,4),%r10d
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func1
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_first_4_vecs__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_first_4_vecs__func1
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ jbe .Lghash_last_ciphertext_4x__func1
+.align 16
+.Lcrypt_loop_4x__func1:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func1
+ je .Laes192__func1
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func1:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func1:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+ subq $-128,%rsi
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func1
+.Lghash_last_ciphertext_4x__func1:
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+ subq $-128,%rsi
+.Lcrypt_loop_4x_done__func1:
+
+ testq %rdx,%rdx
+ jz .Ldone__func1
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func1
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %ymm0,%ymm13,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func1
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func1:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func1
+ je .Lxor_two_blocks__func1
+
+.Lxor_three_blocks__func1:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %xmm0,%xmm13,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_two_blocks__func1:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_one_block__func1:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm12,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func1:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func1:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func1:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ ret
+
+.cfi_endproc
+.size aes_gcm_enc_update_vaes_avx2, . - aes_gcm_enc_update_vaes_avx2
+.globl aes_gcm_dec_update_vaes_avx2
+.hidden aes_gcm_dec_update_vaes_avx2
+.type aes_gcm_dec_update_vaes_avx2,@function
+.align 32
+aes_gcm_dec_update_vaes_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 240(%rcx),%r10d
+ leal -20(,%r10,4),%r10d
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func2
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+.align 16
+.Lcrypt_loop_4x__func2:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func2
+ je .Laes192__func2
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func2:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func2:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ subq $-128,%rsi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func2
+.Lcrypt_loop_4x_done__func2:
+
+ testq %rdx,%rdx
+ jz .Ldone__func2
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func2
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %ymm0,%ymm3,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func2
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func2:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func2
+ je .Lxor_two_blocks__func2
+
+.Lxor_three_blocks__func2:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %xmm0,%xmm3,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_two_blocks__func2:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_one_block__func2:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm2,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func2:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func2:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func2:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ ret
+
+.cfi_endproc
+.size aes_gcm_dec_update_vaes_avx2, . - aes_gcm_dec_update_vaes_avx2
+#endif
diff --git a/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in b/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in
index 4776087d9a76..db9bf0e20274 100644
--- a/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in
+++ b/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in
@@ -8,3 +8,12 @@ fi
. /usr/share/initramfs-tools/hook-functions
copy_exec /usr/share/initramfs-tools/zfsunlock /usr/bin/zfsunlock
+
+if [ -f /etc/initramfs-tools/etc/motd ]; then
+ copy_file text /etc/initramfs-tools/etc/motd /etc/motd
+else
+ tmpf=$(mktemp)
+	echo "If you use a ZFS encrypted root filesystem, you can use \`zfsunlock\` to manually unlock it" > "$tmpf"
+ copy_file text "$tmpf" /etc/motd
+ rm -f "$tmpf"
+fi
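Because the hook prefers /etc/initramfs-tools/etc/motd when present, a
site-specific message can replace the default hint; roughly, on an
initramfs-tools system:

    mkdir -p /etc/initramfs-tools/etc
    echo 'Run zfsunlock to unlock the encrypted root.' > /etc/initramfs-tools/etc/motd
    update-initramfs -u
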
diff --git a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
index c569b2528368..67707e9d80f4 100644
--- a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
+++ b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
@@ -979,7 +979,8 @@ mountroot()
touch /run/zfs_unlock_complete
if [ -e /run/zfs_unlock_complete_notify ]; then
- read -r < /run/zfs_unlock_complete_notify
+ # shellcheck disable=SC2034
+ read -r zfs_unlock_complete_notify < /run/zfs_unlock_complete_notify
fi
# ------------
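The read fix above is as much a portability fix as a lint fix: POSIX requires
read to be given at least one variable name, so the bare redirect-only form
can fail under dash, and SC2034 is silenced because the variable exists only
to satisfy that requirement. A rough illustration (dash's exact diagnostic may
differ):

    dash -c 'read -r < /dev/null'    # typically rejected: read wants a variable
    dash -c 'read -r _ < /dev/null'  # portable equivalent
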
diff --git a/sys/contrib/openzfs/contrib/intel_qat/readme.md b/sys/contrib/openzfs/contrib/intel_qat/readme.md
index 7e45d395bb80..04c299b6404c 100644
--- a/sys/contrib/openzfs/contrib/intel_qat/readme.md
+++ b/sys/contrib/openzfs/contrib/intel_qat/readme.md
@@ -8,7 +8,7 @@ This contrib contains community compatibility patches to get Intel QAT working o
These patches are based on the following Intel QAT version:
[1.7.l.4.10.0-00014](https://01.org/sites/default/files/downloads/qat1.7.l.4.10.0-00014.tar.gz)
-When using QAT with above kernels versions, the following patches needs to be applied using:
+When using QAT with the above kernel versions, the following patches need to be applied using:
patch -p1 < _$PATCH_
_Where $PATCH refers to the path of the patch in question_
diff --git a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
index a0bc172c6f44..88698dedabbc 100644
--- a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
+++ b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
@@ -391,7 +391,11 @@ static int
zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
int argc, const char **argv)
{
+#if defined(__FreeBSD__)
+ config->homes_prefix = strdup("zroot/home");
+#else
config->homes_prefix = strdup("rpool/home");
+#endif
if (config->homes_prefix == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
return (PAM_SERVICE_ERR);
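The #ifdef above only changes the built-in default, matching each platform's
conventional root pool name (zroot on FreeBSD, rpool on Linux); the prefix can
still be overridden per-site through the module's homes= argument in the PAM
stack, e.g.:

    # /etc/pam.d/common-password (illustrative; the pool name is site-specific)
    password optional pam_zfs_key.so homes=tank/home
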
diff --git a/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/exceptions.py b/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/exceptions.py
index b26a37f5de10..26d66a452726 100644
--- a/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/exceptions.py
+++ b/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/exceptions.py
@@ -604,5 +604,4 @@ class RaidzExpansionRunning(ZFSError):
errno = ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS
message = "A raidz device is currently expanding"
-
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
diff --git a/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py b/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
index 971aa1d0d493..bad1af2d1671 100644
--- a/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
+++ b/sys/contrib/openzfs/contrib/pyzfs/libzfs_core/test/test_libzfs_core.py
@@ -4223,7 +4223,7 @@ class _TempPool(object):
self.getRoot().reset()
return
- # On the Buildbot builders this may fail with "pool is busy"
+ # On the CI builders this may fail with "pool is busy"
# Retry 5 times before raising an error
retry = 0
while True:
diff --git a/sys/contrib/openzfs/etc/init.d/README.md b/sys/contrib/openzfs/etc/init.d/README.md
index da780fdc1222..3852dd9a6b2e 100644
--- a/sys/contrib/openzfs/etc/init.d/README.md
+++ b/sys/contrib/openzfs/etc/init.d/README.md
@@ -1,5 +1,5 @@
DESCRIPTION
- These script were written with the primary intention of being portable and
+ These scripts were written with the primary intention of being portable and
usable on as many systems as possible.
This is, in practice, usually not possible. But the intention is there.
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index 3fcdc176a621..14930fb90622 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -479,6 +479,8 @@ _LIBZFS_H zpool_status_t zpool_import_status(nvlist_t *, const char **,
_LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
+_LIBZFS_H void zpool_refresh_stats_from_handle(zpool_handle_t *,
+ zpool_handle_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
_LIBZFS_H void zpool_add_propname(zpool_handle_t *, const char *);
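
Judging by its name and its ABI entry later in this patch (two zpool_handle_t parameters, dzhp and szhp, returning void), zpool_refresh_stats_from_handle() copies already-refreshed state from one handle of a pool onto another, avoiding a second kernel round trip. A hedged usage sketch; the handle setup and pool name are illustrative:

zpool_handle_t *a = zpool_open(hdl, "tank");
zpool_handle_t *b = zpool_open(hdl, "tank");
boolean_t missing;

if (zpool_refresh_stats(a, &missing) == 0 && !missing)
	zpool_refresh_stats_from_handle(b, a);	/* reuse a's fresh stats */
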
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/debug.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/debug.h
index 974704e92bbd..32bc02f3dc86 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/debug.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/debug.h
@@ -69,6 +69,10 @@
#define __maybe_unused __attribute__((unused))
#endif
+#ifndef __must_check
+#define __must_check __attribute__((__warn_unused_result__))
+#endif
+
/*
* Without this, we see warnings from objtool during normal Linux builds when
* the kernel is built with CONFIG_STACK_VALIDATION=y:
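
__must_check expands to the compiler's warn_unused_result attribute, so discarding the return value of a function declared with it draws a warning. A self-contained demonstration; do_commit() is a made-up stand-in:

#include <stdio.h>

#ifndef __must_check
#define	__must_check	__attribute__((__warn_unused_result__))
#endif

static __must_check int
do_commit(void)
{
	return (-1);	/* pretend the operation failed */
}

int
main(void)
{
	/* do_commit(); on its own would trigger -Wunused-result */
	if (do_commit() != 0)
		fprintf(stderr, "commit failed\n");
	return (0);
}
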
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/proc.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/proc.h
index c6bc10d6babe..1cbd79ec893f 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/proc.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/proc.h
@@ -77,8 +77,8 @@ do_thread_create(caddr_t stk, size_t stksize, void (*proc)(void *), void *arg,
/*
* Be sure there are no surprises.
*/
- ASSERT(stk == NULL);
- ASSERT(len == 0);
+ ASSERT0P(stk);
+ ASSERT0(len);
ASSERT(state == TS_RUN);
if (pp == &p0)
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/time.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/time.h
index 2f5fe4619ef7..14b42f2e7087 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/time.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/time.h
@@ -63,6 +63,17 @@ typedef longlong_t hrtime_t;
#define NSEC_TO_TICK(nsec) ((nsec) / (NANOSEC / hz))
static __inline hrtime_t
+getlrtime(void)
+{
+ struct timespec ts;
+ hrtime_t nsec;
+
+ getnanouptime(&ts);
+ nsec = ((hrtime_t)ts.tv_sec * NANOSEC) + ts.tv_nsec;
+ return (nsec);
+}
+
+static __inline hrtime_t
gethrtime(void)
{
struct timespec ts;
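
getlrtime() is a low-resolution sibling of gethrtime(): getnanouptime() returns the kernel's cached, tick-granularity timestamp instead of taking a full clock read, trading precision for a cheaper call. The same trade-off is visible from Linux userspace; a runnable sketch with CLOCK_MONOTONIC_COARSE standing in for getnanouptime():

#define	_GNU_SOURCE
#include <stdio.h>
#include <time.h>

static long long
to_nsec(struct timespec ts)
{
	return ((long long)ts.tv_sec * 1000000000LL + ts.tv_nsec);
}

int
main(void)
{
	struct timespec fine, coarse;

	(void) clock_gettime(CLOCK_MONOTONIC, &fine);		/* ~gethrtime() */
	(void) clock_gettime(CLOCK_MONOTONIC_COARSE, &coarse);	/* ~getlrtime() */
	printf("fine=%lld ns, coarse=%lld ns (coarse advances per tick)\n",
	    to_nsec(fine), to_nsec(coarse));
	return (0);
}
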
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
index 076dab8ba6dc..214f3ea0e787 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
@@ -542,24 +542,6 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
-/*
- * All the io_*() helper functions below can operate on a bio, or a rq, but
- * not both. The older submit_bio() codepath will pass a bio, and the
- * newer blk-mq codepath will pass a rq.
- */
-static inline int
-io_data_dir(struct bio *bio, struct request *rq)
-{
- if (rq != NULL) {
- if (op_is_write(req_op(rq))) {
- return (WRITE);
- } else {
- return (READ);
- }
- }
- return (bio_data_dir(bio));
-}
-
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h
index 16e8a319a5f8..152e5a606f0e 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/dcache_compat.h
@@ -61,32 +61,6 @@
#endif
/*
- * 2.6.30 API change,
- * The const keyword was added to the 'struct dentry_operations' in
- * the dentry structure. To handle this we define an appropriate
- * dentry_operations_t typedef which can be used.
- */
-typedef const struct dentry_operations dentry_operations_t;
-
-/*
- * 2.6.38 API addition,
- * Added d_clear_d_op() helper function which clears some flags and the
- * registered dentry->d_op table. This is required because d_set_d_op()
- * issues a warning when the dentry operations table is already set.
- * For the .zfs control directory to work properly we must be able to
- * override the default operations table and register custom .d_automount
- * and .d_revalidate callbacks.
- */
-static inline void
-d_clear_d_op(struct dentry *dentry)
-{
- dentry->d_op = NULL;
- dentry->d_flags &= ~(
- DCACHE_OP_HASH | DCACHE_OP_COMPARE |
- DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE);
-}
-
-/*
* Walk and invalidate all dentry aliases of an inode
* unless it's a mountpoint
*/
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
index cd245a5f0135..326f471d7c9b 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
@@ -139,15 +139,6 @@
*/
#if defined(HAVE_KERNEL_FPU_INTERNAL)
-/*
- * For kernels not exporting *kfpu_{begin,end} we have to use inline assembly
- * with the XSAVE{,OPT,S} instructions, so we need the toolchain to support at
- * least XSAVE.
- */
-#if !defined(HAVE_XSAVE)
-#error "Toolchain needs to support the XSAVE assembler instruction"
-#endif
-
#ifndef XFEATURE_MASK_XTILE
/*
* For kernels where this doesn't exist yet, we still don't want to break
@@ -335,9 +326,13 @@ kfpu_begin(void)
return;
}
#endif
+#if defined(HAVE_XSAVE)
if (static_cpu_has(X86_FEATURE_XSAVE)) {
kfpu_do_xsave("xsave", state, ~XFEATURE_MASK_XTILE);
- } else if (static_cpu_has(X86_FEATURE_FXSR)) {
+ return;
+ }
+#endif
+ if (static_cpu_has(X86_FEATURE_FXSR)) {
kfpu_save_fxsr(state);
} else {
kfpu_save_fsave(state);
@@ -390,9 +385,13 @@ kfpu_end(void)
goto out;
}
#endif
+#if defined(HAVE_XSAVE)
if (static_cpu_has(X86_FEATURE_XSAVE)) {
kfpu_do_xrstor("xrstor", state, ~XFEATURE_MASK_XTILE);
- } else if (static_cpu_has(X86_FEATURE_FXSR)) {
+ goto out;
+ }
+#endif
+ if (static_cpu_has(X86_FEATURE_FXSR)) {
kfpu_restore_fxsr(state);
} else {
kfpu_restore_fsave(state);
@@ -599,6 +598,32 @@ zfs_movbe_available(void)
}
/*
+ * Check if VAES instruction set is available
+ */
+static inline boolean_t
+zfs_vaes_available(void)
+{
+#if defined(X86_FEATURE_VAES)
+ return (!!boot_cpu_has(X86_FEATURE_VAES));
+#else
+ return (B_FALSE);
+#endif
+}
+
+/*
+ * Check if VPCLMULQDQ instruction set is available
+ */
+static inline boolean_t
+zfs_vpclmulqdq_available(void)
+{
+#if defined(X86_FEATURE_VPCLMULQDQ)
+ return (!!boot_cpu_has(X86_FEATURE_VPCLMULQDQ));
+#else
+ return (B_FALSE);
+#endif
+}
+
+/*
* Check if SHA_NI instruction set is available
*/
static inline boolean_t
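
zfs_vaes_available() and zfs_vpclmulqdq_available() gate the AVX2 VAES AES-GCM assembly this patch adds to the icp build (aesni-gcm-avx2-vaes.S). The expected consumer shape, as a self-contained mock; the probe bodies here are stubs, whereas the real ones consult boot_cpu_has():

#include <stdio.h>

typedef int boolean_t;
#define	B_FALSE	0
#define	B_TRUE	1

static boolean_t zfs_vaes_available(void)	{ return (B_TRUE); }
static boolean_t zfs_vpclmulqdq_available(void)	{ return (B_FALSE); }

int
main(void)
{
	/*
	 * The wide GCM loop needs both instruction sets, so both
	 * probes must pass before it can be selected.
	 */
	if (zfs_vaes_available() && zfs_vpclmulqdq_available())
		puts("AES-GCM: VAES/VPCLMULQDQ (AVX2) path");
	else
		puts("AES-GCM: AES-NI/PCLMULQDQ fallback");
	return (0);
}
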
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/debug.h b/sys/contrib/openzfs/include/os/linux/spl/sys/debug.h
index 1671ba4074da..85b96e1e23a7 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/debug.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/debug.h
@@ -69,6 +69,10 @@
#define __maybe_unused __attribute__((unused))
#endif
+#ifndef __must_check
+#define __must_check __attribute__((__warn_unused_result__))
+#endif
+
/*
* Without this, we see warnings from objtool during normal Linux builds when
* the kernel is built with CONFIG_STACK_VALIDATION=y:
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/mutex.h b/sys/contrib/openzfs/include/os/linux/spl/sys/mutex.h
index f000f53ab9b6..4eca2414fc5b 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/mutex.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/mutex.h
@@ -111,7 +111,7 @@ spl_mutex_lockdep_on_maybe(kmutex_t *mp) \
#undef mutex_destroy
#define mutex_destroy(mp) \
{ \
- VERIFY3P(mutex_owner(mp), ==, NULL); \
+ VERIFY0P(mutex_owner(mp)); \
}
#define mutex_tryenter(mp) \
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/rwlock.h b/sys/contrib/openzfs/include/os/linux/spl/sys/rwlock.h
index 563e0a19663d..c883836c2f83 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/rwlock.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/rwlock.h
@@ -130,7 +130,7 @@ RW_READ_HELD(krwlock_t *rwp)
/*
* The Linux rwsem implementation does not require a matching destroy.
*/
-#define rw_destroy(rwp) ((void) 0)
+#define rw_destroy(rwp) ASSERT(!(RW_LOCK_HELD(rwp)))
/*
* Upgrading a rwsem from a reader to a writer is not supported by the
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/stat.h b/sys/contrib/openzfs/include/os/linux/spl/sys/stat.h
index 087389b57b34..ad2815e46394 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/stat.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/stat.h
@@ -25,6 +25,6 @@
#ifndef _SPL_STAT_H
#define _SPL_STAT_H
-#include <linux/stat.h>
+#include <sys/stat.h>
#endif /* SPL_STAT_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/time.h b/sys/contrib/openzfs/include/os/linux/spl/sys/time.h
index 33b273b53996..4edc42a8aef9 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/time.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/time.h
@@ -80,6 +80,14 @@ gethrestime_sec(void)
}
static inline hrtime_t
+getlrtime(void)
+{
+ inode_timespec_t ts;
+ ktime_get_coarse_ts64(&ts);
+ return (((hrtime_t)ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec);
+}
+
+static inline hrtime_t
gethrtime(void)
{
struct timespec64 ts;
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
index 955462c85d10..e34ea46b3fe8 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
@@ -139,18 +139,18 @@
#define ZCW_TP_STRUCT_ENTRY \
__field(lwb_t *, zcw_lwb) \
__field(boolean_t, zcw_done) \
- __field(int, zcw_zio_error) \
+ __field(int, zcw_error) \
#define ZCW_TP_FAST_ASSIGN \
__entry->zcw_lwb = zcw->zcw_lwb; \
__entry->zcw_done = zcw->zcw_done; \
- __entry->zcw_zio_error = zcw->zcw_zio_error;
+ __entry->zcw_error = zcw->zcw_error;
#define ZCW_TP_PRINTK_FMT \
"zcw { lwb %p done %u error %u }"
#define ZCW_TP_PRINTK_ARGS \
- __entry->zcw_lwb, __entry->zcw_done, __entry->zcw_zio_error
+ __entry->zcw_lwb, __entry->zcw_done, __entry->zcw_error
/*
* Generic support for two argument tracepoints of the form:
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
index f5a9105cd885..8994aab889fe 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
@@ -55,6 +55,7 @@ extern const struct file_operations zpl_dir_file_operations;
extern void zpl_prune_sb(uint64_t nr_to_scan, void *arg);
extern const struct super_operations zpl_super_operations;
+extern const struct dentry_operations zpl_dentry_operations;
extern const struct export_operations zpl_export_operations;
extern struct file_system_type zpl_fs_type;
diff --git a/sys/contrib/openzfs/include/sys/dmu.h b/sys/contrib/openzfs/include/sys/dmu.h
index 7c2024a16d8f..aa5035862def 100644
--- a/sys/contrib/openzfs/include/sys/dmu.h
+++ b/sys/contrib/openzfs/include/sys/dmu.h
@@ -414,9 +414,9 @@ typedef struct dmu_buf {
#define DMU_POOL_ZPOOL_CHECKPOINT "com.delphix:zpool_checkpoint"
#define DMU_POOL_LOG_SPACEMAP_ZAP "com.delphix:log_spacemap_zap"
#define DMU_POOL_DELETED_CLONES "com.delphix:deleted_clones"
-#define DMU_POOL_TXG_LOG_TIME_MINUTES "com.klaraystems:txg_log_time:minutes"
-#define DMU_POOL_TXG_LOG_TIME_DAYS "com.klaraystems:txg_log_time:days"
-#define DMU_POOL_TXG_LOG_TIME_MONTHS "com.klaraystems:txg_log_time:months"
+#define DMU_POOL_TXG_LOG_TIME_MINUTES "com.klarasystems:txg_log_time:minutes"
+#define DMU_POOL_TXG_LOG_TIME_DAYS "com.klarasystems:txg_log_time:days"
+#define DMU_POOL_TXG_LOG_TIME_MONTHS "com.klarasystems:txg_log_time:months"
/*
* Allocate an object from this objset. The range of object numbers
@@ -742,8 +742,8 @@ dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
dmu_buf_evict_func_t *evict_func_async,
dmu_buf_t **clear_on_evict_dbufp __maybe_unused)
{
- ASSERT(dbu->dbu_evict_func_sync == NULL);
- ASSERT(dbu->dbu_evict_func_async == NULL);
+ ASSERT0P(dbu->dbu_evict_func_sync);
+ ASSERT0P(dbu->dbu_evict_func_async);
/* must have at least one evict func */
IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
diff --git a/sys/contrib/openzfs/include/sys/dmu_impl.h b/sys/contrib/openzfs/include/sys/dmu_impl.h
index 21a8b16a3ee6..bae872bd1907 100644
--- a/sys/contrib/openzfs/include/sys/dmu_impl.h
+++ b/sys/contrib/openzfs/include/sys/dmu_impl.h
@@ -168,12 +168,10 @@ extern "C" {
* dn_allocated_txg
* dn_free_txg
* dn_assigned_txg
- * dn_dirty_txg
+ * dn_dirtycnt
* dd_assigned_tx
* dn_notxholds
* dn_nodnholds
- * dn_dirtyctx
- * dn_dirtyctx_firstset
* (dn_phys copy fields?)
* (dn_phys contents?)
* held from:
diff --git a/sys/contrib/openzfs/include/sys/dnode.h b/sys/contrib/openzfs/include/sys/dnode.h
index 76218c8b09ca..8bd1db5b7165 100644
--- a/sys/contrib/openzfs/include/sys/dnode.h
+++ b/sys/contrib/openzfs/include/sys/dnode.h
@@ -141,12 +141,6 @@ struct dmu_buf_impl;
struct objset;
struct zio;
-enum dnode_dirtycontext {
- DN_UNDIRTIED,
- DN_DIRTY_OPEN,
- DN_DIRTY_SYNC
-};
-
/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
#define DNODE_FLAG_USED_BYTES (1 << 0)
#define DNODE_FLAG_USERUSED_ACCOUNTED (1 << 1)
@@ -340,11 +334,9 @@ struct dnode {
uint64_t dn_allocated_txg;
uint64_t dn_free_txg;
uint64_t dn_assigned_txg;
- uint64_t dn_dirty_txg; /* txg dnode was last dirtied */
+ uint8_t dn_dirtycnt;
kcondvar_t dn_notxholds;
kcondvar_t dn_nodnholds;
- enum dnode_dirtycontext dn_dirtyctx;
- const void *dn_dirtyctx_firstset; /* dbg: contents meaningless */
/* protected by own devices */
zfs_refcount_t dn_tx_holds;
@@ -440,7 +432,6 @@ void dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting);
int dnode_try_claim(objset_t *os, uint64_t object, int slots);
boolean_t dnode_is_dirty(dnode_t *dn);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
-void dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
@@ -468,9 +459,6 @@ void dnode_free_interior_slots(dnode_t *dn);
void dnode_set_storage_type(dnode_t *dn, dmu_object_type_t type);
-#define DNODE_IS_DIRTY(_dn) \
- ((_dn)->dn_dirty_txg >= spa_syncing_txg((_dn)->dn_objset->os_spa))
-
#define DNODE_LEVEL_IS_CACHEABLE(_dn, _level) \
((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \
(((_level) > 0 || DMU_OT_IS_METADATA((_dn)->dn_type)) && \
diff --git a/sys/contrib/openzfs/include/sys/dsl_deleg.h b/sys/contrib/openzfs/include/sys/dsl_deleg.h
index ae729b9f32ff..36dd6211219d 100644
--- a/sys/contrib/openzfs/include/sys/dsl_deleg.h
+++ b/sys/contrib/openzfs/include/sys/dsl_deleg.h
@@ -46,6 +46,7 @@ extern "C" {
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
+#define ZFS_DELEG_PERM_SEND_RAW "send:raw"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_RECEIVE_APPEND "receive:append"
#define ZFS_DELEG_PERM_ALLOW "allow"
diff --git a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
index 659c64bf15a6..a771b11420fd 100644
--- a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
@@ -58,6 +58,7 @@ extern "C" {
#define FM_EREPORT_ZFS_PROBE_FAILURE "probe_failure"
#define FM_EREPORT_ZFS_LOG_REPLAY "log_replay"
#define FM_EREPORT_ZFS_CONFIG_CACHE_WRITE "config_cache_write"
+#define FM_EREPORT_ZFS_SITOUT "sitout"
#define FM_EREPORT_PAYLOAD_ZFS_POOL "pool"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE "pool_failmode"
diff --git a/sys/contrib/openzfs/include/sys/fs/zfs.h b/sys/contrib/openzfs/include/sys/fs/zfs.h
index fc359c10365a..662fd81c5ee1 100644
--- a/sys/contrib/openzfs/include/sys/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fs/zfs.h
@@ -385,6 +385,8 @@ typedef enum {
VDEV_PROP_TRIM_SUPPORT,
VDEV_PROP_TRIM_ERRORS,
VDEV_PROP_SLOW_IOS,
+ VDEV_PROP_SIT_OUT,
+ VDEV_PROP_AUTOSIT,
VDEV_NUM_PROPS
} vdev_prop_t;
@@ -746,6 +748,8 @@ typedef struct zpool_load_policy {
#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT "ashift"
#define ZPOOL_CONFIG_ASIZE "asize"
+#define ZPOOL_CONFIG_MIN_ALLOC "min_alloc"
+#define ZPOOL_CONFIG_MAX_ALLOC "max_alloc"
#define ZPOOL_CONFIG_DTL "DTL"
#define ZPOOL_CONFIG_SCAN_STATS "scan_stats" /* not stored on disk */
#define ZPOOL_CONFIG_REMOVAL_STATS "removal_stats" /* not stored on disk */
@@ -1673,6 +1677,7 @@ typedef enum {
ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS,
ZFS_ERR_ASHIFT_MISMATCH,
ZFS_ERR_STREAM_LARGE_MICROZAP,
+ ZFS_ERR_TOO_MANY_SITOUTS,
} zfs_errno_t;
/*
diff --git a/sys/contrib/openzfs/include/sys/range_tree.h b/sys/contrib/openzfs/include/sys/range_tree.h
index 0f6884682459..0f6def36f9f6 100644
--- a/sys/contrib/openzfs/include/sys/range_tree.h
+++ b/sys/contrib/openzfs/include/sys/range_tree.h
@@ -238,8 +238,7 @@ zfs_rs_set_end_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t end)
}
static inline void
-zfs_zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt,
- uint64_t fill)
+zfs_rs_set_fill_raw(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t fill)
{
ASSERT3U(rt->rt_type, <=, ZFS_RANGE_SEG_NUM_TYPES);
switch (rt->rt_type) {
@@ -277,7 +276,7 @@ static inline void
zfs_rs_set_fill(zfs_range_seg_t *rs, zfs_range_tree_t *rt, uint64_t fill)
{
ASSERT(IS_P2ALIGNED(fill, 1ULL << rt->rt_shift));
- zfs_zfs_rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
+ zfs_rs_set_fill_raw(rs, rt, fill >> rt->rt_shift);
}
typedef void zfs_range_tree_func_t(void *arg, uint64_t start, uint64_t size);
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index db6de332ae67..f172f2af6f07 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -880,7 +880,6 @@ extern kcondvar_t spa_namespace_cv;
#define SPA_CONFIG_UPDATE_VDEVS 1
extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t, boolean_t);
-extern void spa_config_load(void);
extern int spa_all_configs(uint64_t *generation, nvlist_t **pools);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
@@ -1031,7 +1030,7 @@ extern void spa_import_progress_set_notes_nolog(spa_t *spa,
extern int spa_config_tryenter(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
-extern void spa_config_enter_mmp(spa_t *spa, int locks, const void *tag,
+extern void spa_config_enter_priority(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
@@ -1085,6 +1084,7 @@ extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
+extern void spa_get_min_alloc_range(spa_t *spa, uint64_t *min, uint64_t *max);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_checkpoint_space(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
@@ -1244,7 +1244,6 @@ extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(spa_mode_t mode);
extern void spa_fini(void);
-extern void spa_boot_init(void *);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
diff --git a/sys/contrib/openzfs/include/sys/spa_impl.h b/sys/contrib/openzfs/include/sys/spa_impl.h
index 07a959db3447..62b062984d36 100644
--- a/sys/contrib/openzfs/include/sys/spa_impl.h
+++ b/sys/contrib/openzfs/include/sys/spa_impl.h
@@ -265,6 +265,7 @@ struct spa {
uint64_t spa_min_ashift; /* of vdevs in normal class */
uint64_t spa_max_ashift; /* of vdevs in normal class */
uint64_t spa_min_alloc; /* of vdevs in normal class */
+ uint64_t spa_max_alloc; /* of vdevs in normal class */
uint64_t spa_gcd_alloc; /* of vdevs in normal class */
uint64_t spa_config_guid; /* config pool guid */
uint64_t spa_load_guid; /* spa_load initialized guid */
diff --git a/sys/contrib/openzfs/include/sys/vdev_impl.h b/sys/contrib/openzfs/include/sys/vdev_impl.h
index 4ab472bd6742..5a8c2f846be2 100644
--- a/sys/contrib/openzfs/include/sys/vdev_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_impl.h
@@ -279,10 +279,12 @@ struct vdev {
uint64_t vdev_noalloc; /* device is passivated? */
uint64_t vdev_removing; /* device is being removed? */
uint64_t vdev_failfast; /* device failfast setting */
+ boolean_t vdev_autosit; /* automatic sitout management */
boolean_t vdev_rz_expanding; /* raidz is being expanded? */
boolean_t vdev_ishole; /* is a hole in the namespace */
uint64_t vdev_top_zap;
vdev_alloc_bias_t vdev_alloc_bias; /* metaslab allocation bias */
+ uint64_t vdev_last_latency_check;
/* pool checkpoint related */
space_map_t *vdev_checkpoint_sm; /* contains reserved blocks */
@@ -431,6 +433,10 @@ struct vdev {
hrtime_t vdev_mmp_pending; /* 0 if write finished */
uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
uint64_t vdev_expansion_time; /* vdev's last expansion time */
+ /* used to calculate average read latency */
+ uint64_t *vdev_prev_histo;
+ int64_t vdev_outlier_count; /* read outlier amongst peers */
+ hrtime_t vdev_read_sit_out_expire; /* end of sit out period */
list_node_t vdev_leaf_node; /* leaf vdev list */
/*
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz.h b/sys/contrib/openzfs/include/sys/vdev_raidz.h
index 3b02728cdbf3..df8c2aed4045 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz.h
@@ -61,6 +61,9 @@ void vdev_raidz_checksum_error(zio_t *, struct raidz_col *, abd_t *);
struct raidz_row *vdev_raidz_row_alloc(int, zio_t *);
void vdev_raidz_reflow_copy_scratch(spa_t *);
void raidz_dtl_reassessed(vdev_t *);
+boolean_t vdev_sit_out_reads(vdev_t *, zio_flag_t);
+void vdev_raidz_sit_child(vdev_t *, uint64_t);
+void vdev_raidz_unsit_child(vdev_t *);
extern const zio_vsd_ops_t vdev_raidz_vsd_ops;
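
These three hooks back the new VDEV_PROP_SIT_OUT/VDEV_PROP_AUTOSIT properties added later in this patch: a raidz child that is a persistent latency outlier can "sit out" so reads are reconstructed from the remaining children plus parity. A hedged control-flow sketch using only fields this patch introduces; it is not the actual vdev_raidz.c logic:

/* per-column read setup (illustrative) */
if (vdev_sit_out_reads(cvd, zio->io_flags)) {
	rc->rc_latency_outlier = 1;	/* reconstruct, don't read */
	rr->rr_outlier_cnt++;		/* bounded by parity; cf. */
}					/* ZFS_ERR_TOO_MANY_SITOUTS */
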
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h b/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
index debce6f09a22..8c8dcfb077f6 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
@@ -119,6 +119,7 @@ typedef struct raidz_col {
uint8_t rc_need_orig_restore:1; /* need to restore from orig_data? */
uint8_t rc_force_repair:1; /* Write good data to this column */
uint8_t rc_allow_repair:1; /* Allow repair I/O to this column */
+ uint8_t rc_latency_outlier:1; /* Latency outlier for this device */
int rc_shadow_devidx; /* for double write during expansion */
int rc_shadow_error; /* for double write during expansion */
uint64_t rc_shadow_offset; /* for double write during expansion */
@@ -133,6 +134,7 @@ typedef struct raidz_row {
int rr_firstdatacol; /* First data column/parity count */
abd_t *rr_abd_empty; /* dRAID empty sector buffer */
int rr_nempty; /* empty sectors included in parity */
+ int rr_outlier_cnt; /* Count of latency outlier devices */
#ifdef ZFS_DEBUG
uint64_t rr_offset; /* Logical offset for *_io_verify() */
uint64_t rr_size; /* Physical size for *_io_verify() */
diff --git a/sys/contrib/openzfs/include/sys/zfs_file.h b/sys/contrib/openzfs/include/sys/zfs_file.h
index a1f344c2bb79..67abe9988aaa 100644
--- a/sys/contrib/openzfs/include/sys/zfs_file.h
+++ b/sys/contrib/openzfs/include/sys/zfs_file.h
@@ -46,7 +46,7 @@ void zfs_file_close(zfs_file_t *fp);
int zfs_file_write(zfs_file_t *fp, const void *buf, size_t len, ssize_t *resid);
int zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t len, loff_t off,
- ssize_t *resid);
+ uint8_t ashift, ssize_t *resid);
int zfs_file_read(zfs_file_t *fp, void *buf, size_t len, ssize_t *resid);
int zfs_file_pread(zfs_file_t *fp, void *buf, size_t len, loff_t off,
ssize_t *resid);
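
zfs_file_pwrite() gains an ashift argument. The prototype alone does not show how it is consumed; presumably the file-backed vdev path uses it to size or align the write. Call-shape sketch only, with illustrative values (ashift 12 means 4 KiB blocks):

ssize_t resid;
int error;

error = zfs_file_pwrite(fp, buf, 4096, offset, 12, &resid);
if (error == 0 && resid != 0)
	error = SET_ERROR(EIO);	/* short write (illustrative policy) */
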
diff --git a/sys/contrib/openzfs/include/sys/zfs_ioctl.h b/sys/contrib/openzfs/include/sys/zfs_ioctl.h
index 8174242abdac..cfe11f43bb8e 100644
--- a/sys/contrib/openzfs/include/sys/zfs_ioctl.h
+++ b/sys/contrib/openzfs/include/sys/zfs_ioctl.h
@@ -455,6 +455,7 @@ typedef enum zinject_type {
ZINJECT_DECRYPT_FAULT,
ZINJECT_DELAY_IMPORT,
ZINJECT_DELAY_EXPORT,
+ ZINJECT_DELAY_READY,
} zinject_type_t;
typedef enum zinject_iotype {
diff --git a/sys/contrib/openzfs/include/sys/zfs_znode.h b/sys/contrib/openzfs/include/sys/zfs_znode.h
index ba577b80c98f..79b845a672a8 100644
--- a/sys/contrib/openzfs/include/sys/zfs_znode.h
+++ b/sys/contrib/openzfs/include/sys/zfs_znode.h
@@ -73,7 +73,7 @@ extern "C" {
pflags |= attr; \
else \
pflags &= ~attr; \
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(ZTOZSB(zp)), \
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(ZTOZSB(zp)), \
&pflags, sizeof (pflags), tx)); \
}
diff --git a/sys/contrib/openzfs/include/sys/zil.h b/sys/contrib/openzfs/include/sys/zil.h
index 9d1fb47e2dfc..da085998879b 100644
--- a/sys/contrib/openzfs/include/sys/zil.h
+++ b/sys/contrib/openzfs/include/sys/zil.h
@@ -456,7 +456,7 @@ typedef enum {
WR_NUM_STATES /* number of states */
} itx_wr_state_t;
-typedef void (*zil_callback_t)(void *data);
+typedef void (*zil_callback_t)(void *data, int err);
typedef struct itx {
list_node_t itx_node; /* linkage on zl_itx_list */
@@ -498,10 +498,13 @@ typedef struct zil_stats {
* (see zil_commit_writer_stall())
* - suspend: ZIL suspended
* (see zil_commit(), zil_get_commit_list())
+ * - crash: ZIL crashed
+ * (see zil_crash(), zil_commit(), ...)
*/
kstat_named_t zil_commit_error_count;
kstat_named_t zil_commit_stall_count;
kstat_named_t zil_commit_suspend_count;
+ kstat_named_t zil_commit_crash_count;
/*
* Number of transactions (reads, writes, renames, etc.)
@@ -549,6 +552,7 @@ typedef struct zil_sums {
wmsum_t zil_commit_error_count;
wmsum_t zil_commit_stall_count;
wmsum_t zil_commit_suspend_count;
+ wmsum_t zil_commit_crash_count;
wmsum_t zil_itx_count;
wmsum_t zil_itx_indirect_count;
wmsum_t zil_itx_indirect_bytes;
@@ -577,6 +581,25 @@ typedef struct zil_sums {
#define ZIL_STAT_BUMP(zil, stat) \
ZIL_STAT_INCR(zil, stat, 1);
+/*
+ * Flags for zil_commit_flags(). zil_commit() is a shortcut for
+ * zil_commit_flags(ZIL_COMMIT_FAILMODE), which is the most common use.
+ */
+typedef enum {
+ /*
+ * Try to commit the ZIL. If it fails, fall back to txg_wait_synced().
+ * If that fails, return EIO.
+ */
+ ZIL_COMMIT_NOW = 0,
+
+ /*
+ * Like ZIL_COMMIT_NOW, but if the ZIL commit fails because the pool
+ * suspended, act according to the pool's failmode= setting (wait for
+ * the pool to resume, or return EIO).
+ */
+ ZIL_COMMIT_FAILMODE = (1 << 1),
+} zil_commit_flag_t;
+
typedef int zil_parse_blk_func_t(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t txg);
typedef int zil_parse_lr_func_t(zilog_t *zilog, const lr_t *lr, void *arg,
@@ -606,14 +629,16 @@ extern boolean_t zil_destroy(zilog_t *zilog, boolean_t keep_first);
extern void zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx);
extern itx_t *zil_itx_create(uint64_t txtype, size_t lrsize);
-extern void zil_itx_destroy(itx_t *itx);
+extern void zil_itx_destroy(itx_t *itx, int err);
extern void zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx);
extern void zil_async_to_sync(zilog_t *zilog, uint64_t oid);
-extern void zil_commit(zilog_t *zilog, uint64_t oid);
-extern void zil_commit_impl(zilog_t *zilog, uint64_t oid);
extern void zil_remove_async(zilog_t *zilog, uint64_t oid);
+extern int zil_commit_flags(zilog_t *zilog, uint64_t oid,
+ zil_commit_flag_t flags);
+extern int __must_check zil_commit(zilog_t *zilog, uint64_t oid);
+
extern int zil_reset(const char *osname, void *txarg);
extern int zil_claim(struct dsl_pool *dp,
struct dsl_dataset *ds, void *txarg);
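
zil_commit() now returns an error and is marked __must_check, so callers must handle a commit that did not reach stable storage; per the comments above, plain zil_commit() is zil_commit_flags(..., ZIL_COMMIT_FAILMODE). A hedged caller sketch; the surrounding error policy is illustrative:

int error;

/* honors failmode=: may wait for a suspended pool to resume */
error = zil_commit(zilog, foid);
if (error != 0)
	return (error);		/* e.g. EIO after a ZIL crash */

/* or fail fast: returns EIO instead of waiting per failmode= */
error = zil_commit_flags(zilog, foid, ZIL_COMMIT_NOW);
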
diff --git a/sys/contrib/openzfs/include/sys/zil_impl.h b/sys/contrib/openzfs/include/sys/zil_impl.h
index 252264b9eae9..ea1364a7e35a 100644
--- a/sys/contrib/openzfs/include/sys/zil_impl.h
+++ b/sys/contrib/openzfs/include/sys/zil_impl.h
@@ -41,8 +41,8 @@ extern "C" {
*
* An lwb will start out in the "new" state, and transition to the "opened"
* state via a call to zil_lwb_write_open() on first itx assignment. When
- * transitioning from "new" to "opened" the zilog's "zl_issuer_lock" must be
- * held.
+ * transitioning from "new" to "opened" the zilog's "zl_issuer_lock" and
+ * LWB's "lwb_lock" must be held.
*
* After the lwb is "opened", it can be assigned number of itxs and transition
* into the "closed" state via zil_lwb_write_close() when full or on timeout.
@@ -100,16 +100,22 @@ typedef enum {
* holding the "zl_issuer_lock". After the lwb is issued, the zilog's
* "zl_lock" is used to protect the lwb against concurrent access.
*/
+typedef enum {
+ LWB_FLAG_SLIM = (1<<0), /* log block has slim format */
+ LWB_FLAG_SLOG = (1<<1), /* lwb_blk is on SLOG device */
+ LWB_FLAG_CRASHED = (1<<2), /* lwb is on the crash list */
+} lwb_flag_t;
+
typedef struct lwb {
zilog_t *lwb_zilog; /* back pointer to log struct */
blkptr_t lwb_blk; /* on disk address of this log blk */
- boolean_t lwb_slim; /* log block has slim format */
- boolean_t lwb_slog; /* lwb_blk is on SLOG device */
+ lwb_flag_t lwb_flags; /* extra info about this lwb */
int lwb_error; /* log block allocation error */
int lwb_nmax; /* max bytes in the buffer */
int lwb_nused; /* # used bytes in buffer */
int lwb_nfilled; /* # filled bytes in buffer */
int lwb_sz; /* size of block and buffer */
+ int lwb_min_sz; /* min size for range allocation */
lwb_state_t lwb_state; /* the state of this lwb */
char *lwb_buf; /* log write buffer */
zio_t *lwb_child_zio; /* parent zio for children */
@@ -124,7 +130,7 @@ typedef struct lwb {
list_t lwb_itxs; /* list of itx's */
list_t lwb_waiters; /* list of zil_commit_waiter's */
avl_tree_t lwb_vdev_tree; /* vdevs to flush after lwb write */
- kmutex_t lwb_vdev_lock; /* protects lwb_vdev_tree */
+ kmutex_t lwb_lock; /* protects lwb_vdev_tree and size */
} lwb_t;
/*
@@ -149,7 +155,7 @@ typedef struct zil_commit_waiter {
list_node_t zcw_node; /* linkage in lwb_t:lwb_waiter list */
lwb_t *zcw_lwb; /* back pointer to lwb when linked */
boolean_t zcw_done; /* B_TRUE when "done", else B_FALSE */
- int zcw_zio_error; /* contains the zio io_error value */
+ int zcw_error; /* result to return from zil_commit() */
} zil_commit_waiter_t;
/*
@@ -221,6 +227,7 @@ struct zilog {
uint64_t zl_cur_left; /* current burst remaining size */
uint64_t zl_cur_max; /* biggest record in current burst */
list_t zl_lwb_list; /* in-flight log write list */
+ list_t zl_lwb_crash_list; /* log writes in-flight at crash */
avl_tree_t zl_bp_tree; /* track bps during log parse */
clock_t zl_replay_time; /* lbolt of when replay started */
uint64_t zl_replay_blks; /* number of log blocks replayed */
@@ -245,6 +252,9 @@ struct zilog {
*/
uint64_t zl_max_block_size;
+ /* After crash, txg to restart zil */
+ uint64_t zl_restart_txg;
+
/* Pointer for per dataset zil sums */
zil_sums_t *zl_sums;
};
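
Folding lwb_slim/lwb_slog into the lwb_flags bit-field shrinks the lwb and makes room for the new LWB_FLAG_CRASHED state. The usual access pattern, sketched against the fields above:

/* test a flag (replaces the old boolean_t lwb_slog) */
if (lwb->lwb_flags & LWB_FLAG_SLOG)
	slog_blocks++;			/* illustrative counter */

/* set a flag when moving an lwb onto zl_lwb_crash_list */
lwb->lwb_flags |= LWB_FLAG_CRASHED;
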
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index 4f46eab3db89..acb0a03a36b2 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -82,7 +82,8 @@ gbh_nblkptrs(uint64_t size) {
static inline zio_eck_t *
gbh_eck(zio_gbh_phys_t *gbh, uint64_t size) {
ASSERT(IS_P2ALIGNED(size, sizeof (blkptr_t)));
- return ((zio_eck_t *)((uintptr_t)gbh + (size_t)size - sizeof (zio_eck_t)));
+ return ((zio_eck_t *)((uintptr_t)gbh + (size_t)size -
+ sizeof (zio_eck_t)));
}
static inline blkptr_t *
@@ -360,26 +361,26 @@ struct zbookmark_err_phys {
(zb)->zb_blkid == ZB_ROOT_BLKID)
typedef struct zio_prop {
- enum zio_checksum zp_checksum;
- enum zio_compress zp_compress;
+ enum zio_checksum zp_checksum:8;
+ enum zio_compress zp_compress:8;
uint8_t zp_complevel;
uint8_t zp_level;
uint8_t zp_copies;
uint8_t zp_gang_copies;
- dmu_object_type_t zp_type;
- boolean_t zp_dedup;
- boolean_t zp_dedup_verify;
- boolean_t zp_nopwrite;
- boolean_t zp_brtwrite;
- boolean_t zp_encrypt;
- boolean_t zp_byteorder;
- boolean_t zp_direct_write;
- boolean_t zp_rewrite;
+ dmu_object_type_t zp_type:8;
+ dmu_object_type_t zp_storage_type:8;
+ boolean_t zp_dedup:1;
+ boolean_t zp_dedup_verify:1;
+ boolean_t zp_nopwrite:1;
+ boolean_t zp_brtwrite:1;
+ boolean_t zp_encrypt:1;
+ boolean_t zp_byteorder:1;
+ boolean_t zp_direct_write:1;
+ boolean_t zp_rewrite:1;
+ uint32_t zp_zpl_smallblk;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
- uint32_t zp_zpl_smallblk;
- dmu_object_type_t zp_storage_type;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
@@ -622,7 +623,8 @@ extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, zio_flag_t flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
- blkptr_t *new_bp, uint64_t size, boolean_t *slog);
+ blkptr_t *new_bp, uint64_t min_size, uint64_t max_size, boolean_t *slog,
+ boolean_t allow_larger);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
@@ -716,6 +718,7 @@ extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
extern void zio_handle_import_delay(spa_t *spa, hrtime_t elapsed);
extern void zio_handle_export_delay(spa_t *spa, hrtime_t elapsed);
+extern hrtime_t zio_handle_ready_delay(zio_t *zio);
/*
* Checksum ereport functions
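
The zio_prop_t hunk above turns eight boolean_t members into one-bit bit-fields and narrows the enums to 8 bits, packing what were separate 4-byte slots into a few bytes. A self-contained before/after illustration (member names abbreviated; exact sizes are compiler dependent):

#include <stdio.h>

typedef int boolean_t;

struct wide {		/* old layout: each flag is a full int */
	boolean_t dedup, dedup_verify, nopwrite, brtwrite;
	boolean_t encrypt, byteorder, direct_write, rewrite;
};

struct packed {		/* new layout: one bit per flag */
	boolean_t dedup:1, dedup_verify:1, nopwrite:1, brtwrite:1;
	boolean_t encrypt:1, byteorder:1, direct_write:1, rewrite:1;
};

int
main(void)
{
	printf("wide=%zu bytes, packed=%zu bytes\n",	/* typically 32 vs 4 */
	    sizeof (struct wide), sizeof (struct packed));
	return (0);
}
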
diff --git a/sys/contrib/openzfs/include/sys/zvol.h b/sys/contrib/openzfs/include/sys/zvol.h
index cdc9dba2a28d..5791246e99e4 100644
--- a/sys/contrib/openzfs/include/sys/zvol.h
+++ b/sys/contrib/openzfs/include/sys/zvol.h
@@ -53,7 +53,7 @@ extern int zvol_set_volsize(const char *, uint64_t);
extern int zvol_set_volthreading(const char *, boolean_t);
extern int zvol_set_common(const char *, zfs_prop_t, zprop_source_t, uint64_t);
extern int zvol_set_ro(const char *, boolean_t);
-extern zvol_state_handle_t *zvol_suspend(const char *);
+extern int zvol_suspend(const char *, zvol_state_handle_t **);
extern int zvol_resume(zvol_state_handle_t *);
extern void *zvol_tag(zvol_state_handle_t *);
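
zvol_suspend() now reports failure via its return value and hands the handle back through an out-parameter, so callers can tell a missing or disappearing zvol apart from success. Hedged sketch of the new call shape:

zvol_state_handle_t *zv;
int error;

error = zvol_suspend(name, &zv);
if (error != 0)
	return (error);		/* e.g. zvol gone or being removed */
/* ... operate on the suspended zvol ... */
error = zvol_resume(zv);
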
diff --git a/sys/contrib/openzfs/include/sys/zvol_impl.h b/sys/contrib/openzfs/include/sys/zvol_impl.h
index f3dd9f26f23c..5422e66832c0 100644
--- a/sys/contrib/openzfs/include/sys/zvol_impl.h
+++ b/sys/contrib/openzfs/include/sys/zvol_impl.h
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
#ifndef _SYS_ZVOL_IMPL_H
@@ -56,6 +56,7 @@ typedef struct zvol_state {
atomic_t zv_suspend_ref; /* refcount for suspend */
krwlock_t zv_suspend_lock; /* suspend lock */
kcondvar_t zv_removing_cv; /* ready to remove minor */
+ list_node_t zv_remove_node; /* node on removal list */
struct zvol_state_os *zv_zso; /* private platform state */
boolean_t zv_threading; /* volthreading property */
} zvol_state_t;
@@ -135,7 +136,7 @@ int zvol_os_rename_minor(zvol_state_t *zv, const char *newname);
int zvol_os_create_minor(const char *name);
int zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize);
boolean_t zvol_os_is_zvol(const char *path);
-void zvol_os_clear_private(zvol_state_t *zv);
+void zvol_os_remove_minor(zvol_state_t *zv);
void zvol_os_set_disk_ro(zvol_state_t *zv, int flags);
void zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity);
diff --git a/sys/contrib/openzfs/include/zfs_deleg.h b/sys/contrib/openzfs/include/zfs_deleg.h
index f80fe46d35f8..a7bbf1620ad5 100644
--- a/sys/contrib/openzfs/include/zfs_deleg.h
+++ b/sys/contrib/openzfs/include/zfs_deleg.h
@@ -55,6 +55,7 @@ typedef enum {
ZFS_DELEG_NOTE_PROMOTE,
ZFS_DELEG_NOTE_RENAME,
ZFS_DELEG_NOTE_SEND,
+ ZFS_DELEG_NOTE_SEND_RAW,
ZFS_DELEG_NOTE_RECEIVE,
ZFS_DELEG_NOTE_ALLOW,
ZFS_DELEG_NOTE_USERPROP,
diff --git a/sys/contrib/openzfs/lib/libicp/Makefile.am b/sys/contrib/openzfs/lib/libicp/Makefile.am
index ce24d13a760f..23adba10bc44 100644
--- a/sys/contrib/openzfs/lib/libicp/Makefile.am
+++ b/sys/contrib/openzfs/lib/libicp/Makefile.am
@@ -69,6 +69,7 @@ nodist_libicp_la_SOURCES += \
module/icp/asm-x86_64/aes/aes_aesni.S \
module/icp/asm-x86_64/modes/gcm_pclmulqdq.S \
module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S \
+ module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S \
module/icp/asm-x86_64/modes/ghash-x86_64.S \
module/icp/asm-x86_64/sha2/sha256-x86_64.S \
module/icp/asm-x86_64/sha2/sha512-x86_64.S \
diff --git a/sys/contrib/openzfs/lib/libspl/Makefile.am b/sys/contrib/openzfs/lib/libspl/Makefile.am
index 6640ecd582a7..0fd907d3011e 100644
--- a/sys/contrib/openzfs/lib/libspl/Makefile.am
+++ b/sys/contrib/openzfs/lib/libspl/Makefile.am
@@ -2,6 +2,9 @@ include $(srcdir)/%D%/include/Makefile.am
libspl_assert_la_CFLAGS = $(AM_CFLAGS) $(LIBRARY_CFLAGS) $(LIBUNWIND_CFLAGS)
libspl_la_CFLAGS = $(libspl_assert_la_CFLAGS)
+if TARGET_CPU_I386
+libspl_la_CFLAGS += $(NO_ATOMIC_ALIGNMENT)
+endif
noinst_LTLIBRARIES += libspl_assert.la libspl.la
CPPCHECKTARGETS += libspl_assert.la libspl.la
diff --git a/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/stat.h b/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/stat.h
index a605af962a6d..13cc0b46ac93 100644
--- a/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/stat.h
+++ b/sys/contrib/openzfs/lib/libspl/include/os/linux/sys/stat.h
@@ -33,7 +33,7 @@
#ifdef HAVE_STATX
#include <fcntl.h>
-#include <linux/stat.h>
+#include <sys/stat.h>
#endif
/*
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/debug.h b/sys/contrib/openzfs/lib/libspl/include/sys/debug.h
index cced309bd1bb..02f33a68b75b 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/debug.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/debug.h
@@ -38,4 +38,8 @@
#define __maybe_unused __attribute__((unused))
#endif
+#ifndef __must_check
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
#endif
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/simd.h b/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
index 1ef24f5a7d39..4772a5416b2e 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
@@ -102,7 +102,9 @@ typedef enum cpuid_inst_sets {
AES,
PCLMULQDQ,
MOVBE,
- SHA_NI
+ SHA_NI,
+ VAES,
+ VPCLMULQDQ
} cpuid_inst_sets_t;
/*
@@ -127,6 +129,8 @@ typedef struct cpuid_feature_desc {
#define _AES_BIT (1U << 25)
#define _PCLMULQDQ_BIT (1U << 1)
#define _MOVBE_BIT (1U << 22)
+#define _VAES_BIT (1U << 9)
+#define _VPCLMULQDQ_BIT (1U << 10)
#define _SHA_NI_BIT (1U << 29)
/*
@@ -157,6 +161,8 @@ static const cpuid_feature_desc_t cpuid_features[] = {
[PCLMULQDQ] = {1U, 0U, _PCLMULQDQ_BIT, ECX },
[MOVBE] = {1U, 0U, _MOVBE_BIT, ECX },
[SHA_NI] = {7U, 0U, _SHA_NI_BIT, EBX },
+ [VAES] = {7U, 0U, _VAES_BIT, ECX },
+ [VPCLMULQDQ] = {7U, 0U, _VPCLMULQDQ_BIT, ECX },
};
/*
@@ -231,6 +237,8 @@ CPUID_FEATURE_CHECK(aes, AES);
CPUID_FEATURE_CHECK(pclmulqdq, PCLMULQDQ);
CPUID_FEATURE_CHECK(movbe, MOVBE);
CPUID_FEATURE_CHECK(shani, SHA_NI);
+CPUID_FEATURE_CHECK(vaes, VAES);
+CPUID_FEATURE_CHECK(vpclmulqdq, VPCLMULQDQ);
/*
* Detect register set support
@@ -382,6 +390,24 @@ zfs_shani_available(void)
}
/*
+ * Check if VAES instruction is available
+ */
+static inline boolean_t
+zfs_vaes_available(void)
+{
+ return (__cpuid_has_vaes());
+}
+
+/*
+ * Check if VPCLMULQDQ instruction is available
+ */
+static inline boolean_t
+zfs_vpclmulqdq_available(void)
+{
+ return (__cpuid_has_vpclmulqdq());
+}
+
+/*
* AVX-512 family of instruction sets:
*
* AVX512F Foundation
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/time.h b/sys/contrib/openzfs/lib/libspl/include/sys/time.h
index da80a5852ae5..062c6ec979fc 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/time.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/time.h
@@ -98,6 +98,15 @@ gethrestime_sec(void)
}
static inline hrtime_t
+getlrtime(void)
+{
+ struct timeval tv;
+ (void) gettimeofday(&tv, NULL);
+ return ((((uint64_t)tv.tv_sec) * NANOSEC) +
+ ((uint64_t)tv.tv_usec * NSEC_PER_USEC));
+}
+
+static inline hrtime_t
gethrtime(void)
{
struct timespec ts;
diff --git a/sys/contrib/openzfs/lib/libuutil/libuutil.abi b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
index 6c736c61e4a5..2a740afa07ca 100644
--- a/sys/contrib/openzfs/lib/libuutil/libuutil.abi
+++ b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
@@ -616,6 +616,7 @@
<array-type-def dimensions='1' type-id='de572c22' size-in-bits='1472' id='6d3c2f42'>
<subrange length='23' type-id='7359adad' id='fdd0f594'/>
</array-type-def>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<array-type-def dimensions='1' type-id='3a47d82b' size-in-bits='256' id='a133ec23'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
@@ -1020,13 +1021,6 @@
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
- <array-type-def dimensions='1' type-id='d315442e' size-in-bits='16' id='811205dc'>
- <subrange length='1' type-id='7359adad' id='52f813b4'/>
- </array-type-def>
- <array-type-def dimensions='1' type-id='d3130597' size-in-bits='768' id='f63f23b9'>
- <subrange length='12' type-id='7359adad' id='84827bdc'/>
- </array-type-def>
- <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
@@ -1061,93 +1055,6 @@
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__u16' type-id='8efea9e5' id='d315442e'/>
- <typedef-decl name='__s32' type-id='95e97e5e' id='3158a266'/>
- <typedef-decl name='__u32' type-id='f0981eeb' id='3f1a6b60'/>
- <typedef-decl name='__s64' type-id='1eb56b1e' id='49659421'/>
- <typedef-decl name='__u64' type-id='3a47d82b' id='d3130597'/>
- <class-decl name='statx_timestamp' size-in-bits='128' is-struct='yes' visibility='default' id='94101016'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='49659421' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__reserved' type-id='3158a266' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='statx' size-in-bits='2048' is-struct='yes' visibility='default' id='720b04c5'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='stx_mask' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='stx_blksize' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='stx_attributes' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='stx_nlink' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='stx_uid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='stx_gid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='stx_mode' type-id='d315442e' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='__spare0' type-id='811205dc' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='stx_ino' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='stx_size' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='stx_blocks' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='stx_attributes_mask' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='stx_atime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='stx_btime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='stx_ctime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='stx_mtime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='stx_rdev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1056'>
- <var-decl name='stx_rdev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='stx_dev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1120'>
- <var-decl name='stx_dev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='stx_mnt_id' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__spare2' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__spare3' type-id='f63f23b9' visibility='default'/>
- </data-member>
- </class-decl>
<class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='56fe4a37'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_fsname' type-id='26a90f95' visibility='default'/>
@@ -1237,8 +1144,6 @@
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<qualified-type-def type-id='62f7a03d' restrict='yes' id='f1cadedf'/>
- <pointer-type-def type-id='720b04c5' size-in-bits='64' id='936b8e35'/>
- <qualified-type-def type-id='936b8e35' restrict='yes' id='31d265b7'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
@@ -1254,14 +1159,6 @@
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
- <function-decl name='statx' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='9d26089a'/>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='f0981eeb'/>
- <parameter type-id='31d265b7'/>
- <return type-id='95e97e5e'/>
- </function-decl>
<function-decl name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='95e97e5e'/>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index ba161d1ef10f..f988d27a286a 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -571,6 +571,7 @@
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_read_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_refresh_stats_from_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@@ -641,7 +642,7 @@
<elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='2632' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
- <elf-symbol name='zfs_deleg_perm_tab' size='528' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zfs_deleg_perm_tab' size='544' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@@ -1458,103 +1459,8 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/getmntany.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='d315442e' size-in-bits='16' id='811205dc'>
- <subrange length='1' type-id='7359adad' id='52f813b4'/>
- </array-type-def>
- <array-type-def dimensions='1' type-id='d3130597' size-in-bits='768' id='f63f23b9'>
- <subrange length='12' type-id='7359adad' id='84827bdc'/>
- </array-type-def>
- <typedef-decl name='__u16' type-id='8efea9e5' id='d315442e'/>
- <typedef-decl name='__s32' type-id='95e97e5e' id='3158a266'/>
- <typedef-decl name='__u32' type-id='f0981eeb' id='3f1a6b60'/>
- <typedef-decl name='__s64' type-id='1eb56b1e' id='49659421'/>
- <typedef-decl name='__u64' type-id='3a47d82b' id='d3130597'/>
- <class-decl name='statx_timestamp' size-in-bits='128' is-struct='yes' visibility='default' id='94101016'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='49659421' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__reserved' type-id='3158a266' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='statx' size-in-bits='2048' is-struct='yes' visibility='default' id='720b04c5'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='stx_mask' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='stx_blksize' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='stx_attributes' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='stx_nlink' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='stx_uid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='stx_gid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='stx_mode' type-id='d315442e' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='__spare0' type-id='811205dc' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='stx_ino' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='stx_size' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='stx_blocks' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='stx_attributes_mask' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='stx_atime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='stx_btime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='stx_ctime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='stx_mtime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='stx_rdev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1056'>
- <var-decl name='stx_rdev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='stx_dev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1120'>
- <var-decl name='stx_dev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='stx_mnt_id' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__spare2' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__spare3' type-id='f63f23b9' visibility='default'/>
- </data-member>
- </class-decl>
<pointer-type-def type-id='56fe4a37' size-in-bits='64' id='b6b61d2f'/>
<qualified-type-def type-id='b6b61d2f' restrict='yes' id='3cad23cd'/>
- <pointer-type-def type-id='720b04c5' size-in-bits='64' id='936b8e35'/>
- <qualified-type-def type-id='936b8e35' restrict='yes' id='31d265b7'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
@@ -1566,14 +1472,6 @@
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='statx' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='9d26089a'/>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='f0981eeb'/>
- <parameter type-id='31d265b7'/>
- <return type-id='95e97e5e'/>
- </function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/timestamp.c' language='LANG_C99'>
<typedef-decl name='nl_item' type-id='95e97e5e' id='03b79a94'/>
@@ -3194,6 +3092,10 @@
<parameter type-id='dace003f'/>
<return type-id='80f4b756'/>
</function-decl>
+ <function-decl name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='22cce67b'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
<function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='5ce45b60'/>
@@ -3238,6 +3140,11 @@
<parameter type-id='37e3bd22' name='missing'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='zpool_refresh_stats_from_handle' mangled-name='zpool_refresh_stats_from_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats_from_handle'>
+ <parameter type-id='4c81de99' name='dzhp'/>
+ <parameter type-id='4c81de99' name='szhp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
<parameter type-id='80f4b756' name='poolname'/>
<return type-id='c19b74c3'/>
@@ -6117,7 +6024,9 @@
<enumerator name='VDEV_PROP_TRIM_SUPPORT' value='49'/>
<enumerator name='VDEV_PROP_TRIM_ERRORS' value='50'/>
<enumerator name='VDEV_PROP_SLOW_IOS' value='51'/>
- <enumerator name='VDEV_NUM_PROPS' value='52'/>
+ <enumerator name='VDEV_PROP_SIT_OUT' value='52'/>
+ <enumerator name='VDEV_PROP_AUTOSIT' value='53'/>
+ <enumerator name='VDEV_NUM_PROPS' value='54'/>
</enum-decl>
<typedef-decl name='vdev_prop_t' type-id='1573bec8' id='5aa5c90c'/>
<class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
@@ -9396,10 +9305,6 @@
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='22cce67b'/>
- <return type-id='5ce45b60'/>
- </function-decl>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
<return type-id='b59d7dce'/>
</function-decl>
@@ -9772,8 +9677,8 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfs_deleg.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='4224' id='55e705e7'>
- <subrange length='33' type-id='7359adad' id='6a5934df'/>
+ <array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='4352' id='55f84f08'>
+ <subrange length='34' type-id='7359adad' id='6a6a7e00'/>
</array-type-def>
<array-type-def dimensions='1' type-id='fa1870fd' size-in-bits='infinite' id='7c00e69d'>
<subrange length='infinite' id='031f2035'/>
@@ -9803,30 +9708,31 @@
<enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
<enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
<enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
- <enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
- <enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
- <enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
- <enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
- <enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
- <enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
- <enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
- <enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
- <enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
- <enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
- <enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
- <enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
- <enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
- <enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
- <enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
- <enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
+ <enumerator name='ZFS_DELEG_NOTE_SEND_RAW' value='8'/>
+ <enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='9'/>
+ <enumerator name='ZFS_DELEG_NOTE_ALLOW' value='10'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERPROP' value='11'/>
+ <enumerator name='ZFS_DELEG_NOTE_MOUNT' value='12'/>
+ <enumerator name='ZFS_DELEG_NOTE_SHARE' value='13'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='14'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='15'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERUSED' value='16'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='17'/>
+ <enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='18'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='19'/>
+ <enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='20'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='21'/>
+ <enumerator name='ZFS_DELEG_NOTE_HOLD' value='22'/>
+ <enumerator name='ZFS_DELEG_NOTE_RELEASE' value='23'/>
+ <enumerator name='ZFS_DELEG_NOTE_DIFF' value='24'/>
+ <enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='25'/>
+ <enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='26'/>
+ <enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='27'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='28'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='29'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='30'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='31'/>
+ <enumerator name='ZFS_DELEG_NOTE_NONE' value='32'/>
</enum-decl>
<typedef-decl name='zfs_deleg_note_t' type-id='729d4547' id='4613c173'/>
<class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='5aa05c1f'>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_config.c b/sys/contrib/openzfs/lib/libzfs/libzfs_config.c
index 0d2102191389..9d704e4303ff 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_config.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_config.c
@@ -308,6 +308,23 @@ zpool_refresh_stats(zpool_handle_t *zhp, boolean_t *missing)
}
/*
+ * Copies the pool config and state from szhp to dzhp. szhp and dzhp must
+ * represent the same pool. Used by pool_list_refresh() to avoid another
+ * round-trip into the kernel to get stats already collected earlier in the
+ * function.
+ */
+void
+zpool_refresh_stats_from_handle(zpool_handle_t *dzhp, zpool_handle_t *szhp)
+{
+ VERIFY0(strcmp(dzhp->zpool_name, szhp->zpool_name));
+ nvlist_free(dzhp->zpool_old_config);
+ dzhp->zpool_old_config = dzhp->zpool_config;
+ dzhp->zpool_config = fnvlist_dup(szhp->zpool_config);
+ dzhp->zpool_config_size = szhp->zpool_config_size;
+ dzhp->zpool_state = szhp->zpool_state;
+}
+
+/*
* The following environment variables are undocumented
* and should be used for testing purposes only:
*
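A minimal sketch of the intended call pattern for the new function, assuming a hypothetical caller that already holds two handles for the same pool (only zpool_refresh_stats() and the new zpool_refresh_stats_from_handle() are real libzfs entry points here):

#include <libzfs.h>

/*
 * Hypothetical caller: refresh stats through the kernel once on szhp,
 * then copy the already-collected config and state into dzhp, a second
 * handle for the same pool, without a second ioctl round-trip.
 */
static void
refresh_duplicate_handle(zpool_handle_t *dzhp, zpool_handle_t *szhp)
{
	boolean_t missing;

	if (zpool_refresh_stats(szhp, &missing) == 0 && !missing)
		zpool_refresh_stats_from_handle(dzhp, szhp);
}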
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_diff.c b/sys/contrib/openzfs/lib/libzfs/libzfs_diff.c
index 6aa0375f98d7..5f50bce531f7 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_diff.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_diff.c
@@ -81,7 +81,7 @@ get_stats_for_obj(differ_info_t *di, const char *dsname, uint64_t obj,
/* we can get stats even if we failed to get a path */
(void) memcpy(sb, &zc.zc_stat, sizeof (zfs_stat_t));
if (error == 0) {
- ASSERT(di->zerr == 0);
+ ASSERT0(di->zerr);
(void) strlcpy(pn, zc.zc_value, maxlen);
return (0);
}
@@ -404,7 +404,7 @@ write_free_diffs(FILE *fp, differ_info_t *di, dmu_diff_record_t *dr)
(void) strlcpy(zc.zc_name, di->fromsnap, sizeof (zc.zc_name));
zc.zc_obj = dr->ddr_first - 1;
- ASSERT(di->zerr == 0);
+ ASSERT0(di->zerr);
while (zc.zc_obj < dr->ddr_last) {
int err;
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_import.c b/sys/contrib/openzfs/lib/libzfs/libzfs_import.c
index 599e8e6f7819..7f276e9592c9 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_import.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_import.c
@@ -122,7 +122,7 @@ const pool_config_ops_t libzfs_config_ops = {
static uint64_t
label_offset(uint64_t size, int l)
{
- ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
+ ASSERT0(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t));
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
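As a worked example of the arithmetic this assertion guards, a standalone sketch using the usual on-disk constants of four 256 KiB labels, two at each end of the device (the constant values are assumptions here, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define	LABEL_SIZE	(256ULL << 10)	/* stand-in for sizeof (vdev_label_t) */
#define	NLABELS		4		/* stand-in for VDEV_LABELS */

static uint64_t
label_offset(uint64_t size, int l)
{
	/* size must be a multiple of LABEL_SIZE, per the ASSERT0 above */
	return (l * LABEL_SIZE +
	    (l < NLABELS / 2 ? 0 : size - NLABELS * LABEL_SIZE));
}

int
main(void)
{
	uint64_t size = 1ULL << 30;	/* a 1 GiB device */

	for (int l = 0; l < NLABELS; l++)
		printf("L%d @ %ju\n", l, (uintmax_t)label_offset(size, l));
	/* L0 @ 0, L1 @ 262144, L2 @ size - 524288, L3 @ size - 262144 */
	return (0);
}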
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
index 2a81b658d342..5c9e2199eed4 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
@@ -516,7 +516,7 @@ zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
} else if (rc == ENOTSUP) {
int spa_version;
- VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
+ VERIFY0(zfs_spa_version(zhp, &spa_version));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can't mount a version %llu "
"file system on a version %d pool. Pool must be"
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
index 10b42720e963..ce154ae1a4cd 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
@@ -5549,6 +5549,8 @@ zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
/* Only use if provided by the RAIDZ VDEV above */
if (prop == VDEV_PROP_RAIDZ_EXPANDING)
return (ENOENT);
+ if (prop == VDEV_PROP_SIT_OUT)
+ return (ENOENT);
}
if (vdev_prop_index_to_string(prop, intval,
(const char **)&strval) != 0)
@@ -5718,8 +5720,16 @@ zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
nvlist_free(nvl);
nvlist_free(outnvl);
- if (ret)
- (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
+ if (ret) {
+ if (errno == ENOTSUP) {
+ zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
+ "property not supported for this vdev"));
+ (void) zfs_error(zhp->zpool_hdl, EZFS_PROPTYPE, errbuf);
+ } else {
+ (void) zpool_standard_error(zhp->zpool_hdl, errno,
+ errbuf);
+ }
+ }
return (ret);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index 1ad10ebb3c15..77134d197904 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -2505,7 +2505,7 @@ zfs_send_cb_impl(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
err = ENOENT;
if (sdd.cleanup_fd != -1) {
- VERIFY(0 == close(sdd.cleanup_fd));
+ VERIFY0(close(sdd.cleanup_fd));
sdd.cleanup_fd = -1;
}
@@ -2531,7 +2531,7 @@ err_out:
fnvlist_free(sdd.snapholds);
if (sdd.cleanup_fd != -1)
- VERIFY(0 == close(sdd.cleanup_fd));
+ VERIFY0(close(sdd.cleanup_fd));
return (err);
}
@@ -5108,7 +5108,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
nvlist_t *holds, *errors = NULL;
int cleanup_fd = -1;
- VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
+ VERIFY0(nvlist_alloc(&holds, 0, KM_SLEEP));
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
@@ -5560,7 +5560,7 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
*cp = '\0';
sendfs = nonpackage_sendfs;
- VERIFY(finalsnap == NULL);
+ VERIFY0P(finalsnap);
}
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
index 4edddc2a759b..26f5135dff62 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
@@ -776,6 +776,11 @@ zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
case ZFS_ERR_ASHIFT_MISMATCH:
zfs_verror(hdl, EZFS_ASHIFT_MISMATCH, fmt, ap);
break;
+ case ZFS_ERR_TOO_MANY_SITOUTS:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "too many disks "
+ "already sitting out"));
+ zfs_verror(hdl, EZFS_BUSY, fmt, ap);
+ break;
default:
zfs_error_aux(hdl, "%s", zfs_strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
diff --git a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
index 7464b3adb254..263cad045f7a 100644
--- a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
+++ b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
@@ -617,6 +617,7 @@
<array-type-def dimensions='1' type-id='de572c22' size-in-bits='1472' id='6d3c2f42'>
<subrange length='23' type-id='7359adad' id='fdd0f594'/>
</array-type-def>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<array-type-def dimensions='1' type-id='3a47d82b' size-in-bits='256' id='a133ec23'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
@@ -988,13 +989,6 @@
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='lib/libspl/os/linux/getmntany.c' language='LANG_C99'>
- <array-type-def dimensions='1' type-id='d315442e' size-in-bits='16' id='811205dc'>
- <subrange length='1' type-id='7359adad' id='52f813b4'/>
- </array-type-def>
- <array-type-def dimensions='1' type-id='d3130597' size-in-bits='768' id='f63f23b9'>
- <subrange length='12' type-id='7359adad' id='84827bdc'/>
- </array-type-def>
- <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
@@ -1029,93 +1023,6 @@
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__u16' type-id='8efea9e5' id='d315442e'/>
- <typedef-decl name='__s32' type-id='95e97e5e' id='3158a266'/>
- <typedef-decl name='__u32' type-id='f0981eeb' id='3f1a6b60'/>
- <typedef-decl name='__s64' type-id='1eb56b1e' id='49659421'/>
- <typedef-decl name='__u64' type-id='3a47d82b' id='d3130597'/>
- <class-decl name='statx_timestamp' size-in-bits='128' is-struct='yes' visibility='default' id='94101016'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='49659421' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__reserved' type-id='3158a266' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='statx' size-in-bits='2048' is-struct='yes' visibility='default' id='720b04c5'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='stx_mask' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='stx_blksize' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='stx_attributes' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='stx_nlink' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='stx_uid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='stx_gid' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='stx_mode' type-id='d315442e' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='__spare0' type-id='811205dc' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='stx_ino' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='stx_size' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='stx_blocks' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='stx_attributes_mask' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='stx_atime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='stx_btime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='stx_ctime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='stx_mtime' type-id='94101016' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='stx_rdev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1056'>
- <var-decl name='stx_rdev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1088'>
- <var-decl name='stx_dev_major' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1120'>
- <var-decl name='stx_dev_minor' type-id='3f1a6b60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='stx_mnt_id' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__spare2' type-id='d3130597' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__spare3' type-id='f63f23b9' visibility='default'/>
- </data-member>
- </class-decl>
<class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='56fe4a37'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_fsname' type-id='26a90f95' visibility='default'/>
@@ -1191,8 +1098,6 @@
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<qualified-type-def type-id='62f7a03d' restrict='yes' id='f1cadedf'/>
- <pointer-type-def type-id='720b04c5' size-in-bits='64' id='936b8e35'/>
- <qualified-type-def type-id='936b8e35' restrict='yes' id='31d265b7'/>
<function-decl name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='3cad23cd'/>
@@ -1208,14 +1113,6 @@
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
- <function-decl name='statx' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='9d26089a'/>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='f0981eeb'/>
- <parameter type-id='31d265b7'/>
- <return type-id='95e97e5e'/>
- </function-decl>
<function-decl name='stat64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='f1cadedf'/>
diff --git a/sys/contrib/openzfs/lib/libzpool/abd_os.c b/sys/contrib/openzfs/lib/libzpool/abd_os.c
index 0d5795de143a..8bd7a64ab24a 100644
--- a/sys/contrib/openzfs/lib/libzpool/abd_os.c
+++ b/sys/contrib/openzfs/lib/libzpool/abd_os.c
@@ -302,7 +302,7 @@ abd_iter_at_end(struct abd_iter *aiter)
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
if (abd_iter_at_end(aiter))
@@ -315,7 +315,7 @@ abd_iter_advance(struct abd_iter *aiter, size_t amount)
void
abd_iter_map(struct abd_iter *aiter)
{
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
if (abd_iter_at_end(aiter))
diff --git a/sys/contrib/openzfs/lib/libzpool/kernel.c b/sys/contrib/openzfs/lib/libzpool/kernel.c
index 2e8bf160465a..8ed374627264 100644
--- a/sys/contrib/openzfs/lib/libzpool/kernel.c
+++ b/sys/contrib/openzfs/lib/libzpool/kernel.c
@@ -38,6 +38,7 @@
#include <sys/processor.h>
#include <sys/rrwlock.h>
#include <sys/spa.h>
+#include <sys/spa_impl.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/time.h>
@@ -369,7 +370,7 @@ cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
if (delta <= 0)
return (-1);
- VERIFY(gettimeofday(&tv, NULL) == 0);
+ VERIFY0(gettimeofday(&tv, NULL));
ts.tv_sec = tv.tv_sec + delta / hz;
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % hz) * (NANOSEC / hz);
@@ -811,6 +812,79 @@ umem_out_of_memory(void)
return (0);
}
+static void
+spa_config_load(void)
+{
+ void *buf = NULL;
+ nvlist_t *nvlist, *child;
+ nvpair_t *nvpair;
+ char *pathname;
+ zfs_file_t *fp;
+ zfs_file_attr_t zfa;
+ uint64_t fsize;
+ int err;
+
+ /*
+ * Open the configuration file.
+ */
+ pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+
+ (void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);
+
+ err = zfs_file_open(pathname, O_RDONLY, 0, &fp);
+ if (err)
+ err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
+
+ kmem_free(pathname, MAXPATHLEN);
+
+ if (err)
+ return;
+
+ if (zfs_file_getattr(fp, &zfa))
+ goto out;
+
+ fsize = zfa.zfa_size;
+ buf = kmem_alloc(fsize, KM_SLEEP);
+
+ /*
+ * Read the nvlist from the file.
+ */
+ if (zfs_file_read(fp, buf, fsize, NULL) < 0)
+ goto out;
+
+ /*
+ * Unpack the nvlist.
+ */
+ if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
+ goto out;
+
+ /*
+ * Iterate over all elements in the nvlist, creating a new spa_t for
+ * each one with the specified configuration.
+ */
+ mutex_enter(&spa_namespace_lock);
+ nvpair = NULL;
+ while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
+ if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
+ continue;
+
+ child = fnvpair_value_nvlist(nvpair);
+
+ if (spa_lookup(nvpair_name(nvpair)) != NULL)
+ continue;
+ (void) spa_add(nvpair_name(nvpair), child, NULL);
+ }
+ mutex_exit(&spa_namespace_lock);
+
+ nvlist_free(nvlist);
+
+out:
+ if (buf != NULL)
+ kmem_free(buf, fsize);
+
+ zfs_file_close(fp);
+}
+
void
kernel_init(int mode)
{
@@ -835,6 +909,7 @@ kernel_init(int mode)
zstd_init();
spa_init((spa_mode_t)mode);
+ spa_config_load();
fletcher_4_init();
@@ -1163,7 +1238,7 @@ zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
*/
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf,
- size_t count, loff_t pos, ssize_t *resid)
+ size_t count, loff_t pos, uint8_t ashift, ssize_t *resid)
{
ssize_t rc, split, done;
int sectors;
@@ -1173,8 +1248,8 @@ zfs_file_pwrite(zfs_file_t *fp, const void *buf,
* system calls so that the process can be killed in between.
* This is used by ztest to simulate realistic failure modes.
*/
- sectors = count >> SPA_MINBLOCKSHIFT;
- split = (sectors > 0 ? rand() % sectors : 0) << SPA_MINBLOCKSHIFT;
+ sectors = count >> ashift;
+ split = (sectors > 0 ? rand() % sectors : 0) << ashift;
rc = pwrite64(fp->f_fd, buf, split, pos);
if (rc != -1) {
done = rc;
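The zfs_file_pwrite() change above sizes the simulated partial-write boundary by the caller-supplied ashift instead of fixed SPA_MINBLOCKSHIFT (512-byte) sectors. A standalone sketch of that split calculation (ashift = 12, i.e. 4 KiB sectors, is just an example value):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Pick a sector-aligned split point inside a count-byte write, as the
 * patched zfs_file_pwrite() does when simulating a process killed
 * between two pwrite() system calls.
 */
static size_t
pick_split(size_t count, uint8_t ashift)
{
	int sectors = count >> ashift;	/* whole sectors in this write */

	/* zero when the write is smaller than a single sector */
	return ((size_t)(sectors > 0 ? rand() % sectors : 0) << ashift);
}

int
main(void)
{
	/* 128 KiB write, 4 KiB sectors: split is one of 0, 4K, ..., 124K */
	printf("%zu\n", pick_split(128 << 10, 12));
	return (0);
}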
diff --git a/sys/contrib/openzfs/lib/libzpool/util.c b/sys/contrib/openzfs/lib/libzpool/util.c
index 1d0d1a1e56d9..66d6f43967d5 100644
--- a/sys/contrib/openzfs/lib/libzpool/util.c
+++ b/sys/contrib/openzfs/lib/libzpool/util.c
@@ -137,12 +137,10 @@ show_pool_stats(spa_t *spa)
nvlist_t *config, *nvroot;
const char *name;
- VERIFY(spa_get_stats(spa_name(spa), &config, NULL, 0) == 0);
+ VERIFY0(spa_get_stats(spa_name(spa), &config, NULL, 0));
- VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
- VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
- &name) == 0);
+ VERIFY0(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot));
+ VERIFY0(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, &name));
show_vdev_stats(name, ZPOOL_CONFIG_CHILDREN, nvroot, 0);
show_vdev_stats(NULL, ZPOOL_CONFIG_L2CACHE, nvroot, 0);
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_import.c b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
index ccdc874076c3..08367f4c064d 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_import.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
@@ -917,7 +917,7 @@ error:
static uint64_t
label_offset(uint64_t size, int l)
{
- ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
+ ASSERT0(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t));
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
@@ -1769,7 +1769,7 @@ zpool_find_import_cached(libpc_handle_t *hdl, importargs_t *iarg)
fnvlist_add_nvlist(pools, nvpair_name(pair),
fnvpair_value_nvlist(pair));
- VERIFY3P(nvlist_next_nvpair(nv, pair), ==, NULL);
+ VERIFY0P(nvlist_next_nvpair(nv, pair));
iarg->guid = saved_guid;
iarg->poolname = saved_poolname;
@@ -1903,30 +1903,43 @@ zpool_find_config(libpc_handle_t *hdl, const char *target, nvlist_t **configp,
*sepp = '\0';
pools = zpool_search_import(hdl, args);
+ if (pools == NULL) {
+ zutil_error_aux(hdl, dgettext(TEXT_DOMAIN, "no pools found"));
+ (void) zutil_error_fmt(hdl, LPC_UNKNOWN, dgettext(TEXT_DOMAIN,
+ "failed to find config for pool '%s'"), targetdup);
+ free(targetdup);
+ return (ENOENT);
+ }
- if (pools != NULL) {
- nvpair_t *elem = NULL;
- while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
- VERIFY0(nvpair_value_nvlist(elem, &config));
- if (pool_match(config, targetdup)) {
- count++;
- if (match != NULL) {
- /* multiple matches found */
- continue;
- } else {
- match = fnvlist_dup(config);
- }
+ nvpair_t *elem = NULL;
+ while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
+ VERIFY0(nvpair_value_nvlist(elem, &config));
+ if (pool_match(config, targetdup)) {
+ count++;
+ if (match != NULL) {
+ /* multiple matches found */
+ continue;
+ } else {
+ match = fnvlist_dup(config);
}
}
- fnvlist_free(pools);
}
+ fnvlist_free(pools);
if (count == 0) {
+ zutil_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no matching pools"));
+ (void) zutil_error_fmt(hdl, LPC_UNKNOWN, dgettext(TEXT_DOMAIN,
+ "failed to find config for pool '%s'"), targetdup);
free(targetdup);
return (ENOENT);
}
if (count > 1) {
+ zutil_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "more than one matching pool"));
+ (void) zutil_error_fmt(hdl, LPC_UNKNOWN, dgettext(TEXT_DOMAIN,
+ "failed to find config for pool '%s'"), targetdup);
free(targetdup);
fnvlist_free(match);
return (EINVAL);
diff --git a/sys/contrib/openzfs/man/Makefile.am b/sys/contrib/openzfs/man/Makefile.am
index 6a7b2d3e46b7..7a63641c1c39 100644
--- a/sys/contrib/openzfs/man/Makefile.am
+++ b/sys/contrib/openzfs/man/Makefile.am
@@ -2,7 +2,7 @@ dist_noinst_man_MANS = \
%D%/man1/cstyle.1
dist_man_MANS = \
- %D%/man1/arcstat.1 \
+ %D%/man1/zarcstat.1 \
%D%/man1/raidz_test.1 \
%D%/man1/test-runner.1 \
%D%/man1/zhack.1 \
@@ -124,10 +124,21 @@ dist_noinst_DATA += $(dist_noinst_man_MANS) $(dist_man_MANS)
SUBSTFILES += $(nodist_man_MANS)
-CHECKS += mancheck
-mancheck:
- $(top_srcdir)/scripts/mancheck.sh $(srcdir)/%D%
+MANFILES = $(dist_noinst_man_MANS) $(dist_man_MANS) $(nodist_man_MANS)
+
+PHONY += mancheck
+
+mancheck_verbose = $(mancheck_verbose_@AM_V@)
+mancheck_verbose_ = $(mancheck_verbose_@AM_DEFAULT_V@)
+mancheck_verbose_0 = @echo MANCHECK $(_MTGT);
+_MTGT = $(subst ^,/,$(subst mancheck-,,$@))
+mancheck-%:
+ $(mancheck_verbose)scripts/mancheck.sh $(_MTGT)
+
+mancheck: $(foreach manfile, $(MANFILES), $(addprefix mancheck-,$(subst /,^,$(manfile))))
+
+CHECKS += mancheck
if BUILD_LINUX
# The manual pager in most Linux distros defaults to "BSD" when .Os is blank,
diff --git a/sys/contrib/openzfs/man/man1/cstyle.1 b/sys/contrib/openzfs/man/man1/cstyle.1
index 241c82edd5a8..8f29129ce175 100644
--- a/sys/contrib/openzfs/man/man1/cstyle.1
+++ b/sys/contrib/openzfs/man/man1/cstyle.1
@@ -21,7 +21,7 @@
.\"
.\" CDDL HEADER END
.\"
-.Dd May 26, 2021
+.Dd April 4, 2022
.Dt CSTYLE 1
.Os
.
diff --git a/sys/contrib/openzfs/man/man1/arcstat.1 b/sys/contrib/openzfs/man/man1/zarcstat.1
index f2474fbb701f..3633c5d417fe 100644
--- a/sys/contrib/openzfs/man/man1/arcstat.1
+++ b/sys/contrib/openzfs/man/man1/zarcstat.1
@@ -13,12 +13,12 @@
.\" Copyright (c) 2015 by Delphix. All rights reserved.
.\" Copyright (c) 2020 by AJ Jordan. All rights reserved.
.\"
-.Dd December 23, 2022
-.Dt ARCSTAT 1
+.Dd September 19, 2024
+.Dt ZARCSTAT 1
.Os
.
.Sh NAME
-.Nm arcstat
+.Nm zarcstat
.Nd report ZFS ARC and L2ARC statistics
.Sh SYNOPSIS
.Nm
diff --git a/sys/contrib/openzfs/man/man1/zhack.1 b/sys/contrib/openzfs/man/man1/zhack.1
index f58c0527649b..63658cf930e9 100644
--- a/sys/contrib/openzfs/man/man1/zhack.1
+++ b/sys/contrib/openzfs/man/man1/zhack.1
@@ -23,7 +23,7 @@
.\"
.\" lint-ok: WARNING: sections out of conventional order: Sh SYNOPSIS
.\"
-.Dd May 26, 2021
+.Dd May 3, 2023
.Dt ZHACK 1
.Os
.
@@ -122,6 +122,24 @@ Example:
.Nm zhack Cm label repair Fl cu Ar device
Fix checksums and undetach a device
.
+.It Xo
+.Nm zhack
+.Cm metaslab leak
+.Op Fl f
+.Ar pool
+.Xc
+Apply a fragmentation profile generated by
+.Sy zdb
+to the specified
+.Ar pool Ns
+\&.
+.Pp
+The
+.Fl f
+flag forces the profile to apply even if the vdevs in the
+.Ar pool
+don't have the same number of metaslabs as the fragmentation profile.
+.
.El
.
.Sh GLOBAL OPTIONS
@@ -143,6 +161,8 @@ Search for
members in
.Ar dir .
Can be specified more than once.
+.It Fl o Ar var Ns = Ns Ar value
+Set the given tunable to the provided value.
.El
.
.Sh EXAMPLES
diff --git a/sys/contrib/openzfs/man/man1/ztest.1 b/sys/contrib/openzfs/man/man1/ztest.1
index febbb62b1664..ae857bfea29c 100644
--- a/sys/contrib/openzfs/man/man1/ztest.1
+++ b/sys/contrib/openzfs/man/man1/ztest.1
@@ -24,7 +24,7 @@
.\" reserved.
.\" Copyright (c) 2017, Intel Corporation.
.\"
-.Dd May 26, 2021
+.Dd July 12, 2025
.Dt ZTEST 1
.Os
.
diff --git a/sys/contrib/openzfs/man/man4/spl.4 b/sys/contrib/openzfs/man/man4/spl.4
index 683f8e2b631f..61dfe42e463d 100644
--- a/sys/contrib/openzfs/man/man4/spl.4
+++ b/sys/contrib/openzfs/man/man4/spl.4
@@ -15,7 +15,7 @@
.\"
.\" Copyright 2013 Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\"
-.Dd August 24, 2020
+.Dd May 7, 2025
.Dt SPL 4
.Os
.
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index 4a5f9fd93f4f..11bcbf430210 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -4,6 +4,7 @@
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" Copyright (c) 2023, 2024, 2025, Klara, Inc.
+.\"
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
@@ -17,7 +18,7 @@
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
-.Dd May 29, 2025
+.Dd September 15, 2025
.Dt ZFS 4
.Os
.
@@ -601,6 +602,42 @@ new format when enabling the
feature.
The default is to convert all log entries.
.
+.It Sy vdev_read_sit_out_secs Ns = Ns Sy 600 Ns s Po 10 min Pc Pq ulong
+When a slow disk outlier is detected, it is placed in a sit-out state.
+While sitting out, the disk will not participate in normal reads; instead, its
+data will be reconstructed as needed from parity.
+Scrub operations will always read from a disk, even if it's sitting out.
+A number of disks in a RAID-Z or dRAID vdev may sit out at the same time, up
+to the number of parity devices.
+Writes will still be issued to a disk which is sitting out to maintain full
+redundancy.
+Defaults to 600 seconds; a value of zero disables disk sit-outs entirely,
+including slow disk outlier detection.
+.
+.It Sy vdev_raidz_outlier_check_interval_ms Ns = Ns Sy 1000 Ns ms Po 1 sec Pc Pq ulong
+How often each RAID-Z and dRAID vdev will check for slow disk outliers.
+Increasing this interval will reduce the sensitivity of detection (since all
+I/Os since the last check are included in the statistics), but will slow the
+response to a disk developing a problem.
+Defaults to once per second; setting extremely small values may cause negative
+performance effects.
+.
+.It Sy vdev_raidz_outlier_insensitivity Ns = Ns Sy 50 Pq uint
+When performing slow outlier checks for RAID-Z and dRAID vdevs, this value is
+used to determine how far out an outlier must be before it counts as an event
+worth considering.
+This is phrased as "insensitivity" because larger values result in fewer
+detections.
+Smaller values will result in more aggressive sitting out of disks that may have
+problems, but may significantly increase the rate of spurious sit-outs.
+.Pp
+To provide a more technical definition of this parameter, this is the multiple
+of the inter-quartile range (IQR) that is being used in a Tukey's Fence
+detection algorithm.
+This is much higher than a normal Tukey's Fence k-value, because the
+distribution under consideration is probably an extreme-value distribution,
+rather than a more typical Gaussian distribution.
+.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
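To make the vdev_raidz_outlier_insensitivity description above concrete, here is a minimal numeric sketch of an upper Tukey's fence with multiplier k; how the kernel actually gathers per-child latency quartiles is not shown, and the sample values are assumptions:

#include <stdbool.h>
#include <stdint.h>

/*
 * Upper Tukey's fence: a sample is an outlier when it exceeds
 * Q3 + k * IQR, where IQR = Q3 - Q1 and k is the "insensitivity".
 */
static bool
is_upper_outlier(uint64_t lat_us, uint64_t q1_us, uint64_t q3_us, uint64_t k)
{
	return (lat_us > q3_us + k * (q3_us - q1_us));
}

/*
 * With the default k = 50 and assumed quartiles of 1000 us and 2000 us,
 * the fence sits at 2000 + 50 * 1000 = 52000 us, so only a disk whose
 * latency is roughly 26x its peers' Q3 is flagged as an outlier.
 */
int
main(void)
{
	return (is_upper_outlier(60000, 1000, 2000, 50) ? 0 : 1);
}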
@@ -941,10 +978,6 @@ The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
-.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
-Disable pool import at module load by ignoring the cache file
-.Pq Sy spa_config_path .
-.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
@@ -2550,6 +2583,49 @@ the xattr so as to not accumulate duplicates.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
+.It Sy zfs_delete_inode Ns = Ns Sy 0 Ns | Ns 1 Pq int
+Sets whether the kernel should free an inode structure when the last reference
+is released, or cache it in memory.
+Intended for testing/debugging.
+.Pp
+A live inode structure "pins" various internal OpenZFS structures in memory,
+which can result in large amounts of "unusable" memory on systems with lots of
+infrequently-accessed files, until the kernel's memory pressure mechanism
+asks OpenZFS to release them.
+.Pp
+The default value of
+.Sy 0
+always caches inodes that appear to still exist on disk.
+Setting it to
+.Sy 1
+will immediately release unused inodes and their associated memory back to the
+dbuf cache or the ARC for reuse, but may reduce performance if inodes are
+frequently evicted and reloaded.
+.Pp
+This parameter is only available on Linux.
+.
+.It Sy zfs_delete_dentry Ns = Ns Sy 0 Ns | Ns 1 Pq int
+Sets whether the kernel should free a dentry structure when it is no longer
+required, or hold it in the dentry cache.
+Intended for testing/debugging.
+.Pp
+Since a dentry structure holds an inode reference, a cached dentry can "pin"
+an inode in memory indefinitely, along with associated OpenZFS structures (see
+.Sy zfs_delete_inode ) .
+.Pp
+The default value of
+.Sy 0
+instructs the kernel to cache dentries and their associated inodes when they
+are no longer directly referenced.
+They will be reclaimed as part of the kernel's normal cache management
+processes.
+Setting it to
+.Sy 1
+will instruct the kernel to release directory entries and their inodes as soon
+as they are no longer referenced by the filesystem.
+.Pp
+This parameter is only available on Linux.
+.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression, encryption,
diff --git a/sys/contrib/openzfs/man/man5/vdev_id.conf.5 b/sys/contrib/openzfs/man/man5/vdev_id.conf.5
index d2f817631c15..299a23720201 100644
--- a/sys/contrib/openzfs/man/man5/vdev_id.conf.5
+++ b/sys/contrib/openzfs/man/man5/vdev_id.conf.5
@@ -9,7 +9,7 @@
.\" source. A copy of the CDDL is also available via the Internet at
.\" http://www.illumos.org/license/CDDL.
.\"
-.Dd May 26, 2021
+.Dd October 8, 2024
.Dt VDEV_ID.CONF 5
.Os
.
diff --git a/sys/contrib/openzfs/man/man7/dracut.zfs.7 b/sys/contrib/openzfs/man/man7/dracut.zfs.7
index fb5da553af6e..3d051d4d3343 100644
--- a/sys/contrib/openzfs/man/man7/dracut.zfs.7
+++ b/sys/contrib/openzfs/man/man7/dracut.zfs.7
@@ -1,7 +1,7 @@
.\" SPDX-License-Identifier: CDDL-1.0
.\" SPDX-License-Identifier: 0BSD
.\"
-.Dd March 28, 2023
+.Dd July 13, 2024
.Dt DRACUT.ZFS 7
.Os
.
diff --git a/sys/contrib/openzfs/man/man7/vdevprops.7 b/sys/contrib/openzfs/man/man7/vdevprops.7
index acabe6b6613a..0fb28d7db13c 100644
--- a/sys/contrib/openzfs/man/man7/vdevprops.7
+++ b/sys/contrib/openzfs/man/man7/vdevprops.7
@@ -19,9 +19,9 @@
.\"
.\" CDDL HEADER END
.\"
-.\" Copyright (c) 2021 Klara, Inc.
+.\" Copyright (c) 2021, 2025, Klara, Inc.
.\"
-.Dd October 30, 2022
+.Dd July 23, 2024
.Dt VDEVPROPS 7
.Os
.
@@ -106,11 +106,17 @@ The number of children belonging to this vdev
.It Sy read_errors , write_errors , checksum_errors , initialize_errors , trim_errors
The number of errors of each type encountered by this vdev
.It Sy slow_ios
-The number of slow I/Os encountered by this vdev,
-These represent I/O operations that didn't complete in
+This indicates the number of slow I/O operations encountered by this vdev.
+A slow I/O is defined as an operation that did not complete within the
.Sy zio_slow_io_ms
-milliseconds
+threshold in milliseconds
.Pq Sy 30000 No by default .
+For
+.Sy RAIDZ
+and
+.Sy DRAID
+configurations, this value also represents the number of times the vdev was
+identified as an outlier and excluded from participating in read I/O operations.
.It Sy null_ops , read_ops , write_ops , free_ops , claim_ops , trim_ops
The number of I/O operations of each type performed by this vdev
.It Xo
@@ -150,6 +156,31 @@ The amount of space to reserve for the EFI system partition
.It Sy failfast
If this device should propagate BIO errors back to ZFS, used to disable
failfast.
+.It Sy sit_out
+Only valid for
+.Sy RAIDZ
+and
+.Sy DRAID
+vdevs.
+True when a slow disk outlier was detected and the vdev is currently in a
+sit-out state.
+This property can be manually set to cause vdevs to sit out.
+It will also be automatically set by the
+.Sy autosit
+logic if that is enabled.
+While sitting out, the vdev will not participate in normal reads; instead, its
+data will be reconstructed as needed from parity.
+.It Sy autosit
+Only valid for
+.Sy RAIDZ
+and
+.Sy DRAID
+vdevs.
+If set, this enables the kernel-level slow disk detection logic.
+This logic automatically causes any vdevs that are significant negative
+performance outliers to sit out, as described in the
+.Sy sit_out
+property.
.It Sy path
The path to the device for this vdev
.It Sy allocating
diff --git a/sys/contrib/openzfs/man/man7/zfsconcepts.7 b/sys/contrib/openzfs/man/man7/zfsconcepts.7
index 5c736e53670d..bb2178d85bcd 100644
--- a/sys/contrib/openzfs/man/man7/zfsconcepts.7
+++ b/sys/contrib/openzfs/man/man7/zfsconcepts.7
@@ -31,7 +31,7 @@
.\" Copyright 2019 Joyent, Inc.
.\" Copyright 2023 Klara, Inc.
.\"
-.Dd October 6, 2023
+.Dd October 2, 2024
.Dt ZFSCONCEPTS 7
.Os
.
diff --git a/sys/contrib/openzfs/man/man7/zfsprops.7 b/sys/contrib/openzfs/man/man7/zfsprops.7
index ac3152cb5d51..77e994b912b6 100644
--- a/sys/contrib/openzfs/man/man7/zfsprops.7
+++ b/sys/contrib/openzfs/man/man7/zfsprops.7
@@ -39,7 +39,7 @@
.\" Copyright (c) 2019, Kjeld Schouten-Lebbing
.\" Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
.\"
-.Dd June 29, 2024
+.Dd September 13, 2025
.Dt ZFSPROPS 7
.Os
.
@@ -1192,18 +1192,26 @@ keylocation can be with either
.Nm zfs Cm set
or
.Nm zfs Cm change-key .
+.Pp
If
.Sy prompt
-is selected ZFS will ask for the key at the command prompt when it is required
-to access the encrypted data (see
+is selected, ZFS will expect the key to be provided when it is required to
+access the encrypted data (see
.Nm zfs Cm load-key
for details).
-This setting will also allow the key to be passed in via the standard input
-stream,
-but users should be careful not to place keys which should be kept secret on
-the command line.
-If a file URI is selected, the key will be loaded from the
+If stdin is a TTY, then ZFS will ask for the key to be provided.
+Otherwise, stdin is expected to be the key to use and will be processed as such.
+Users should be careful not to place keys which should be kept secret on the
+command line, as most operating systems may expose command line arguments to
+other processes.
+If the
+.Dq raw
+.Sy keyformat
+was used, then the key must be provided via stdin.
+.Pp
+If a file URL is selected, the key will be loaded from the
specified absolute file path.
+.Pp
If an HTTPS or HTTP URL is selected, it will be GETted using
.Xr fetch 3 ,
libcurl, or nothing, depending on compile-time configuration and run-time
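The prompt-versus-stdin behavior described above can be summarized in a short sketch (this illustrates the documented semantics and is not the actual libzfs key-loading code):

#include <stdio.h>
#include <unistd.h>

/*
 * keylocation=prompt: ask interactively only when stdin is a terminal;
 * otherwise the key material is read directly from stdin.
 */
static void
maybe_prompt_for_key(void)
{
	if (isatty(STDIN_FILENO))
		fprintf(stderr, "Enter passphrase: ");
	/* ... the key is then read from stdin in either case ... */
}

int
main(void)
{
	maybe_prompt_for_key();
	return (0);
}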
diff --git a/sys/contrib/openzfs/man/man7/zpool-features.7 b/sys/contrib/openzfs/man/man7/zpool-features.7
index 10dfd1f92936..b4404a6eb58d 100644
--- a/sys/contrib/openzfs/man/man7/zpool-features.7
+++ b/sys/contrib/openzfs/man/man7/zpool-features.7
@@ -19,7 +19,7 @@
.\" Copyright (c) 2019, Allan Jude
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
-.Dd October 2, 2024
+.Dd July 23, 2025
.Dt ZPOOL-FEATURES 7
.Os
.
diff --git a/sys/contrib/openzfs/man/man7/zpoolconcepts.7 b/sys/contrib/openzfs/man/man7/zpoolconcepts.7
index dafe3bffc453..b9c8926d835d 100644
--- a/sys/contrib/openzfs/man/man7/zpoolconcepts.7
+++ b/sys/contrib/openzfs/man/man7/zpoolconcepts.7
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd April 7, 2023
+.Dd August 6, 2025
.Dt ZPOOLCONCEPTS 7
.Os
.
diff --git a/sys/contrib/openzfs/man/man7/zpoolprops.7 b/sys/contrib/openzfs/man/man7/zpoolprops.7
index 5d84753193ee..d3b4c2376943 100644
--- a/sys/contrib/openzfs/man/man7/zpoolprops.7
+++ b/sys/contrib/openzfs/man/man7/zpoolprops.7
@@ -29,7 +29,7 @@
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\" Copyright (c) 2023, Klara Inc.
.\"
-.Dd November 18, 2024
+.Dd December 4, 2024
.Dt ZPOOLPROPS 7
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zdb.8 b/sys/contrib/openzfs/man/man8/zdb.8
index 3984aaac5866..c3290ea14769 100644
--- a/sys/contrib/openzfs/man/man8/zdb.8
+++ b/sys/contrib/openzfs/man/man8/zdb.8
@@ -15,7 +15,7 @@
.\" Copyright (c) 2017 Lawrence Livermore National Security, LLC.
.\" Copyright (c) 2017 Intel Corporation.
.\"
-.Dd October 27, 2024
+.Dd August 12, 2025
.Dt ZDB 8
.Os
.
@@ -69,6 +69,13 @@
.Op Fl U Ar cache
.Ar poolname Op Ar vdev Oo Ar metaslab Oc Ns …
.Nm
+.Fl -allocated-map
+.Op Fl mAFLPXY
+.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
+.Op Fl t Ar txg
+.Op Fl U Ar cache
+.Ar poolname Op Ar vdev Oo Ar metaslab Oc Ns …
+.Nm
.Fl O
.Op Fl K Ar key
.Ar dataset path
@@ -128,6 +135,11 @@ that zdb may interpret inconsistent pool data and behave erratically.
.Sh OPTIONS
Display options:
.Bl -tag -width Ds
+.It Fl Sy -allocated-map
+Prints out a list of all the allocated regions in the pool.
+Primarily intended for use with the
+.Nm zhack metaslab leak
+subcommand.
.It Fl b , -block-stats
Display statistics regarding the number, size
.Pq logical, physical and allocated
@@ -531,6 +543,18 @@ option, with more occurrences enabling more verbosity.
If no options are specified, all information about the named pool will be
displayed at default verbosity.
.
+.Sh EXIT STATUS
+The
+.Nm
+utility exits
+.Sy 0
+on success,
+.Sy 1
+if a fatal error occurs,
+.Sy 2
+if invalid command line options were specified, or
+.Sy 3
+if on-disk corruption was detected, but was not fatal.
.Sh EXAMPLES
.Ss Example 1 : No Display the configuration of imported pool Ar rpool
.Bd -literal
diff --git a/sys/contrib/openzfs/man/man8/zed.8.in b/sys/contrib/openzfs/man/man8/zed.8.in
index c90a1834403b..2d19f2d8496b 100644
--- a/sys/contrib/openzfs/man/man8/zed.8.in
+++ b/sys/contrib/openzfs/man/man8/zed.8.in
@@ -13,7 +13,7 @@
.\"
.\" Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049)
.\"
-.Dd May 26, 2021
+.Dd August 22, 2022
.Dt ZED 8
.Os
.
@@ -158,6 +158,8 @@ Multiple ZEDLETs may be invoked for a given zevent.
ZEDLETs are executables invoked by the ZED in response to a given zevent.
They should be written under the presumption they can be invoked concurrently,
and they should use appropriate locking to access any shared resources.
+The one exception to this is "synchronous zedlets", which are described later
+in this page.
Common variables used by ZEDLETs can be stored in the default rc file which
is sourced by scripts; these variables should be prefixed with
.Sy ZED_ .
@@ -233,6 +235,36 @@ and
.Sy ZPOOL .
These variables may be overridden in the rc file.
.
+.Sh Synchronous ZEDLETS
+ZED's normal behavior is to spawn off zedlets in parallel and ignore their
+completion order.
+This means that ZED can potentially
+have zedlets for event ID number 2 starting before zedlets for event ID number
+1 have finished.
+Most of the time this is fine, and it actually helps when the system is getting
+hammered with hundreds of events.
+.Pp
+However, there are times when you want your zedlets to be executed in sequence
+with the event ID.
+That is where synchronous zedlets come in.
+.Pp
+ZED will wait for all previously spawned zedlets to finish before running
+a synchronous zedlet.
+A synchronous zedlet is guaranteed to be the only zedlet running.
+No other zedlets may run in parallel with a synchronous zedlet.
+Users should be careful to only use synchronous zedlets when needed, since
+they decrease parallelism.
+.Pp
+To make a zedlet synchronous, simply add a "-sync-" immediately following the
+event name in the zedlet's file name:
+.Pp
+.Sy EVENT_NAME-sync-ZEDLETNAME.sh
+.Pp
+For example, if you wanted a synchronous statechange script:
+.Pp
+.Sy statechange-sync-myzedlet.sh
+.
.Sh FILES
.Bl -tag -width "-c"
.It Pa @sysconfdir@/zfs/zed.d
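For illustration of the synchronous-zedlet naming convention documented above (how ZED actually parses zedlet filenames is an assumption here), a filename requests synchronous execution when "-sync-" immediately follows the event name:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch: true for names of the form EVENT_NAME-sync-ZEDLETNAME.sh,
 * e.g. zedlet_is_sync("statechange-sync-myzedlet.sh", "statechange").
 */
static bool
zedlet_is_sync(const char *filename, const char *event)
{
	size_t elen = strlen(event);

	return (strncmp(filename, event, elen) == 0 &&
	    strncmp(filename + elen, "-sync-", 6) == 0);
}

int
main(void)
{
	printf("%d\n", zedlet_is_sync("statechange-sync-myzedlet.sh",
	    "statechange"));
	return (0);
}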
diff --git a/sys/contrib/openzfs/man/man8/zfs-allow.8 b/sys/contrib/openzfs/man/man8/zfs-allow.8
index 5a8e80bf6a43..e3b0e1ab3e12 100644
--- a/sys/contrib/openzfs/man/man8/zfs-allow.8
+++ b/sys/contrib/openzfs/man/man8/zfs-allow.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd September 8, 2025
.Dt ZFS-ALLOW 8
.Os
.
@@ -212,7 +212,8 @@ receive subcommand Must also have the \fBmount\fR and \fBcreate\fR ability, requ
release subcommand Allows releasing a user hold which might destroy the snapshot
rename subcommand Must also have the \fBmount\fR and \fBcreate\fR ability in the new parent
rollback subcommand Must also have the \fBmount\fR ability
-send subcommand
+send subcommand Allows sending a replication stream of a dataset.
+send:raw subcommand Only allows sending raw replication streams, preventing encrypted datasets being sent in decrypted form.
share subcommand Allows sharing file systems over NFS or SMB protocols
snapshot subcommand Must also have the \fBmount\fR ability
diff --git a/sys/contrib/openzfs/man/man8/zfs-bookmark.8 b/sys/contrib/openzfs/man/man8/zfs-bookmark.8
index 083ff46d241b..5a0933820020 100644
--- a/sys/contrib/openzfs/man/man8/zfs-bookmark.8
+++ b/sys/contrib/openzfs/man/man8/zfs-bookmark.8
@@ -31,7 +31,7 @@
.\" Copyright 2019 Joyent, Inc.
.\" Copyright (c) 2019, 2020 by Christian Schwarz. All Rights Reserved.
.\"
-.Dd May 12, 2022
+.Dd July 11, 2022
.Dt ZFS-BOOKMARK 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-clone.8 b/sys/contrib/openzfs/man/man8/zfs-clone.8
index cd412815f5fe..9609cf2ce36a 100644
--- a/sys/contrib/openzfs/man/man8/zfs-clone.8
+++ b/sys/contrib/openzfs/man/man8/zfs-clone.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZFS-CLONE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-create.8 b/sys/contrib/openzfs/man/man8/zfs-create.8
index 91878056cc7d..58bde5799240 100644
--- a/sys/contrib/openzfs/man/man8/zfs-create.8
+++ b/sys/contrib/openzfs/man/man8/zfs-create.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd June 2, 2023
.Dt ZFS-CREATE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-destroy.8 b/sys/contrib/openzfs/man/man8/zfs-destroy.8
index 38359be02430..6a6791f7a44e 100644
--- a/sys/contrib/openzfs/man/man8/zfs-destroy.8
+++ b/sys/contrib/openzfs/man/man8/zfs-destroy.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd February 5, 2025
.Dt ZFS-DESTROY 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-diff.8 b/sys/contrib/openzfs/man/man8/zfs-diff.8
index d4c48f4109be..5b94ea524666 100644
--- a/sys/contrib/openzfs/man/man8/zfs-diff.8
+++ b/sys/contrib/openzfs/man/man8/zfs-diff.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZFS-DIFF 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-hold.8 b/sys/contrib/openzfs/man/man8/zfs-hold.8
index 0c88937f0dc8..a877e428f88b 100644
--- a/sys/contrib/openzfs/man/man8/zfs-hold.8
+++ b/sys/contrib/openzfs/man/man8/zfs-hold.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd June 30, 2019
+.Dd November 8, 2022
.Dt ZFS-HOLD 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-jail.8 b/sys/contrib/openzfs/man/man8/zfs-jail.8
index 53499a279d05..569f5f57eab4 100644
--- a/sys/contrib/openzfs/man/man8/zfs-jail.8
+++ b/sys/contrib/openzfs/man/man8/zfs-jail.8
@@ -37,7 +37,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd May 27, 2021
+.Dd July 11, 2022
.Dt ZFS-JAIL 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-list.8 b/sys/contrib/openzfs/man/man8/zfs-list.8
index 677d8292e207..42eff94f9762 100644
--- a/sys/contrib/openzfs/man/man8/zfs-list.8
+++ b/sys/contrib/openzfs/man/man8/zfs-list.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd February 8, 2024
+.Dd August 25, 2025
.Dt ZFS-LIST 8
.Os
.
@@ -50,27 +50,25 @@
.Oo Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Oc Ns …
.
.Sh DESCRIPTION
-If specified, you can list property information by the absolute pathname or the
-relative pathname.
-By default, all file systems and volumes are displayed.
+By default, all file systems and volumes are displayed, with the following
+fields:
+.Sy name , Sy used , Sy available , Sy referenced , Sy mountpoint .
Snapshots are displayed if the
.Sy listsnapshots
pool property is
.Sy on
.Po the default is
.Sy off
-.Pc ,
+.Pc
or if the
.Fl t Sy snapshot
or
.Fl t Sy all
options are specified.
-The following fields are displayed:
-.Sy name , Sy used , Sy available , Sy referenced , Sy mountpoint .
.Bl -tag -width "-H"
.It Fl H
Used for scripting mode.
-Do not print headers and separate fields by a single tab instead of arbitrary
+Do not print headers, and separate fields by a single tab instead of arbitrary
white space.
.It Fl j , -json Op Ar --json-int
Print the output in JSON format.
@@ -87,7 +85,7 @@ of
will display only the dataset and its direct children.
.It Fl o Ar property
A comma-separated list of properties to display.
-The property must be:
+Each property must be:
.Bl -bullet -compact
.It
One of the properties described in the
@@ -125,30 +123,41 @@ section of
or the value
.Sy name
to sort by the dataset name.
-Multiple properties can be specified at one time using multiple
+Multiple properties can be specified together using multiple
.Fl s
-property options.
+or
+.Fl S
+options.
Multiple
.Fl s
-options are evaluated from left to right in decreasing order of importance.
-The following is a list of sorting criteria:
+and
+.Fl S
+options are evaluated from left to right to supply sort keys in
+decreasing order of priority.
+Property types operate as follows:
.Bl -bullet -compact
.It
Numeric types sort in numeric order.
.It
String types sort in alphabetical order.
.It
-Types inappropriate for a row sort that row to the literal bottom, regardless of
-the specified ordering.
+Types inappropriate for a row sort that row to the literal bottom,
+regardless of the specified ordering.
.El
.Pp
-If no sorting options are specified the existing behavior of
-.Nm zfs Cm list
-is preserved.
+If no sort columns are specified, or if two lines of output would sort
+equally across all specified columns, then datasets and bookmarks are
+sorted by name, whereas snapshots are sorted first by the name of their
+dataset and then by the time of their creation.
+When no sort columns are specified but snapshots are listed, this
+default behavior causes snapshots to be grouped under their datasets in
+chronological order by creation time.
.It Fl S Ar property
Same as
.Fl s ,
-but sorts by property in descending order.
+but sorts by
+.Ar property
+in descending order.
.It Fl t Ar type
A comma-separated list of types to display, where
.Ar type
diff --git a/sys/contrib/openzfs/man/man8/zfs-load-key.8 b/sys/contrib/openzfs/man/man8/zfs-load-key.8
index 7838c46d9e77..3a11cea99fd6 100644
--- a/sys/contrib/openzfs/man/man8/zfs-load-key.8
+++ b/sys/contrib/openzfs/man/man8/zfs-load-key.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd January 13, 2020
+.Dd July 11, 2022
.Dt ZFS-LOAD-KEY 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-mount-generator.8.in b/sys/contrib/openzfs/man/man8/zfs-mount-generator.8.in
index ea470247daac..9e44ea30c636 100644
--- a/sys/contrib/openzfs/man/man8/zfs-mount-generator.8.in
+++ b/sys/contrib/openzfs/man/man8/zfs-mount-generator.8.in
@@ -23,7 +23,7 @@
.\" OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
.\" WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.\"
-.Dd May 31, 2021
+.Dd November 30, 2021
.Dt ZFS-MOUNT-GENERATOR 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-mount.8 b/sys/contrib/openzfs/man/man8/zfs-mount.8
index 9fca6fffd5bb..2689b6dc345b 100644
--- a/sys/contrib/openzfs/man/man8/zfs-mount.8
+++ b/sys/contrib/openzfs/man/man8/zfs-mount.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd February 16, 2019
+.Dd October 12, 2024
.Dt ZFS-MOUNT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-project.8 b/sys/contrib/openzfs/man/man8/zfs-project.8
index 36547680f53e..4ebfdf6ffe4f 100644
--- a/sys/contrib/openzfs/man/man8/zfs-project.8
+++ b/sys/contrib/openzfs/man/man8/zfs-project.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd May 27, 2021
+.Dd July 11, 2022
.Dt ZFS-PROJECT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-promote.8 b/sys/contrib/openzfs/man/man8/zfs-promote.8
index 767045812607..435a7a5d0144 100644
--- a/sys/contrib/openzfs/man/man8/zfs-promote.8
+++ b/sys/contrib/openzfs/man/man8/zfs-promote.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZFS-PROMOTE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-rename.8 b/sys/contrib/openzfs/man/man8/zfs-rename.8
index 4cf192c0682b..8fedc67469e6 100644
--- a/sys/contrib/openzfs/man/man8/zfs-rename.8
+++ b/sys/contrib/openzfs/man/man8/zfs-rename.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZFS-RENAME 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-rewrite.8 b/sys/contrib/openzfs/man/man8/zfs-rewrite.8
index a3a037f3794a..ca5340c7e5eb 100644
--- a/sys/contrib/openzfs/man/man8/zfs-rewrite.8
+++ b/sys/contrib/openzfs/man/man8/zfs-rewrite.8
@@ -21,7 +21,7 @@
.\"
.\" Copyright (c) 2025 iXsystems, Inc.
.\"
-.Dd May 6, 2025
+.Dd July 23, 2025
.Dt ZFS-REWRITE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-send.8 b/sys/contrib/openzfs/man/man8/zfs-send.8
index c920a5a48798..6c5f6b94afd5 100644
--- a/sys/contrib/openzfs/man/man8/zfs-send.8
+++ b/sys/contrib/openzfs/man/man8/zfs-send.8
@@ -31,7 +31,7 @@
.\" Copyright 2019 Joyent, Inc.
.\" Copyright (c) 2024, Klara, Inc.
.\"
-.Dd October 2, 2024
+.Dd August 29, 2025
.Dt ZFS-SEND 8
.Os
.
@@ -173,8 +173,10 @@ The receiving system must have the
feature enabled.
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
Datasets that are sent with this flag may not be
received as an encrypted dataset, since encrypted datasets cannot use the
.Sy embedded_data
@@ -201,8 +203,10 @@ property for details
.Pc .
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
@@ -357,8 +361,10 @@ property for details
.Pc .
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
@@ -400,8 +406,10 @@ The receiving system must have the
feature enabled.
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
Datasets that are sent with this flag may not be received as an encrypted
dataset,
since encrypted datasets cannot use the
diff --git a/sys/contrib/openzfs/man/man8/zfs-set.8 b/sys/contrib/openzfs/man/man8/zfs-set.8
index 67f4d6eba171..08daf09d05f8 100644
--- a/sys/contrib/openzfs/man/man8/zfs-set.8
+++ b/sys/contrib/openzfs/man/man8/zfs-set.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd April 20, 2024
+.Dd October 12, 2024
.Dt ZFS-SET 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-share.8 b/sys/contrib/openzfs/man/man8/zfs-share.8
index f7a09a189182..e9c32a44b0c7 100644
--- a/sys/contrib/openzfs/man/man8/zfs-share.8
+++ b/sys/contrib/openzfs/man/man8/zfs-share.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd May 17, 2021
+.Dd July 11, 2022
.Dt ZFS-SHARE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-snapshot.8 b/sys/contrib/openzfs/man/man8/zfs-snapshot.8
index 3ddd1273c8e8..8f4b2c335f09 100644
--- a/sys/contrib/openzfs/man/man8/zfs-snapshot.8
+++ b/sys/contrib/openzfs/man/man8/zfs-snapshot.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZFS-SNAPSHOT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-upgrade.8 b/sys/contrib/openzfs/man/man8/zfs-upgrade.8
index bac74e37aef9..a5ce2b760da4 100644
--- a/sys/contrib/openzfs/man/man8/zfs-upgrade.8
+++ b/sys/contrib/openzfs/man/man8/zfs-upgrade.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd June 30, 2019
+.Dd July 11, 2022
.Dt ZFS-UPGRADE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-userspace.8 b/sys/contrib/openzfs/man/man8/zfs-userspace.8
index d7a4d18e83b1..c255d911740d 100644
--- a/sys/contrib/openzfs/man/man8/zfs-userspace.8
+++ b/sys/contrib/openzfs/man/man8/zfs-userspace.8
@@ -30,7 +30,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd June 30, 2019
+.Dd July 11, 2022
.Dt ZFS-USERSPACE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-wait.8 b/sys/contrib/openzfs/man/man8/zfs-wait.8
index 554a67455c60..e5c60010d2f9 100644
--- a/sys/contrib/openzfs/man/man8/zfs-wait.8
+++ b/sys/contrib/openzfs/man/man8/zfs-wait.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 31, 2021
+.Dd July 11, 2022
.Dt ZFS-WAIT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs-zone.8 b/sys/contrib/openzfs/man/man8/zfs-zone.8
index 7ad0ac89463c..a56a304e82b2 100644
--- a/sys/contrib/openzfs/man/man8/zfs-zone.8
+++ b/sys/contrib/openzfs/man/man8/zfs-zone.8
@@ -38,7 +38,7 @@
.\" Copyright 2019 Joyent, Inc.
.\" Copyright 2021 Klara, Inc.
.\"
-.Dd June 3, 2022
+.Dd July 11, 2022
.Dt ZFS-ZONE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs.8 b/sys/contrib/openzfs/man/man8/zfs.8
index e16a3a82b672..b7566a727469 100644
--- a/sys/contrib/openzfs/man/man8/zfs.8
+++ b/sys/contrib/openzfs/man/man8/zfs.8
@@ -37,7 +37,7 @@
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd April 18, 2025
+.Dd May 12, 2025
.Dt ZFS 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zfs_ids_to_path.8 b/sys/contrib/openzfs/man/man8/zfs_ids_to_path.8
index eef0ce68f17b..465e336d170c 100644
--- a/sys/contrib/openzfs/man/man8/zfs_ids_to_path.8
+++ b/sys/contrib/openzfs/man/man8/zfs_ids_to_path.8
@@ -21,7 +21,7 @@
.\"
.\" Copyright (c) 2020 by Delphix. All rights reserved.
.\"
-.Dd April 17, 2020
+.Dd July 11, 2022
.Dt ZFS_IDS_TO_PATH 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zgenhostid.8 b/sys/contrib/openzfs/man/man8/zgenhostid.8
index 2b5b4fc18216..ff564880f97d 100644
--- a/sys/contrib/openzfs/man/man8/zgenhostid.8
+++ b/sys/contrib/openzfs/man/man8/zgenhostid.8
@@ -21,7 +21,7 @@
.\"
.\" Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
.\"
-.Dd May 26, 2021
+.Dd July 11, 2022
.Dt ZGENHOSTID 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zinject.8 b/sys/contrib/openzfs/man/man8/zinject.8
index 1d9e43aed5ec..704f6a7accd8 100644
--- a/sys/contrib/openzfs/man/man8/zinject.8
+++ b/sys/contrib/openzfs/man/man8/zinject.8
@@ -138,6 +138,20 @@ This injector is automatically cleared after the import is finished.
.
.It Xo
.Nm zinject
+.Fl E Ar delay
+.Op Fl a
+.Op Fl m
+.Op Fl f Ar freq
+.Op Fl l Ar level
+.Op Fl r Ar range
+.Op Fl T Ar iotype
+.Op Fl t Ar type Ns | Ns Fl b Ar bookmark
+.Xc
+Inject pipeline ready stage delays for the given object or bookmark.
+The delay is specified in milliseconds.
+.
+.It Xo
+.Nm zinject
.Fl I
.Op Fl s Ar seconds Ns | Ns Fl g Ar txgs
.Ar pool
diff --git a/sys/contrib/openzfs/man/man8/zpool-attach.8 b/sys/contrib/openzfs/man/man8/zpool-attach.8
index 51d876767666..04996ed4fa11 100644
--- a/sys/contrib/openzfs/man/man8/zpool-attach.8
+++ b/sys/contrib/openzfs/man/man8/zpool-attach.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd June 28, 2023
+.Dd November 8, 2023
.Dt ZPOOL-ATTACH 8
.Os
.
@@ -39,24 +39,24 @@
.Cm attach
.Op Fl fsw
.Oo Fl o Ar property Ns = Ns Ar value Oc
-.Ar pool device new_device
+.Ar pool vdev new_device
.
.Sh DESCRIPTION
Attaches
.Ar new_device
to the existing
-.Ar device .
+.Ar vdev .
The behavior differs depending on if the existing
-.Ar device
+.Ar vdev
is a RAID-Z device, or a mirror/plain device.
.Pp
-If the existing device is a mirror or plain device
+If the existing vdev is a mirror or plain device
.Pq e.g. specified as Qo Li sda Qc or Qq Li mirror-7 ,
-the new device will be mirrored with the existing device, a resilver will be
+the new device will be mirrored with the existing vdev, a resilver will be
initiated, and the new device will contribute to additional redundancy once the
resilver completes.
If
-.Ar device
+.Ar vdev
is not currently part of a mirrored configuration,
.Ar device
automatically transforms into a two-way mirror of
@@ -64,7 +64,7 @@ automatically transforms into a two-way mirror of
and
.Ar new_device .
If
-.Ar device
+.Ar vdev
is part of a two-way mirror, attaching
.Ar new_device
creates a three-way mirror, and so on.
@@ -72,7 +72,7 @@ In either case,
.Ar new_device
begins to resilver immediately and any running scrub is canceled.
.Pp
-If the existing device is a RAID-Z device
+If the existing vdev is a RAID-Z device
.Pq e.g. specified as Qq Ar raidz2-0 ,
the new device will become part of that RAID-Z group.
A "raidz expansion" will be initiated, and once the expansion completes,
@@ -112,7 +112,7 @@ the checksums of all blocks which have been copied during the expansion.
Forces use of
.Ar new_device ,
even if it appears to be in use.
-Not all devices can be overridden in this manner.
+Not all vdevs can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
@@ -121,7 +121,7 @@ manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .
.It Fl s
-When attaching to a mirror or plain device, the
+When attaching to a mirror or plain vdev, the
.Ar new_device
is reconstructed sequentially to restore redundancy as quickly as possible.
Checksums are not verified during sequential reconstruction so a scrub is
diff --git a/sys/contrib/openzfs/man/man8/zpool-checkpoint.8 b/sys/contrib/openzfs/man/man8/zpool-checkpoint.8
index d97d10d5df6e..b654f669cfa2 100644
--- a/sys/contrib/openzfs/man/man8/zpool-checkpoint.8
+++ b/sys/contrib/openzfs/man/man8/zpool-checkpoint.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 27, 2021
+.Dd July 11, 2022
.Dt ZPOOL-CHECKPOINT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-clear.8 b/sys/contrib/openzfs/man/man8/zpool-clear.8
index 19cd4be36408..70cd8325bd0e 100644
--- a/sys/contrib/openzfs/man/man8/zpool-clear.8
+++ b/sys/contrib/openzfs/man/man8/zpool-clear.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 27, 2021
+.Dd April 29, 2024
.Dt ZPOOL-CLEAR 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-create.8 b/sys/contrib/openzfs/man/man8/zpool-create.8
index 490c67629a20..a36ae260a158 100644
--- a/sys/contrib/openzfs/man/man8/zpool-create.8
+++ b/sys/contrib/openzfs/man/man8/zpool-create.8
@@ -28,7 +28,7 @@
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZPOOL-CREATE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-destroy.8 b/sys/contrib/openzfs/man/man8/zpool-destroy.8
index f49f29804ad7..82f3f3e203d6 100644
--- a/sys/contrib/openzfs/man/man8/zpool-destroy.8
+++ b/sys/contrib/openzfs/man/man8/zpool-destroy.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZPOOL-DESTROY 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-detach.8 b/sys/contrib/openzfs/man/man8/zpool-detach.8
index ae02dbc2d5b8..79a44310110d 100644
--- a/sys/contrib/openzfs/man/man8/zpool-detach.8
+++ b/sys/contrib/openzfs/man/man8/zpool-detach.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd August 9, 2019
+.Dd July 11, 2022
.Dt ZPOOL-DETACH 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-events.8 b/sys/contrib/openzfs/man/man8/zpool-events.8
index 2d32dce2bb65..36a9864dc73b 100644
--- a/sys/contrib/openzfs/man/man8/zpool-events.8
+++ b/sys/contrib/openzfs/man/man8/zpool-events.8
@@ -190,6 +190,16 @@ Issued when a scrub is resumed on a pool.
.It Sy scrub.paused
Issued when a scrub is paused on a pool.
.It Sy bootfs.vdev.attach
+.It Sy sitout
+Issued when a
+.Sy RAIDZ
+or
+.Sy DRAID
+vdev triggers the
+.Sy autosit
+logic.
+This logic detects when a disk in such a vdev is significantly slower than its
+peers, and sits it out temporarily to preserve the performance of the pool.
.El
.
.Sh PAYLOADS
diff --git a/sys/contrib/openzfs/man/man8/zpool-export.8 b/sys/contrib/openzfs/man/man8/zpool-export.8
index 171a7541c6d2..02495c088f94 100644
--- a/sys/contrib/openzfs/man/man8/zpool-export.8
+++ b/sys/contrib/openzfs/man/man8/zpool-export.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZPOOL-EXPORT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-get.8 b/sys/contrib/openzfs/man/man8/zpool-get.8
index 1d6d1f08afa6..bfe1bae7619f 100644
--- a/sys/contrib/openzfs/man/man8/zpool-get.8
+++ b/sys/contrib/openzfs/man/man8/zpool-get.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd August 9, 2019
+.Dd October 12, 2024
.Dt ZPOOL-GET 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-history.8 b/sys/contrib/openzfs/man/man8/zpool-history.8
index f15086eabc47..f02168951ff2 100644
--- a/sys/contrib/openzfs/man/man8/zpool-history.8
+++ b/sys/contrib/openzfs/man/man8/zpool-history.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd August 9, 2019
+.Dd July 11, 2022
.Dt ZPOOL-HISTORY 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-import.8 b/sys/contrib/openzfs/man/man8/zpool-import.8
index 9076f5c34929..c6d5f222b6b2 100644
--- a/sys/contrib/openzfs/man/man8/zpool-import.8
+++ b/sys/contrib/openzfs/man/man8/zpool-import.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZPOOL-IMPORT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-initialize.8 b/sys/contrib/openzfs/man/man8/zpool-initialize.8
index 39579a58010e..5299a897cb97 100644
--- a/sys/contrib/openzfs/man/man8/zpool-initialize.8
+++ b/sys/contrib/openzfs/man/man8/zpool-initialize.8
@@ -28,7 +28,7 @@
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
.\"
-.Dd May 27, 2021
+.Dd July 30, 2025
.Dt ZPOOL-INITIALIZE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-iostat.8 b/sys/contrib/openzfs/man/man8/zpool-iostat.8
index d8c21d0cfc6c..5dd9c9d55e20 100644
--- a/sys/contrib/openzfs/man/man8/zpool-iostat.8
+++ b/sys/contrib/openzfs/man/man8/zpool-iostat.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd March 16, 2022
+.Dd January 29, 2024
.Dt ZPOOL-IOSTAT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-labelclear.8 b/sys/contrib/openzfs/man/man8/zpool-labelclear.8
index ba3d1509aa75..b807acaaede3 100644
--- a/sys/contrib/openzfs/man/man8/zpool-labelclear.8
+++ b/sys/contrib/openzfs/man/man8/zpool-labelclear.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 31, 2021
+.Dd July 11, 2022
.Dt ZPOOL-LABELCLEAR 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-list.8 b/sys/contrib/openzfs/man/man8/zpool-list.8
index b720e203c1c9..106399941f98 100644
--- a/sys/contrib/openzfs/man/man8/zpool-list.8
+++ b/sys/contrib/openzfs/man/man8/zpool-list.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd March 16, 2022
+.Dd October 12, 2024
.Dt ZPOOL-LIST 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-offline.8 b/sys/contrib/openzfs/man/man8/zpool-offline.8
index 49b1f34ad5d5..388c7634acce 100644
--- a/sys/contrib/openzfs/man/man8/zpool-offline.8
+++ b/sys/contrib/openzfs/man/man8/zpool-offline.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd August 9, 2019
+.Dd December 21, 2023
.Dt ZPOOL-OFFLINE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-reguid.8 b/sys/contrib/openzfs/man/man8/zpool-reguid.8
index 77101fc07326..b98c88e320de 100644
--- a/sys/contrib/openzfs/man/man8/zpool-reguid.8
+++ b/sys/contrib/openzfs/man/man8/zpool-reguid.8
@@ -29,7 +29,7 @@
.\" Copyright (c) 2024, Klara Inc.
.\" Copyright (c) 2024, Mateusz Piotrowski
.\"
-.Dd June 21, 2023
+.Dd August 26, 2024
.Dt ZPOOL-REGUID 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-remove.8 b/sys/contrib/openzfs/man/man8/zpool-remove.8
index d10a92e49bbe..4d5fc431d332 100644
--- a/sys/contrib/openzfs/man/man8/zpool-remove.8
+++ b/sys/contrib/openzfs/man/man8/zpool-remove.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd March 16, 2022
+.Dd November 19, 2024
.Dt ZPOOL-REMOVE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-reopen.8 b/sys/contrib/openzfs/man/man8/zpool-reopen.8
index 594cff3d16d8..c4e10f0a546e 100644
--- a/sys/contrib/openzfs/man/man8/zpool-reopen.8
+++ b/sys/contrib/openzfs/man/man8/zpool-reopen.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd June 2, 2021
+.Dd July 11, 2022
.Dt ZPOOL-REOPEN 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-replace.8 b/sys/contrib/openzfs/man/man8/zpool-replace.8
index 9f3156eeb3ef..651af13b19b8 100644
--- a/sys/contrib/openzfs/man/man8/zpool-replace.8
+++ b/sys/contrib/openzfs/man/man8/zpool-replace.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 29, 2021
+.Dd July 11, 2022
.Dt ZPOOL-REPLACE 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-resilver.8 b/sys/contrib/openzfs/man/man8/zpool-resilver.8
index 2161d77f62ed..59c4be5db209 100644
--- a/sys/contrib/openzfs/man/man8/zpool-resilver.8
+++ b/sys/contrib/openzfs/man/man8/zpool-resilver.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 27, 2021
+.Dd July 11, 2022
.Dt ZPOOL-RESILVER 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-scrub.8 b/sys/contrib/openzfs/man/man8/zpool-scrub.8
index 0ecf8bd3851f..cf7ead5788bf 100644
--- a/sys/contrib/openzfs/man/man8/zpool-scrub.8
+++ b/sys/contrib/openzfs/man/man8/zpool-scrub.8
@@ -28,7 +28,7 @@
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
.\"
-.Dd December 11, 2024
+.Dd August 6, 2025
.Dt ZPOOL-SCRUB 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-split.8 b/sys/contrib/openzfs/man/man8/zpool-split.8
index a67c865cf30c..ee4c6384cf23 100644
--- a/sys/contrib/openzfs/man/man8/zpool-split.8
+++ b/sys/contrib/openzfs/man/man8/zpool-split.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd June 2, 2021
+.Dd July 11, 2022
.Dt ZPOOL-SPLIT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-status.8 b/sys/contrib/openzfs/man/man8/zpool-status.8
index a7f3e088043b..108a1067b384 100644
--- a/sys/contrib/openzfs/man/man8/zpool-status.8
+++ b/sys/contrib/openzfs/man/man8/zpool-status.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd February 14, 2024
+.Dd May 20, 2025
.Dt ZPOOL-STATUS 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-sync.8 b/sys/contrib/openzfs/man/man8/zpool-sync.8
index 8f438f363e83..d1dc05d0c202 100644
--- a/sys/contrib/openzfs/man/man8/zpool-sync.8
+++ b/sys/contrib/openzfs/man/man8/zpool-sync.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd August 9, 2019
+.Dd July 11, 2022
.Dt ZPOOL-SYNC 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-trim.8 b/sys/contrib/openzfs/man/man8/zpool-trim.8
index 18723e1be0d2..c4e849019789 100644
--- a/sys/contrib/openzfs/man/man8/zpool-trim.8
+++ b/sys/contrib/openzfs/man/man8/zpool-trim.8
@@ -28,7 +28,7 @@
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2025 Hewlett Packard Enterprise Development LP.
.\"
-.Dd May 27, 2021
+.Dd July 30, 2025
.Dt ZPOOL-TRIM 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool-upgrade.8 b/sys/contrib/openzfs/man/man8/zpool-upgrade.8
index 20632ae4bba0..adae47f82eb1 100644
--- a/sys/contrib/openzfs/man/man8/zpool-upgrade.8
+++ b/sys/contrib/openzfs/man/man8/zpool-upgrade.8
@@ -28,7 +28,7 @@
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
-.Dd March 16, 2022
+.Dd July 11, 2022
.Dt ZPOOL-UPGRADE 8
.Os
.
@@ -65,10 +65,10 @@ property).
.Cm upgrade
.Fl v
.Xc
-Displays legacy ZFS versions supported by the this version of ZFS.
+Displays legacy ZFS versions supported by this version of ZFS.
See
.Xr zpool-features 7
-for a description of feature flags features supported by this version of ZFS.
+for a description of features supported by this version of ZFS.
.It Xo
.Nm zpool
.Cm upgrade
diff --git a/sys/contrib/openzfs/man/man8/zpool-wait.8 b/sys/contrib/openzfs/man/man8/zpool-wait.8
index 0ffb4badfb7b..28a51d29a913 100644
--- a/sys/contrib/openzfs/man/man8/zpool-wait.8
+++ b/sys/contrib/openzfs/man/man8/zpool-wait.8
@@ -28,7 +28,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd May 27, 2021
+.Dd January 29, 2024
.Dt ZPOOL-WAIT 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zpool.8 b/sys/contrib/openzfs/man/man8/zpool.8
index b96944050594..3bfef780b298 100644
--- a/sys/contrib/openzfs/man/man8/zpool.8
+++ b/sys/contrib/openzfs/man/man8/zpool.8
@@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
-.Dd February 14, 2024
+.Dd November 19, 2024
.Dt ZPOOL 8
.Os
.
diff --git a/sys/contrib/openzfs/man/man8/zstream.8 b/sys/contrib/openzfs/man/man8/zstream.8
index 03a8479c9e6a..5b3d063bc4a5 100644
--- a/sys/contrib/openzfs/man/man8/zstream.8
+++ b/sys/contrib/openzfs/man/man8/zstream.8
@@ -21,7 +21,7 @@
.\"
.\" Copyright (c) 2020 by Delphix. All rights reserved.
.\"
-.Dd October 4, 2022
+.Dd November 10, 2022
.Dt ZSTREAM 8
.Os
.
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index 3d6f288fa5da..58a80dc4402c 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -4,7 +4,7 @@
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
-ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
+ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @KERNEL_NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
@@ -135,6 +135,7 @@ ICP_OBJS_X86_64 := \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
+ asm-x86_64/modes/aesni-gcm-avx2-vaes.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
diff --git a/sys/contrib/openzfs/module/avl/avl.c b/sys/contrib/openzfs/module/avl/avl.c
index b6c1c02bc3f2..67cbcd3adeec 100644
--- a/sys/contrib/openzfs/module/avl/avl.c
+++ b/sys/contrib/openzfs/module/avl/avl.c
@@ -225,7 +225,7 @@ avl_nearest(avl_tree_t *tree, avl_index_t where, int direction)
size_t off = tree->avl_offset;
if (node == NULL) {
- ASSERT(tree->avl_root == NULL);
+ ASSERT0P(tree->avl_root);
return (NULL);
}
data = AVL_NODE2DATA(node, off);
@@ -478,7 +478,7 @@ avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where)
size_t off = tree->avl_offset;
#ifdef _LP64
- ASSERT(((uintptr_t)new_data & 0x7) == 0);
+ ASSERT0(((uintptr_t)new_data & 0x7));
#endif
node = AVL_DATA2NODE(new_data, off);
@@ -495,10 +495,10 @@ avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where)
AVL_SETBALANCE(node, 0);
AVL_SETPARENT(node, parent);
if (parent != NULL) {
- ASSERT(parent->avl_child[which_child] == NULL);
+ ASSERT0P(parent->avl_child[which_child]);
parent->avl_child[which_child] = node;
} else {
- ASSERT(tree->avl_root == NULL);
+ ASSERT0P(tree->avl_root);
tree->avl_root = node;
}
/*
@@ -608,7 +608,7 @@ avl_insert_here(
ASSERT(diff > 0 ? child == 1 : child == 0);
#endif
}
- ASSERT(node->avl_child[child] == NULL);
+ ASSERT0P(node->avl_child[child]);
avl_insert(tree, new_data, AVL_MKINDEX(node, child));
}
@@ -881,7 +881,7 @@ avl_create(avl_tree_t *tree, int (*compar) (const void *, const void *),
ASSERT(size > 0);
ASSERT(size >= offset + sizeof (avl_node_t));
#ifdef _LP64
- ASSERT((offset & 0x7) == 0);
+ ASSERT0((offset & 0x7));
#endif
tree->avl_compar = compar;
@@ -897,8 +897,8 @@ void
avl_destroy(avl_tree_t *tree)
{
ASSERT(tree);
- ASSERT(tree->avl_numnodes == 0);
- ASSERT(tree->avl_root == NULL);
+ ASSERT0(tree->avl_numnodes);
+ ASSERT0P(tree->avl_root);
}
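The avl.c hunk above swaps open-coded NULL and zero assertions for the
dedicated ASSERT0/ASSERT0P macros, which also report the offending value on
failure. A minimal userland sketch of the pattern, assuming assert(3) as the
backing mechanism (the real macros live in the OpenZFS SPL debug headers and
are richer than this):

#include <assert.h>
#include <stddef.h>

/* Sketch only: the SPL versions also print the failing expression/value. */
#define ASSERT0(x)  assert((x) == 0)     /* integer expression must be zero */
#define ASSERT0P(x) assert((x) == NULL)  /* pointer must be NULL */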
diff --git a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
index c2a982b5a376..3cfa5b8165ce 100644
--- a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
+++ b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
@@ -46,6 +46,9 @@
#define IMPL_CYCLE (UINT32_MAX-1)
#ifdef CAN_USE_GCM_ASM
#define IMPL_AVX (UINT32_MAX-2)
+#if CAN_USE_GCM_ASM >= 2
+#define IMPL_AVX2 (UINT32_MAX-3)
+#endif
#endif
#define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i))
static uint32_t icp_gcm_impl = IMPL_FASTEST;
@@ -56,17 +59,16 @@ static uint32_t user_sel_impl = IMPL_FASTEST;
boolean_t gcm_avx_can_use_movbe = B_FALSE;
/*
* Whether to use the optimized openssl gcm and ghash implementations.
- * Set to true if module parameter icp_gcm_impl == "avx".
*/
-static boolean_t gcm_use_avx = B_FALSE;
-#define GCM_IMPL_USE_AVX (*(volatile boolean_t *)&gcm_use_avx)
+static gcm_impl gcm_impl_used = GCM_IMPL_GENERIC;
+#define GCM_IMPL_USED (*(volatile gcm_impl *)&gcm_impl_used)
extern boolean_t ASMABI atomic_toggle_boolean_nv(volatile boolean_t *);
static inline boolean_t gcm_avx_will_work(void);
-static inline void gcm_set_avx(boolean_t);
-static inline boolean_t gcm_toggle_avx(void);
-static inline size_t gcm_simd_get_htab_size(boolean_t);
+static inline boolean_t gcm_avx2_will_work(void);
+static inline void gcm_use_impl(gcm_impl impl);
+static inline gcm_impl gcm_toggle_impl(void);
static int gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *, char *, size_t,
crypto_data_t *, size_t);
@@ -89,7 +91,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
void (*xor_block)(uint8_t *, uint8_t *))
{
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_mode_encrypt_contiguous_blocks_avx(
ctx, data, length, out, block_size));
#endif
@@ -208,7 +210,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
{
(void) copy_block;
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_encrypt_final_avx(ctx, out, block_size));
#endif
@@ -374,7 +376,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
void (*xor_block)(uint8_t *, uint8_t *))
{
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_decrypt_final_avx(ctx, out, block_size));
#endif
@@ -631,23 +633,23 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
((aes_key_t *)gcm_ctx->gcm_keysched)->ops->needs_byteswap;
if (GCM_IMPL_READ(icp_gcm_impl) != IMPL_CYCLE) {
- gcm_ctx->gcm_use_avx = GCM_IMPL_USE_AVX;
+ gcm_ctx->impl = GCM_IMPL_USED;
} else {
/*
- * Handle the "cycle" implementation by creating avx and
- * non-avx contexts alternately.
+ * Handle the "cycle" implementation by creating different
+ * contexts, one per implementation.
*/
- gcm_ctx->gcm_use_avx = gcm_toggle_avx();
+ gcm_ctx->impl = gcm_toggle_impl();
- /* The avx impl. doesn't handle byte swapped key schedules. */
- if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) {
- gcm_ctx->gcm_use_avx = B_FALSE;
+ /* The AVX impl. doesn't handle byte swapped key schedules. */
+ if (needs_bswap == B_TRUE) {
+ gcm_ctx->impl = GCM_IMPL_GENERIC;
}
/*
- * If this is a GCM context, use the MOVBE and the BSWAP
+ * If this is an AVX context, use the MOVBE and the BSWAP
* variants alternately.
*/
- if (gcm_ctx->gcm_use_avx == B_TRUE &&
+ if (gcm_ctx->impl == GCM_IMPL_AVX &&
zfs_movbe_available() == B_TRUE) {
(void) atomic_toggle_boolean_nv(
(volatile boolean_t *)&gcm_avx_can_use_movbe);
@@ -658,12 +660,13 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
* still they could be created by the aes generic implementation.
* Make sure not to use them since we'll corrupt data if we do.
*/
- if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) {
- gcm_ctx->gcm_use_avx = B_FALSE;
+ if (gcm_ctx->impl != GCM_IMPL_GENERIC && needs_bswap == B_TRUE) {
+ gcm_ctx->impl = GCM_IMPL_GENERIC;
cmn_err_once(CE_WARN,
"ICP: Can't use the aes generic or cycle implementations "
- "in combination with the gcm avx implementation!");
+ "in combination with the gcm avx or avx2-vaes "
+ "implementation!");
cmn_err_once(CE_WARN,
"ICP: Falling back to a compatible implementation, "
"aes-gcm performance will likely be degraded.");
@@ -672,36 +675,20 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
"restore performance.");
}
- /* Allocate Htab memory as needed. */
- if (gcm_ctx->gcm_use_avx == B_TRUE) {
- size_t htab_len = gcm_simd_get_htab_size(gcm_ctx->gcm_use_avx);
-
- if (htab_len == 0) {
- return (CRYPTO_MECHANISM_PARAM_INVALID);
- }
- gcm_ctx->gcm_htab_len = htab_len;
- gcm_ctx->gcm_Htable =
- kmem_alloc(htab_len, KM_SLEEP);
-
- if (gcm_ctx->gcm_Htable == NULL) {
- return (CRYPTO_HOST_MEMORY);
- }
+ /*
+ * The AVX implementations use an Htable whose size depends on the
+ * implementation.
+ */
+ if (gcm_ctx->impl != GCM_IMPL_GENERIC) {
+ rv = gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len,
+ block_size);
}
- /* Avx and non avx context initialization differs from here on. */
- if (gcm_ctx->gcm_use_avx == B_FALSE) {
+ else
#endif /* ifdef CAN_USE_GCM_ASM */
- if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size,
- encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) {
- rv = CRYPTO_MECHANISM_PARAM_INVALID;
- }
-#ifdef CAN_USE_GCM_ASM
- } else {
- if (gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len,
- block_size) != CRYPTO_SUCCESS) {
- rv = CRYPTO_MECHANISM_PARAM_INVALID;
- }
+ if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size,
+ encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
}
-#endif /* ifdef CAN_USE_GCM_ASM */
return (rv);
}
@@ -767,6 +754,9 @@ gcm_impl_get_ops(void)
break;
#ifdef CAN_USE_GCM_ASM
case IMPL_AVX:
+#if CAN_USE_GCM_ASM >= 2
+ case IMPL_AVX2:
+#endif
/*
* Make sure that we return a valid implementation while
* switching to the avx implementation since there still
@@ -828,6 +818,13 @@ gcm_impl_init(void)
* Use the avx implementation if it's available and the implementation
* hasn't changed from its default value of fastest on module load.
*/
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_avx2_will_work()) {
+ if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
+ gcm_use_impl(GCM_IMPL_AVX2);
+ }
+ } else
+#endif
if (gcm_avx_will_work()) {
#ifdef HAVE_MOVBE
if (zfs_movbe_available() == B_TRUE) {
@@ -835,7 +832,7 @@ gcm_impl_init(void)
}
#endif
if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
- gcm_set_avx(B_TRUE);
+ gcm_use_impl(GCM_IMPL_AVX);
}
}
#endif
@@ -852,6 +849,7 @@ static const struct {
{ "fastest", IMPL_FASTEST },
#ifdef CAN_USE_GCM_ASM
{ "avx", IMPL_AVX },
+ { "avx2-vaes", IMPL_AVX2 },
#endif
};
@@ -887,7 +885,13 @@ gcm_impl_set(const char *val)
/* Check mandatory options */
for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
#ifdef CAN_USE_GCM_ASM
+#if CAN_USE_GCM_ASM >= 2
/* Ignore avx implementation if it won't work. */
+ if (gcm_impl_opts[i].sel == IMPL_AVX2 &&
+ !gcm_avx2_will_work()) {
+ continue;
+ }
+#endif
if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) {
continue;
}
@@ -915,11 +919,17 @@ gcm_impl_set(const char *val)
* Use the avx implementation if available and the requested one is
* avx or fastest.
*/
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_avx2_will_work() == B_TRUE &&
+ (impl == IMPL_AVX2 || impl == IMPL_FASTEST)) {
+ gcm_use_impl(GCM_IMPL_AVX2);
+ } else
+#endif
if (gcm_avx_will_work() == B_TRUE &&
(impl == IMPL_AVX || impl == IMPL_FASTEST)) {
- gcm_set_avx(B_TRUE);
+ gcm_use_impl(GCM_IMPL_AVX);
} else {
- gcm_set_avx(B_FALSE);
+ gcm_use_impl(GCM_IMPL_GENERIC);
}
#endif
@@ -952,6 +962,12 @@ icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp)
for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
#ifdef CAN_USE_GCM_ASM
/* Ignore avx implementation if it won't work. */
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_impl_opts[i].sel == IMPL_AVX2 &&
+ !gcm_avx2_will_work()) {
+ continue;
+ }
+#endif
if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) {
continue;
}
@@ -993,9 +1009,6 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
/* Clear the FPU registers since they hold sensitive internal state. */
#define clear_fpu_regs() clear_fpu_regs_avx()
-#define GHASH_AVX(ctx, in, len) \
- gcm_ghash_avx((ctx)->gcm_ghash, (const uint64_t *)(ctx)->gcm_Htable, \
- in, len)
#define gcm_incr_counter_block(ctx) gcm_incr_counter_block_by(ctx, 1)
@@ -1010,20 +1023,77 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
static uint32_t gcm_avx_chunk_size =
((32 * 1024) / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES;
+/*
+ * GCM definitions: uint128_t is copied from include/crypto/modes.h.
+ * We avoid the name u128 because it is already defined in kernel sources.
+ */
+typedef struct {
+ uint64_t hi, lo;
+} uint128_t;
+
extern void ASMABI clear_fpu_regs_avx(void);
extern void ASMABI gcm_xor_avx(const uint8_t *src, uint8_t *dst);
extern void ASMABI aes_encrypt_intel(const uint32_t rk[], int nr,
const uint32_t pt[4], uint32_t ct[4]);
extern void ASMABI gcm_init_htab_avx(uint64_t *Htable, const uint64_t H[2]);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI gcm_init_vpclmulqdq_avx2(uint128_t Htable[16],
+ const uint64_t H[2]);
+#endif
extern void ASMABI gcm_ghash_avx(uint64_t ghash[2], const uint64_t *Htable,
const uint8_t *in, size_t len);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI gcm_ghash_vpclmulqdq_avx2(uint64_t ghash[2],
+ const uint64_t *Htable, const uint8_t *in, size_t len);
+#endif
+static inline void GHASH_AVX(gcm_ctx_t *ctx, const uint8_t *in, size_t len)
+{
+ switch (ctx->impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ gcm_ghash_vpclmulqdq_avx2(ctx->gcm_ghash,
+ (const uint64_t *)ctx->gcm_Htable, in, len);
+ break;
+#endif
+
+ case GCM_IMPL_AVX:
+ gcm_ghash_avx(ctx->gcm_ghash,
+ (const uint64_t *)ctx->gcm_Htable, in, len);
+ break;
+
+ default:
+ VERIFY(B_FALSE);
+ }
+}
+typedef size_t ASMABI aesni_gcm_encrypt_impl(const uint8_t *, uint8_t *,
+ size_t, const void *, uint64_t *, const uint64_t *Htable, uint64_t *);
extern size_t ASMABI aesni_gcm_encrypt(const uint8_t *, uint8_t *, size_t,
const void *, uint64_t *, uint64_t *);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI aes_gcm_enc_update_vaes_avx2(const uint8_t *in,
+ uint8_t *out, size_t len, const void *key, const uint8_t ivec[16],
+ const uint128_t Htable[16], uint8_t Xi[16]);
+#endif
+typedef size_t ASMABI aesni_gcm_decrypt_impl(const uint8_t *, uint8_t *,
+ size_t, const void *, uint64_t *, const uint64_t *Htable, uint64_t *);
extern size_t ASMABI aesni_gcm_decrypt(const uint8_t *, uint8_t *, size_t,
const void *, uint64_t *, uint64_t *);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI aes_gcm_dec_update_vaes_avx2(const uint8_t *in,
+ uint8_t *out, size_t len, const void *key, const uint8_t ivec[16],
+ const uint128_t Htable[16], uint8_t Xi[16]);
+#endif
+
+static inline boolean_t
+gcm_avx2_will_work(void)
+{
+ return (kfpu_allowed() &&
+ zfs_avx2_available() && zfs_vaes_available() &&
+ zfs_vpclmulqdq_available());
+}
static inline boolean_t
gcm_avx_will_work(void)
@@ -1035,33 +1105,67 @@ gcm_avx_will_work(void)
}
static inline void
-gcm_set_avx(boolean_t val)
+gcm_use_impl(gcm_impl impl)
{
- if (gcm_avx_will_work() == B_TRUE) {
- atomic_swap_32(&gcm_use_avx, val);
+ switch (impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ if (gcm_avx2_will_work() == B_TRUE) {
+ atomic_swap_32(&gcm_impl_used, impl);
+ return;
+ }
+
+ zfs_fallthrough;
+#endif
+
+ case GCM_IMPL_AVX:
+ if (gcm_avx_will_work() == B_TRUE) {
+ atomic_swap_32(&gcm_impl_used, impl);
+ return;
+ }
+
+ zfs_fallthrough;
+
+ default:
+ atomic_swap_32(&gcm_impl_used, GCM_IMPL_GENERIC);
}
}
static inline boolean_t
-gcm_toggle_avx(void)
+gcm_impl_will_work(gcm_impl impl)
{
- if (gcm_avx_will_work() == B_TRUE) {
- return (atomic_toggle_boolean_nv(&GCM_IMPL_USE_AVX));
- } else {
- return (B_FALSE);
+ switch (impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ return (gcm_avx2_will_work());
+#endif
+
+ case GCM_IMPL_AVX:
+ return (gcm_avx_will_work());
+
+ default:
+ return (B_TRUE);
}
}
-static inline size_t
-gcm_simd_get_htab_size(boolean_t simd_mode)
+static inline gcm_impl
+gcm_toggle_impl(void)
{
- switch (simd_mode) {
- case B_TRUE:
- return (2 * 6 * 2 * sizeof (uint64_t));
+ gcm_impl current_impl, new_impl;
+ do { /* handle races */
+ current_impl = atomic_load_32(&gcm_impl_used);
+ new_impl = current_impl;
+ while (B_TRUE) { /* handle incompatible implementations */
+ new_impl = (new_impl + 1) % GCM_IMPL_MAX;
+ if (gcm_impl_will_work(new_impl)) {
+ break;
+ }
+ }
- default:
- return (0);
- }
+ } while (atomic_cas_32(&gcm_impl_used, current_impl, new_impl) !=
+ current_impl);
+
+ return (new_impl);
}
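gcm_toggle_impl() above cycles the module-wide implementation selector without
a lock: compute the next implementation this CPU supports, then publish it
with a compare-and-swap, retrying if another thread moved the value first. A
self-contained sketch of that loop, using C11 atomics in place of the kernel's
atomic_cas_32 (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

typedef enum { IMPL_GENERIC, IMPL_AVX, IMPL_AVX2, IMPL_MAX } impl_t;

static _Atomic unsigned int impl_used = IMPL_GENERIC;

/* Stand-in for the gcm_*_will_work() CPU-feature probes. */
static bool
impl_will_work(unsigned int impl)
{
	return (impl == IMPL_GENERIC);
}

static unsigned int
toggle_impl(void)
{
	unsigned int cur, next;

	do {	/* retry if another thread raced us */
		cur = atomic_load(&impl_used);
		next = cur;
		do {	/* skip implementations this CPU cannot run */
			next = (next + 1) % IMPL_MAX;
		} while (!impl_will_work(next));
	} while (!atomic_compare_exchange_strong(&impl_used, &cur, next));

	return (next);
}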
@@ -1077,6 +1181,50 @@ gcm_incr_counter_block_by(gcm_ctx_t *ctx, int n)
ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
}
+static size_t aesni_gcm_encrypt_avx(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ (void) Htable;
+ return (aesni_gcm_encrypt(in, out, len, key, iv, Xip));
+}
+
+#if CAN_USE_GCM_ASM >= 2
+// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
+// bits of a |size_t|.
+// This is from boringssl/crypto/fipsmodule/aes/gcm.cc.inc
+static const size_t kSizeTWithoutLower4Bits = (size_t)-16;
+
+/* The following CRYPTO methods are from boringssl/crypto/internal.h */
+static inline uint32_t CRYPTO_bswap4(uint32_t x) {
+ return (__builtin_bswap32(x));
+}
+
+static inline uint32_t CRYPTO_load_u32_be(const void *in) {
+ uint32_t v;
+ memcpy(&v, in, sizeof (v));
+ return (CRYPTO_bswap4(v));
+}
+
+static inline void CRYPTO_store_u32_be(void *out, uint32_t v) {
+ v = CRYPTO_bswap4(v);
+ memcpy(out, &v, sizeof (v));
+}
+
+static size_t aesni_gcm_encrypt_avx2(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ uint8_t *ivec = (uint8_t *)iv;
+ len &= kSizeTWithoutLower4Bits;
+ aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec,
+ (const uint128_t *)Htable, (uint8_t *)Xip);
+ CRYPTO_store_u32_be(&ivec[12],
+ CRYPTO_load_u32_be(&ivec[12]) + len / 16);
+ return (len);
+}
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+
/*
* Encrypt multiple blocks of data in GCM mode.
* This is done in gcm_avx_chunk_size chunks, utilizing AVX assembler routines
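The aesni_gcm_encrypt_avx2() wrapper above (and its decrypt counterpart
further down) advances the counter block itself after each bulk call: the last
four bytes of the 16-byte counter block hold a big-endian 32-bit counter that
grows by one per 16-byte AES block processed, which is what the
CRYPTO_load_u32_be()/CRYPTO_store_u32_be() pair implements. A standalone
sketch of that bump, assuming GCC/Clang's __builtin_bswap32:

#include <stdint.h>
#include <string.h>

/* Advance the big-endian 32-bit counter in a GCM counter block by len/16. */
static void
bump_gcm_counter(uint8_t ivec[16], size_t len)
{
	uint32_t ctr;

	memcpy(&ctr, &ivec[12], sizeof (ctr));	/* bytes 12..15: BE counter */
	ctr = __builtin_bswap32(ctr);		/* to host order */
	ctr += (uint32_t)(len / 16);		/* one tick per AES block */
	ctr = __builtin_bswap32(ctr);		/* back to big endian */
	memcpy(&ivec[12], &ctr, sizeof (ctr));
}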
@@ -1091,8 +1239,15 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
size_t done = 0;
uint8_t *datap = (uint8_t *)data;
size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ;
+ aesni_gcm_encrypt_impl *encrypt_blocks =
+#if CAN_USE_GCM_ASM >= 2
+ ctx->impl == GCM_IMPL_AVX2 ?
+ aesni_gcm_encrypt_avx2 :
+#endif
+ aesni_gcm_encrypt_avx;
const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched);
uint64_t *ghash = ctx->gcm_ghash;
+ uint64_t *htable = ctx->gcm_Htable;
uint64_t *cb = ctx->gcm_cb;
uint8_t *ct_buf = NULL;
uint8_t *tmp = (uint8_t *)ctx->gcm_tmp;
@@ -1156,8 +1311,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Do the bulk encryption in chunk_size blocks. */
for (; bleft >= chunk_size; bleft -= chunk_size) {
kfpu_begin();
- done = aesni_gcm_encrypt(
- datap, ct_buf, chunk_size, key, cb, ghash);
+ done = encrypt_blocks(
+ datap, ct_buf, chunk_size, key, cb, htable, ghash);
clear_fpu_regs();
kfpu_end();
@@ -1180,7 +1335,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Bulk encrypt the remaining data. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) {
- done = aesni_gcm_encrypt(datap, ct_buf, bleft, key, cb, ghash);
+ done = encrypt_blocks(datap, ct_buf, bleft, key, cb, htable,
+ ghash);
if (done == 0) {
rv = CRYPTO_FAILED;
goto out;
@@ -1293,6 +1449,29 @@ gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
return (CRYPTO_SUCCESS);
}
+static size_t aesni_gcm_decrypt_avx(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ (void) Htable;
+ return (aesni_gcm_decrypt(in, out, len, key, iv, Xip));
+}
+
+#if CAN_USE_GCM_ASM >= 2
+static size_t aesni_gcm_decrypt_avx2(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ uint8_t *ivec = (uint8_t *)iv;
+ len &= kSizeTWithoutLower4Bits;
+ aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec,
+ (const uint128_t *)Htable, (uint8_t *)Xip);
+ CRYPTO_store_u32_be(&ivec[12],
+ CRYPTO_load_u32_be(&ivec[12]) + len / 16);
+ return (len);
+}
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+
/*
* Finalize decryption: We just have accumulated crypto text, so now we
* decrypt it here inplace.
@@ -1306,10 +1485,17 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
B_FALSE);
size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ;
+ aesni_gcm_decrypt_impl *decrypt_blocks =
+#if CAN_USE_GCM_ASM >= 2
+ ctx->impl == GCM_IMPL_AVX2 ?
+ aesni_gcm_decrypt_avx2 :
+#endif
+ aesni_gcm_decrypt_avx;
size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
uint8_t *datap = ctx->gcm_pt_buf;
const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched);
uint32_t *cb = (uint32_t *)ctx->gcm_cb;
+ uint64_t *htable = ctx->gcm_Htable;
uint64_t *ghash = ctx->gcm_ghash;
uint32_t *tmp = (uint32_t *)ctx->gcm_tmp;
int rv = CRYPTO_SUCCESS;
@@ -1322,8 +1508,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
*/
for (bleft = pt_len; bleft >= chunk_size; bleft -= chunk_size) {
kfpu_begin();
- done = aesni_gcm_decrypt(datap, datap, chunk_size,
- (const void *)key, ctx->gcm_cb, ghash);
+ done = decrypt_blocks(datap, datap, chunk_size,
+ (const void *)key, ctx->gcm_cb, htable, ghash);
clear_fpu_regs();
kfpu_end();
if (done != chunk_size) {
@@ -1334,8 +1520,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
/* Decrypt remainder, which is less than chunk size, in one go. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) {
- done = aesni_gcm_decrypt(datap, datap, bleft,
- (const void *)key, ctx->gcm_cb, ghash);
+ done = decrypt_blocks(datap, datap, bleft,
+ (const void *)key, ctx->gcm_cb, htable, ghash);
if (done == 0) {
clear_fpu_regs();
kfpu_end();
@@ -1424,13 +1610,42 @@ gcm_init_avx(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len,
ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==,
B_FALSE);
+ size_t htab_len = 0;
+#if CAN_USE_GCM_ASM >= 2
+ if (ctx->impl == GCM_IMPL_AVX2) {
+ /*
+ * BoringSSL's API specifies uint128_t[16] for the Htable, but only
+ * uint128_t[12] entries are used.
+ * See https://github.com/google/boringssl/blob/
+ * 813840dd094f9e9c1b00a7368aa25e656554221f1/crypto/fipsmodule/
+ * modes/asm/aes-gcm-avx2-x86_64.pl#L198-L200
+ */
+ htab_len = (2 * 8 * sizeof (uint128_t));
+ } else
+#endif /* CAN_USE_GCM_ASM >= 2 */
+ {
+ htab_len = (2 * 6 * sizeof (uint128_t));
+ }
+
+ ctx->gcm_Htable = kmem_alloc(htab_len, KM_SLEEP);
+ if (ctx->gcm_Htable == NULL) {
+ return (CRYPTO_HOST_MEMORY);
+ }
+
/* Init H (encrypt zero block) and create the initial counter block. */
memset(H, 0, sizeof (ctx->gcm_H));
kfpu_begin();
aes_encrypt_intel(keysched, aes_rounds,
(const uint32_t *)H, (uint32_t *)H);
- gcm_init_htab_avx(ctx->gcm_Htable, H);
+#if CAN_USE_GCM_ASM >= 2
+ if (ctx->impl == GCM_IMPL_AVX2) {
+ gcm_init_vpclmulqdq_avx2((uint128_t *)ctx->gcm_Htable, H);
+ } else
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+ {
+ gcm_init_htab_avx(ctx->gcm_Htable, H);
+ }
if (iv_len == 12) {
memcpy(cb, iv, 12);
diff --git a/sys/contrib/openzfs/module/icp/algs/modes/modes.c b/sys/contrib/openzfs/module/icp/algs/modes/modes.c
index 343591cd9691..ef3c1806e4b6 100644
--- a/sys/contrib/openzfs/module/icp/algs/modes/modes.c
+++ b/sys/contrib/openzfs/module/icp/algs/modes/modes.c
@@ -171,7 +171,7 @@ gcm_clear_ctx(gcm_ctx_t *ctx)
explicit_memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder));
explicit_memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
#if defined(CAN_USE_GCM_ASM)
- if (ctx->gcm_use_avx == B_TRUE) {
+ if (ctx->impl != GCM_IMPL_GENERIC) {
ASSERT3P(ctx->gcm_Htable, !=, NULL);
explicit_memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
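gcm_clear_ctx() above scrubs the per-context Htable with explicit_memset()
before freeing it, since the Htable is derived key material; a plain memset()
immediately before a free is liable to be removed by the compiler as a dead
store. A minimal sketch of the idea, using a volatile pointer in place of the
kernel's explicit_memset() (function name is illustrative):

#include <stddef.h>
#include <stdlib.h>

/* A memset the compiler cannot elide: write through a volatile pointer. */
static void
scrub_and_free(void *buf, size_t len)
{
	volatile unsigned char *p = buf;

	while (len-- > 0)
		*p++ = 0;
	free(buf);
}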
diff --git a/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c b/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c
index 6d3bcca9f995..dcb0a391dda4 100644
--- a/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c
+++ b/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c
@@ -38,11 +38,14 @@
kfpu_begin(); E(s, d, b); kfpu_end(); \
}
+#if defined(__x86_64) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__PPC64__)
/* some implementation is always okay */
static inline boolean_t sha2_is_supported(void)
{
return (B_TRUE);
}
+#endif
#if defined(__x86_64)
diff --git a/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c b/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c
index 2efd9fcf4c99..a85a71a83df4 100644
--- a/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c
+++ b/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c
@@ -38,11 +38,14 @@
kfpu_begin(); E(s, d, b); kfpu_end(); \
}
+#if defined(__x86_64) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__PPC64__)
/* some implementation is always okay */
static inline boolean_t sha2_is_supported(void)
{
return (B_TRUE);
}
+#endif
#if defined(__x86_64)
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl
new file mode 100644
index 000000000000..04c03a37e0cb
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl
@@ -0,0 +1,253 @@
+BoringSSL is a fork of OpenSSL. As such, large parts of it fall under OpenSSL
+licensing. Files that are completely new have a Google copyright and an ISC
+license. This license is reproduced at the bottom of this file.
+
+Contributors to BoringSSL are required to follow the CLA rules for Chromium:
+https://cla.developers.google.com/clas
+
+Files in third_party/ have their own licenses, as described therein. The MIT
+license, for third_party/fiat, which, unlike other third_party directories, is
+compiled into non-test libraries, is included below.
+
+The OpenSSL toolkit stays under a dual license, i.e. both the conditions of the
+OpenSSL License and the original SSLeay license apply to the toolkit. See below
+for the actual license texts. Actually both licenses are BSD-style Open Source
+licenses. In case of any license issues related to OpenSSL please contact
+openssl-core@openssl.org.
+
+The following are Google-internal bug numbers where explicit permission from
+some authors is recorded for use of their work. (This is purely for our own
+record keeping.)
+ 27287199
+ 27287880
+ 27287883
+ 263291445
+
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+
+ISC license used for completely new code in BoringSSL:
+
+/* Copyright 2015 The BoringSSL Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+
+The code in third_party/fiat carries the MIT license:
+
+Copyright (c) 2015-2016 the fiat-crypto authors (see
+https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Licenses for support code
+-------------------------
+
+Parts of the TLS test suite are under the Go license. This code is not included
+in BoringSSL (i.e. libcrypto and libssl) when compiled, however, so
+distributing code linked against BoringSSL does not trigger this license:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+BoringSSL uses the Chromium test infrastructure to run a continuous build,
+trybots etc. The scripts which manage this, and the script for generating build
+metadata, are under the Chromium license. Distributing code linked against
+BoringSSL does not trigger this license.
+
+Copyright 2015 The Chromium Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip
new file mode 100644
index 000000000000..f63a67a4d2ae
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip
@@ -0,0 +1 @@
+PORTIONS OF AES GCM and GHASH FUNCTIONALITY
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S
new file mode 100644
index 000000000000..3d1b045127e2
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: Apache-2.0
+// This file is generated from a similarly-named Perl script in the BoringSSL
+// source tree. Do not edit by hand.
+
+#if defined(__x86_64__) && defined(HAVE_AVX) && \
+ defined(HAVE_VAES) && defined(HAVE_VPCLMULQDQ)
+
+#define _ASM
+#include <sys/asm_linkage.h>
+
+/* Windows userland links with OpenSSL */
+#if !defined (_WIN32) || defined (_KERNEL)
+
+.section .rodata
+.balign 16
+
+
+.Lbswap_mask:
+.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
+
+
+
+
+
+
+
+
+.Lgfpoly:
+.quad 1, 0xc200000000000000
+
+
+.Lgfpoly_and_internal_carrybit:
+.quad 1, 0xc200000000000001
+
+.balign 32
+
+.Lctr_pattern:
+.quad 0, 0
+.quad 1, 0
+.Linc_2blocks:
+.quad 2, 0
+.quad 2, 0
+
+ENTRY_ALIGN(gcm_init_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+
+
+ vmovdqu (%rsi),%xmm3
+ // KCF/ICP stores H in network byte order with the hi qword first
+ // so we need to swap all bytes, not the 2 qwords.
+ vmovdqu .Lbswap_mask(%rip),%xmm4
+ vpshufb %xmm4,%xmm3,%xmm3
+
+
+
+
+
+ vpshufd $0xd3,%xmm3,%xmm0
+ vpsrad $31,%xmm0,%xmm0
+ vpaddq %xmm3,%xmm3,%xmm3
+ vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm6
+
+
+ vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
+ vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
+ vpclmulqdq $0x01,%xmm0,%xmm6,%xmm1
+ vpshufd $0x4e,%xmm0,%xmm0
+ vpxor %xmm0,%xmm1,%xmm1
+ vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm5,%xmm5
+ vpxor %xmm0,%xmm5,%xmm5
+
+
+
+ vinserti128 $1,%xmm3,%ymm5,%ymm3
+ vinserti128 $1,%xmm5,%ymm5,%ymm5
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+
+ vmovdqu %ymm3,96(%rdi)
+ vmovdqu %ymm4,64(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128+32(%rdi)
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm3,%ymm3
+ vpxor %ymm0,%ymm3,%ymm3
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu %ymm3,32(%rdi)
+ vmovdqu %ymm4,0(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128(%rdi)
+
+ vzeroupper
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_init_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_gmult_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+ vmovdqu (%rdi),%xmm0
+ vmovdqu .Lbswap_mask(%rip),%xmm1
+ vmovdqu 128-16(%rsi),%xmm2
+ vmovdqu .Lgfpoly(%rip),%xmm3
+ vpshufb %xmm1,%xmm0,%xmm0
+
+ vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4
+ vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5
+ vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6
+ vpshufd $0x4e,%xmm4,%xmm4
+ vpxor %xmm4,%xmm5,%xmm5
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0
+ vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4
+ vpshufd $0x4e,%xmm5,%xmm5
+ vpxor %xmm5,%xmm0,%xmm0
+ vpxor %xmm4,%xmm0,%xmm0
+
+
+ vpshufb %xmm1,%xmm0,%xmm0
+ vmovdqu %xmm0,(%rdi)
+
+
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_gmult_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_ghash_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+
+
+
+ vmovdqu .Lbswap_mask(%rip),%xmm6
+ vmovdqu .Lgfpoly(%rip),%xmm7
+
+
+ vmovdqu (%rdi),%xmm5
+ vpshufb %xmm6,%xmm5,%xmm5
+
+
+ cmpq $32,%rcx
+ jb .Lghash_lastblock
+
+
+
+ vinserti128 $1,%xmm6,%ymm6,%ymm6
+ vinserti128 $1,%xmm7,%ymm7,%ymm7
+
+ cmpq $127,%rcx
+ jbe .Lghash_loop_1x
+
+
+ vmovdqu 128(%rsi),%ymm8
+ vmovdqu 128+32(%rsi),%ymm9
+.Lghash_loop_4x:
+
+ vmovdqu 0(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 0(%rsi),%ymm2
+ vpxor %ymm5,%ymm1,%ymm1
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4
+
+ vmovdqu 32(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 32(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu 64(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 64(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+ vmovdqu 96(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 96(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm5,%ymm4,%ymm4
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm3,%ymm3
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm4,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpxor %ymm0,%ymm5,%ymm5
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+
+ subq $-128,%rdx
+ addq $-128,%rcx
+ cmpq $127,%rcx
+ ja .Lghash_loop_4x
+
+
+ cmpq $32,%rcx
+ jb .Lghash_loop_1x_done
+.Lghash_loop_1x:
+ vmovdqu (%rdx),%ymm0
+ vpshufb %ymm6,%ymm0,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vmovdqu 128-32(%rsi),%ymm0
+ vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2
+ vpclmulqdq $0x10,%ymm0,%ymm5,%ymm3
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm2,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpxor %ymm1,%ymm5,%ymm5
+
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ addq $32,%rdx
+ subq $32,%rcx
+ cmpq $32,%rcx
+ jae .Lghash_loop_1x
+.Lghash_loop_1x_done:
+
+
+.Lghash_lastblock:
+ testq %rcx,%rcx
+ jz .Lghash_done
+ vmovdqu (%rdx),%xmm0
+ vpshufb %xmm6,%xmm0,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ vmovdqu 128-16(%rsi),%xmm0
+ vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
+ vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm2,%xmm2
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
+ vpshufd $0x4e,%xmm2,%xmm2
+ vpxor %xmm2,%xmm5,%xmm5
+ vpxor %xmm1,%xmm5,%xmm5
+
+
+.Lghash_done:
+
+ vpshufb %xmm6,%xmm5,%xmm5
+ vmovdqu %xmm5,(%rdi)
+
+ vzeroupper
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_ghash_vpclmulqdq_avx2)
+ENTRY_ALIGN(aes_gcm_enc_update_vaes_avx2, 32)
+.cfi_startproc
+
+ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+#ifdef BORINGSSL_DISPATCH_TEST
+.extern BORINGSSL_function_hit
+.hidden BORINGSSL_function_hit
+ movb $1,BORINGSSL_function_hit+6(%rip)
+#endif
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 504(%rcx),%r10d // ICP has a larger offset for rounds.
+ leal -24(,%r10,4),%r10d // ICP uses 10,12,14 not 9,11,13 for rounds.
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func1
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_first_4_vecs__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_first_4_vecs__func1
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ jbe .Lghash_last_ciphertext_4x__func1
+.balign 16
+.Lcrypt_loop_4x__func1:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func1
+ je .Laes192__func1
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func1:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func1:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+ subq $-128,%rsi
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func1
+.Lghash_last_ciphertext_4x__func1:
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+ subq $-128,%rsi
+.Lcrypt_loop_4x_done__func1:
+
+ testq %rdx,%rdx
+ jz .Ldone__func1
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func1
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %ymm0,%ymm13,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func1
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func1:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func1
+ je .Lxor_two_blocks__func1
+
+.Lxor_three_blocks__func1:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %xmm0,%xmm13,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_two_blocks__func1:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_one_block__func1:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm12,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func1:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func1:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func1:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ RET
+
+.cfi_endproc
+SET_SIZE(aes_gcm_enc_update_vaes_avx2)
+ENTRY_ALIGN(aes_gcm_dec_update_vaes_avx2, 32)
+.cfi_startproc
+
+ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 504(%rcx),%r10d // ICP has a larger offset for rounds.
+ leal -24(,%r10,4),%r10d // ICP uses 10,12,14 not 9,11,13 for rounds.
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func2
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+.balign 16
+.Lcrypt_loop_4x__func2:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func2
+ je .Laes192__func2
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func2:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func2:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ subq $-128,%rsi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func2
+.Lcrypt_loop_4x_done__func2:
+
+ testq %rdx,%rdx
+ jz .Ldone__func2
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func2
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %ymm0,%ymm3,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func2
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func2:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func2
+ je .Lxor_two_blocks__func2
+
+.Lxor_three_blocks__func2:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %xmm0,%xmm3,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_two_blocks__func2:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_one_block__func2:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm2,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func2:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func2:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func2:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ RET
+
+.cfi_endproc
+SET_SIZE(aes_gcm_dec_update_vaes_avx2)
+
+#endif /* !_WIN32 || _KERNEL */
+
+/* Mark the stack non-executable. */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+#endif /* defined(__x86_64__) && defined(HAVE_AVX) && defined(HAVE_VAES) ... */
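The five entry points above are BoringSSL's VAES/AVX2 GCM routines, re-generated
for ICP. As a rough guide to what the assembly expects, here is a sketch of the
C prototypes implied by the SysV AMD64 register usage in this file (argument
names are illustrative; the authoritative declarations live in the ICP headers,
not in this diff):

    /*
     * Sketch only: inferred from register usage (%rdi, %rsi, %rdx, %rcx,
     * %r8, %r9, plus one stack argument for the update routines).
     */
    void gcm_init_vpclmulqdq_avx2(void *Htable, const uint64_t H[2]);
    void gcm_gmult_vpclmulqdq_avx2(uint8_t Xi[16], const void *Htable);
    void gcm_ghash_vpclmulqdq_avx2(uint8_t Xi[16], const void *Htable,
        const uint8_t *in, size_t len);
    void aes_gcm_enc_update_vaes_avx2(const uint8_t *in, uint8_t *out,
        size_t len, const void *aes_key, const uint8_t ivec[16],
        const void *Htable, uint8_t Xi[16]);
    void aes_gcm_dec_update_vaes_avx2(const uint8_t *in, uint8_t *out,
        size_t len, const void *aes_key, const uint8_t ivec[16],
        const void *Htable, uint8_t Xi[16]);

Two ICP-specific adjustments are called out by comments in the code: gcm_init
byte-swaps the whole 16-byte hash key because KCF/ICP stores H in network byte
order, and the update routines read the AES round count (10, 12, or 14) from
offset 504 of the ICP key schedule, rescaling it with
`leal -24(,%r10,4),%r10d` so that `leaq 96(%rcx,%r10,4),%r11` lands on the last
round key and the `cmpl $24,%r10d` branches select the AES-128/192/256 paths.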
diff --git a/sys/contrib/openzfs/module/icp/core/kcf_sched.c b/sys/contrib/openzfs/module/icp/core/kcf_sched.c
index 759f0d81d521..75e1052a4ed4 100644
--- a/sys/contrib/openzfs/module/icp/core/kcf_sched.c
+++ b/sys/contrib/openzfs/module/icp/core/kcf_sched.c
@@ -124,7 +124,7 @@ kcf_context_cache_destructor(void *buf, void *cdrarg)
(void) cdrarg;
kcf_context_t *kctx = (kcf_context_t *)buf;
- ASSERT(kctx->kc_refcnt == 0);
+ ASSERT0(kctx->kc_refcnt);
}
void
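This is the first of many such conversions in this commit: plain
`ASSERT(x == 0)`, `ASSERT3U(x, ==, 0)`, `ASSERT3P(p, ==, NULL)`, and
`VERIFY3P(p, ==, NULL)` checks become the dedicated zero/NULL macros here and
in the ICP, nvpair, SPL, and ZFS files below. A sketch of the assumed
semantics (the real macros live in the SPL debug headers and also print the
failing expression and value on debug builds):

    #define	ASSERT0(x)	ASSERT3U((x), ==, 0)
    #define	ASSERT0P(p)	ASSERT3P((p), ==, NULL)
    #define	VERIFY0P(p)	VERIFY3P((p), ==, NULL)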
diff --git a/sys/contrib/openzfs/module/icp/include/modes/modes.h b/sys/contrib/openzfs/module/icp/include/modes/modes.h
index ca734cf4f045..de11d9eafafb 100644
--- a/sys/contrib/openzfs/module/icp/include/modes/modes.h
+++ b/sys/contrib/openzfs/module/icp/include/modes/modes.h
@@ -42,7 +42,7 @@ extern "C" {
*/
#if defined(__x86_64__) && defined(HAVE_AVX) && \
defined(HAVE_AES) && defined(HAVE_PCLMULQDQ)
-#define CAN_USE_GCM_ASM
+#define CAN_USE_GCM_ASM (HAVE_VAES && HAVE_VPCLMULQDQ ? 2 : 1)
extern boolean_t gcm_avx_can_use_movbe;
#endif
@@ -129,6 +129,15 @@ typedef struct ccm_ctx {
#define ccm_copy_to ccm_common.cc_copy_to
#define ccm_flags ccm_common.cc_flags
+#ifdef CAN_USE_GCM_ASM
+typedef enum gcm_impl {
+ GCM_IMPL_GENERIC = 0,
+ GCM_IMPL_AVX,
+ GCM_IMPL_AVX2,
+ GCM_IMPL_MAX,
+} gcm_impl;
+#endif
+
/*
* gcm_tag_len: Length of authentication tag.
*
@@ -174,7 +183,7 @@ typedef struct gcm_ctx {
uint64_t gcm_len_a_len_c[2];
uint8_t *gcm_pt_buf;
#ifdef CAN_USE_GCM_ASM
- boolean_t gcm_use_avx;
+ enum gcm_impl impl;
#endif
} gcm_ctx_t;
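With CAN_USE_GCM_ASM now evaluating to 2 when VAES and VPCLMULQDQ support is
compiled in (and 1 otherwise), the per-context `boolean_t gcm_use_avx` no
longer suffices, hence the three-way `gcm_impl` enum. A minimal self-contained
sketch of the kind of dispatch this enables (illustrative only; the real
selection logic lives in the GCM implementation code, which is not part of
this hunk):

    typedef enum gcm_impl {
    	GCM_IMPL_GENERIC = 0,	/* portable C fallback */
    	GCM_IMPL_AVX,		/* AVX + PCLMULQDQ assembly */
    	GCM_IMPL_AVX2,		/* AVX2 + VAES + VPCLMULQDQ assembly */
    	GCM_IMPL_MAX,
    } gcm_impl;

    static const char *
    gcm_impl_name(gcm_impl impl)
    {
    	switch (impl) {
    	case GCM_IMPL_AVX2:
    		return ("avx2");
    	case GCM_IMPL_AVX:
    		return ("avx");
    	default:
    		return ("generic");
    	}
    }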
diff --git a/sys/contrib/openzfs/module/icp/io/aes.c b/sys/contrib/openzfs/module/icp/io/aes.c
index ba703efa71fc..ca586eaf97ef 100644
--- a/sys/contrib/openzfs/module/icp/io/aes.c
+++ b/sys/contrib/openzfs/module/icp/io/aes.c
@@ -236,16 +236,16 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
} else {
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
}
if (plaintext != ciphertext) {
@@ -337,7 +337,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
@@ -349,7 +349,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
diff --git a/sys/contrib/openzfs/module/nvpair/nvpair.c b/sys/contrib/openzfs/module/nvpair/nvpair.c
index 811cfc87d7a4..eb8c14b4a783 100644
--- a/sys/contrib/openzfs/module/nvpair/nvpair.c
+++ b/sys/contrib/openzfs/module/nvpair/nvpair.c
@@ -265,7 +265,7 @@ nv_priv_alloc_embedded(nvpriv_t *priv)
static int
nvt_tab_alloc(nvpriv_t *priv, uint64_t buckets)
{
- ASSERT3P(priv->nvp_hashtable, ==, NULL);
+ ASSERT0P(priv->nvp_hashtable);
ASSERT0(priv->nvp_nbuckets);
ASSERT0(priv->nvp_nentries);
@@ -334,7 +334,7 @@ nvt_lookup_name_type(const nvlist_t *nvl, const char *name, data_type_t type)
i_nvp_t **tab = priv->nvp_hashtable;
if (tab == NULL) {
- ASSERT3P(priv->nvp_list, ==, NULL);
+ ASSERT0P(priv->nvp_list);
ASSERT0(priv->nvp_nbuckets);
ASSERT0(priv->nvp_nentries);
return (NULL);
@@ -540,7 +540,7 @@ nvt_add_nvpair(nvlist_t *nvl, nvpair_t *nvp)
/* insert link at the beginning of the bucket */
i_nvp_t *new_entry = NVPAIR2I_NVP(nvp);
- ASSERT3P(new_entry->nvi_hashtable_next, ==, NULL);
+ ASSERT0P(new_entry->nvi_hashtable_next);
new_entry->nvi_hashtable_next = bucket;
// cppcheck-suppress nullPointerRedundantCheck
tab[index] = new_entry;
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c
index 6d198fad5203..ae6e36d988c2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c
@@ -160,7 +160,7 @@ kmem_cache_create(const char *name, size_t bufsize, size_t align,
{
kmem_cache_t *cache;
- ASSERT3P(vmp, ==, NULL);
+ ASSERT0P(vmp);
cache = kmem_alloc(sizeof (*cache), KM_SLEEP);
strlcpy(cache->kc_name, name, sizeof (cache->kc_name));
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c
index 9da633c2b1be..3c2d39b20c09 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c
@@ -256,7 +256,7 @@ sysevent_worker(void *arg __unused)
* free `ze`, so just inline the free() here -- events have already
* been drained.
*/
- VERIFY3P(ze->ze_zevent, ==, NULL);
+ VERIFY0P(ze->ze_zevent);
kmem_free(ze, sizeof (zfs_zevent_t));
kthread_exit();
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index fbf67f6a14a8..4bf487cdc469 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -507,7 +507,7 @@ abd_iter_at_end(struct abd_iter *aiter)
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to advance to, so do nothing */
@@ -526,7 +526,7 @@ abd_iter_map(struct abd_iter *aiter)
{
void *paddr;
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
index 364bbfc60abd..26cc7981bfcd 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
@@ -156,7 +156,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (dbp[0]->db_offset != 0 || numbufs > 1) {
for (i = 0; i < numbufs; i++) {
ASSERT(ISP2(dbp[i]->db_size));
- ASSERT3U((dbp[i]->db_offset % dbp[i]->db_size), ==, 0);
+ ASSERT0((dbp[i]->db_offset % dbp[i]->db_size));
ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
}
}
@@ -175,7 +175,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m);
break;
}
- ASSERT3U(m->dirty, ==, 0);
+ ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE);
@@ -201,7 +201,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (m != bogus_page) {
vm_page_assert_xbusied(m);
ASSERT(vm_page_none_valid(m));
- ASSERT3U(m->dirty, ==, 0);
+ ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m));
va = zfs_map_page(m, &sf);
}
@@ -295,7 +295,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m);
break;
}
- ASSERT3U(m->dirty, ==, 0);
+ ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
index c114db14a916..b218c0da8125 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
@@ -112,7 +112,6 @@ static int zfs__fini(void);
static void zfs_shutdown(void *, int);
static eventhandler_tag zfs_shutdown_event_tag;
-static eventhandler_tag zfs_mountroot_event_tag;
#define ZFS_MIN_KSTACK_PAGES 4
@@ -311,9 +310,6 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
zfs_shutdown_event_tag = EVENTHANDLER_REGISTER(
shutdown_post_sync, zfs_shutdown, NULL,
SHUTDOWN_PRI_FIRST);
- zfs_mountroot_event_tag = EVENTHANDLER_REGISTER(
- mountroot, spa_boot_init, NULL,
- SI_ORDER_ANY);
}
return (err);
case MOD_UNLOAD:
@@ -322,9 +318,6 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
if (zfs_shutdown_event_tag != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync,
zfs_shutdown_event_tag);
- if (zfs_mountroot_event_tag != NULL)
- EVENTHANDLER_DEREGISTER(mountroot,
- zfs_mountroot_event_tag);
}
return (err);
case MOD_SHUTDOWN:
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index ace2360c032d..ebc2c0eeb6d2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -163,6 +163,13 @@ param_set_arc_int(SYSCTL_HANDLER_ARGS)
return (0);
}
+static void
+warn_deprecated_sysctl(const char *old, const char *new)
+{
+ printf("WARNING: sysctl vfs.zfs.%s is deprecated. Use vfs.zfs.%s instead.\n",
+ old, new);
+}
+
int
param_set_arc_max(SYSCTL_HANDLER_ARGS)
{
@@ -185,12 +192,15 @@ param_set_arc_max(SYSCTL_HANDLER_ARGS)
if (val != 0)
zfs_arc_max = arc_c_max;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_max", "arc.max");
+
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_max, "LU",
+ NULL, 1, param_set_arc_max, "LU",
"Maximum ARC size in bytes (LEGACY)");
int
@@ -214,12 +224,15 @@ param_set_arc_min(SYSCTL_HANDLER_ARGS)
if (val != 0)
zfs_arc_min = arc_c_min;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_min", "arc.min");
+
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_min, "LU",
+ NULL, 1, param_set_arc_min, "LU",
"Minimum ARC size in bytes (LEGACY)");
extern uint_t zfs_arc_free_target;
@@ -242,6 +255,9 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
zfs_arc_free_target = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_free_target", "arc.free_target");
+
return (0);
}
@@ -251,7 +267,7 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
*/
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_free_target, "IU",
+ NULL, 1, param_set_arc_free_target, "IU",
"Desired number of free pages below which ARC triggers reclaim"
" (LEGACY)");
@@ -270,12 +286,15 @@ param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
arc_no_grow_shift = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("arc_no_grow_shift", "arc.no_grow_shift");
+
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_no_grow_shift, "I",
+ NULL, 1, param_set_arc_no_grow_shift, "I",
"log2(fraction of ARC which must be free to allow growing) (LEGACY)");
extern uint64_t l2arc_write_max;
@@ -746,12 +765,15 @@ param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
zfs_vdev_min_auto_ashift = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("min_auto_ashift",
+ "vdev.min_auto_ashift");
+
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
- CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
+ CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 1,
param_set_min_auto_ashift, "IU",
"Min ashift used when creating new top-level vdev. (LEGACY)");
@@ -771,12 +793,15 @@ param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
zfs_vdev_max_auto_ashift = val;
+ if (arg2 != 0)
+ warn_deprecated_sysctl("max_auto_ashift",
+ "vdev.max_auto_ashift");
+
return (0);
}
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
- CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
+ CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 1,
param_set_max_auto_ashift, "IU",
"Max ashift used when optimizing for logical -> physical sector size on"
" new top-level vdevs. (LEGACY)");
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
index c8ab7cc7cf8e..bbd1dafc69be 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
@@ -1236,7 +1236,7 @@ vdev_geom_io_done(zio_t *zio)
struct bio *bp = zio->io_bio;
if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
- ASSERT3P(bp, ==, NULL);
+ ASSERT0P(bp);
return;
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
index 5c5adc6cc12b..cb5787269db2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
@@ -1175,7 +1175,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
int count = 0;
zfs_acl_phys_t acl_phys;
- if (zp->z_zfsvfs->z_replay == B_FALSE) {
+ if (ZTOV(zp) != NULL && zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
@@ -1632,7 +1632,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
- ASSERT3P(dzp->z_vnode, ==, NULL);
+ ASSERT0P(dzp->z_vnode);
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
@@ -2014,7 +2014,7 @@ top:
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(error);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
+ ASSERT0P(zp->z_acl_cached);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index 8d0ff9b25e30..4de48e013ec4 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -357,7 +357,7 @@ zfsctl_create(zfsvfs_t *zfsvfs)
vnode_t *rvp;
uint64_t crtime[2];
- ASSERT3P(zfsvfs->z_ctldir, ==, NULL);
+ ASSERT0P(zfsvfs->z_ctldir);
snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
ZFSCTL_INO_SNAPDIR);
@@ -494,7 +494,7 @@ zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
vap->va_uid = 0;
vap->va_gid = 0;
- vap->va_rdev = 0;
+ vap->va_rdev = NODEV;
/*
* We are a purely virtual object, so we have no
* blocksize or allocated blocks.
@@ -688,6 +688,8 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
* count to return is 0.
*/
if (zfs_uio_offset(&uio) == 3 * sizeof (entry)) {
+ if (eofp != NULL)
+ *eofp = 1;
return (0);
}
@@ -1367,7 +1369,7 @@ zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
int err = getzfsvfs(snapname, &zfsvfs);
if (err != 0) {
- ASSERT3P(zfsvfs, ==, NULL);
+ ASSERT0P(zfsvfs);
return (0);
}
vfsp = zfsvfs->z_vfs;
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
index 191df832d726..75ba2ea0cb9e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
@@ -273,7 +273,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT(zp->z_unlinked);
- ASSERT3U(zp->z_links, ==, 0);
+ ASSERT0(zp->z_links);
VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
@@ -437,7 +437,7 @@ zfs_rmnode(znode_t *zp)
uint64_t count;
int error;
- ASSERT3U(zp->z_links, ==, 0);
+ ASSERT0(zp->z_links);
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
index 21e5f7938f9f..ca13569a1235 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
@@ -164,8 +164,9 @@ zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t count, loff_t off,
- ssize_t *resid)
+ uint8_t ashift, ssize_t *resid)
{
+ (void) ashift;
return (zfs_file_write_impl(fp, buf, count, &off, resid));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 0456552ed07e..79b784288911 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -455,8 +455,13 @@ zfs_sync(vfs_t *vfsp, int waitfor)
return (0);
}
- if (zfsvfs->z_log != NULL)
- zil_commit(zfsvfs->z_log, 0);
+ if (zfsvfs->z_log != NULL) {
+ error = zil_commit(zfsvfs->z_log, 0);
+ if (error != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
zfs_exit(zfsvfs, FTAG);
} else {
@@ -1091,7 +1096,7 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
if (mounting) {
boolean_t readonly;
- ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zfsvfs->z_kstat.dk_kstats);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
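zil_commit() now returns an error rather than void, so the ZFS_SYNC_ALWAYS
call sites in this file and throughout zfs_vnops_os.c below stop being
fire-and-forget. The recurring call-site shape after this change, condensed
from the hunks in this commit:

    if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
    	error = zil_commit(zilog, 0);
    zfs_exit(zfsvfs, FTAG);
    return (error);

The `error == 0 &&` guard keeps an earlier failure from being overwritten, and
skips the commit when the operation it would have made durable never happened.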
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index c4270d8b5d5c..411225786089 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -61,6 +61,7 @@
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
+#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
@@ -388,7 +389,9 @@ zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
error = vn_lock(vp, LK_EXCLUSIVE);
if (error)
return (error);
+ vn_seqc_write_begin(vp);
error = zfs_ioctl_setxattr(vp, fsx, cred);
+ vn_seqc_write_end(vp);
VOP_UNLOCK(vp);
return (error);
}
@@ -1101,7 +1104,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
zfs_exit(zfsvfs, FTAG);
return (error);
}
- ASSERT3P(zp, ==, NULL);
+ ASSERT0P(zp);
/*
* Create a new file object and update the directory
@@ -1193,8 +1196,8 @@ out:
*zpp = zp;
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1323,9 +1326,8 @@ out:
if (xzp)
vrele(ZTOV(xzp));
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1482,7 +1484,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
zfs_exit(zfsvfs, FTAG);
return (error);
}
- ASSERT3P(zp, ==, NULL);
+ ASSERT0P(zp);
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr,
mnt_ns))) {
@@ -1556,8 +1558,8 @@ out:
getnewvnode_drop_reserve();
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1637,8 +1639,8 @@ zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
if (zfsvfs->z_use_namecache)
cache_vop_rmdir(dvp, vp);
out:
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1736,7 +1738,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
/*
* Quit if directory has been removed (posix)
*/
- if ((*eofp = zp->z_unlinked) != 0) {
+ if ((*eofp = (zp->z_unlinked != 0)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
@@ -2014,7 +2016,7 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
else
- vap->va_rdev = 0;
+ vap->va_rdev = NODEV;
vap->va_gen = zp->z_gen;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
@@ -2204,6 +2206,7 @@ zfs_setattr_dir(znode_t *dzp)
if (err)
break;
+ vn_seqc_write_begin(ZTOV(zp));
mutex_enter(&dzp->z_lock);
if (zp->z_uid != dzp->z_uid) {
@@ -2253,6 +2256,7 @@ sa_add_projid_err:
dmu_tx_abort(tx);
}
tx = NULL;
+ vn_seqc_write_end(ZTOV(zp));
if (err != 0 && err != ENOENT)
break;
@@ -3009,8 +3013,8 @@ out:
}
out2:
- if (os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (err == 0 && os->os_sync == ZFS_SYNC_ALWAYS)
+ err = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (err);
@@ -3539,7 +3543,7 @@ out_seq:
out:
if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -3731,7 +3735,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
}
zfs_exit(zfsvfs, FTAG);
@@ -3921,8 +3925,8 @@ zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
vnevent_link(ZTOV(szp), ct);
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -4313,7 +4317,7 @@ typedef struct {
} putpage_commit_arg_t;
static void
-zfs_putpage_commit_cb(void *arg)
+zfs_putpage_commit_cb(void *arg, int err)
{
putpage_commit_arg_t *pca = arg;
vm_object_t object = pca->pca_pages[0]->object;
@@ -4322,7 +4326,17 @@ zfs_putpage_commit_cb(void *arg)
for (uint_t i = 0; i < pca->pca_npages; i++) {
vm_page_t pp = pca->pca_pages[i];
- vm_page_undirty(pp);
+
+ if (err == 0) {
+ /*
+ * Writeback succeeded, so undirty the page. If it
+			 * fails, we leave it in the same state it was in.
+			 * That's most likely dirty, so it will get tried
+			 * again some other time.
+ */
+ vm_page_undirty(pp);
+ }
+
vm_page_sunbusy(pp);
}
@@ -4510,8 +4524,13 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
out:
zfs_rangelock_exit(lr);
- if (commit)
- zil_commit(zfsvfs->z_log, zp->z_id);
+ if (commit) {
+ err = zil_commit(zfsvfs->z_log, zp->z_id);
+ if (err != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (err);
+ }
+ }
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, len);
@@ -5223,8 +5242,32 @@ struct vop_fsync_args {
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
+ vnode_t *vp = ap->a_vp;
+ int err = 0;
+
+ /*
+ * Push any dirty mmap()'d data out to the DMU and ZIL, ready for
+ * zil_commit() to be called in zfs_fsync().
+ */
+ if (vm_object_mightbedirty(vp->v_object)) {
+ zfs_vmobject_wlock(vp->v_object);
+ if (!vm_object_page_clean(vp->v_object, 0, 0, 0))
+ err = SET_ERROR(EIO);
+ zfs_vmobject_wunlock(vp->v_object);
+ if (err) {
+ /*
+ * Unclear what state things are in. zfs_putpages()
+ * will ensure the pages remain dirty if they haven't
+ * been written down to the DMU, but because there may
+ * be nothing logged, we can't assume that zfs_sync()
+ * -> zil_commit() will give us a useful error. It's
+ * safest if we just error out here.
+ */
+ return (err);
+ }
+ }
- return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
+ return (zfs_fsync(VTOZ(vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
@@ -5689,6 +5732,9 @@ zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
{
ulong_t val;
int error;
+#ifdef _PC_CLONE_BLKSIZE
+ zfsvfs_t *zfsvfs;
+#endif
error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
curthread->td_ucred, NULL);
@@ -5735,6 +5781,21 @@ zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
*ap->a_retval = 1;
return (0);
#endif
+#ifdef _PC_CLONE_BLKSIZE
+ case _PC_CLONE_BLKSIZE:
+ zfsvfs = (zfsvfs_t *)ap->a_vp->v_mount->mnt_data;
+ if (zfs_bclone_enabled &&
+ spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
+ SPA_FEATURE_BLOCK_CLONING))
+ *ap->a_retval = dsl_dataset_feature_is_active(
+ zfsvfs->z_os->os_dsl_dataset,
+ SPA_FEATURE_LARGE_BLOCKS) ?
+ SPA_MAXBLOCKSIZE :
+ SPA_OLD_MAXBLOCKSIZE;
+ else
+ *ap->a_retval = 0;
+ return (0);
+#endif
default:
return (vop_stdpathconf(ap));
}
@@ -6773,9 +6834,11 @@ zfs_deallocate(struct vop_deallocate_args *ap)
if (error == 0) {
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS ||
(ap->a_ioflag & IO_SYNC) != 0)
- zil_commit(zilog, zp->z_id);
- *ap->a_offset = off + len;
- *ap->a_len = 0;
+ error = zil_commit(zilog, zp->z_id);
+ if (error == 0) {
+ *ap->a_offset = off + len;
+ *ap->a_len = 0;
+ }
}
zfs_exit(zfsvfs, FTAG);
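Two writeback-related changes in this file deserve a note. First,
zfs_putpage_commit_cb() now receives the commit status, so pages are undirtied
only when the ZIL actually reached stable storage. Second, zfs_freebsd_fsync()
pushes dirty mmap()'d pages into the DMU before calling zfs_fsync(); condensed
from the hunk above:

    /* Condensed sketch of the new fsync ordering. */
    int err = 0;

    if (vm_object_mightbedirty(vp->v_object)) {
    	zfs_vmobject_wlock(vp->v_object);
    	if (!vm_object_page_clean(vp->v_object, 0, 0, 0))
    		err = SET_ERROR(EIO);	/* pages could not be pushed */
    	zfs_vmobject_wunlock(vp->v_object);
    }
    if (err == 0)
    	err = zfs_fsync(VTOZ(vp), 0, ap->a_td->td_ucred);
    return (err);

Erroring out before zil_commit() is deliberate: if the page clean failed there
may be nothing logged, so zil_commit() could not be relied on to report the
failure.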
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
index 775f54a65f7d..649022ab5bcb 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
@@ -161,15 +161,15 @@ zfs_znode_cache_destructor(void *buf, void *arg)
znode_t *zp = buf;
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- ASSERT3P(zp->z_vnode, ==, NULL);
+ ASSERT0P(zp->z_vnode);
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
}
@@ -195,7 +195,7 @@ zfs_znode_init(void)
/*
* Initialize zcache
*/
- ASSERT3P(znode_uma_zone, ==, NULL);
+ ASSERT0P(znode_uma_zone);
znode_uma_zone = uma_zcreate("zfs_znode_cache",
sizeof (znode_t), zfs_znode_cache_constructor_smr,
zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
@@ -224,7 +224,7 @@ zfs_znode_init(void)
/*
* Initialize zcache
*/
- ASSERT3P(znode_cache, ==, NULL);
+ ASSERT0P(znode_cache);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_RECLAIMABLE);
@@ -353,8 +353,8 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
- ASSERT3P(zp->z_sa_hdl, ==, NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
+ ASSERT0P(zp->z_sa_hdl);
+ ASSERT0P(zp->z_acl_cached);
if (sa_hdl == NULL) {
VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
@@ -817,6 +817,10 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
+ vnode_t *vp = ZTOV(*zpp);
+ if (!(flag & IS_ROOT_NODE))
+ vn_seqc_write_begin(vp);
+
if (vap->va_mask & AT_XVATTR)
zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
@@ -825,7 +829,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
if (!(flag & IS_ROOT_NODE)) {
- vnode_t *vp = ZTOV(*zpp);
+ vn_seqc_write_end(vp);
vp->v_vflag |= VV_FORCEINSMQ;
int err = insmntque(vp, zfsvfs->z_vfs);
vp->v_vflag &= ~VV_FORCEINSMQ;
@@ -1127,7 +1131,7 @@ zfs_rezget(znode_t *zp)
}
rw_exit(&zp->z_xattr_lock);
- ASSERT3P(zp->z_sa_hdl, ==, NULL);
+ ASSERT0P(zp->z_sa_hdl);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
@@ -1298,7 +1302,7 @@ zfs_znode_free(znode_t *zp)
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
char *symlink;
- ASSERT3P(zp->z_sa_hdl, ==, NULL);
+ ASSERT0P(zp->z_sa_hdl);
zp->z_vnode = NULL;
mutex_enter(&zfsvfs->z_znodes_lock);
POINTER_INVALIDATE(&zp->z_zfsvfs);
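
The hunk above brackets znode attribute setup with vn_seqc_write_begin()/vn_seqc_write_end(). A rough userspace analogue of that sequence-counter pattern, using C11 atomics (a sketch of the idea, not FreeBSD's implementation):

/*
 * Seqlock-style sketch: writers make the counter odd while mutating;
 * readers retry if they observe an odd or changed counter. The plain
 * field reads are tolerated here only because this is an illustration.
 */
#include <stdatomic.h>

typedef struct {
    _Atomic unsigned seq;
    int a, b;       /* fields guarded by the counter */
} seqc_obj_t;

static void
seqc_write_begin(seqc_obj_t *o)
{
    atomic_fetch_add_explicit(&o->seq, 1, memory_order_acq_rel); /* odd */
}

static void
seqc_write_end(seqc_obj_t *o)
{
    atomic_fetch_add_explicit(&o->seq, 1, memory_order_release); /* even */
}

static void
seqc_read(seqc_obj_t *o, int *a, int *b)
{
    unsigned s;

    do {
        while ((s = atomic_load_explicit(&o->seq,
            memory_order_acquire)) & 1)
            ;   /* writer active, spin */
        *a = o->a;
        *b = o->b;
    } while (atomic_load_explicit(&o->seq, memory_order_acquire) != s);
}
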
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 72a7c4ea082a..0dd2ecd7fd8d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -31,7 +31,7 @@
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
@@ -196,7 +196,6 @@ DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
-static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
@@ -226,25 +225,14 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
}
retry:
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- /*
- * Obtain a copy of private under zvol_state_lock to make sure either
- * the result of zvol free code setting private to NULL is observed,
- * or the zv is protected from being freed because of the positive
- * zv_open_count.
- */
- zv = pp->private;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- err = SET_ERROR(ENXIO);
- goto out_locked;
- }
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
- rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
@@ -257,8 +245,24 @@ retry:
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
+
+ /*
+ * Removal may happen while the locks are down, so
+ * we can't trust zv any longer; we have to start over.
+ */
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
+
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (zv->zv_zso->zso_dying ||
+ zv->zv_flags & ZVOL_REMOVING) {
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
+ }
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -266,7 +270,6 @@ retry:
}
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -294,7 +297,7 @@ retry:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
- goto out_zv_locked;
+ goto out_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
@@ -329,9 +332,8 @@ out_opened:
zvol_last_close(zv);
wakeup(zv);
}
-out_zv_locked:
- mutex_exit(&zv->zv_state_lock);
out_locked:
+ mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
@@ -345,12 +347,9 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
boolean_t drop_suspend = B_TRUE;
int new_open_count;
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- zv = pp->private;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
return (SET_ERROR(ENXIO));
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
@@ -377,6 +376,15 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_geom_open(), we don't check if
+ * removal started here, because we might be one of the
+ * openers that needs to be thrown out! If we're the
+ * last, we need to call zvol_last_close() below to
+ * finish cleanup. So, no special treatment for us.
+ */
+
/* Check to see if zv_suspend_lock is needed. */
new_open_count = zv->zv_open_count - count;
if (new_open_count != 0) {
@@ -387,7 +395,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -408,20 +415,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
return (0);
}
-static void
-zvol_geom_destroy(zvol_state_t *zv)
-{
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp = zsg->zsg_provider;
-
- ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
-
- g_topology_assert();
-
- zsg->zsg_provider = NULL;
- g_wither_geom(pp->geom, ENXIO);
-}
-
void
zvol_wait_close(zvol_state_t *zv)
{
@@ -454,7 +447,7 @@ zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
pp->name, acr, acw, ace));
- if (pp->private == NULL) {
+ if (atomic_load_ptr(&pp->private) == NULL) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
return (pp->error);
@@ -727,9 +720,9 @@ unlock:
break;
}
- if (commit) {
+ if (error == 0 && commit) {
commit:
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
}
resume:
rw_exit(&zv->zv_suspend_lock);
@@ -906,8 +899,8 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
- if (commit)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (error == 0 && commit)
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
return (error);
@@ -921,25 +914,14 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
boolean_t drop_suspend = B_FALSE;
retry:
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- /*
- * Obtain a copy of si_drv2 under zvol_state_lock to make sure either
- * the result of zvol free code setting si_drv2 to NULL is observed,
- * or the zv is protected from being freed because of the positive
- * zv_open_count.
- */
- zv = dev->si_drv2;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- err = SET_ERROR(ENXIO);
- goto out_locked;
- }
+ zv = atomic_load_ptr(&dev->si_drv2);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
mutex_enter(&zv->zv_state_lock);
- if (zv->zv_zso->zso_dying) {
- rw_exit(&zvol_state_lock);
+ if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
err = SET_ERROR(ENXIO);
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
@@ -954,6 +936,13 @@ retry:
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+ /* Removal started while locks were down. */
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
+ }
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -961,7 +950,6 @@ retry:
}
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -989,7 +977,7 @@ retry:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1016,9 +1004,8 @@ out_opened:
zvol_last_close(zv);
wakeup(zv);
}
-out_zv_locked:
- mutex_exit(&zv->zv_state_lock);
out_locked:
+ mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
@@ -1030,12 +1017,9 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- zv = dev->si_drv2;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
+ zv = atomic_load_ptr(&dev->si_drv2);
+ if (zv == NULL)
return (SET_ERROR(ENXIO));
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
@@ -1060,6 +1044,15 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_cdev_open(), we don't check if
+ * removal started here, because we might be one of the
+ * openers that needs to be thrown out! If we're the
+ * last, we need to call zvol_last_close() below to
+ * finish cleanup. So, no special treatment for us.
+ */
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
@@ -1069,7 +1062,6 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1101,7 +1093,8 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
int error;
boolean_t sync;
- zv = dev->si_drv2;
+ zv = atomic_load_ptr(&dev->si_drv2);
+ ASSERT3P(zv, !=, NULL);
error = 0;
KASSERT(zv->zv_open_count > 0,
@@ -1117,7 +1110,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
case DIOCGFLUSH:
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
if (zv->zv_zilog != NULL)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGDELETE:
@@ -1152,7 +1145,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
}
zfs_rangelock_exit(lr);
if (sync)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGSTRIPESIZE:
@@ -1162,6 +1155,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
*(off_t *)data = 0;
break;
case DIOCGATTR: {
+ rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
@@ -1186,6 +1180,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
arg->value.off = refd / DEV_BSIZE;
} else
error = SET_ERROR(ENOIOCTL);
+ rw_exit(&zv->zv_suspend_lock);
break;
}
case FIOSEEKHOLE:
@@ -1196,10 +1191,12 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
hole = (cmd == FIOSEEKHOLE);
noff = *off;
+ rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
RL_READER);
error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
zfs_rangelock_exit(lr);
+ rw_exit(&zv->zv_suspend_lock);
*off = noff;
break;
}
@@ -1400,42 +1397,65 @@ zvol_alloc(const char *name, uint64_t volsize, uint64_t volblocksize,
* Remove minor node for the specified volume.
*/
void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
{
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
+ ASSERT0(atomic_read(&zv->zv_suspend_ref));
+ ASSERT(zv->zv_flags & ZVOL_REMOVING);
- ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
-
- rw_destroy(&zv->zv_suspend_lock);
- zfs_rangelock_fini(&zv->zv_rangelock);
+ struct zvol_state_os *zso = zv->zv_zso;
+ zv->zv_zso = NULL;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp __maybe_unused = zsg->zsg_provider;
-
- ASSERT3P(pp->private, ==, NULL);
+ struct zvol_state_geom *zsg = &zso->zso_geom;
+ struct g_provider *pp = zsg->zsg_provider;
+ atomic_store_ptr(&pp->private, NULL);
+ mutex_exit(&zv->zv_state_lock);
g_topology_lock();
- zvol_geom_destroy(zv);
+ g_wither_geom(pp->geom, ENXIO);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
- struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+ struct zvol_state_dev *zsd = &zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
+ if (dev != NULL)
+ atomic_store_ptr(&dev->si_drv2, NULL);
+ mutex_exit(&zv->zv_state_lock);
+
if (dev != NULL) {
- ASSERT3P(dev->si_drv2, ==, NULL);
destroy_dev(dev);
knlist_clear(&zsd->zsd_selinfo.si_note, 0);
knlist_destroy(&zsd->zsd_selinfo.si_note);
}
}
+ kmem_free(zso, sizeof (struct zvol_state_os));
+
+ mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+ ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT0(zv->zv_open_count);
+ ASSERT0P(zv->zv_zso);
+
+ ASSERT0P(zv->zv_objset);
+ ASSERT0P(zv->zv_zilog);
+ ASSERT0P(zv->zv_dn);
+
+ ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
+
+ rw_destroy(&zv->zv_suspend_lock);
+ zfs_rangelock_fini(&zv->zv_rangelock);
+
mutex_destroy(&zv->zv_state_lock);
cv_destroy(&zv->zv_removing_cv);
dataset_kstats_destroy(&zv->zv_kstat);
- kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
zvol_minors--;
}
@@ -1493,11 +1513,11 @@ zvol_os_create_minor(const char *name)
zv->zv_objset = os;
- ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zv->zv_kstat.dk_kstats);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
- ASSERT3P(zv->zv_zilog, ==, NULL);
+ ASSERT0P(zv->zv_zilog);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
@@ -1538,28 +1558,6 @@ out_doi:
return (error);
}
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
- ASSERT(RW_LOCK_HELD(&zvol_state_lock));
- if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp = zsg->zsg_provider;
-
- if (pp->private == NULL) /* already cleared */
- return;
-
- pp->private = NULL;
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
- struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
- struct cdev *dev = zsd->zsd_cdev;
-
- if (dev != NULL)
- dev->si_drv2 = NULL;
- }
-}
-
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
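
The open paths above drop the global zvol_state_lock in favor of an atomic load of the device-private pointer, plus a re-check of the removal flag every time the state lock is dropped and retaken. A compressed sketch of that pattern, with hypothetical types standing in for the real zvol structures:

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct zv {
    pthread_mutex_t lock;
    bool removing;
};

static _Atomic(struct zv *) dev_private;

static int
zv_open(void)
{
    struct zv *zv = atomic_load(&dev_private);

    if (zv == NULL)
        return (ENXIO); /* already destroyed */
    pthread_mutex_lock(&zv->lock);
    if (zv->removing) {
        pthread_mutex_unlock(&zv->lock);
        return (ENXIO);
    }
    /*
     * If we ever drop zv->lock here (e.g. to take a suspend lock in
     * the right order), we must re-load dev_private and re-check
     * zv->removing before trusting zv again.
     */
    pthread_mutex_unlock(&zv->lock);
    return (0);
}
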
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c b/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
index ce9c9e39e60c..aac5f2ebbfd2 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
@@ -66,9 +66,9 @@ void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
ASSERT(cvp);
- ASSERT(name == NULL);
+ ASSERT0P(name);
ASSERT(type == CV_DEFAULT);
- ASSERT(arg == NULL);
+ ASSERT0P(arg);
cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event);
@@ -83,7 +83,7 @@ static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
- ASSERT(cvp->cv_mutex == NULL);
+ ASSERT0P(cvp->cv_mutex);
ASSERT(!waitqueue_active(&cvp->cv_event));
return (1);
}
@@ -104,7 +104,7 @@ __cv_destroy(kcondvar_t *cvp)
while (cv_destroy_wakeup(cvp) == 0)
wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
- ASSERT3P(cvp->cv_mutex, ==, NULL);
+ ASSERT0P(cvp->cv_mutex);
ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c b/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
index f37699b4347e..89ca4a648b2f 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
@@ -709,7 +709,7 @@ zone_get_hostid(void *zone)
{
uint32_t hostid;
- ASSERT3P(zone, ==, NULL);
+ ASSERT0P(zone);
if (spl_hostid != 0)
return ((uint32_t)(spl_hostid & HW_HOSTID_MASK));
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
index fab80289b278..22e4ed169d03 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
@@ -296,7 +296,7 @@ spl_slab_free(spl_kmem_slab_t *sks,
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
- ASSERT(sks->sks_ref == 0);
+ ASSERT0(sks->sks_ref);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
@@ -598,7 +598,7 @@ static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
ASSERT(skm->skm_magic == SKM_MAGIC);
- ASSERT(skm->skm_avail == 0);
+ ASSERT0(skm->skm_avail);
kfree(skm);
}
@@ -610,7 +610,7 @@ spl_magazine_create(spl_kmem_cache_t *skc)
{
int i = 0;
- ASSERT((skc->skc_flags & KMC_SLAB) == 0);
+ ASSERT0((skc->skc_flags & KMC_SLAB));
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
@@ -640,7 +640,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
spl_kmem_magazine_t *skm;
int i = 0;
- ASSERT((skc->skc_flags & KMC_SLAB) == 0);
+ ASSERT0((skc->skc_flags & KMC_SLAB));
for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
@@ -679,8 +679,8 @@ spl_kmem_cache_create(const char *name, size_t size, size_t align,
/*
* Unsupported flags
*/
- ASSERT(vmp == NULL);
- ASSERT(reclaim == NULL);
+ ASSERT0P(vmp);
+ ASSERT0P(reclaim);
might_sleep();
@@ -863,11 +863,11 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
* Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/
- ASSERT3U(skc->skc_slab_alloc, ==, 0);
- ASSERT3U(skc->skc_obj_alloc, ==, 0);
- ASSERT3U(skc->skc_slab_total, ==, 0);
- ASSERT3U(skc->skc_obj_total, ==, 0);
- ASSERT3U(skc->skc_obj_emergency, ==, 0);
+ ASSERT0(skc->skc_slab_alloc);
+ ASSERT0(skc->skc_obj_alloc);
+ ASSERT0(skc->skc_slab_total);
+ ASSERT0(skc->skc_obj_total);
+ ASSERT0(skc->skc_obj_emergency);
ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
@@ -986,7 +986,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT((skc->skc_flags & KMC_SLAB) == 0);
+ ASSERT0((skc->skc_flags & KMC_SLAB));
*obj = NULL;
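
The conversions above (and throughout this patch) replace open-coded ASSERT(x == 0) / ASSERT3P(x, ==, NULL) forms with the dedicated ASSERT0/ASSERT0P macros. Roughly what those macros amount to, simplified; the real OpenZFS versions capture the expression text and values for the panic message:

#include <assert.h>
#include <stddef.h>

#define ASSERT0(x)   assert((x) == 0)      /* integer expected to be zero */
#define ASSERT0P(p)  assert((p) == NULL)   /* pointer expected to be NULL */
#define VERIFY0(x)   assert((x) == 0)      /* upstream: checked in all builds */
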
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
index 48f70b00c96b..02c5b42bc4a0 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
@@ -541,7 +541,7 @@ __kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
kstat_t *ksp;
ASSERT(ks_module);
- ASSERT(ks_instance == 0);
+ ASSERT0(ks_instance);
ASSERT(ks_name);
if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
index f42f455222de..8f5c73b13df5 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
@@ -80,7 +80,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
/* Option pp is simply ignored */
/* Variable stack size unsupported */
- ASSERT(stk == NULL);
+ ASSERT0P(stk);
tp = kmem_alloc(sizeof (thread_priv_t), KM_PUSHPAGE);
if (tp == NULL)
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c b/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
index 34a61bef7d4f..2e8cedf0dc87 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
@@ -161,7 +161,7 @@ tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value)
ulong_t hash;
int rc = 0;
- ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL);
+ ASSERT0P(tsd_hash_search(table, key, pid));
/* New entry allocate structure, set value, and add to hash */
entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
index 248c9b7a6d3b..8a8316f63c48 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
@@ -863,9 +863,9 @@ abd_iter_advance(struct abd_iter *aiter, size_t amount)
* Ensure that last chunk is not in use. abd_iterate_*() must clear
* this state (directly or abd_iter_unmap()) before advancing.
*/
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
- ASSERT3P(aiter->iter_page, ==, NULL);
+ ASSERT0P(aiter->iter_page);
ASSERT0(aiter->iter_page_doff);
ASSERT0(aiter->iter_page_dsize);
@@ -897,7 +897,7 @@ abd_iter_map(struct abd_iter *aiter)
void *paddr;
size_t offset = 0;
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
index 154ca22d9513..1bd3500e9f66 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
@@ -471,13 +471,17 @@ vdev_disk_close(vdev_t *v)
if (v->vdev_reopening || vd == NULL)
return;
+ rw_enter(&vd->vd_lock, RW_WRITER);
+
if (vd->vd_bdh != NULL)
vdev_blkdev_put(vd->vd_bdh, spa_mode(v->vdev_spa),
zfs_vdev_holder);
+ v->vdev_tsd = NULL;
+
+ rw_exit(&vd->vd_lock);
rw_destroy(&vd->vd_lock);
kmem_free(vd, sizeof (vdev_disk_t));
- v->vdev_tsd = NULL;
}
/*
@@ -552,7 +556,7 @@ vdev_bio_associate_blkg(struct bio *bio)
#endif
ASSERT3P(q, !=, NULL);
- ASSERT3P(bio->bi_blkg, ==, NULL);
+ ASSERT0P(bio->bi_blkg);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
@@ -574,7 +578,7 @@ vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
bio->bi_bdev = bdev;
ASSERT3P(q, !=, NULL);
- ASSERT3P(bio->bi_blkg, ==, NULL);
+ ASSERT0P(bio->bi_blkg);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
@@ -806,7 +810,7 @@ vbio_completion(struct bio *bio)
* here; instead we stash vbio on the zio and take care of it in the
* done callback.
*/
- ASSERT3P(zio->io_bio, ==, NULL);
+ ASSERT0P(zio->io_bio);
zio->io_bio = vbio;
zio_delay_interrupt(zio);
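
vdev_disk_close() now clears v->vdev_tsd while the write lock is still held rather than after the structure is freed, so a reader holding the lock can never observe a handle that is about to go away. The ordering, sketched with pthread primitives and hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct disk {
    pthread_rwlock_t lock;
    void *handle;
};

static void
disk_close(struct disk **dp)
{
    struct disk *d = *dp;

    pthread_rwlock_wrlock(&d->lock);
    free(d->handle);    /* put the underlying device */
    d->handle = NULL;
    *dp = NULL;         /* detach while still locked */
    pthread_rwlock_unlock(&d->lock);

    pthread_rwlock_destroy(&d->lock);
    free(d);
}
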
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
index 1b169122f25b..daa4b5776837 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
@@ -1900,7 +1900,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
- VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
+ VERIFY0(zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
@@ -2204,8 +2204,8 @@ top:
}
error = zfs_aclset_common(zp, aclp, cr, tx);
- ASSERT(error == 0);
- ASSERT(zp->z_acl_cached == NULL);
+ ASSERT0(error);
+ ASSERT0P(zp->z_acl_cached);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
index 6552a933ce0a..fb4de50480a3 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
@@ -494,9 +494,9 @@ zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
if (!creation)
now = current_time(ip);
zp = ITOZ(ip);
- ASSERT3P(zp->z_dirlocks, ==, NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_dirlocks);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
zp->z_id = id;
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
@@ -590,7 +590,7 @@ zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
int
zfsctl_create(zfsvfs_t *zfsvfs)
{
- ASSERT(zfsvfs->z_ctldir == NULL);
+ ASSERT0P(zfsvfs->z_ctldir);
zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
&zpl_fops_root, &zpl_ops_root, 0);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c
index 2f935bb3fc8c..e8de536606e2 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c
@@ -463,7 +463,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
zfsvfs_t *zfsvfs = ZTOZSB(zp);
ASSERT(zp->z_unlinked);
- ASSERT(ZTOI(zp)->i_nlink == 0);
+ ASSERT0(ZTOI(zp)->i_nlink);
VERIFY3U(0, ==,
zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
@@ -662,8 +662,8 @@ zfs_rmnode(znode_t *zp)
uint64_t links;
int error;
- ASSERT(ZTOI(zp)->i_nlink == 0);
- ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);
+ ASSERT0(ZTOI(zp)->i_nlink);
+ ASSERT0(atomic_read(&ZTOI(zp)->i_count));
/*
* If this is an attribute directory, purge its contents.
@@ -710,7 +710,7 @@ zfs_rmnode(znode_t *zp)
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
- ASSERT(error == 0);
+ ASSERT0(error);
}
acl_obj = zfs_external_acl(zp);
@@ -744,12 +744,12 @@ zfs_rmnode(znode_t *zp)
}
if (xzp) {
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */
clear_nlink(ZTOI(xzp)); /* no more links to it */
links = 0;
- VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
+ VERIFY0(sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx));
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
@@ -872,7 +872,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
ctime);
}
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_exit(&zp->z_lock);
@@ -894,7 +894,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_exit(&dzp->z_lock);
return (0);
@@ -986,7 +986,7 @@ zfs_drop_nlink_locked(znode_t *zp, dmu_tx_t *tx, boolean_t *unlinkedp)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- ASSERT3U(error, ==, 0);
+ ASSERT0(error);
if (unlinkedp != NULL)
*unlinkedp = unlinked;
@@ -1058,7 +1058,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
/* The only error is !zfs_dirempty() and we checked earlier. */
error = zfs_drop_nlink_locked(zp, tx, &unlinked);
- ASSERT3U(error, ==, 0);
+ ASSERT0(error);
mutex_exit(&zp->z_lock);
} else {
error = zfs_dropname(dl, zp, dzp, tx, flag);
@@ -1083,7 +1083,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_exit(&dzp->z_lock);
if (unlinkedp != NULL)
@@ -1167,7 +1167,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
ASSERT(error == 0 && parent == zp->z_id);
#endif
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
sizeof (xzp->z_id), tx));
if (!zp->z_unlinked)
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c
index c729947369c2..3fdcdbac6f68 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c
@@ -115,8 +115,9 @@ zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
*/
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t count, loff_t off,
- ssize_t *resid)
+ uint8_t ashift, ssize_t *resid)
{
+ (void) ashift;
ssize_t rc;
rc = kernel_write(fp, buf, count, &off);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c
index 1c187d7b9cab..895d80b2d79e 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c
@@ -223,7 +223,7 @@ zfs_kobj_add(zfs_mod_kobj_t *zkobj, struct kobject *parent, const char *name)
{
/* zko_default_group.attrs must be NULL terminated */
ASSERT(zkobj->zko_default_group.attrs != NULL);
- ASSERT(zkobj->zko_default_group.attrs[zkobj->zko_attr_count] == NULL);
+ ASSERT0P(zkobj->zko_default_group.attrs[zkobj->zko_attr_count]);
kobject_init(&zkobj->zko_kobj, &zkobj->zko_kobj_type);
return (kobject_add(&zkobj->zko_kobj, parent, name));
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 396faef8f646..8a7d14ab6119 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -279,19 +279,14 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr)
return (err);
/*
- * If the pool is suspended, just return an error. This is to help
- * with shutting down with pools suspended, as we don't want to block
- * in that case.
+ * Sync any pending writes, but do not block if the pool is suspended.
+ * This is to help with shutting down with pools suspended, as we don't
+ * want to block in that case.
*/
- if (spa_suspended(zfsvfs->z_os->os_spa)) {
- zfs_exit(zfsvfs, FTAG);
- return (SET_ERROR(EIO));
- }
-
- zil_commit(zfsvfs->z_log, 0);
+ err = zil_commit_flags(zfsvfs->z_log, 0, ZIL_COMMIT_NOW);
zfs_exit(zfsvfs, FTAG);
- return (0);
+ return (err);
}
static void
@@ -883,7 +878,7 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
* operations out since we closed the ZIL.
*/
if (mounting) {
- ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zfsvfs->z_kstat.dk_kstats);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
@@ -1561,6 +1556,12 @@ zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
sb->s_xattr = zpl_xattr_handlers;
sb->s_export_op = &zpl_export_operations;
+#ifdef HAVE_SET_DEFAULT_D_OP
+ set_default_d_op(sb, &zpl_dentry_operations);
+#else
+ sb->s_d_op = &zpl_dentry_operations;
+#endif
+
/* Set features for file system. */
zfs_set_fuid_feature(zfsvfs);
@@ -1676,7 +1677,7 @@ zfs_umount(struct super_block *sb)
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
- VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
+ VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
os = zfsvfs->z_os;
/*
@@ -1802,8 +1803,8 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
- VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
- 0, kcred, NULL, NULL) == 0);
+ VERIFY0(zfsctl_root_lookup(*ipp, "snapshot", ipp,
+ 0, kcred, NULL, NULL));
} else {
/*
* Must have an existing ref, so igrab()
@@ -1905,7 +1906,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
- VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
+ VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
@@ -2078,7 +2079,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
- VERIFY(0 == sa_set_sa_object(os, sa_obj));
+ VERIFY0(sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
index 6a2fc5ad7935..6106726651a3 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
@@ -841,8 +841,8 @@ out:
*zpp = zp;
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1203,8 +1203,8 @@ out:
zfs_zrele_async(xzp);
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1392,14 +1392,15 @@ out:
zfs_dirent_unlock(dl);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
if (error != 0) {
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
+
+ if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
+
}
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1528,8 +1529,8 @@ out:
zfs_znode_update_vfs(zp);
zrele(zp);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -2483,10 +2484,10 @@ top:
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
- ASSERT(err == 0);
+ ASSERT0(err);
if (attrzp) {
err = zfs_acl_chown_setattr(attrzp);
- ASSERT(err == 0);
+ ASSERT0(err);
}
}
@@ -2600,7 +2601,7 @@ out:
if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
- ASSERT(err2 == 0);
+ ASSERT0(err2);
}
if (aclp)
@@ -2630,8 +2631,8 @@ out:
}
out2:
- if (os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (err == 0 && os->os_sync == ZFS_SYNC_ALWAYS)
+ err = zil_commit(zilog, 0);
out3:
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
@@ -3157,7 +3158,7 @@ top:
* zfs_link_create() to add back the same entry, but with a new
* dnode (szp), should not fail.
*/
- ASSERT3P(tzp, ==, NULL);
+ ASSERT0P(tzp);
goto commit_link_tzp;
}
@@ -3235,8 +3236,8 @@ out:
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -3436,7 +3437,7 @@ top:
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
} else {
zrele(zp);
}
@@ -3654,8 +3655,8 @@ top:
* operation are sync safe.
*/
if (is_tmpfile) {
- VERIFY(zap_remove_int(zfsvfs->z_os,
- zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
+ VERIFY0(zap_remove_int(zfsvfs->z_os,
+ zfsvfs->z_unlinkedobj, szp->z_id, tx));
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
@@ -3670,18 +3671,20 @@ top:
zfs_dirent_unlock(dl);
- if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
- if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- txg_wait_flag_t wait_flags =
- spa_get_failmode(dmu_objset_spa(zfsvfs->z_os)) ==
- ZIO_FAILURE_MODE_CONTINUE ? TXG_WAIT_SUSPEND : 0;
- error = txg_wait_synced_flags(dmu_objset_pool(zfsvfs->z_os),
- txg, wait_flags);
- if (error != 0) {
- ASSERT3U(error, ==, ESHUTDOWN);
- error = SET_ERROR(EIO);
+ if (error == 0) {
+ if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
+
+ if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
+ txg_wait_flag_t wait_flags =
+ spa_get_failmode(dmu_objset_spa(zfsvfs->z_os)) ==
+ ZIO_FAILURE_MODE_CONTINUE ? TXG_WAIT_SUSPEND : 0;
+ error = txg_wait_synced_flags(
+ dmu_objset_pool(zfsvfs->z_os), txg, wait_flags);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ error = SET_ERROR(EIO);
+ }
}
}
@@ -3691,16 +3694,42 @@ top:
return (error);
}
-static void
-zfs_putpage_commit_cb(void *arg)
+/* Finish page writeback. */
+static inline void
+zfs_page_writeback_done(struct page *pp, int err)
{
- struct page *pp = arg;
+ if (err != 0) {
+ /*
+ * Writeback failed. Re-dirty the page. It was undirtied before
+ * the IO was issued (in zfs_putpage() or write_cache_pages()).
+ * The kernel only considers writeback for dirty pages; if we
+ * don't do this, it is eligible for eviction without being
+ * written out, which we definitely don't want.
+ */
+#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
+ filemap_dirty_folio(page_mapping(pp), page_folio(pp));
+#else
+ __set_page_dirty_nobuffers(pp);
+#endif
+ }
ClearPageError(pp);
end_page_writeback(pp);
}
/*
+ * ZIL callback for page writeback. Passed to zfs_log_write() in zfs_putpage()

+ * for syncing writes. Called when the ZIL itx has been written to the log or
+ * the whole txg syncs, or if the ZIL crashes or the pool suspends. Any failure
+ * is passed as `err`.
+ */
+static void
+zfs_putpage_commit_cb(void *arg, int err)
+{
+ zfs_page_writeback_done(arg, err);
+}
+
+/*
* Push a page out to disk, once the page is on stable storage the
* registered commit callback will be run as notification of completion.
*
@@ -3853,16 +3882,15 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
err = dmu_tx_assign(tx, DMU_TX_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
-#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
- filemap_dirty_folio(page_mapping(pp), page_folio(pp));
-#else
- __set_page_dirty_nobuffers(pp);
-#endif
- ClearPageError(pp);
- end_page_writeback(pp);
+ zfs_page_writeback_done(pp, err);
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
- return (err);
+
+ /*
+ * Don't return error for an async writeback; we've re-dirtied
+ * the page so it will be tried again some other time.
+ */
+ return (for_sync ? err : 0);
}
va = kmap(pp);
@@ -3916,7 +3944,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
* ALL, zfs_putpage should do it.
*
* Summary:
- * for_sync: 0=unlock immediately; 1 unlock once on disk
+ * for_sync: 0=unlock immediately; 1=unlock once on disk
* sync_mode: NONE=caller will commit; ALL=we will commit
*/
boolean_t need_commit = (wbc->sync_mode != WB_SYNC_NONE);
@@ -3931,16 +3959,24 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
B_FALSE, for_sync ? zfs_putpage_commit_cb : NULL, pp);
if (!for_sync) {
- ClearPageError(pp);
- end_page_writeback(pp);
+ /*
+ * Async writeback is logged and written to the DMU, so page
+ * can now be unlocked.
+ */
+ zfs_page_writeback_done(pp, 0);
}
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
- if (need_commit)
- zil_commit(zfsvfs->z_log, zp->z_id);
+ if (need_commit) {
+ err = zil_commit_flags(zfsvfs->z_log, zp->z_id, ZIL_COMMIT_NOW);
+ if (err != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (err);
+ }
+ }
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, pglen);
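
The new zfs_page_writeback_done() helper re-dirties pages when writeback fails, and zfs_putpage() only propagates the error to synchronous callers; async writeback relies on the re-dirtied page being retried later. The policy, reduced to a toy model:

#include <stdbool.h>

struct tpage {
    bool dirty;
    bool writeback;
};

static void
writeback_done(struct tpage *pp, int err)
{
    if (err != 0)
        pp->dirty = true;   /* keep it eligible for retry */
    pp->writeback = false;
}

static int
putpage(struct tpage *pp, int err, bool for_sync)
{
    writeback_done(pp, err);
    /* async writeback swallows the error; the retry will report it */
    return (for_sync ? err : 0);
}
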
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c
index 7683eeb3cf9f..bcaabeb32b8a 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c
@@ -144,9 +144,9 @@ zfs_znode_cache_destructor(void *buf, void *arg)
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
- ASSERT3P(zp->z_dirlocks, ==, NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_dirlocks);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
}
static int
@@ -178,13 +178,13 @@ zfs_znode_init(void)
* backed by kmalloc() when on the Linux slab in order that any
* wait_on_bit() operations on the related inode operate properly.
*/
- ASSERT(znode_cache == NULL);
+ ASSERT0P(znode_cache);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL,
KMC_SLAB | KMC_RECLAIMABLE);
- ASSERT(znode_hold_cache == NULL);
+ ASSERT0P(znode_hold_cache);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
@@ -327,10 +327,10 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
mutex_enter(&zp->z_lock);
- ASSERT(zp->z_sa_hdl == NULL);
- ASSERT(zp->z_acl_cached == NULL);
+ ASSERT0P(zp->z_sa_hdl);
+ ASSERT0P(zp->z_acl_cached);
if (sa_hdl == NULL) {
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
+ VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
@@ -530,9 +530,9 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
return (NULL);
zp = ITOZ(ip);
- ASSERT(zp->z_dirlocks == NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_dirlocks);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_is_ctldir = B_FALSE;
@@ -611,7 +611,7 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
* processing so do not hash unlinked znodes.
*/
if (links > 0)
- VERIFY3S(insert_inode_locked(ip), ==, 0);
+ VERIFY0(insert_inode_locked(ip));
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
@@ -811,7 +811,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
}
/* Now add in all of the "SA" attributes */
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
+ VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
@@ -901,7 +901,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
acl_ids->z_fuid, acl_ids->z_fgid);
}
- VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
+ VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));
if (!(flag & IS_ROOT_NODE)) {
/*
@@ -1200,7 +1200,7 @@ zfs_rezget(znode_t *zp)
}
rw_exit(&zp->z_xattr_lock);
- ASSERT(zp->z_sa_hdl == NULL);
+ ASSERT0P(zp->z_sa_hdl);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
@@ -1314,9 +1314,9 @@ zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
- VERIFY(0 == dmu_object_free(os, acl_obj, tx));
+ VERIFY0(dmu_object_free(os, acl_obj, tx));
}
- VERIFY(0 == dmu_object_free(os, obj, tx));
+ VERIFY0(dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
@@ -1536,7 +1536,7 @@ zfs_extend(znode_t *zp, uint64_t end)
zp->z_size = end;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
@@ -1726,7 +1726,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
- VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
+ VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
@@ -1793,7 +1793,7 @@ log:
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
@@ -1840,7 +1840,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Set starting attributes.
@@ -1853,7 +1853,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
const char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
- VERIFY(nvpair_value_uint64(elem, &val) == 0);
+ VERIFY0(nvpair_value_uint64(elem, &val));
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
@@ -1861,7 +1861,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
- ASSERT(error == 0);
+ ASSERT0(error);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
@@ -1869,7 +1869,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Create zap object used for SA attribute registration
@@ -1879,7 +1879,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
} else {
sa_obj = 0;
}
@@ -1889,7 +1889,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
@@ -1922,7 +1922,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Fold case on file systems that are always or sometimes case
@@ -1946,12 +1946,12 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
- VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
+ VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, zfs_init_idmap));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
index 48dae79a2373..81ac26cb0c93 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
@@ -202,7 +202,7 @@ zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags)
return (!!dentry->d_inode);
}
-static dentry_operations_t zpl_dops_snapdirs = {
+static const struct dentry_operations zpl_dops_snapdirs = {
/*
* Auto mounting of snapshots is only supported for 2.6.37 and
* newer kernels. Prior to this kernel the ops->follow_link()
@@ -215,6 +215,51 @@ static dentry_operations_t zpl_dops_snapdirs = {
.d_revalidate = zpl_snapdir_revalidate,
};
+/*
+ * For the .zfs control directory to work properly we must be able to override
+ * the default operations table and register custom .d_automount and
+ * .d_revalidate callbacks.
+ */
+static void
+set_snapdir_dentry_ops(struct dentry *dentry, unsigned int extraflags) {
+ static const unsigned int op_flags =
+ DCACHE_OP_HASH | DCACHE_OP_COMPARE |
+ DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE |
+ DCACHE_OP_PRUNE | DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_REAL;
+
+#ifdef HAVE_D_SET_D_OP
+ /*
+ * d_set_d_op() will set the DCACHE_OP_ flags according to what it
+ * finds in the passed dentry_operations, so we don't have to.
+ *
+ * We clear the flags and the old op table before calling d_set_d_op()
+ * because it issues a warning when the dentry operations table is already
+ * set.
+ */
+ dentry->d_op = NULL;
+ dentry->d_flags &= ~op_flags;
+ d_set_d_op(dentry, &zpl_dops_snapdirs);
+ dentry->d_flags |= extraflags;
+#else
+ /*
+ * Since 6.17 there's no exported way to modify dentry ops, so we have
+ * to reach in and do it ourselves. This should be safe for our very
+ * narrow use case, which is to create or splice in an entry to give
+ * access to a snapshot.
+ *
+ * We need to set the op flags directly. We hardcode
+ * DCACHE_OP_REVALIDATE because that's the only operation we have; if
+ * we ever extend zpl_dops_snapdirs we will need to update the op flags
+ * to match.
+ */
+ spin_lock(&dentry->d_lock);
+ dentry->d_op = &zpl_dops_snapdirs;
+ dentry->d_flags &= ~op_flags;
+ dentry->d_flags |= DCACHE_OP_REVALIDATE | extraflags;
+ spin_unlock(&dentry->d_lock);
+#endif
+}
+
static struct dentry *
zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
unsigned int flags)
@@ -236,10 +281,7 @@ zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
return (ERR_PTR(error));
ASSERT(error == 0 || ip == NULL);
- d_clear_d_op(dentry);
- d_set_d_op(dentry, &zpl_dops_snapdirs);
- dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
-
+ set_snapdir_dentry_ops(dentry, DCACHE_NEED_AUTOMOUNT);
return (d_splice_alias(ip, dentry));
}
@@ -373,8 +415,7 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0);
if (error == 0) {
- d_clear_d_op(dentry);
- d_set_d_op(dentry, &zpl_dops_snapdirs);
+ set_snapdir_dentry_ops(dentry, 0);
d_instantiate(dentry, ip);
}
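
set_snapdir_dentry_ops() has to keep the cached DCACHE_OP_ capability bits in sync with whatever the installed operations table actually provides. The same bookkeeping, abstracted with toy types and flag names:

#include <stddef.h>
#include <stdint.h>

#define OP_REVALIDATE   0x1u
#define OP_AUTOMOUNT    0x2u
#define OP_ALL          (OP_REVALIDATE | OP_AUTOMOUNT)

struct ops {
    int (*revalidate)(void *);
    void *(*automount)(void *);
};

struct node {
    const struct ops *op;
    uint32_t flags;
};

static void
set_ops(struct node *n, const struct ops *op, uint32_t extraflags)
{
    uint32_t f = 0;

    /* set a capability bit only for operations the table provides */
    if (op->revalidate != NULL)
        f |= OP_REVALIDATE;
    if (op->automount != NULL)
        f |= OP_AUTOMOUNT;

    n->op = op;
    n->flags = (n->flags & ~OP_ALL) | f | extraflags;
}
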
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index ef7bd7352084..d07317b0d910 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
+ * Copyright (c) 2025, Klara, Inc.
*/
@@ -106,6 +107,10 @@ zpl_iterate(struct file *filp, struct dir_context *ctx)
return (error);
}
+static inline int
+zpl_write_cache_pages(struct address_space *mapping,
+ struct writeback_control *wbc, void *data);
+
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
@@ -115,9 +120,38 @@ zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
int error;
fstrans_cookie_t cookie;
- error = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (error)
- return (error);
+ /*
+ * Force dirty pages in the range out to the DMU and the log, ready
+ * for zil_commit() to write down.
+ *
+ * We call write_cache_pages() directly to ensure that zpl_putpage() is
+ * called with the flags we need. We need WB_SYNC_NONE to avoid a call
+ * to zil_commit() (since we're doing this as a kind of pre-sync); but
+ * we do need for_sync so that the pages remain in writeback until
+ * they're on disk, and so that we get an error if the DMU write fails.
+ */
+ if (filemap_range_has_page(inode->i_mapping, start, end)) {
+ int for_sync = 1;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = LONG_MAX,
+ .range_start = start,
+ .range_end = end,
+ };
+ error =
+ zpl_write_cache_pages(inode->i_mapping, &wbc, &for_sync);
+ if (error != 0) {
+ /*
+ * Unclear what state things are in. zfs_putpage() will
+ * ensure the pages remain dirty if they haven't been
+ * written down to the DMU, but because there may be
+ * nothing logged, we can't assume that zfs_sync() ->
+ * zil_commit() will give us a useful error. It's
+ * safest if we just error out here.
+ */
+ return (error);
+ }
+ }
crhold(cr);
cookie = spl_fstrans_mark();
@@ -494,11 +528,30 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (sync_mode != wbc->sync_mode) {
if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (result);
- if (zfsvfs->z_log != NULL)
- zil_commit(zfsvfs->z_log, zp->z_id);
+
+ if (zfsvfs->z_log != NULL) {
+ /*
+ * We don't want to block here if the pool suspends,
+ * because this is not a syncing op by itself, but
+ * might be part of one that the caller will
+ * coordinate.
+ */
+ result = -zil_commit_flags(zfsvfs->z_log, zp->z_id,
+ ZIL_COMMIT_NOW);
+ }
+
zpl_exit(zfsvfs, FTAG);
/*
+ * If zil_commit_flags() failed, it's unclear what state things
+ * are currently in. putpage() has written back out what it can
+ * to the DMU, but it may not be on disk. We have little choice
+ * but to escape.
+ */
+ if (result != 0)
+ return (result);
+
+ /*
* We need to call write_cache_pages() again (we can't just
* return after the commit) because the previous call in
* non-SYNC mode does not guarantee that we got all the dirty
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
index f9f6406f8b47..f97662d052c7 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
@@ -247,7 +247,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
* and fifos, but we want to know if this behavior ever changes.
*/
if (S_ISSOCK(mode) || S_ISFIFO(mode))
- ASSERT(rdev == 0);
+ ASSERT0(rdev);
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
index 94dcdd0b887d..444948d03cb3 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2023, Datto Inc. All rights reserved.
+ * Copyright (c) 2025, Klara, Inc.
*/
@@ -33,6 +34,20 @@
#include <linux/iversion.h>
#include <linux/version.h>
+/*
+ * What to do when the last reference to an inode is released. If 0, the kernel
+ * will cache it on the superblock. If 1, the inode will be freed immediately.
+ * See zpl_drop_inode().
+ */
+int zfs_delete_inode = 0;
+
+/*
+ * What to do when the last reference to a dentry is released. If 0, the kernel
+ * will cache it until the entry (file) is destroyed. If 1, the dentry will be
+ * marked for cleanup, at which time its inode reference will be released. See
+ * zpl_dentry_delete().
+ */
+int zfs_delete_dentry = 0;
static struct inode *
zpl_inode_alloc(struct super_block *sb)
@@ -49,7 +64,7 @@ zpl_inode_alloc(struct super_block *sb)
static void
zpl_inode_free(struct inode *ip)
{
- ASSERT(atomic_read(&ip->i_count) == 0);
+ ASSERT0(atomic_read(&ip->i_count));
zfs_inode_free(ip);
}
#endif
@@ -57,7 +72,7 @@ zpl_inode_free(struct inode *ip)
static void
zpl_inode_destroy(struct inode *ip)
{
- ASSERT(atomic_read(&ip->i_count) == 0);
+ ASSERT0(atomic_read(&ip->i_count));
zfs_inode_destroy(ip);
}
@@ -77,11 +92,36 @@ zpl_dirty_inode(struct inode *ip, int flags)
}
/*
- * When ->drop_inode() is called its return value indicates if the
- * inode should be evicted from the inode cache. If the inode is
- * unhashed and has no links the default policy is to evict it
- * immediately.
+ * ->drop_inode() is called when the last reference to an inode is released.
+ * Its return value indicates if the inode should be destroyed immediately, or
+ * cached on the superblock structure.
+ *
+ * By default (zfs_delete_inode=0), we call generic_drop_inode(), which returns
+ * "destroy immediately" if the inode is unhashed and has no links (roughly: no
+ * longer exists on disk). On datasets with millions of rarely-accessed files,
+ * this can cause a large amount of memory to be "pinned" by cached inodes,
+ * which in turn pin their associated dnodes and dbufs, until the kernel starts
+ * reporting memory pressure and requests that OpenZFS release some memory (see
+ * zfs_prune()).
+ *
+ * When set to 1, we call generic_delete_inode(), which always returns "destroy
+ * immediately", resulting in inodes being destroyed immediately, releasing
+ * their associated dnodes and dbufs to the dbuf cache and the ARC to be
+ * evicted as normal.
*
+ * Note that the "last reference" doesn't always mean the last _userspace_
+ * reference; the dentry cache also holds a reference, so "busy" inodes will
+ * still be kept alive that way (subject to dcache tuning).
+ */
+static int
+zpl_drop_inode(struct inode *ip)
+{
+ if (zfs_delete_inode)
+ return (generic_delete_inode(ip));
+ return (generic_drop_inode(ip));
+}
+
+/*
* The ->evict_inode() callback must minimally truncate the inode pages,
* and call clear_inode(). For 2.6.35 and later kernels this will
* simply update the inode state, with the sync occurring before the
@@ -470,6 +510,7 @@ const struct super_operations zpl_super_operations = {
.destroy_inode = zpl_inode_destroy,
.dirty_inode = zpl_dirty_inode,
.write_inode = NULL,
+ .drop_inode = zpl_drop_inode,
.evict_inode = zpl_evict_inode,
.put_super = zpl_put_super,
.sync_fs = zpl_sync_fs,
@@ -480,6 +521,35 @@ const struct super_operations zpl_super_operations = {
.show_stats = NULL,
};
+/*
+ * ->d_delete() is called when the last reference to a dentry is released. Its
+ * return value indicates if the dentry should be destroyed immediately, or
+ * retained in the dentry cache.
+ *
+ * By default (zfs_delete_dentry=0) the kernel will always cache unused
+ * entries. Each dentry holds an inode reference, so cached dentries can hold
+ * the final inode reference indefinitely, leading to the inode and its related
+ * data being pinned (see zpl_drop_inode()).
+ *
+ * When set to 1, we signal that the dentry should be destroyed immediately and
+ * never cached. This reduces memory usage, at the cost of higher overhead
+ * to look up a file, as the inode and its underlying data (dnode/dbuf) need
+ * to be reloaded and reinflated.
+ *
+ * Note that userspace does not have direct control over dentry references and
+ * reclaim; rather, this is part of the kernel's caching and reclaim subsystems
+ * (e.g. vm.vfs_cache_pressure).
+ */
+static int
+zpl_dentry_delete(const struct dentry *dentry)
+{
+ return (zfs_delete_dentry ? 1 : 0);
+}
+
+const struct dentry_operations zpl_dentry_operations = {
+ .d_delete = zpl_dentry_delete,
+};
+
struct file_system_type zpl_fs_type = {
.owner = THIS_MODULE,
.name = ZFS_DRIVER,
@@ -491,3 +561,10 @@ struct file_system_type zpl_fs_type = {
.mount = zpl_mount,
.kill_sb = zpl_kill_sb,
};
+
+ZFS_MODULE_PARAM(zfs, zfs_, delete_inode, INT, ZMOD_RW,
+ "Delete inodes as soon as the last reference is released.");
+
+ZFS_MODULE_PARAM(zfs, zfs_, delete_dentry, INT, ZMOD_RW,
+ "Delete dentries from dentry cache as soon as the last reference is "
+ "released.");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
index a098197e7448..d93282db815a 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
@@ -1494,7 +1494,7 @@ zpl_posix_acl_free(void *arg)
acl_rel_head = NULL;
if (cmpxchg(&acl_rel_tail, &a->next,
&acl_rel_head) == &a->next) {
- ASSERT3P(a->next, ==, NULL);
+ ASSERT0P(a->next);
a->next = freelist;
freelist = a;
break;
@@ -1544,7 +1544,7 @@ zpl_posix_acl_release_impl(struct posix_acl *acl)
a->time = ddi_get_lbolt();
/* atomically points tail to us and get the previous tail */
prev = xchg(&acl_rel_tail, &a->next);
- ASSERT3P(*prev, ==, NULL);
+ ASSERT0P(*prev);
*prev = a;
/* if it was empty before, schedule the free task */
if (prev == &acl_rel_head)
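The two assertions above belong to the lock-free release list used by the POSIX ACL code: each producer atomically swings acl_rel_tail to its own next-slot, then publishes itself through the slot it took over, which must still be NULL at that point. A standalone userspace sketch of the same enqueue pattern, with C11 atomics standing in for the kernel's xchg():

#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static struct node *head;			/* consumer-side list head */
static _Atomic(struct node **) tail = &head;	/* last next-slot in the list */

static void
enqueue(struct node *n)
{
	n->next = NULL;
	/* Atomically claim the previous tail slot. */
	struct node **prev = atomic_exchange(&tail, &n->next);
	/* The claimed slot is still empty -- what ASSERT0P(*prev) checks. */
	*prev = n;
}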
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index a7431cc4da9d..967a018640e1 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -22,7 +22,7 @@
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2024, Rob Norris <robn@despairlabs.com>
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
#include <sys/dataset_kstats.h>
@@ -84,8 +84,9 @@ static unsigned int zvol_blk_mq_blocks_per_thread = 8;
static inline void
zvol_end_io(struct bio *bio, struct request *rq, int error)
{
+ ASSERT3U(error, >=, 0);
if (bio) {
- bio->bi_status = errno_to_bi_status(-error);
+ bio->bi_status = errno_to_bi_status(error);
bio_endio(bio);
} else {
blk_mq_end_request(rq, errno_to_bi_status(error));
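This hunk flips zvol_end_io() to the ZFS-internal convention of positive errnos, and the rest of the file is converted to match: call sites stop negating, and the one remaining negation happens at the Linux API boundary. Sketched (not a literal excerpt):

/* Inside the zvol code: positive errno, ZFS style. */
error = SET_ERROR(EIO);			/* 5 */
zvol_end_io(bio, rq, error);		/* no negation at the call site */

/* At the Linux boundary: the kernel hands back -errno, so negate once. */
error = -register_blkdev(zvol_major, ZVOL_DRIVER);
if (error)
	return (SET_ERROR(error));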
@@ -208,8 +209,14 @@ zvol_write(zv_request_t *zvr)
disk = zv->zv_zso->zvo_disk;
/* bio marked as FLUSH need to flush before write */
- if (io_is_flush(bio, rq))
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (io_is_flush(bio, rq)) {
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (error != 0) {
+ rw_exit(&zv->zv_suspend_lock);
+ zvol_end_io(bio, rq, error);
+ return;
+ }
+ }
/* Some requests are just for flush and nothing else. */
if (io_size(bio, rq) == 0) {
@@ -273,8 +280,8 @@ zvol_write(zv_request_t *zvr)
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
task_io_account_write(nwritten);
- if (sync)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (error == 0 && sync)
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
@@ -282,7 +289,7 @@ zvol_write(zv_request_t *zvr)
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
}
- zvol_end_io(bio, rq, -error);
+ zvol_end_io(bio, rq, error);
}
static void
@@ -361,7 +368,7 @@ zvol_discard(zv_request_t *zvr)
zfs_rangelock_exit(lr);
if (error == 0 && sync)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
unlock:
rw_exit(&zv->zv_suspend_lock);
@@ -371,7 +378,7 @@ unlock:
start_time);
}
- zvol_end_io(bio, rq, -error);
+ zvol_end_io(bio, rq, error);
}
static void
@@ -449,7 +456,7 @@ zvol_read(zv_request_t *zvr)
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
}
- zvol_end_io(bio, rq, -error);
+ zvol_end_io(bio, rq, error);
}
static void
@@ -477,10 +484,31 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = io_offset(bio, rq);
uint64_t size = io_size(bio, rq);
- int rw = io_data_dir(bio, rq);
+ int rw;
+
+ if (rq != NULL) {
+ /*
+ * Flush & trim requests go down the zvol_write codepath. Or
+ * more specifically:
+ *
+ * If the request is a write, or if it's op_is_sync() and not a
+ * read, or if it's a flush, or if it's a discard, then send the
+ * request down the write path.
+ */
+ if (op_is_write(rq->cmd_flags) ||
+ (op_is_sync(rq->cmd_flags) && req_op(rq) != REQ_OP_READ) ||
+ req_op(rq) == REQ_OP_FLUSH ||
+ op_is_discard(rq->cmd_flags)) {
+ rw = WRITE;
+ } else {
+ rw = READ;
+ }
+ } else {
+ rw = bio_data_dir(bio);
+ }
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
- zvol_end_io(bio, rq, -SET_ERROR(ENXIO));
+ zvol_end_io(bio, rq, SET_ERROR(ENXIO));
goto out;
}
@@ -499,7 +527,7 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
(long long unsigned)offset,
(long unsigned)size);
- zvol_end_io(bio, rq, -SET_ERROR(EIO));
+ zvol_end_io(bio, rq, SET_ERROR(EIO));
goto out;
}
@@ -512,8 +540,8 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
#ifdef HAVE_BLK_MQ_RQ_HCTX
blk_mq_hw_queue = rq->mq_hctx->queue_num;
#else
- blk_mq_hw_queue =
- rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
+ blk_mq_hw_queue = rq->q->queue_hw_ctx[
+ rq->q->mq_map[raw_smp_processor_id()]]->queue_num;
#endif
taskq_hash = cityhash3((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
blk_mq_hw_queue);
@@ -521,7 +549,7 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
- zvol_end_io(bio, rq, -SET_ERROR(EROFS));
+ zvol_end_io(bio, rq, SET_ERROR(EROFS));
goto out;
}
@@ -672,28 +700,19 @@ zvol_open(struct block_device *bdev, fmode_t flag)
retry:
#endif
- rw_enter(&zvol_state_lock, RW_READER);
- /*
- * Obtain a copy of private_data under the zvol_state_lock to make
- * sure that either the result of zvol free code path setting
- * disk->private_data to NULL is observed, or zvol_os_free()
- * is not called on this zv because of the positive zv_open_count.
- */
+
#ifdef HAVE_BLK_MODE_T
- zv = disk->private_data;
+ zv = atomic_load_ptr(&disk->private_data);
#else
- zv = bdev->bd_disk->private_data;
+ zv = atomic_load_ptr(&bdev->bd_disk->private_data);
#endif
if (zv == NULL) {
- rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
-
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
@@ -705,8 +724,28 @@ retry:
if (zv->zv_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
+
+ /*
+ * Removal may happen while the locks are down, so
+ * we can't trust zv any longer; we have to start over.
+ */
+#ifdef HAVE_BLK_MODE_T
+ zv = atomic_load_ptr(&disk->private_data);
+#else
+ zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+#endif
+ if (zv == NULL)
+ return (-SET_ERROR(ENXIO));
+
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ return (-SET_ERROR(ENXIO));
+ }
+
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -717,7 +756,6 @@ retry:
drop_suspend = B_TRUE;
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -814,11 +852,11 @@ zvol_release(struct gendisk *disk, fmode_t unused)
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
(void) unused;
#endif
- zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
- rw_enter(&zvol_state_lock, RW_READER);
- zv = disk->private_data;
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
+ if (zv == NULL)
+ return;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
@@ -832,6 +870,15 @@ zvol_release(struct gendisk *disk, fmode_t unused)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_open(), we don't check if removal
+ * started here, because we might be one of the openers
+ * that needs to be thrown out! If we're the last, we
+ * need to call zvol_last_close() below to finish
+ * cleanup. So, no special treatment for us.
+ */
+
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
@@ -841,7 +888,6 @@ zvol_release(struct gendisk *disk, fmode_t unused)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -861,9 +907,10 @@ static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
+ zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+ ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
@@ -886,16 +933,18 @@ zvol_ioctl(struct block_device *bdev, fmode_t mode,
case BLKZNAME:
mutex_enter(&zv->zv_state_lock);
- error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
+ error = -copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
mutex_exit(&zv->zv_state_lock);
+ if (error)
+ error = SET_ERROR(error);
break;
default:
- error = -ENOTTY;
+ error = SET_ERROR(ENOTTY);
break;
}
- return (SET_ERROR(error));
+ return (-error);
}
#ifdef CONFIG_COMPAT
@@ -914,9 +963,8 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
- rw_enter(&zvol_state_lock, RW_READER);
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
- zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
@@ -924,17 +972,14 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
mutex_exit(&zv->zv_state_lock);
}
- rw_exit(&zvol_state_lock);
-
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
- rw_enter(&zvol_state_lock, RW_READER);
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
- zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
@@ -942,8 +987,6 @@ zvol_revalidate_disk(struct gendisk *disk)
mutex_exit(&zv->zv_state_lock);
}
- rw_exit(&zvol_state_lock);
-
return (0);
}
@@ -962,16 +1005,6 @@ zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
return (0);
}
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
- /*
- * Cleared while holding zvol_state_lock as a writer
- * which will prevent zvol_open() from opening it.
- */
- zv->zv_zso->zvo_disk->private_data = NULL;
-}
-
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
@@ -981,9 +1014,10 @@ zvol_os_clear_private(zvol_state_t *zv)
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
+ zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+ ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
@@ -1408,53 +1442,70 @@ out_kmem:
return (ret);
}
-/*
- * Cleanup then free a zvol_state_t which was created by zvol_alloc().
- * At this time, the structure is not opened by anyone, is taken off
- * the zvol_state_list, and has its private data set to NULL.
- * The zvol_state_lock is dropped.
- *
- * This function may take many milliseconds to complete (e.g. we've seen
- * it take over 256ms), due to the calls to "blk_cleanup_queue" and
- * "del_gendisk". Thus, consumers need to be careful to account for this
- * latency when calling this function.
- */
void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
{
-
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
- ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
+ ASSERT0(atomic_read(&zv->zv_suspend_ref));
+ ASSERT(zv->zv_flags & ZVOL_REMOVING);
- rw_destroy(&zv->zv_suspend_lock);
- zfs_rangelock_fini(&zv->zv_rangelock);
+ struct zvol_state_os *zso = zv->zv_zso;
+ zv->zv_zso = NULL;
+
+ /* Clearing private_data will make new callers return immediately. */
+ atomic_store_ptr(&zso->zvo_disk->private_data, NULL);
+
+ /*
+ * Drop the state lock before calling del_gendisk(). There may be
+ * callers waiting to acquire it, but del_gendisk() will block until
+ * they exit, which would deadlock.
+ */
+ mutex_exit(&zv->zv_state_lock);
- del_gendisk(zv->zv_zso->zvo_disk);
+ del_gendisk(zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
#if defined(HAVE_BLK_CLEANUP_DISK)
- blk_cleanup_disk(zv->zv_zso->zvo_disk);
+ blk_cleanup_disk(zso->zvo_disk);
#else
- put_disk(zv->zv_zso->zvo_disk);
+ put_disk(zso->zvo_disk);
#endif
#else
- blk_cleanup_queue(zv->zv_zso->zvo_queue);
- put_disk(zv->zv_zso->zvo_disk);
+ blk_cleanup_queue(zso->zvo_queue);
+ put_disk(zso->zvo_disk);
#endif
- if (zv->zv_zso->use_blk_mq)
- blk_mq_free_tag_set(&zv->zv_zso->tag_set);
+ if (zso->use_blk_mq)
+ blk_mq_free_tag_set(&zso->tag_set);
+
+ ida_simple_remove(&zvol_ida, MINOR(zso->zvo_dev) >> ZVOL_MINOR_BITS);
+
+ kmem_free(zso, sizeof (struct zvol_state_os));
+
+ mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+
+ ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT0(zv->zv_open_count);
+ ASSERT0P(zv->zv_zso);
- ida_simple_remove(&zvol_ida,
- MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
+ ASSERT0P(zv->zv_objset);
+ ASSERT0P(zv->zv_zilog);
+ ASSERT0P(zv->zv_dn);
+
+ rw_destroy(&zv->zv_suspend_lock);
+ zfs_rangelock_fini(&zv->zv_rangelock);
cv_destroy(&zv->zv_removing_cv);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
- kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
@@ -1474,7 +1525,9 @@ __zvol_os_add_disk(struct gendisk *disk)
{
int error = 0;
#ifdef HAVE_ADD_DISK_RET
- error = add_disk(disk);
+ error = -add_disk(disk);
+ if (error)
+ error = SET_ERROR(error);
#else
add_disk(disk);
#endif
@@ -1649,11 +1702,11 @@ zvol_os_create_minor(const char *name)
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif
- ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zv->zv_kstat.dk_kstats);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
- ASSERT3P(zv->zv_zilog, ==, NULL);
+ ASSERT0P(zv->zv_zilog);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
@@ -1759,10 +1812,10 @@ zvol_init(void)
return (error);
}
- error = register_blkdev(zvol_major, ZVOL_DRIVER);
+ error = -register_blkdev(zvol_major, ZVOL_DRIVER);
if (error) {
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
- return (error);
+ return (SET_ERROR(error));
}
if (zvol_blk_mq_queue_depth == 0) {
diff --git a/sys/contrib/openzfs/module/zcommon/simd_stat.c b/sys/contrib/openzfs/module/zcommon/simd_stat.c
index 11e2080ff9f2..007ae9e4fbbc 100644
--- a/sys/contrib/openzfs/module/zcommon/simd_stat.c
+++ b/sys/contrib/openzfs/module/zcommon/simd_stat.c
@@ -118,6 +118,10 @@ simd_stat_kstat_data(char *buf, size_t size, void *data)
"pclmulqdq", zfs_pclmulqdq_available());
off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
"movbe", zfs_movbe_available());
+ off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
+ "vaes", zfs_vaes_available());
+ off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
+ "vpclmulqdq", zfs_vpclmulqdq_available());
off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
"osxsave", boot_cpu_has(X86_FEATURE_OSXSAVE));
diff --git a/sys/contrib/openzfs/module/zcommon/zfs_deleg.c b/sys/contrib/openzfs/module/zcommon/zfs_deleg.c
index 49bb534ca26c..87596558c9a1 100644
--- a/sys/contrib/openzfs/module/zcommon/zfs_deleg.c
+++ b/sys/contrib/openzfs/module/zcommon/zfs_deleg.c
@@ -59,6 +59,7 @@ const zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = {
{ZFS_DELEG_PERM_SNAPSHOT},
{ZFS_DELEG_PERM_SHARE},
{ZFS_DELEG_PERM_SEND},
+ {ZFS_DELEG_PERM_SEND_RAW},
{ZFS_DELEG_PERM_USERPROP},
{ZFS_DELEG_PERM_USERQUOTA},
{ZFS_DELEG_PERM_GROUPQUOTA},
diff --git a/sys/contrib/openzfs/module/zcommon/zfs_prop.c b/sys/contrib/openzfs/module/zcommon/zfs_prop.c
index 864e3898b365..9190ae0362ea 100644
--- a/sys/contrib/openzfs/module/zcommon/zfs_prop.c
+++ b/sys/contrib/openzfs/module/zcommon/zfs_prop.c
@@ -364,8 +364,8 @@ zfs_prop_init(void)
static const zprop_index_t xattr_table[] = {
{ "off", ZFS_XATTR_OFF },
- { "on", ZFS_XATTR_SA },
{ "sa", ZFS_XATTR_SA },
+ { "on", ZFS_XATTR_SA },
{ "dir", ZFS_XATTR_DIR },
{ NULL }
};
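The swap is not cosmetic: index-to-name lookups scan a zprop table in order and return the first entry whose value matches, so with "sa" listed first, ZFS_XATTR_SA now displays as "sa" rather than "on". A minimal sketch of that lookup (hypothetical helper name; the real one lives in the shared zprop code):

static const char *
index_to_string(const zprop_index_t *tbl, uint64_t value)
{
	for (int i = 0; tbl[i].pi_name != NULL; i++) {
		if (tbl[i].pi_value == value)
			return (tbl[i].pi_name);
	}
	return (NULL);
}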
diff --git a/sys/contrib/openzfs/module/zcommon/zpool_prop.c b/sys/contrib/openzfs/module/zcommon/zpool_prop.c
index 04ae9f986d8f..07819ba2be8b 100644
--- a/sys/contrib/openzfs/module/zcommon/zpool_prop.c
+++ b/sys/contrib/openzfs/module/zcommon/zpool_prop.c
@@ -467,9 +467,15 @@ vdev_prop_init(void)
zprop_register_index(VDEV_PROP_RAIDZ_EXPANDING, "raidz_expanding", 0,
PROP_READONLY, ZFS_TYPE_VDEV, "on | off", "RAIDZ_EXPANDING",
boolean_table, sfeatures);
+ zprop_register_index(VDEV_PROP_SIT_OUT, "sit_out", 0,
+ PROP_DEFAULT, ZFS_TYPE_VDEV, "on | off", "SIT_OUT", boolean_table,
+ sfeatures);
zprop_register_index(VDEV_PROP_TRIM_SUPPORT, "trim_support", 0,
PROP_READONLY, ZFS_TYPE_VDEV, "on | off", "TRIMSUP",
boolean_table, sfeatures);
+ zprop_register_index(VDEV_PROP_AUTOSIT, "autosit", 0,
+ PROP_DEFAULT, ZFS_TYPE_VDEV, "on | off", "AUTOSIT", boolean_table,
+ sfeatures);
/* default index properties */
zprop_register_index(VDEV_PROP_FAILFAST, "failfast", B_TRUE,
diff --git a/sys/contrib/openzfs/module/zfs/abd.c b/sys/contrib/openzfs/module/zfs/abd.c
index 826928e67350..bf9b13c30509 100644
--- a/sys/contrib/openzfs/module/zfs/abd.c
+++ b/sys/contrib/openzfs/module/zfs/abd.c
@@ -563,7 +563,7 @@ abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
left -= csize;
off = 0;
}
- ASSERT3U(left, ==, 0);
+ ASSERT0(left);
} else {
abd = abd_get_offset_scatter(abd, sabd, off, size);
}
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 3483be64ec57..b677f90280d7 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1392,6 +1392,7 @@ arc_get_complevel(arc_buf_t *buf)
return (buf->b_hdr->b_complevel);
}
+__maybe_unused
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
@@ -2239,8 +2240,8 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
@@ -2278,8 +2279,8 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
@@ -2319,7 +2320,7 @@ add_reference(arc_buf_hdr_t *hdr, const void *tag)
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
}
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
@@ -2503,7 +2504,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
@@ -2547,7 +2548,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
/*
@@ -2758,7 +2759,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
- ASSERT3P(*ret, ==, NULL);
+ ASSERT0P(*ret);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
@@ -2982,7 +2983,7 @@ static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
@@ -3201,14 +3202,14 @@ arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
- ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
+ ASSERT0P(hdr->b_crypt_hdr.b_rabd);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
@@ -3290,7 +3291,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
ASSERT(HDR_EMPTY(hdr));
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
@@ -3351,12 +3352,12 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
- ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(nhdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
/*
@@ -3375,7 +3376,7 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
* might try to be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
- VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ VERIFY0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
@@ -3698,12 +3699,12 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
arc_hdr_free_abd(hdr, B_TRUE);
}
- ASSERT3P(hdr->b_hash_next, ==, NULL);
+ ASSERT0P(hdr->b_hash_next);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
kmem_cache_free(hdr_full_cache, hdr);
} else {
@@ -3771,7 +3772,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
*real_evicted = 0;
@@ -3796,7 +3797,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
- ASSERT(hdr->b_l1hdr.b_pabd == NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
@@ -5554,7 +5555,7 @@ static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
- ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
+ ASSERT0(HDR_GET_PSIZE(hdr));
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
@@ -6132,14 +6133,14 @@ top:
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
@@ -6233,7 +6234,7 @@ top:
acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
hdr->b_l1hdr.b_acb = acb;
if (HDR_HAS_L2HDR(hdr) &&
@@ -6717,7 +6718,7 @@ arc_release(arc_buf_t *buf, const void *tag)
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
- ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(nhdr->b_l1hdr.b_buf);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
@@ -6804,7 +6805,7 @@ arc_write_ready(zio_t *zio)
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
@@ -6948,7 +6949,7 @@ arc_write_done(zio_t *zio)
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
@@ -6973,7 +6974,7 @@ arc_write_done(zio_t *zio)
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
- ASSERT3U(zio->io_error, ==, 0);
+ ASSERT0(zio->io_error);
arc_cksum_verify(buf);
@@ -6994,7 +6995,7 @@ arc_write_done(zio_t *zio)
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
- ASSERT3P(exists, ==, NULL);
+ ASSERT0P(exists);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
@@ -7007,7 +7008,7 @@ arc_write_done(zio_t *zio)
ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
- ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
+ ASSERT0(BP_GET_LEVEL(zio->io_bp));
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
@@ -7044,7 +7045,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
if (uncached)
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
@@ -7113,7 +7114,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
diff --git a/sys/contrib/openzfs/module/zfs/bpobj.c b/sys/contrib/openzfs/module/zfs/bpobj.c
index 0a8a077edf63..ea9fbd036c6e 100644
--- a/sys/contrib/openzfs/module/zfs/bpobj.c
+++ b/sys/contrib/openzfs/module/zfs/bpobj.c
@@ -160,8 +160,8 @@ bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
memset(bpo, 0, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
- ASSERT(bpo->bpo_dbuf == NULL);
- ASSERT(bpo->bpo_phys == NULL);
+ ASSERT0P(bpo->bpo_dbuf);
+ ASSERT0P(bpo->bpo_phys);
ASSERT(object != 0);
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
@@ -478,7 +478,7 @@ bpobj_iterate_impl(bpobj_t *initial_bpo, bpobj_itor_t func, void *arg,
* We have unprocessed subobjs. Process the next one.
*/
ASSERT(bpo->bpo_havecomp);
- ASSERT3P(bpobj_size, ==, NULL);
+ ASSERT0P(bpobj_size);
/* Add the last subobj to stack. */
int64_t i = bpi->bpi_unprocessed_subobjs - 1;
diff --git a/sys/contrib/openzfs/module/zfs/btree.c b/sys/contrib/openzfs/module/zfs/btree.c
index aa282f711bc3..725b96a3b2c7 100644
--- a/sys/contrib/openzfs/module/zfs/btree.c
+++ b/sys/contrib/openzfs/module/zfs/btree.c
@@ -1110,7 +1110,7 @@ zfs_btree_add_idx(zfs_btree_t *tree, const void *value,
if (where->bti_node == NULL) {
ASSERT3U(tree->bt_num_elems, ==, 1);
ASSERT3S(tree->bt_height, ==, -1);
- ASSERT3P(tree->bt_root, ==, NULL);
+ ASSERT0P(tree->bt_root);
ASSERT0(where->bti_offset);
tree->bt_num_nodes++;
@@ -1947,7 +1947,7 @@ void
zfs_btree_destroy(zfs_btree_t *tree)
{
ASSERT0(tree->bt_num_elems);
- ASSERT3P(tree->bt_root, ==, NULL);
+ ASSERT0P(tree->bt_root);
}
/* Verify that every child of this node has the correct parent pointer. */
@@ -1969,10 +1969,10 @@ static void
zfs_btree_verify_pointers(zfs_btree_t *tree)
{
if (tree->bt_height == -1) {
- VERIFY3P(tree->bt_root, ==, NULL);
+ VERIFY0P(tree->bt_root);
return;
}
- VERIFY3P(tree->bt_root->bth_parent, ==, NULL);
+ VERIFY0P(tree->bt_root->bth_parent);
zfs_btree_verify_pointers_helper(tree, tree->bt_root);
}
diff --git a/sys/contrib/openzfs/module/zfs/dataset_kstats.c b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
index d3baabd6169f..e5abcd2044cf 100644
--- a/sys/contrib/openzfs/module/zfs/dataset_kstats.c
+++ b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
@@ -44,6 +44,7 @@ static dataset_kstat_values_t empty_dataset_kstats = {
{ "zil_commit_error_count", KSTAT_DATA_UINT64 },
{ "zil_commit_stall_count", KSTAT_DATA_UINT64 },
{ "zil_commit_suspend_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_crash_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 432c99cec960..fccc4c5b5b94 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -523,7 +523,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
return;
/* Only data blocks support the attachment of user data. */
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
@@ -1128,8 +1128,8 @@ dbuf_verify(dmu_buf_impl_t *db)
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
- ASSERT(db->db_parent == NULL);
- ASSERT(db->db_blkptr == NULL);
+ ASSERT0P(db->db_parent);
+ ASSERT0P(db->db_blkptr);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
@@ -1180,7 +1180,7 @@ dbuf_verify(dmu_buf_impl_t *db)
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
- ASSERT(db->db_parent == NULL);
+ ASSERT0P(db->db_parent);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
@@ -1219,7 +1219,7 @@ dbuf_verify(dmu_buf_impl_t *db)
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
- ASSERT(buf[i] == 0);
+ ASSERT0(buf[i]);
}
} else {
blkptr_t *bps = db->db.db_data;
@@ -1259,7 +1259,7 @@ dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
- ASSERT3P(db->db_buf, ==, NULL);
+ ASSERT0P(db->db_buf);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
@@ -1384,13 +1384,13 @@ dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
- ASSERT(db->db_buf == NULL);
- ASSERT(db->db.db_data == NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
- ASSERT3P(db->db_buf, ==, NULL);
+ ASSERT0P(db->db_buf);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
@@ -1584,7 +1584,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, dmu_flags_t flags,
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
- ASSERT(db->db_buf == NULL);
+ ASSERT0P(db->db_buf);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
@@ -1682,7 +1682,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
@@ -1901,8 +1901,8 @@ dbuf_noread(dmu_buf_impl_t *db, dmu_flags_t flags)
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
- ASSERT(db->db_buf == NULL);
- ASSERT(db->db.db_data == NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
@@ -1929,7 +1929,7 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
@@ -1994,7 +1994,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
- ASSERT3P(db, ==, NULL);
+ ASSERT0P(db);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
@@ -2017,7 +2017,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
- ASSERT(db->db.db_data == NULL);
+ ASSERT0P(db->db.db_data);
mutex_exit(&db->db_mtx);
continue;
}
@@ -2270,14 +2270,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
- /*
- * We make this assert for private objects as well, but after we
- * check if we're already dirty. They are allowed to re-dirty
- * in syncing context.
- */
- ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
- dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
- (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
@@ -2289,12 +2281,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
- mutex_enter(&dn->dn_mtx);
- dnode_set_dirtyctx(dn, tx, db);
- if (tx->tx_txg > dn->dn_dirty_txg)
- dn->dn_dirty_txg = tx->tx_txg;
- mutex_exit(&dn->dn_mtx);
-
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
@@ -2313,13 +2299,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
return (dr_next);
}
- /*
- * Only valid if not already dirty.
- */
- ASSERT(dn->dn_object == 0 ||
- dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
- (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
-
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
@@ -2557,12 +2536,13 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
/*
* Due to our use of dn_nlevels below, this can only be called
- * in open context, unless we are operating on the MOS.
- * From syncing context, dn_nlevels may be different from the
- * dn_nlevels used when dbuf was dirtied.
+ * in open context, unless we are operating on the MOS or it's
+ * a special object. From syncing context, dn_nlevels may be
+ * different from the dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
+ DMU_OBJECT_IS_SPECIAL(db->db.db_object) ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
@@ -2896,8 +2876,8 @@ dmu_buf_will_clone_or_dio(dmu_buf_t *db_fake, dmu_tx_t *tx)
dbuf_clear_data(db);
}
- ASSERT3P(db->db_buf, ==, NULL);
- ASSERT3P(db->db.db_data, ==, NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db,
@@ -2932,7 +2912,7 @@ dmu_buf_will_fill_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail,
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
@@ -3144,7 +3124,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
@@ -3209,7 +3189,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
- ASSERT(db->db_buf == NULL);
+ ASSERT0P(db->db_buf);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
@@ -3269,7 +3249,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
- ASSERT(db->db_data_pending == NULL);
+ ASSERT0P(db->db_data_pending);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
@@ -3321,11 +3301,11 @@ dbuf_destroy(dmu_buf_impl_t *db)
db->db_parent = NULL;
- ASSERT(db->db_buf == NULL);
- ASSERT(db->db.db_data == NULL);
- ASSERT(db->db_hash_next == NULL);
- ASSERT(db->db_blkptr == NULL);
- ASSERT(db->db_data_pending == NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
+ ASSERT0P(db->db_hash_next);
+ ASSERT0P(db->db_blkptr);
+ ASSERT0P(db->db_data_pending);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
@@ -3960,7 +3940,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
if (fail_uncached)
return (SET_ERROR(ENOENT));
- ASSERT3P(parent, ==, NULL);
+ ASSERT0P(parent);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
@@ -4064,7 +4044,7 @@ dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
- ASSERT(dn->dn_bonus == NULL);
+ ASSERT0P(dn->dn_bonus);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
dn->dn_bonus->db_pending_evict = FALSE;
@@ -4416,7 +4396,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
- ASSERT(db->db_parent == NULL);
+ ASSERT0P(db->db_parent);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
@@ -4477,7 +4457,7 @@ dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
- ASSERT3U(db->db_level, ==, 0);
+ ASSERT0(db->db_level);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
@@ -4588,7 +4568,7 @@ dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
- ASSERT(*datap_end == 0);
+ ASSERT0(*datap_end);
#endif
}
@@ -4596,7 +4576,7 @@ static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
- ASSERT3P(dr->dr_dbuf, ==, NULL);
+ ASSERT0P(dr->dr_dbuf);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
@@ -4739,7 +4719,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
- ASSERT3P(db->db.db_data, ==, NULL);
+ ASSERT0P(db->db.db_data);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
@@ -4756,9 +4736,9 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
*/
dbuf_dirty_record_t *dr_head =
list_head(&db->db_dirty_records);
- ASSERT3P(db->db_buf, ==, NULL);
- ASSERT3P(db->db.db_data, ==, NULL);
- ASSERT3P(dr_head->dt.dl.dr_data, ==, NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
+ ASSERT0P(dr_head->dt.dl.dr_data);
ASSERT3U(dr_head->dt.dl.dr_override_state, ==, DR_OVERRIDDEN);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
diff --git a/sys/contrib/openzfs/module/zfs/ddt.c b/sys/contrib/openzfs/module/zfs/ddt.c
index e0b9fc3951ff..0dc9adc7fd4f 100644
--- a/sys/contrib/openzfs/module/zfs/ddt.c
+++ b/sys/contrib/openzfs/module/zfs/ddt.c
@@ -397,7 +397,7 @@ ddt_object_create(ddt_t *ddt, ddt_type_t type, ddt_class_t class,
ddt_object_name(ddt, type, class, name);
- ASSERT3U(*objectp, ==, 0);
+ ASSERT0(*objectp);
VERIFY0(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash));
ASSERT3U(*objectp, !=, 0);
@@ -1011,7 +1011,7 @@ ddt_free(const ddt_t *ddt, ddt_entry_t *dde)
{
if (dde->dde_io != NULL) {
for (int p = 0; p < DDT_NPHYS(ddt); p++)
- ASSERT3P(dde->dde_io->dde_lead_zio[p], ==, NULL);
+ ASSERT0P(dde->dde_io->dde_lead_zio[p]);
if (dde->dde_io->dde_repair_abd != NULL)
abd_free(dde->dde_io->dde_repair_abd);
@@ -1421,7 +1421,7 @@ ddt_key_compare(const void *x1, const void *x2)
static void
ddt_create_dir(ddt_t *ddt, dmu_tx_t *tx)
{
- ASSERT3U(ddt->ddt_dir_object, ==, 0);
+ ASSERT0(ddt->ddt_dir_object);
ASSERT3U(ddt->ddt_version, ==, DDT_VERSION_FDT);
char name[DDT_NAMELEN];
@@ -1701,9 +1701,11 @@ ddt_load(spa_t *spa)
}
}
- error = ddt_log_load(ddt);
- if (error != 0 && error != ENOENT)
- return (error);
+ if (ddt->ddt_flags & DDT_FLAG_LOG) {
+ error = ddt_log_load(ddt);
+ if (error != 0 && error != ENOENT)
+ return (error);
+ }
DDT_KSTAT_SET(ddt, dds_log_active_entries,
avl_numnodes(&ddt->ddt_log_active->ddl_tree));
@@ -2395,7 +2397,7 @@ ddt_sync(spa_t *spa, uint64_t txg)
* scan's root zio here so that we can wait for any scan IOs in
* addition to the regular ddt IOs.
*/
- ASSERT3P(scn->scn_zio_root, ==, NULL);
+ ASSERT0P(scn->scn_zio_root);
scn->scn_zio_root = rio;
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
diff --git a/sys/contrib/openzfs/module/zfs/ddt_log.c b/sys/contrib/openzfs/module/zfs/ddt_log.c
index dbd381aa9609..c7a2426f3a77 100644
--- a/sys/contrib/openzfs/module/zfs/ddt_log.c
+++ b/sys/contrib/openzfs/module/zfs/ddt_log.c
@@ -116,7 +116,7 @@ static void
ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{
ASSERT3U(ddt->ddt_dir_object, >, 0);
- ASSERT3U(ddl->ddl_object, ==, 0);
+ ASSERT0(ddl->ddl_object);
char name[DDT_NAMELEN];
ddt_log_name(ddt, name, n);
@@ -176,11 +176,13 @@ ddt_log_update_stats(ddt_t *ddt)
* that's reasonable to expect anyway.
*/
dmu_object_info_t doi;
- uint64_t nblocks;
- dmu_object_info(ddt->ddt_os, ddt->ddt_log_active->ddl_object, &doi);
- nblocks = doi.doi_physical_blocks_512;
- dmu_object_info(ddt->ddt_os, ddt->ddt_log_flushing->ddl_object, &doi);
- nblocks += doi.doi_physical_blocks_512;
+ uint64_t nblocks = 0;
+ if (dmu_object_info(ddt->ddt_os, ddt->ddt_log_active->ddl_object,
+ &doi) == 0)
+ nblocks += doi.doi_physical_blocks_512;
+ if (dmu_object_info(ddt->ddt_os, ddt->ddt_log_flushing->ddl_object,
+ &doi) == 0)
+ nblocks += doi.doi_physical_blocks_512;
ddt_object_t *ddo = &ddt->ddt_log_stats;
ddo->ddo_count =
@@ -194,7 +196,7 @@ void
ddt_log_begin(ddt_t *ddt, size_t nentries, dmu_tx_t *tx, ddt_log_update_t *dlu)
{
ASSERT3U(nentries, >, 0);
- ASSERT3P(dlu->dlu_dbp, ==, NULL);
+ ASSERT0P(dlu->dlu_dbp);
if (ddt->ddt_log_active->ddl_object == 0)
ddt_log_create(ddt, tx);
@@ -243,6 +245,13 @@ ddt_log_alloc_entry(ddt_t *ddt)
}
static void
+ddt_log_free_entry(ddt_t *ddt, ddt_log_entry_t *ddle)
+{
+ kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
+ ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+}
+
+static void
ddt_log_update_entry(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
{
/* Create the log tree entry from a live or stored entry */
@@ -347,8 +356,7 @@ ddt_log_take_first(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, ddlwe);
avl_remove(&ddl->ddl_tree, ddle);
- kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
- ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+ ddt_log_free_entry(ddt, ddle);
return (B_TRUE);
}
@@ -365,8 +373,7 @@ ddt_log_remove_key(ddt_t *ddt, ddt_log_t *ddl, const ddt_key_t *ddk)
ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, &ddlwe);
avl_remove(&ddl->ddl_tree, ddle);
- kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
- ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+ ddt_log_free_entry(ddt, ddle);
return (B_TRUE);
}
@@ -527,8 +534,7 @@ ddt_log_empty(ddt_t *ddt, ddt_log_t *ddl)
IMPLY(ddt->ddt_version == UINT64_MAX, avl_is_empty(&ddl->ddl_tree));
while ((ddle =
avl_destroy_nodes(&ddl->ddl_tree, &cookie)) != NULL) {
- kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
- ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+ ddt_log_free_entry(ddt, ddle);
}
ASSERT(avl_is_empty(&ddl->ddl_tree));
}
@@ -727,7 +733,7 @@ ddt_log_load(ddt_t *ddt)
ddle = fe;
fe = AVL_NEXT(fl, fe);
avl_remove(fl, ddle);
-
+ ddt_log_free_entry(ddt, ddle);
ddle = ae;
ae = AVL_NEXT(al, ae);
}
@@ -748,8 +754,8 @@ ddt_log_load(ddt_t *ddt)
void
ddt_log_alloc(ddt_t *ddt)
{
- ASSERT3P(ddt->ddt_log_active, ==, NULL);
- ASSERT3P(ddt->ddt_log_flushing, ==, NULL);
+ ASSERT0P(ddt->ddt_log_active);
+ ASSERT0P(ddt->ddt_log_flushing);
avl_create(&ddt->ddt_log[0].ddl_tree, ddt_key_compare,
sizeof (ddt_log_entry_t), offsetof(ddt_log_entry_t, ddle_node));
diff --git a/sys/contrib/openzfs/module/zfs/dmu.c b/sys/contrib/openzfs/module/zfs/dmu.c
index 296e58ef9cd8..a7a5c89bdafb 100644
--- a/sys/contrib/openzfs/module/zfs/dmu.c
+++ b/sys/contrib/openzfs/module/zfs/dmu.c
@@ -759,6 +759,8 @@ dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
*/
uint8_t ibps = ibs - SPA_BLKPTRSHIFT;
limit = P2ROUNDUP(dmu_prefetch_max, 1 << ibs) >> ibs;
+ if (limit == 0)
+ end2 = start2;
do {
level2++;
start2 = P2ROUNDUP(start2, 1 << ibps) >> ibps;
@@ -1343,7 +1345,7 @@ dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
if (size == 0)
return;
- VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
+ VERIFY0(dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
for (i = 0; i < numbufs; i++) {
@@ -1689,8 +1691,8 @@ dmu_object_cached_size(objset_t *os, uint64_t object,
dmu_object_info_from_dnode(dn, &doi);
- for (uint64_t off = 0; off < doi.doi_max_offset;
- off += dmu_prefetch_max) {
+ for (uint64_t off = 0; off < doi.doi_max_offset &&
+ dmu_prefetch_max > 0; off += dmu_prefetch_max) {
/* dbuf_read doesn't prefetch L1 blocks. */
dmu_prefetch_by_dnode(dn, 1, off,
dmu_prefetch_max, ZIO_PRIORITY_SYNC_READ);
@@ -1872,7 +1874,7 @@ dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
*/
BP_SET_LSIZE(bp, db->db_size);
} else if (!BP_IS_EMBEDDED(bp)) {
- ASSERT(BP_GET_LEVEL(bp) == 0);
+ ASSERT0(BP_GET_LEVEL(bp));
BP_SET_FILL(bp, 1);
}
}
@@ -2405,7 +2407,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
}
}
} else if (wp & WP_NOFILL) {
- ASSERT(level == 0);
+ ASSERT0(level);
/*
* If we're writing preallocated blocks, we aren't actually
@@ -2865,7 +2867,7 @@ byteswap_uint64_array(void *vbuf, size_t size)
size_t count = size >> 3;
int i;
- ASSERT((size & 7) == 0);
+ ASSERT0((size & 7));
for (i = 0; i < count; i++)
buf[i] = BSWAP_64(buf[i]);
@@ -2878,7 +2880,7 @@ byteswap_uint32_array(void *vbuf, size_t size)
size_t count = size >> 2;
int i;
- ASSERT((size & 3) == 0);
+ ASSERT0((size & 3));
for (i = 0; i < count; i++)
buf[i] = BSWAP_32(buf[i]);
@@ -2891,7 +2893,7 @@ byteswap_uint16_array(void *vbuf, size_t size)
size_t count = size >> 1;
int i;
- ASSERT((size & 1) == 0);
+ ASSERT0((size & 1));
for (i = 0; i < count; i++)
buf[i] = BSWAP_16(buf[i]);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_direct.c b/sys/contrib/openzfs/module/zfs/dmu_direct.c
index 930ff101eca3..d44c686088fc 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_direct.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_direct.c
@@ -95,9 +95,9 @@ dmu_write_direct_done(zio_t *zio)
abd_free(zio->io_abd);
mutex_enter(&db->db_mtx);
- ASSERT3P(db->db_buf, ==, NULL);
- ASSERT3P(dr->dt.dl.dr_data, ==, NULL);
- ASSERT3P(db->db.db_data, ==, NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(dr->dt.dl.dr_data);
+ ASSERT0P(db->db.db_data);
db->db_state = DB_UNCACHED;
mutex_exit(&db->db_mtx);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_object.c b/sys/contrib/openzfs/module/zfs/dmu_object.c
index b4ff7d224cc9..207cc6d0e713 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_object.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_object.c
@@ -90,7 +90,7 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
if (allocated_dnode != NULL) {
ASSERT3P(tag, !=, NULL);
} else {
- ASSERT3P(tag, ==, NULL);
+ ASSERT0P(tag);
tag = FTAG;
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index c135f620800f..8e6b569c2100 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -724,7 +724,7 @@ dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
if (err == 0) {
mutex_enter(&ds->ds_lock);
- ASSERT(ds->ds_objset == NULL);
+ ASSERT0P(ds->ds_objset);
ds->ds_objset = os;
mutex_exit(&ds->ds_lock);
}
@@ -2037,6 +2037,8 @@ userquota_updates_task(void *arg)
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
+ ASSERT3U(dn->dn_dirtycnt, >, 0);
+ dn->dn_dirtycnt--;
mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
@@ -2070,6 +2072,10 @@ dnode_rele_task(void *arg)
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
+ mutex_enter(&dn->dn_mtx);
+ ASSERT3U(dn->dn_dirtycnt, >, 0);
+ dn->dn_dirtycnt--;
+ mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
@@ -2226,7 +2232,7 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
rf |= DB_RF_HAVESTRUCT;
error = dmu_spill_hold_by_dnode(dn, rf,
FTAG, (dmu_buf_t **)&db);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_enter(&db->db_mtx);
data = (before) ? db->db.db_data :
dmu_objset_userquota_find_data(db, tx);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index 73227b58c140..45c7af2bdcd2 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -866,7 +866,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
*/
if (dcp == NULL && drrb->drr_fromguid == 0 &&
drba->drba_origin == NULL) {
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
dcp = &dummy_dcp;
if (featureflags & DMU_BACKUP_FEATURE_RAW)
@@ -881,7 +881,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
if (drba->drba_cookie->drc_fromsnapobj != 0) {
VERIFY0(dsl_dataset_hold_obj(dp,
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
}
if (drc->drc_heal) {
/* When healing we want to use the provided snapshot */
@@ -905,7 +905,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
if (drba->drba_origin != NULL) {
VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
FTAG, &origin));
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
}
/* Create new dataset. */
@@ -2792,7 +2792,7 @@ receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
drc->drc_rrd->bytes_read = drc->drc_bytes_read;
}
} else {
- ASSERT3P(buf, ==, NULL);
+ ASSERT0P(buf);
}
drc->drc_prev_cksum = drc->drc_cksum;
@@ -3450,7 +3450,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
break;
}
- ASSERT3P(drc->drc_rrd, ==, NULL);
+ ASSERT0P(drc->drc_rrd);
drc->drc_rrd = drc->drc_next_rrd;
drc->drc_next_rrd = NULL;
/* Allocates and loads header into drc->drc_next_rrd */
@@ -3468,7 +3468,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
drc->drc_rrd = NULL;
}
- ASSERT3P(drc->drc_rrd, ==, NULL);
+ ASSERT0P(drc->drc_rrd);
drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
drc->drc_rrd->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_redact.c b/sys/contrib/openzfs/module/zfs/dmu_redact.c
index 9226ac9e4b80..5a22ed71a5fe 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_redact.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_redact.c
@@ -1067,7 +1067,7 @@ dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
}
if (err != 0)
goto out;
- VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);
+ VERIFY0P(nvlist_next_nvpair(redactnvl, pair));
boolean_t resuming = B_FALSE;
zfs_bookmark_phys_t bookmark;
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index deeba29e159a..8ecb99d5f57c 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -962,7 +962,7 @@ do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
- ASSERT3P(srdp->abuf, ==, NULL);
+ ASSERT0P(srdp->abuf);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
@@ -2514,7 +2514,7 @@ dmu_send_impl(struct dmu_send_params *dspp)
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
- ASSERT3P(from_rl, ==, NULL);
+ ASSERT0P(from_rl);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
@@ -2891,7 +2891,7 @@ dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
&fromds);
if (err != 0) {
- ASSERT3P(fromds, ==, NULL);
+ ASSERT0P(fromds);
} else {
/*
* We need to make a deep copy of the redact
diff --git a/sys/contrib/openzfs/module/zfs/dmu_tx.c b/sys/contrib/openzfs/module/zfs/dmu_tx.c
index d85d8b89423e..40c0b3402a05 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_tx.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_tx.c
@@ -126,7 +126,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
* problem, but there's no way for it to happen (for
* now, at least).
*/
- ASSERT(dn->dn_assigned_txg == 0);
+ ASSERT0(dn->dn_assigned_txg);
dn->dn_assigned_txg = tx->tx_txg;
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
@@ -443,7 +443,7 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
dnode_t *dn = txh->txh_dnode;
int err;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
return;
@@ -607,7 +607,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
dnode_t *dn = txh->txh_dnode;
int err;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
dmu_tx_count_dnode(txh);
@@ -681,7 +681,7 @@ dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_BONUS, 0, 0);
@@ -706,7 +706,7 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
dmu_tx_hold_t *txh;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
DMU_NEW_OBJECT, THT_SPACE, space, 0);
@@ -1232,7 +1232,7 @@ dmu_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t flags)
{
int err;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
IMPLY(flags & DMU_TX_SUSPEND, flags & DMU_TX_WAIT);
ASSERT(!dsl_pool_sync_context(tx->tx_pool));
@@ -1328,7 +1328,7 @@ dmu_tx_wait(dmu_tx_t *tx)
dsl_pool_t *dp = tx->tx_pool;
hrtime_t before;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
ASSERT(!dsl_pool_config_held(tx->tx_pool));
/*
@@ -1644,12 +1644,12 @@ dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
dmu_tx_hold_spill(tx, object);
} else {
DB_DNODE_ENTER(db);
if (DB_DNODE(db)->dn_have_spill) {
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
dmu_tx_hold_spill(tx, object);
}
DB_DNODE_EXIT(db);
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 451e1533efa0..e88d394b5229 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -173,9 +173,7 @@ dnode_cons(void *arg, void *unused, int kmflag)
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
- dn->dn_dirty_txg = 0;
- dn->dn_dirtyctx = 0;
- dn->dn_dirtyctx_firstset = NULL;
+ dn->dn_dirtycnt = 0;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
@@ -214,7 +212,7 @@ dnode_dest(void *arg, void *unused)
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
- ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
+ ASSERT0P(dn->dn_free_ranges[i]);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
@@ -229,12 +227,10 @@ dnode_dest(void *arg, void *unused)
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
- ASSERT0(dn->dn_dirty_txg);
- ASSERT0(dn->dn_dirtyctx);
- ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
- ASSERT3P(dn->dn_bonus, ==, NULL);
+ ASSERT0(dn->dn_dirtycnt);
+ ASSERT0P(dn->dn_bonus);
ASSERT(!dn->dn_have_spill);
- ASSERT3P(dn->dn_zio, ==, NULL);
+ ASSERT0P(dn->dn_zio);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
@@ -318,7 +314,7 @@ dnode_kstats_update(kstat_t *ksp, int rw)
void
dnode_init(void)
{
- ASSERT(dnode_cache == NULL);
+ ASSERT0P(dnode_cache);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, KMC_RECLAIMABLE);
kmem_cache_set_move(dnode_cache, dnode_move);
@@ -509,7 +505,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
- ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
+ ASSERT0((size & (sizeof (dnode_phys_t)-1)));
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
@@ -673,7 +669,7 @@ dnode_destroy(dnode_t *dn)
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
- ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
+ ASSERT0((dn->dn_id_flags & DN_ID_NEW_EXIST));
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
@@ -692,10 +688,8 @@ dnode_destroy(dnode_t *dn)
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
- dn->dn_dirty_txg = 0;
+ dn->dn_dirtycnt = 0;
- dn->dn_dirtyctx = 0;
- dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
@@ -780,7 +774,7 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
- ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
+ ASSERT0P(dn->dn_free_ranges[i]);
}
dn->dn_type = ot;
@@ -800,11 +794,9 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
- dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
- dn->dn_dirtyctx_firstset = NULL;
- dn->dn_dirty_txg = 0;
+ dn->dn_dirtycnt = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
@@ -955,10 +947,8 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
- ndn->dn_dirty_txg = odn->dn_dirty_txg;
- ndn->dn_dirtyctx = odn->dn_dirtyctx;
- ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
- ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
+ ndn->dn_dirtycnt = odn->dn_dirtycnt;
+ ASSERT0(zfs_refcount_count(&odn->dn_tx_holds));
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
@@ -1020,9 +1010,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
- odn->dn_dirty_txg = 0;
- odn->dn_dirtyctx = 0;
- odn->dn_dirtyctx_firstset = NULL;
+ odn->dn_dirtycnt = 0;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
@@ -1273,8 +1261,8 @@ dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
- zfs_refcount_is_zero(&dn->dn_holds) &&
- !DNODE_IS_DIRTY(dn));
+ dn->dn_dirtycnt == 0 &&
+ zfs_refcount_is_zero(&dn->dn_holds));
mutex_exit(&dn->dn_mtx);
if (!can_free)
@@ -1757,17 +1745,23 @@ dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
+static boolean_t
+dnode_add_ref_locked(dnode_t *dn, const void *tag)
+{
+ ASSERT(MUTEX_HELD(&dn->dn_mtx));
+ if (zfs_refcount_is_zero(&dn->dn_holds))
+ return (FALSE);
+ VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
+ return (TRUE);
+}
+
boolean_t
dnode_add_ref(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
- if (zfs_refcount_is_zero(&dn->dn_holds)) {
- mutex_exit(&dn->dn_mtx);
- return (FALSE);
- }
- VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
+ boolean_t r = dnode_add_ref_locked(dn, tag);
mutex_exit(&dn->dn_mtx);
- return (TRUE);
+ return (r);
}
void
@@ -1830,31 +1824,20 @@ dnode_try_claim(objset_t *os, uint64_t object, int slots)
}
/*
- * Checks if the dnode itself is dirty, or is carrying any uncommitted records.
- * It is important to check both conditions, as some operations (eg appending
- * to a file) can dirty both as a single logical unit, but they are not synced
- * out atomically, so checking one and not the other can result in an object
- * appearing to be clean mid-way through a commit.
+ * Test if the dnode is dirty, or carrying uncommitted records.
*
- * Do not change this lightly! If you get it wrong, dmu_offset_next() can
- * detect a hole where there is really data, leading to silent corruption.
+ * dn_dirtycnt is the number of txgs this dnode is dirty on. It's incremented
+ * in dnode_setdirty() the first time the dnode is dirtied on a txg, and
+ * decremented in either dnode_rele_task() or userquota_updates_task() when the
+ * txg is synced out.
*/
boolean_t
dnode_is_dirty(dnode_t *dn)
{
mutex_enter(&dn->dn_mtx);
-
- for (int i = 0; i < TXG_SIZE; i++) {
- if (multilist_link_active(&dn->dn_dirty_link[i]) ||
- !list_is_empty(&dn->dn_dirty_records[i])) {
- mutex_exit(&dn->dn_mtx);
- return (B_TRUE);
- }
- }
-
+ boolean_t dirty = (dn->dn_dirtycnt != 0);
mutex_exit(&dn->dn_mtx);
-
- return (B_FALSE);
+ return (dirty);
}
void
@@ -1916,7 +1899,11 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
* dnode will hang around after we finish processing its
* children.
*/
- VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
+ mutex_enter(&dn->dn_mtx);
+ VERIFY(dnode_add_ref_locked(dn, (void *)(uintptr_t)tx->tx_txg));
+ dn->dn_dirtycnt++;
+ ASSERT3U(dn->dn_dirtycnt, <=, 3);
+ mutex_exit(&dn->dn_mtx);
(void) dbuf_dirty(dn->dn_dbuf, tx);
@@ -2221,32 +2208,6 @@ dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
mutex_exit(&dn->dn_dbufs_mtx);
}
-void
-dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
-{
- /*
- * Don't set dirtyctx to SYNC if we're just modifying this as we
- * initialize the objset.
- */
- if (dn->dn_dirtyctx == DN_UNDIRTIED) {
- dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
-
- if (ds != NULL) {
- rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
- }
- if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
- if (dmu_tx_is_syncing(tx))
- dn->dn_dirtyctx = DN_DIRTY_SYNC;
- else
- dn->dn_dirtyctx = DN_DIRTY_OPEN;
- dn->dn_dirtyctx_firstset = tag;
- }
- if (ds != NULL) {
- rrw_exit(&ds->ds_bp_rwlock, tag);
- }
- }
-}
-
static void
dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_tx_t *tx)
@@ -2304,7 +2265,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
- ASSERT(dn->dn_maxblkid == 0);
+ ASSERT0(dn->dn_maxblkid);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
@@ -2524,7 +2485,7 @@ dnode_diduse_space(dnode_t *dn, int64_t delta)
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
- ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
+ ASSERT0((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES));
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
@@ -2695,6 +2656,32 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
}
/*
+ * Adjust *offset to the next (or previous) block byte offset at lvl.
+ * Returns FALSE if *offset would overflow or underflow.
+ */
+static boolean_t
+dnode_next_block(dnode_t *dn, int flags, uint64_t *offset, int lvl)
+{
+ int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
+ int span = lvl * epbs + dn->dn_datablkshift;
+ uint64_t blkid, maxblkid;
+
+ if (span >= 8 * sizeof (uint64_t))
+ return (B_FALSE);
+
+ blkid = *offset >> span;
+ maxblkid = 1ULL << (8 * sizeof (*offset) - span);
+ if (!(flags & DNODE_FIND_BACKWARDS) && blkid + 1 < maxblkid)
+ *offset = (blkid + 1) << span;
+ else if ((flags & DNODE_FIND_BACKWARDS) && blkid > 0)
+ *offset = (blkid << span) - 1;
+ else
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
@@ -2721,7 +2708,7 @@ int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
- uint64_t initial_offset = *offset;
+ uint64_t matched = *offset;
int lvl, maxlvl;
int error = 0;
@@ -2745,16 +2732,36 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
maxlvl = dn->dn_phys->dn_nlevels;
- for (lvl = minlvl; lvl <= maxlvl; lvl++) {
+ for (lvl = minlvl; lvl <= maxlvl; ) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
- if (error != ESRCH)
+ if (error == 0 && lvl > minlvl) {
+ --lvl;
+ matched = *offset;
+ } else if (error == ESRCH && lvl < maxlvl &&
+ dnode_next_block(dn, flags, &matched, lvl)) {
+ /*
+ * Continue search at next/prev offset in lvl+1 block.
+ *
+ * Usually we only search upwards at the start of the
+ * search as higher level blocks point at a matching
+ * minlvl block in most cases, but we backtrack if not.
+ *
+ * This can happen for txg > 0 searches if the block
+ * contains only BPs/dnodes freed at that txg. It also
+ * happens if we are still syncing out the tree, and
+ * some BPs at higher levels are not updated yet.
+ *
+ * We must adjust offset to avoid coming back to the
+ * same offset and getting stuck looping forever. This
+ * also deals with the case where offset is already at
+ * the beginning or end of the object.
+ */
+ ++lvl;
+ *offset = matched;
+ } else {
break;
- }
-
- while (error == 0 && --lvl >= minlvl) {
- error = dnode_next_offset_level(dn,
- flags, offset, lvl, blkfill, txg);
+ }
}
/*
@@ -2766,9 +2773,6 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
error = 0;
}
- if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
- initial_offset < *offset : initial_offset > *offset))
- error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
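The new dnode_next_block() is pure offset arithmetic, so it can be exercised in isolation. The following is a userland model of that computation; the next_block name, the flag value, and the shifts are illustrative assumptions, not part of the tree. At level lvl each block spans lvl * epbs + datablkshift bits of byte offset, and the helper steps *offset to the first byte of the next block (or the last byte of the previous one for backward searches), refusing to wrap past either end of the object:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	DNODE_FIND_BACKWARDS	0x1	/* mirrors the in-tree flag's intent */

/* Standalone model of the dnode_next_block() arithmetic above. */
static bool
next_block(int flags, uint64_t *offset, int lvl, int epbs, int datablkshift)
{
	int span = lvl * epbs + datablkshift;	/* offset bits per block */
	uint64_t blkid, maxblkid;

	if (span >= 64)				/* 8 * sizeof (uint64_t) */
		return (false);

	blkid = *offset >> span;
	maxblkid = 1ULL << (64 - span);
	if (!(flags & DNODE_FIND_BACKWARDS) && blkid + 1 < maxblkid)
		*offset = (blkid + 1) << span;	/* first byte of next block */
	else if ((flags & DNODE_FIND_BACKWARDS) && blkid > 0)
		*offset = (blkid << span) - 1;	/* last byte of prev block */
	else
		return (false);			/* would wrap past the end */
	return (true);
}

int
main(void)
{
	/* 128K data blocks (shift 17), 128 BPs per indirect block (epbs 7). */
	uint64_t off = 0x20000;			/* inside L1 block 0 */
	if (next_block(0, &off, 1, 7, 17))
		printf("search resumes at 0x%jx\n", (uintmax_t)off);
	return (0);
}

In dnode_next_offset() this is what lets a search that returns ESRCH in one subtree resume at the adjacent block of the parent level instead of looping forever on the same offset.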
diff --git a/sys/contrib/openzfs/module/zfs/dnode_sync.c b/sys/contrib/openzfs/module/zfs/dnode_sync.c
index 4067f221f1bf..046ceddb3609 100644
--- a/sys/contrib/openzfs/module/zfs/dnode_sync.c
+++ b/sys/contrib/openzfs/module/zfs/dnode_sync.c
@@ -209,8 +209,8 @@ free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
rw_exit(&dn->dn_struct_rwlock);
if (err == ENOENT)
continue;
- ASSERT(err == 0);
- ASSERT(child->db_level == 0);
+ ASSERT0(err);
+ ASSERT0(child->db_level);
dr = dbuf_find_dirty_eq(child, txg);
/* data_old better be zeroed */
@@ -868,7 +868,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
- ASSERT3P(list_head(list), ==, NULL);
+ ASSERT0P(list_head(list));
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_bookmark.c b/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
index fdc8b7b198f0..ee574c499f9f 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
@@ -243,7 +243,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
/* error is retval of the following if-cascade */
if (strchr(source, '@') != NULL) {
dsl_dataset_t *source_snap_ds;
- ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
+ ASSERT0(snapshot_namecheck(source, NULL, NULL));
error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
if (error == 0) {
VERIFY(source_snap_ds->ds_is_snapshot);
@@ -258,7 +258,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
}
} else if (strchr(source, '#') != NULL) {
zfs_bookmark_phys_t source_phys;
- ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
+ ASSERT0(bookmark_namecheck(source, NULL, NULL));
/*
* Source must exists and be an earlier point in newbm_ds's
* timeline (newbm_ds's origin may be a snap of source's ds)
@@ -501,7 +501,7 @@ dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
sizeof (uint64_t) * num_redact_snaps);
local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
if (bookmark_redacted) {
- ASSERT3P(redaction_list, ==, NULL);
+ ASSERT0P(redaction_list);
local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
local_rl->rl_phys->rlp_last_object = UINT64_MAX;
dsl_redaction_list_long_rele(local_rl, tag);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_crypt.c b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
index 6b6bb8d45b6b..f519b937edc0 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_crypt.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
@@ -534,7 +534,7 @@ out:
static void
dsl_crypto_key_free(dsl_crypto_key_t *dck)
{
- ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);
+ ASSERT0(zfs_refcount_count(&dck->dck_holds));
/* destroy the zio_crypt_key_t */
zio_crypt_key_destroy(&dck->dck_key);
@@ -1912,7 +1912,7 @@ dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd,
/* clones always use their origin's wrapping key */
if (dsl_dir_is_clone(dd)) {
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
/*
* If this is an encrypted clone we just need to clone the
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dataset.c b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
index b767c9641419..420687480a76 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dataset.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
@@ -450,7 +450,7 @@ dsl_dataset_evict_sync(void *dbu)
{
dsl_dataset_t *ds = dbu;
- ASSERT(ds->ds_owner == NULL);
+ ASSERT0P(ds->ds_owner);
unique_remove(ds->ds_fsid_guid);
}
@@ -460,7 +460,7 @@ dsl_dataset_evict_async(void *dbu)
{
dsl_dataset_t *ds = dbu;
- ASSERT(ds->ds_owner == NULL);
+ ASSERT0P(ds->ds_owner);
ds->ds_dbuf = NULL;
@@ -1187,7 +1187,7 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
- ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
+ ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
@@ -2112,7 +2112,7 @@ dsl_dataset_sync(dsl_dataset_t *ds, zio_t *rio, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL);
- ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
+ ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
/*
* in case we had to change ds_fsid_guid when we opened it,
@@ -4180,7 +4180,7 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta;
- ASSERT(clone->ds_reserved == 0);
+ ASSERT0(clone->ds_reserved);
/*
* NOTE: On DEBUG kernels there could be a race between this and
* the check function if spa_asize_inflation is adjusted...
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
index 9ffc998ac173..41ac72bf1c16 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
@@ -1037,7 +1037,7 @@ dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
avl_tree_t *avl = lia->avl;
bplist_t *to_free = lia->to_free;
zthr_t *t = lia->t;
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
return (SET_ERROR(EINTR));
@@ -1049,7 +1049,8 @@ dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
ASSERT3U(BP_GET_CHECKSUM(bp), ==,
BP_GET_CHECKSUM(&found->le_bp));
- ASSERT3U(BP_GET_BIRTH(bp), ==, BP_GET_BIRTH(&found->le_bp));
+ ASSERT3U(BP_GET_PHYSICAL_BIRTH(bp), ==,
+ BP_GET_PHYSICAL_BIRTH(&found->le_bp));
}
if (bp_freed) {
if (found == NULL) {
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deleg.c b/sys/contrib/openzfs/module/zfs/dsl_deleg.c
index c01a06e98340..fdd37b36e280 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deleg.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deleg.c
@@ -102,7 +102,7 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
nvlist_t *perms;
nvpair_t *permpair = NULL;
- VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);
+ VERIFY0(nvpair_value_nvlist(whopair, &perms));
while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
@@ -189,8 +189,7 @@ dsl_deleg_set_sync(void *arg, dmu_tx_t *tx)
const char *perm = nvpair_name(permpair);
uint64_t n = 0;
- VERIFY(zap_update(mos, jumpobj,
- perm, 8, 1, &n, tx) == 0);
+ VERIFY0(zap_update(mos, jumpobj, perm, 8, 1, &n, tx));
spa_history_log_internal_dd(dd, "permission update", tx,
"%s %s", whokey, perm);
}
@@ -225,7 +224,7 @@ dsl_deleg_unset_sync(void *arg, dmu_tx_t *tx)
if (zap_lookup(mos, zapobj, whokey, 8,
1, &jumpobj) == 0) {
(void) zap_remove(mos, zapobj, whokey, tx);
- VERIFY(0 == zap_destroy(mos, jumpobj, tx));
+ VERIFY0(zap_destroy(mos, jumpobj, tx));
}
spa_history_log_internal_dd(dd, "permission who remove",
tx, "%s", whokey);
@@ -243,7 +242,7 @@ dsl_deleg_unset_sync(void *arg, dmu_tx_t *tx)
if (zap_count(mos, jumpobj, &n) == 0 && n == 0) {
(void) zap_remove(mos, zapobj,
whokey, tx);
- VERIFY(0 == zap_destroy(mos,
+ VERIFY0(zap_destroy(mos,
jumpobj, tx));
}
spa_history_log_internal_dd(dd, "permission remove", tx,
@@ -332,7 +331,7 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp)
basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
baseza = zap_attribute_alloc();
source = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
- VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
nvlist_t *sp_nvp;
@@ -706,7 +705,7 @@ copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
ZFS_DELEG_LOCAL, &uid);
if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) {
jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
- VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0);
+ VERIFY0(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx));
}
za = zap_attribute_alloc();
@@ -716,8 +715,7 @@ copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
uint64_t zero = 0;
ASSERT(za->za_integer_length == 8 && za->za_num_integers == 1);
- VERIFY(zap_update(mos, jumpobj, za->za_name,
- 8, 1, &zero, tx) == 0);
+ VERIFY0(zap_update(mos, jumpobj, za->za_name, 8, 1, &zero, tx));
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
@@ -761,10 +759,10 @@ dsl_deleg_destroy(objset_t *mos, uint64_t zapobj, dmu_tx_t *tx)
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za->za_integer_length == 8 && za->za_num_integers == 1);
- VERIFY(0 == zap_destroy(mos, za->za_first_integer, tx));
+ VERIFY0(zap_destroy(mos, za->za_first_integer, tx));
}
zap_cursor_fini(&zc);
- VERIFY(0 == zap_destroy(mos, zapobj, tx));
+ VERIFY0(zap_destroy(mos, zapobj, tx));
zap_attribute_free(za);
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_destroy.c b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
index fff49c97f4d2..ea01ee586f8b 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_destroy.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
@@ -350,7 +350,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
dsl_dataset_deactivate_feature(ds, f, tx);
}
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
- ASSERT3P(ds->ds_prev, ==, NULL);
+ ASSERT0P(ds->ds_prev);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
after_branch_point =
@@ -465,7 +465,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
&used, &comp, &uncomp);
dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
dsl_dataset_rele(ds_nextnext, FTAG);
- ASSERT3P(ds_next->ds_prev, ==, NULL);
+ ASSERT0P(ds_next->ds_prev);
/* Collapse range in this head. */
dsl_dataset_t *hds;
@@ -525,7 +525,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
/* remove from snapshot namespace */
dsl_dataset_t *ds_head;
- ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
+ ASSERT0(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
VERIFY0(dsl_dataset_get_snapname(ds));
@@ -728,7 +728,7 @@ kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
*/
dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
} else {
- ASSERT(zilog == NULL);
+ ASSERT0P(zilog);
ASSERT3U(BP_GET_BIRTH(bp), >,
dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dir.c b/sys/contrib/openzfs/module/zfs/dsl_dir.c
index f24cd2049533..6ce1890cfea1 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dir.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dir.c
@@ -151,8 +151,8 @@ dsl_dir_evict_async(void *dbu)
for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
- ASSERT(dd->dd_tempreserved[t] == 0);
- ASSERT(dd->dd_space_towrite[t] == 0);
+ ASSERT0(dd->dd_tempreserved[t]);
+ ASSERT0(dd->dd_space_towrite[t]);
}
if (dd->dd_parent)
diff --git a/sys/contrib/openzfs/module/zfs/dsl_pool.c b/sys/contrib/openzfs/module/zfs/dsl_pool.c
index 4f1f66b835f2..f47822df8b53 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_pool.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_pool.c
@@ -522,8 +522,8 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
/* create and open the free_bplist */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
- VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
- DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
+ VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
@@ -1077,7 +1077,7 @@ upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
dsl_dataset_phys(prev)->ds_num_children++;
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
- ASSERT(ds->ds_prev == NULL);
+ ASSERT0P(ds->ds_prev);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev));
@@ -1173,7 +1173,7 @@ dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
dsl_dataset_t *ds;
ASSERT(dmu_tx_is_syncing(tx));
- ASSERT(dp->dp_origin_snap == NULL);
+ ASSERT0P(dp->dp_origin_snap);
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
/* create the origin dir, ds, & snap-ds */
@@ -1250,7 +1250,7 @@ dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
- ASSERT(dp->dp_tmp_userrefs_obj == 0);
+ ASSERT0(dp->dp_tmp_userrefs_obj);
ASSERT(dmu_tx_is_syncing(tx));
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
diff --git a/sys/contrib/openzfs/module/zfs/dsl_prop.c b/sys/contrib/openzfs/module/zfs/dsl_prop.c
index b76f22df61e2..51f624da5689 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_prop.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_prop.c
@@ -815,7 +815,7 @@ dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
*/
err = zap_update(mos, zapobj, recvdstr,
intsz, numints, value, tx);
- ASSERT(err == 0);
+ ASSERT0(err);
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED):
/*
@@ -1166,7 +1166,7 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
if (nvlist_exists(nv, propname))
continue;
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
if (za->za_integer_length == 1) {
/*
* String property
@@ -1179,8 +1179,7 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
kmem_free(tmp, za->za_num_integers);
break;
}
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE,
- tmp) == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, tmp));
kmem_free(tmp, za->za_num_integers);
} else {
/*
@@ -1191,8 +1190,8 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
za->za_first_integer);
}
- VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, source) == 0);
- VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_SOURCE, source));
+ VERIFY0(nvlist_add_nvlist(nv, propname, propval));
nvlist_free(propval);
}
zap_cursor_fini(&zc);
@@ -1215,7 +1214,7 @@ dsl_prop_get_all_ds(dsl_dataset_t *ds, nvlist_t **nvp,
int err = 0;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
- VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
if (ds->ds_is_snapshot)
flags |= DSL_PROP_GET_SNAPSHOT;
@@ -1333,18 +1332,18 @@ dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value)
uint64_t default_value;
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
- VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_VALUE, value));
return;
}
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_VALUE, value));
/* Indicate the default source if we can. */
if (dodefault(prop, 8, 1, &default_value) == 0 &&
value == default_value) {
- VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, "") == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_SOURCE, ""));
}
- VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
+ VERIFY0(nvlist_add_nvlist(nv, propname, propval));
nvlist_free(propval);
}
@@ -1355,13 +1354,13 @@ dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value)
const char *propname = zfs_prop_to_name(prop);
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, value));
return;
}
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
- VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, value));
+ VERIFY0(nvlist_add_nvlist(nv, propname, propval));
nvlist_free(propval);
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 5052992d775c..fcd50c459d07 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1784,7 +1784,7 @@ dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
- VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
+ VERIFY0(scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
@@ -1820,7 +1820,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
- VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
+ VERIFY0(scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
@@ -5141,7 +5141,7 @@ dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
- VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
+ VERIFY0P(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
diff --git a/sys/contrib/openzfs/module/zfs/dsl_userhold.c b/sys/contrib/openzfs/module/zfs/dsl_userhold.c
index 57c70e4ce3d2..f91b7a1eb69a 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_userhold.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_userhold.c
@@ -335,7 +335,7 @@ dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
dduha.dduha_holds = holds;
/* chkholds can have non-unique name */
- VERIFY(0 == nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
+ VERIFY0(nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
dduha.dduha_errlist = errlist;
dduha.dduha_minor = cleanup_minor;
diff --git a/sys/contrib/openzfs/module/zfs/fm.c b/sys/contrib/openzfs/module/zfs/fm.c
index a092817efedd..ae788b2310d8 100644
--- a/sys/contrib/openzfs/module/zfs/fm.c
+++ b/sys/contrib/openzfs/module/zfs/fm.c
@@ -337,7 +337,7 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
}
}
- VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
+ VERIFY0(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE));
if (size > *event_size) {
*event_size = size;
error = ENOMEM;
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 0e5f09b2724c..9f4399af56bd 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -391,7 +391,7 @@ static kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
- ASSERT(metaslab_alloc_trace_cache == NULL);
+ ASSERT0P(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
@@ -456,16 +456,16 @@ metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
- ASSERT(mc->mc_alloc == 0);
- ASSERT(mc->mc_deferred == 0);
- ASSERT(mc->mc_space == 0);
- ASSERT(mc->mc_dspace == 0);
+ ASSERT0(mc->mc_alloc);
+ ASSERT0(mc->mc_deferred);
+ ASSERT0(mc->mc_space);
+ ASSERT0(mc->mc_dspace);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
avl_destroy(&mca->mca_tree);
mutex_destroy(&mca->mca_lock);
- ASSERT(mca->mca_rotor == NULL);
+ ASSERT0P(mca->mca_rotor);
ASSERT0(mca->mca_reserved);
}
mutex_destroy(&mc->mc_lock);
@@ -1087,8 +1087,8 @@ metaslab_group_destroy(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
- ASSERT(mg->mg_prev == NULL);
- ASSERT(mg->mg_next == NULL);
+ ASSERT0P(mg->mg_prev);
+ ASSERT0P(mg->mg_next);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
@@ -1118,8 +1118,8 @@ metaslab_group_activate(metaslab_group_t *mg)
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
- ASSERT(mg->mg_prev == NULL);
- ASSERT(mg->mg_next == NULL);
+ ASSERT0P(mg->mg_prev);
+ ASSERT0P(mg->mg_next);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
@@ -1164,8 +1164,8 @@ metaslab_group_passivate(metaslab_group_t *mg)
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
- ASSERT(mg->mg_prev == NULL);
- ASSERT(mg->mg_next == NULL);
+ ASSERT0P(mg->mg_prev);
+ ASSERT0P(mg->mg_next);
ASSERT(mg->mg_activation_count < 0);
return;
}
@@ -1345,7 +1345,7 @@ metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
- ASSERT(msp->ms_group == NULL);
+ ASSERT0P(msp->ms_group);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
@@ -3017,7 +3017,7 @@ metaslab_fini(metaslab_t *msp)
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
- VERIFY(msp->ms_group == NULL);
+ VERIFY0P(msp->ms_group);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet its
@@ -5739,7 +5739,7 @@ metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
- ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
+ ASSERT0P(vd->vdev_indirect_mapping);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
@@ -5997,7 +5997,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
- ASSERT(BP_GET_NDVAS(bp) == 0);
+ ASSERT0(BP_GET_NDVAS(bp));
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
@@ -6029,7 +6029,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
smallest_psize = MIN(cur_psize, smallest_psize);
}
}
- ASSERT(error == 0);
+ ASSERT0(error);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
if (actual_psize)
*actual_psize = smallest_psize;
diff --git a/sys/contrib/openzfs/module/zfs/mmp.c b/sys/contrib/openzfs/module/zfs/mmp.c
index f3665d29b8b4..fd46127b6068 100644
--- a/sys/contrib/openzfs/module/zfs/mmp.c
+++ b/sys/contrib/openzfs/module/zfs/mmp.c
@@ -260,7 +260,7 @@ mmp_thread_stop(spa_t *spa)
zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
spa_name(spa), gethrtime());
- ASSERT(mmp->mmp_thread == NULL);
+ ASSERT0P(mmp->mmp_thread);
mmp->mmp_thread_exiting = 0;
}
@@ -446,7 +446,7 @@ mmp_write_uberblock(spa_t *spa)
uint64_t offset;
hrtime_t lock_acquire_time = gethrtime();
- spa_config_enter_mmp(spa, SCL_STATE, mmp_tag, RW_READER);
+ spa_config_enter_priority(spa, SCL_STATE, mmp_tag, RW_READER);
lock_acquire_time = gethrtime() - lock_acquire_time;
if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
diff --git a/sys/contrib/openzfs/module/zfs/multilist.c b/sys/contrib/openzfs/module/zfs/multilist.c
index 7b85d19e19ee..46fb79269310 100644
--- a/sys/contrib/openzfs/module/zfs/multilist.c
+++ b/sys/contrib/openzfs/module/zfs/multilist.c
@@ -81,7 +81,7 @@ multilist_create_impl(multilist_t *ml, size_t size, size_t offset,
ml->ml_num_sublists = num;
ml->ml_index_func = index_func;
- ml->ml_sublists = kmem_zalloc(sizeof (multilist_sublist_t) *
+ ml->ml_sublists = vmem_zalloc(sizeof (multilist_sublist_t) *
ml->ml_num_sublists, KM_SLEEP);
ASSERT3P(ml->ml_sublists, !=, NULL);
@@ -134,7 +134,7 @@ multilist_destroy(multilist_t *ml)
}
ASSERT3P(ml->ml_sublists, !=, NULL);
- kmem_free(ml->ml_sublists,
+ vmem_free(ml->ml_sublists,
sizeof (multilist_sublist_t) * ml->ml_num_sublists);
ml->ml_num_sublists = 0;
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index fc2b17606bd2..d73195f1a21f 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -377,7 +377,7 @@ zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
return;
}
- ASSERT3P(rs, ==, NULL);
+ ASSERT0P(rs);
/*
* Determine whether or not we will have to merge with our neighbors.
@@ -585,7 +585,7 @@ zfs_range_tree_remove_impl(zfs_range_tree_t *rt, uint64_t start, uint64_t size,
* the size, since we do not support removing partial segments
* of range trees with gaps.
*/
- zfs_zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
+ zfs_rs_set_fill_raw(rs, rt, zfs_rs_get_end_raw(rs, rt) -
zfs_rs_get_start_raw(rs, rt));
zfs_range_tree_stat_incr(rt, &rs_tmp);
@@ -867,7 +867,7 @@ zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
next = zfs_btree_next(&removefrom->rt_root, &where, &where);
}
- VERIFY3P(curr, ==, NULL);
+ VERIFY0P(curr);
if (start != end) {
VERIFY3U(start, <, end);
diff --git a/sys/contrib/openzfs/module/zfs/rrwlock.c b/sys/contrib/openzfs/module/zfs/rrwlock.c
index 8ee784619839..d0df39b93560 100644
--- a/sys/contrib/openzfs/module/zfs/rrwlock.c
+++ b/sys/contrib/openzfs/module/zfs/rrwlock.c
@@ -108,7 +108,7 @@ rrn_add(rrwlock_t *rrl, const void *tag)
rn->rn_rrl = rrl;
rn->rn_next = tsd_get(rrw_tsd_key);
rn->rn_tag = tag;
- VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
+ VERIFY0(tsd_set(rrw_tsd_key, rn));
}
/*
@@ -129,7 +129,7 @@ rrn_find_and_remove(rrwlock_t *rrl, const void *tag)
if (prev)
prev->rn_next = rn->rn_next;
else
- VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
+ VERIFY0(tsd_set(rrw_tsd_key, rn->rn_next));
kmem_free(rn, sizeof (*rn));
return (B_TRUE);
}
@@ -155,7 +155,7 @@ rrw_destroy(rrwlock_t *rrl)
{
mutex_destroy(&rrl->rr_lock);
cv_destroy(&rrl->rr_cv);
- ASSERT(rrl->rr_writer == NULL);
+ ASSERT0P(rrl->rr_writer);
zfs_refcount_destroy(&rrl->rr_anon_rcount);
zfs_refcount_destroy(&rrl->rr_linked_rcount);
}
@@ -188,7 +188,7 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, const void *tag)
} else {
(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
}
- ASSERT(rrl->rr_writer == NULL);
+ ASSERT0P(rrl->rr_writer);
mutex_exit(&rrl->rr_lock);
}
diff --git a/sys/contrib/openzfs/module/zfs/sa.c b/sys/contrib/openzfs/module/zfs/sa.c
index 5db470ce6242..7ad25d4d85ba 100644
--- a/sys/contrib/openzfs/module/zfs/sa.c
+++ b/sys/contrib/openzfs/module/zfs/sa.c
@@ -304,7 +304,7 @@ sa_get_spill(sa_handle_t *hdl)
if (hdl->sa_spill == NULL) {
if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
&hdl->sa_spill)) == 0)
- VERIFY(0 == sa_build_index(hdl, SA_SPILL));
+ VERIFY0(sa_build_index(hdl, SA_SPILL));
} else {
rc = 0;
}
@@ -432,7 +432,7 @@ sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
(void) snprintf(attr_name, sizeof (attr_name),
"%d", (int)lot_num);
- VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
+ VERIFY0(zap_update(os, os->os_sa->sa_layout_attr_obj,
attr_name, 2, attr_count, attrs, tx));
}
@@ -505,7 +505,7 @@ sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
}
error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
return (error);
}
@@ -717,7 +717,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
hdl->sa_spill->db_size)
- VERIFY(0 == sa_resize_spill(hdl,
+ VERIFY0(sa_resize_spill(hdl,
BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
}
@@ -791,7 +791,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
hdl->sa_bonus_tab = NULL;
}
if (!sa->sa_force_spill)
- VERIFY(0 == sa_build_index(hdl, SA_BONUS));
+ VERIFY0(sa_build_index(hdl, SA_BONUS));
if (hdl->sa_spill) {
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
if (!spilling) {
@@ -801,10 +801,10 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
dmu_buf_rele(hdl->sa_spill, NULL);
hdl->sa_spill = NULL;
hdl->sa_spill_tab = NULL;
- VERIFY(0 == dmu_rm_spill(hdl->sa_os,
+ VERIFY0(dmu_rm_spill(hdl->sa_os,
sa_handle_object(hdl), tx));
} else {
- VERIFY(0 == sa_build_index(hdl, SA_SPILL));
+ VERIFY0(sa_build_index(hdl, SA_SPILL));
}
}
@@ -1733,10 +1733,10 @@ sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
NULL, dxattr_obj, dxattr_size);
}
- VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
- VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
+ VERIFY0(dmu_set_bonustype(db, DMU_OT_SA, tx));
+ VERIFY0(sa_replace_all_by_template_locked(hdl, attrs, count, tx));
if (znode_acl.z_acl_extern_obj) {
- VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ VERIFY0(dmu_object_free(zfsvfs->z_os,
znode_acl.z_acl_extern_obj, tx));
}
@@ -1858,7 +1858,7 @@ sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
continue;
ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
tb[i].sa_byteswap);
- VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
+ VERIFY0(zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
tb[i].sa_name, 8, 1, &attr_value, tx));
tb[i].sa_registered = B_TRUE;
}
@@ -2013,7 +2013,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
* Only a variable-sized attribute can be
* replaced here, and its size must be changing.
*/
- ASSERT3U(reg_length, ==, 0);
+ ASSERT0(reg_length);
ASSERT3U(length, !=, buflen);
SA_ADD_BULK_ATTR(attr_desc, j, attr,
locator, datastart, buflen);
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 5ecb175fbd63..b3bb46da263b 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -426,10 +426,10 @@ spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
{
nvlist_t *propval;
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
- VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_SOURCE, src));
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, strval));
+ VERIFY0(nvlist_add_nvlist(nvl, propname, propval));
nvlist_free(propval);
}
@@ -965,7 +965,7 @@ spa_prop_set(spa_t *spa, nvlist_t *nvp)
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
- VERIFY(nvpair_value_uint64(elem, &ver) == 0);
+ VERIFY0(nvpair_value_uint64(elem, &ver));
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
@@ -1295,7 +1295,7 @@ spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
- ASSERT3U(tqs->stqs_count, ==, 0);
+ ASSERT0(tqs->stqs_count);
return;
}
@@ -1836,9 +1836,9 @@ static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
- ASSERT(spa->spa_dsl_pool == NULL);
- ASSERT(spa->spa_root_vdev == NULL);
- ASSERT(spa->spa_async_zio_root == NULL);
+ ASSERT0P(spa->spa_dsl_pool);
+ ASSERT0P(spa->spa_root_vdev);
+ ASSERT0P(spa->spa_async_zio_root);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
@@ -2021,7 +2021,7 @@ spa_unload_log_sm_flush_all(spa_t *spa)
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
- ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
+ ASSERT0(spa->spa_log_flushall_txg);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
@@ -2280,7 +2280,7 @@ spa_unload(spa_t *spa)
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
- ASSERT(spa->spa_root_vdev == NULL);
+ ASSERT0P(spa->spa_root_vdev);
/*
* Close the dsl pool.
@@ -2418,8 +2418,8 @@ spa_load_spares(spa_t *spa)
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
- VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
- VDEV_ALLOC_SPARE) == 0);
+ VERIFY0(spa_config_parse(spa, &vd, spares[i], NULL, 0,
+ VDEV_ALLOC_SPARE));
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
@@ -2546,8 +2546,8 @@ spa_load_l2cache(spa_t *spa)
/*
* Create new vdev
*/
- VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
- VDEV_ALLOC_L2CACHE) == 0);
+ VERIFY0(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
+ VDEV_ALLOC_L2CACHE));
ASSERT(vd != NULL);
newvdevs[i] = vd;
@@ -2799,7 +2799,7 @@ spa_passivate_log(spa_t *spa)
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
@@ -2822,7 +2822,7 @@ spa_activate_log(spa_t *spa)
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_activate(tvd->vdev_mg);
}
}
@@ -3259,7 +3259,7 @@ spa_livelist_delete_cb(void *arg, zthr_t *z)
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
+ ASSERT0P(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
@@ -3275,7 +3275,7 @@ static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
@@ -3469,7 +3469,7 @@ spa_start_livelist_condensing_thread(spa_t *spa)
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
- ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
+ ASSERT0P(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
@@ -3486,7 +3486,7 @@ spa_spawn_aux_threads(spa_t *spa)
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
- ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
+ ASSERT0P(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
@@ -4091,11 +4091,11 @@ spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
- ASSERT(spa->spa_comment == NULL);
+ ASSERT0P(spa->spa_comment);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
- ASSERT(spa->spa_compatibility == NULL);
+ ASSERT0P(spa->spa_compatibility);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
@@ -5913,7 +5913,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
- ASSERT3P(loadinfo, ==, NULL);
+ ASSERT0P(loadinfo);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
@@ -9091,7 +9091,7 @@ spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
@@ -9102,7 +9102,7 @@ spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
int
spa_scan_stop(spa_t *spa)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
@@ -9119,7 +9119,7 @@ int
spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
@@ -9548,7 +9548,7 @@ spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
- VERIFY(zio_wait(zio) == 0);
+ VERIFY0(zio_wait(zio));
}
/*
@@ -9587,7 +9587,7 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
size_t nvsize = 0;
dmu_buf_t *db;
- VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
+ VERIFY0(nvlist_size(nv, &nvsize, NV_ENCODE_XDR));
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
@@ -9597,15 +9597,15 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
- VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
+ KM_SLEEP));
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
- VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
+ VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
@@ -10541,7 +10541,7 @@ spa_sync_tq_create(spa_t *spa, const char *name)
{
kthread_t **kthreads;
- ASSERT(spa->spa_sync_tq == NULL);
+ ASSERT0P(spa->spa_sync_tq);
ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
/*
diff --git a/sys/contrib/openzfs/module/zfs/spa_config.c b/sys/contrib/openzfs/module/zfs/spa_config.c
index 7d4d06659146..f615591e826b 100644
--- a/sys/contrib/openzfs/module/zfs/spa_config.c
+++ b/sys/contrib/openzfs/module/zfs/spa_config.c
@@ -48,18 +48,17 @@
/*
* Pool configuration repository.
*
- * Pool configuration is stored as a packed nvlist on the filesystem. By
- * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
- * (when the ZFS module is loaded). Pools can also have the 'cachefile'
- * property set that allows them to be stored in an alternate location until
- * the control of external software.
+ * Pool configuration is stored as a packed nvlist on the filesystem. When
+ * pools are imported, they are added to the /etc/zfs/zpool.cache file and
+ * removed from it when exported. For each cache file, we have a single nvlist
+ * which holds all the configuration information. Pools can also have the
+ * 'cachefile' property set, which allows this config to be stored in an
+ * alternate location under the control of external software.
*
- * For each cache file, we have a single nvlist which holds all the
- * configuration information. When the module loads, we read this information
- * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
- * maintained independently in spa.c. Whenever the namespace is modified, or
- * the configuration of a pool is changed, we call spa_write_cachefile(), which
- * walks through all the active pools and writes the configuration to disk.
+ * The kernel independently maintains an AVL tree of imported pools. See the
+ * "SPA locking" comment in spa.c. Whenever a pool configuration is modified,
+ * we call spa_write_cachefile(), which walks through all the active pools and
+ * writes the updated configuration to the /etc/zfs/zpool.cache file.
*/
static uint64_t spa_config_generation = 1;
@@ -69,94 +68,6 @@ static uint64_t spa_config_generation = 1;
* userland pools when doing testing.
*/
char *spa_config_path = (char *)ZPOOL_CACHE;
-#ifdef _KERNEL
-static int zfs_autoimport_disable = B_TRUE;
-#endif
-
-/*
- * Called when the module is first loaded, this routine loads the configuration
- * file into the SPA namespace. It does not actually open or load the pools; it
- * only populates the namespace.
- */
-void
-spa_config_load(void)
-{
- void *buf = NULL;
- nvlist_t *nvlist, *child;
- nvpair_t *nvpair;
- char *pathname;
- zfs_file_t *fp;
- zfs_file_attr_t zfa;
- uint64_t fsize;
- int err;
-
-#ifdef _KERNEL
- if (zfs_autoimport_disable)
- return;
-#endif
-
- /*
- * Open the configuration file.
- */
- pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
-
- (void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);
-
- err = zfs_file_open(pathname, O_RDONLY, 0, &fp);
-
-#ifdef __FreeBSD__
- if (err)
- err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
-#endif
- kmem_free(pathname, MAXPATHLEN);
-
- if (err)
- return;
-
- if (zfs_file_getattr(fp, &zfa))
- goto out;
-
- fsize = zfa.zfa_size;
- buf = kmem_alloc(fsize, KM_SLEEP);
-
- /*
- * Read the nvlist from the file.
- */
- if (zfs_file_read(fp, buf, fsize, NULL) < 0)
- goto out;
-
- /*
- * Unpack the nvlist.
- */
- if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
- goto out;
-
- /*
- * Iterate over all elements in the nvlist, creating a new spa_t for
- * each one with the specified configuration.
- */
- mutex_enter(&spa_namespace_lock);
- nvpair = NULL;
- while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
- if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
- continue;
-
- child = fnvpair_value_nvlist(nvpair);
-
- if (spa_lookup(nvpair_name(nvpair)) != NULL)
- continue;
- (void) spa_add(nvpair_name(nvpair), child, NULL);
- }
- mutex_exit(&spa_namespace_lock);
-
- nvlist_free(nvlist);
-
-out:
- if (buf != NULL)
- kmem_free(buf, fsize);
-
- zfs_file_close(fp);
-}
static int
spa_config_remove(spa_config_dirent_t *dp)
@@ -461,6 +372,8 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
+ fnvlist_add_uint64(config, ZPOOL_CONFIG_MIN_ALLOC, spa->spa_min_alloc);
+ fnvlist_add_uint64(config, ZPOOL_CONFIG_MAX_ALLOC, spa->spa_max_alloc);
if (spa->spa_comment != NULL)
fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
spa->spa_comment);
@@ -623,7 +536,6 @@ spa_config_update(spa_t *spa, int what)
spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}
-EXPORT_SYMBOL(spa_config_load);
EXPORT_SYMBOL(spa_all_configs);
EXPORT_SYMBOL(spa_config_set);
EXPORT_SYMBOL(spa_config_generate);
@@ -634,8 +546,3 @@ EXPORT_SYMBOL(spa_config_update);
ZFS_MODULE_PARAM(zfs_spa, spa_, config_path, STRING, ZMOD_RD,
"SPA config file (/etc/zfs/zpool.cache)");
#endif
-
-#ifdef _KERNEL
-ZFS_MODULE_PARAM(zfs, zfs_, autoimport_disable, INT, ZMOD_RW,
- "Disable pool import at module load");
-#endif
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 2eba8362a166..0bead6d49666 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -251,11 +251,11 @@ spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
- * Everything except dprintf, set_error, spa, and indirect_remap is on
- * by default in debug builds.
+ * Everything except dprintf, set_error, indirect_remap, and raidz_reconstruct
+ * is on by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
- ZFS_DEBUG_INDIRECT_REMAP);
+ ZFS_DEBUG_INDIRECT_REMAP | ZFS_DEBUG_RAIDZ_RECONSTRUCT);
#else
int zfs_flags = 0;
#endif
@@ -471,9 +471,9 @@ spa_config_lock_destroy(spa_t *spa)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
- ASSERT(scl->scl_writer == NULL);
- ASSERT(scl->scl_write_wanted == 0);
- ASSERT(scl->scl_count == 0);
+ ASSERT0P(scl->scl_writer);
+ ASSERT0(scl->scl_write_wanted);
+ ASSERT0(scl->scl_count);
}
}
@@ -510,7 +510,7 @@ spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
- int mmp_flag)
+ int priority_flag)
{
(void) tag;
int wlocks_held = 0;
@@ -526,7 +526,7 @@ spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer ||
- (!mmp_flag && scl->scl_write_wanted)) {
+ (!priority_flag && scl->scl_write_wanted)) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
@@ -551,7 +551,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
}
/*
- * The spa_config_enter_mmp() allows the mmp thread to cut in front of
+ * spa_config_enter_priority() allows the mmp thread to cut in front of
* outstanding write lock requests. This is needed since the mmp updates are
* time sensitive and failure to service them promptly will result in a
* suspended pool. This pool suspension has been seen in practice when there is
@@ -560,7 +560,7 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
*/
void
-spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
+spa_config_enter_priority(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 1);
}
@@ -784,29 +784,29 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
- VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, KM_SLEEP));
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
- VERIFY(nvlist_dup(features, &spa->spa_label_features,
- 0) == 0);
+ VERIFY0(nvlist_dup(features,
+ &spa->spa_label_features, 0));
}
- VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
+ VERIFY0(nvlist_dup(config, &spa->spa_config, 0));
}
if (spa->spa_label_features == NULL) {
- VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
+ KM_SLEEP));
}
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
+ spa->spa_max_alloc = 0;
spa->spa_gcd_alloc = INT_MAX;
/* Reset cached value */
@@ -1866,6 +1866,19 @@ spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
}
/*
+ * Return the range of minimum allocation sizes for the normal allocation
+ * class. This can be used by external consumers of the DMU to estimate
+ * potential wasted capacity when setting the recordsize for an object.
+ * This is mainly for dRAID pools which always pad to a full stripe width.
+ */
+void
+spa_get_min_alloc_range(spa_t *spa, uint64_t *min_alloc, uint64_t *max_alloc)
+{
+ *min_alloc = spa->spa_min_alloc;
+ *max_alloc = spa->spa_max_alloc;
+}
+
+/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
@@ -2549,13 +2562,6 @@ spa_name_compare(const void *a1, const void *a2)
}
void
-spa_boot_init(void *unused)
-{
- (void) unused;
- spa_config_load();
-}
-
-void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -2608,7 +2614,6 @@ spa_init(spa_mode_t mode)
chksum_init();
zpool_prop_init();
zpool_feature_init();
- spa_config_load();
vdev_prop_init();
l2arc_start();
scan_init();
@@ -3094,6 +3099,7 @@ EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
+EXPORT_SYMBOL(spa_get_min_alloc_range); /* for Lustre */
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
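The spa_get_min_alloc_range() helper added above is exported so external DMU consumers (Lustre, per the EXPORT_SYMBOL note) can estimate allocation padding when choosing a recordsize. Below is a minimal standalone sketch of that estimate, assuming dRAID pads every allocation up to the largest minimum allocation size; padded_asize() and the 48 KiB stripe figure are hypothetical illustrations, not part of this commit:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in: max_alloc would come from spa_get_min_alloc_range(). */
static uint64_t
padded_asize(uint64_t recordsize, uint64_t max_alloc)
{
	/* dRAID pads each allocation to a full stripe width (max_alloc). */
	return ((recordsize + max_alloc - 1) / max_alloc * max_alloc);
}

int
main(void)
{
	uint64_t max_alloc = 48 * 1024;		/* example stripe width */
	uint64_t rs = 40 * 1024;		/* candidate recordsize */

	/* 40 KiB rounds up to 48 KiB: 20% of the allocation is padding. */
	printf("padded=%llu overhead=%.0f%%\n",
	    (unsigned long long)padded_asize(rs, max_alloc),
	    100.0 * (padded_asize(rs, max_alloc) - rs) / rs);
	return (0);
}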
diff --git a/sys/contrib/openzfs/module/zfs/spa_stats.c b/sys/contrib/openzfs/module/zfs/spa_stats.c
index 6d7cabcf766d..2c87122a0aa9 100644
--- a/sys/contrib/openzfs/module/zfs/spa_stats.c
+++ b/sys/contrib/openzfs/module/zfs/spa_stats.c
@@ -718,7 +718,7 @@ spa_mmp_history_set(spa_t *spa, uint64_t mmp_node_id, int io_error,
for (smh = list_tail(&shl->procfs_list.pl_list); smh != NULL;
smh = list_prev(&shl->procfs_list.pl_list, smh)) {
if (smh->mmp_node_id == mmp_node_id) {
- ASSERT(smh->io_error == 0);
+ ASSERT0(smh->io_error);
smh->io_error = io_error;
smh->duration = duration;
error = 0;
diff --git a/sys/contrib/openzfs/module/zfs/space_map.c b/sys/contrib/openzfs/module/zfs/space_map.c
index c429e0edd168..5f24963f2291 100644
--- a/sys/contrib/openzfs/module/zfs/space_map.c
+++ b/sys/contrib/openzfs/module/zfs/space_map.c
@@ -817,7 +817,7 @@ space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
space_map_t *sm;
int error;
- ASSERT(*smp == NULL);
+ ASSERT0P(*smp);
ASSERT(os != NULL);
ASSERT(object != 0);
diff --git a/sys/contrib/openzfs/module/zfs/space_reftree.c b/sys/contrib/openzfs/module/zfs/space_reftree.c
index 9b2d5ed31dc9..889980e08c06 100644
--- a/sys/contrib/openzfs/module/zfs/space_reftree.c
+++ b/sys/contrib/openzfs/module/zfs/space_reftree.c
@@ -149,6 +149,6 @@ space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t minref)
}
}
}
- ASSERT(refcnt == 0);
+ ASSERT0(refcnt);
ASSERT(start == -1ULL);
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 70b14fb9b2c8..c8d7280387a2 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -29,7 +29,7 @@
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
- * Copyright (c) 2021, Klara Inc.
+ * Copyright (c) 2021, 2025, Klara, Inc.
* Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
*/
@@ -554,7 +554,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- ASSERT(cvd->vdev_parent == NULL);
+ ASSERT0P(cvd->vdev_parent);
cvd->vdev_parent = pvd;
@@ -578,7 +578,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
pvd->vdev_nonrot &= cvd->vdev_nonrot;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
- ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
+ ASSERT0P(cvd->vdev_top->vdev_parent->vdev_parent);
/*
* Walk up all ancestors to update guid sum.
@@ -1086,6 +1086,10 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
}
}
+ if (top_level && (ops == &vdev_raidz_ops || ops == &vdev_draid_ops))
+ vd->vdev_autosit =
+ vdev_prop_default_numeric(VDEV_PROP_AUTOSIT);
+
/*
* Add ourselves to the parent's list of children.
*/
@@ -1101,10 +1105,10 @@ vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
- ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
+ ASSERT0P(vd->vdev_trim_thread);
+ ASSERT0P(vd->vdev_autotrim_thread);
+ ASSERT0P(vd->vdev_rebuild_thread);
/*
* Scan queues are normally destroyed at the end of a scan. If the
@@ -1133,7 +1137,7 @@ vdev_free(vdev_t *vd)
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
- ASSERT(vd->vdev_child == NULL);
+ ASSERT0P(vd->vdev_child);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
@@ -1162,7 +1166,7 @@ vdev_free(vdev_t *vd)
*/
vdev_remove_child(vd->vdev_parent, vd);
- ASSERT(vd->vdev_parent == NULL);
+ ASSERT0P(vd->vdev_parent);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
@@ -1187,6 +1191,9 @@ vdev_free(vdev_t *vd)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
+ if (vd->vdev_prev_histo)
+ kmem_free(vd->vdev_prev_histo,
+ sizeof (uint64_t) * VDEV_L_HISTO_BUCKETS);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
@@ -1309,9 +1316,9 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
- ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
- ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
- ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0P(tvd->vdev_indirect_mapping);
+ ASSERT0P(tvd->vdev_indirect_births);
+ ASSERT0P(tvd->vdev_obsolete_sm);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
@@ -1464,7 +1471,7 @@ vdev_remove_parent(vdev_t *cvd)
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
- ASSERT(mvd->vdev_children == 0);
+ ASSERT0(mvd->vdev_children);
vdev_free(mvd);
}
@@ -1490,12 +1497,14 @@ vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
{
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
- if (spa->spa_gcd_alloc == INT_MAX) {
+
+ if (min_alloc > spa->spa_max_alloc)
+ spa->spa_max_alloc = min_alloc;
+
+ if (spa->spa_gcd_alloc == INT_MAX)
spa->spa_gcd_alloc = min_alloc;
- } else {
- spa->spa_gcd_alloc = vdev_gcd(min_alloc,
- spa->spa_gcd_alloc);
- }
+ else
+ spa->spa_gcd_alloc = vdev_gcd(min_alloc, spa->spa_gcd_alloc);
}
void
@@ -1553,8 +1562,7 @@ vdev_metaslab_group_create(vdev_t *vd)
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
- uint64_t min_alloc = vdev_get_min_alloc(vd);
- vdev_spa_set_alloc(spa, min_alloc);
+ vdev_spa_set_alloc(spa, vdev_get_min_alloc(vd));
}
}
}
@@ -2134,14 +2142,14 @@ vdev_open(vdev_t *vd)
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
@@ -2197,7 +2205,7 @@ vdev_open(vdev_t *vd)
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
@@ -2206,7 +2214,7 @@ vdev_open(vdev_t *vd)
}
if (vd->vdev_degraded) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
@@ -3857,6 +3865,26 @@ vdev_load(vdev_t *vd)
}
}
+ if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
+ spa_t *spa = vd->vdev_spa;
+ uint64_t autosit;
+
+ error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
+ vdev_prop_to_name(VDEV_PROP_AUTOSIT), sizeof (autosit),
+ 1, &autosit);
+ if (error == 0) {
+ vd->vdev_autosit = autosit == 1;
+ } else if (error == ENOENT) {
+ vd->vdev_autosit = vdev_prop_default_numeric(
+ VDEV_PROP_AUTOSIT);
+ } else {
+ vdev_dbgmsg(vd,
+ "vdev_load: zap_lookup(top_zap=%llu) "
+ "failed [error=%d]",
+ (u_longlong_t)vd->vdev_top_zap, error);
+ }
+ }
+
/*
* Load any rebuild state from the top-level vdev zap.
*/
@@ -3945,7 +3973,7 @@ vdev_load(vdev_t *vd)
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
- ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
+ ASSERT0P(vd->vdev_checkpoint_sm);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
@@ -3993,7 +4021,7 @@ vdev_load(vdev_t *vd)
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
- ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0P(vd->vdev_obsolete_sm);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
@@ -4521,7 +4549,7 @@ top:
/*
* Prevent any future allocations.
*/
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
@@ -4616,6 +4644,8 @@ vdev_clear(spa_t *spa, vdev_t *vd)
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_dio_verify_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
+ atomic_store_64(&vd->vdev_outlier_count, 0);
+ vd->vdev_read_sit_out_expire = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
@@ -5194,7 +5224,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
- ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
+ ASSERT0((space & (SPA_MINBLOCKSIZE-1)));
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
@@ -5286,8 +5316,8 @@ vdev_config_dirty(vdev_t *vd)
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
- VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
- ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
+ VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
+ ZPOOL_CONFIG_SPARES, &aux, &naux));
}
ASSERT(c < naux);
@@ -5675,7 +5705,7 @@ vdev_expand(vdev_t *vd, uint64_t txg)
(vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
- VERIFY(vdev_metaslab_init(vd, txg) == 0);
+ VERIFY0(vdev_metaslab_init(vd, txg));
vdev_config_dirty(vd);
}
}
@@ -6107,6 +6137,56 @@ vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
}
vd->vdev_failfast = intval & 1;
break;
+ case VDEV_PROP_SIT_OUT:
+ /* Only expose this for a draid or raidz leaf */
+ if (!vd->vdev_ops->vdev_op_leaf ||
+ vd->vdev_top == NULL ||
+ (vd->vdev_top->vdev_ops != &vdev_raidz_ops &&
+ vd->vdev_top->vdev_ops != &vdev_draid_ops)) {
+ error = ENOTSUP;
+ break;
+ }
+ if (nvpair_value_uint64(elem, &intval) != 0) {
+ error = EINVAL;
+ break;
+ }
+ if (intval == 1) {
+ vdev_t *ancestor = vd;
+ while (ancestor->vdev_parent != vd->vdev_top)
+ ancestor = ancestor->vdev_parent;
+ vdev_t *pvd = vd->vdev_top;
+ uint_t sitouts = 0;
+ for (int i = 0; i < pvd->vdev_children; i++) {
+ if (pvd->vdev_child[i] == ancestor)
+ continue;
+ if (vdev_sit_out_reads(
+ pvd->vdev_child[i], 0)) {
+ sitouts++;
+ }
+ }
+ if (sitouts >= vdev_get_nparity(pvd)) {
+ error = ZFS_ERR_TOO_MANY_SITOUTS;
+ break;
+ }
+ if (error == 0)
+ vdev_raidz_sit_child(vd,
+ INT64_MAX - gethrestime_sec());
+ } else {
+ vdev_raidz_unsit_child(vd);
+ }
+ break;
+ case VDEV_PROP_AUTOSIT:
+ if (vd->vdev_ops != &vdev_raidz_ops &&
+ vd->vdev_ops != &vdev_draid_ops) {
+ error = ENOTSUP;
+ break;
+ }
+ if (nvpair_value_uint64(elem, &intval) != 0) {
+ error = EINVAL;
+ break;
+ }
+ vd->vdev_autosit = intval == 1;
+ break;
case VDEV_PROP_CHECKSUM_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
@@ -6456,6 +6536,19 @@ vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
ZPROP_SRC_NONE);
}
continue;
+ case VDEV_PROP_SIT_OUT:
+ /* Only expose this for a draid or raidz leaf */
+ if (vd->vdev_ops->vdev_op_leaf &&
+ vd->vdev_top != NULL &&
+ (vd->vdev_top->vdev_ops ==
+ &vdev_raidz_ops ||
+ vd->vdev_top->vdev_ops ==
+ &vdev_draid_ops)) {
+ vdev_prop_add_list(outnvl, propname,
+ NULL, vdev_sit_out_reads(vd, 0),
+ ZPROP_SRC_NONE);
+ }
+ continue;
case VDEV_PROP_TRIM_SUPPORT:
/* only valid for leaf vdevs */
if (vd->vdev_ops->vdev_op_leaf) {
@@ -6506,6 +6599,29 @@ vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
+ case VDEV_PROP_AUTOSIT:
+ /* Only raidz and draid vdevs can have this property */
+ if (vd->vdev_ops != &vdev_raidz_ops &&
+ vd->vdev_ops != &vdev_draid_ops) {
+ src = ZPROP_SRC_NONE;
+ intval = ZPROP_BOOLEAN_NA;
+ } else {
+ err = vdev_prop_get_int(vd, prop,
+ &intval);
+ if (err && err != ENOENT)
+ break;
+
+ if (intval ==
+ vdev_prop_default_numeric(prop))
+ src = ZPROP_SRC_DEFAULT;
+ else
+ src = ZPROP_SRC_LOCAL;
+ }
+
+ vdev_prop_add_list(outnvl, propname, NULL,
+ intval, src);
+ break;
+
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
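The VDEV_PROP_SIT_OUT setter above refuses a manual sit-out once the children already sitting out would reach the top-level vdev's parity. A self-contained restatement of that guard (sit_out_allowed() is a hypothetical name, not part of the commit):

#include <stdbool.h>

/*
 * A new sit-out is granted only while the children already sitting out
 * number fewer than the parity disks; otherwise one more failure could
 * leave a row unreconstructable.
 */
static bool
sit_out_allowed(unsigned int already_sitting, unsigned int nparity)
{
	return (already_sitting < nparity);
}

For raidz2 (nparity = 2) this allows two concurrent sit-outs; a third request fails with ZFS_ERR_TOO_MANY_SITOUTS.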
diff --git a/sys/contrib/openzfs/module/zfs/vdev_draid.c b/sys/contrib/openzfs/module/zfs/vdev_draid.c
index feec5fd3ce17..8588cfee3f7d 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_draid.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_draid.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <sys/zfs_context.h>
@@ -477,7 +478,7 @@ vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
VERIFY3U(map->dm_seed, !=, 0);
VERIFY3U(map->dm_nperms, !=, 0);
- VERIFY3P(map->dm_perms, ==, NULL);
+ VERIFY0P(map->dm_perms);
#ifdef _KERNEL
/*
@@ -590,7 +591,7 @@ vdev_draid_psize_to_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
ASSERT3U(asize, !=, 0);
- ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);
+ ASSERT0(asize % (vdc->vdc_groupwidth));
return (asize);
}
@@ -704,7 +705,7 @@ vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
- ASSERT3P(rr->rr_abd_empty, ==, NULL);
+ ASSERT0P(rr->rr_abd_empty);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
@@ -793,7 +794,7 @@ vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
- ASSERT3P(rr->rr_abd_empty, ==, NULL);
+ ASSERT0P(rr->rr_abd_empty);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
@@ -807,7 +808,7 @@ vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
- ASSERT3P(rc->rc_abd, ==, NULL);
+ ASSERT0P(rc->rc_abd);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
@@ -1623,7 +1624,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
SPA_MAXBLOCKSIZE);
ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
- ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);
+ ASSERT0(asize % (vdc->vdc_groupwidth << ashift));
/* Chunks must evenly span all data columns in the group. */
psize = (((psize >> ashift) / ndata) * ndata) << ashift;
@@ -1634,7 +1635,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
chunk_size = MIN(chunk_size, left);
- ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
+ ASSERT0(chunk_size % (vdc->vdc_groupwidth << ashift));
ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
vdev_draid_offset_to_group(vd, start + chunk_size - 1));
@@ -1996,6 +1997,33 @@ vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
rc->rc_allow_repair = 1;
}
}
+
+ if (vdev_sit_out_reads(cvd, zio->io_flags)) {
+ rr->rr_outlier_cnt++;
+ ASSERT0(rc->rc_latency_outlier);
+ rc->rc_latency_outlier = 1;
+ }
+ }
+
+ /*
+ * When the row contains a latency outlier and sufficient parity
+ * exists to reconstruct the column data, then skip reading the
+ * known slow child vdev as a performance optimization.
+ */
+ if (rr->rr_outlier_cnt > 0 &&
+ (rr->rr_firstdatacol - rr->rr_missingparity) >=
+ (rr->rr_missingdata + 1)) {
+
+ for (int c = rr->rr_cols - 1; c >= rr->rr_firstdatacol; c--) {
+ raidz_col_t *rc = &rr->rr_col[c];
+
+ if (rc->rc_error == 0 && rc->rc_latency_outlier) {
+ rr->rr_missingdata++;
+ rc->rc_error = SET_ERROR(EAGAIN);
+ rc->rc_skipped = 1;
+ break;
+ }
+ }
}
/*
@@ -2272,7 +2300,7 @@ vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
- ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
+ ASSERT0(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT);
ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
vdc->vdc_ndisks, ==, 0);
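The outlier-skip added to vdev_draid_io_start_read() above (mirrored for raidz in vdev_raidz.c below) only sidelines a slow column while enough parity remains to rebuild it. A hedged sketch of the guard; can_skip_outlier() is a hypothetical helper, not part of the commit:

#include <stdbool.h>

/*
 * Skipping a latency outlier adds one more missing data column, so the
 * parity still readable (firstdatacol - missingparity) must cover
 * missingdata + 1.
 */
static bool
can_skip_outlier(int firstdatacol, int missingparity, int missingdata)
{
	return ((firstdatacol - missingparity) >= (missingdata + 1));
}

For a healthy raidz2 row (firstdatacol = 2, nothing missing) the check is 2 >= 1 and the slowest flagged column may be dropped; with one device already missing it is 2 >= 2 and the skip is still possible, but a second missing device disables the optimization.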
diff --git a/sys/contrib/openzfs/module/zfs/vdev_file.c b/sys/contrib/openzfs/module/zfs/vdev_file.c
index f457669bc809..20b4db65ec06 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_file.c
@@ -228,7 +228,8 @@ vdev_file_io_strategy(void *arg)
abd_return_buf_copy(zio->io_abd, buf, size);
} else {
buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);
- err = zfs_file_pwrite(vf->vf_file, buf, size, off, &resid);
+ err = zfs_file_pwrite(vf->vf_file, buf, size, off,
+ vd->vdev_ashift, &resid);
abd_return_buf(zio->io_abd, buf, size);
}
zio->io_error = err;
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index 9fc71fa0e03e..7538f471e63c 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -792,7 +792,7 @@ spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
- ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
+ ASSERT0P(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
@@ -882,7 +882,7 @@ spa_condense_fini(spa_t *spa)
void
spa_start_indirect_condensing_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_condense_zthr, ==, NULL);
+ ASSERT0P(spa->spa_condense_zthr);
spa->spa_condense_zthr = zthr_create("z_indirect_condense",
spa_condense_indirect_thread_check,
spa_condense_indirect_thread, spa, minclsyspri);
@@ -1504,7 +1504,7 @@ vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
is != NULL; is = list_next(&iv->iv_splits, is)) {
ASSERT3P(is->is_good_child->ic_data, !=, NULL);
- ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
+ ASSERT0P(is->is_good_child->ic_duplicate);
abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
is->is_split_offset, 0, is->is_size);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_initialize.c b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
index 9243c76e810d..27188c46e561 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_initialize.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
@@ -632,7 +632,7 @@ vdev_initialize(vdev_t *vd)
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
@@ -653,7 +653,7 @@ vdev_uninitialize(vdev_t *vd)
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
@@ -672,7 +672,7 @@ vdev_initialize_stop_wait_impl(vdev_t *vd)
while (vd->vdev_initialize_thread != NULL)
cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
vd->vdev_initialize_exit_wanted = B_FALSE;
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_label.c b/sys/contrib/openzfs/module/zfs/vdev_label.c
index 6baa6236aac2..0d4fdaa77ba0 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_label.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_label.c
@@ -163,7 +163,7 @@ uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
ASSERT(offset < sizeof (vdev_label_t));
- ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
+ ASSERT0(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t));
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
@@ -511,6 +511,8 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_MIN_ALLOC,
+ vdev_get_min_alloc(vd));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
if (vd->vdev_noalloc) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
@@ -768,12 +770,12 @@ vdev_top_config_generate(spa_t *spa, nvlist_t *config)
}
if (idx) {
- VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
- array, idx) == 0);
+ VERIFY0(nvlist_add_uint64_array(config,
+ ZPOOL_CONFIG_HOLE_ARRAY, array, idx));
}
- VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
- rvd->vdev_children) == 0);
+ VERIFY0(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
+ rvd->vdev_children));
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
@@ -1189,8 +1191,8 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
* vdev uses as described above, and automatically expires if we
* fail.
*/
- VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
- crtxg) == 0);
+ VERIFY0(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
+ crtxg));
}
buf = vp->vp_nvlist;
diff --git a/sys/contrib/openzfs/module/zfs/vdev_queue.c b/sys/contrib/openzfs/module/zfs/vdev_queue.c
index aa41f7066036..c12713b107bf 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_queue.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_queue.c
@@ -780,7 +780,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
if (dio->io_flags & ZIO_FLAG_NODATA) {
/* allocate a buffer for a write gap */
ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
- ASSERT3P(dio->io_abd, ==, NULL);
+ ASSERT0P(dio->io_abd);
abd_gang_add(aio->io_abd,
abd_get_zeros(dio->io_size), B_TRUE);
} else {
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index 210cdcab1ecc..56b8e3b60b22 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -24,6 +24,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <sys/zfs_context.h>
@@ -356,6 +357,32 @@ unsigned long raidz_expand_max_reflow_bytes = 0;
uint_t raidz_expand_pause_point = 0;
/*
+ * The duration, in seconds, that a slow drive will sit out reads.
+ */
+static unsigned long vdev_read_sit_out_secs = 600;
+
+/*
+ * How often each RAID-Z and dRAID vdev will check for slow disk outliers.
+ * Increasing this interval will reduce the sensitivity of detection (since all
+ * I/Os since the last check are included in the statistics), but will slow the
+ * response to a disk developing a problem.
+ *
+ * Defaults to once per second; setting extremely small values may cause
+ * negative performance effects.
+ */
+static hrtime_t vdev_raidz_outlier_check_interval_ms = 1000;
+
+/*
+ * When performing slow outlier checks for RAID-Z and dRAID vdevs, this value is
+ * used to determine how far out an outlier must be before it counts as an event
+ * worth considering.
+ *
+ * Smaller values will result in more aggressive sitting out of disks that may
+ * have problems, but may significantly increase the rate of spurious sit-outs.
+ */
+static uint32_t vdev_raidz_outlier_insensitivity = 50;
+
+/*
* Maximum amount of copy io's outstanding at once.
*/
#ifdef _ILP32
@@ -412,7 +439,7 @@ vdev_raidz_map_free(raidz_map_t *rm)
rm->rm_nphys_cols);
}
- ASSERT3P(rm->rm_lr, ==, NULL);
+ ASSERT0P(rm->rm_lr);
kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
@@ -2311,6 +2338,41 @@ vdev_raidz_min_asize(vdev_t *vd)
vd->vdev_children);
}
+/*
+ * Return B_TRUE if a read should be skipped due to being too slow.
+ *
+ * vdev_child_slow_outlier() looks for outliers based on disk
+ * latency from the most recent child reads. Here we check whether,
+ * over time, a disk has been an outlier too many times and is
+ * now in a sit-out period.
+ */
+boolean_t
+vdev_sit_out_reads(vdev_t *vd, zio_flag_t io_flags)
+{
+ if (vdev_read_sit_out_secs == 0)
+ return (B_FALSE);
+
+ /* Avoid skipping a data column read when scrubbing */
+ if (io_flags & ZIO_FLAG_SCRUB)
+ return (B_FALSE);
+
+ if (!vd->vdev_ops->vdev_op_leaf) {
+ boolean_t sitting = B_FALSE;
+ for (int c = 0; c < vd->vdev_children; c++) {
+ sitting |= vdev_sit_out_reads(vd->vdev_child[c],
+ io_flags);
+ }
+ return (sitting);
+ }
+
+ if (vd->vdev_read_sit_out_expire >= gethrestime_sec())
+ return (B_TRUE);
+
+ vd->vdev_read_sit_out_expire = 0;
+
+ return (B_FALSE);
+}
+
void
vdev_raidz_child_done(zio_t *zio)
{
@@ -2431,7 +2493,7 @@ raidz_start_skip_writes(zio_t *zio)
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_size != 0)
continue;
- ASSERT3P(rc->rc_abd, ==, NULL);
+ ASSERT0P(rc->rc_abd);
ASSERT3U(rc->rc_offset, <,
cvd->vdev_psize - VDEV_LABEL_END_SIZE);
@@ -2475,6 +2537,45 @@ vdev_raidz_io_start_read_row(zio_t *zio, raidz_row_t *rr, boolean_t forceparity)
rc->rc_skipped = 1;
continue;
}
+
+ if (vdev_sit_out_reads(cvd, zio->io_flags)) {
+ rr->rr_outlier_cnt++;
+ ASSERT0(rc->rc_latency_outlier);
+ rc->rc_latency_outlier = 1;
+ }
+ }
+
+ /*
+ * When the row contains a latency outlier and sufficient parity
+ * exists to reconstruct the column data, then skip reading the
+ * known slow child vdev as a performance optimization.
+ */
+ if (rr->rr_outlier_cnt > 0 &&
+ (rr->rr_firstdatacol - rr->rr_missingparity) >=
+ (rr->rr_missingdata + 1)) {
+
+ for (int c = rr->rr_cols - 1; c >= 0; c--) {
+ raidz_col_t *rc = &rr->rr_col[c];
+
+ if (rc->rc_error == 0 && rc->rc_latency_outlier) {
+ if (c >= rr->rr_firstdatacol)
+ rr->rr_missingdata++;
+ else
+ rr->rr_missingparity++;
+ rc->rc_error = SET_ERROR(EAGAIN);
+ rc->rc_skipped = 1;
+ break;
+ }
+ }
+ }
+
+ for (int c = rr->rr_cols - 1; c >= 0; c--) {
+ raidz_col_t *rc = &rr->rr_col[c];
+ vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
+
+ if (rc->rc_error || rc->rc_size == 0)
+ continue;
+
if (forceparity ||
c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
@@ -2498,6 +2599,7 @@ vdev_raidz_io_start_read_phys_cols(zio_t *zio, raidz_map_t *rm)
ASSERT3U(prc->rc_devidx, ==, i);
vdev_t *cvd = vd->vdev_child[i];
+
if (!vdev_readable(cvd)) {
prc->rc_error = SET_ERROR(ENXIO);
prc->rc_tried = 1; /* don't even try */
@@ -2774,6 +2876,239 @@ vdev_raidz_worst_error(raidz_row_t *rr)
return (error);
}
+/*
+ * Find the median value from a set of n values
+ */
+static uint64_t
+latency_median_value(const uint64_t *data, size_t n)
+{
+ uint64_t m;
+
+ if (n % 2 == 0)
+ m = (data[(n >> 1) - 1] + data[n >> 1]) >> 1;
+ else
+ m = data[((n + 1) >> 1) - 1];
+
+ return (m);
+}
+
+/*
+ * Calculate the outlier fence from a set of n latency values
+ *
+ * fence = Q3 + vdev_raidz_outlier_insensitivity x (Q3 - Q1)
+ */
+static uint64_t
+latency_quartiles_fence(const uint64_t *data, size_t n, uint64_t *iqr)
+{
+ uint64_t q1 = latency_median_value(&data[0], n >> 1);
+ uint64_t q3 = latency_median_value(&data[(n + 1) >> 1], n >> 1);
+
+ /*
+ * To avoid detecting false positive outliers when N is small and
+ * the latency values are very close, make sure the IQR is at
+ * least 25% of Q1.
+ */
+ *iqr = MAX(q3 - q1, q1 / 4);
+
+ return (q3 + (*iqr * vdev_raidz_outlier_insensitivity));
+}
+#define LAT_CHILDREN_MIN 5
+#define LAT_OUTLIER_LIMIT 20
+
+static int
+latency_compare(const void *arg1, const void *arg2)
+{
+ const uint64_t *l1 = (uint64_t *)arg1;
+ const uint64_t *l2 = (uint64_t *)arg2;
+
+ return (TREE_CMP(*l1, *l2));
+}
+
+void
+vdev_raidz_sit_child(vdev_t *svd, uint64_t secs)
+{
+ for (int c = 0; c < svd->vdev_children; c++)
+ vdev_raidz_sit_child(svd->vdev_child[c], secs);
+
+ if (!svd->vdev_ops->vdev_op_leaf)
+ return;
+
+ /* Begin a sit out period for this slow drive */
+ svd->vdev_read_sit_out_expire = gethrestime_sec() + secs;
+
+ /* Count each slow io period */
+ mutex_enter(&svd->vdev_stat_lock);
+ svd->vdev_stat.vs_slow_ios++;
+ mutex_exit(&svd->vdev_stat_lock);
+}
+
+void
+vdev_raidz_unsit_child(vdev_t *vd)
+{
+ for (int c = 0; c < vd->vdev_children; c++)
+ vdev_raidz_unsit_child(vd->vdev_child[c]);
+
+ if (!vd->vdev_ops->vdev_op_leaf)
+ return;
+
+ vd->vdev_read_sit_out_expire = 0;
+}
+
+/*
+ * Check for any latency outlier from latest set of child reads.
+ *
+ * Uses Tukey's fence, with K = 50, for detecting extreme outliers. This
+ * rule flags as extreme outliers any data points beyond a fence set at the
+ * third quartile plus fifty times the interquartile range (IQR), the
+ * distance between the first and third quartiles.
+ *
+ * Fifty is an extremely large value for Tukey's fence, but the outliers we're
+ * attempting to detect here are orders of magnitude larger than the
+ * median. This large value should capture any truly faulty disk quickly,
+ * without causing spurious sit-outs.
+ *
+ * To further avoid spurious sit-outs, vdevs must be detected multiple times
+ * as an outlier before they are sat out, and outlier counts gradually decay.
+ * Every nchildren times we have detected an outlier, we subtract 2 from the
+ * outlier count of all children. If detected outliers are close to uniformly
+ * distributed, this will result in the outlier count remaining close to 0
+ * (in expectation; over long enough time-scales, spurious sit-outs are still
+ * possible).
+ */
+static void
+vdev_child_slow_outlier(zio_t *zio)
+{
+ vdev_t *vd = zio->io_vd;
+ if (!vd->vdev_autosit || vdev_read_sit_out_secs == 0 ||
+ vd->vdev_children < LAT_CHILDREN_MIN)
+ return;
+
+ hrtime_t now = getlrtime();
+ uint64_t last = atomic_load_64(&vd->vdev_last_latency_check);
+
+ if ((now - last) < MSEC2NSEC(vdev_raidz_outlier_check_interval_ms))
+ return;
+
+ /* Allow a single winner when there are racing callers. */
+ if (atomic_cas_64(&vd->vdev_last_latency_check, last, now) != last)
+ return;
+
+ int children = vd->vdev_children;
+ uint64_t *lat_data = kmem_alloc(sizeof (uint64_t) * children, KM_SLEEP);
+
+ for (int c = 0; c < children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ if (cvd->vdev_prev_histo == NULL) {
+ mutex_enter(&cvd->vdev_stat_lock);
+ size_t size =
+ sizeof (cvd->vdev_stat_ex.vsx_disk_histo[0]);
+ cvd->vdev_prev_histo = kmem_zalloc(size, KM_SLEEP);
+ memcpy(cvd->vdev_prev_histo,
+ cvd->vdev_stat_ex.vsx_disk_histo[ZIO_TYPE_READ],
+ size);
+ mutex_exit(&cvd->vdev_stat_lock);
+ }
+ }
+ uint64_t max = 0;
+ vdev_t *svd = NULL;
+ uint_t sitouts = 0;
+ boolean_t skip = B_FALSE, svd_sitting = B_FALSE;
+ for (int c = 0; c < children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ boolean_t sitting = vdev_sit_out_reads(cvd, 0) ||
+ cvd->vdev_state != VDEV_STATE_HEALTHY;
+
+ /* We can't sit out more disks than we have parity */
+ if (sitting && ++sitouts >= vdev_get_nparity(vd))
+ skip = B_TRUE;
+
+ mutex_enter(&cvd->vdev_stat_lock);
+
+ uint64_t *prev_histo = cvd->vdev_prev_histo;
+ uint64_t *histo =
+ cvd->vdev_stat_ex.vsx_disk_histo[ZIO_TYPE_READ];
+ if (skip) {
+ size_t size =
+ sizeof (cvd->vdev_stat_ex.vsx_disk_histo[0]);
+ memcpy(prev_histo, histo, size);
+ mutex_exit(&cvd->vdev_stat_lock);
+ continue;
+ }
+ uint64_t count = 0;
+ lat_data[c] = 0;
+ for (int i = 0; i < VDEV_L_HISTO_BUCKETS; i++) {
+ uint64_t this_count = histo[i] - prev_histo[i];
+ lat_data[c] += (1ULL << i) * this_count;
+ count += this_count;
+ }
+ size_t size = sizeof (cvd->vdev_stat_ex.vsx_disk_histo[0]);
+ memcpy(prev_histo, histo, size);
+ mutex_exit(&cvd->vdev_stat_lock);
+ lat_data[c] /= MAX(1, count);
+
+ /* Wait until all disks have been read from */
+ if (lat_data[c] == 0 && !sitting) {
+ skip = B_TRUE;
+ continue;
+ }
+
+ /* Keep track of the vdev with the largest value */
+ if (lat_data[c] > max) {
+ max = lat_data[c];
+ svd = cvd;
+ svd_sitting = sitting;
+ }
+ }
+
+ if (skip) {
+ kmem_free(lat_data, sizeof (uint64_t) * children);
+ return;
+ }
+
+ qsort((void *)lat_data, children, sizeof (uint64_t), latency_compare);
+
+ uint64_t iqr;
+ uint64_t fence = latency_quartiles_fence(lat_data, children, &iqr);
+
+ ASSERT3U(lat_data[children - 1], ==, max);
+ if (max > fence && !svd_sitting) {
+ ASSERT3U(iqr, >, 0);
+ uint64_t incr = MAX(1, MIN((max - fence) / iqr,
+ LAT_OUTLIER_LIMIT / 4));
+ vd->vdev_outlier_count += incr;
+ if (vd->vdev_outlier_count >= children) {
+ for (int c = 0; c < children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ cvd->vdev_outlier_count -= 2;
+ cvd->vdev_outlier_count = MAX(0,
+ cvd->vdev_outlier_count);
+ }
+ vd->vdev_outlier_count = 0;
+ }
+ /*
+ * Keep track of how many times this child has had
+ * an outlier read. A disk that persistently has a
+ * higher outlier count than its peers will be
+ * considered a slow disk.
+ */
+ svd->vdev_outlier_count += incr;
+ if (svd->vdev_outlier_count > LAT_OUTLIER_LIMIT) {
+ ASSERT0(svd->vdev_read_sit_out_expire);
+ vdev_raidz_sit_child(svd, vdev_read_sit_out_secs);
+ (void) zfs_ereport_post(FM_EREPORT_ZFS_SITOUT,
+ zio->io_spa, svd, NULL, NULL, 0);
+ vdev_dbgmsg(svd, "begin read sit out for %d secs",
+ (int)vdev_read_sit_out_secs);
+
+ for (int c = 0; c < vd->vdev_children; c++)
+ vd->vdev_child[c]->vdev_outlier_count = 0;
+ }
+ }
+
+ kmem_free(lat_data, sizeof (uint64_t) * children);
+}
+
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
@@ -3363,7 +3698,7 @@ vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
- ASSERT(parity_untried == 0);
+ ASSERT0(parity_untried);
ASSERT(parity_errors < rr->rr_firstdatacol);
/*
@@ -3515,6 +3850,9 @@ vdev_raidz_io_done(zio_t *zio)
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_verified(zio, rr);
}
+ /* Periodically check for a read outlier */
+ if (zio->io_type == ZIO_TYPE_READ)
+ vdev_child_slow_outlier(zio);
zio_checksum_verified(zio);
} else {
/*
@@ -4743,7 +5081,7 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
void
spa_start_raidz_expansion_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_raidz_expand_zthr, ==, NULL);
+ ASSERT0P(spa->spa_raidz_expand_zthr);
spa->spa_raidz_expand_zthr = zthr_create("raidz_expand",
spa_raidz_expand_thread_check, spa_raidz_expand_thread,
spa, defclsyspri);
@@ -5155,3 +5493,10 @@ ZFS_MODULE_PARAM(zfs_vdev, raidz_, io_aggregate_rows, ULONG, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, scrub_after_expand, INT, ZMOD_RW,
"For expanded RAIDZ, automatically start a pool scrub when expansion "
"completes");
+ZFS_MODULE_PARAM(zfs_vdev, vdev_, read_sit_out_secs, ULONG, ZMOD_RW,
+ "Raidz/draid slow disk sit out time period in seconds");
+ZFS_MODULE_PARAM(zfs_vdev, vdev_, raidz_outlier_check_interval_ms, U64,
+ ZMOD_RW, "Interval to check for slow raidz/draid children");
+ZFS_MODULE_PARAM(zfs_vdev, vdev_, raidz_outlier_insensitivity, UINT,
+ ZMOD_RW, "How insensitive the slow raidz/draid child check should be");
+/* END CSTYLED */
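To make the fence arithmetic above concrete, here is a hypothetical standalone example (not part of the commit) that reproduces the latency_median_value() and latency_quartiles_fence() math for eight children, one of which is dramatically slower:

#include <stdio.h>
#include <stdint.h>

#define	K	50	/* mirrors vdev_raidz_outlier_insensitivity */

static uint64_t
median(const uint64_t *d, size_t n)
{
	/* Same midpoint rule as latency_median_value(). */
	return (n % 2 == 0 ? (d[n / 2 - 1] + d[n / 2]) / 2 : d[n / 2]);
}

int
main(void)
{
	/* Sorted mean read latencies (usec) for 8 children. */
	uint64_t lat[] = { 100, 100, 100, 100, 100, 100, 100, 60000 };
	size_t n = sizeof (lat) / sizeof (lat[0]);

	uint64_t q1 = median(&lat[0], n / 2);			/* 100 */
	uint64_t q3 = median(&lat[(n + 1) / 2], n / 2);		/* 100 */
	uint64_t iqr = (q3 - q1 > q1 / 4) ? q3 - q1 : q1 / 4;	/* 25 */
	uint64_t fence = q3 + iqr * K;				/* 1350 */

	printf("fence=%llu max=%llu outlier=%s\n",
	    (unsigned long long)fence, (unsigned long long)lat[n - 1],
	    lat[n - 1] > fence ? "yes" : "no");
	return (0);
}

With Q1 = Q3 = 100, the IQR floor of Q1/4 = 25 applies and the fence is 100 + 50 x 25 = 1350; the 60000 usec child exceeds it, so its outlier count accrues until it crosses LAT_OUTLIER_LIMIT and is sat out.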
diff --git a/sys/contrib/openzfs/module/zfs/vdev_rebuild.c b/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
index cf259788ccf4..47b3b9921abe 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
@@ -256,7 +256,7 @@ vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
"vdev_id=%llu vdev_guid=%llu started",
(u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_rebuild_thread);
vd->vdev_rebuild_thread = thread_create(NULL, 0,
vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
@@ -413,7 +413,7 @@ vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
mutex_enter(&vd->vdev_rebuild_lock);
ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_rebuild_thread);
vrp->vrp_last_offset = 0;
vrp->vrp_min_txg = 0;
diff --git a/sys/contrib/openzfs/module/zfs/vdev_removal.c b/sys/contrib/openzfs/module/zfs/vdev_removal.c
index 3887be4bd548..2f7a739da241 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_removal.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_removal.c
@@ -344,10 +344,10 @@ spa_vdev_remove_aux(nvlist_t *config, const char *name, nvlist_t **dev,
for (int i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
- VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
+ VERIFY0(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP));
}
- VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
+ VERIFY0(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY));
fnvlist_add_nvlist_array(config, name, (const nvlist_t * const *)newdev,
count - 1);
@@ -423,7 +423,7 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
svr = spa_vdev_removal_create(vd);
ASSERT(vd->vdev_removing);
- ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
+ ASSERT0P(vd->vdev_indirect_mapping);
spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
@@ -529,7 +529,7 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
* but in any case only when there are outstanding free i/os, which
* there are not).
*/
- ASSERT3P(spa->spa_vdev_removal, ==, NULL);
+ ASSERT0P(spa->spa_vdev_removal);
spa->spa_vdev_removal = svr;
svr->svr_thread = thread_create(NULL, 0,
spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
@@ -1362,11 +1362,11 @@ vdev_remove_complete(spa_t *spa)
txg_wait_synced(spa->spa_dsl_pool, 0);
txg = spa_vdev_enter(spa);
vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
- ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
+ ASSERT0P(vd->vdev_trim_thread);
+ ASSERT0P(vd->vdev_autotrim_thread);
vdev_rebuild_stop_wait(vd);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_rebuild_thread);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
@@ -1868,7 +1868,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
objset_t *mos = spa->spa_meta_objset;
- ASSERT3P(svr->svr_thread, ==, NULL);
+ ASSERT0P(svr->svr_thread);
spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
@@ -2076,7 +2076,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
- ASSERT3P(vd->vdev_log_mg, ==, NULL);
+ ASSERT0P(vd->vdev_log_mg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
@@ -2112,7 +2112,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
if (error != 0) {
metaslab_group_activate(mg);
- ASSERT3P(vd->vdev_log_mg, ==, NULL);
+ ASSERT0P(vd->vdev_log_mg);
return (error);
}
ASSERT0(vd->vdev_stat.vs_alloc);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_trim.c b/sys/contrib/openzfs/module/zfs/vdev_trim.c
index fc8d5b8e9a8a..eee18b367909 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_trim.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_trim.c
@@ -1010,7 +1010,7 @@ vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_trim_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
@@ -1032,7 +1032,7 @@ vdev_trim_stop_wait_impl(vdev_t *vd)
while (vd->vdev_trim_thread != NULL)
cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_trim_thread);
vd->vdev_trim_exit_wanted = B_FALSE;
}
@@ -1539,7 +1539,7 @@ vdev_autotrim_stop_wait(vdev_t *tvd)
cv_wait(&tvd->vdev_autotrim_cv,
&tvd->vdev_autotrim_lock);
- ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
+ ASSERT0P(tvd->vdev_autotrim_thread);
tvd->vdev_autotrim_exit_wanted = B_FALSE;
}
mutex_exit(&tvd->vdev_autotrim_lock);
@@ -1712,7 +1712,7 @@ vdev_trim_l2arc(spa_t *spa)
mutex_enter(&vd->vdev_trim_lock);
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_trim_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
diff --git a/sys/contrib/openzfs/module/zfs/zap.c b/sys/contrib/openzfs/module/zfs/zap.c
index 0896690c97e3..3e4e997798a3 100644
--- a/sys/contrib/openzfs/module/zfs/zap.c
+++ b/sys/contrib/openzfs/module/zfs/zap.c
@@ -921,7 +921,7 @@ fzap_add_cd(zap_name_t *zn,
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(!zap->zap_ismicro);
- ASSERT(fzap_check(zn, integer_size, num_integers) == 0);
+ ASSERT0(fzap_check(zn, integer_size, num_integers));
err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
if (err != 0)
@@ -1386,7 +1386,7 @@ again:
}
err = zap_entry_read_name(zap, &zeh,
za->za_name_len, za->za_name);
- ASSERT(err == 0);
+ ASSERT0(err);
za->za_normalization_conflict =
zap_entry_normalization_conflict(&zeh,
@@ -1546,7 +1546,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
boolean_t trunc = B_FALSE;
int err = 0;
- ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
+ ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix);
@@ -1564,7 +1564,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len);
int slbit = prefix & 1;
- ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
+ ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
/*
* Check if there is a sibling by reading ptrtbl ptrs.
diff --git a/sys/contrib/openzfs/module/zfs/zap_micro.c b/sys/contrib/openzfs/module/zfs/zap_micro.c
index 411b1a9db5ab..ea4e3117a8b9 100644
--- a/sys/contrib/openzfs/module/zfs/zap_micro.c
+++ b/sys/contrib/openzfs/module/zfs/zap_micro.c
@@ -346,7 +346,7 @@ zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
zap_name_t *zn = kmem_cache_alloc(zap_name_cache, KM_SLEEP);
- ASSERT(zap->zap_normflags == 0);
+ ASSERT0(zap->zap_normflags);
zn->zn_zap = zap;
zn->zn_key_intlen = sizeof (*key);
zn->zn_key_orig = zn->zn_key_norm = key;
@@ -1876,7 +1876,7 @@ zap_cursor_serialize(zap_cursor_t *zc)
return (-1ULL);
if (zc->zc_zap == NULL)
return (zc->zc_serialized);
- ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
+ ASSERT0((zc->zc_hash & zap_maxcd(zc->zc_zap)));
ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
/*
@@ -1911,7 +1911,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
* we must add to the existing zc_cd, which may already
* be 1 due to the zap_cursor_advance.
*/
- ASSERT(zc->zc_hash == 0);
+ ASSERT0(zc->zc_hash);
hb = zap_hashbits(zc->zc_zap);
zc->zc_hash = zc->zc_serialized << (64 - hb);
zc->zc_cd += zc->zc_serialized >> hb;
diff --git a/sys/contrib/openzfs/module/zfs/zcp.c b/sys/contrib/openzfs/module/zfs/zcp.c
index 9aecf67fd256..c6684f453e95 100644
--- a/sys/contrib/openzfs/module/zfs/zcp.c
+++ b/sys/contrib/openzfs/module/zfs/zcp.c
@@ -765,7 +765,7 @@ zcp_lua_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
return (NULL);
}
(void) memcpy(luabuf, ptr, osize);
- VERIFY3P(zcp_lua_alloc(ud, ptr, osize, 0), ==, NULL);
+ VERIFY0P(zcp_lua_alloc(ud, ptr, osize, 0));
return (luabuf);
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zfeature.c b/sys/contrib/openzfs/module/zfs/zfeature.c
index 7dfe00d42a08..4cf9e0dbb405 100644
--- a/sys/contrib/openzfs/module/zfs/zfeature.c
+++ b/sys/contrib/openzfs/module/zfs/zfeature.c
@@ -210,8 +210,8 @@ spa_features_check(spa_t *spa, boolean_t for_write,
za->za_name, 1, MAXPATHLEN, buf) == 0)
desc = buf;
- VERIFY(nvlist_add_string(unsup_feat,
- za->za_name, desc) == 0);
+ VERIFY0(nvlist_add_string(unsup_feat,
+ za->za_name, desc));
}
}
}
@@ -308,6 +308,7 @@ feature_sync(spa_t *spa, zfeature_info_t *feature, uint64_t refcount,
ASSERT(VALID_FEATURE_OR_NONE(feature->fi_feature));
uint64_t zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj;
+ ASSERT(MUTEX_HELD(&spa->spa_feat_stats_lock));
VERIFY0(zap_update(spa->spa_meta_objset, zapobj, feature->fi_guid,
sizeof (uint64_t), 1, &refcount, tx));
@@ -360,7 +361,9 @@ feature_enable_sync(spa_t *spa, zfeature_info_t *feature, dmu_tx_t *tx)
feature->fi_guid, 1, strlen(feature->fi_desc) + 1,
feature->fi_desc, tx));
+ mutex_enter(&spa->spa_feat_stats_lock);
feature_sync(spa, feature, initial_refcount, tx);
+ mutex_exit(&spa->spa_feat_stats_lock);
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENABLED_TXG)) {
uint64_t enabling_txg = dmu_tx_get_txg(tx);
@@ -416,6 +419,7 @@ feature_do_action(spa_t *spa, spa_feature_t fid, feature_action_t action,
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3U(spa_version(spa), >=, SPA_VERSION_FEATURES);
+ mutex_enter(&spa->spa_feat_stats_lock);
VERIFY3U(feature_get_refcount(spa, feature, &refcount), !=, ENOTSUP);
switch (action) {
@@ -433,6 +437,7 @@ feature_do_action(spa_t *spa, spa_feature_t fid, feature_action_t action,
}
feature_sync(spa, feature, refcount, tx);
+ mutex_exit(&spa->spa_feat_stats_lock);
}
void
diff --git a/sys/contrib/openzfs/module/zfs/zfs_crrd.c b/sys/contrib/openzfs/module/zfs/zfs_crrd.c
index f9267ed41d71..30d4c7c36897 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_crrd.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_crrd.c
@@ -162,9 +162,9 @@ dbrrd_add(dbrrd_t *db, hrtime_t time, uint64_t txg)
daydiff = time - rrd_tail(&db->dbr_days);
monthdiff = time - rrd_tail(&db->dbr_months);
- if (monthdiff >= 0 && monthdiff >= SEC2NSEC(30 * 24 * 60 * 60))
+ if (monthdiff >= 0 && monthdiff >= 30 * 24 * 60 * 60)
rrd_add(&db->dbr_months, time, txg);
- else if (daydiff >= 0 && daydiff >= SEC2NSEC(24 * 60 * 60))
+ else if (daydiff >= 0 && daydiff >= 24 * 60 * 60)
rrd_add(&db->dbr_days, time, txg);
else if (minutedif >= 0)
rrd_add(&db->dbr_minutes, time, txg);
@@ -208,7 +208,8 @@ dbrrd_closest(hrtime_t tv, const rrd_data_t *r1, const rrd_data_t *r2)
if (r2 == NULL)
return (r1);
- return (ABS(tv - r1->rrdd_time) < ABS(tv - r2->rrdd_time) ? r1 : r2);
+ return (ABS(tv - (hrtime_t)r1->rrdd_time) <
+ ABS(tv - (hrtime_t)r2->rrdd_time) ? r1 : r2);
}
uint64_t
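The zfs_crrd.c hunk reads as a units fix: the day and month deltas appear to be tracked in seconds, so comparing them against SEC2NSEC() thresholds (nanoseconds) would leave the day/month rings effectively unreachable. A tiny illustration of the magnitude gap, under that reading of the change:

#include <stdio.h>

int
main(void)
{
	unsigned long long day_s = 24ULL * 60 * 60;		/* 86400 */
	unsigned long long day_ns = day_s * 1000000000ULL;	/* SEC2NSEC */

	/* A seconds-granularity diff would need ~2.7 million years to
	 * reach the old nanosecond threshold. */
	printf("%llu s vs %llu ns\n", day_s, day_ns);
	return (0);
}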
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fuid.c b/sys/contrib/openzfs/module/zfs/zfs_fuid.c
index 10a6d289fbf8..2af1efe82e62 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fuid.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fuid.c
@@ -112,8 +112,7 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
uint64_t fuid_size;
ASSERT(fuid_obj != 0);
- VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
- FTAG, &db));
+ VERIFY0(dmu_bonus_hold(os, fuid_obj, FTAG, &db));
fuid_size = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
@@ -125,22 +124,21 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
int i;
packed = kmem_alloc(fuid_size, KM_SLEEP);
- VERIFY(dmu_read(os, fuid_obj, 0,
- fuid_size, packed, DMU_READ_PREFETCH) == 0);
- VERIFY(nvlist_unpack(packed, fuid_size,
- &nvp, 0) == 0);
- VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
- &fuidnvp, &count) == 0);
+ VERIFY0(dmu_read(os, fuid_obj, 0,
+ fuid_size, packed, DMU_READ_PREFETCH));
+ VERIFY0(nvlist_unpack(packed, fuid_size, &nvp, 0));
+ VERIFY0(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
+ &fuidnvp, &count));
for (i = 0; i != count; i++) {
fuid_domain_t *domnode;
const char *domain;
uint64_t idx;
- VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
- &domain) == 0);
- VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
- &idx) == 0);
+ VERIFY0(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
+ &domain));
+ VERIFY0(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
+ &idx));
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
@@ -246,35 +244,33 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
&zfsvfs->z_fuid_obj, tx) == 0);
}
- VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP));
numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
- VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
- domnode->f_idx) == 0);
- VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
- VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
- domnode->f_ksid->kd_name) == 0);
+ VERIFY0(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(fuids[i], FUID_IDX,
+ domnode->f_idx));
+ VERIFY0(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0));
+ VERIFY0(nvlist_add_string(fuids[i], FUID_DOMAIN,
+ domnode->f_ksid->kd_name));
}
fnvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
(const nvlist_t * const *)fuids, numnodes);
for (i = 0; i != numnodes; i++)
nvlist_free(fuids[i]);
kmem_free(fuids, numnodes * sizeof (void *));
- VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
+ VERIFY0(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR));
packed = kmem_alloc(nvsize, KM_SLEEP);
- VERIFY(nvlist_pack(nvp, &packed, &nvsize,
- NV_ENCODE_XDR, KM_SLEEP) == 0);
+ VERIFY0(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP));
nvlist_free(nvp);
zfsvfs->z_fuid_size = nvsize;
dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
zfsvfs->z_fuid_size, packed, tx);
kmem_free(packed, zfsvfs->z_fuid_size);
- VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
- FTAG, &db));
+ VERIFY0(dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
dmu_buf_rele(db, FTAG);
diff --git a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
index dcb71229f96a..5ca7c2320c4e 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
@@ -683,6 +683,7 @@ zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
dsl_dataset_t *ds;
const char *cp;
int error;
+ boolean_t rawok = (zc->zc_flags & 0x8);
/*
* Generate the current snapshot name from the given objsetid, then
@@ -705,6 +706,10 @@ zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
ZFS_DELEG_PERM_SEND, cr);
+ if (error != 0 && rawok == B_TRUE) {
+ error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
+ ZFS_DELEG_PERM_SEND_RAW, cr);
+ }
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
@@ -714,9 +719,17 @@ zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
static int
zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
+ boolean_t rawok = nvlist_exists(innvl, "rawok");
+ int error;
+
(void) innvl;
- return (zfs_secpolicy_write_perms(zc->zc_name,
- ZFS_DELEG_PERM_SEND, cr));
+ error = zfs_secpolicy_write_perms(zc->zc_name,
+ ZFS_DELEG_PERM_SEND, cr);
+ if (error != 0 && rawok == B_TRUE) {
+ error = zfs_secpolicy_write_perms(zc->zc_name,
+ ZFS_DELEG_PERM_SEND_RAW, cr);
+ }
+ return (error);
}
static int
@@ -1493,7 +1506,7 @@ zfs_ioc_pool_create(zfs_cmd_t *zc)
goto pool_props_bad;
(void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS);
- VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP));
error = zfs_fill_zplprops_root(version, rootprops,
zplprops, NULL);
if (error != 0)
@@ -2245,7 +2258,7 @@ nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop)
*/
if ((error = zfs_get_zplprop(os, prop, &value)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0);
+ VERIFY0(nvlist_add_uint64(props, zfs_prop_to_name(prop), value));
return (0);
}
@@ -2280,7 +2293,7 @@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
- VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP));
if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 &&
@@ -2483,7 +2496,7 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
+ VERIFY0(nvpair_value_nvlist(pair, &attrs));
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) != 0)
return (SET_ERROR(EINVAL));
@@ -2538,9 +2551,8 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &pair) == 0);
+ VERIFY0(nvpair_value_nvlist(pair, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair));
}
/* all special properties are numeric except for keylocation */
@@ -2932,14 +2944,14 @@ props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops)
{
nvpair_t *pair;
- VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP));
pair = NULL;
while ((pair = nvlist_next_nvpair(props, pair)) != NULL) {
if (nvlist_exists(skipped, nvpair_name(pair)))
continue;
- VERIFY(nvlist_add_nvpair(*newprops, pair) == 0);
+ VERIFY0(nvlist_add_nvpair(*newprops, pair));
}
}
@@ -3064,11 +3076,11 @@ zfs_ioc_inherit_prop(zfs_cmd_t *zc)
switch (type) {
case PROP_TYPE_STRING:
- VERIFY(0 == nvlist_add_string(dummy, propname, ""));
+ VERIFY0(nvlist_add_string(dummy, propname, ""));
break;
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
- VERIFY(0 == nvlist_add_uint64(dummy, propname, 0));
+ VERIFY0(nvlist_add_uint64(dummy, propname, 0));
break;
default:
err = SET_ERROR(EINVAL);
@@ -3454,14 +3466,14 @@ zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
/*
* Put the version in the zplprops
*/
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_VERSION), zplver));
if (norm == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm));
/*
* If we're normalizing, names must always be valid UTF-8 strings.
@@ -3471,55 +3483,55 @@ zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
if (u8 == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8));
if (sense == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_CASE), sense));
if (duq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTUSERQUOTA, &duq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTUSERQUOTA), duq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTUSERQUOTA), duq));
if (dgq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTGROUPQUOTA,
&dgq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPQUOTA), dgq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPQUOTA), dgq));
if (dpq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTPROJECTQUOTA,
&dpq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTQUOTA), dpq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTQUOTA), dpq));
if (duoq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTUSEROBJQUOTA,
&duoq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTUSEROBJQUOTA), duoq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTUSEROBJQUOTA), duoq));
if (dgoq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTGROUPOBJQUOTA,
&dgoq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPOBJQUOTA), dgoq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPOBJQUOTA), dgoq));
if (dpoq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTPROJECTOBJQUOTA,
&dpoq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTOBJQUOTA), dpoq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTOBJQUOTA), dpoq));
if (is_ci)
*is_ci = (sense == ZFS_CASE_INSENSITIVE);
@@ -3668,8 +3680,8 @@ zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
* file system creation, so go figure them out
* now.
*/
- VERIFY(nvlist_alloc(&zct.zct_zplprops,
- NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&zct.zct_zplprops,
+ NV_UNIQUE_NAME, KM_SLEEP));
error = zfs_fill_zplprops(fsname, nvprops,
zct.zct_zplprops, &is_insensitive);
if (error != 0) {
@@ -4727,7 +4739,7 @@ zfs_ioc_rollback(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
error = error ? error : resume_err;
}
zfs_vfs_rele(zfsvfs);
- } else if ((zv = zvol_suspend(fsname)) != NULL) {
+ } else if (zvol_suspend(fsname, &zv) == 0) {
error = dsl_dataset_rollback(fsname, target, zvol_tag(zv),
outnvl);
zvol_resume(zv);
@@ -4916,9 +4928,8 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
* format.
*/
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &pair) == 0);
+ VERIFY0(nvpair_value_nvlist(pair, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair));
}
/*
@@ -5103,7 +5114,7 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
if (props == NULL)
return (0);
- VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP));
zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name));
@@ -5115,9 +5126,8 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
sizeof (zc->zc_value));
if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 ||
(err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) {
- VERIFY(nvlist_remove_nvpair(props, pair) == 0);
- VERIFY(nvlist_add_int32(errors,
- zc->zc_value, err) == 0);
+ VERIFY0(nvlist_remove_nvpair(props, pair));
+ VERIFY0(nvlist_add_int32(errors, zc->zc_value, err));
}
pair = next_pair;
}
@@ -5127,7 +5137,7 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
nvlist_free(errors);
errors = NULL;
} else {
- VERIFY(nvpair_value_int32(pair, &rv) == 0);
+ VERIFY0(nvpair_value_int32(pair, &rv));
}
if (errlist == NULL)
@@ -5144,16 +5154,14 @@ propval_equals(nvpair_t *p1, nvpair_t *p2)
if (nvpair_type(p1) == DATA_TYPE_NVLIST) {
/* dsl_prop_get_all_impl() format */
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(p1, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &p1) == 0);
+ VERIFY0(nvpair_value_nvlist(p1, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p1));
}
if (nvpair_type(p2) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(p2, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &p2) == 0);
+ VERIFY0(nvpair_value_nvlist(p2, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p2));
}
if (nvpair_type(p1) != nvpair_type(p2))
@@ -5162,14 +5170,14 @@ propval_equals(nvpair_t *p1, nvpair_t *p2)
if (nvpair_type(p1) == DATA_TYPE_STRING) {
const char *valstr1, *valstr2;
- VERIFY(nvpair_value_string(p1, &valstr1) == 0);
- VERIFY(nvpair_value_string(p2, &valstr2) == 0);
+ VERIFY0(nvpair_value_string(p1, &valstr1));
+ VERIFY0(nvpair_value_string(p2, &valstr2));
return (strcmp(valstr1, valstr2) == 0);
} else {
uint64_t intval1, intval2;
- VERIFY(nvpair_value_uint64(p1, &intval1) == 0);
- VERIFY(nvpair_value_uint64(p2, &intval2) == 0);
+ VERIFY0(nvpair_value_uint64(p1, &intval1));
+ VERIFY0(nvpair_value_uint64(p2, &intval2));
return (intval1 == intval2);
}
}
@@ -5237,7 +5245,7 @@ extract_delay_props(nvlist_t *props)
};
int i;
- VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP));
for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(props, nvp)) {
@@ -5253,8 +5261,8 @@ extract_delay_props(nvlist_t *props)
}
if (delayable[i] != 0) {
tmp = nvlist_prev_nvpair(props, nvp);
- VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0);
- VERIFY(nvlist_remove_nvpair(props, nvp) == 0);
+ VERIFY0(nvlist_add_nvpair(delayprops, nvp));
+ VERIFY0(nvlist_remove_nvpair(props, nvp));
nvp = tmp;
}
}
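
extract_delay_props() above uses a common libnvpair idiom: to remove the pair
the iterator is sitting on, remember its predecessor first, then resume from
there (a NULL predecessor simply restarts from the head). A userland-flavored
sketch of the same idiom (move_matching() is hypothetical; the nvlist_* calls
are the regular libnvpair API, and VERIFY0 is assumed from the libspl debug
headers):

#include <libnvpair.h>

/* Move every pair whose name satisfies pred() from src to dst. */
static void
move_matching(nvlist_t *src, nvlist_t *dst,
    boolean_t (*pred)(const char *))
{
	for (nvpair_t *nvp = nvlist_next_nvpair(src, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(src, nvp)) {
		if (!pred(nvpair_name(nvp)))
			continue;
		/* Step back before removing, as the hunk above does. */
		nvpair_t *prev = nvlist_prev_nvpair(src, nvp);
		VERIFY0(nvlist_add_nvpair(dst, nvp));	/* copies nvp */
		VERIFY0(nvlist_remove_nvpair(src, nvp));
		nvp = prev;
	}
}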
@@ -5453,7 +5461,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, const char *origin,
}
error = error ? error : end_err;
zfs_vfs_rele(zfsvfs);
- } else if ((zv = zvol_suspend(tofs)) != NULL) {
+ } else if (zvol_suspend(tofs, &zv) == 0) {
error = dmu_recv_end(&drc, zvol_tag(zv));
zvol_resume(zv);
} else {
@@ -5485,15 +5493,15 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, const char *origin,
* using ASSERT() will be just like a VERIFY.
*/
if (recv_delayprops != NULL) {
- ASSERT(nvlist_merge(recvprops, recv_delayprops, 0) == 0);
+ ASSERT0(nvlist_merge(recvprops, recv_delayprops, 0));
nvlist_free(recv_delayprops);
}
if (local_delayprops != NULL) {
- ASSERT(nvlist_merge(localprops, local_delayprops, 0) == 0);
+ ASSERT0(nvlist_merge(localprops, local_delayprops, 0));
nvlist_free(local_delayprops);
}
if (inherited_delayprops != NULL) {
- ASSERT(nvlist_merge(localprops, inherited_delayprops, 0) == 0);
+ ASSERT0(nvlist_merge(localprops, inherited_delayprops, 0));
nvlist_free(inherited_delayprops);
}
*read_bytes = off - noff;
@@ -7342,8 +7350,8 @@ zfs_ioctl_register_legacy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
- ASSERT3P(vec->zvec_legacy_func, ==, NULL);
- ASSERT3P(vec->zvec_func, ==, NULL);
+ ASSERT0P(vec->zvec_legacy_func);
+ ASSERT0P(vec->zvec_func);
vec->zvec_legacy_func = func;
vec->zvec_secpolicy = secpolicy;
@@ -7366,8 +7374,8 @@ zfs_ioctl_register(const char *name, zfs_ioc_t ioc, zfs_ioc_func_t *func,
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
- ASSERT3P(vec->zvec_legacy_func, ==, NULL);
- ASSERT3P(vec->zvec_func, ==, NULL);
+ ASSERT0P(vec->zvec_legacy_func);
+ ASSERT0P(vec->zvec_func);
/* if we are logging, the name must be valid */
ASSERT(!allow_log || namecheck != NO_NAME);
@@ -7624,7 +7632,7 @@ zfs_ioctl_init(void)
zfs_ioctl_register("scrub", ZFS_IOC_POOL_SCRUB,
zfs_ioc_pool_scrub, zfs_secpolicy_config, POOL_NAME,
- POOL_CHECK_NONE, B_TRUE, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_scrub, ARRAY_SIZE(zfs_keys_pool_scrub));
zfs_ioctl_register("get_props", ZFS_IOC_POOL_GET_PROPS,
@@ -8148,7 +8156,7 @@ zfsdev_ioctl_common(uint_t vecnum, zfs_cmd_t *zc, int flag)
spa_t *spa;
nvlist_t *lognv = NULL;
- ASSERT(vec->zvec_legacy_func == NULL);
+ ASSERT0P(vec->zvec_legacy_func);
/*
* Add the innvl to the lognv before calling the func,
diff --git a/sys/contrib/openzfs/module/zfs/zfs_log.c b/sys/contrib/openzfs/module/zfs/zfs_log.c
index 2f61ecfd9b3b..ea17e049279f 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_log.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_log.c
@@ -620,7 +620,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
if (zil_replaying(zilog, tx) || zp->z_unlinked ||
zfs_xattr_owner_unlinked(zp)) {
if (callback != NULL)
- callback(callback_data);
+ callback(callback_data, 0);
return;
}
@@ -663,7 +663,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
DMU_KEEP_CACHING);
DB_DNODE_EXIT(db);
if (err != 0) {
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
itx = zil_itx_create(txtype, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
diff --git a/sys/contrib/openzfs/module/zfs/zfs_quota.c b/sys/contrib/openzfs/module/zfs/zfs_quota.c
index b8fe512d4f09..2e91ccc27d6d 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_quota.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_quota.c
@@ -374,7 +374,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
if (*objp == 0) {
*objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
DMU_OT_NONE, 0, tx);
- VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
+ VERIFY0(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
}
mutex_exit(&zfsvfs->z_lock);
@@ -386,7 +386,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
} else {
err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
}
- ASSERT(err == 0);
+ ASSERT0(err);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
dmu_tx_commit(tx);
diff --git a/sys/contrib/openzfs/module/zfs/zfs_rlock.c b/sys/contrib/openzfs/module/zfs/zfs_rlock.c
index 53eb3ef1b66e..4035baff77d6 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_rlock.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_rlock.c
@@ -666,7 +666,7 @@ zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
/* Ensure there are no other locks */
ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
- ASSERT3U(lr->lr_offset, ==, 0);
+ ASSERT0(lr->lr_offset);
ASSERT3U(lr->lr_type, ==, RL_WRITER);
ASSERT(!lr->lr_proxy);
ASSERT3U(lr->lr_length, ==, UINT64_MAX);
diff --git a/sys/contrib/openzfs/module/zfs/zfs_sa.c b/sys/contrib/openzfs/module/zfs/zfs_sa.c
index 59b6ae4e4203..8b4fc6fd7fbd 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_sa.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_sa.c
@@ -169,7 +169,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa)
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
&xoap->xoa_av_scanstamp,
sizeof (xoap->xoa_av_scanstamp), tx));
else {
@@ -181,12 +181,12 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
len = sizeof (xoap->xoa_av_scanstamp) +
ZFS_OLD_ZNODE_PHYS_SIZE;
if (len > doi.doi_bonus_size)
- VERIFY(dmu_set_bonus(db, len, tx) == 0);
+ VERIFY0(dmu_set_bonus(db, len, tx));
(void) memcpy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp));
zp->z_pflags |= ZFS_BONUS_SCANSTAMP;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
&zp->z_pflags, sizeof (uint64_t), tx));
}
}
@@ -286,7 +286,7 @@ zfs_sa_set_xattr(znode_t *zp, const char *name, const void *value, size_t vsize)
dmu_tx_commit(tx);
if (logsaxattr && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
}
out_free:
vmem_free(obj, size);
@@ -427,11 +427,10 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
}
- VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
- VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
- count, tx) == 0);
+ VERIFY0(dmu_set_bonustype(db, DMU_OT_SA, tx));
+ VERIFY0(sa_replace_all_by_template_locked(hdl, sa_attrs, count, tx));
if (znode_acl.z_acl_extern_obj)
- VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ VERIFY0(dmu_object_free(zfsvfs->z_os,
znode_acl.z_acl_extern_obj, tx));
zp->z_is_sa = B_TRUE;
diff --git a/sys/contrib/openzfs/module/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
index 74aa91a4f2eb..7bb9ba57c69e 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
@@ -27,6 +27,7 @@
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
* Copyright (c) 2025, Rob Norris <robn@despairlabs.com>
+ * Copyright (c) 2025, Klara, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
@@ -116,7 +117,7 @@ zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
- zil_commit(zfsvfs->z_log, zp->z_id);
+ error = zil_commit(zfsvfs->z_log, zp->z_id);
zfs_exit(zfsvfs, FTAG);
}
return (error);
@@ -375,8 +376,13 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
frsync = !!(ioflag & FRSYNC);
#endif
if (zfsvfs->z_log &&
- (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
- zil_commit(zfsvfs->z_log, zp->z_id);
+ (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)) {
+ error = zil_commit(zfsvfs->z_log, zp->z_id);
+ if (error != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
/*
* Lock the range against changes.
@@ -1074,8 +1080,13 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
return (error);
}
- if (commit)
- zil_commit(zilog, zp->z_id);
+ if (commit) {
+ error = zil_commit(zilog, zp->z_id);
+ if (error != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
@@ -1260,8 +1271,8 @@ zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
zilog = zfsvfs->z_log;
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1946,7 +1957,7 @@ unlock:
ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);
if (outos->os_sync == ZFS_SYNC_ALWAYS) {
- zil_commit(zilog, outzp->z_id);
+ error = zil_commit(zilog, outzp->z_id);
}
*inoffp += done;
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index 6e4f84257407..0307df55aa21 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -24,6 +24,7 @@
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
+ * Copyright (c) 2025, Klara, Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
@@ -103,6 +104,7 @@ static zil_kstat_values_t zil_stats = {
{ "zil_commit_error_count", KSTAT_DATA_UINT64 },
{ "zil_commit_stall_count", KSTAT_DATA_UINT64 },
{ "zil_commit_suspend_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_crash_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
@@ -145,7 +147,7 @@ static uint64_t zil_slog_bulk = 64 * 1024 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
-static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
+static int zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
static itx_t *zil_itx_clone(itx_t *oitx);
static uint64_t zil_max_waste_space(zilog_t *zilog);
@@ -367,6 +369,7 @@ zil_sums_init(zil_sums_t *zs)
wmsum_init(&zs->zil_commit_error_count, 0);
wmsum_init(&zs->zil_commit_stall_count, 0);
wmsum_init(&zs->zil_commit_suspend_count, 0);
+ wmsum_init(&zs->zil_commit_crash_count, 0);
wmsum_init(&zs->zil_itx_count, 0);
wmsum_init(&zs->zil_itx_indirect_count, 0);
wmsum_init(&zs->zil_itx_indirect_bytes, 0);
@@ -392,6 +395,7 @@ zil_sums_fini(zil_sums_t *zs)
wmsum_fini(&zs->zil_commit_error_count);
wmsum_fini(&zs->zil_commit_stall_count);
wmsum_fini(&zs->zil_commit_suspend_count);
+ wmsum_fini(&zs->zil_commit_crash_count);
wmsum_fini(&zs->zil_itx_count);
wmsum_fini(&zs->zil_itx_indirect_count);
wmsum_fini(&zs->zil_itx_indirect_bytes);
@@ -422,6 +426,8 @@ zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
wmsum_value(&zil_sums->zil_commit_stall_count);
zs->zil_commit_suspend_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_suspend_count);
+ zs->zil_commit_crash_count.value.ui64 =
+ wmsum_value(&zil_sums->zil_commit_crash_count);
zs->zil_itx_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_count);
zs->zil_itx_indirect_count.value.ui64 =
@@ -813,34 +819,37 @@ zil_lwb_vdev_compare(const void *x1, const void *x2)
* we choose them here and later make the block allocation match.
*/
static lwb_t *
-zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
- uint64_t txg, lwb_state_t state)
+zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, int min_sz, int sz,
+ boolean_t slog, uint64_t txg)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
+ lwb->lwb_flags = 0;
lwb->lwb_zilog = zilog;
if (bp) {
lwb->lwb_blk = *bp;
- lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
+ if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2)
+ lwb->lwb_flags |= LWB_FLAG_SLIM;
sz = BP_GET_LSIZE(bp);
+ lwb->lwb_min_sz = sz;
} else {
BP_ZERO(&lwb->lwb_blk);
- lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
- SPA_VERSION_SLIM_ZIL);
+ if (spa_version(zilog->zl_spa) >= SPA_VERSION_SLIM_ZIL)
+ lwb->lwb_flags |= LWB_FLAG_SLIM;
+ lwb->lwb_min_sz = min_sz;
}
- lwb->lwb_slog = slog;
+ if (slog)
+ lwb->lwb_flags |= LWB_FLAG_SLOG;
lwb->lwb_error = 0;
- if (lwb->lwb_slim) {
- lwb->lwb_nmax = sz;
- lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
- } else {
- lwb->lwb_nmax = sz - sizeof (zil_chain_t);
- lwb->lwb_nused = lwb->lwb_nfilled = 0;
- }
+ /*
+ * Buffer allocation and capacity setup will be done in
+ * zil_lwb_write_open() when the LWB is opened for ITX assignment.
+ */
+ lwb->lwb_nmax = lwb->lwb_nused = lwb->lwb_nfilled = 0;
lwb->lwb_sz = sz;
- lwb->lwb_state = state;
- lwb->lwb_buf = zio_buf_alloc(sz);
+ lwb->lwb_buf = NULL;
+ lwb->lwb_state = LWB_STATE_NEW;
lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
@@ -851,8 +860,6 @@ zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
- if (state != LWB_STATE_NEW)
- zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
return (lwb);
@@ -864,15 +871,15 @@ zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
- ASSERT3P(lwb->lwb_child_zio, ==, NULL);
- ASSERT3P(lwb->lwb_write_zio, ==, NULL);
- ASSERT3P(lwb->lwb_root_zio, ==, NULL);
+ ASSERT0P(lwb->lwb_child_zio);
+ ASSERT0P(lwb->lwb_write_zio);
+ ASSERT0P(lwb->lwb_root_zio);
ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
VERIFY(list_is_empty(&lwb->lwb_itxs));
VERIFY(list_is_empty(&lwb->lwb_waiters));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
- ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
+ ASSERT(!MUTEX_HELD(&lwb->lwb_lock));
/*
* Clear the zilog's field to indicate this lwb is no longer
@@ -991,8 +998,8 @@ zil_create(zilog_t *zilog)
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
- ASSERT(zh->zh_claim_txg == 0);
- ASSERT(zh->zh_replay_seq == 0);
+ ASSERT0(zh->zh_claim_txg);
+ ASSERT0(zh->zh_replay_seq);
blk = zh->zh_log;
@@ -1013,7 +1020,7 @@ zil_create(zilog_t *zilog)
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
- ZIL_MIN_BLKSZ, &slog);
+ ZIL_MIN_BLKSZ, ZIL_MIN_BLKSZ, &slog, B_TRUE);
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
@@ -1022,7 +1029,7 @@ zil_create(zilog_t *zilog)
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
- lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
+ lwb = zil_alloc_lwb(zilog, &blk, 0, 0, slog, txg);
/*
* If we just allocated the first log block, commit our transaction
@@ -1104,7 +1111,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
- ASSERT(zh->zh_claim_txg == 0);
+ ASSERT0(zh->zh_claim_txg);
VERIFY(!keep_first);
while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_buf != NULL)
@@ -1250,7 +1257,7 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
blkptr_t *bp;
int error;
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
@@ -1318,10 +1325,12 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
* zil_commit() is racing with spa_sync().
*/
static void
-zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
+zil_commit_waiter_done(zil_commit_waiter_t *zcw, int err)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
+ zcw->zcw_lwb = NULL;
+ zcw->zcw_error = err;
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
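
The new zil_commit_waiter_done() centralizes the publish-error, mark-done,
wake-waiters sequence that zil_lwb_flush_vdevs_done() used to open-code. The
same completion pattern in portable pthreads terms (a sketch of the shape
only, not the kernel's mutex/cv primitives):

#include <pthread.h>

/* Generic completion object mirroring zil_commit_waiter_t's shape. */
struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	int done;
	int error;
};

/* Publish the result and wake everyone sleeping on the waiter. */
static void
waiter_done(struct waiter *w, int err)
{
	pthread_mutex_lock(&w->lock);
	w->error = err;
	w->done = 1;
	pthread_cond_broadcast(&w->cv);
	pthread_mutex_unlock(&w->lock);
}

/* Consumer side: sleep until done, then collect the error. */
static int
waiter_wait(struct waiter *w)
{
	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cv, &w->lock);
	int err = w->error;
	pthread_mutex_unlock(&w->lock);
	return (err);
}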
@@ -1351,7 +1360,7 @@ zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
ASSERT(!list_link_active(&zcw->zcw_node));
list_insert_tail(&lwb->lwb_waiters, zcw);
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
+ ASSERT0P(zcw->zcw_lwb);
zcw->zcw_lwb = lwb;
}
@@ -1365,7 +1374,7 @@ zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
ASSERT(!list_link_active(&zcw->zcw_node));
list_insert_tail(nolwb, zcw);
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
+ ASSERT0P(zcw->zcw_lwb);
}
void
@@ -1383,7 +1392,7 @@ zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
if (zil_nocacheflush)
return;
- mutex_enter(&lwb->lwb_vdev_lock);
+ mutex_enter(&lwb->lwb_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
@@ -1392,7 +1401,7 @@ zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
avl_insert(t, zv, where);
}
}
- mutex_exit(&lwb->lwb_vdev_lock);
+ mutex_exit(&lwb->lwb_lock);
}
static void
@@ -1409,12 +1418,12 @@ zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
/*
* While 'lwb' is at a point in its lifetime where lwb_vdev_tree does
- * not need the protection of lwb_vdev_lock (it will only be modified
+ * not need the protection of lwb_lock (it will only be modified
* while holding zilog->zl_lock) as its writes and those of its
* children have all completed. The younger 'nlwb' may be waiting on
* future writes to additional vdevs.
*/
- mutex_enter(&nlwb->lwb_vdev_lock);
+ mutex_enter(&nlwb->lwb_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
@@ -1428,7 +1437,7 @@ zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
kmem_free(zv, sizeof (*zv));
}
}
- mutex_exit(&nlwb->lwb_vdev_lock);
+ mutex_exit(&nlwb->lwb_lock);
}
void
@@ -1482,13 +1491,9 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
}
while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
- mutex_enter(&zcw->zcw_lock);
-
- ASSERT3P(zcw->zcw_lwb, ==, lwb);
- zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
@@ -1503,14 +1508,7 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
-
- zcw->zcw_zio_error = zio->io_error;
-
- ASSERT3B(zcw->zcw_done, ==, B_FALSE);
- zcw->zcw_done = B_TRUE;
- cv_broadcast(&zcw->zcw_cv);
-
- mutex_exit(&zcw->zcw_lock);
+ zil_commit_waiter_done(zcw, zio->io_error);
}
uint64_t txg = lwb->lwb_issued_txg;
@@ -1582,7 +1580,7 @@ zil_lwb_write_done(zio_t *zio)
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
- lwb_t *nlwb;
+ lwb_t *nlwb = NULL;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
@@ -1602,9 +1600,11 @@ zil_lwb_write_done(zio_t *zio)
* its write ZIO a parent this ZIO. In such case we can not defer
* our flushes or below may be a race between the done callbacks.
*/
- nlwb = list_next(&zilog->zl_lwb_list, lwb);
- if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
- nlwb = NULL;
+ if (!(lwb->lwb_flags & LWB_FLAG_CRASHED)) {
+ nlwb = list_next(&zilog->zl_lwb_list, lwb);
+ if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
+ nlwb = NULL;
+ }
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
@@ -1618,12 +1618,17 @@ zil_lwb_write_done(zio_t *zio)
* written out.
*
* Additionally, we don't perform any further error handling at
- * this point (e.g. setting "zcw_zio_error" appropriately), as
- * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
- * we expect any error seen here, to have been propagated to
- * that function).
+ * this point (e.g. setting "zcw_error" appropriately), as we
+ * expect that to occur in "zil_lwb_flush_vdevs_done" (thus, we
+ * expect any error seen here, to have been propagated to that
+ * function).
+ *
+ * Note that we treat a "crashed" LWB as though it was in error,
+ * even if it did appear to succeed, because we've already
+ * signaled error and cleaned up waiters and committers in
+ * zil_crash(); we just want to clean up and get out of here.
*/
- if (zio->io_error != 0) {
+ if (zio->io_error != 0 || (lwb->lwb_flags & LWB_FLAG_CRASHED)) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
@@ -1736,10 +1741,26 @@ zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
return;
}
+ mutex_enter(&lwb->lwb_lock);
mutex_enter(&zilog->zl_lock);
lwb->lwb_state = LWB_STATE_OPENED;
zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
+ mutex_exit(&lwb->lwb_lock);
+
+ /*
+ * Allocate buffer and set up LWB capacities.
+ */
+ ASSERT0P(lwb->lwb_buf);
+ ASSERT3U(lwb->lwb_sz, >, 0);
+ lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
+ if (lwb->lwb_flags & LWB_FLAG_SLIM) {
+ lwb->lwb_nmax = lwb->lwb_sz;
+ lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
+ } else {
+ lwb->lwb_nmax = lwb->lwb_sz - sizeof (zil_chain_t);
+ lwb->lwb_nused = lwb->lwb_nfilled = 0;
+ }
}
/*
@@ -1756,6 +1777,8 @@ static uint_t
zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
{
uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t);
+ uint_t waste = zil_max_waste_space(zilog);
+ waste = MAX(waste, zilog->zl_cur_max);
if (size <= md) {
/*
@@ -1766,9 +1789,10 @@ zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
} else if (size > 8 * md) {
/*
* Big bursts use maximum blocks. The first block size
- * is hard to predict, but it does not really matter.
+ * is hard to predict, but we need at least enough space
+ * to make reasonable progress.
*/
- *minsize = 0;
+ *minsize = waste;
return (md);
}
@@ -1781,57 +1805,52 @@ zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
uint_t s = size;
uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t));
uint_t chunk = DIV_ROUND_UP(s, n);
- uint_t waste = zil_max_waste_space(zilog);
- waste = MAX(waste, zilog->zl_cur_max);
if (chunk <= md - waste) {
*minsize = MAX(s - (md - waste) * (n - 1), waste);
return (chunk);
} else {
- *minsize = 0;
+ *minsize = waste;
return (md);
}
}
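
For bursts between one and eight maximum-size blocks, zil_lwb_plan() splits
the burst into the fewest equal chunks that each fit a maximum block. A
worked example with made-up numbers (md and hdr are assumptions here, not the
real ZIL constants):

#include <stdio.h>

#define	DIV_ROUND_UP_SKETCH(a, b)	(((a) + (b) - 1) / (b))

int
main(void)
{
	unsigned md = 131072, hdr = 192;	/* assumed sizes */
	unsigned size = 300000;		/* burst between md and 8*md */
	unsigned n = DIV_ROUND_UP_SKETCH(size, md - hdr);
	unsigned chunk = DIV_ROUND_UP_SKETCH(size, n);

	/* Prints: n=3 chunk=100000 */
	printf("n=%u chunk=%u\n", n, chunk);
	return (0);
}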
/*
* Try to predict next block size based on previous history. Make prediction
- * sufficient for 7 of 8 previous bursts. Don't try to save if the saving is
- * less then 50%, extra writes may cost more, but we don't want single spike
- * to badly affect our predictions.
+ * sufficient for 7 of 8 previous bursts, but don't try to save if the saving
+ * is less than 50%. Extra writes may cost more, but we don't want a single
+ * spike to badly affect our predictions.
*/
-static uint_t
-zil_lwb_predict(zilog_t *zilog)
+static void
+zil_lwb_predict(zilog_t *zilog, uint64_t *min_predict, uint64_t *max_predict)
{
- uint_t m, o;
+ uint_t m1 = 0, m2 = 0, o;
- /* If we are in the middle of a burst, take it into account also. */
- if (zilog->zl_cur_size > 0) {
- o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m);
- } else {
+ /* If we are in the middle of a burst, take it as another data point. */
+ if (zilog->zl_cur_size > 0)
+ o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m1);
+ else
o = UINT_MAX;
- m = 0;
- }
- /* Find minimum optimal size. We don't need to go below that. */
- for (int i = 0; i < ZIL_BURSTS; i++)
- o = MIN(o, zilog->zl_prev_opt[i]);
-
- /* Find two biggest minimal first block sizes above the optimal. */
- uint_t m1 = MAX(m, o), m2 = o;
+ /* Find the two largest minimal first block sizes. */
for (int i = 0; i < ZIL_BURSTS; i++) {
- m = zilog->zl_prev_min[i];
- if (m >= m1) {
+ uint_t cur = zilog->zl_prev_min[i];
+ if (cur >= m1) {
m2 = m1;
- m1 = m;
- } else if (m > m2) {
- m2 = m;
+ m1 = cur;
+ } else if (cur > m2) {
+ m2 = cur;
}
}
- /*
- * If second minimum size gives 50% saving -- use it. It may cost us
- * one additional write later, but the space saving is just too big.
- */
- return ((m1 < m2 * 2) ? m1 : m2);
+ /* Minimum should guarantee progress in most cases. */
+ *min_predict = (m1 < m2 * 2) ? m1 : m2;
+
+ /* Maximum doesn't need to go below the minimum optimal size. */
+ for (int i = 0; i < ZIL_BURSTS; i++)
+ o = MIN(o, zilog->zl_prev_opt[i]);
+ m1 = MAX(m1, o);
+ m2 = MAX(m2, o);
+ *max_predict = (m1 < m2 * 2) ? m1 : m2;
}
/*
@@ -1839,12 +1858,13 @@ zil_lwb_predict(zilog_t *zilog)
* Has to be called under zl_issuer_lock to chain more lwbs.
*/
static lwb_t *
-zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
+zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb)
{
- uint64_t blksz, plan, plan2;
+ uint64_t minbs, maxbs;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
+ membar_producer();
lwb->lwb_state = LWB_STATE_CLOSED;
/*
@@ -1869,33 +1889,40 @@ zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
* Try to predict what can it be and plan for the worst case.
*/
uint_t m;
- plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
+ maxbs = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
+ minbs = m;
if (zilog->zl_parallel) {
- plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left +
- zil_lwb_predict(zilog), &m);
- if (plan < plan2)
- plan = plan2;
+ uint64_t minp, maxp;
+ zil_lwb_predict(zilog, &minp, &maxp);
+ maxp = zil_lwb_plan(zilog, zilog->zl_cur_left + maxp,
+ &m);
+ if (maxbs < maxp)
+ maxbs = maxp;
}
} else {
/*
* The previous burst is done and we can only predict what
* will come next.
*/
- plan = zil_lwb_predict(zilog);
+ zil_lwb_predict(zilog, &minbs, &maxbs);
}
- blksz = plan + sizeof (zil_chain_t);
- blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t);
- blksz = MIN(blksz, zilog->zl_max_block_size);
- DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz,
- uint64_t, plan);
- return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state));
+ minbs += sizeof (zil_chain_t);
+ maxbs += sizeof (zil_chain_t);
+ minbs = P2ROUNDUP_TYPED(minbs, ZIL_MIN_BLKSZ, uint64_t);
+ maxbs = P2ROUNDUP_TYPED(maxbs, ZIL_MIN_BLKSZ, uint64_t);
+ maxbs = MIN(maxbs, zilog->zl_max_block_size);
+ minbs = MIN(minbs, maxbs);
+ DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, minbs,
+ uint64_t, maxbs);
+
+ return (zil_alloc_lwb(zilog, NULL, minbs, maxbs, 0, 0));
}
/*
* Finalize previously closed block and issue the write zio.
*/
-static void
+static int
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
spa_t *spa = zilog->zl_spa;
@@ -1909,8 +1936,13 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
/* Actually fill the lwb with the data. */
for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
- itx = list_next(&lwb->lwb_itxs, itx))
- zil_lwb_commit(zilog, lwb, itx);
+ itx = list_next(&lwb->lwb_itxs, itx)) {
+ error = zil_lwb_commit(zilog, lwb, itx);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ return (error);
+ }
+ }
lwb->lwb_nused = lwb->lwb_nfilled;
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
@@ -1928,19 +1960,21 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
lwb->lwb_state = LWB_STATE_READY;
if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) {
mutex_exit(&zilog->zl_lock);
- return;
+ return (0);
}
mutex_exit(&zilog->zl_lock);
next_lwb:
- if (lwb->lwb_slim)
+ if (lwb->lwb_flags & LWB_FLAG_SLIM)
zilc = (zil_chain_t *)lwb->lwb_buf;
else
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
- int wsz = lwb->lwb_sz;
+ uint64_t alloc_size = BP_GET_LSIZE(&lwb->lwb_blk);
+ int wsz = alloc_size;
if (lwb->lwb_error == 0) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
- if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk)
+ if (!(lwb->lwb_flags & LWB_FLAG_SLOG) ||
+ zilog->zl_cur_size <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
@@ -1948,16 +1982,17 @@ next_lwb:
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
- &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
+ &lwb->lwb_blk, lwb_abd, alloc_size, zil_lwb_write_done,
lwb, prio, ZIO_FLAG_CANFAIL, &zb);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
- if (lwb->lwb_slim) {
+ if (lwb->lwb_flags & LWB_FLAG_SLIM) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
int);
- ASSERT3S(wsz, <=, lwb->lwb_sz);
- zio_shrink(lwb->lwb_write_zio, wsz);
+ ASSERT3S(wsz, <=, alloc_size);
+ if (wsz < alloc_size)
+ zio_shrink(lwb->lwb_write_zio, wsz);
wsz = lwb->lwb_write_zio->io_size;
}
memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
@@ -1993,13 +2028,53 @@ next_lwb:
BP_ZERO(bp);
error = lwb->lwb_error;
if (error == 0) {
- error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
- &slog);
+ /*
+ * Allocation flexibility depends on LWB state:
+ * if NEW: allow range allocation and larger sizes;
+ * if OPENED: use fixed predetermined allocation size;
+ * if CLOSED + Slim: allocate precisely for actual usage.
+ */
+ boolean_t flexible = (nlwb->lwb_state == LWB_STATE_NEW);
+ if (flexible) {
+ /* We need to prevent opening till we update lwb_sz. */
+ mutex_enter(&nlwb->lwb_lock);
+ flexible = (nlwb->lwb_state == LWB_STATE_NEW);
+ if (!flexible)
+ mutex_exit(&nlwb->lwb_lock); /* We lost. */
+ }
+ boolean_t closed_slim = (nlwb->lwb_state == LWB_STATE_CLOSED &&
+ (lwb->lwb_flags & LWB_FLAG_SLIM));
+
+ uint64_t min_size, max_size;
+ if (closed_slim) {
+ /* This transition is racy, but only one way. */
+ membar_consumer();
+ min_size = max_size = P2ROUNDUP_TYPED(nlwb->lwb_nused,
+ ZIL_MIN_BLKSZ, uint64_t);
+ } else if (flexible) {
+ min_size = nlwb->lwb_min_sz;
+ max_size = nlwb->lwb_sz;
+ } else {
+ min_size = max_size = nlwb->lwb_sz;
+ }
+
+ error = zio_alloc_zil(spa, zilog->zl_os, txg, bp,
+ min_size, max_size, &slog, flexible);
+ if (error == 0) {
+ if (closed_slim)
+ ASSERT3U(BP_GET_LSIZE(bp), ==, max_size);
+ else if (flexible)
+ nlwb->lwb_sz = BP_GET_LSIZE(bp);
+ else
+ ASSERT3U(BP_GET_LSIZE(bp), ==, nlwb->lwb_sz);
+ }
+ if (flexible)
+ mutex_exit(&nlwb->lwb_lock);
}
if (error == 0) {
ASSERT3U(BP_GET_BIRTH(bp), ==, txg);
- BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
- ZIO_CHECKSUM_ZILOG);
+ BP_SET_CHECKSUM(bp, (nlwb->lwb_flags & LWB_FLAG_SLIM) ?
+ ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
}
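
The allocation above derives a (min_size, max_size) pair from the next lwb's
state: still-NEW lwbs get a flexible range, OPENED ones are pinned to their
predetermined size, and CLOSED Slim lwbs are allocated exactly for what they
used. The same decision restated as a standalone helper (the types, names and
4096 rounding stand in for the real lwb_t and ZIL_MIN_BLKSZ):

#include <stdint.h>

enum lwb_state_sketch { SK_NEW, SK_OPENED, SK_CLOSED };

#define	ROUNDUP_SKETCH(x, a)	(((x) + (a) - 1) / (a) * (a))

static void
pick_alloc_sizes(enum lwb_state_sketch state, int slim, uint64_t used,
    uint64_t min_sz, uint64_t sz, uint64_t *minp, uint64_t *maxp)
{
	if (state == SK_CLOSED && slim) {
		/* Slim chain records exact usage: allocate precisely. */
		*minp = *maxp = ROUNDUP_SKETCH(used, 4096);
	} else if (state == SK_NEW) {
		/* Still flexible: hand the allocator a range. */
		*minp = min_sz;
		*maxp = sz;
	} else {
		/* Capacity was fixed when the lwb was opened. */
		*minp = *maxp = sz;
	}
}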
@@ -2028,14 +2103,15 @@ next_lwb:
if (nlwb) {
nlwb->lwb_blk = *bp;
nlwb->lwb_error = error;
- nlwb->lwb_slog = slog;
+ if (slog)
+ nlwb->lwb_flags |= LWB_FLAG_SLOG;
nlwb->lwb_alloc_txg = txg;
if (nlwb->lwb_state != LWB_STATE_READY)
nlwb = NULL;
}
mutex_exit(&zilog->zl_lock);
- if (lwb->lwb_slog) {
+ if (lwb->lwb_flags & LWB_FLAG_SLOG) {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
lwb->lwb_nused);
@@ -2065,6 +2141,8 @@ next_lwb:
lwb = nlwb;
if (lwb)
goto next_lwb;
+
+ return (0);
}
/*
@@ -2207,7 +2285,6 @@ zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
- ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
@@ -2249,9 +2326,10 @@ cont:
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
list_insert_tail(ilwbs, lwb);
- lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL)
return (NULL);
+ zil_lwb_write_open(zilog, lwb);
lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
}
@@ -2308,11 +2386,13 @@ cont:
return (lwb);
}
+static void zil_crash(zilog_t *zilog);
+
/*
* Fill the actual transaction data into the lwb, following zil_lwb_assign().
* Does not require locking.
*/
-static void
+static int
zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
{
lr_t *lr, *lrb;
@@ -2324,7 +2404,7 @@ zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
lrw = (lr_write_t *)lr;
if (lr->lrc_txtype == TX_COMMIT)
- return;
+ return (0);
reclen = lr->lrc_reclen;
dlen = zil_itx_data_size(itx);
@@ -2410,16 +2490,35 @@ zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
". Falling back to txg_wait_synced().",
error);
zfs_fallthrough;
- case EIO:
- txg_wait_synced(zilog->zl_dmu_pool,
- lr->lrc_txg);
+ case EIO: {
+ int error = txg_wait_synced_flags(
+ zilog->zl_dmu_pool,
+ lr->lrc_txg, TXG_WAIT_SUSPEND);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ /*
+ * zil_lwb_commit() is called from a
+ * loop over a list of itxs at the
+ * top of zil_lwb_write_issue(), which
+ * itself is called from a loop over a
+ * list of lwbs in various places.
+ * zil_crash() will free those itxs
+ * and sometimes the lwbs, so they
+ * are invalid when zil_crash() returns.
+ * Callers must pretty much abort
+ * immediately.
+ */
+ zil_crash(zilog);
+ return (error);
+ }
zfs_fallthrough;
+ }
case ENOENT:
zfs_fallthrough;
case EEXIST:
zfs_fallthrough;
case EALREADY:
- return;
+ return (0);
}
}
}
@@ -2427,6 +2526,8 @@ zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
lwb->lwb_nfilled += reclen + dlen;
ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused);
ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t)));
+
+ return (0);
}
itx_t *
@@ -2468,7 +2569,7 @@ zil_itx_clone(itx_t *oitx)
}
void
-zil_itx_destroy(itx_t *itx)
+zil_itx_destroy(itx_t *itx, int err)
{
ASSERT3U(itx->itx_size, >=, sizeof (itx_t));
ASSERT3U(itx->itx_lr.lrc_reclen, ==,
@@ -2477,7 +2578,7 @@ zil_itx_destroy(itx_t *itx)
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
- itx->itx_callback(itx->itx_callback_data);
+ itx->itx_callback(itx->itx_callback_data, err);
zio_data_buf_free(itx, itx->itx_size);
}
@@ -2518,9 +2619,9 @@ zil_itxg_clean(void *arg)
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
- zil_commit_waiter_skip(itx->itx_private);
+ zil_commit_waiter_done(itx->itx_private, 0);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
cookie = NULL;
@@ -2530,7 +2631,7 @@ zil_itxg_clean(void *arg)
while ((itx = list_remove_head(list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
@@ -2592,7 +2693,7 @@ zil_remove_async(zilog_t *zilog, uint64_t oid)
while ((itx = list_remove_head(&clean_list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
list_destroy(&clean_list);
}
@@ -2677,6 +2778,68 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
}
/*
+ * Post-crash cleanup. This is called from zil_clean() because it needs to
+ * do cleanup after every txg until the ZIL is restarted, and zilog_dirty()
+ * can arrange that easily, unlike zil_sync(), which is harder to get
+ * called without actual dirty data.
+ */
+static void
+zil_crash_clean(zilog_t *zilog, uint64_t synced_txg)
+{
+ ASSERT(MUTEX_HELD(&zilog->zl_lock));
+ ASSERT3U(zilog->zl_restart_txg, >, 0);
+
+ /* Clean up anything on the crash list from earlier txgs */
+ lwb_t *lwb;
+ while ((lwb = list_head(&zilog->zl_lwb_crash_list)) != NULL) {
+ if (lwb->lwb_alloc_txg >= synced_txg ||
+ lwb->lwb_max_txg >= synced_txg) {
+ /*
+ * This lwb was allocated or updated in this txg, or
+ * in the future. We stop processing here, to avoid
+ * the strange situation of freeing a ZIL block in
+ * the same or earlier txg than the one it was
+ * allocated for.
+ *
+ * We'll take care of it on the next txg.
+ */
+ break;
+ }
+
+ /* This LWB is from the past, so we can clean it up now. */
+ ASSERT(lwb->lwb_flags & LWB_FLAG_CRASHED);
+ list_remove(&zilog->zl_lwb_crash_list, lwb);
+ if (lwb->lwb_buf != NULL)
+ zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ if (!BP_IS_HOLE(&lwb->lwb_blk))
+ /*
+ * Free on the next txg, since zil_clean() is called
+ * once synced_txg has already been completed.
+ */
+ zio_free(zilog->zl_spa, synced_txg+1, &lwb->lwb_blk);
+ zil_free_lwb(zilog, lwb);
+ }
+
+ if (zilog->zl_restart_txg > synced_txg) {
+ /*
+ * We haven't reached the restart txg yet, so mark the ZIL
+ * dirty for the next txg and we'll consider it all again then.
+ */
+ zilog_dirty(zilog, synced_txg+1);
+ return;
+ }
+
+ /*
+ * Reached the restart txg, so we can allow new calls to zil_commit().
+ * All ZIL txgs have long past so there should be no IO waiting.
+ */
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+ ASSERT(list_is_empty(&zilog->zl_lwb_crash_list));
+
+ zilog->zl_restart_txg = 0;
+}
+
+/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that
@@ -2691,6 +2854,15 @@ zil_clean(zilog_t *zilog, uint64_t synced_txg)
ASSERT3U(synced_txg, <, ZILTEST_TXG);
+ /* Do cleanup and restart after crash. */
+ if (zilog->zl_restart_txg > 0) {
+ mutex_enter(&zilog->zl_lock);
+ /* Make sure we didn't lose a race. */
+ if (zilog->zl_restart_txg > 0)
+ zil_crash_clean(zilog, synced_txg);
+ mutex_exit(&zilog->zl_lock);
+ }
+
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
@@ -2875,7 +3047,7 @@ zil_prune_commit_list(zilog_t *zilog)
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
- zil_commit_waiter_skip(itx->itx_private);
+ zil_commit_waiter_done(itx->itx_private, 0);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
}
@@ -2883,13 +3055,13 @@ zil_prune_commit_list(zilog_t *zilog)
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
-static void
+static int
zil_commit_writer_stall(zilog_t *zilog)
{
/*
@@ -2914,8 +3086,22 @@ zil_commit_writer_stall(zilog_t *zilog)
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ZIL_STAT_BUMP(zilog, zil_commit_stall_count);
- txg_wait_synced(zilog->zl_dmu_pool, 0);
+
+ int err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ if (err != 0) {
+ ASSERT3U(err, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ }
+
+ /*
+ * Either zil_sync() has been called to wait for and clean up any
+ * in-flight LWBs, or zil_crash() has emptied out the list and arranged
+ * for them to be cleaned up later.
+ */
ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
+ return (err);
}
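
This stall path introduces the patch's central error-handling idiom: wait for
a txg with TXG_WAIT_SUSPEND, and if the pool suspends (ESHUTDOWN) crash the
ZIL rather than blocking forever. The idiom, factored into a hypothetical
helper (wait_or_crash() does not exist in the source; everything it calls
does):

static int
wait_or_crash(zilog_t *zilog, uint64_t txg)
{
	int err = txg_wait_synced_flags(zilog->zl_dmu_pool, txg,
	    TXG_WAIT_SUSPEND);
	if (err != 0) {
		/* Only pool suspension can fail this wait. */
		ASSERT3U(err, ==, ESHUTDOWN);
		zil_crash(zilog);
	}
	return (err);
}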
static void
@@ -3082,7 +3268,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zilog->zl_cur_left -= zil_itx_full_size(itx);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
}
@@ -3092,10 +3278,21 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
+ *
+ * ESHUTDOWN has to be handled carefully here. If we get it,
+ * then the pool suspended and zil_crash() was called, so we
+ * need to stop trying and just get an error back to the
+ * callers.
*/
- while ((lwb = list_remove_head(ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, lwb);
- zil_commit_writer_stall(zilog);
+ int err = 0;
+ while ((lwb = list_remove_head(ilwbs)) != NULL) {
+ if (err == 0)
+ err = zil_lwb_write_issue(zilog, lwb);
+ }
+ if (err != ESHUTDOWN)
+ err = zil_commit_writer_stall(zilog);
+ if (err == ESHUTDOWN)
+ err = SET_ERROR(EIO);
/*
* Additionally, we have to signal and mark the "nolwb"
@@ -3105,7 +3302,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
- zil_commit_waiter_skip(zcw);
+ zil_commit_waiter_done(zcw, err);
/*
* And finally, we have to destroy the itx's that
@@ -3113,7 +3310,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, err);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
@@ -3167,11 +3364,17 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
(!zilog->zl_parallel || zilog->zl_suspend > 0)) {
zil_burst_done(zilog);
list_insert_tail(ilwbs, lwb);
- lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL) {
- while ((lwb = list_remove_head(ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, lwb);
- zil_commit_writer_stall(zilog);
+ int err = 0;
+ while ((lwb =
+ list_remove_head(ilwbs)) != NULL) {
+ if (err == 0)
+ err = zil_lwb_write_issue(
+ zilog, lwb);
+ }
+ if (err != ESHUTDOWN)
+ (void) zil_commit_writer_stall(zilog);
}
}
}
@@ -3230,10 +3433,23 @@ zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog, zcw, &ilwbs);
+ /*
+ * If the ZIL failed somewhere inside zil_process_commit_list(), it
+ * will be because a fallback to txg_wait_synced_flags() happened at
+ * some point (e.g. zil_commit_writer_stall()). All such cases issue
+ * and empty ilwbs, so there will be nothing to do in the issue loop below.
+ * That's why we don't have to plumb the error value back from
+ * zil_process_commit_list(), and don't have to skip it.
+ */
+ IMPLY(zilog->zl_restart_txg > 0, list_is_empty(&ilwbs));
+
out:
mutex_exit(&zilog->zl_issuer_lock);
- while ((lwb = list_remove_head(&ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, lwb);
+ int err = 0;
+ while ((lwb = list_remove_head(&ilwbs)) != NULL) {
+ if (err == 0)
+ err = zil_lwb_write_issue(zilog, lwb);
+ }
list_destroy(&ilwbs);
return (wtxg);
}
@@ -3326,7 +3542,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
* hasn't been issued.
*/
zil_burst_done(zilog);
- lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
+ lwb_t *nlwb = zil_lwb_write_close(zilog, lwb);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
@@ -3402,7 +3618,7 @@ zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
- * marked done until zil_commit_waiter_skip() is called via
+ * marked done until zil_commit_waiter_done() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
@@ -3480,7 +3696,7 @@ zil_alloc_commit_waiter(void)
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
- zcw->zcw_zio_error = 0;
+ zcw->zcw_error = 0;
return (zcw);
}
@@ -3489,7 +3705,7 @@ static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
+ ASSERT0P(zcw->zcw_lwb);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
@@ -3526,6 +3742,99 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
}
/*
+ * Crash the ZIL. This is something like suspending, but abandons the ZIL
+ * without further IO until the wanted txg completes. No effort is made to
+ * close the on-disk chain or do any other on-disk work, as the pool may
+ * have suspended. zil_sync() will handle cleanup as normal and restart the
+ * ZIL once enough txgs have passed.
+ */
+static void
+zil_crash(zilog_t *zilog)
+{
+ mutex_enter(&zilog->zl_lock);
+
+ uint64_t txg = spa_syncing_txg(zilog->zl_spa);
+ uint64_t restart_txg =
+ spa_syncing_txg(zilog->zl_spa) + TXG_CONCURRENT_STATES;
+
+ if (zilog->zl_restart_txg > 0) {
+ /*
+ * If the ZIL is already crashed, it's almost certainly because
+ * we lost a race involving multiple callers from
+ * zil_commit_impl().
+ */
+
+ /*
+ * This sanity check is to support my understanding that in the
+ * event of multiple callers to zil_crash(), only one of them
+ * can possibly be in the codepath to issue lwbs; the rest
+ * should be calling from zil_commit_impl() after their waiters
+ * have completed. As I understand it, a second thread trying
+ * to issue will eventually wait on zl_issuer_lock, and then
+ * have no work to do and leave.
+ *
+ * If more lwbs had been created and issued between zil_crash()
+ * calls, then we would probably just need to take those too, add
+ * them to the crash list and clean them up, but it complicates
+ * this function and I don't think it can happen.
+ */
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
+ mutex_exit(&zilog->zl_lock);
+ return;
+ }
+
+ zilog->zl_restart_txg = restart_txg;
+
+ /*
+ * Capture any live LWBs. Depending on the state of the pool they may
+ * represent in-flight IO that won't return for some time, and we want
+ * to make sure they don't get in the way of normal ZIL operation.
+ */
+ ASSERT(list_is_empty(&zilog->zl_lwb_crash_list));
+ list_move_tail(&zilog->zl_lwb_crash_list, &zilog->zl_lwb_list);
+
+ /*
+ * Run through the LWB list, erroring all itxs and signalling the
+ * error to all waiters.
+ */
+ for (lwb_t *lwb = list_head(&zilog->zl_lwb_crash_list); lwb != NULL;
+ lwb = list_next(&zilog->zl_lwb_crash_list, lwb)) {
+ ASSERT(!(lwb->lwb_flags & LWB_FLAG_CRASHED));
+ lwb->lwb_flags |= LWB_FLAG_CRASHED;
+
+ itx_t *itx;
+ while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
+ zil_itx_destroy(itx, EIO);
+
+ zil_commit_waiter_t *zcw;
+ while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
+ mutex_enter(&zcw->zcw_lock);
+ zcw->zcw_lwb = NULL;
+ zcw->zcw_error = EIO;
+ zcw->zcw_done = B_TRUE;
+ cv_broadcast(&zcw->zcw_cv);
+ mutex_exit(&zcw->zcw_lock);
+ }
+ }
+
+ /*
+ * Zero the ZIL header bp after the ZIL restarts. We'll free it in
+ * zil_clean() when we clean up the lwbs.
+ */
+ zil_header_t *zh = zil_header_in_syncing_context(zilog);
+ BP_ZERO(&zh->zh_log);
+
+ /*
+ * Mark this ZIL dirty on the next txg, so that zil_clean() will be
+ * called for cleanup.
+ */
+ zilog_dirty(zilog, txg+1);
+
+ mutex_exit(&zilog->zl_lock);
+}
+
+/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
@@ -3640,9 +3949,17 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
* but the order in which they complete will be the same order in
* which they were created.
*/
-void
+static int zil_commit_impl(zilog_t *zilog, uint64_t foid);
+
+int
zil_commit(zilog_t *zilog, uint64_t foid)
{
+ return (zil_commit_flags(zilog, foid, ZIL_COMMIT_FAILMODE));
+}
+
+int
+zil_commit_flags(zilog_t *zilog, uint64_t foid, zil_commit_flag_t flags)
+{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
@@ -3659,7 +3976,7 @@ zil_commit(zilog_t *zilog, uint64_t foid)
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
- return;
+ return (0);
if (!spa_writeable(zilog->zl_spa)) {
/*
@@ -3670,10 +3987,23 @@ zil_commit(zilog_t *zilog, uint64_t foid)
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
- ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
+ ASSERT0P(zilog->zl_last_lwb_opened);
for (int i = 0; i < TXG_SIZE; i++)
- ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
- return;
+ ASSERT0P(zilog->zl_itxg[i].itxg_itxs);
+ return (0);
+ }
+
+ int err = 0;
+
+ /*
+ * If the ZIL crashed, bypass it entirely, and rely on txg_wait_sync()
+ * to get the data out to disk.
+ */
+ if (zilog->zl_restart_txg > 0) {
+ ZIL_STAT_BUMP(zilog, zil_commit_crash_count);
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ goto out;
}
/*
@@ -3685,14 +4015,43 @@ zil_commit(zilog_t *zilog, uint64_t foid)
*/
if (zilog->zl_suspend > 0) {
ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
- txg_wait_synced(zilog->zl_dmu_pool, 0);
- return;
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ if (err != 0) {
+ ASSERT3U(err, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ }
+ goto out;
}
- zil_commit_impl(zilog, foid);
+ err = zil_commit_impl(zilog, foid);
+
+out:
+ if (err == 0)
+ return (0);
+
+ /*
+ * The ZIL write failed and the pool is suspended. There's nothing else
+ * we can do except return or block.
+ */
+ ASSERT3U(err, ==, ESHUTDOWN);
+
+ /*
+ * Return error if failmode=continue or caller will handle directly.
+ */
+ if (!(flags & ZIL_COMMIT_FAILMODE) ||
+ spa_get_failmode(zilog->zl_spa) == ZIO_FAILURE_MODE_CONTINUE)
+ return (SET_ERROR(EIO));
+
+ /*
+ * Block until the pool returns. We assume that the data will make
+ * it out to disk in the end, and so return success.
+ */
+ txg_wait_synced(zilog->zl_dmu_pool, 0);
+ return (0);
}
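
zil_commit() keeps the historical blocking behaviour through
ZIL_COMMIT_FAILMODE, while zil_commit_flags() lets callers opt out of it. A
sketch of the two caller-side patterns this enables (fsync_sketch() and
teardown_sketch() are hypothetical wrappers; the zil_* and txg_* calls are
the ones this patch provides):

static int
fsync_sketch(zilog_t *zilog, uint64_t object)
{
	/*
	 * Default path: honour failmode. With failmode=wait this blocks
	 * until the pool returns; with failmode=continue it can return
	 * EIO, which the caller must propagate.
	 */
	return (zil_commit(zilog, object));
}

static void
teardown_sketch(zilog_t *zilog)
{
	/*
	 * ZIL_COMMIT_NOW: never block on failmode; on failure, fall back
	 * to waiting for the txg sync, as the zil_close() hunk below does.
	 */
	if (zil_commit_flags(zilog, 0, ZIL_COMMIT_NOW) != 0)
		txg_wait_synced(zilog->zl_dmu_pool, 0);
}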
-void
+static int
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zilog, zil_commit_count);
@@ -3729,7 +4088,8 @@ zil_commit_impl(zilog_t *zilog, uint64_t foid)
uint64_t wtxg = zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
- if (zcw->zcw_zio_error != 0) {
+ int err = 0;
+ if (zcw->zcw_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fallback to
@@ -3741,13 +4101,29 @@ zil_commit_impl(zilog_t *zilog, uint64_t foid)
ZIL_STAT_BUMP(zilog, zil_commit_error_count);
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
- txg_wait_synced(zilog->zl_dmu_pool, 0);
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
} else if (wtxg != 0) {
ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
- txg_wait_synced(zilog->zl_dmu_pool, wtxg);
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, wtxg,
+ TXG_WAIT_SUSPEND);
}
zil_free_commit_waiter(zcw);
+
+ if (err == 0)
+ return (0);
+
+ /*
+ * ZIL write failed and pool failed in the fallback to
+ * txg_wait_synced_flags(). Right now we have no idea if the data is on
+ * disk and the pool is probably suspended so we have no idea when it's
+ * coming back. All we can do is shut down and return error to the
+ * caller.
+ */
+ ASSERT3U(err, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ return (err);
}
/*
@@ -3773,7 +4149,7 @@ zil_sync(zilog_t *zilog, dmu_tx_t *tx)
mutex_enter(&zilog->zl_lock);
- ASSERT(zilog->zl_stop_sync == 0);
+ ASSERT0(zilog->zl_stop_sync);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
@@ -3848,7 +4224,7 @@ zil_lwb_cons(void *vbuf, void *unused, int kmflag)
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
- mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&lwb->lwb_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
@@ -3857,7 +4233,7 @@ zil_lwb_dest(void *vbuf, void *unused)
{
(void) unused;
lwb_t *lwb = vbuf;
- mutex_destroy(&lwb->lwb_vdev_lock);
+ mutex_destroy(&lwb->lwb_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
@@ -3943,6 +4319,8 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
+ list_create(&zilog->zl_lwb_crash_list, sizeof (lwb_t),
+ offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
@@ -3967,9 +4345,12 @@ zil_free(zilog_t *zilog)
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
+ ASSERT0(zilog->zl_restart_txg);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
+ ASSERT(list_is_empty(&zilog->zl_lwb_crash_list));
+ list_destroy(&zilog->zl_lwb_crash_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
@@ -4005,8 +4386,8 @@ zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
zilog_t *zilog = dmu_objset_zil(os);
- ASSERT3P(zilog->zl_get_data, ==, NULL);
- ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
+ ASSERT0P(zilog->zl_get_data);
+ ASSERT0P(zilog->zl_last_lwb_opened);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
@@ -4025,7 +4406,8 @@ zil_close(zilog_t *zilog)
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
- zil_commit(zilog, 0);
+ if (zil_commit_flags(zilog, 0, ZIL_COMMIT_NOW) != 0)
+ txg_wait_synced(zilog->zl_dmu_pool, 0);
} else {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT0(zilog->zl_dirty_max_txg);
@@ -4074,7 +4456,7 @@ zil_close(zilog_t *zilog)
if (lwb != NULL) {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
- zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ ASSERT0P(lwb->lwb_buf);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
@@ -4126,6 +4508,17 @@ zil_suspend(const char *osname, void **cookiep)
return (SET_ERROR(EBUSY));
}
+ if (zilog->zl_restart_txg > 0) {
+ /*
+ * ZIL crashed. It effectively _is_ suspended, but callers
+ * are usually trying to make sure it's empty on-disk, which
+ * we can't guarantee right now.
+ */
+ mutex_exit(&zilog->zl_lock);
+ dmu_objset_rele(os, suspend_tag);
+ return (SET_ERROR(EBUSY));
+ }
+
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
@@ -4154,11 +4547,16 @@ zil_suspend(const char *osname, void **cookiep)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
- if (cookiep == NULL)
+ if (zilog->zl_restart_txg > 0) {
+ /* ZIL crashed while we were waiting. */
+ zil_resume(os);
+ error = SET_ERROR(EBUSY);
+ } else if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
- return (0);
+
+ return (error);
}
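
The wait-then-recheck idiom used here (sleep on zl_cv_suspend, then test zl_restart_txg again before trusting the outcome) is the standard condition-variable pattern; a generic pthreads sketch with all names hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int suspending;		/* another thread is mid-suspend */
static int crashed;		/* may be set while we sleep */

/* Returns 0 on success, -1 if the log crashed while we waited. */
static int
wait_for_suspend(void)
{
	pthread_mutex_lock(&lock);
	while (suspending)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	/* State may have changed during the sleep; recheck before use. */
	return (crashed ? -1 : 0);
}

int
main(void)
{
	printf("%d\n", wait_for_suspend());
	return (0);
}
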
/*
@@ -4199,17 +4597,34 @@ zil_suspend(const char *osname, void **cookiep)
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
+ *
+ * However, zil_commit_impl() itself can return an error if any of the
+ * lwbs fail, or the pool suspends in the fallback
+	 * txg_wait_synced_flags(), which affects what we do next, so we
+ * capture that error.
*/
- zil_commit_impl(zilog, 0);
+ error = zil_commit_impl(zilog, 0);
+ if (error == ESHUTDOWN)
+ /* zil_commit_impl() has called zil_crash() already */
+ error = SET_ERROR(EBUSY);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
- txg_wait_synced(zilog->zl_dmu_pool, 0);
+ if (error == 0) {
+ error = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ error = SET_ERROR(EBUSY);
+ }
+ }
- zil_destroy(zilog, B_FALSE);
+ if (error == 0)
+ zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
@@ -4223,7 +4638,8 @@ zil_suspend(const char *osname, void **cookiep)
zil_resume(os);
else
*cookiep = os;
- return (0);
+
+ return (error);
}
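
Sketching the tail of zil_suspend() as a standalone sequence may make the error mapping clearer: each step runs only if the previous one succeeded, and a pool suspension surfaces to the caller as EBUSY. All stubs below are hypothetical.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stubs; each returns 0 or ESHUTDOWN. */
static int do_commit(void)   { return (0); }
static int do_txg_sync(void) { return (ESHUTDOWN); }

/*
 * Each step runs only if the previous one succeeded, and a pool
 * suspension (ESHUTDOWN) is reported to the caller as EBUSY, since the
 * ZIL can no longer be guaranteed empty on disk.
 */
static int
suspend_tail(void)
{
	int error = do_commit();

	if (error == ESHUTDOWN)
		error = EBUSY;	/* zil_crash() already ran in the kernel */
	if (error == 0) {
		error = do_txg_sync();
		if (error != 0)
			error = EBUSY;
	}
	if (error == 0)
		printf("zil_destroy() would run here\n");
	return (error);
}

int
main(void)
{
	printf("error=%d\n", suspend_tail());
	return (0);
}
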
void
@@ -4386,7 +4802,7 @@ zil_replay(objset_t *os, void *arg,
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
- ASSERT(zilog->zl_replay_blks == 0);
+ ASSERT0(zilog->zl_replay_blks);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 218aec6093e2..aeea58bedfe4 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -339,8 +339,8 @@ zio_fini(void)
}
for (size_t i = 0; i < n; i++) {
- VERIFY3P(zio_buf_cache[i], ==, NULL);
- VERIFY3P(zio_data_buf_cache[i], ==, NULL);
+ VERIFY0P(zio_buf_cache[i]);
+ VERIFY0P(zio_data_buf_cache[i]);
}
if (zio_ksp != NULL) {
@@ -771,7 +771,7 @@ zio_add_child_impl(zio_t *pio, zio_t *cio, boolean_t first)
else
mutex_enter(&cio->io_lock);
- ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
+ ASSERT0(pio->io_state[ZIO_WAIT_DONE]);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
@@ -821,7 +821,7 @@ zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
- ASSERT(zio->io_stall == NULL);
+ ASSERT0P(zio->io_stall);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
@@ -955,8 +955,8 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
- ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
- ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
+ ASSERT0(P2PHASE(psize, SPA_MINBLOCKSIZE));
+ ASSERT0(P2PHASE(offset, SPA_MINBLOCKSIZE));
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
@@ -1451,7 +1451,7 @@ zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
metaslab_check_free(spa, bp);
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
- VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
+ VERIFY0P(zio_free_sync(NULL, spa, txg, bp, 0));
}
}
@@ -1559,7 +1559,7 @@ zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
{
zio_t *zio;
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
@@ -1580,7 +1580,7 @@ zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
{
zio_t *zio;
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
@@ -1747,7 +1747,7 @@ zio_flush(zio_t *pio, vdev_t *vd)
void
zio_shrink(zio_t *zio, uint64_t size)
{
- ASSERT3P(zio->io_executor, ==, NULL);
+ ASSERT0P(zio->io_executor);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
@@ -1941,7 +1941,7 @@ zio_write_compress(zio_t *zio)
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
- ASSERT(zio->io_bp_override == NULL);
+ ASSERT0P(zio->io_bp_override);
if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg) {
/*
@@ -2436,7 +2436,7 @@ __zio_execute(zio_t *zio)
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
- ASSERT(zio->io_stall == NULL);
+ ASSERT0P(zio->io_stall);
do {
stage <<= 1;
@@ -2509,7 +2509,7 @@ zio_wait(zio_t *zio)
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
- ASSERT3P(zio->io_executor, ==, NULL);
+ ASSERT0P(zio->io_executor);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
@@ -2551,7 +2551,7 @@ zio_nowait(zio_t *zio)
if (zio == NULL)
return;
- ASSERT3P(zio->io_executor, ==, NULL);
+ ASSERT0P(zio->io_executor);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
list_is_empty(&zio->io_parent_list)) {
@@ -2590,8 +2590,8 @@ zio_reexecute(void *arg)
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
- ASSERT(pio->io_gang_leader == NULL);
- ASSERT(pio->io_gang_tree == NULL);
+ ASSERT0P(pio->io_gang_leader);
+ ASSERT0P(pio->io_gang_tree);
mutex_enter(&pio->io_lock);
pio->io_flags = pio->io_orig_flags;
@@ -2689,7 +2689,7 @@ zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
- ASSERT(zio_unique_parent(zio) == NULL);
+ ASSERT0P(zio_unique_parent(zio));
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
@@ -2908,7 +2908,7 @@ zio_gang_node_alloc(zio_gang_node_t **gnpp, uint64_t gangblocksize)
{
zio_gang_node_t *gn;
- ASSERT(*gnpp == NULL);
+ ASSERT0P(*gnpp);
gn = kmem_zalloc(sizeof (*gn) +
(gbh_nblkptrs(gangblocksize) * sizeof (gn)), KM_SLEEP);
@@ -2925,7 +2925,7 @@ zio_gang_node_free(zio_gang_node_t **gnpp)
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < gbh_nblkptrs(gn->gn_allocsize); g++)
- ASSERT(gn->gn_child[g] == NULL);
+ ASSERT0P(gn->gn_child[g]);
zio_buf_free(gn->gn_gbh, gn->gn_allocsize);
kmem_free(gn, sizeof (*gn) +
@@ -3362,11 +3362,11 @@ zio_nop_write(zio_t *zio)
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_IS_HOLE(bp));
- ASSERT(BP_GET_LEVEL(bp) == 0);
+ ASSERT0(BP_GET_LEVEL(bp));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
- ASSERT(zio->io_bp_override == NULL);
+ ASSERT0P(zio->io_bp_override);
ASSERT(IO_IS_ALLOCATING(zio));
/*
@@ -3495,7 +3495,7 @@ zio_ddt_read_start(zio_t *zio)
ddt_univ_phys_t *ddp = dde->dde_phys;
blkptr_t blk;
- ASSERT(zio->io_vsd == NULL);
+ ASSERT0P(zio->io_vsd);
zio->io_vsd = dde;
if (v_self == DDT_PHYS_NONE)
@@ -3560,7 +3560,7 @@ zio_ddt_read_done(zio_t *zio)
zio->io_vsd = NULL;
}
- ASSERT(zio->io_vsd == NULL);
+ ASSERT0P(zio->io_vsd);
return (zio);
}
@@ -4415,7 +4415,7 @@ static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(BP_GET_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
- ASSERT(zio->io_bp_override == NULL);
+ ASSERT0P(zio->io_bp_override);
if (!BP_IS_HOLE(bp)) {
metaslab_free(zio->io_spa, bp, BP_GET_BIRTH(bp), B_TRUE);
@@ -4434,12 +4434,15 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
- uint64_t size, boolean_t *slog)
+ uint64_t min_size, uint64_t max_size, boolean_t *slog,
+ boolean_t allow_larger)
{
int error;
zio_alloc_list_t io_alloc_list;
+ uint64_t alloc_size = 0;
ASSERT(txg > spa_syncing_txg(spa));
+ ASSERT3U(min_size, <=, max_size);
metaslab_trace_init(&io_alloc_list);
@@ -4448,7 +4451,7 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
- BP_SET_PSIZE(new_bp, size);
+ BP_SET_PSIZE(new_bp, max_size);
BP_SET_LEVEL(new_bp, 0);
/*
@@ -4463,43 +4466,51 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
ZIOSTAT_BUMP(ziostat_total_allocations);
/* Try log class (dedicated slog devices) first */
- error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
- txg, NULL, flags, &io_alloc_list, allocator, NULL);
+ error = metaslab_alloc_range(spa, spa_log_class(spa), min_size,
+ max_size, new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
+ NULL, &alloc_size);
*slog = (error == 0);
/* Try special_embedded_log class (reserved on special vdevs) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_special_embedded_log_class(spa),
- size, new_bp, 1, txg, NULL, flags, &io_alloc_list,
- allocator, NULL);
+ error = metaslab_alloc_range(spa,
+ spa_special_embedded_log_class(spa), min_size, max_size,
+ new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
+ NULL, &alloc_size);
}
/* Try special class (general special vdev allocation) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_special_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_special_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
/* Try embedded_log class (reserved on normal vdevs) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_embedded_log_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
/* Finally fall back to normal class */
if (error != 0) {
ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
- error = metaslab_alloc(spa, spa_normal_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_normal_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
- BP_SET_LSIZE(new_bp, size);
- BP_SET_PSIZE(new_bp, size);
+ if (!allow_larger)
+ alloc_size = MIN(alloc_size, max_size);
+ else if (max_size <= SPA_OLD_MAXBLOCKSIZE)
+ alloc_size = MIN(alloc_size, SPA_OLD_MAXBLOCKSIZE);
+ alloc_size = P2ALIGN_TYPED(alloc_size, ZIL_MIN_BLKSZ, uint64_t);
+
+ BP_SET_LSIZE(new_bp, alloc_size);
+ BP_SET_PSIZE(new_bp, alloc_size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
@@ -4527,8 +4538,8 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
- "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
- error);
+ "min_size %llu, max_size %llu, error %d", spa_name(spa),
+ (u_longlong_t)min_size, (u_longlong_t)max_size, error);
}
return (error);
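
The post-allocation clamping is plain arithmetic; a standalone sketch, where P2ALIGN_TYPED reproduces the OpenZFS macro and the sizes are invented:

#include <stdint.h>
#include <stdio.h>

#define	ZIL_MIN_BLKSZ		4096ULL
#define	SPA_OLD_MAXBLOCKSIZE	(128ULL << 10)
#define	MIN(a, b)		((a) < (b) ? (a) : (b))
/* Same definition as the OpenZFS macro: round down to a power-of-2. */
#define	P2ALIGN_TYPED(x, align, type)	((type)(x) & -(type)(align))

static uint64_t
clamp_zil_alloc(uint64_t alloc_size, uint64_t max_size, int allow_larger)
{
	if (!allow_larger)
		alloc_size = MIN(alloc_size, max_size);
	else if (max_size <= SPA_OLD_MAXBLOCKSIZE)
		alloc_size = MIN(alloc_size, SPA_OLD_MAXBLOCKSIZE);
	return (P2ALIGN_TYPED(alloc_size, ZIL_MIN_BLKSZ, uint64_t));
}

int
main(void)
{
	/* The allocator handed back 180K for a 96K request. */
	printf("%llu\n", (unsigned long long)
	    clamp_zil_alloc(180ULL << 10, 96ULL << 10, 1));	/* 131072 */
	return (0);
}
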
@@ -4559,12 +4570,33 @@ zio_vdev_io_start(zio_t *zio)
zio->io_delay = 0;
- ASSERT(zio->io_error == 0);
- ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
+ ASSERT0(zio->io_error);
+ ASSERT0(zio->io_child_error[ZIO_CHILD_VDEV]);
if (vd == NULL) {
- if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
- spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
+ if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) {
+ /*
+ * A deadlock workaround. The ddt_prune_unique_entries()
+ * -> prune_candidates_sync() code path takes the
+ * SCL_ZIO reader lock and may request it again here.
+ * If there is another thread who wants the SCL_ZIO
+ * writer lock, then scl_write_wanted will be set.
+ * Thus, the spa_config_enter_priority() is used to
+ * ignore pending writer requests.
+ *
+ * The locking should be revised to remove the need
+ * for this workaround. If that's not workable then
+ * it should only be applied to the zios involved in
+ * the pruning process. This impacts the read/write
+ * I/O balance while pruning.
+ */
+ if (spa->spa_active_ddt_prune)
+ spa_config_enter_priority(spa, SCL_ZIO, zio,
+ RW_READER);
+ else
+ spa_config_enter(spa, SCL_ZIO, zio,
+ RW_READER);
+ }
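
A minimal sketch of the lock-entry selection above; spa_config_enter_priority() is the real primitive (a reader entry that ignores queued writers), while the stubs here are stand-ins:

#include <stdio.h>

static void enter_normal(void)   { printf("normal reader enter\n"); }
static void enter_priority(void) { printf("priority reader enter\n"); }

/*
 * While a DDT prune is active, take the SCL_ZIO reader lock with the
 * priority variant so a recursive reader cannot deadlock behind a
 * queued writer; otherwise use the ordinary entry.
 */
static void
config_enter_for_zio(int ddt_prune_active)
{
	if (ddt_prune_active)
		enter_priority();
	else
		enter_normal();
}

int
main(void)
{
	config_enter_for_zio(1);
	config_enter_for_zio(0);
	return (0);
}
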
/*
* The mirror_ops handle multiple DVAs in a single BP.
@@ -4751,7 +4783,7 @@ zio_vdev_io_done(zio_t *zio)
ops->vdev_op_io_done(zio);
if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
- VERIFY(vdev_probe(vd, zio) == NULL);
+ VERIFY0P(vdev_probe(vd, zio));
return (zio);
}
@@ -4903,7 +4935,7 @@ void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
- ASSERT(zio->io_error == 0);
+ ASSERT0(zio->io_error);
zio->io_stage >>= 1;
}
@@ -4920,7 +4952,7 @@ void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
- ASSERT(zio->io_error == 0);
+ ASSERT0(zio->io_error);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
@@ -5294,11 +5326,21 @@ zio_ready(zio_t *zio)
return (NULL);
}
+ if (zio_injection_enabled) {
+ hrtime_t target = zio_handle_ready_delay(zio);
+ if (target != 0 && zio->io_target_timestamp == 0) {
+ zio->io_stage >>= 1;
+ zio->io_target_timestamp = target;
+ zio_delay_interrupt(zio);
+ return (NULL);
+ }
+ }
+
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(BP_GET_BIRTH(bp) == zio->io_txg ||
BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
- ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
+ ASSERT0(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY]);
zio->io_ready(zio);
}
@@ -5448,7 +5490,7 @@ zio_done(zio_t *zio)
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
- ASSERT(zio->io_children[c][w] == 0);
+ ASSERT0(zio->io_children[c][w]);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
diff --git a/sys/contrib/openzfs/module/zfs/zio_checksum.c b/sys/contrib/openzfs/module/zfs/zio_checksum.c
index 63d0c6dadd46..1d0646a61185 100644
--- a/sys/contrib/openzfs/module/zfs/zio_checksum.c
+++ b/sys/contrib/openzfs/module/zfs/zio_checksum.c
@@ -215,7 +215,7 @@ zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
spa_feature_t
zio_checksum_to_feature(enum zio_checksum cksum)
{
- VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);
+ VERIFY0((cksum & ~ZIO_CHECKSUM_MASK));
switch (cksum) {
case ZIO_CHECKSUM_BLAKE3:
diff --git a/sys/contrib/openzfs/module/zfs/zio_compress.c b/sys/contrib/openzfs/module/zfs/zio_compress.c
index 9f0ac1b63146..89ceeb58ad91 100644
--- a/sys/contrib/openzfs/module/zfs/zio_compress.c
+++ b/sys/contrib/openzfs/module/zfs/zio_compress.c
@@ -38,12 +38,6 @@
#include <sys/zstd/zstd.h>
/*
- * If nonzero, every 1/X decompression attempts will fail, simulating
- * an undetected memory error.
- */
-static unsigned long zio_decompress_fail_fraction = 0;
-
-/*
* Compression vectors.
*/
zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
@@ -171,15 +165,6 @@ zio_decompress_data(enum zio_compress c, abd_t *src, abd_t *dst,
else
err = ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
- /*
- * Decompression shouldn't fail, because we've already verified
- * the checksum. However, for extra protection (e.g. against bitflips
- * in non-ECC RAM), we handle this error (and test it).
- */
- if (zio_decompress_fail_fraction != 0 &&
- random_in_range(zio_decompress_fail_fraction) == 0)
- err = SET_ERROR(EINVAL);
-
return (err);
}
diff --git a/sys/contrib/openzfs/module/zfs/zio_inject.c b/sys/contrib/openzfs/module/zfs/zio_inject.c
index df7b01ba879e..287577018ed1 100644
--- a/sys/contrib/openzfs/module/zfs/zio_inject.c
+++ b/sys/contrib/openzfs/module/zfs/zio_inject.c
@@ -827,6 +827,44 @@ zio_handle_export_delay(spa_t *spa, hrtime_t elapsed)
zio_handle_pool_delay(spa, elapsed, ZINJECT_DELAY_EXPORT);
}
+/*
+ * For testing, inject a delay before ready state.
+ */
+hrtime_t
+zio_handle_ready_delay(zio_t *zio)
+{
+ inject_handler_t *handler;
+ hrtime_t now = gethrtime();
+ hrtime_t target = 0;
+
+ /*
+ * Ignore I/O not associated with any logical data.
+ */
+ if (zio->io_logical == NULL)
+ return (0);
+
+ rw_enter(&inject_lock, RW_READER);
+
+ for (handler = list_head(&inject_handlers); handler != NULL;
+ handler = list_next(&inject_handlers, handler)) {
+ if (zio->io_spa != handler->zi_spa ||
+ handler->zi_record.zi_cmd != ZINJECT_DELAY_READY)
+ continue;
+
+ /* If this handler matches, inject the delay */
+ if (zio_match_iotype(zio, handler->zi_record.zi_iotype) &&
+ zio_match_handler(&zio->io_logical->io_bookmark,
+ zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
+ zio_match_dva(zio), &handler->zi_record, zio->io_error)) {
+ target = now + (hrtime_t)handler->zi_record.zi_timer;
+ break;
+ }
+ }
+
+ rw_exit(&inject_lock);
+ return (target);
+}
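
The one-shot nature of the delay (arm it only when a target exists and io_target_timestamp is still zero) can be shown in isolation; a userspace sketch, with gethrtime_stub() standing in for the kernel's gethrtime():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t hrtime_t;

/* Userspace stand-in for the kernel's gethrtime(). */
static hrtime_t
gethrtime_stub(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((hrtime_t)ts.tv_sec * 1000000000LL + ts.tv_nsec);
}

/*
 * A nonzero target from the injection handler arms a one-shot delay;
 * a nonzero io_target_timestamp means this zio already took its delay
 * and must not be delayed again when the stage re-enters.
 */
static int
should_delay(hrtime_t target, hrtime_t io_target_timestamp)
{
	return (target != 0 && io_target_timestamp == 0);
}

int
main(void)
{
	hrtime_t target = gethrtime_stub() + 500000000LL; /* now + 500ms */

	printf("first pass: %d\n", should_delay(target, 0));
	printf("re-entry:   %d\n", should_delay(target, target));
	return (0);
}
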
+
static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
@@ -1119,7 +1157,7 @@ zio_clear_fault(int id)
kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
handler->zi_record.zi_nlanes);
} else {
- ASSERT3P(handler->zi_lanes, ==, NULL);
+ ASSERT0P(handler->zi_lanes);
}
if (handler->zi_spa_name != NULL)
diff --git a/sys/contrib/openzfs/module/zfs/zrlock.c b/sys/contrib/openzfs/module/zfs/zrlock.c
index 3c0f1b7bbbc1..09c110945c97 100644
--- a/sys/contrib/openzfs/module/zfs/zrlock.c
+++ b/sys/contrib/openzfs/module/zfs/zrlock.c
@@ -129,7 +129,7 @@ zrl_tryenter(zrlock_t *zrl)
(uint32_t *)&zrl->zr_refcount, 0, ZRL_LOCKED);
if (cas == 0) {
#ifdef ZFS_DEBUG
- ASSERT3P(zrl->zr_owner, ==, NULL);
+ ASSERT0P(zrl->zr_owner);
zrl->zr_owner = curthread;
#endif
return (1);
diff --git a/sys/contrib/openzfs/module/zfs/zthr.c b/sys/contrib/openzfs/module/zfs/zthr.c
index 597a510528ea..d245ce4946e0 100644
--- a/sys/contrib/openzfs/module/zfs/zthr.c
+++ b/sys/contrib/openzfs/module/zfs/zthr.c
@@ -316,7 +316,7 @@ zthr_destroy(zthr_t *t)
{
ASSERT(!MUTEX_HELD(&t->zthr_state_lock));
ASSERT(!MUTEX_HELD(&t->zthr_request_lock));
- VERIFY3P(t->zthr_thread, ==, NULL);
+ VERIFY0P(t->zthr_thread);
mutex_destroy(&t->zthr_request_lock);
mutex_destroy(&t->zthr_state_lock);
cv_destroy(&t->zthr_cv);
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index 7e264f308cf2..faced0db7e9e 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -38,25 +38,36 @@
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
/*
* Note on locking of zvol state structures.
*
- * These structures are used to maintain internal state used to emulate block
- * devices on top of zvols. In particular, management of device minor number
- * operations - create, remove, rename, and set_snapdev - involves access to
- * these structures. The zvol_state_lock is primarily used to protect the
- * zvol_state_list. The zv->zv_state_lock is used to protect the contents
- * of the zvol_state_t structures, as well as to make sure that when the
- * time comes to remove the structure from the list, it is not in use, and
- * therefore, it can be taken off zvol_state_list and freed.
+ * zvol_state_t represents the connection between a single dataset
+ * (DMU_OST_ZVOL) and the device "minor" (some OS-specific representation of a
+ * "disk" or "device" or "volume", eg, a /dev/zdXX node, a GEOM object, etc).
*
- * The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
- * e.g. for the duration of receive and rollback operations. This lock can be
- * held for significant periods of time. Given that it is undesirable to hold
- * mutexes for long periods of time, the following lock ordering applies:
+ * The global zvol_state_lock is used to protect access to zvol_state_list and
+ * zvol_htable, which are the primary way to obtain a zvol_state_t from a name.
+ * It should not be used for anything that is not name-related, and you
+ * should avoid sleeping or waiting while it's held. See
+ * zvol_find_by_name(), zvol_insert(),
+ * zvol_remove().
+ *
+ * The zv_state_lock is used to protect the contents of the associated
+ * zvol_state_t. Most of the zvol_state_t is dedicated to control and
+ * configuration; almost none of it is needed for data operations (that is,
+ * read, write, flush) so this lock is rarely taken during general IO. It
+ * should be released quickly; you should avoid sleeping or waiting while it's
+ * held.
+ *
+ * zv_suspend_lock is used to suspend IO/data operations to a zvol. The read
+ * half should be held for the duration of an IO operation. The write half
+ * should be taken when something needs to wait for IO to complete and then
+ * block further IO, eg for the duration of receive and rollback operations.
+ * This lock can be
+ * held for long periods of time.
+ *
+ * Thus, the following lock ordering applies.
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
@@ -67,9 +78,8 @@
* these operations are serialized per pool. Consequently, we can be certain
* that for a given zvol, there is only one operation at a time in progress.
* That is why one can be sure that first, zvol_state_t for a given zvol is
- * allocated and placed on zvol_state_list, and then other minor operations
- * for this zvol are going to proceed in the order of issue.
- *
+ * allocated and placed on zvol_state_list, and then other minor operations for
+ * this zvol are going to proceed in the order of issue.
*/
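
A skeletal pthreads rendering of that ordering, purely illustrative and with simplified lock types, demonstrating only the acquisition order described above:

#include <pthread.h>

static pthread_rwlock_t zvol_state_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t zv_suspend_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t zv_state_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Acquire in the documented order: global name-list lock first, then
 * the per-zvol suspend lock, then the per-zvol state mutex. Release in
 * the reverse order.
 */
static void
locked_op(void)
{
	pthread_rwlock_rdlock(&zvol_state_lock);	/* list lookup */
	pthread_rwlock_rdlock(&zv_suspend_lock);	/* data op in flight */
	pthread_mutex_lock(&zv_state_lock);		/* touch zvol state */

	/* ... work on the zvol_state_t ... */

	pthread_mutex_unlock(&zv_state_lock);
	pthread_rwlock_unlock(&zv_suspend_lock);
	pthread_rwlock_unlock(&zvol_state_lock);
}

int
main(void)
{
	locked_op();
	return (0);
}
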
#include <sys/dataset_kstats.h>
@@ -215,8 +225,8 @@ zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
int error;
uint64_t volblocksize, volsize;
- VERIFY(nvlist_lookup_uint64(nvprops,
- zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
+ VERIFY0(nvlist_lookup_uint64(nvprops,
+ zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize));
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
@@ -225,21 +235,20 @@ zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
- VERIFY(nvlist_remove_all(nvprops,
- zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
+ VERIFY0(nvlist_remove_all(nvprops, zfs_prop_to_name(ZFS_PROP_VOLSIZE)));
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
}
/*
@@ -254,7 +263,7 @@ zvol_get_stats(objset_t *os, nvlist_t *nv)
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
if (error)
- return (SET_ERROR(error));
+ return (error);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
@@ -267,7 +276,7 @@ zvol_get_stats(objset_t *os, nvlist_t *nv)
kmem_free(doi, sizeof (dmu_object_info_t));
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -305,7 +314,7 @@ zvol_update_volsize(uint64_t volsize, objset_t *os)
error = dmu_tx_assign(tx, DMU_TX_WAIT);
if (error) {
dmu_tx_abort(tx);
- return (SET_ERROR(error));
+ return (error);
}
txg = dmu_tx_get_txg(tx);
@@ -337,7 +346,7 @@ zvol_set_volsize(const char *name, uint64_t volsize)
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
if (error != 0)
- return (SET_ERROR(error));
+ return (error);
if (readonly)
return (SET_ERROR(EROFS));
@@ -353,7 +362,7 @@ zvol_set_volsize(const char *name, uint64_t volsize)
FTAG, &os)) != 0) {
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
- return (SET_ERROR(error));
+ return (error);
}
owned = B_TRUE;
if (zv != NULL)
@@ -390,7 +399,7 @@ out:
if (error == 0 && zv != NULL)
zvol_os_update_volsize(zv, volsize);
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -401,7 +410,7 @@ zvol_set_volthreading(const char *name, boolean_t value)
{
zvol_state_t *zv = zvol_find_by_name(name, RW_NONE);
if (zv == NULL)
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
zv->zv_threading = value;
mutex_exit(&zv->zv_state_lock);
return (0);
@@ -450,8 +459,10 @@ zvol_check_volblocksize(const char *name, uint64_t volblocksize)
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
- if (volblocksize > zfs_max_recordsize)
+ if (volblocksize > zfs_max_recordsize) {
+ spa_close(spa, FTAG);
return (SET_ERROR(EDOM));
+ }
spa_close(spa, FTAG);
}
@@ -618,7 +629,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
dmu_tx_t *tx;
blkptr_t *bps;
size_t maxblocks;
- int error = EINVAL;
+ int error = 0;
rw_enter(&zv_dst->zv_suspend_lock, RW_READER);
if (zv_dst->zv_zilog == NULL) {
@@ -644,23 +655,22 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
*/
if (!spa_feature_is_enabled(dmu_objset_spa(outos),
SPA_FEATURE_BLOCK_CLONING)) {
- error = EOPNOTSUPP;
+ error = SET_ERROR(EOPNOTSUPP);
goto out;
}
if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
- error = EXDEV;
+ error = SET_ERROR(EXDEV);
goto out;
}
if (inos->os_encrypted != outos->os_encrypted) {
- error = EXDEV;
+ error = SET_ERROR(EXDEV);
goto out;
}
if (zv_src->zv_volblocksize != zv_dst->zv_volblocksize) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
if (inoff >= zv_src->zv_volsize || outoff >= zv_dst->zv_volsize) {
- error = 0;
goto out;
}
@@ -671,17 +681,15 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
len = zv_src->zv_volsize - inoff;
if (len > zv_dst->zv_volsize - outoff)
len = zv_dst->zv_volsize - outoff;
- if (len == 0) {
- error = 0;
+ if (len == 0)
goto out;
- }
/*
* No overlapping if we are cloning within the same file
*/
if (zv_src == zv_dst) {
if (inoff < outoff + len && outoff < inoff + len) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
}
@@ -691,7 +699,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
*/
if ((inoff % zv_src->zv_volblocksize) != 0 ||
(outoff % zv_dst->zv_volblocksize) != 0) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
@@ -699,7 +707,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
* Length must be multiple of block size
*/
if ((len % zv_src->zv_volblocksize) != 0) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
@@ -771,13 +779,13 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
zfs_rangelock_exit(outlr);
zfs_rangelock_exit(inlr);
if (error == 0 && zv_dst->zv_objset->os_sync == ZFS_SYNC_ALWAYS) {
- zil_commit(zilog_dst, ZVOL_OBJ);
+ error = zil_commit(zilog_dst, ZVOL_OBJ);
}
out:
if (zv_src != zv_dst)
rw_exit(&zv_src->zv_suspend_lock);
rw_exit(&zv_dst->zv_suspend_lock);
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -897,7 +905,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
if (wr_state == WR_COPIED &&
dmu_read_by_dnode(zv->zv_dn, offset, len, lr + 1,
DMU_READ_NO_PREFETCH | DMU_KEEP_CACHING) != 0) {
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
@@ -916,7 +924,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
itx->itx_private = zv;
- (void) zil_itx_assign(zilog, itx, tx);
+ zil_itx_assign(zilog, itx, tx);
offset += len;
size -= len;
@@ -1026,7 +1034,7 @@ zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
zvol_get_done(zgd, error);
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -1071,15 +1079,15 @@ zvol_setup_zv(zvol_state_t *zv)
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
- return (SET_ERROR(error));
+ return (error);
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
- return (SET_ERROR(error));
+ return (error);
error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
if (error)
- return (SET_ERROR(error));
+ return (error);
zvol_os_set_capacity(zv, volsize >> 9);
zv->zv_volsize = volsize;
@@ -1121,7 +1129,7 @@ zvol_shutdown_zv(zvol_state_t *zv)
*/
if (zv->zv_flags & ZVOL_WRITTEN_TO)
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
- (void) dmu_objset_evict_dbufs(zv->zv_objset);
+ dmu_objset_evict_dbufs(zv->zv_objset);
}
/*
@@ -1137,20 +1145,34 @@ zvol_tag(zvol_state_t *zv)
/*
* Suspend the zvol for recv and rollback.
*/
-zvol_state_t *
-zvol_suspend(const char *name)
+int
+zvol_suspend(const char *name, zvol_state_t **zvp)
{
zvol_state_t *zv;
zv = zvol_find_by_name(name, RW_WRITER);
if (zv == NULL)
- return (NULL);
+ return (SET_ERROR(ENOENT));
/* block all I/O, release in zvol_resume. */
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
+ /*
+ * If it's being removed, unlock and return error. It doesn't make any
+ * sense to try to suspend a zvol being removed, but being here also
+ * means that zvol_remove_minors_impl() is about to call zvol_remove()
+ * and then destroy the zvol_state_t, so returning a pointer to it for
+ * the caller to mess with would be a disaster anyway.
+ */
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ /* NB: Returning EIO here to match zfsvfs_teardown() */
+ return (SET_ERROR(EIO));
+ }
+
atomic_inc(&zv->zv_suspend_ref);
if (zv->zv_open_count > 0)
@@ -1163,7 +1185,8 @@ zvol_suspend(const char *name)
mutex_exit(&zv->zv_state_lock);
/* zv_suspend_lock is released in zvol_resume() */
- return (zv);
+ *zvp = zv;
+ return (0);
}
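
The bail-out above follows a common check-then-back-out shape; a compact pthreads sketch, where the ZVOL_REMOVING value and the lock layout are illustrative only:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define	ZVOL_REMOVING	0x4	/* illustrative value */

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t suspend_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * If the removal flag is already set, drop both locks in the reverse of
 * acquisition order and report EIO rather than handing out a
 * zvol_state_t that is about to be freed.
 */
static int
suspend_or_bail(int zv_flags)
{
	pthread_rwlock_wrlock(&suspend_lock);
	pthread_mutex_lock(&state_lock);
	if (zv_flags & ZVOL_REMOVING) {
		pthread_mutex_unlock(&state_lock);
		pthread_rwlock_unlock(&suspend_lock);
		return (EIO);
	}
	/* ... mark suspended; locks released later by resume ... */
	pthread_mutex_unlock(&state_lock);
	pthread_rwlock_unlock(&suspend_lock);
	return (0);
}

int
main(void)
{
	printf("%d\n", suspend_or_bail(ZVOL_REMOVING));
	return (0);
}
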
int
@@ -1198,7 +1221,7 @@ zvol_resume(zvol_state_t *zv)
if (zv->zv_flags & ZVOL_REMOVING)
cv_broadcast(&zv->zv_removing_cv);
- return (SET_ERROR(error));
+ return (error);
}
int
@@ -1214,7 +1237,7 @@ zvol_first_open(zvol_state_t *zv, boolean_t readonly)
boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
if (error)
- return (SET_ERROR(error));
+ return (error);
zv->zv_objset = os;
@@ -1440,41 +1463,32 @@ zvol_task_update_status(zvol_task_t *task, uint64_t total, uint64_t done,
}
}
-static const char *
-zvol_task_op_msg(zvol_async_op_t op)
-{
- switch (op) {
- case ZVOL_ASYNC_CREATE_MINORS:
- return ("create");
- case ZVOL_ASYNC_REMOVE_MINORS:
- return ("remove");
- case ZVOL_ASYNC_RENAME_MINORS:
- return ("rename");
- case ZVOL_ASYNC_SET_SNAPDEV:
- case ZVOL_ASYNC_SET_VOLMODE:
- return ("set property");
- default:
- return ("unknown");
- }
-
- __builtin_unreachable();
- return (NULL);
-}
-
static void
zvol_task_report_status(zvol_task_t *task)
{
+#ifdef ZFS_DEBUG
+ static const char *const msg[] = {
+ "create",
+ "remove",
+ "rename",
+ "set snapdev",
+ "set volmode",
+ "unknown",
+ };
if (task->zt_status == 0)
return;
+ zvol_async_op_t op = MIN(task->zt_op, ZVOL_ASYNC_MAX);
if (task->zt_error) {
dprintf("The %s minors zvol task was not ok, last error %d\n",
- zvol_task_op_msg(task->zt_op), task->zt_error);
+ msg[op], task->zt_error);
} else {
- dprintf("The %s minors zvol task was not ok\n",
- zvol_task_op_msg(task->zt_op));
+ dprintf("The %s minors zvol task was not ok\n", msg[op]);
}
+#else
+ (void) task;
+#endif
}
/*
@@ -1581,184 +1595,156 @@ zvol_create_minors_impl(zvol_task_t *task)
}
/*
- * Remove minors for specified dataset including children and snapshots.
- */
-
-/*
- * Remove the minor for a given zvol. This will do it all:
- * - flag the zvol for removal, so new requests are rejected
- * - wait until outstanding requests are completed
- * - remove it from lists
- * - free it
- * It's also usable as a taskq task, and smells nice too.
+ * Remove minors for specified dataset and, optionally, its children and
+ * snapshots.
*/
static void
-zvol_remove_minor_task(void *arg)
-{
- zvol_state_t *zv = (zvol_state_t *)arg;
-
- ASSERT(!RW_LOCK_HELD(&zvol_state_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
-
- mutex_enter(&zv->zv_state_lock);
- while (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
- zv->zv_flags |= ZVOL_REMOVING;
- cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
- }
- mutex_exit(&zv->zv_state_lock);
-
- rw_enter(&zvol_state_lock, RW_WRITER);
- mutex_enter(&zv->zv_state_lock);
-
- zvol_remove(zv);
- zvol_os_clear_private(zv);
-
- mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
-
- zvol_os_free(zv);
-}
-
-static void
-zvol_free_task(void *arg)
-{
- zvol_os_free(arg);
-}
-
-static void
zvol_remove_minors_impl(zvol_task_t *task)
{
zvol_state_t *zv, *zv_next;
const char *name = task ? task->zt_name1 : NULL;
int namelen = ((name) ? strlen(name) : 0);
- taskqid_t t;
- list_t delay_list, free_list;
+ boolean_t children = task ? !!task->zt_value : B_TRUE;
if (zvol_inhibit_dev)
return;
- list_create(&delay_list, sizeof (zvol_state_t),
- offsetof(zvol_state_t, zv_next));
- list_create(&free_list, sizeof (zvol_state_t),
- offsetof(zvol_state_t, zv_next));
+ /*
+ * We collect up zvols that we want to remove on a separate list, so
+ * that we don't have to hold zvol_state_lock for the whole time.
+ *
+ * We can't remove them from the global lists until we're completely
+	 * done with them, because that would make ZFS-side ops believe they
+	 * no longer exist, and the name might then be reused, which can't be
+	 * good.
+ */
+ list_t remove_list;
+ list_create(&remove_list, sizeof (zvol_state_t),
+ offsetof(zvol_state_t, zv_remove_node));
- rw_enter(&zvol_state_lock, RW_WRITER);
+ rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ /* Another thread is handling shutdown, skip it. */
+ mutex_exit(&zv->zv_state_lock);
+ continue;
+ }
+
+ /*
+ * This zvol should be removed if:
+ * - no name was offered (ie removing all at shutdown); or
+ * - name matches exactly; or
+ * - we were asked to remove children, and
+ * - the start of the name matches, and
+ * - there is a '/' immediately after the matched name; or
+ * - there is a '@' immediately after the matched name
+ */
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
- (strncmp(zv->zv_name, name, namelen) == 0 &&
+ (children && strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
- /*
- * By holding zv_state_lock here, we guarantee that no
- * one is currently using this zv
- */
/*
- * If in use, try to throw everyone off and try again
- * later.
+			 * Matched, so mark it for removal. We want to take
+			 * the write half of the suspend lock to make sure
+			 * that the zvol is not suspended, and to give any
+			 * data ops a chance to finish.
*/
- if (zv->zv_open_count > 0 ||
- atomic_read(&zv->zv_suspend_ref)) {
- zv->zv_flags |= ZVOL_REMOVING;
- t = taskq_dispatch(
- zv->zv_objset->os_spa->spa_zvol_taskq,
- zvol_remove_minor_task, zv, TQ_SLEEP);
- if (t == TASKQID_INVALID) {
- /*
- * Couldn't create the task, so we'll
- * do it in place once the loop is
- * finished.
- */
- list_insert_head(&delay_list, zv);
- }
+ mutex_exit(&zv->zv_state_lock);
+ rw_enter(&zv->zv_suspend_lock, RW_WRITER);
+ mutex_enter(&zv->zv_state_lock);
+
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ /* Another thread has taken it, let them. */
mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
continue;
}
- zvol_remove(zv);
-
/*
- * Cleared while holding zvol_state_lock as a writer
- * which will prevent zvol_open() from opening it.
+ * Mark it and unlock. New entries will see the flag
+ * and return ENXIO.
*/
- zvol_os_clear_private(zv);
-
- /* Drop zv_state_lock before zvol_free() */
+ zv->zv_flags |= ZVOL_REMOVING;
mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
- /* Try parallel zv_free, if failed do it in place */
- t = taskq_dispatch(system_taskq, zvol_free_task, zv,
- TQ_SLEEP);
- if (t == TASKQID_INVALID)
- list_insert_head(&free_list, zv);
- } else {
+ /* Put it on the list for the next stage. */
+ list_insert_head(&remove_list, zv);
+ } else
mutex_exit(&zv->zv_state_lock);
- }
}
- rw_exit(&zvol_state_lock);
-
- /* Wait for zvols that we couldn't create a remove task for */
- while ((zv = list_remove_head(&delay_list)) != NULL)
- zvol_remove_minor_task(zv);
- /* Free any that we couldn't free in parallel earlier */
- while ((zv = list_remove_head(&free_list)) != NULL)
- zvol_os_free(zv);
-}
-
-/* Remove minor for this specific volume only */
-static int
-zvol_remove_minor_impl(const char *name)
-{
- zvol_state_t *zv = NULL, *zv_next;
-
- if (zvol_inhibit_dev)
- return (0);
+ rw_exit(&zvol_state_lock);
- rw_enter(&zvol_state_lock, RW_WRITER);
+ /* Didn't match any, nothing to do! */
+ if (list_is_empty(&remove_list)) {
+ if (task)
+ task->zt_error = SET_ERROR(ENOENT);
+ return;
+ }
- for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
- zv_next = list_next(&zvol_state_list, zv);
+ /* Actually shut them all down. */
+ for (zv = list_head(&remove_list); zv != NULL; zv = zv_next) {
+ zv_next = list_next(&remove_list, zv);
mutex_enter(&zv->zv_state_lock);
- if (strcmp(zv->zv_name, name) == 0)
- /* Found, leave the the loop with zv_lock held */
- break;
- mutex_exit(&zv->zv_state_lock);
- }
-
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- return (ENOENT);
- }
- ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+ /*
+ * Still open or suspended, just wait. This can happen if, for
+ * example, we managed to acquire zv_state_lock in the moments
+ * where zvol_open() or zvol_release() are trading locks to
+ * call zvol_first_open() or zvol_last_close().
+ */
+ while (zv->zv_open_count > 0 ||
+ atomic_read(&zv->zv_suspend_ref))
+ cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
- if (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
/*
- * In use, so try to throw everyone off, then wait
- * until finished.
+ * No users, shut down the OS side. This may not remove the
+ * minor from view immediately, depending on the kernel
+ * specifics, but it will ensure that it is unusable and that
+ * this zvol_state_t can never again be reached from an OS-side
+ * operation.
*/
- zv->zv_flags |= ZVOL_REMOVING;
+ zvol_os_remove_minor(zv);
mutex_exit(&zv->zv_state_lock);
+
+ /* Remove it from the name lookup lists */
+ rw_enter(&zvol_state_lock, RW_WRITER);
+ zvol_remove(zv);
rw_exit(&zvol_state_lock);
- zvol_remove_minor_task(zv);
- return (0);
}
- zvol_remove(zv);
- zvol_os_clear_private(zv);
+ /*
+	 * Our own references from remove_list are the last ones; free them
+	 * and we're done.
+ */
+ while ((zv = list_remove_head(&remove_list)) != NULL)
+ zvol_os_free(zv);
- mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
+ list_destroy(&remove_list);
+}
+
+/* Remove minor for this specific volume only */
+static int
+zvol_remove_minor_impl(const char *name)
+{
+ if (zvol_inhibit_dev)
+ return (0);
- zvol_os_free(zv);
+ zvol_task_t task;
+ memset(&task, 0, sizeof (zvol_task_t));
+ strlcpy(task.zt_name1, name, sizeof (task.zt_name1));
+ task.zt_value = B_FALSE;
- return (0);
+ zvol_remove_minors_impl(&task);
+
+ return (task.zt_error);
}
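
The selection rules in the comment above reduce to a pure predicate; a standalone sketch (zv_matches() is a hypothetical name):

#include <stdio.h>
#include <string.h>

/*
 * A zvol is selected for removal when no name is given, when the name
 * matches exactly, or, if children are included, when the name is a
 * prefix followed immediately by '/' (child) or '@' (snapshot).
 */
static int
zv_matches(const char *zv_name, const char *name, int children)
{
	size_t namelen;

	if (name == NULL)
		return (1);
	if (strcmp(zv_name, name) == 0)
		return (1);
	namelen = strlen(name);
	return (children && strncmp(zv_name, name, namelen) == 0 &&
	    (zv_name[namelen] == '/' || zv_name[namelen] == '@'));
}

int
main(void)
{
	printf("%d\n", zv_matches("tank/vol", "tank/vol", 0));	    /* 1 */
	printf("%d\n", zv_matches("tank/vol@snap", "tank/vol", 1)); /* 1 */
	printf("%d\n", zv_matches("tank/volume", "tank/vol", 1));   /* 0 */
	return (0);
}
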
/*
@@ -2078,6 +2064,7 @@ zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->zt_op = ZVOL_ASYNC_REMOVE_MINORS;
strlcpy(task->zt_name1, name, sizeof (task->zt_name1));
+ task->zt_value = B_TRUE;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
@@ -2199,20 +2186,12 @@ zvol_fini_impl(void)
zvol_remove_minors_impl(NULL);
- /*
- * The call to "zvol_remove_minors_impl" may dispatch entries to
- * the system_taskq, but it doesn't wait for those entries to
- * complete before it returns. Thus, we must wait for all of the
- * removals to finish, before we can continue.
- */
- taskq_wait_outstanding(system_taskq, 0);
-
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
if (ztqs->tqs_taskq == NULL) {
- ASSERT3U(ztqs->tqs_cnt, ==, 0);
+ ASSERT0(ztqs->tqs_cnt);
} else {
for (uint_t i = 0; i < ztqs->tqs_cnt; i++) {
ASSERT3P(ztqs->tqs_taskq[i], !=, NULL);
diff --git a/sys/contrib/openzfs/module/zstd/zfs_zstd.c b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
index 391216d6e263..c403c001086a 100644
--- a/sys/contrib/openzfs/module/zstd/zfs_zstd.c
+++ b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
@@ -441,64 +441,6 @@ zstd_enum_to_level(enum zio_zstd_levels level, int16_t *zstd_level)
}
#ifndef IN_LIBSA
-static size_t
-zfs_zstd_compress_wrap(void *s_start, void *d_start, size_t s_len, size_t d_len,
- int level)
-{
- int16_t zstd_level;
- if (zstd_enum_to_level(level, &zstd_level)) {
- ZSTDSTAT_BUMP(zstd_stat_com_inval);
- return (s_len);
- }
- /*
- * A zstd early abort heuristic.
- *
- * - Zeroth, if this is <= zstd-3, or < zstd_abort_size (currently
- * 128k), don't try any of this, just go.
- * (because experimentally that was a reasonable cutoff for a perf win
- * with tiny ratio change)
- * - First, we try LZ4 compression, and if it doesn't early abort, we
- * jump directly to whatever compression level we intended to try.
- * - Second, we try zstd-1 - if that errors out (usually, but not
- * exclusively, if it would overflow), we give up early.
- *
- * If it works, instead we go on and compress anyway.
- *
- * Why two passes? LZ4 alone gets you a lot of the way, but on highly
- * compressible data, it was losing up to 8.5% of the compressed
- * savings versus no early abort, and all the zstd-fast levels are
- * worse indications on their own than LZ4, and don't improve the LZ4
- * pass noticably if stacked like this.
- */
- size_t actual_abort_size = zstd_abort_size;
- if (zstd_earlyabort_pass > 0 && zstd_level >= zstd_cutoff_level &&
- s_len >= actual_abort_size) {
- int pass_len = 1;
- pass_len = zfs_lz4_compress(s_start, d_start, s_len, d_len, 0);
- if (pass_len < d_len) {
- ZSTDSTAT_BUMP(zstd_stat_lz4pass_allowed);
- goto keep_trying;
- }
- ZSTDSTAT_BUMP(zstd_stat_lz4pass_rejected);
-
- pass_len = zfs_zstd_compress(s_start, d_start, s_len, d_len,
- ZIO_ZSTD_LEVEL_1);
- if (pass_len == s_len || pass_len <= 0 || pass_len > d_len) {
- ZSTDSTAT_BUMP(zstd_stat_zstdpass_rejected);
- return (s_len);
- }
- ZSTDSTAT_BUMP(zstd_stat_zstdpass_allowed);
- } else {
- ZSTDSTAT_BUMP(zstd_stat_passignored);
- if (s_len < actual_abort_size) {
- ZSTDSTAT_BUMP(zstd_stat_passignored_size);
- }
- }
-keep_trying:
- return (zfs_zstd_compress(s_start, d_start, s_len, d_len, level));
-
-}
-
/* Compress block using zstd */
static size_t
zfs_zstd_compress_impl(void *s_start, void *d_start, size_t s_len, size_t d_len,
@@ -876,9 +818,9 @@ static void __init
zstd_mempool_init(void)
{
zstd_mempool_cctx =
- kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
+ vmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
zstd_mempool_dctx =
- kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
+ vmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
for (int i = 0; i < ZSTD_POOL_MAX; i++) {
mutex_init(&zstd_mempool_cctx[i].barrier, NULL,
@@ -924,8 +866,8 @@ zstd_mempool_deinit(void)
release_pool(&zstd_mempool_dctx[i]);
}
- kmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
- kmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
+ vmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
+ vmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
zstd_mempool_dctx = NULL;
zstd_mempool_cctx = NULL;
}
diff --git a/sys/contrib/openzfs/rpm/generic/zfs.spec.in b/sys/contrib/openzfs/rpm/generic/zfs.spec.in
index dddc0a6c8f02..8986e29eb7fb 100644
--- a/sys/contrib/openzfs/rpm/generic/zfs.spec.in
+++ b/sys/contrib/openzfs/rpm/generic/zfs.spec.in
@@ -433,7 +433,7 @@ make install DESTDIR=%{?buildroot}
find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \;
%if 0%{!?__brp_mangle_shebangs:1}
find %{?buildroot}%{_bindir} \
- \( -name arc_summary -or -name arcstat -or -name dbufstat \
+ \( -name zarcsummary -or -name zarcstat -or -name dbufstat \
-or -name zilstat \) \
-exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \;
find %{?buildroot}%{_datadir} \
@@ -506,11 +506,10 @@ systemctl --system daemon-reload >/dev/null || true
# Core utilities
%{_sbindir}/*
%{_bindir}/raidz_test
-%{_sbindir}/zgenhostid
%{_bindir}/zvol_wait
# Optional Python 3 scripts
-%{_bindir}/arc_summary
-%{_bindir}/arcstat
+%{_bindir}/zarcsummary
+%{_bindir}/zarcstat
%{_bindir}/dbufstat
%{_bindir}/zilstat
# Man pages
diff --git a/sys/contrib/openzfs/scripts/mancheck.sh b/sys/contrib/openzfs/scripts/mancheck.sh
index 364ad1b76286..33d7d3c7155f 100755
--- a/sys/contrib/openzfs/scripts/mancheck.sh
+++ b/sys/contrib/openzfs/scripts/mancheck.sh
@@ -11,12 +11,12 @@
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
-# shellcheck disable=SC2086
+# shellcheck disable=SC2068,SC2086
trap 'rm -f "$stdout_file" "$stderr_file" "$result_file"' EXIT
if [ "$#" -eq 0 ]; then
- echo "Usage: $0 manpage-directory..."
+ echo "Usage: $0 <manpage-directory|manpage-file>..."
exit 1
fi
@@ -27,7 +27,16 @@ fi
IFS="
"
-files="$(find "$@" -type f -name '*[1-9]*' -not -name '.*')" || exit 1
+files="$(
+ for path in $@ ; do
+ find -L $path -type f -name '*[1-9]*' -not -name '.*'
+ done | sort | uniq
+)"
+
+if [ "$files" = "" ] ; then
+ echo no files to process! 1>&2
+ exit 1
+fi
add_excl="$(awk '
/^.\\" lint-ok:/ {
@@ -48,6 +57,4 @@ grep -vhE -e 'mandoc: outdated mandoc.db' -e 'STYLE: referenced manual not found
if [ -s "$result_file" ]; then
cat "$result_file"
exit 1
-else
- echo "no errors found"
fi
diff --git a/sys/contrib/openzfs/scripts/spdxcheck.pl b/sys/contrib/openzfs/scripts/spdxcheck.pl
index 88f5a235d70c..4d4e14368beb 100755
--- a/sys/contrib/openzfs/scripts/spdxcheck.pl
+++ b/sys/contrib/openzfs/scripts/spdxcheck.pl
@@ -83,8 +83,8 @@ my $tagged_patterns = q(
man/man?/*.?.in
# Unsuffixed programs (or generated of same)
- cmd/arcstat.in
- cmd/arc_summary
+ cmd/zarcstat.in
+ cmd/zarcsummary
cmd/dbufstat.in
cmd/zilstat.in
cmd/zpool/zpool.d/*
@@ -190,6 +190,7 @@ my @path_license_tags = (
['BSD-2-Clause OR GPL-2.0-only', 'CDDL-1.0'],
'module/icp' => ['Apache-2.0', 'CDDL-1.0'],
+ 'contrib/icp' => ['Apache-2.0', 'CDDL-1.0'],
# Python bindings are always Apache-2.0
'contrib/pyzfs' => ['Apache-2.0'],
diff --git a/sys/contrib/openzfs/scripts/zfs-helpers.sh b/sys/contrib/openzfs/scripts/zfs-helpers.sh
index b45384a9aa52..2e97d40db1c1 100755
--- a/sys/contrib/openzfs/scripts/zfs-helpers.sh
+++ b/sys/contrib/openzfs/scripts/zfs-helpers.sh
@@ -122,6 +122,13 @@ install() {
src=$1
dst=$2
+ # We may have an old symlink pointing to different ZFS workspace.
+ # Remove the old symlink if it doesn't point to our workspace.
+ if [ -h "$dst" ] && [ "$(readlink -f """$dst""")" != "$src" ] ; then
+ echo "Removing old symlink: $dst -> $(readlink """$dst""")"
+ rm "$dst"
+ fi
+
if [ -h "$dst" ]; then
echo "Symlink exists: $dst"
elif [ -e "$dst" ]; then
diff --git a/sys/contrib/openzfs/scripts/zfs-tests.sh b/sys/contrib/openzfs/scripts/zfs-tests.sh
index 04f3b6f32cb8..5a0a1a609448 100755
--- a/sys/contrib/openzfs/scripts/zfs-tests.sh
+++ b/sys/contrib/openzfs/scripts/zfs-tests.sh
@@ -38,6 +38,7 @@ DEBUG=""
CLEANUP="yes"
CLEANUPALL="no"
KMSG=""
+TIMEOUT_DEBUG=""
LOOPBACK="yes"
STACK_TRACER="no"
FILESIZE="4G"
@@ -364,6 +365,7 @@ OPTIONS:
-k Disable cleanup after test failure
-K Log test names to /dev/kmsg
-f Use files only, disables block device tests
+ -O Dump debugging info to /dev/kmsg on test timeout
-S Enable stack tracer (negative performance impact)
-c Only create and populate constrained path
-R Automatically rerun failing tests
@@ -402,7 +404,7 @@ $0 -x
EOF
}
-while getopts 'hvqxkKfScRmn:d:Ds:r:?t:T:u:I:' OPTION; do
+while getopts 'hvqxkKfScRmOn:d:Ds:r:?t:T:u:I:' OPTION; do
case $OPTION in
h)
usage
@@ -445,6 +447,9 @@ while getopts 'hvqxkKfScRmn:d:Ds:r:?t:T:u:I:' OPTION; do
export NFS=1
. "$nfsfile"
;;
+ O)
+ TIMEOUT_DEBUG="yes"
+ ;;
d)
FILEDIR="$OPTARG"
;;
@@ -773,6 +778,7 @@ msg "${TEST_RUNNER}" \
"${DEBUG:+-D}" \
"${KMEMLEAK:+-m}" \
"${KMSG:+-K}" \
+ "${TIMEOUT_DEBUG:+-O}" \
"-c \"${RUNFILES}\"" \
"-T \"${TAGS}\"" \
"-i \"${STF_SUITE}\"" \
@@ -783,6 +789,7 @@ msg "${TEST_RUNNER}" \
${DEBUG:+-D} \
${KMEMLEAK:+-m} \
${KMSG:+-K} \
+ ${TIMEOUT_DEBUG:+-O} \
-c "${RUNFILES}" \
-T "${TAGS}" \
-i "${STF_SUITE}" \
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index 9fad8946f4f3..9f531411fbe1 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -168,10 +168,10 @@ tags = ['functional', 'cli_root', 'zinject']
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
- 'zdb_display_block', 'zdb_encrypted', 'zdb_label_checksum',
- 'zdb_object_range_neg', 'zdb_object_range_pos', 'zdb_objset_id',
- 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2', 'zdb_backup',
- 'zdb_tunables']
+ 'zdb_display_block', 'zdb_encrypted', 'zdb_encrypted_raw',
+ 'zdb_label_checksum', 'zdb_object_range_neg', 'zdb_object_range_pos',
+ 'zdb_objset_id', 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2',
+ 'zdb_backup', 'zdb_tunables']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
@@ -323,6 +323,10 @@ tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_raw', 'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']
+[tests/functional/cli_root/zfs_send_delegation]
+tests = ['zfs_send_test']
+tags = ['functional', 'cli_root', 'zfs_send_delegation']
+
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
@@ -379,7 +383,7 @@ tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zhack]
tests = ['zhack_label_repair_001', 'zhack_label_repair_002',
- 'zhack_label_repair_003', 'zhack_label_repair_004']
+ 'zhack_label_repair_003', 'zhack_label_repair_004', 'zhack_metaslab_leak']
pre =
post =
tags = ['functional', 'cli_root', 'zhack']
@@ -391,8 +395,9 @@ tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
- 'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
- 'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
+ 'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_warn_create',
+ 'zpool_add_warn_degraded', 'zpool_add_warn_removal', 'add-o_ashift',
+ 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
@@ -486,6 +491,10 @@ tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
+[tests/functional/cli_root/zpool_iostat]
+tests = ['zpool_iostat_interval_all', 'zpool_iostat_interval_some']
+tags = ['functional', 'cli_root', 'zpool_iostat']
+
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
@@ -546,7 +555,7 @@ tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_multiple_pools',
'zpool_error_scrub_001_pos', 'zpool_error_scrub_002_pos',
'zpool_error_scrub_003_pos', 'zpool_error_scrub_004_pos',
- 'zpool_scrub_date_range_001']
+ 'zpool_scrub_date_range_001', 'zpool_scrub_date_range_002']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
@@ -624,8 +633,8 @@ tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
- 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
- 'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
+ 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'zarcstat_001_pos',
+ 'zarcsummary_001_pos', 'zarcsummary_002_neg', 'zpool_wait_privilege',
'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
@@ -637,6 +646,10 @@ tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
user =
tags = ['functional', 'cli_user', 'zfs_list']
+[tests/functional/cli_user/zfs_send_delegation_user]
+tests = ['zfs_send_usertest']
+tags = ['functional', 'cli_user', 'zfs_send_delegation_user']
+
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
@@ -725,7 +738,11 @@ tests = ['fadvise_willneed']
tags = ['functional', 'fadvise']
[tests/functional/failmode]
-tests = ['failmode_dmu_tx_wait', 'failmode_dmu_tx_continue']
+tests = ['failmode_dmu_tx_wait', 'failmode_dmu_tx_continue',
+ 'failmode_fsync_wait', 'failmode_fsync_continue',
+ 'failmode_msync_wait', 'failmode_msync_continue',
+ 'failmode_osync_wait', 'failmode_osync_continue',
+ 'failmode_syncalways_wait', 'failmode_syncalways_continue']
tags = ['functional', 'failmode']
[tests/functional/fallocate]
@@ -936,10 +953,11 @@ tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
- 'attach_resilver', 'detach', 'rebuild_disabled_feature',
- 'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
- 'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
- 'scrub_cancel']
+ 'attach_resilver', 'attach_resilver_sit_out', 'detach',
+ 'rebuild_disabled_feature', 'rebuild_multiple', 'rebuild_raidz',
+ 'replace_import', 'replace_rebuild', 'replace_resilver',
+ 'replace_resilver_sit_out', 'resilver_restart_001',
+ 'resilver_restart_002', 'scrub_cancel']
tags = ['functional', 'replacement']
[tests/functional/reservation]
@@ -1072,7 +1090,8 @@ tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
- 'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_compat']
+ 'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_014_pos',
+ 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
@@ -1089,7 +1108,7 @@ tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_stress]
-tests = ['zvol_stress']
+tests = ['zvol_stress', 'zvol_stress_destroy']
tags = ['functional', 'zvol', 'zvol_stress']
[tests/functional/zvol/zvol_swap]
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index f3d56acffde0..339361cc2762 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -109,7 +109,9 @@ tags = ['functional', 'direct']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill',
'zed_cksum_reported', 'zed_cksum_config', 'zed_io_config',
- 'zed_slow_io', 'zed_slow_io_many_vdevs', 'zed_diagnose_multiple']
+ 'zed_slow_io', 'zed_slow_io_many_vdevs', 'zed_diagnose_multiple',
+ 'zed_synchronous_zedlet', 'slow_vdev_sit_out', 'slow_vdev_sit_out_neg',
+ 'slow_vdev_degraded_sit_out']
tags = ['functional', 'events']
[tests/functional/fallocate:Linux]
@@ -161,7 +163,7 @@ tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
-tests = ['umount_unlinked_drain']
+tests = ['umount_unlinked_drain', 'mount_loopback']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
diff --git a/sys/contrib/openzfs/tests/runfiles/sanity.run b/sys/contrib/openzfs/tests/runfiles/sanity.run
index 7767c0c2d535..249b415029c4 100644
--- a/sys/contrib/openzfs/tests/runfiles/sanity.run
+++ b/sys/contrib/openzfs/tests/runfiles/sanity.run
@@ -400,8 +400,8 @@ tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg',
'zpool_remove_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
- 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
- 'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
+ 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'zarcstat_001_pos',
+ 'zarcsummary_001_pos', 'zarcsummary_002_neg', 'zpool_wait_privilege',
'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
@@ -622,7 +622,7 @@ tags = ['functional', 'vdev_zaps']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
- 'xattr_011_pos', 'xattr_013_pos', 'xattr_compat']
+ 'xattr_011_pos', 'xattr_013_pos', 'xattr_014_pos', 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in b/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in
index 2158208be6e5..d2c1185e4a94 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/test-runner.py.in
@@ -34,6 +34,7 @@ from select import select
from subprocess import PIPE
from subprocess import Popen
from subprocess import check_output
+from subprocess import run
from threading import Timer
from time import time, CLOCK_MONOTONIC
from os.path import exists
@@ -187,6 +188,63 @@ User: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user)
def kill_cmd(self, proc, options, kmemleak, keyboard_interrupt=False):
+
+ """
+ We're about to kill a command due to a timeout.
+ If we're running with the -O option, then dump debug info about the
+ process with the highest CPU usage to /dev/kmsg (Linux only). This can
+ help debug the timeout.
+
+ Debug info includes:
+ - 30 lines from 'top'
+ - /proc/<PID>/stack output of process with highest CPU usage
+ - Last lines of strace output from the process with highest CPU usage
+ """
+ if options.timeout_debug and exists("/dev/kmsg"):
+ c = """
+TOP_OUT="$(COLUMNS=160 top -b -n 1 | head -n 30)"
+read -r PID CMD <<< $(echo "$TOP_OUT" | /usr/bin/awk \
+"/COMMAND/{
+ print_next=1
+ next
+}
+{
+ if (print_next == 1) {
+ print \\$1\\" \\"\\$12
+ exit
+ }
+}")
+echo "##### ZTS timeout debug #####"
+echo "----- top -----"
+echo "$TOP_OUT"
+echo "----- /proc/$PID/stack ($CMD)) -----"
+cat /proc/$PID/stack
+echo "----- strace ($CMD) -----"
+TMPFILE="$(mktemp --suffix=ZTS)"
+/usr/bin/strace -k --stack-traces -p $PID &> "$TMPFILE" &
+sleep 0.1
+killall strace
+tail -n 30 $TMPFILE
+rm "$TMPFILE"
+echo "##### /proc/sysrq-trigger stack #####"
+"""
+ c = "sudo bash -c '" + c + "'"
+ data = run(c, capture_output=True, shell=True, text=True)
+ out = data.stdout
+ try:
+ kp = Popen([SUDO, "sh", "-c",
+ "echo '" + out + "' > /dev/kmsg"])
+ kp.wait()
+
+ """
+ Trigger kernel stack traces
+ """
+ kp = Popen([SUDO, "sh", "-c",
+ "echo l > /proc/sysrq-trigger"])
+ kp.wait()
+ except Exception:
+ pass
+
"""
Kill a running command due to timeout, or ^C from the keyboard. If
sudo is required, this user was verified previously.
@@ -1129,6 +1187,9 @@ def parse_args():
parser.add_option('-o', action='callback', callback=options_cb,
default=BASEDIR, dest='outputdir', type='string',
metavar='outputdir', help='Specify an output directory.')
+ parser.add_option('-O', action='store_true', default=False,
+ dest='timeout_debug',
+ help='Dump debugging info to /dev/kmsg on test timeout')
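+ # Illustrative use (assumed invocation): "test-runner.py -O ..." makes
+ # a timed-out test dump 'top', /proc/<PID>/stack, and strace output for
+ # the busiest process into the kernel log via kill_cmd() above.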
parser.add_option('-i', action='callback', callback=options_cb,
default=TESTDIR, dest='testdir', type='string',
metavar='testdir', help='Specify a test directory.')
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index 001970120148..5bc65f993734 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -232,7 +232,7 @@ maybe = {
'cli_root/zpool_trim/zpool_trim_fault_export_import_online':
['FAIL', known_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', 6141],
- 'cli_user/misc/arc_summary_001_pos': ['FAIL', known_reason],
+ 'cli_user/misc/zarcsummary_001_pos': ['FAIL', known_reason],
'delegate/setup': ['SKIP', exec_reason],
'events/zed_cksum_config': ['FAIL', known_reason],
'fault/auto_replace_002_pos': ['FAIL', known_reason],
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
index 1cd90024e94d..62f1684acfb4 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/.gitignore
@@ -28,6 +28,7 @@
/mmap_seek
/mmap_sync
/mmapwrite
+/mmap_write_sync
/nvlist_to_lua
/randfree_file
/randwritecomp
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
index d5448055a1e1..85c3cf3c35a8 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am
@@ -74,7 +74,7 @@ scripts_zfs_tests_bin_PROGRAMS += %D%/mkbusy %D%/mkfile %D%/mkfiles %D%/mktree
scripts_zfs_tests_bin_PROGRAMS += \
%D%/mmap_exec %D%/mmap_ftruncate %D%/mmap_seek \
- %D%/mmap_sync %D%/mmapwrite %D%/readmmap
+ %D%/mmap_sync %D%/mmapwrite %D%/readmmap %D%/mmap_write_sync
%C%_mmapwrite_LDADD = -lpthread
if WANT_MMAP_LIBAIO
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c
index e7b80d01efaa..f8948a61833d 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c
@@ -207,7 +207,7 @@ insert_find_remove(zfs_btree_t *bt, char *why)
"Found removed value (%llu)\n", *p);
return (1);
}
- ASSERT3S(zfs_btree_numnodes(bt), ==, 0);
+ ASSERT0(zfs_btree_numnodes(bt));
zfs_btree_verify(bt);
return (0);
@@ -279,7 +279,7 @@ drain_tree(zfs_btree_t *bt, char *why)
node = avl_last(&avl);
ASSERT3U(node->data, ==, *(uint64_t *)zfs_btree_last(bt, NULL));
}
- ASSERT3S(zfs_btree_numnodes(bt), ==, 0);
+ ASSERT0(zfs_btree_numnodes(bt));
void *avl_cookie = NULL;
while ((node = avl_destroy_nodes(&avl, &avl_cookie)) != NULL)
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c
index e08003f80464..c8d8622c7571 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c
@@ -529,6 +529,8 @@ static const char *aes_gcm_impl[][2] = {
{ "aesni", "pclmulqdq" },
{ "x86_64", "avx" },
{ "aesni", "avx" },
+ { "x86_64", "avx2" },
+ { "aesni", "avx2" },
};
/* signature of function to call after setting implementation params */
@@ -861,7 +863,8 @@ test_result(const crypto_test_t *test, int encrypt_rv, uint8_t *encrypt_buf,
return (pass);
/* print summary of test result */
- printf("%s[%lu]: encrypt=%s decrypt=%s\n", test->fileloc, test->id,
+ printf("%s[%ju]: encrypt=%s decrypt=%s\n", test->fileloc,
+ (uintmax_t)test->id,
encrypt_pass ? "PASS" : "FAIL",
decrypt_pass ? "PASS" : "FAIL");
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_write_sync.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_write_sync.c
new file mode 100644
index 000000000000..ad5e37f24960
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_write_sync.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: CDDL-1.0
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or https://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2025, Klara, Inc.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#define PAGES (8)
+
+int
+main(int argc, char **argv)
+{
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s <filename>\n", argv[0]);
+ exit(1);
+ }
+
+ long page_size = sysconf(_SC_PAGESIZE);
+ if (page_size < 0) {
+ perror("sysconf");
+ exit(2);
+ }
+ size_t map_size = page_size * PAGES;
+
+ int fd = open(argv[1], O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
+ if (fd < 0) {
+ perror("open");
+ exit(2);
+ }
+
+ if (ftruncate(fd, map_size) < 0) {
+ perror("ftruncate");
+ close(fd);
+ exit(2);
+ }
+
+ uint64_t *p =
+ mmap(NULL, map_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ close(fd);
+ exit(2);
+ }
+
+ for (int i = 0; i < (map_size / sizeof (uint64_t)); i++)
+ p[i] = 0x0123456789abcdef;
+
+ if (msync(p, map_size, MS_SYNC) < 0) {
+ perror("msync");
+ munmap(p, map_size);
+ close(fd);
+ exit(3);
+ }
+
+ munmap(p, map_size);
+ close(fd);
+ exit(0);
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
index bbaa8665ecc8..1c4d25e152a7 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
@@ -100,6 +100,7 @@ export SYSTEM_FILES_COMMON='awk
uniq
vmstat
wc
+ which
xargs
xxh128sum'
@@ -146,6 +147,7 @@ export SYSTEM_FILES_LINUX='attr
lscpu
lsmod
lsscsi
+ mkfs.xfs
mkswap
modprobe
mountpoint
@@ -169,8 +171,8 @@ export ZFS_FILES='zdb
zpool
ztest
raidz_test
- arc_summary
- arcstat
+ zarcsummary
+ zarcstat
zilstat
dbufstat
mount.zfs
@@ -210,6 +212,7 @@ export ZFSTEST_FILES='badsend
mmap_seek
mmap_sync
mmapwrite
+ mmap_write_sync
nvlist_to_lua
randfree_file
randwritecomp
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
index 23e89599cae0..8b30b9b91641 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
@@ -1085,7 +1085,7 @@ function fill_fs # destdir dirnum filenum bytes num_writes data
typeset -i filenum=${3:-50}
typeset -i bytes=${4:-8192}
typeset -i num_writes=${5:-10240}
- typeset data=${6:-0}
+ typeset data=${6:-"R"}
mkdir -p $destdir/{1..$dirnum}
for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
@@ -1112,6 +1112,16 @@ function get_pool_prop # property pool
zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
}
+# Get the specified vdev property in parsable format or fail
+function get_vdev_prop
+{
+ typeset prop="$1"
+ typeset pool="$2"
+ typeset vdev="$3"
+
+ zpool get -Hpo value "$prop" "$pool" "$vdev" || log_fail "zpool get $prop $pool $vdev"
+}
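+
+# Usage sketch (property/vdev names assumed):
+#   typeset val=$(get_vdev_prop sit_out $TESTPOOL ${DISKS%% *})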
+
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
@@ -1971,6 +1981,28 @@ function wait_vdev_state # pool disk state timeout
}
#
+# Wait for vdev 'sit_out' property to be cleared.
+#
+# $1 pool name
+# $2 vdev name
+# $3 timeout
+#
+function wait_sit_out #pool vdev timeout
+{
+ typeset pool=${1:-$TESTPOOL}
+ typeset vdev="$2"
+ typeset timeout=${3:-300}
+ for (( timer = 0; timer < $timeout; timer++ )); do
+ if [ "$(get_vdev_prop sit_out "$pool" "$vdev")" = "off" ]; then
+ return 0
+ fi
+ sleep 1
+ done
+
+ return 1
+}
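+
+# Example (disk name assumed): allow up to five minutes for the vdev to
+# stop sitting out before failing the test:
+#   log_must wait_sit_out $TESTPOOL ${DISKS%% *} 300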
+
+#
# Check the output of 'zpool status -v <pool>',
# and to see if the content of <token> contain the <keyword> specified.
#
@@ -2881,6 +2913,28 @@ function user_run
log_note "user: $user"
log_note "cmd: $*"
+ if ! sudo -Eu $user test -x $PATH ; then
+ log_note "-------------------------------------------------"
+ log_note "Warning: $user doesn't have permissions on $PATH"
+ log_note ""
+ log_note "This usually happens when you're running ZTS locally"
+ log_note "from inside the ZFS source dir, and are attempting to"
+ log_note "run a test that calls user_run. The ephemeral user"
+ log_note "($user) that ZTS is creating does not have permission"
+ log_note "to traverse to $PATH, or the binaries in $PATH are"
+ log_note "not the right permissions."
+ log_note ""
+ log_note "To get around this, copy your ZFS source directory"
+ log_note "to a world-accessible location (like /tmp), and "
+ log_note "change the permissions on your ZFS source dir "
+ log_note "to allow access."
+ log_note ""
+ log_note "Also, verify that /dev/zfs is RW for others:"
+ log_note ""
+ log_note " sudo chmod o+rw /dev/zfs"
+ log_note "-------------------------------------------------"
+ fi
+
typeset out=$TEST_BASE_DIR/out
typeset err=$TEST_BASE_DIR/err
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
index e273c9f85c28..127ea188f17f 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg
@@ -72,6 +72,9 @@ MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval
OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable
RAIDZ_EXPAND_MAX_REFLOW_BYTES vdev.expand_max_reflow_bytes raidz_expand_max_reflow_bytes
+READ_SIT_OUT_SECS vdev.read_sit_out_secs vdev_read_sit_out_secs
+SIT_OUT_CHECK_INTERVAL vdev.raidz_outlier_check_interval_ms vdev_raidz_outlier_check_interval_ms
+SIT_OUT_INSENSITIVITY vdev.raidz_outlier_insensitivity vdev_raidz_outlier_insensitivity
REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled
REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment
@@ -88,6 +91,7 @@ SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit
SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data
SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata
SPA_NOTE_TXG_TIME spa.note_txg_time spa_note_txg_time
+SPA_FLUSH_TXG_TIME spa.flush_txg_time spa_flush_txg_time
TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min
TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip
TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index c2542287c1d7..678c01b58f94 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -197,6 +197,7 @@ nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
+ functional/cli_root/zpool_iostat/zpool_iostat.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
@@ -276,6 +277,7 @@ nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/direct/dio.kshlib \
functional/events/events.cfg \
functional/events/events_common.kshlib \
+ functional/failmode/failmode.kshlib \
functional/fault/fault.cfg \
functional/gang_blocks/gang_blocks.kshlib \
functional/grow/grow.cfg \
@@ -639,6 +641,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_encrypted.ksh \
+ functional/cli_root/zdb/zdb_encrypted_raw.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
@@ -891,6 +894,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zfs_send/zfs_send_raw.ksh \
functional/cli_root/zfs_send/zfs_send_skip_missing.ksh \
functional/cli_root/zfs_send/zfs_send_sparse.ksh \
+ functional/cli_root/zfs_send_delegation/cleanup.ksh \
+ functional/cli_root/zfs_send_delegation/setup.ksh \
+ functional/cli_root/zfs_send_delegation/zfs_send_test.ksh \
functional/cli_root/zfs_set/cache_001_pos.ksh \
functional/cli_root/zfs_set/cache_002_neg.ksh \
functional/cli_root/zfs_set/canmount_001_pos.ksh \
@@ -1008,6 +1014,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zhack/zhack_label_repair_002.ksh \
functional/cli_root/zhack/zhack_label_repair_003.ksh \
functional/cli_root/zhack/zhack_label_repair_004.ksh \
+ functional/cli_root/zhack/zhack_metaslab_leak.ksh \
functional/cli_root/zpool_add/add_nested_replacing_spare.ksh \
functional/cli_root/zpool_add/add-o_ashift.ksh \
functional/cli_root/zpool_add/add_prop_ashift.ksh \
@@ -1022,7 +1029,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
- functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
+ functional/cli_root/zpool_add/zpool_add_warn_create.ksh \
+ functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh \
+ functional/cli_root/zpool_add/zpool_add_warn_removal.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
@@ -1173,6 +1182,10 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool_import/zpool_import_parallel_admin.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_neg.ksh \
functional/cli_root/zpool_import/zpool_import_parallel_pos.ksh \
+ functional/cli_root/zpool_iostat/cleanup.ksh \
+ functional/cli_root/zpool_iostat/setup.ksh \
+ functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh \
+ functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
@@ -1246,6 +1259,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_txg_continue_from_last.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_date_range_001.ksh \
+ functional/cli_root/zpool_scrub/zpool_scrub_date_range_002.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_001_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_003_pos.ksh \
@@ -1350,9 +1364,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_root/zpool/zpool_002_pos.ksh \
functional/cli_root/zpool/zpool_003_pos.ksh \
functional/cli_root/zpool/zpool_colors.ksh \
- functional/cli_user/misc/arcstat_001_pos.ksh \
- functional/cli_user/misc/arc_summary_001_pos.ksh \
- functional/cli_user/misc/arc_summary_002_neg.ksh \
+ functional/cli_user/misc/zarcstat_001_pos.ksh \
+ functional/cli_user/misc/zarcsummary_001_pos.ksh \
+ functional/cli_user/misc/zarcsummary_002_neg.ksh \
functional/cli_user/misc/zilstat_001_pos.ksh \
functional/cli_user/misc/cleanup.ksh \
functional/cli_user/misc/setup.ksh \
@@ -1407,6 +1421,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/cli_user/zfs_list/zfs_list_005_neg.ksh \
functional/cli_user/zfs_list/zfs_list_007_pos.ksh \
functional/cli_user/zfs_list/zfs_list_008_neg.ksh \
+ functional/cli_user/zfs_send_delegation_user/cleanup.ksh \
+ functional/cli_user/zfs_send_delegation_user/setup.ksh \
+ functional/cli_user/zfs_send_delegation_user/zfs_send_usertest.ksh \
functional/cli_user/zpool_iostat/cleanup.ksh \
functional/cli_user/zpool_iostat/setup.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh \
@@ -1523,6 +1540,9 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/events/events_001_pos.ksh \
functional/events/events_002_pos.ksh \
functional/events/setup.ksh \
+ functional/events/slow_vdev_degraded_sit_out.ksh \
+ functional/events/slow_vdev_sit_out.ksh \
+ functional/events/slow_vdev_sit_out_neg.ksh \
functional/events/zed_cksum_config.ksh \
functional/events/zed_cksum_reported.ksh \
functional/events/zed_diagnose_multiple.ksh \
@@ -1531,6 +1551,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/events/zed_rc_filter.ksh \
functional/events/zed_slow_io.ksh \
functional/events/zed_slow_io_many_vdevs.ksh \
+ functional/events/zed_synchronous_zedlet.ksh \
functional/exec/cleanup.ksh \
functional/exec/exec_001_pos.ksh \
functional/exec/exec_002_neg.ksh \
@@ -1541,6 +1562,14 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/failmode/cleanup.ksh \
functional/failmode/failmode_dmu_tx_wait.ksh \
functional/failmode/failmode_dmu_tx_continue.ksh \
+ functional/failmode/failmode_fsync_wait.ksh \
+ functional/failmode/failmode_fsync_continue.ksh \
+ functional/failmode/failmode_msync_wait.ksh \
+ functional/failmode/failmode_msync_continue.ksh \
+ functional/failmode/failmode_osync_wait.ksh \
+ functional/failmode/failmode_osync_continue.ksh \
+ functional/failmode/failmode_syncalways_wait.ksh \
+ functional/failmode/failmode_syncalways_continue.ksh \
functional/failmode/setup.ksh \
functional/fallocate/cleanup.ksh \
functional/fallocate/fallocate_prealloc.ksh \
@@ -1697,6 +1726,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/mmp/setup.ksh \
functional/mount/cleanup.ksh \
functional/mount/setup.ksh \
+ functional/mount/mount_loopback.ksh \
functional/mount/umount_001.ksh \
functional/mount/umountall_001.ksh \
functional/mount/umount_unlinked_drain.ksh \
@@ -1926,6 +1956,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/replacement/attach_multiple.ksh \
functional/replacement/attach_rebuild.ksh \
functional/replacement/attach_resilver.ksh \
+ functional/replacement/attach_resilver_sit_out.ksh \
functional/replacement/cleanup.ksh \
functional/replacement/detach.ksh \
functional/replacement/rebuild_disabled_feature.ksh \
@@ -1934,6 +1965,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/replacement/replace_import.ksh \
functional/replacement/replace_rebuild.ksh \
functional/replacement/replace_resilver.ksh \
+ functional/replacement/replace_resilver_sit_out.ksh \
functional/replacement/resilver_restart_001.ksh \
functional/replacement/resilver_restart_002.ksh \
functional/replacement/scrub_cancel.ksh \
@@ -2202,6 +2234,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
+ functional/xattr/xattr_014_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zap_shrink/cleanup.ksh \
functional/zap_shrink/zap_shrink_001_pos.ksh \
@@ -2235,6 +2268,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
+ functional/zvol/zvol_stress/zvol_stress_destroy.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh
new file mode 100755
index 000000000000..85d267d5402f
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_encrypted_raw.ksh
@@ -0,0 +1,75 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+
+#
+# DESCRIPTION:
+# 'zdb -K ...' should enable reading from a raw-encrypted dataset
+#
+# STRATEGY:
+# 1. Create an encrypted dataset
+# 2. Write some data to a file
+# 3. Run zdb -dddd on the file, confirm it can't be read
+# 4. Run zdb -K ... -dddd on the file, confirm it can be read
+#
+
+verify_runnable "both"
+
+dataset="$TESTPOOL/$TESTFS2"
+file="$TESTDIR2/somefile"
+keyfile="$TEST_BASE_DIR/keyfile"
+
+function cleanup
+{
+ datasetexists "$dataset" && destroy_dataset "$dataset" -f
+ rm -f "$keyfile"
+ default_cleanup_noexit
+}
+
+log_onexit cleanup
+
+log_must default_setup_noexit $DISKS
+
+log_assert "'zdb -K' should enable reading from a raw-encrypted dataset"
+
+# The key must be 32 bytes long.
+echo -n "$RAWKEY" > "$keyfile"
+
+log_must zfs create -o mountpoint="$TESTDIR2" \
+ -o encryption=on -o keyformat=raw -o keylocation="file://$keyfile" \
+ "$dataset"
+
+echo 'my great encrypted text' > "$file"
+
+typeset -i obj=$(ls -i "$file" | cut -d' ' -f1)
+typeset -i size=$(wc -c < "$file")
+
+log_note "test file $file is objid $obj, size $size"
+
+sync_pool "$TESTPOOL" true
+
+log_must eval "zdb -dddd $dataset $obj | grep -q 'object encrypted'"
+
+log_must eval "zdb -K $keyfile -dddd $dataset $obj | grep -q 'size\s$size$'"
+
+log_pass "'zdb -K' enables reading from a raw-encrypted dataset"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_tunables.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_tunables.ksh
index 46965aa7cc37..b89790d5d525 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_tunables.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_tunables.ksh
@@ -63,7 +63,7 @@ log_mustnot_expect 'no such tunable: 0' zdb -o show=0
log_mustnot_expect 'no such tunable: 1' zdb -o info=1
# can set multiple in same command
-log_must eval 'zdb -o zfs_recover=1 -o zfs_flags=512 | xargs | grep -qE "^zfs_recover: 0 -> 1 zfs_flags: 4294965758 -> 512$"'
+log_must eval 'zdb -o zfs_recover=1 -o zfs_flags=512 | xargs | grep -qE "^zfs_recover: 0 -> 1 zfs_flags: 4294932990 -> 512$"'
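+# Note: the value matched above is the compiled-in zfs_flags debug-mask
+# default, so it changes whenever the set of ZFS_DEBUG_* flags changes.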
# can set and show in same command
log_must eval 'zdb -o zfs_recover=1 -o zfs_recover -o zfs_recover=0 | xargs | grep -qE "^zfs_recover: 0 -> 1 zfs_recover: 1 zfs_recover: 1 -> 0$"'
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/cleanup.ksh
new file mode 100755
index 000000000000..4a59e15cc693
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/cleanup.ksh
@@ -0,0 +1,43 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/delegate/delegate_common.kshlib
+
+
+poolexists $TESTPOOL1 && \
+ destroy_pool $TESTPOOL1
+
+del_user $STAFF1
+del_user $STAFF2
+del_group $STAFF_GROUP
+
+del_user $OTHER1
+del_user $OTHER2
+del_group $OTHER_GROUP
+
+default_cleanup
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/setup.ksh
new file mode 100755
index 000000000000..0978193eddc4
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/setup.ksh
@@ -0,0 +1,50 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/delegate/delegate_common.kshlib
+
+# Create staff group and add two users to it
+log_must add_group $STAFF_GROUP
+if ! id $STAFF1 > /dev/null 2>&1; then
+ log_must add_user $STAFF_GROUP $STAFF1
+fi
+if ! id $STAFF2 > /dev/null 2>&1; then
+ log_must add_user $STAFF_GROUP $STAFF2
+fi
+
+# Create other group and add two users to it
+log_must add_group $OTHER_GROUP
+if ! id $OTHER1 > /dev/null 2>&1; then
+ log_must add_user $OTHER_GROUP $OTHER1
+fi
+if ! id $OTHER2 > /dev/null 2>&1; then
+ log_must add_user $OTHER_GROUP $OTHER2
+fi
+DISK=${DISKS%% *}
+
+default_raidz_setup $DISKS
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/zfs_send_test.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/zfs_send_test.ksh
new file mode 100755
index 000000000000..d018f313fae1
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_send_delegation/zfs_send_test.ksh
@@ -0,0 +1,111 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara Inc.
+#
+
+# STRATEGY:
+# 1. Create a pool (this is done by the test framework)
+# 2. Create an encrypted dataset
+# 3. Write random data to the encrypted dataset
+# 4. Snapshot the dataset
+# 5. As root: attempt a send and raw send (both should succeed)
+# 6. Create a delegation (zfs allow -u user send testpool/encrypted_dataset)
+# 7. As root: attempt a send and raw send (both should succeed)
+# 8. Create a delegation (zfs allow -u user send:raw testpool/encrypted_dataset)
+# 9. As root: attempt a send and raw send (both should succeed)
+# 10. Disable delegation (zfs unallow)
+# 11. As root: attempt a send and raw send (both should succeed)
+# 12. Clean up (handled by framework)
+#
+# Tested as a user under ../cli_user/zfs_send_delegation_user/
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_create/zfs_create_common.kshlib
+. $STF_SUITE/tests/functional/cli_root/zfs_create/properties.kshlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+. $STF_SUITE/tests/functional/delegate/delegate.cfg
+
+# create encrypted dataset
+
+log_must eval "echo $PASSPHRASE | zfs create -o encryption=on -o keyformat=passphrase $TESTPOOL/$TESTFS1"
+
+# create target dataset for receives
+if ! zfs list | grep -q "$TESTFS2"; then
+ dataset_created="TRUE"
+ log_must zfs create $TESTPOOL/$TESTFS2
+fi
+
+# delegate a base set of permissions to the test user
+typeset perms="snapshot,reservation,compression,checksum,userprop,receive"
+
+log_note "Adding permissions for the $OTHER1 user."
+log_must zfs allow $OTHER1 $perms $TESTPOOL/$TESTFS1
+log_must zfs allow $OTHER1 $perms $TESTPOOL/$TESTFS2
+
+# create random data
+log_must fill_fs $TESTPOOL/$TESTFS1/child 1 2047 1024 1 R
+
+# snapshot
+log_must zfs snapshot $TESTPOOL/$TESTFS1@snap1
+
+
+# check baseline send abilities (should pass)
+log_must eval "zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv0_datastream.$$"
+log_must eval "zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv0raw_datastream.$$"
+
+
+# create delegation
+log_must zfs allow $OTHER1 send $TESTPOOL/$TESTFS1
+
+# attempt send with full allow
+
+log_must eval "zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv1_datastream.$$"
+log_must eval "zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv1raw_datastream.$$"
+
+# create raw delegation
+log_must zfs allow $OTHER1 send:raw $TESTPOOL/$TESTFS1
+log_must zfs unallow $OTHER1 send $TESTPOOL/$TESTFS1
+
+# test new send abilities (should pass)
+log_must eval "zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv2_datastream.$$"
+log_must eval "zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv2raw_datastream.$$"
+
+
+# disable raw delegation
+log_must zfs unallow $OTHER1 send:raw $TESTPOOL/$TESTFS1
+log_must zfs allow $OTHER1 send $TESTPOOL/$TESTFS1
+
+# verify original send abilities (should pass)
+log_must eval "zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv3_datastream.$$"
+log_must eval "zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive $TESTPOOL/$TESTFS2/zfsrecv3raw_datastream.$$"
+
+
+function cleanup
+{
+ datasetexists $TESTPOOL/$TESTFS1 && \
+ destroy_dataset $TESTPOOL/$TESTFS1 -r
+ datasetexists $TESTPOOL/$TESTFS2 && \
+ destroy_dataset $TESTPOOL/$TESTFS2 -r
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/library.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/library.kshlib
index 0f5f6198daf2..0d07b1fd1952 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/library.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/library.kshlib
@@ -33,13 +33,16 @@
# Test one:
#
# 1. Create pool on a loopback device with some test data
-# 2. Export the pool.
-# 3. Corrupt all label checksums in the pool
-# 4. Check that pool cannot be imported
-# 5. Verify that it cannot be imported after using zhack label repair -u
+# 2. Checksum repair should work with a valid TXG. Repeatedly write and
+# sync the pool so there are enough transactions for every uberblock
+# to have a TXG
+# 3. Export the pool.
+# 4. Corrupt all label checksums in the pool
+# 5. Check that pool cannot be imported
+# 6. Verify that it cannot be imported after using zhack label repair -u
# to ensure that the -u option will quit on corrupted checksums.
-# 6. Use zhack label repair -c on device
-# 7. Check that pool can be imported and that data is intact
+# 7. Use zhack label repair -c on device
+# 8. Check that pool can be imported and that data is intact
#
# Test two:
#
@@ -97,7 +100,7 @@ VIRTUAL_MIRROR_DEVICE=
function cleanup_lo
{
- L_DEVICE="$1"
+ typeset L_DEVICE="$1"
if [[ -e $L_DEVICE ]]; then
if is_linux; then
@@ -133,9 +136,9 @@ function get_devsize
function pick_logop
{
- L_SHOULD_SUCCEED="$1"
+ typeset L_SHOULD_SUCCEED="$1"
- l_logop="log_mustnot"
+ typeset l_logop="log_mustnot"
if [ "$L_SHOULD_SUCCEED" == true ]; then
l_logop="log_must"
fi
@@ -145,7 +148,9 @@ function pick_logop
function check_dataset
{
- L_SHOULD_SUCCEED="$1"
+ typeset L_SHOULD_SUCCEED="$1"
+
+ typeset L_LOGOP=
L_LOGOP="$(pick_logop "$L_SHOULD_SUCCEED")"
"$L_LOGOP" mounted "$TESTPOOL"/"$TESTFS"
@@ -170,9 +175,21 @@ function setup_dataset
check_dataset true
}
+function force_transactions
+{
+ typeset L_TIMES="$1"
+ typeset i=
+ for ((i=0; i < L_TIMES; i++))
+ do
+ touch "$TESTDIR"/"test" || return $?
+ zpool sync -f "$TESTPOOL" || return $?
+ done
+ return 0
+}
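+
+# Example: run_test_one below calls "force_transactions 256" so every
+# slot in the label's uberblock ring is given a valid TXG.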
+
function get_practical_size
{
- L_SIZE="$1"
+ typeset L_SIZE="$1"
if [ "$((L_SIZE % LABEL_SIZE))" -ne 0 ]; then
echo "$(((L_SIZE / LABEL_SIZE) * LABEL_SIZE))"
@@ -183,10 +200,11 @@ function get_practical_size
function corrupt_sized_label_checksum
{
- L_SIZE="$1"
- L_LABEL="$2"
- L_DEVICE="$3"
+ typeset L_SIZE="$1"
+ typeset L_LABEL="$2"
+ typeset L_DEVICE="$3"
+ typeset L_PRACTICAL_SIZE=
L_PRACTICAL_SIZE="$(get_practical_size "$L_SIZE")"
typeset -a L_OFFSETS=("$LABEL_CKSUM_START" \
@@ -201,8 +219,8 @@ function corrupt_sized_label_checksum
function corrupt_labels
{
- L_SIZE="$1"
- L_DISK="$2"
+ typeset L_SIZE="$1"
+ typeset L_DISK="$2"
corrupt_sized_label_checksum "$L_SIZE" 0 "$L_DISK"
corrupt_sized_label_checksum "$L_SIZE" 1 "$L_DISK"
@@ -212,11 +230,14 @@ function corrupt_labels
function try_import_and_repair
{
- L_REPAIR_SHOULD_SUCCEED="$1"
- L_IMPORT_SHOULD_SUCCEED="$2"
- L_OP="$3"
- L_POOLDISK="$4"
+ typeset L_REPAIR_SHOULD_SUCCEED="$1"
+ typeset L_IMPORT_SHOULD_SUCCEED="$2"
+ typeset L_OP="$3"
+ typeset L_POOLDISK="$4"
+
+ typeset L_REPAIR_LOGOP=
L_REPAIR_LOGOP="$(pick_logop "$L_REPAIR_SHOULD_SUCCEED")"
+ typeset L_IMPORT_LOGOP=
L_IMPORT_LOGOP="$(pick_logop "$L_IMPORT_SHOULD_SUCCEED")"
log_mustnot zpool import "$TESTPOOL" -d "$L_POOLDISK"
@@ -230,10 +251,10 @@ function try_import_and_repair
function prepare_vdev
{
- L_SIZE="$1"
- L_BACKFILE="$2"
+ typeset L_SIZE="$1"
+ typeset L_BACKFILE="$2"
- l_devname=
+ typeset l_devname=
if truncate -s "$L_SIZE" "$L_BACKFILE"; then
if is_linux; then
l_devname="$(losetup -f "$L_BACKFILE" --show)"
@@ -248,7 +269,7 @@ function prepare_vdev
function run_test_one
{
- L_SIZE="$1"
+ typeset L_SIZE="$1"
VIRTUAL_DEVICE="$(prepare_vdev "$L_SIZE" "$VIRTUAL_DISK")"
log_must test -e "$VIRTUAL_DEVICE"
@@ -257,6 +278,9 @@ function run_test_one
setup_dataset
+ # Force 256 extra transactions to ensure all uberblocks are assigned a TXG
+ log_must force_transactions 256
+
log_must zpool export "$TESTPOOL"
corrupt_labels "$L_SIZE" "$VIRTUAL_DISK"
@@ -272,7 +296,7 @@ function run_test_one
function make_mirrored_pool
{
- L_SIZE="$1"
+ typeset L_SIZE="$1"
VIRTUAL_DEVICE="$(prepare_vdev "$L_SIZE" "$VIRTUAL_DISK")"
log_must test -e "$VIRTUAL_DEVICE"
@@ -296,7 +320,7 @@ function export_and_cleanup_vdisk
function run_test_two
{
- L_SIZE="$1"
+ typeset L_SIZE="$1"
make_mirrored_pool "$L_SIZE"
@@ -317,7 +341,7 @@ function run_test_two
function run_test_three
{
- L_SIZE="$1"
+ typeset L_SIZE="$1"
make_mirrored_pool "$L_SIZE"
@@ -342,7 +366,7 @@ function run_test_three
function run_test_four
{
- L_SIZE="$1"
+ typeset L_SIZE="$1"
make_mirrored_pool "$L_SIZE"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_label_repair_001.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_label_repair_001.ksh
index ce159b555d20..b5b24322f882 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_label_repair_001.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_label_repair_001.ksh
@@ -18,13 +18,16 @@
# Strategy:
#
# 1. Create pool on a loopback device with some test data
-# 2. Export the pool.
-# 3. Corrupt all label checksums in the pool
-# 4. Check that pool cannot be imported
-# 5. Verify that it cannot be imported after using zhack label repair -u
+# 2. Checksum repair should work with a valid TXG. Repeatedly write and
+# sync the pool so there are enough transactions for every uberblock
+# to have a TXG
+# 3. Export the pool.
+# 4. Corrupt all label checksums in the pool
+# 5. Check that pool cannot be imported
+# 6. Verify that it cannot be imported after using zhack label repair -u
# to ensure that the -u option will quit on corrupted checksums.
-# 6. Use zhack label repair -c on device
-# 7. Check that pool can be imported and that data is intact
+# 7. Use zhack label repair -c on device
+# 8. Check that pool can be imported and that data is intact
. "$STF_SUITE"/tests/functional/cli_root/zhack/library.kshlib
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_metaslab_leak.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_metaslab_leak.ksh
new file mode 100755
index 000000000000..0d2a39be6b5a
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zhack/zhack_metaslab_leak.ksh
@@ -0,0 +1,70 @@
+#!/bin/ksh
+# SPDX-License-Identifier: CDDL-1.0
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+
+#
+# Description:
+#
+# Test whether zhack metaslab leak functions correctly
+#
+# Strategy:
+#
+# 1. Create pool on a loopback device with some test data
+# 2. Gather pool capacity stats
+# 3. Generate fragmentation data with zdb
+# 4. Destroy the pool
+# 5. Create a new pool with the same configuration
+# 6. Export the pool
+# 7. Apply the fragmentation information with zhack metaslab leak
+# 8. Import the pool
+# 9. Verify that pool capacity stats match
+
+. "$STF_SUITE"/include/libtest.shlib
+
+verify_runnable "global"
+
+function cleanup
+{
+ poolexists $TESTPOOL && zpool destroy $TESTPOOL
+ rm -f "$tmp"
+}
+
+log_onexit cleanup
+log_assert "zhack metaslab leak leaks the right amount of space"
+
+typeset tmp=$(mktemp)
+
+log_must zpool create $TESTPOOL $DISKS
+for i in `seq 1 16`; do
+ log_must dd if=/dev/urandom of=/$TESTPOOL/f$i bs=1M count=16
+ log_must zpool sync $TESTPOOL
+done
+for i in `seq 2 2 16`; do
+ log_must rm /$TESTPOOL/f$i
+done
+for i in `seq 1 16`; do
+ log_must touch /$TESTPOOL/g$i
+ log_must zpool sync $TESTPOOL
+done
+
+alloc=$(zpool get -Hpo value alloc $TESTPOOL)
+log_must eval "zdb -m --allocated-map $TESTPOOL > $tmp"
+log_must zpool destroy $TESTPOOL
+
+log_must zpool create $TESTPOOL $DISKS
+log_must zpool export $TESTPOOL
+log_must eval "zhack metaslab leak $TESTPOOL < $tmp"
+log_must zpool import $TESTPOOL
+
+alloc2=$(zpool get -Hpo value alloc $TESTPOOL)
+
+within_percent $alloc $alloc2 98 ||
+ log_fail "space usage changed too much: $alloc to $alloc2"
+
+log_pass "zhack metaslab leak behaved correctly"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib
index 091d65bb4f33..74780bb02141 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.kshlib
@@ -27,6 +27,7 @@
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
@@ -89,3 +90,44 @@ function save_dump_dev
fi
echo $dumpdev
}
+
+function zpool_create_add_setup
+{
+ typeset -i i=0
+
+ while ((i < 10)); do
+ log_must truncate -s $MINVDEVSIZE $TEST_BASE_DIR/vdev$i
+
+ eval vdev$i=$TEST_BASE_DIR/vdev$i
+ ((i += 1))
+ done
+
+ if is_linux; then
+ vdev_lo="$(losetup -f "$vdev4" --show)"
+ elif is_freebsd; then
+ vdev_lo=/dev/"$(mdconfig -a -t vnode -f "$vdev4")"
+ else
+ vdev_lo="$(lofiadm -a "$vdev4")"
+ fi
+}
+
+function zpool_create_add_cleanup
+{
+ datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1
+
+ if [[ -e $vdev_lo ]]; then
+ if is_linux; then
+ log_must losetup -d "$vdev_lo"
+ elif is_freebsd; then
+ log_must mdconfig -d -u "$vdev_lo"
+ else
+ log_must lofiadm -d "$vdev_lo"
+ fi
+ fi
+
+ typeset -i i=0
+ while ((i < 10)); do
+ rm -f $TEST_BASE_DIR/vdev$i
+ ((i += 1))
+ done
+}
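+
+# Callers pair these helpers (see the zpool_add_warn_* tests):
+#   log_onexit zpool_create_add_cleanup
+#   zpool_create_add_setup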
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_create.ksh
index df085a2ec746..661e55998d8d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_010_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_create.ksh
@@ -23,67 +23,51 @@
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
-# Use is subject to license terms.
-#
-
-#
-# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
-. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
#
# DESCRIPTION:
-# Verify zpool add succeed when adding vdevs with matching redundancy.
+# Verify zpool add succeeds when adding vdevs with matching redundancy
+# and warns with differing redundancy for a healthy pool.
#
# STRATEGY:
# 1. Create several files == $MINVDEVSIZE.
# 2. Verify 'zpool add' succeeds with matching redundancy.
# 3. Verify 'zpool add' warns with differing redundancy.
-# 4. Verify 'zpool add' warns with differing redundancy after removal.
#
verify_runnable "global"
-function cleanup
-{
- datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1
-
- typeset -i i=0
- while ((i < 10)); do
- rm -f $TEST_BASE_DIR/vdev$i
- ((i += 1))
- done
-}
-
+log_assert "Verify 'zpool add' warns for differing redundancy."
+log_onexit zpool_create_add_cleanup
-log_assert "Verify 'zpool add' succeed with keywords combination."
-log_onexit cleanup
+zpool_create_add_setup
-# 1. Create several files == $MINVDEVSIZE.
typeset -i i=0
-while ((i < 10)); do
- log_must truncate -s $MINVDEVSIZE $TEST_BASE_DIR/vdev$i
-
- eval vdev$i=$TEST_BASE_DIR/vdev$i
- ((i += 1))
-done
+typeset -i j=0
set -A redundancy0_create_args \
"$vdev0"
set -A redundancy1_create_args \
"mirror $vdev0 $vdev1" \
- "raidz1 $vdev0 $vdev1"
+ "raidz1 $vdev0 $vdev1" \
+ "draid1:1s $vdev0 $vdev1 $vdev9"
set -A redundancy2_create_args \
"mirror $vdev0 $vdev1 $vdev2" \
- "raidz2 $vdev0 $vdev1 $vdev2"
+ "raidz2 $vdev0 $vdev1 $vdev2" \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev9"
set -A redundancy3_create_args \
"mirror $vdev0 $vdev1 $vdev2 $vdev3" \
- "raidz3 $vdev0 $vdev1 $vdev2 $vdev3"
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3" \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
set -A redundancy0_add_args \
"$vdev5" \
@@ -93,21 +77,19 @@ set -A redundancy1_add_args \
"mirror $vdev5 $vdev6" \
"raidz1 $vdev5 $vdev6" \
"raidz1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
- "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8"
+ "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8" \
+ "draid1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 draid1 $vdev7 $vdev8"
set -A redundancy2_add_args \
"mirror $vdev5 $vdev6 $vdev7" \
- "raidz2 $vdev5 $vdev6 $vdev7"
+ "raidz2 $vdev5 $vdev6 $vdev7" \
+ "draid2 $vdev5 $vdev6 $vdev7"
set -A redundancy3_add_args \
"mirror $vdev5 $vdev6 $vdev7 $vdev8" \
- "raidz3 $vdev5 $vdev6 $vdev7 $vdev8"
-
-set -A log_args "log" "$vdev4"
-set -A cache_args "cache" "$vdev4"
-set -A spare_args "spare" "$vdev4"
-
-typeset -i j=0
+ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" \
+ "draid3 $vdev5 $vdev6 $vdev7 $vdev8"
function zpool_create_add
{
@@ -148,30 +130,6 @@ function zpool_create_forced_add
done
}
-function zpool_create_rm_add
-{
- typeset -n create_args=$1
- typeset -n add_args=$2
- typeset -n rm_args=$3
-
- i=0
- while ((i < ${#create_args[@]})); do
- j=0
- while ((j < ${#add_args[@]})); do
- log_must zpool create $TESTPOOL1 ${create_args[$i]}
- log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
- log_must zpool add $TESTPOOL1 ${add_args[$j]}
- log_must zpool remove $TESTPOOL1 ${rm_args[1]}
- log_mustnot zpool add $TESTPOOL1 ${rm_args[1]}
- log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
- log_must zpool destroy -f $TESTPOOL1
-
- ((j += 1))
- done
- ((i += 1))
- done
-}
-
# 2. Verify 'zpool add' succeeds with matching redundancy.
zpool_create_add redundancy0_create_args redundancy0_add_args
zpool_create_add redundancy1_create_args redundancy1_add_args
@@ -195,17 +153,4 @@ zpool_create_forced_add redundancy3_create_args redundancy0_add_args
zpool_create_forced_add redundancy3_create_args redundancy1_add_args
zpool_create_forced_add redundancy3_create_args redundancy2_add_args
-# 4. Verify 'zpool add' warns with differing redundancy after removal.
-zpool_create_rm_add redundancy1_create_args redundancy1_add_args log_args
-zpool_create_rm_add redundancy2_create_args redundancy2_add_args log_args
-zpool_create_rm_add redundancy3_create_args redundancy3_add_args log_args
-
-zpool_create_rm_add redundancy1_create_args redundancy1_add_args cache_args
-zpool_create_rm_add redundancy2_create_args redundancy2_add_args cache_args
-zpool_create_rm_add redundancy3_create_args redundancy3_add_args cache_args
-
-zpool_create_rm_add redundancy1_create_args redundancy1_add_args spare_args
-zpool_create_rm_add redundancy2_create_args redundancy2_add_args spare_args
-zpool_create_rm_add redundancy3_create_args redundancy3_add_args spare_args
-
-log_pass "'zpool add' succeed with keywords combination."
+log_pass "Verify 'zpool add' warns for differing redundancy."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh
new file mode 100755
index 000000000000..313eb3666f27
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_degraded.ksh
@@ -0,0 +1,204 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
+
+#
+# DESCRIPTION:
+# Verify zpool add succeeds when adding vdevs with matching redundancy
+# and warns with differing redundancy for a degraded pool.
+#
+# STRATEGY:
+# 1. Create several files == $MINVDEVSIZE.
+# 2. Verify 'zpool add' succeeds with matching redundancy
+# 3. Verify 'zpool add' warns with differing redundancy when
+# a. Degraded pool with replaced mismatch vdev (file vs disk)
+# b. Degraded pool dRAID distributed spare active
+# c. Degraded pool hot spare active
+#
+
+verify_runnable "global"
+
+log_assert "Verify 'zpool add' warns for differing redundancy."
+log_onexit zpool_create_add_cleanup
+
+zpool_create_add_setup
+
+set -A redundancy1_create_args \
+ "mirror $vdev0 $vdev1" \
+ "raidz1 $vdev0 $vdev1" \
+ "draid1:1s $vdev0 $vdev1 $vdev9"
+
+set -A redundancy2_create_args \
+ "mirror $vdev0 $vdev1 $vdev2" \
+ "raidz2 $vdev0 $vdev1 $vdev2" \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev9"
+
+set -A redundancy3_create_args \
+ "mirror $vdev0 $vdev1 $vdev2 $vdev3" \
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3" \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
+
+set -A redundancy1_add_args \
+ "mirror $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8" \
+ "draid1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 draid1 $vdev7 $vdev8"
+
+set -A redundancy2_add_args \
+ "mirror $vdev5 $vdev6 $vdev7" \
+ "raidz2 $vdev5 $vdev6 $vdev7" \
+ "draid2 $vdev5 $vdev6 $vdev7"
+
+set -A redundancy3_add_args \
+ "mirror $vdev5 $vdev6 $vdev7 $vdev8" \
+ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" \
+ "draid3 $vdev5 $vdev6 $vdev7 $vdev8"
+
+set -A redundancy1_create_draid_args \
+ "draid1:1s $vdev0 $vdev1 $vdev2"
+
+set -A redundancy2_create_draid_args \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev3"
+
+set -A redundancy3_create_draid_args \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
+
+set -A redundancy1_create_spare_args \
+ "mirror $vdev0 $vdev1 spare $vdev_lo" \
+ "raidz1 $vdev0 $vdev1 spare $vdev_lo" \
+ "draid1 $vdev0 $vdev1 spare $vdev_lo"
+
+set -A redundancy2_create_spare_args \
+ "mirror $vdev0 $vdev1 $vdev2 spare $vdev_lo" \
+ "raidz2 $vdev0 $vdev1 $vdev2 spare $vdev_lo" \
+ "draid2 $vdev0 $vdev1 $vdev2 spare $vdev_lo"
+
+set -A redundancy3_create_spare_args \
+ "mirror $vdev0 $vdev1 $vdev2 $vdev3 spare $vdev_lo" \
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3 spare $vdev_lo" \
+ "draid3 $vdev0 $vdev1 $vdev2 $vdev3 spare $vdev_lo"
+
+set -A replace_args "$vdev1" "$vdev_lo"
+set -A draid1_args "$vdev1" "draid1-0-0"
+set -A draid2_args "$vdev1" "draid2-0-0"
+set -A draid3_args "$vdev1" "draid3-0-0"
+
+typeset -i i=0
+typeset -i j=0
+
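+# Both helpers below take the *names* of three arrays, bound via ksh namerefs
+# (typeset -n): the pool-creation args, the vdev-add args, and a
+# (device, replacement) pair used to degrade the pool before the add.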
+function zpool_create_degraded_add
+{
+ typeset -n create_args=$1
+ typeset -n add_args=$2
+ typeset -n rm_args=$3
+
+ i=0
+ while ((i < ${#create_args[@]})); do
+ j=0
+ while ((j < ${#add_args[@]})); do
+ log_must zpool create $TESTPOOL1 ${create_args[$i]}
+ log_must zpool offline -f $TESTPOOL1 ${rm_args[0]}
+ log_must zpool replace -w $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_must zpool add $TESTPOOL1 ${add_args[$j]}
+ log_must zpool destroy -f $TESTPOOL1
+ log_must zpool labelclear -f ${rm_args[0]}
+
+ ((j += 1))
+ done
+ ((i += 1))
+ done
+}
+
+function zpool_create_forced_degraded_add
+{
+ typeset -n create_args=$1
+ typeset -n add_args=$2
+ typeset -n rm_args=$3
+
+ i=0
+ while ((i < ${#create_args[@]})); do
+ j=0
+ while ((j < ${#add_args[@]})); do
+ log_must zpool create $TESTPOOL1 ${create_args[$i]}
+ log_must zpool offline -f $TESTPOOL1 ${rm_args[0]}
+ log_must zpool replace -w $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_mustnot zpool add $TESTPOOL1 ${add_args[$j]}
+ log_must zpool add --allow-replication-mismatch $TESTPOOL1 ${add_args[$j]}
+ log_must zpool destroy -f $TESTPOOL1
+ log_must zpool labelclear -f ${rm_args[0]}
+
+ ((j += 1))
+ done
+ ((i += 1))
+ done
+}
+
+# 2. Verify 'zpool add' succeeds with matching redundancy and a degraded pool.
+zpool_create_degraded_add redundancy1_create_args redundancy1_add_args replace_args
+zpool_create_degraded_add redundancy2_create_args redundancy2_add_args replace_args
+zpool_create_degraded_add redundancy3_create_args redundancy3_add_args replace_args
+
+# 3. Verify 'zpool add' warns with differing redundancy and a degraded pool.
+#
+# a. Degraded pool with replaced mismatch vdev (file vs disk)
+zpool_create_forced_degraded_add redundancy1_create_args redundancy2_add_args replace_args
+zpool_create_forced_degraded_add redundancy1_create_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy2_create_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy2_create_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy3_create_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy3_create_args redundancy2_add_args replace_args
+
+# b. Degraded pool dRAID distributed spare active
+
+zpool_create_forced_degraded_add redundancy1_create_draid_args redundancy2_add_args draid1_args
+zpool_create_forced_degraded_add redundancy1_create_draid_args redundancy3_add_args draid1_args
+
+zpool_create_forced_degraded_add redundancy2_create_draid_args redundancy1_add_args draid2_args
+zpool_create_forced_degraded_add redundancy2_create_draid_args redundancy3_add_args draid2_args
+
+zpool_create_forced_degraded_add redundancy3_create_draid_args redundancy1_add_args draid3_args
+zpool_create_forced_degraded_add redundancy3_create_draid_args redundancy2_add_args draid3_args
+
+# c. Degraded pool hot spare active
+zpool_create_forced_degraded_add redundancy1_create_spare_args redundancy2_add_args replace_args
+zpool_create_forced_degraded_add redundancy1_create_spare_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy2_create_spare_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy2_create_spare_args redundancy3_add_args replace_args
+
+zpool_create_forced_degraded_add redundancy3_create_spare_args redundancy1_add_args replace_args
+zpool_create_forced_degraded_add redundancy3_create_spare_args redundancy2_add_args replace_args
+
+log_pass "Verify 'zpool add' warns for differing redundancy."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh
new file mode 100755
index 000000000000..782858e301ac
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_warn_removal.ksh
@@ -0,0 +1,126 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2012, 2016 by Delphix. All rights reserved.
+# Copyright 2025 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib
+
+#
+# DESCRIPTION:
+# Verify zpool add succeeds when adding vdevs with matching redundancy
+# and warns with differing redundancy after removal.
+#
+# STRATEGY:
+# 1. Create several files == $MINVDEVSIZE.
+# 2. Verify 'zpool add' warns with differing redundancy after removal.
+#
+
+verify_runnable "global"
+
+log_assert "Verify 'zpool add' warns for differing redundancy."
+log_onexit zpool_create_add_cleanup
+
+zpool_create_add_setup
+
+typeset -i i=0
+typeset -i j=0
+
+set -A redundancy1_create_args \
+ "mirror $vdev0 $vdev1" \
+ "raidz1 $vdev0 $vdev1" \
+ "draid1:1s $vdev0 $vdev1 $vdev9"
+
+set -A redundancy2_create_args \
+ "mirror $vdev0 $vdev1 $vdev2" \
+ "raidz2 $vdev0 $vdev1 $vdev2" \
+ "draid2:1s $vdev0 $vdev1 $vdev2 $vdev9"
+
+set -A redundancy3_create_args \
+ "mirror $vdev0 $vdev1 $vdev2 $vdev3" \
+ "raidz3 $vdev0 $vdev1 $vdev2 $vdev3" \
+ "draid3:1s $vdev0 $vdev1 $vdev2 $vdev3 $vdev9"
+
+set -A redundancy1_add_args \
+ "mirror $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6" \
+ "raidz1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 raidz1 $vdev7 $vdev8" \
+ "draid1 $vdev5 $vdev6 mirror $vdev7 $vdev8" \
+ "mirror $vdev5 $vdev6 draid1 $vdev7 $vdev8"
+
+set -A redundancy2_add_args \
+ "mirror $vdev5 $vdev6 $vdev7" \
+ "raidz2 $vdev5 $vdev6 $vdev7" \
+ "draid2 $vdev5 $vdev6 $vdev7"
+
+set -A redundancy3_add_args \
+ "mirror $vdev5 $vdev6 $vdev7 $vdev8" \
+ "raidz3 $vdev5 $vdev6 $vdev7 $vdev8" \
+ "draid3 $vdev5 $vdev6 $vdev7 $vdev8"
+
+set -A log_args "log" "$vdev_lo"
+set -A cache_args "cache" "$vdev_lo"
+set -A spare_args "spare" "$vdev_lo"
+
+
+function zpool_create_rm_add
+{
+ typeset -n create_args=$1
+ typeset -n add_args=$2
+ typeset -n rm_args=$3
+
+ i=0
+ while ((i < ${#create_args[@]})); do
+ j=0
+ while ((j < ${#add_args[@]})); do
+ log_must zpool create $TESTPOOL1 ${create_args[$i]}
+ log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_must zpool add $TESTPOOL1 ${add_args[$j]}
+ log_must zpool remove $TESTPOOL1 ${rm_args[1]}
+ log_mustnot zpool add $TESTPOOL1 ${rm_args[1]}
+ log_must zpool add $TESTPOOL1 ${rm_args[0]} ${rm_args[1]}
+ log_must zpool destroy -f $TESTPOOL1
+
+ ((j += 1))
+ done
+ ((i += 1))
+ done
+}
+
+# 2. Verify 'zpool add' warns with differing redundancy after removal.
+zpool_create_rm_add redundancy1_create_args redundancy1_add_args log_args
+zpool_create_rm_add redundancy2_create_args redundancy2_add_args log_args
+zpool_create_rm_add redundancy3_create_args redundancy3_add_args log_args
+
+zpool_create_rm_add redundancy1_create_args redundancy1_add_args cache_args
+zpool_create_rm_add redundancy2_create_args redundancy2_add_args cache_args
+zpool_create_rm_add redundancy3_create_args redundancy3_add_args cache_args
+
+zpool_create_rm_add redundancy1_create_args redundancy1_add_args spare_args
+zpool_create_rm_add redundancy2_create_args redundancy2_add_args spare_args
+zpool_create_rm_add redundancy3_create_args redundancy3_add_args spare_args
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh
new file mode 100755
index 000000000000..099b5426031d
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/cleanup.ksh
@@ -0,0 +1,30 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+#
+. $STF_SUITE/include/libtest.shlib
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh
new file mode 100755
index 000000000000..3529a0ccc015
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/setup.ksh
@@ -0,0 +1,32 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+#
+. $STF_SUITE/include/libtest.shlib
+
+verify_runnable "global"
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
new file mode 100644
index 000000000000..ea4b0bd2756d
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
@@ -0,0 +1,235 @@
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+# Since we want to make sure that iostat responds correctly as pools appear and
+# disappear, we run it in the background and capture its output to a file.
+# Once we're done, we parse the output and ensure it matches what we'd expect
+# from the operations we performed.
+#
+# Because iostat is producing output every interval, it may produce the "same"
+# output for each step of the change; in fact, we want that to make sure we
+# don't miss anything. So, we describe what we expect as a series of "chunks".
+# Each chunk is a particular kind of output, which may repeat. Current known
+# chunk types are:
+#
+# NOPOOL: the text "no pools available"
+# HEADER: three lines, starting with "capacity", "pool" and "----" respectively.
+# (the rough shape of the normal iostat header).
+# POOL1: a line starting with "pool1" (stats line for a pool of that name)
+# POOL2: a line starting with "pool2"
+# POOLBOTH: three lines, starting with "pool1", "pool2" (either order) and
+# "-----" respectively. (the pool stat output for multiple pools)
+#
+# (the parser may produce other chunks in a failed parse to assist with
+# debugging, but they should never be part of the "wanted" output See the
+# parser commentary below).
+#
+# To help recognise the start of a new interval output, we run iostat with the
+# -T u option, which prints a numeric timestamp at the start of each interval:
+# before the header when one is emitted, otherwise before the pool stats.
+#
+# To keep the test run shorter, we use a subsecond interval, but to make sure
+# nothing is missed, we sleep for three intervals after each change.
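+#
+# As an illustration (not a real capture): a run where a single pool "pool1"
+# is created and later destroyed would reduce to the chunk sequence
+#
+#   NOPOOL HEADER POOL1 NOPOOL
+#
+# with each chunk possibly repeating several times in the raw output before
+# being collapsed.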
+
+typeset _iostat_out=$(mktemp)
+typeset _iostat_pid=""
+
+function cleanup_iostat {
+ if [[ -n $_iostat_pid ]] ; then
+ kill -KILL $_iostat_pid || true
+ fi
+ rm -f $_iostat_out
+}
+
+function start_iostat {
+ zpool iostat -T u $@ 0.1 > $_iostat_out 2>&1 &
+ _iostat_pid=$!
+}
+
+function stop_iostat {
+ kill -TERM $_iostat_pid
+ wait $_iostat_pid
+ _iostat_pid=""
+}
+
+function delay_iostat {
+ sleep 0.3
+}
+
+typeset -a _iostat_expect
+function expect_iostat {
+ typeset chunk=$1
+ _iostat_expect+=($chunk)
+}
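+
+# Tests call e.g. `expect_iostat "NOPOOL"` just before performing the action
+# expected to produce that chunk (see the interval tests that source this
+# file).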
+
+# Parse the output. The `_iostat_state` var is used to track state across
+# multiple lines. The `_iostat_last` var and the `_got_iostat` function are
+# used to record the completed chunks, and to collapse repetitions.
+typeset -a _iostat_got
+typeset _iostat_last=""
+typeset _iostat_state=""
+
+function _got_iostat {
+ typeset chunk=$1
+ if [[ -n $chunk && $_iostat_last != $chunk ]] ; then
+ _iostat_last=$chunk
+ _iostat_got+=($chunk)
+ fi
+ _iostat_state=""
+}
+
+function verify_iostat {
+
+ cat $_iostat_out | while read line ; do
+
+ # The "no pools available" text has no timestamp or other
+ # header, and should never appear in the middle of multiline
+ # chunk, so we can close any in-flight state.
+ if [[ $line = "no pools available" ]] ; then
+ _got_iostat $_iostat_state
+ _got_iostat "NOPOOL"
+ continue
+ fi
+
+ # A run of digits alone on the line is a timestamp (the `-T u`
+ # switch to `iostat`). It closes any in-flight state as a
+ # complete chunk, and indicates the start of a new chunk.
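+		# (e.g. a bare "1747425863", i.e. seconds since the epoch)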
+ if [[ -z ${line/#+([0-9])/} ]] ; then
+ _got_iostat $_iostat_state
+ _iostat_state="TIMESTAMP"
+ continue
+ fi
+
+ # For this test, the first word of each line should be unique,
+ # so we extract it and use it for simplicity.
+ typeset first=${line%% *}
+
+ # Header is emitted whenever the pool list changes. It has
+ # three lines:
+ #
+ # capacity operations bandwidth
+ # pool alloc free read write read write
+ # ---------- ----- ----- ----- ----- ----- -----
+ #
+ # Each line moves the state; when we get to a run of dashes, we
+ # commit. Note that we check for one-or-more dashes, because
+ # the width can vary depending on the length of pool name.
+ #
+ if [[ $_iostat_state = "TIMESTAMP" &&
+ $first = "capacity" ]] ; then
+ _iostat_state="INHEADER1"
+ continue
+ fi
+ if [[ $_iostat_state = "INHEADER1" &&
+ $first = "pool" ]] ; then
+ _iostat_state="INHEADER2"
+ continue
+ fi
+ if [[ $_iostat_state = "INHEADER2" &&
+ -z ${first/#+(-)/} ]] ; then
+ # Headers never repeat, so if the last committed chunk
+ # was a header, we commit this one as EXTRAHEADER so we
+ # can see it in the error output.
+ if [[ $_iostat_last = "HEADER" ]] ; then
+ _got_iostat "EXTRAHEADER"
+ elif [[ $_iostat_last != "EXTRAHEADER" ]] ; then
+ _got_iostat "HEADER"
+ fi
+ _iostat_state="HEADER"
+ continue
+ fi
+
+ # A pool stat line looks like:
+ #
+ # pool1 147K 240M 0 0 0 0
+ #
+ # If there are multiple pools, iostat follows them with a
+ # separator of dashed lines:
+ #
+ # pool1 147K 240M 0 0 0 0
+ # pool2 147K 240M 0 0 0 0
+ # ---------- ----- ----- ----- ----- ----- -----
+ #
+ # Stats rows always start after a timestamp or a header. If the
+ # header was emitted, we won't see a timestamp here (it goes
+ # before the header).
+ #
+ # Because our test exercises both pools on their own and
+ # together, we allow pools in either order. In practice they
+ # are sorted, but that's a side-effect of the implementation
+ # (see zpool_compare()), so we're not going to rely on it here.
+ if [[ $first = "pool1" ]] || [[ $first = "pool2" ]] ; then
+
+ # First line, track which one we saw. If it's a
+ # standalone line, it will be committed by the next
+ # NOPOOL or TIMESTAMP above (or the `_got_iostat` after
+ # the loop if this is the last line).
+ if [[ $_iostat_state == "TIMESTAMP" ||
+ $_iostat_state == "HEADER" ]] ; then
+ if [[ $first = "pool1" ]] ; then
+ _iostat_state="POOL1"
+ elif [[ $first = "pool2" ]] ; then
+ _iostat_state="POOL2"
+ fi
+ continue
+ fi
+
+ # If this is the second pool, we're in a multi-pool
+ # block, and need to look for the separator to close it
+ # out.
+ if [[ $_iostat_state = "POOL1" && $first = "pool2" ]] ||
+ [[ $_iostat_state = "POOL2" && $first = "pool1" ]] ;
+ then
+ _iostat_state="INPOOLBOTH"
+ continue
+ fi
+ fi
+
+ # Separator after the stats block.
+ if [[ $_iostat_state = "INPOOLBOTH" &&
+ -z ${first/#+(-)/} ]] ; then
+ _got_iostat "POOLBOTH"
+ continue
+ fi
+
+ # Anything else will fall through to here. We commit any
+	# in-flight state, then "UNKNOWN", all to help with debugging.
+ if [[ $_iostat_state != "UNKNOWN" ]] ; then
+ _got_iostat $_iostat_state
+ _got_iostat "UNKNOWN"
+ fi
+ done
+
+ # Close out any remaining state.
+ _got_iostat $_iostat_state
+
+ # Compare what we wanted with what we got, and pass/fail the test!
+ if [[ "${_iostat_expect[*]}" != "${_iostat_got[*]}" ]] ; then
+ log_note "expected: ${_iostat_expect[*]}"
+ log_note " got: ${_iostat_got[*]}"
+ log_fail "zpool iostat did not produce expected output"
+ fi
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh
new file mode 100755
index 000000000000..8e040058ec3e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_all.ksh
@@ -0,0 +1,90 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+# `zpool iostat <N>` should keep running and update the pools it displays as
+# pools are created/destroyed/imported/exported.
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
+
+typeset vdev1=$(mktemp)
+typeset vdev2=$(mktemp)
+
+function cleanup {
+ cleanup_iostat
+
+ poolexists pool1 && destroy_pool pool1
+ poolexists pool2 && destroy_pool pool2
+ rm -f $vdev1 $vdev2
+}
+
+log_onexit cleanup
+
+log_must mkfile $MINVDEVSIZE $vdev1 $vdev2
+
+expect_iostat "NOPOOL"
+
+start_iostat
+
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL1"
+log_must zpool create pool1 $vdev1
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOLBOTH"
+log_must zpool create pool2 $vdev2
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool export -a
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL2"
+log_must zpool import -d $vdev2 pool2
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOLBOTH"
+log_must zpool import -d $vdev1 pool1
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL2"
+log_must zpool destroy pool1
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool destroy pool2
+delay_iostat
+
+stop_iostat
+
+verify_iostat
+
+log_pass "zpool iostat in interval mode follows pool updates"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh
new file mode 100755
index 000000000000..ab1f258aa1cd
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_iostat/zpool_iostat_interval_some.ksh
@@ -0,0 +1,80 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+# `zpool iostat <pools> <N>` should keep running and only show the listed pools.
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_iostat/zpool_iostat.kshlib
+
+typeset vdev1=$(mktemp)
+typeset vdev2=$(mktemp)
+
+function cleanup {
+ cleanup_iostat
+
+ poolexists pool1 && destroy_pool pool1
+ poolexists pool2 && destroy_pool pool2
+ rm -f $vdev1 $vdev2
+}
+
+log_onexit cleanup
+
+log_must mkfile $MINVDEVSIZE $vdev1 $vdev2
+
+log_must zpool create pool1 $vdev1
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL1"
+start_iostat pool1
+delay_iostat
+
+log_must zpool create pool2 $vdev2
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool export -a
+delay_iostat
+
+log_must zpool import -d $vdev2 pool2
+delay_iostat
+
+expect_iostat "HEADER"
+expect_iostat "POOL1"
+log_must zpool import -d $vdev1 pool1
+delay_iostat
+
+expect_iostat "NOPOOL"
+log_must zpool destroy pool1
+delay_iostat
+
+log_must zpool destroy pool2
+delay_iostat
+
+stop_iostat
+
+verify_iostat
+
+log_pass "zpool iostat in interval mode with pools follows listed pool updates"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_date_range_002.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_date_range_002.ksh
new file mode 100755
index 000000000000..9327df81a5c5
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_date_range_002.ksh
@@ -0,0 +1,76 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2025 Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg
+
+#
+# DESCRIPTION:
+# Verify that the timestamp database updates all the tables as expected.
+#
+# STRATEGY:
+# 1. Decrease the note and flush frequency of the txg database.
+# 2. Force the pool to sync several txgs
+# 3. Verify that there are entries in each of the "month", "day", and
+# "minute" tables.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ log_must restore_tunable SPA_NOTE_TXG_TIME
+ log_must restore_tunable SPA_FLUSH_TXG_TIME
+ rm /$TESTPOOL/f1
+}
+
+log_onexit cleanup
+
+log_assert "Verifiy timestamp databases all update as expected."
+
+log_must save_tunable SPA_NOTE_TXG_TIME
+log_must set_tunable64 SPA_NOTE_TXG_TIME 1
+log_must save_tunable SPA_FLUSH_TXG_TIME
+log_must set_tunable64 SPA_FLUSH_TXG_TIME 1
+
+log_must touch /$TESTPOOL/f1
+log_must zpool sync $TESTPOOL
+sleep 1
+log_must touch /$TESTPOOL/f1
+log_must zpool sync $TESTPOOL
+sleep 1
+log_must touch /$TESTPOOL/f1
+log_must zpool sync $TESTPOOL
+
+mos_zap="$(zdb -dddd $TESTPOOL 1)"
+minutes_entries=$(echo "$mos_zap" | grep "txg_log_time:minutes" | awk '{print $5}')
+days_entries=$(echo "$mos_zap" | grep "txg_log_time:days" | awk '{print $5}')
+months_entries=$(echo "$mos_zap" | grep "txg_log_time:months" | awk '{print $5}')
+
+[[ "$minutes_entries" -ne "0" ]] || log_fail "0 entries in the minutes table"
+[[ "$days_entries" -ne "0" ]] || log_fail "0 entries in the days table"
+[[ "$months_entries" -ne "0" ]] || log_fail "0 entries in the months table"
+
+log_pass "Verified all timestamp databases had entries as expected."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcstat_001_pos.ksh
index 700bd9a6f529..d63b72f8039d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arcstat_001_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcstat_001_pos.ksh
@@ -37,7 +37,7 @@ log_assert "arcstat generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arcstat ${args[i]} > /dev/null"
+ log_must eval "zarcstat ${args[i]} > /dev/null"
((i = i + 1))
done
log_pass "arcstat generates output and doesn't return an error code"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcsummary_001_pos.ksh
index 0840878fdb0d..b7faac5243c9 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_001_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcsummary_001_pos.ksh
@@ -30,16 +30,16 @@
is_freebsd && ! python3 -c 'import sysctl' 2>/dev/null && log_unsupported "python3 sysctl module missing"
-log_assert "arc_summary generates output and doesn't return an error code"
+log_assert "zarcsummary generates output and doesn't return an error code"
# Without this, the below checks aren't going to work the way we hope...
set -o pipefail
for arg in "" "-a" "-d" "-p 1" "-g" "-s arc" "-r"; do
- log_must eval "arc_summary $arg > /dev/null"
+ log_must eval "zarcsummary $arg > /dev/null"
done
-log_must eval "arc_summary | head > /dev/null"
-log_must eval "arc_summary | head -1 > /dev/null"
+log_must eval "zarcsummary | head > /dev/null"
+log_must eval "zarcsummary | head -1 > /dev/null"
-log_pass "arc_summary generates output and doesn't return an error code"
+log_pass "zarcsummary generates output and doesn't return an error code"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcsummary_002_neg.ksh
index ec4abe35409d..227646777ba0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/arc_summary_002_neg.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zarcsummary_002_neg.ksh
@@ -30,10 +30,10 @@
is_freebsd && ! python3 -c 'import sysctl' 2>/dev/null && log_unsupported "python3 sysctl module missing"
-log_assert "arc_summary generates an error code with invalid options"
+log_assert "zarcsummary generates an error code with invalid options"
for arg in "-x" "-5" "-p 7" "--err" "-@"; do
- log_mustnot eval "arc_summary $arg > /dev/null"
+ log_mustnot eval "zarcsummary $arg > /dev/null"
done
-log_pass "arc_summary generates an error code with invalid options"
+log_pass "zarcsummary generates an error code with invalid options"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/cleanup.ksh
new file mode 100755
index 000000000000..4a59e15cc693
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/cleanup.ksh
@@ -0,0 +1,43 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/delegate/delegate_common.kshlib
+
+
+poolexists $TESTPOOL1 && \
+ destroy_pool $TESTPOOL1
+
+del_user $STAFF1
+del_user $STAFF2
+del_group $STAFF_GROUP
+
+del_user $OTHER1
+del_user $OTHER2
+del_group $OTHER_GROUP
+
+default_cleanup
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/setup.ksh
new file mode 100755
index 000000000000..0978193eddc4
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/setup.ksh
@@ -0,0 +1,50 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/delegate/delegate_common.kshlib
+
+# Create the staff group and add two users to it
+log_must add_group $STAFF_GROUP
+if ! id $STAFF1 > /dev/null 2>&1; then
+ log_must add_user $STAFF_GROUP $STAFF1
+fi
+if ! id $STAFF2 > /dev/null 2>&1; then
+ log_must add_user $STAFF_GROUP $STAFF2
+fi
+
+# Create the other group and add two users to it
+log_must add_group $OTHER_GROUP
+if ! id $OTHER1 > /dev/null 2>&1; then
+ log_must add_user $OTHER_GROUP $OTHER1
+fi
+if ! id $OTHER2 > /dev/null 2>&1; then
+ log_must add_user $OTHER_GROUP $OTHER2
+fi
+DISK=${DISKS%% *}
+
+default_raidz_setup $DISKS
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/zfs_send_usertest.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/zfs_send_usertest.ksh
new file mode 100755
index 000000000000..f62f2b07929c
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/zfs_send_delegation_user/zfs_send_usertest.ksh
@@ -0,0 +1,145 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara Inc.
+#
+
+# STRATEGY:
+# 1. Create a pool (this is done by the test framework)
+# 2. Create a user
+# 3. Create an encrypted dataset
+# 4. Write random data to the encrypted dataset
+# 5. Snapshot the dataset
+# 6. As root: attempt a send and raw send (both should succeed)
+# 7. As user: attempt a send and raw send (both should fail, no permission)
+# 8. Create a delegation (zfs allow -u user send testpool/encrypted_dataset)
+# 9. As root: attempt a send and raw send (both should succeed)
+# 10. As user: attempt a send and raw send (both should succeed)
+# 11. Create a delegation (zfs allow -u user sendraw testpool/encrypted_dataset)
+# 12. As root: attempt a send and raw send (both should succeed)
+# 13. As user: attempt a send and raw send (send should fail, raw send should succeed)
+# 14. Disable delegation (zfs unallow)
+# 15. As root: attempt a send and raw send (both should succeed)
+# 16. As user: attempt a send and raw send (both should fail, no permission)
+# 17. Clean up (handled by framework)
+# Root tests verifying this doesn't affect the root user live under
+# ../cli_root/zfs_send_delegation/.
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_create/zfs_create_common.kshlib
+. $STF_SUITE/tests/functional/cli_root/zfs_create/properties.kshlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+. $STF_SUITE/tests/functional/delegate/delegate.cfg
+
+# create encrypted dataset
+
+log_must eval "echo $PASSPHRASE | zfs create -o encryption=on -o keyformat=passphrase $TESTPOOL/$TESTFS1"
+
+# create target dataset for receives
+log_must zfs create $TESTPOOL/$TESTFS2
+
+# set user perms
+# need to run chown for fs permissions for $OTHER1
+typeset perms="snapshot,reservation,compression,checksum,userprop,receive,mount,create"
+
+log_must zfs allow $OTHER1 $perms $TESTPOOL/$TESTFS1
+log_must zfs allow $OTHER1 $perms $TESTPOOL/$TESTFS2
+log_must chown ${OTHER1}:${OTHER_GROUP} /$TESTPOOL/$TESTFS2
+
+# create random data
+log_must fill_fs $TESTPOOL/$TESTFS1/child 1 2047 1024 1 R
+
+# snapshot
+log_must zfs snapshot $TESTPOOL/$TESTFS1@snap1
+
+# Note: we need to use `sh -c` here because the quoting on <<<"$*" in the
+# user_run wrapper breaks down once pipes and redirects get involved.
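+# (e.g. in `user_run $OTHER1 zfs send ... | zfs receive ...`, the pipe could
+# end up interpreted by the calling root shell rather than inside the user's
+# shell, so the receive would not run as $OTHER1)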
+
+# check baseline send abilities (should fail)
+log_mustnot user_run $OTHER1 sh -c "'zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv0_user_datastream.$$'"
+# verify nothing went through
+if [ -s $TESTPOOL/$TESTFS2/zfsrecv0_user_datastream.$$ ]
+then
+	log_fail "A zfs receive was completed in $TESTPOOL/$TESTFS2/zfsrecv0_user_datastream !"
+fi
+log_mustnot user_run $OTHER1 sh -c "'zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv0raw_user_datastream.$$'"
+# verify nothing went through
+if [ -s $TESTPOOL/$TESTFS2/zfsrecv0raw_user_datastream.$$ ]
+then
+	log_fail "A zfs receive was completed in $TESTPOOL/$TESTFS2/zfsrecv0raw_user_datastream !"
+fi
+
+# create delegation
+log_must zfs allow $OTHER1 send $TESTPOOL/$TESTFS1
+
+# attempt send with full allow (should pass)
+log_must user_run $OTHER1 sh -c "'zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv1_user_datastream.$$'"
+log_must user_run $OTHER1 sh -c "'zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv1raw_user_datastream.$$'"
+
+
+# create raw delegation
+log_must zfs allow $OTHER1 send:raw $TESTPOOL/$TESTFS1
+# We have to remove 'send' to confirm 'send raw' only allows what we want
+log_must zfs unallow -u $OTHER1 send $TESTPOOL/$TESTFS1
+
+# test new sendraw abilities (send should fail, sendraw should pass)
+log_mustnot user_run $OTHER1 sh -c "'zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv2_user_datastream.$$'"
+# verify nothing went through
+if [ -s $TESTPOOL/$TESTFS2/zfsrecv2_user_datastream.$$ ]
+then
+	log_fail "A zfs receive was completed in $TESTPOOL/$TESTFS2/zfsrecv2_user_datastream !"
+fi
+log_must user_run $OTHER1 sh -c "'zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv2raw_user_datastream.$$'"
+
+# disable raw delegation
+log_must zfs unallow -u $OTHER1 send:raw $TESTPOOL/$TESTFS1
+log_must zfs allow $OTHER1 send $TESTPOOL/$TESTFS1
+
+# test with raw taken away (should pass)
+log_must user_run $OTHER1 sh -c "'zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv3_user_datastream.$$'"
+log_must user_run $OTHER1 sh -c "'zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv3raw_user_datastream.$$'"
+
+# disable send abilities
+log_must zfs unallow -u $OTHER1 send $TESTPOOL/$TESTFS1
+
+# verify original send abilities (should fail)
+log_mustnot user_run $OTHER1 sh -c "'zfs send $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv4_user_datastream.$$'"
+# verify nothing went through
+if [ -s $TESTPOOL/$TESTFS2/zfsrecv4_user_datastream.$$ ]
+then
+	log_fail "A zfs receive was completed in $TESTPOOL/$TESTFS2/zfsrecv4_user_datastream !"
+fi
+log_mustnot user_run $OTHER1 sh -c "'zfs send -w $TESTPOOL/$TESTFS1@snap1 | zfs receive -u $TESTPOOL/$TESTFS2/zfsrecv4raw_user_datastream.$$'"
+# verify nothing went through
+if [ -s $TESTPOOL/$TESTFS2/zfsrecv4raw_user_datastream.$$ ]
+then
+	log_fail "A zfs receive was completed in $TESTPOOL/$TESTFS2/zfsrecv4raw_user_datastream !"
+fi
+
+
+function cleanup
+{
+	datasetexists $TESTPOOL/$TESTFS1 && \
+	    destroy_dataset $TESTPOOL/$TESTFS1 -r
+	datasetexists $TESTPOOL/$TESTFS2 && \
+	    destroy_dataset $TESTPOOL/$TESTFS2 -r
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
index 0a402e71ee68..345239b88680 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
@@ -1234,10 +1234,10 @@ function verify_fs_aedsx
typeset oldval
set -A modes "on" "off"
oldval=$(get_prop $perm $fs)
- if [[ $oldval == "on" ]]; then
- n=1
- elif [[ $oldval == "off" ]]; then
+ if [[ $oldval == "off" ]]; then
n=0
+ else
+ n=1
fi
log_note "$user zfs set $perm=${modes[$n]} $fs"
user_run $user zfs set $perm=${modes[$n]} $fs
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_degraded_sit_out.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_degraded_sit_out.ksh
new file mode 100755
index 000000000000..d5feb6936b4b
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_degraded_sit_out.ksh
@@ -0,0 +1,106 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+# Copyright (c) 2025 by Klara, Inc.
+
+# DESCRIPTION:
+# Verify that vdevs 'sit out' when they are slow
+#
+# STRATEGY:
+# 1. Create various raidz/draid pools
+# 2. Degrade/fault one of the disks.
+# 3. Inject delays into one of the disks
+# 4. Verify disk is set to 'sit out' for awhile.
+# 5. Wait for READ_SIT_OUT_SECS and verify sit out state is lifted.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+function cleanup
+{
+ restore_tunable READ_SIT_OUT_SECS
+ restore_tunable SIT_OUT_CHECK_INTERVAL
+ log_must zinject -c all
+ log_must zpool events -c
+ destroy_pool $TESTPOOL2
+ log_must rm -f $TEST_BASE_DIR/vdev.$$.*
+}
+
+log_assert "Verify sit_out works"
+
+log_onexit cleanup
+
+# shorten sit out period for testing
+save_tunable READ_SIT_OUT_SECS
+set_tunable32 READ_SIT_OUT_SECS 5
+
+save_tunable SIT_OUT_CHECK_INTERVAL
+set_tunable64 SIT_OUT_CHECK_INTERVAL 20
+
+log_must truncate -s 150M $TEST_BASE_DIR/vdev.$$.{0..9}
+
+for raidtype in raidz2 raidz3 draid2 draid3 ; do
+ log_must zpool create $TESTPOOL2 $raidtype $TEST_BASE_DIR/vdev.$$.{0..9}
+ log_must zpool set autosit=on $TESTPOOL2 "${raidtype}-0"
+ log_must dd if=/dev/urandom of=/$TESTPOOL2/bigfile bs=1M count=400
+ log_must zpool export $TESTPOOL2
+ log_must zpool import -d $TEST_BASE_DIR $TESTPOOL2
+
+ BAD_VDEV=$TEST_BASE_DIR/vdev.$$.9
+ SLOW_VDEV=$TEST_BASE_DIR/vdev.$$.8
+
+ # Initial state should not be sitting out
+ log_must eval [[ "$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)" == "off" ]]
+
+ # Delay our reads 200ms to trigger sit out
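+	# (zinject -D takes <latency in ms>:<lanes>, so -D200:1 limits reads
+	# to a single lane and delays each by at least 200ms)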
+ log_must zinject -d $SLOW_VDEV -D200:1 -T read $TESTPOOL2
+ type=$((RANDOM % 2))
+ [[ "$type" -eq "0" ]] && action="degrade" || action="fault"
+ log_must zinject -d $BAD_VDEV -A $action -T read $TESTPOOL2
+
+ # Do some reads and wait for us to sit out
+ for i in {0..99} ; do
+ dd if=/$TESTPOOL2/bigfile skip=$i bs=2M count=1 of=/dev/null &
+ dd if=/$TESTPOOL2/bigfile skip=$((i + 100)) bs=2M count=1 of=/dev/null
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+ done
+
+ log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)" == "on"
+
+ # Clear fault injection
+ log_must zinject -c all
+
+ # Wait for us to exit our sit out period
+ log_must wait_sit_out $TESTPOOL2 $SLOW_VDEV 10
+
+ log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)" == "off"
+ destroy_pool $TESTPOOL2
+ log_must zpool labelclear -f $BAD_VDEV
+done
+
+log_pass "sit_out works correctly"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out.ksh
new file mode 100755
index 000000000000..37f616cf56ee
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out.ksh
@@ -0,0 +1,102 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+
+# DESCRIPTION:
+# Verify that vdevs 'sit out' when they are slow
+#
+# STRATEGY:
+# 1. Create various raidz/draid pools
+# 2. Inject delays into one of the disks
+# 3. Verify disk is set to 'sit out' for awhile.
+# 4. Wait for READ_SIT_OUT_SECS and verify sit out state is lifted.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+function cleanup
+{
+ restore_tunable READ_SIT_OUT_SECS
+ restore_tunable SIT_OUT_CHECK_INTERVAL
+ log_must zinject -c all
+ log_must zpool events -c
+ destroy_pool $TESTPOOL2
+ log_must rm -f $TEST_BASE_DIR/vdev.$$.*
+}
+
+log_assert "Verify sit_out works"
+
+log_onexit cleanup
+
+# shorten sit out period for testing
+save_tunable READ_SIT_OUT_SECS
+set_tunable32 READ_SIT_OUT_SECS 5
+
+save_tunable SIT_OUT_CHECK_INTERVAL
+set_tunable64 SIT_OUT_CHECK_INTERVAL 20
+
+log_must truncate -s200M $TEST_BASE_DIR/vdev.$$.{0..9}
+
+for raidtype in raidz raidz2 raidz3 draid1 draid2 draid3 ; do
+ log_must zpool create $TESTPOOL2 $raidtype $TEST_BASE_DIR/vdev.$$.{0..9}
+ log_must zpool set autosit=on $TESTPOOL2 "${raidtype}-0"
+ log_must dd if=/dev/urandom of=/$TESTPOOL2/bigfile bs=1M count=600
+ log_must zpool export $TESTPOOL2
+ log_must zpool import -d $TEST_BASE_DIR $TESTPOOL2
+
+ BAD_VDEV=$TEST_BASE_DIR/vdev.$$.9
+
+ # Initial state should not be sitting out
+ log_must eval [[ "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV)" == "off" ]]
+
+ # Delay our reads 200ms to trigger sit out
+ log_must zinject -d $BAD_VDEV -D200:1 -T read $TESTPOOL2
+
+ # Do some reads and wait for us to sit out
+ for i in {0..99} ; do
+ dd if=/$TESTPOOL2/bigfile skip=$i bs=2M count=1 of=/dev/null &
+ dd if=/$TESTPOOL2/bigfile skip=$((i + 100)) bs=2M count=1 of=/dev/null &
+ dd if=/$TESTPOOL2/bigfile skip=$((i + 200)) bs=2M count=1 of=/dev/null
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+ done
+
+ log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV)" == "on"
+
+ # Clear fault injection
+ log_must zinject -c all
+
+ # Wait for us to exit our sit out period
+ log_must wait_sit_out $TESTPOOL2 $BAD_VDEV 10
+
+ # Verify sit_out was cleared during wait_sit_out
+ log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV)" == "off"
+
+ destroy_pool $TESTPOOL2
+done
+
+log_pass "sit_out works correctly"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out_neg.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out_neg.ksh
new file mode 100755
index 000000000000..457105a66453
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/slow_vdev_sit_out_neg.ksh
@@ -0,0 +1,116 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2024 by Lawrence Livermore National Security, LLC.
+# Copyright (c) 2025 by Klara, Inc.
+
+# DESCRIPTION:
+# Verify that we don't sit out too many vdevs
+#
+# STRATEGY:
+# 1. Create draid2 pool
+# 2. Inject delays into three of the disks
+# 3. Do reads to trigger sit-outs
+# 4. Verify exactly 2 disks sit out
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+function cleanup
+{
+ restore_tunable READ_SIT_OUT_SECS
+ restore_tunable SIT_OUT_CHECK_INTERVAL
+ log_must zinject -c all
+ log_must zpool events -c
+ destroy_pool $TESTPOOL2
+ log_must rm -f $TEST_BASE_DIR/vdev.$$.*
+}
+
+log_assert "Verify sit_out works"
+
+log_onexit cleanup
+
+# cleanup restores READ_SIT_OUT_SECS, so save it even though it is unchanged
+save_tunable READ_SIT_OUT_SECS
+
+save_tunable SIT_OUT_CHECK_INTERVAL
+set_tunable64 SIT_OUT_CHECK_INTERVAL 20
+
+log_must truncate -s 150M $TEST_BASE_DIR/vdev.$$.{0..9}
+
+log_must zpool create $TESTPOOL2 draid2 $TEST_BASE_DIR/vdev.$$.{0..9}
+log_must zpool set autosit=on $TESTPOOL2 draid2-0
+log_must dd if=/dev/urandom of=/$TESTPOOL2/bigfile bs=1M count=400
+log_must zpool export $TESTPOOL2
+log_must zpool import -d $TEST_BASE_DIR $TESTPOOL2
+
+BAD_VDEV1=$TEST_BASE_DIR/vdev.$$.7
+BAD_VDEV2=$TEST_BASE_DIR/vdev.$$.8
+BAD_VDEV3=$TEST_BASE_DIR/vdev.$$.9
+
+# Initial state should not be sitting out
+log_must eval [[ "$(get_vdev_prop autosit $TESTPOOL2 draid2-0)" == "on" ]]
+log_must eval [[ "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV1)" == "off" ]]
+log_must eval [[ "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV2)" == "off" ]]
+log_must eval [[ "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV3)" == "off" ]]
+
+# Delay our reads 200ms to trigger sit out
+log_must zinject -d $BAD_VDEV1 -D200:1 -T read $TESTPOOL2
+
+# Do some reads and wait for us to sit out
+for i in {0..99} ; do
+ dd if=/$TESTPOOL2/bigfile skip=$i bs=2M count=1 of=/dev/null &
+ dd if=/$TESTPOOL2/bigfile skip=$((i + 100)) bs=2M count=1 of=/dev/null
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV1)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+done
+log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV1)" == "on"
+
+log_must zinject -d $BAD_VDEV2 -D200:1 -T read $TESTPOOL2
+# Do some reads and wait for us to sit out
+for i in {0..99} ; do
+ dd if=/$TESTPOOL2/bigfile skip=$i bs=2M count=1 of=/dev/null &
+ dd if=/$TESTPOOL2/bigfile skip=$((i + 100)) bs=2M count=1 of=/dev/null
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV2)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+done
+log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV2)" == "on"
+
+log_must zinject -d $BAD_VDEV3 -D200:1 -T read $TESTPOOL2
+# Do some reads and wait for us to sit out
+for i in {0..99} ; do
+ dd if=/$TESTPOOL2/bigfile skip=$i bs=2M count=1 of=/dev/null &
+ dd if=/$TESTPOOL2/bigfile skip=$((i + 100)) bs=2M count=1 of=/dev/null
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV3)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+done
+log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $BAD_VDEV3)" == "off"
+
+
+log_pass "sit_out works correctly"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/zed_synchronous_zedlet.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/zed_synchronous_zedlet.ksh
new file mode 100755
index 000000000000..6b732ea96d0c
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/zed_synchronous_zedlet.ksh
@@ -0,0 +1,149 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025 by Lawrence Livermore National Security, LLC.
+#
+
+# DESCRIPTION:
+# Verify ZED synchronous zedlets work as expected
+#
+# STRATEGY:
+# 1. Create a scrub_start zedlet that runs quickly
+# 2. Create a scrub_start zedlet that runs slowly (takes seconds)
+# 3. Create a scrub_finish zedlet that is synchronous and runs slowly
+# 4. Create a trim_start zedlet that runs quickly
+# 5. Scrub the pool
+# 6. Trim the pool
+# 7. Verify the synchronous scrub_finish zedlet waited for the scrub_start
+#    zedlets to finish (including the slow one). If the scrub_finish zedlet
+#    was not synchronous, it would have completed before the slow scrub_start
+#    zedlet.
+# 8. Verify the trim_start zedlet waited for the slow synchronous scrub_finish
+# zedlet to complete.
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/events/events_common.kshlib
+
+verify_runnable "both"
+
+OUR_ZEDLETS="scrub_start-async.sh scrub_start-slow.sh scrub_finish-sync-slow.sh trim_start-async.sh"
+
+OUTFILE="$TEST_BASE_DIR/zed_synchronous_zedlet_lines"
+TESTPOOL2=testpool2
+
+function cleanup
+{
+ zed_stop
+
+ for i in $OUR_ZEDLETS ; do
+ log_must rm -f $ZEDLET_DIR/$i
+ done
+ destroy_pool $TESTPOOL2
+ log_must rm -f $TEST_BASE_DIR/vdev-file-sync-zedlet
+ log_must rm -f $OUTFILE
+}
+
+log_assert "Verify ZED synchronous zedlets work as expected"
+
+log_onexit cleanup
+
+# Make a pool
+log_must truncate -s 100M $TEST_BASE_DIR/vdev-file-sync-zedlet
+log_must zpool create $TESTPOOL2 $TEST_BASE_DIR/vdev-file-sync-zedlet
+
+# Do an initial scrub
+log_must zpool scrub -w $TESTPOOL2
+
+log_must zpool events -c
+
+mkdir -p $ZEDLET_DIR
+
+# Create zedlets
+cat << EOF > $ZEDLET_DIR/scrub_start-async.sh
+#!/bin/ksh -p
+echo "\$(date) \$(basename \$0)" >> $OUTFILE
+EOF
+
+cat << EOF > $ZEDLET_DIR/scrub_start-slow.sh
+#!/bin/ksh -p
+sleep 3
+echo "\$(date) \$(basename \$0)" >> $OUTFILE
+EOF
+
+cat << EOF > $ZEDLET_DIR/scrub_finish-sync-slow.sh
+#!/bin/ksh -p
+sleep 3
+echo "\$(date) \$(basename \$0)" >> $OUTFILE
+EOF
+
+cat << EOF > $ZEDLET_DIR/trim_start-async.sh
+#!/bin/ksh -p
+echo "\$(date) \$(basename \$0)" >> $OUTFILE
+EOF
+
+for i in $OUR_ZEDLETS ; do
+ log_must chmod +x $ZEDLET_DIR/$i
+done
+
+log_must zed_start
+
+# Do a scrub - it should be instantaneous.
+log_must zpool scrub -w $TESTPOOL2
+
+# Start off a trim immediately after scrubbing. The trim should be
+# instantaneous and generate a trim_start event. This will happen in parallel
+# with the slow 'scrub_finish-sync-slow.sh' zedlet still running.
+log_must zpool trim -w $TESTPOOL2
+
+# Wait for the trim_finish event to happen for sanity. This is the *event*,
+# not the completion of zedlets for the event.
+log_must file_wait_event $ZED_DEBUG_LOG 'sysevent\.fs\.zfs\.trim_finish' 10
+
+# At a minimum, scrub_start-slow.sh + scrub_finish-sync-slow.sh will take a
+# total of 6 seconds to run, so wait 7 sec to be sure.
+sleep 7
+
+# If our zedlets were run in the right order, with sync correctly honored, you
+# will see this ordering in $OUTFILE:
+#
+# Fri May 16 12:04:23 PDT 2025 scrub_start-async.sh
+# Fri May 16 12:04:26 PDT 2025 scrub_start-slow.sh
+# Fri May 16 12:04:31 PDT 2025 scrub_finish-sync-slow.sh
+# Fri May 16 12:04:31 PDT 2025 trim_start-async.sh
+#
+# Check for this ordering
+
+# Get a list of just the script names, in the order they were executed, from
+# OUTFILE (the echo $(...) collapses the newline-separated matches onto one line)
+lines="$(echo $(grep -Eo '(scrub|trim)_.+\.sh$' $OUTFILE))"
+
+# Compare it to the ordering we expect
+expected="\
+scrub_start-async.sh \
+scrub_start-slow.sh \
+scrub_finish-sync-slow.sh \
+trim_start-async.sh"
+log_must test "$lines" == "$expected"
+
+log_pass "Verified synchronous zedlets"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode.kshlib
new file mode 100644
index 000000000000..d0b7404557ab
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode.kshlib
@@ -0,0 +1,149 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
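+# Map each sync-helper name to the command that exercises it. The literal
+# token DATAFILE is replaced with the real target path by failmode_sync_test
+# at run time.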
+typeset -A failmode_sync_helper_cmd=(
+ ["fsync"]='dd if=/dev/urandom of=DATAFILE bs=128k count=1 conv=fsync'
+ ["msync"]='mmap_write_sync DATAFILE'
+ ["osync"]='dd if=/dev/urandom of=DATAFILE bs=128k count=1 oflag=sync'
+ ["syncalways"]='dd if=/dev/urandom of=DATAFILE bs=128k count=1'
+)
+
+typeset -A failmode_sync_helper_dsopts=(
+ ["syncalways"]="-o sync=always"
+)
+
+function failmode_sync_cleanup
+{
+ zinject -c all || true
+ zpool clear $TESTPOOL || true
+ destroy_pool $TESTPOOL
+}
+
+#
+# failmode_sync_test <failmode> <helper>
+#
+# run a failmode sync test:
+# - failmode: wait|continue
+# - helper: fsync|msync|osync|syncalways
+#
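+# e.g. "failmode_sync_test wait fsync" checks that fsync() blocks while a
+# failmode=wait pool is suspended.
+#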
+function failmode_sync_test
+{
+ typeset failmode=$1
+ typeset helper=$2
+
+ # we'll need two disks, one for the main pool, one for the log
+ read -r DISK1 DISK2 _ <<<"$DISKS"
+
+ # file to write to the pool
+ typeset datafile="/$TESTPOOL/$TESTFS/datafile"
+
+ # create a single-disk pool with a separate log and the wanted failmode
+ log_must zpool create \
+ -f -o failmode=$failmode $TESTPOOL $DISK1 log $DISK2
+
+ # create the test dataset. we bias the ZIL towards the log device to
+ # try to ensure that the sync write never involves the main device
+ log_must zfs create \
+ -o recordsize=128k -o logbias=latency \
+ ${failmode_sync_helper_dsopts[$helper]} \
+ $TESTPOOL/$TESTFS
+
+ # create the target file. the ZIL head structure is created on first
+ # use, and does a full txg wait to finish, which we want to avoid
+ log_must dd if=/dev/zero of=$datafile bs=128k count=1 conv=fsync
+ log_must zpool sync
+
+ # inject errors. writes will fail, as will the followup probes
+ zinject -d $DISK1 -e io -T write $TESTPOOL
+ zinject -d $DISK1 -e nxio -T probe $TESTPOOL
+ zinject -d $DISK2 -e io -T write $TESTPOOL
+ zinject -d $DISK2 -e nxio -T probe $TESTPOOL
+
+ # run the helper program in the background. the pool should immediately
+ # suspend, and the sync op block or fail based on the failmode
+ typeset helper_cmd=${failmode_sync_helper_cmd[$helper]/DATAFILE/$datafile}
+ log_note "running failmode sync helper: $helper_cmd"
+ $helper_cmd &
+ typeset -i pid=$!
+
+ # should only take a moment, but give it a chance
+ log_note "waiting for pool to suspend"
+ typeset -i tries=10
+ until [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; do
+ if ((tries-- == 0)); then
+ log_fail "pool didn't suspend"
+ fi
+ sleep 1
+ done
+
+ # zil_commit() should have noticed the suspend by now
+ typeset -i zilerr=$(kstat zil.zil_commit_error_count)
+
+ # see if the helper program blocked
+ typeset -i blocked
+ if kill -0 $pid ; then
+ blocked=1
+ log_note "$helper: blocked in the kernel"
+ else
+ blocked=0
+ log_note "$helper: exited while pool suspended"
+ fi
+
+ # bring the pool back online
+ zinject -c all
+ zpool clear $TESTPOOL
+
+ # program definitely exited now, get its return code
+ wait $pid
+ typeset -i rc=$?
+
+ failmode_sync_cleanup
+
+ log_note "$helper: zilerr=$zilerr blocked=$blocked rc=$rc"
+
+ # confirm expected results for the failmode
+ if [[ $failmode = "wait" ]] ; then
+ # - the ZIL saw an error, and fell back to a txg sync
+ # - sync op blocked when the pool suspended
+ # - after resume, sync op succeeded, helper returned success
+ log_must test $zilerr -ne 0
+ log_must test $blocked -eq 1
+ log_must test $rc -eq 0
+ elif [[ $failmode = "continue" ]] ; then
+ # confirm expected results:
+ # - the ZIL saw an error, and fell back to a txg sync
+ # - helper exited when the pool suspended
+ # - sync op returned an error, so helper returned failure
+ log_must test $zilerr -ne 0
+ log_must test $blocked -eq 0
+ log_must test $rc -ne 0
+ else
+ log_fail "impossible failmode: $failmode"
+ fi
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_continue.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_continue.ksh
new file mode 100755
index 000000000000..7b145d3a2b4c
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_continue.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="fsync() returns when pool suspends with failmode=continue"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test continue fsync
+log_pass $desc
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_wait.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_wait.ksh
new file mode 100755
index 000000000000..677d226b5481
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_fsync_wait.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="fsync() blocks when pool suspends with failmode=wait"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test wait fsync
+log_pass $desc
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_continue.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_continue.ksh
new file mode 100755
index 000000000000..0c79ee15a1ba
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_continue.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="msync() returns when pool suspends with failmode=continue"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test continue msync
+log_pass $desc
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_wait.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_wait.ksh
new file mode 100755
index 000000000000..a59d8cc50d61
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_msync_wait.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="msync() blocks when pool suspends with failmode=wait"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test wait msync
+log_pass $desc
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_continue.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_continue.ksh
new file mode 100755
index 000000000000..c4fa0c8f042c
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_continue.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="O_SYNC returns when pool suspends with failmode=continue"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test continue osync
+log_pass $desc
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_wait.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_wait.ksh
new file mode 100755
index 000000000000..5f65cf92ad33
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_osync_wait.ksh
@@ -0,0 +1,37 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="O_SYNC blocks when pool suspends with failmode=wait"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test wait osync
+log_pass $desc
+
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_continue.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_continue.ksh
new file mode 100755
index 000000000000..b80d776224a0
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_continue.ksh
@@ -0,0 +1,37 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="write()+sync=always returns when pool suspends with failmode=continue"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test continue syncalways
+log_pass $desc
+
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_wait.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_wait.ksh
new file mode 100755
index 000000000000..4fcb167b5c77
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/failmode/failmode_syncalways_wait.ksh
@@ -0,0 +1,37 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/failmode/failmode.kshlib
+
+typeset desc="write()+sync=always blocks when pool suspends with failmode=wait"
+
+log_assert $desc
+log_onexit failmode_sync_cleanup
+log_must failmode_sync_test wait syncalways
+log_pass $desc
+
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_001_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_001_pos.ksh
index ef49a5d50f6c..45848bec1f5a 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_001_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_001_pos.ksh
@@ -86,7 +86,7 @@ log_must zpool set autoreplace=on $TESTPOOL
# Add some data to the pool
log_must zfs create $TESTPOOL/fs
-log_must fill_fs /$TESTPOOL/fs 4 100 4096 512 Z
+log_must fill_fs /$TESTPOOL/fs 4 100 4096 512 R
log_must zpool export $TESTPOOL
# Record the partition UUID for later comparison
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_002_pos.ksh
index a77957f32255..878b4e450340 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_002_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_replace_002_pos.ksh
@@ -119,7 +119,7 @@ log_must zpool set autoreplace=on $TESTPOOL
# Add some data to the pool
log_must zfs create $TESTPOOL/fs
-log_must fill_fs /$TESTPOOL/fs 4 100 4096 512 Z
+log_must fill_fs /$TESTPOOL/fs 4 100 4096 512 R
log_must zpool export $TESTPOOL
# Record the partition UUID for later comparison
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/fault_limits.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/fault_limits.ksh
index 1b3310edb98b..45b041503e22 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/fault_limits.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/fault_limits.ksh
@@ -67,7 +67,7 @@ log_must zpool create -f ${TESTPOOL} raidz${PARITY} ${disks[1..$((VDEV_CNT - 1))
# Add some data to the pool
log_must zfs create $TESTPOOL/fs
MNTPOINT="$(get_prop mountpoint $TESTPOOL/fs)"
-log_must fill_fs $MNTPOINT $PARITY 200 32768 1000 Z
+log_must fill_fs $MNTPOINT $PARITY 200 32768 100 R
sync_pool $TESTPOOL
# Replace the last child vdev to form a replacing vdev
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh
index 340994bb60c5..b5df1c7e37f8 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh
@@ -101,7 +101,7 @@ sync_pool $TESTPOOL
log_must zfs create $TESTPOOL/fs
MNTPOINT="$(get_prop mountpoint $TESTPOOL/fs)"
SECONDS=0
-log_must fill_fs $MNTPOINT 1 200 4096 10 Z
+log_must fill_fs $MNTPOINT 1 200 4096 10 R
log_note "fill_fs took $SECONDS seconds"
sync_pool $TESTPOOL
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/mount/mount_loopback.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/mount/mount_loopback.ksh
new file mode 100755
index 000000000000..86adef7ea032
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/mount/mount_loopback.ksh
@@ -0,0 +1,111 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2025 by Lawrence Livermore National Security, LLC.
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Verify that we can make an xfs filesystem on a ZFS-backed loopback device.
+#
+# See:
+# https://github.com/openzfs/zfs/pull/17298
+# https://github.com/openzfs/zfs/issues/17277
+#
+# STRATEGY:
+# 1. Make a pool
+# 2. Make a file on the pool or create zvol
+# 3. Mount the file/zvol behind a loopback device
+# 4. Create & mount an xfs filesystem on the loopback device
+
+function cleanup
+{
+ if [ -d $TEST_BASE_DIR/mnt ] ; then
+ umount $TEST_BASE_DIR/mnt
+ log_must rmdir $TEST_BASE_DIR/mnt
+ fi
+ if [ -n "$DEV" ] ; then
+ log_must losetup -d $DEV
+ fi
+ destroy_pool $TESTPOOL2
+ log_must rm -f $TEST_BASE_DIR/file1
+}
+
+if [ ! -x "$(which mkfs.xfs)" ] ; then
+ log_unsupported "No mkfs.xfs binary"
+fi
+
+if [ ! -d /lib/modules/$(uname -r)/kernel/fs/xfs ] && \
+ ! grep -qE '\sxfs$' /proc/filesystems ; then
+ log_unsupported "No XFS kernel support"
+fi
+
+log_assert "Make an xfs filesystem on a ZFS-backed loopback device"
+log_onexit cleanup
+
+# fio options
+export NUMJOBS=2
+export RUNTIME=3
+export PERF_RANDSEED=1234
+export PERF_COMPPERCENT=66
+export PERF_COMPCHUNK=0
+export BLOCKSIZE=128K
+export SYNC_TYPE=0
+export FILE_SIZE=$(( 1024 * 1024 ))
+
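+# Attach the given backing file (or zvol) to a free loop device, create and
+# mount an xfs filesystem on it, then exercise it with fio using both
+# buffered and O_DIRECT I/O before tearing everything down.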
+function do_test
+{
+ imgfile=$1
+ log_note "Running test on $imgfile"
+ log_must losetup -f $imgfile
+ DEV=$(losetup --associated $imgfile | grep -Eo '^/dev/loop[0-9]+')
+ log_must mkfs.xfs $DEV
+ mkdir $TEST_BASE_DIR/mnt
+ log_must mount $DEV $TEST_BASE_DIR/mnt
+ export DIRECTORY=$TEST_BASE_DIR/mnt
+
+ for d in 0 1 ; do
+ # fio options
+ export DIRECT=$d
+ log_must fio $FIO_SCRIPTS/mkfiles.fio
+ log_must fio $FIO_SCRIPTS/random_reads.fio
+ done
+ log_must umount $TEST_BASE_DIR/mnt
+ log_must rmdir $TEST_BASE_DIR/mnt
+ log_must losetup -d $DEV
+ DEV=""
+}
+
+log_must truncate -s 1G $TEST_BASE_DIR/file1
+log_must zpool create $TESTPOOL2 $TEST_BASE_DIR/file1
+log_must truncate -s 512M /$TESTPOOL2/img
+do_test /$TESTPOOL2/img
+log_must rm /$TESTPOOL2/img
+log_must zfs create -V 512M $TESTPOOL2/vol
+
+blkdev="$ZVOL_DEVDIR/$TESTPOOL2/vol"
+block_device_wait $blkdev
+do_test $blkdev
+
+log_pass "Verified xfs filesystem on a ZFS-backed loopback device"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh
index 3249bd93d5ce..a1da4a8631e1 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh
@@ -17,6 +17,7 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/refreserv/refreserv.cfg
+. $STF_SUITE/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
#
# DESCRIPTION:
@@ -24,7 +25,7 @@
#
# STRATEGY:
# 1. Create a pool with a single raidz vdev
-# 2. For each block size [512b, 1k, 128k] or [4k, 8k, 128k]
+# 2. For each block size [4k, 8k, 128k]
# - create a volume
# - fully overwrite it
# - verify that referenced is less than or equal to reservation
@@ -38,6 +39,7 @@
# 1. This test will use up to 14 disks but can cover the key concepts with
# 5 disks.
# 2. If the disks are a mixture of 4Kn and 512n/512e, failures are likely.
+# Therefore, the pool is created with ashift=12 to force 4 KiB sectors.
#
verify_runnable "global"
@@ -60,29 +62,10 @@ log_onexit cleanup
poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"
-# Testing tiny block sizes on ashift=12 pools causes so much size inflation
-# that small test disks may fill before creating small volumes. However,
-# testing 512b and 1K blocks on ashift=9 pools is an ok approximation for
-# testing the problems that arise from 4K and 8K blocks on ashift=12 pools.
-if is_freebsd; then
- bps=$(diskinfo -v ${alldisks[0]} | awk '/sectorsize/ { print $1 }')
-elif is_linux; then
- bps=$(lsblk -nrdo min-io /dev/${alldisks[0]})
-fi
-log_must test "$bps" -eq 512 -o "$bps" -eq 4096
-case "$bps" in
-512)
- allshifts=(9 10 17)
- maxpct=151
- ;;
-4096)
- allshifts=(12 13 17)
- maxpct=110
- ;;
-*)
- log_fail "bytes/sector: $bps != (512|4096)"
- ;;
-esac
+ashift=12
+allshifts=(12 13 17)
+maxpct=110
+
log_note "Testing in ashift=${allshifts[0]} mode"
# This loop handles all iterations of steps 1 through 4 described in strategy
@@ -99,18 +82,21 @@ for parity in 1 2 3; do
continue
fi
- log_must zpool create -O compression=off "$TESTPOOL" "$raid" "${disks[@]}"
+ log_must zpool create -o ashift=$ashift "$TESTPOOL" "$raid" "${disks[@]}"
for bits in "${allshifts[@]}"; do
vbs=$((1 << bits))
log_note "Testing $raid-$ndisks volblocksize=$vbs"
- vol=$TESTPOOL/$TESTVOL
+ vol=$TESTPOOL/$TESTVOL-$vbs
+ zdev=$ZVOL_DEVDIR/$vol
log_must zfs create -V ${volsize}m \
-o volblocksize=$vbs "$vol"
- block_device_wait "/dev/zvol/$vol"
- log_must dd if=/dev/zero of=/dev/zvol/$vol \
- bs=1024k count=$volsize
+ block_device_wait $zdev
+ blockdev_exists $zdev
+
+ log_must timeout 120 dd if=/dev/urandom of=$zdev \
+ bs=1024k count=$volsize status=progress
sync_pool $TESTPOOL
ref=$(zfs get -Hpo value referenced "$vol")
@@ -126,7 +112,7 @@ for parity in 1 2 3; do
log_must test "$deltapct" -le $maxpct
log_must_busy zfs destroy "$vol"
- block_device_wait
+ blockdev_missing $zdev
done
log_must_busy zpool destroy "$TESTPOOL"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/attach_resilver_sit_out.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/attach_resilver_sit_out.ksh
new file mode 100755
index 000000000000..6820aba184b7
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/attach_resilver_sit_out.ksh
@@ -0,0 +1,189 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/replacement/replacement.cfg
+
+#
+# DESCRIPTION:
+# Attaching a disk while another disk is sitting out reads should pass
+#
+# STRATEGY:
+# 1. Create raidz pools
+# 2. Make one disk slower and trigger a read sit out for that disk
+# 3. Start some random I/O
+# 4. Attach a disk to the pool.
+# 5. Verify the integrity of the file system and the resilvering.
+
+verify_runnable "global"
+
+save_tunable READ_SIT_OUT_SECS
+set_tunable32 READ_SIT_OUT_SECS 120
+save_tunable SIT_OUT_CHECK_INTERVAL
+set_tunable64 SIT_OUT_CHECK_INTERVAL 20
+
+function cleanup
+{
+ restore_tunable READ_SIT_OUT_SECS
+ restore_tunable SIT_OUT_CHECK_INTERVAL
+ log_must zinject -c all
+ log_must zpool events -c
+
+ if [[ -n "$child_pids" ]]; then
+ for wait_pid in $child_pids; do
+ kill $wait_pid
+ done
+ fi
+
+ if poolexists $TESTPOOL1; then
+ destroy_pool $TESTPOOL1
+ fi
+
+ [[ -e $TESTDIR ]] && log_must rm -rf $TESTDIR/*
+}
+
+log_assert "Replacing a disk during I/O with a sit out completes."
+
+options=""
+options_display="default options"
+
+log_onexit cleanup
+
+[[ -n "$HOLES_FILESIZE" ]] && options=" $options -f $HOLES_FILESIZE "
+
+[[ -n "$HOLES_BLKSIZE" ]] && options="$options -b $HOLES_BLKSIZE "
+
+[[ -n "$HOLES_COUNT" ]] && options="$options -c $HOLES_COUNT "
+
+[[ -n "$HOLES_SEED" ]] && options="$options -s $HOLES_SEED "
+
+[[ -n "$HOLES_FILEOFFSET" ]] && options="$options -o $HOLES_FILEOFFSET "
+
+options="$options -r "
+
+[[ -n "$options" ]] && options_display=$options
+
+child_pids=""
+
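+# Start background file_trunc writers, attach $disk to top-level vdev $vdev
+# while the slow disk is still sitting out, then export/import and verify
+# the pool once the resilver completes.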
+function attach_test
+{
+ typeset vdev=$1
+ typeset disk=$2
+
+ typeset i=0
+ while [[ $i -lt $iters ]]; do
+ log_note "Invoking file_trunc with: $options_display on $TESTFILE.$i"
+ file_trunc $options $TESTDIR/$TESTFILE.$i &
+ typeset pid=$!
+
+ sleep 1
+
+ child_pids="$child_pids $pid"
+ ((i = i + 1))
+ done
+
+ # attach disk with a slow drive still present
+ SECONDS=0
+ log_must zpool attach -w $TESTPOOL1 $vdev $disk
+ log_note took $SECONDS seconds to attach disk
+
+ for wait_pid in $child_pids
+ do
+ kill $wait_pid
+ done
+ child_pids=""
+
+ log_must zinject -c all
+ log_must zpool export $TESTPOOL1
+ log_must zpool import -d $TESTDIR $TESTPOOL1
+ log_must zfs umount $TESTPOOL1/$TESTFS1
+ log_must zdb -cdui $TESTPOOL1/$TESTFS1
+ log_must zfs mount $TESTPOOL1/$TESTFS1
+ verify_pool $TESTPOOL1
+}
+
+DEVSIZE="150M"
+specials_list=""
+i=0
+while [[ $i != 10 ]]; do
+ truncate -s $DEVSIZE $TESTDIR/$TESTFILE1.$i
+ specials_list="$specials_list $TESTDIR/$TESTFILE1.$i"
+
+ ((i = i + 1))
+done
+
+slow_disk=$TESTDIR/$TESTFILE1.3
+log_must truncate -s $DEVSIZE $TESTDIR/$REPLACEFILE
+
+# Test file size in MB
+count=200
+
+for type in "raidz1" "raidz2" "raidz3" ; do
+ create_pool $TESTPOOL1 $type $specials_list
+ log_must zpool set autosit=on $TESTPOOL1 "${type}-0"
+ log_must zfs create -o primarycache=none -o recordsize=512K \
+ $TESTPOOL1/$TESTFS1
+ log_must zfs set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1
+
+ log_must dd if=/dev/urandom of=/$TESTDIR1/bigfile bs=1M count=$count
+
+ # Make one disk 100ms slower to trigger a sit out
+ log_must zinject -d $slow_disk -D100:1 -T read $TESTPOOL1
+
+ # Do some reads and wait for sit out on slow disk
+ SECONDS=0
+ typeset -i size=0
+ for i in $(seq 1 $count) ; do
+ dd if=/$TESTDIR1/bigfile skip=$i bs=1M count=1 of=/dev/null
+ size=$i
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL1 $slow_disk)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+ done
+
+ log_must test "$(get_vdev_prop sit_out $TESTPOOL1 $slow_disk)" == "on"
+ log_note took $SECONDS seconds to reach sit out reading ${size}M
+ log_must zpool status -s $TESTPOOL1
+
+ typeset top=$(zpool status -j | jq -r ".pools.$TESTPOOL1.vdevs[].vdevs[].name")
+ attach_test $top $TESTDIR/$REPLACEFILE
+
+ log_must eval "zpool iostat -v $TESTPOOL1 | grep \"$REPLACEFILE\""
+
+ destroy_pool $TESTPOOL1
+ log_must rm -rf /$TESTPOOL1
+done
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/replace_resilver_sit_out.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/replace_resilver_sit_out.ksh
new file mode 100755
index 000000000000..4109dbaf45ac
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/replace_resilver_sit_out.ksh
@@ -0,0 +1,199 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/replacement/replacement.cfg
+
+#
+# DESCRIPTION:
+# Replacing a disk while another disk is sitting out reads should pass
+#
+# STRATEGY:
+# 1. Create raidz and draid pools
+# 2. Make one disk slower and trigger a read sit out for that disk
+# 3. Start some random I/O
+# 4. Replace a disk in the pool with another disk.
+# 5. Verify the integrity of the file system and the resilvering.
+#
+
+verify_runnable "global"
+
+save_tunable READ_SIT_OUT_SECS
+set_tunable32 READ_SIT_OUT_SECS 120
+save_tunable SIT_OUT_CHECK_INTERVAL
+set_tunable64 SIT_OUT_CHECK_INTERVAL 20
+
+function cleanup
+{
+ restore_tunable READ_SIT_OUT_SECS
+ restore_tunable SIT_OUT_CHECK_INTERVAL
+ log_must zinject -c all
+ log_must zpool events -c
+
+ if [[ -n "$child_pids" ]]; then
+ for wait_pid in $child_pids
+ do
+ kill $wait_pid
+ done
+ fi
+
+ if poolexists $TESTPOOL1; then
+ destroy_pool $TESTPOOL1
+ fi
+
+ [[ -e $TESTDIR ]] && log_must rm -rf $TESTDIR/*
+}
+
+log_assert "Replacing a disk during I/O with a sit out completes."
+
+options=""
+options_display="default options"
+
+log_onexit cleanup
+
+[[ -n "$HOLES_FILESIZE" ]] && options=" $options -f $HOLES_FILESIZE "
+
+[[ -n "$HOLES_BLKSIZE" ]] && options="$options -b $HOLES_BLKSIZE "
+
+[[ -n "$HOLES_COUNT" ]] && options="$options -c $HOLES_COUNT "
+
+[[ -n "$HOLES_SEED" ]] && options="$options -s $HOLES_SEED "
+
+[[ -n "$HOLES_FILEOFFSET" ]] && options="$options -o $HOLES_FILEOFFSET "
+
+options="$options -r "
+
+[[ -n "$options" ]] && options_display=$options
+
+child_pids=""
+
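+# Start background file_trunc writers, replace $disk1 with $disk2 (using a
+# sequential resilver when $repl_type is "seq"), then export/import and
+# verify the pool once the resilver completes.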
+function replace_test
+{
+ typeset -i iters=2
+ typeset disk1=$1
+ typeset disk2=$2
+ typeset repl_type=$3
+
+ typeset i=0
+ while [[ $i -lt $iters ]]; do
+ log_note "Invoking file_trunc with: $options_display on $TESTFILE.$i"
+ file_trunc $options $TESTDIR/$TESTFILE.$i &
+ typeset pid=$!
+
+ sleep 1
+
+ child_pids="$child_pids $pid"
+ ((i = i + 1))
+ done
+
+ typeset repl_flag="-w"
+ if [[ "$repl_type" == "seq" ]]; then
+ repl_flag="-ws"
+ fi
+ # replace disk with a slow drive still present
+ SECONDS=0
+ log_must zpool replace $repl_flag $TESTPOOL1 $disk1 $disk2
+ log_note took $SECONDS seconds to replace disk
+
+ for wait_pid in $child_pids
+ do
+ kill $wait_pid
+ done
+ child_pids=""
+
+ log_must zinject -c all
+ log_must zpool export $TESTPOOL1
+ log_must zpool import -d $TESTDIR $TESTPOOL1
+ log_must zfs umount $TESTPOOL1/$TESTFS1
+ log_must zdb -cdui $TESTPOOL1/$TESTFS1
+ log_must zfs mount $TESTPOOL1/$TESTFS1
+ verify_pool $TESTPOOL1
+}
+
+DEVSIZE="150M"
+specials_list=""
+i=0
+while [[ $i != 10 ]]; do
+ log_must truncate -s $DEVSIZE $TESTDIR/$TESTFILE1.$i
+ specials_list="$specials_list $TESTDIR/$TESTFILE1.$i"
+
+ ((i = i + 1))
+done
+
+slow_disk=$TESTDIR/$TESTFILE1.3
+log_must truncate -s $DEVSIZE $TESTDIR/$REPLACEFILE
+
+# Test file size in MB
+count=400
+
+for type in "raidz2" "raidz3" "draid2"; do
+ create_pool $TESTPOOL1 $type $specials_list
+ log_must zpool set autosit=on $TESTPOOL1 "${type}-0"
+ log_must zfs create -o primarycache=none -o recordsize=512K \
+ $TESTPOOL1/$TESTFS1
+ log_must zfs set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1
+
+ log_must dd if=/dev/urandom of=/$TESTDIR1/bigfile bs=1M count=$count
+
+ # Make one disk 100ms slower to trigger a sit out
+ log_must zinject -d $slow_disk -D100:1 -T read $TESTPOOL1
+
+ # Do some reads and wait for sit out on slow disk
+ SECONDS=0
+ typeset -i size=0
+ for i in $(seq 1 $count) ; do
+ dd if=/$TESTDIR1/bigfile skip=$i bs=1M count=1 of=/dev/null
+ size=$i
+
+ sit_out=$(get_vdev_prop sit_out $TESTPOOL1 $slow_disk)
+ if [[ "$sit_out" == "on" ]] ; then
+ break
+ fi
+ done
+ log_must test "$(get_vdev_prop sit_out $TESTPOOL1 $slow_disk)" == "on"
+ log_note took $SECONDS seconds to reach sit out reading ${size}M
+ log_must zpool status -s $TESTPOOL1
+
+ typeset repl_type="replace"
+ if [[ "$type" == "draid2" && $((RANDOM % 2)) -eq 0 ]]; then
+ repl_type="seq"
+ fi
+ replace_test $TESTDIR/$TESTFILE1.1 $TESTDIR/$REPLACEFILE $repl_type
+
+ log_must eval "zpool iostat -v $TESTPOOL1 | grep \"$REPLACEFILE\""
+
+ destroy_pool $TESTPOOL1
+ log_must rm -rf /$TESTPOOL1
+done
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh
index 26153aafbc02..0e79e9b8b70c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/setup.ksh
@@ -39,6 +39,6 @@
verify_runnable "global"
# create a pool without any features
-log_must mkfile 128m $TMPDEV
+log_must truncate -s $MINVDEVSIZE $TMPDEV
log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh
index d6bd69b7e134..e81d07794689 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/upgrade/upgrade_readonly_pool.ksh
@@ -35,17 +35,19 @@
verify_runnable "global"
-TESTFILE="$TESTDIR/file.bin"
-
log_assert "User accounting upgrade should not be executed on readonly pool"
log_onexit cleanup_upgrade
# 1. Create a pool with the feature@userobj_accounting disabled to simulate
# a legacy pool from a previous ZFS version.
-log_must zpool create -d -m $TESTDIR $TESTPOOL $TMPDEV
+log_must zpool create -d $TESTPOOL $TMPDEV
+log_must zfs create $TESTPOOL/$TESTFS
+
+MNTPNT=$(get_prop mountpoint $TESTPOOL/$TESTFS)
+TESTFILE="$MNTPNT/file.bin"
# 2. Create a file on the "legacy" dataset
-log_must touch $TESTDIR/file.bin
+log_must touch $TESTFILE
# 3. Enable feature@userobj_accounting on the pool and verify it is only
# "enabled" and not "active": upgrading starts when the filesystem is mounted
@@ -54,12 +56,12 @@ log_must test "enabled" == "$(get_pool_prop 'feature@userobj_accounting' $TESTPO
# 4. Export the pool and re-import is readonly, without mounting any filesystem
log_must zpool export $TESTPOOL
-log_must zpool import -o readonly=on -N -d "$(dirname $TMPDEV)" $TESTPOOL
+log_must zpool import -o readonly=on -N -d $TEST_BASE_DIR $TESTPOOL
# 5. Try to mount the root dataset manually without the "ro" option, then verify
# filesystem status and the pool feature status (not "active") to ensure the
# pool "readonly" status is enforced.
-log_must mount -t zfs -o zfsutil $TESTPOOL $TESTDIR
+log_must zfs mount -R $TESTPOOL
log_must stat "$TESTFILE"
log_mustnot touch "$TESTFILE"
log_must test "enabled" == "$(get_pool_prop 'feature@userobj_accounting' $TESTPOOL)"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh
new file mode 100755
index 000000000000..d4c9a0a41816
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_014_pos.ksh
@@ -0,0 +1,53 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025 by Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/xattr/xattr_common.kshlib
+
+#
+# DESCRIPTION:
+# The default xattr should be shown as 'sa', not 'on', for clarity.
+#
+# STRATEGY:
+# 1. Create a filesystem.
+# 2. Verify that the xattr property is shown as 'sa'.
+# 3. Manually set the value to 'dir', 'sa', 'on', and 'off'.
+# 4. Verify that it is shown as 'dir', 'sa', 'sa', and 'off'.
+#
+
+log_assert "The default and specific xattr values are displayed correctly."
+
+set -A args "dir" "sa" "on" "off"
+set -A display "dir" "sa" "sa" "off"
+
+log_must eval "[[ 'sa' == '$(zfs get -Hpo value xattr $TESTPOOL)' ]]"
+
+for i in `seq 0 3`; do
+ log_must zfs set xattr="${args[$i]}" $TESTPOOL
+ log_must eval "[[ '${display[$i]}' == '$(zfs get -Hpo value xattr $TESTPOOL)' ]]"
+done
+log_pass "The default and specific xattr values are displayed correctly."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
index 571a698eb63a..502ebada22dc 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
@@ -50,17 +50,53 @@ fi
typeset datafile1="$(mktemp -t zvol_misc_fua1.XXXXXX)"
typeset datafile2="$(mktemp -t zvol_misc_fua2.XXXXXX)"
+typeset datafile3="$(mktemp -t zvol_misc_fua3_log.XXXXXX)"
typeset zvolpath=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
+typeset DISK1=${DISKS%% *}
function cleanup
{
- rm "$datafile1" "$datafile2"
+ log_must zpool remove $TESTPOOL $datafile3
+	rm "$datafile1" "$datafile2" "$datafile3"
+}
+
+# Prints the total number of sync writes for a vdev
+# $1: vdev
+function get_sync
+{
+ zpool iostat -p -H -v -r $TESTPOOL $1 | \
+ awk '/[0-9]+$/{s+=$4+$5} END{print s}'
}
function do_test {
# Wait for udev to create symlinks to our zvol
block_device_wait $zvolpath
+ # Write using sync (creates FLUSH calls after writes, but not FUA)
+ old_vdev_writes=$(get_sync $DISK1)
+ old_log_writes=$(get_sync $datafile3)
+
+ log_must fio --name=write_iops --size=5M \
+ --ioengine=libaio --verify=0 --bs=4K \
+ --iodepth=1 --rw=randwrite --group_reporting=1 \
+ --filename=$zvolpath --sync=1
+
+ vdev_writes=$(( $(get_sync $DISK1) - $old_vdev_writes))
+ log_writes=$(( $(get_sync $datafile3) - $old_log_writes))
+
+ # When we're doing sync writes, we should see many more writes go to
+ # the log vs the first vdev. Experiments show anywhere from a 160-320x
+ # ratio of writes to the log vs the first vdev (due to some straggler
+ # writes to the first vdev).
+ #
+ # Check that we have a large ratio (100x) of sync writes going to the
+ # log device
+ ratio=$(($log_writes / $vdev_writes))
+ log_note "Got $log_writes log writes, $vdev_writes vdev writes."
+ if [ $ratio -lt 100 ] ; then
+ log_fail "Expected > 100x more log writes than vdev writes. "
+ fi
+
# Create a data file
log_must dd if=/dev/urandom of="$datafile1" bs=1M count=5
@@ -81,6 +117,8 @@ log_assert "Verify that a ZFS volume can do Force Unit Access (FUA)"
log_onexit cleanup
log_must zfs set compression=off $TESTPOOL/$TESTVOL
+log_must truncate -s 100M $datafile3
+log_must zpool add $TESTPOOL log $datafile3
log_note "Testing without blk-mq"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh
new file mode 100755
index 000000000000..669b59fac01f
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh
@@ -0,0 +1,66 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+verify_runnable "global"
+
+typeset -i nzvols=1000
+typeset -i parallel=$(( $(get_num_cpus) * 2 ))
+
+function cleanup {
+ for zvol in $(zfs list -Ho name -t vol) ; do
+ log_must_busy zfs destroy $zvol
+ done
+}
+
+log_onexit cleanup
+
+log_assert "stress test concurrent zvol create/destroy"
+
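+# Destroy every existing zvol, $parallel at a time, looping for as long as
+# the condition command given in $1 keeps succeeding.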
+function destroy_zvols_until {
+ typeset cond=$1
+ while true ; do
+ IFS='' zfs list -Ho name -t vol | read -r -d '' zvols
+ if [[ -n $zvols ]] ; then
+ echo $zvols | xargs -n 1 -P $parallel zfs destroy
+ fi
+ if ! $cond ; then
+ break
+ fi
+ done
+}
+
+( seq $nzvols | \
+ xargs -P $parallel -I % zfs create -s -V 1G $TESTPOOL/testvol% ) &
+cpid=$!
+sleep 1
+
+destroy_zvols_until "kill -0 $cpid"
+destroy_zvols_until "false"
+
+log_pass "stress test done"
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
index ab8981e25cb2..0150ce72f0a4 100644
--- a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
@@ -464,7 +464,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
(type == PAGELIST_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, pages, num_pages);
if (actual_pages != num_pages) {
- vm_page_unhold_pages(pages, actual_pages);
+ if (actual_pages > 0)
+ vm_page_unhold_pages(pages, actual_pages);
free(pagelist, M_VCPAGELIST);
return (-ENOMEM);
}
diff --git a/sys/crypto/ccp/ccp.c b/sys/crypto/ccp/ccp.c
index 7db9a27ab059..c3d40f6e99ac 100644
--- a/sys/crypto/ccp/ccp.c
+++ b/sys/crypto/ccp/ccp.c
@@ -79,7 +79,7 @@ static struct pciid {
{ 0x15df1022, "AMD CCP-5a" },
};
-static struct random_source random_ccp = {
+static const struct random_source random_ccp = {
.rs_ident = "AMD CCP TRNG",
.rs_source = RANDOM_PURE_CCP,
.rs_read = random_ccp_read,
diff --git a/sys/crypto/chacha20/chacha.c b/sys/crypto/chacha20/chacha.c
index 52f7e18c651c..cb06003b0ecf 100644
--- a/sys/crypto/chacha20/chacha.c
+++ b/sys/crypto/chacha20/chacha.c
@@ -138,7 +138,7 @@ chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes)
for (;;) {
if (bytes < 64) {
#ifndef KEYSTREAM_ONLY
- for (i = 0;i < bytes;++i) tmp[i] = m[i];
+ for (i = 0; i < bytes; ++i) tmp[i] = m[i];
m = tmp;
#endif
ctarget = c;
@@ -160,7 +160,7 @@ chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes)
x13 = j13;
x14 = j14;
x15 = j15;
- for (i = 20;i > 0;i -= 2) {
+ for (i = 20; i > 0; i -= 2) {
QUARTERROUND( x0, x4, x8,x12)
QUARTERROUND( x1, x5, x9,x13)
QUARTERROUND( x2, x6,x10,x14)
@@ -240,7 +240,7 @@ chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes)
if (bytes <= 64) {
if (bytes < 64) {
- for (i = 0;i < bytes;++i) ctarget[i] = c[i];
+ for (i = 0; i < bytes; ++i) ctarget[i] = c[i];
}
x->input[12] = j12;
x->input[13] = j13;
diff --git a/sys/crypto/openssl/amd64/ossl_aes_gcm_avx512.c b/sys/crypto/openssl/amd64/ossl_aes_gcm_avx512.c
new file mode 100644
index 000000000000..694ed4fc8b32
--- /dev/null
+++ b/sys/crypto/openssl/amd64/ossl_aes_gcm_avx512.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2010-2022 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright (c) 2021, Intel Corporation. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*
+ * This file contains an AES-GCM wrapper implementation from OpenSSL, using
+ * VAES extensions. It was ported from cipher_aes_gcm_hw_vaes_avx512.inc.
+ */
+
+#include <sys/endian.h>
+#include <sys/systm.h>
+
+#include <crypto/openssl/ossl.h>
+#include <crypto/openssl/ossl_aes_gcm.h>
+#include <crypto/openssl/ossl_cipher.h>
+
+#include <opencrypto/cryptodev.h>
+
+_Static_assert(
+ sizeof(struct ossl_gcm_context) <= sizeof(struct ossl_cipher_context),
+ "ossl_gcm_context too large");
+
+void aesni_set_encrypt_key(const void *key, int bits, void *ctx);
+
+static void
+gcm_init(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
+{
+ KASSERT(keylen == 128 || keylen == 192 || keylen == 256,
+ ("%s: invalid key length %zu", __func__, keylen));
+
+ memset(&ctx->gcm, 0, sizeof(ctx->gcm));
+ memset(&ctx->aes_ks, 0, sizeof(ctx->aes_ks));
+ aesni_set_encrypt_key(key, keylen, &ctx->aes_ks);
+ ctx->ops->init(ctx, key, keylen);
+}
+
+static void
+gcm_tag(struct ossl_gcm_context *ctx, unsigned char *tag, size_t len)
+{
+ (void)ctx->ops->finish(ctx, NULL, 0);
+ memcpy(tag, ctx->gcm.Xi.c, len);
+}
+
+void ossl_gcm_gmult_avx512(uint64_t Xi[2], void *gcm128ctx);
+void ossl_aes_gcm_init_avx512(const void *ks, void *gcm128ctx);
+void ossl_aes_gcm_setiv_avx512(const void *ks, void *gcm128ctx,
+ const unsigned char *iv, size_t ivlen);
+void ossl_aes_gcm_update_aad_avx512(void *gcm128ctx, const unsigned char *aad,
+ size_t len);
+void ossl_aes_gcm_encrypt_avx512(const void *ks, void *gcm128ctx,
+ unsigned int *pblocklen, const unsigned char *in, size_t len,
+ unsigned char *out);
+void ossl_aes_gcm_decrypt_avx512(const void *ks, void *gcm128ctx,
+ unsigned int *pblocklen, const unsigned char *in, size_t len,
+ unsigned char *out);
+void ossl_aes_gcm_finalize_avx512(void *gcm128ctx, unsigned int pblocklen);
+
+static void
+gcm_init_avx512(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
+{
+ ossl_aes_gcm_init_avx512(&ctx->aes_ks, &ctx->gcm);
+}
+
+static void
+gcm_setiv_avx512(struct ossl_gcm_context *ctx, const unsigned char *iv,
+ size_t len)
+{
+ KASSERT(len == AES_GCM_IV_LEN,
+ ("%s: invalid IV length %zu", __func__, len));
+
+ ctx->gcm.Yi.u[0] = 0; /* Current counter */
+ ctx->gcm.Yi.u[1] = 0;
+ ctx->gcm.Xi.u[0] = 0; /* AAD hash */
+ ctx->gcm.Xi.u[1] = 0;
+ ctx->gcm.len.u[0] = 0; /* AAD length */
+ ctx->gcm.len.u[1] = 0; /* Message length */
+ ctx->gcm.ares = 0;
+ ctx->gcm.mres = 0;
+
+ ossl_aes_gcm_setiv_avx512(&ctx->aes_ks, ctx, iv, len);
+}
+
+static int
+gcm_aad_avx512(struct ossl_gcm_context *ctx, const unsigned char *aad,
+ size_t len)
+{
+ uint64_t alen = ctx->gcm.len.u[0];
+ size_t lenblks;
+ unsigned int ares;
+
+ /* Bad sequence: call of AAD update after message processing */
+ if (ctx->gcm.len.u[1])
+ return -2;
+
+ alen += len;
+ /* AAD is limited by 2^64 bits, thus 2^61 bytes */
+ if (alen > (1ull << 61) || (sizeof(len) == 8 && alen < len))
+ return -1;
+ ctx->gcm.len.u[0] = alen;
+
+ ares = ctx->gcm.ares;
+ /* Partial AAD block left from previous AAD update calls */
+ if (ares > 0) {
+ /*
+ * Fill partial block buffer till full block
+ * (note, the hash is stored reflected)
+ */
+ while (ares > 0 && len > 0) {
+ ctx->gcm.Xi.c[15 - ares] ^= *(aad++);
+ --len;
+ ares = (ares + 1) % AES_BLOCK_LEN;
+ }
+ /* Full block gathered */
+ if (ares == 0) {
+ ossl_gcm_gmult_avx512(ctx->gcm.Xi.u, ctx);
+ } else { /* no more AAD */
+ ctx->gcm.ares = ares;
+ return 0;
+ }
+ }
+
+ /* Bulk AAD processing */
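+	/* The mask rounds len down to a whole number of AES blocks. */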
+ lenblks = len & ((size_t)(-AES_BLOCK_LEN));
+ if (lenblks > 0) {
+ ossl_aes_gcm_update_aad_avx512(ctx, aad, lenblks);
+ aad += lenblks;
+ len -= lenblks;
+ }
+
+ /* Add remaining AAD to the hash (note, the hash is stored reflected) */
+ if (len > 0) {
+ ares = (unsigned int)len;
+ for (size_t i = 0; i < len; ++i)
+ ctx->gcm.Xi.c[15 - i] ^= aad[i];
+ }
+
+ ctx->gcm.ares = ares;
+
+ return 0;
+}
+
+static int
+_gcm_encrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
+ unsigned char *out, size_t len, bool encrypt)
+{
+ uint64_t mlen = ctx->gcm.len.u[1];
+
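+	/* GCM limits plaintext to 2^39 - 256 bits, i.e. 2^36 - 32 bytes. */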
+ mlen += len;
+ if (mlen > ((1ull << 36) - 32) || (sizeof(len) == 8 && mlen < len))
+ return -1;
+
+ ctx->gcm.len.u[1] = mlen;
+
+ /* Finalize GHASH(AAD) if AAD partial blocks left unprocessed */
+ if (ctx->gcm.ares > 0) {
+ ossl_gcm_gmult_avx512(ctx->gcm.Xi.u, ctx);
+ ctx->gcm.ares = 0;
+ }
+
+ if (encrypt) {
+ ossl_aes_gcm_encrypt_avx512(&ctx->aes_ks, ctx, &ctx->gcm.mres,
+ in, len, out);
+ } else {
+ ossl_aes_gcm_decrypt_avx512(&ctx->aes_ks, ctx, &ctx->gcm.mres,
+ in, len, out);
+ }
+
+ return 0;
+}
+
+static int
+gcm_encrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
+ unsigned char *out, size_t len)
+{
+ return _gcm_encrypt_avx512(ctx, in, out, len, true);
+}
+
+static int
+gcm_decrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
+ unsigned char *out, size_t len)
+{
+ return _gcm_encrypt_avx512(ctx, in, out, len, false);
+}
+
+static int
+gcm_finish_avx512(struct ossl_gcm_context *ctx, const unsigned char *tag,
+ size_t len)
+{
+ unsigned int *res = &ctx->gcm.mres;
+
+ /* Finalize AAD processing */
+ if (ctx->gcm.ares > 0)
+ res = &ctx->gcm.ares;
+
+ ossl_aes_gcm_finalize_avx512(ctx, *res);
+
+ ctx->gcm.ares = ctx->gcm.mres = 0;
+
+ if (tag != NULL)
+ return timingsafe_bcmp(ctx->gcm.Xi.c, tag, len);
+ return 0;
+}
+
+static const struct ossl_aes_gcm_ops gcm_ops_avx512 = {
+ .init = gcm_init_avx512,
+ .setiv = gcm_setiv_avx512,
+ .aad = gcm_aad_avx512,
+ .encrypt = gcm_encrypt_avx512,
+ .decrypt = gcm_decrypt_avx512,
+ .finish = gcm_finish_avx512,
+ .tag = gcm_tag,
+};
+
+int ossl_aes_gcm_setkey_avx512(const unsigned char *key, int klen, void *_ctx);
+
+int
+ossl_aes_gcm_setkey_avx512(const unsigned char *key, int klen,
+ void *_ctx)
+{
+ struct ossl_gcm_context *ctx;
+
+ ctx = _ctx;
+ ctx->ops = &gcm_ops_avx512;
+ gcm_init(ctx, key, klen);
+ return (0);
+}
diff --git a/sys/crypto/openssl/arm/ossl_aes_gcm.c b/sys/crypto/openssl/arm/ossl_aes_gcm_neon.c
index e51b7b4fbc04..e51b7b4fbc04 100644
--- a/sys/crypto/openssl/arm/ossl_aes_gcm.c
+++ b/sys/crypto/openssl/arm/ossl_aes_gcm_neon.c
diff --git a/sys/crypto/openssl/amd64/ossl_aes_gcm.c b/sys/crypto/openssl/ossl_aes_gcm.c
index d08b2ac8a759..e1cdc710c3aa 100644
--- a/sys/crypto/openssl/amd64/ossl_aes_gcm.c
+++ b/sys/crypto/openssl/ossl_aes_gcm.c
@@ -1,6 +1,7 @@
/*
* Copyright 2010-2022 The OpenSSL Project Authors. All Rights Reserved.
* Copyright (c) 2021, Intel Corporation. All Rights Reserved.
+ * Copyright (c) 2023, Raptor Engineering, LLC. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -9,11 +10,10 @@
*/
/*
- * This file contains 2 AES-GCM wrapper implementations from OpenSSL, using
- * AES-NI and VAES extensions respectively. These were ported from
- * cipher_aes_gcm_hw_aesni.inc and cipher_aes_gcm_hw_vaes_avx512.inc. The
- * AES-NI implementation makes use of a generic C implementation for partial
- * blocks, ported from gcm128.c with OPENSSL_SMALL_FOOTPRINT defined.
+ * This file contains an AES-GCM wrapper implementation from OpenSSL, using
+ * AES-NI (x86) or POWER8 Crypto Extensions (ppc). It was ported from
+ * cipher_aes_gcm_hw_aesni.inc and it makes use of a generic C implementation
+ * for partial blocks, ported from gcm128.c with OPENSSL_SMALL_FOOTPRINT defined.
*/
#include <sys/endian.h>
@@ -29,225 +29,152 @@ _Static_assert(
sizeof(struct ossl_gcm_context) <= sizeof(struct ossl_cipher_context),
"ossl_gcm_context too large");
-void aesni_set_encrypt_key(const void *key, int bits, void *ctx);
-
-static void
-gcm_init(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
-{
- KASSERT(keylen == 128 || keylen == 192 || keylen == 256,
- ("%s: invalid key length %zu", __func__, keylen));
-
- memset(&ctx->gcm, 0, sizeof(ctx->gcm));
- memset(&ctx->aes_ks, 0, sizeof(ctx->aes_ks));
- aesni_set_encrypt_key(key, keylen, &ctx->aes_ks);
- ctx->ops->init(ctx, key, keylen);
-}
-
-static void
-gcm_tag(struct ossl_gcm_context *ctx, unsigned char *tag, size_t len)
-{
- (void)ctx->ops->finish(ctx, NULL, 0);
- memcpy(tag, ctx->gcm.Xi.c, len);
-}
+#if defined(__amd64__) || defined(__i386__)
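+/* Route the arch-neutral names to the AES-NI/AVX entry points. */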
+#define AES_set_encrypt_key aesni_set_encrypt_key
+#define AES_gcm_encrypt aesni_gcm_encrypt
+#define AES_gcm_decrypt aesni_gcm_decrypt
+#define AES_encrypt aesni_encrypt
+#define AES_ctr32_encrypt_blocks aesni_ctr32_encrypt_blocks
+#define GCM_init gcm_init_avx
+#define GCM_gmult gcm_gmult_avx
+#define GCM_ghash gcm_ghash_avx
+
+void AES_set_encrypt_key(const void *key, int bits, void *ctx);
+size_t AES_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], uint64_t *Xi);
+size_t AES_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], uint64_t *Xi);
+void AES_encrypt(const unsigned char *in, unsigned char *out, void *ks);
+void AES_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
+ size_t blocks, void *ks, const unsigned char *iv);
-void ossl_gcm_gmult_avx512(uint64_t Xi[2], void *gcm128ctx);
-void ossl_aes_gcm_init_avx512(const void *ks, void *gcm128ctx);
-void ossl_aes_gcm_setiv_avx512(const void *ks, void *gcm128ctx,
- const unsigned char *iv, size_t ivlen);
-void ossl_aes_gcm_update_aad_avx512(void *gcm128ctx, const unsigned char *aad,
+void GCM_init(__uint128_t Htable[16], uint64_t Xi[2]);
+void GCM_gmult(uint64_t Xi[2], const __uint128_t Htable[16]);
+void GCM_ghash(uint64_t Xi[2], const __uint128_t Htable[16], const void *in,
size_t len);
-void ossl_aes_gcm_encrypt_avx512(const void *ks, void *gcm128ctx,
- unsigned int *pblocklen, const unsigned char *in, size_t len,
- unsigned char *out);
-void ossl_aes_gcm_decrypt_avx512(const void *ks, void *gcm128ctx,
- unsigned int *pblocklen, const unsigned char *in, size_t len,
- unsigned char *out);
-void ossl_aes_gcm_finalize_avx512(void *gcm128ctx, unsigned int pblocklen);
-
-static void
-gcm_init_avx512(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
-{
- ossl_aes_gcm_init_avx512(&ctx->aes_ks, &ctx->gcm);
-}
-static void
-gcm_setiv_avx512(struct ossl_gcm_context *ctx, const unsigned char *iv,
- size_t len)
-{
- KASSERT(len == AES_GCM_IV_LEN,
- ("%s: invalid IV length %zu", __func__, len));
+#elif defined(__powerpc64__)
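+/* Route the arch-neutral names to the POWER8 crypto entry points. */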
+#define AES_set_encrypt_key aes_p8_set_encrypt_key
+#define AES_gcm_encrypt(i,o,l,k,v,x) ppc_aes_gcm_crypt(i,o,l,k,v,x,1)
+#define AES_gcm_decrypt(i,o,l,k,v,x) ppc_aes_gcm_crypt(i,o,l,k,v,x,0)
+#define AES_encrypt aes_p8_encrypt
+#define AES_ctr32_encrypt_blocks aes_p8_ctr32_encrypt_blocks
+#define GCM_init gcm_init_p8
+#define GCM_gmult gcm_gmult_p8
+#define GCM_ghash gcm_ghash_p8
+
+size_t ppc_aes_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], uint64_t *Xi);
+size_t ppc_aes_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], uint64_t *Xi);
- ctx->gcm.Yi.u[0] = 0; /* Current counter */
- ctx->gcm.Yi.u[1] = 0;
- ctx->gcm.Xi.u[0] = 0; /* AAD hash */
- ctx->gcm.Xi.u[1] = 0;
- ctx->gcm.len.u[0] = 0; /* AAD length */
- ctx->gcm.len.u[1] = 0; /* Message length */
- ctx->gcm.ares = 0;
- ctx->gcm.mres = 0;
+void AES_set_encrypt_key(const void *key, int bits, void *ctx);
+void AES_encrypt(const unsigned char *in, unsigned char *out, void *ks);
+void AES_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
+ size_t blocks, void *ks, const unsigned char *iv);
- ossl_aes_gcm_setiv_avx512(&ctx->aes_ks, ctx, iv, len);
-}
+void GCM_init(__uint128_t Htable[16], uint64_t Xi[2]);
+void GCM_gmult(uint64_t Xi[2], const __uint128_t Htable[16]);
+void GCM_ghash(uint64_t Xi[2], const __uint128_t Htable[16], const void *in,
+ size_t len);
-static int
-gcm_aad_avx512(struct ossl_gcm_context *ctx, const unsigned char *aad,
- size_t len)
+static size_t
+ppc_aes_gcm_crypt(const unsigned char *in, unsigned char *out,
+ size_t len, const void *key, unsigned char ivec_[16], uint64_t *Xi,
+ int encrypt)
{
- uint64_t alen = ctx->gcm.len.u[0];
- size_t lenblks;
- unsigned int ares;
-
- /* Bad sequence: call of AAD update after message processing */
- if (ctx->gcm.len.u[1])
- return -2;
-
- alen += len;
- /* AAD is limited by 2^64 bits, thus 2^61 bytes */
- if (alen > (1ull << 61) || (sizeof(len) == 8 && alen < len))
- return -1;
- ctx->gcm.len.u[0] = alen;
+ union {
+ uint32_t d[4];
+ uint8_t c[16];
+ } *ivec = (void *)ivec_;
+ int s = 0;
+ int ndone = 0;
+ int ctr_reset = 0;
+ uint32_t ivec_val;
+ uint64_t blocks_unused;
+ uint64_t nb = len / 16;
+ uint64_t next_ctr = 0;
+ unsigned char ctr_saved[12];
+
+ memcpy(ctr_saved, ivec, 12);
+
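+	/*
+	 * The underlying assembly advances only the low 32-bit word of the
+	 * counter, so split the request such that this word never wraps
+	 * within a single call; ctr_saved preserves the upper 12 IV bytes
+	 * across chunks.
+	 */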
+ while (nb) {
+ ivec_val = ivec->d[3];
+#if BYTE_ORDER == LITTLE_ENDIAN
+ ivec_val = bswap32(ivec_val);
+#endif
- ares = ctx->gcm.ares;
- /* Partial AAD block left from previous AAD update calls */
- if (ares > 0) {
- /*
- * Fill partial block buffer till full block
- * (note, the hash is stored reflected)
- */
- while (ares > 0 && len > 0) {
- ctx->gcm.Xi.c[15 - ares] ^= *(aad++);
- --len;
- ares = (ares + 1) % AES_BLOCK_LEN;
- }
- /* Full block gathered */
- if (ares == 0) {
- ossl_gcm_gmult_avx512(ctx->gcm.Xi.u, ctx);
- } else { /* no more AAD */
- ctx->gcm.ares = ares;
- return 0;
+ blocks_unused = (uint64_t)0xffffffffU + 1 - (uint64_t)ivec_val;
+ if (nb > blocks_unused) {
+ len = blocks_unused * 16;
+ nb -= blocks_unused;
+ next_ctr = blocks_unused;
+ ctr_reset = 1;
+ } else {
+ len = nb * 16;
+ next_ctr = nb;
+ nb = 0;
}
- }
- /* Bulk AAD processing */
- lenblks = len & ((size_t)(-AES_BLOCK_LEN));
- if (lenblks > 0) {
- ossl_aes_gcm_update_aad_avx512(ctx, aad, lenblks);
- aad += lenblks;
- len -= lenblks;
- }
+ s = encrypt ? ppc_aes_gcm_encrypt(in, out, len, key, ivec->c, Xi) :
+ ppc_aes_gcm_decrypt(in, out, len, key, ivec->c, Xi);
- /* Add remaining AAD to the hash (note, the hash is stored reflected) */
- if (len > 0) {
- ares = (unsigned int)len;
- for (size_t i = 0; i < len; ++i)
- ctx->gcm.Xi.c[15 - i] ^= aad[i];
+ /* add counter to ivec */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ ivec->d[3] = bswap32(ivec_val + next_ctr);
+#else
+ ivec->d[3] += next_ctr;
+#endif
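+		/*
+		 * If the request was split at a counter wrap, step past the
+		 * chunk just processed; the counter word has rolled over to 0.
+		 */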
+ if (ctr_reset) {
+ ctr_reset = 0;
+ in += len;
+ out += len;
+ }
+ memcpy(ivec, ctr_saved, 12);
+ ndone += s;
}
- ctx->gcm.ares = ares;
-
- return 0;
+ return ndone;
}
-static int
-_gcm_encrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
- unsigned char *out, size_t len, bool encrypt)
-{
- uint64_t mlen = ctx->gcm.len.u[1];
-
- mlen += len;
- if (mlen > ((1ull << 36) - 32) || (sizeof(len) == 8 && mlen < len))
- return -1;
-
- ctx->gcm.len.u[1] = mlen;
-
- /* Finalize GHASH(AAD) if AAD partial blocks left unprocessed */
- if (ctx->gcm.ares > 0) {
- ossl_gcm_gmult_avx512(ctx->gcm.Xi.u, ctx);
- ctx->gcm.ares = 0;
- }
-
- if (encrypt) {
- ossl_aes_gcm_encrypt_avx512(&ctx->aes_ks, ctx, &ctx->gcm.mres,
- in, len, out);
- } else {
- ossl_aes_gcm_decrypt_avx512(&ctx->aes_ks, ctx, &ctx->gcm.mres,
- in, len, out);
- }
-
- return 0;
-}
+#else
+#error "Unsupported architecture!"
+#endif
-static int
-gcm_encrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
- unsigned char *out, size_t len)
+static void
+gcm_init(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
{
- return _gcm_encrypt_avx512(ctx, in, out, len, true);
-}
+ KASSERT(keylen == 128 || keylen == 192 || keylen == 256,
+ ("%s: invalid key length %zu", __func__, keylen));
-static int
-gcm_decrypt_avx512(struct ossl_gcm_context *ctx, const unsigned char *in,
- unsigned char *out, size_t len)
-{
- return _gcm_encrypt_avx512(ctx, in, out, len, false);
+ memset(&ctx->gcm, 0, sizeof(ctx->gcm));
+ memset(&ctx->aes_ks, 0, sizeof(ctx->aes_ks));
+ AES_set_encrypt_key(key, keylen, &ctx->aes_ks);
+ ctx->ops->init(ctx, key, keylen);
}
-static int
-gcm_finish_avx512(struct ossl_gcm_context *ctx, const unsigned char *tag,
- size_t len)
+static void
+gcm_tag_op(struct ossl_gcm_context *ctx, unsigned char *tag, size_t len)
{
- unsigned int *res = &ctx->gcm.mres;
-
- /* Finalize AAD processing */
- if (ctx->gcm.ares > 0)
- res = &ctx->gcm.ares;
-
- ossl_aes_gcm_finalize_avx512(ctx, *res);
-
- ctx->gcm.ares = ctx->gcm.mres = 0;
-
- if (tag != NULL)
- return timingsafe_bcmp(ctx->gcm.Xi.c, tag, len);
- return 0;
+ (void)ctx->ops->finish(ctx, NULL, 0);
+ memcpy(tag, ctx->gcm.Xi.c, len);
}
-static const struct ossl_aes_gcm_ops gcm_ops_avx512 = {
- .init = gcm_init_avx512,
- .setiv = gcm_setiv_avx512,
- .aad = gcm_aad_avx512,
- .encrypt = gcm_encrypt_avx512,
- .decrypt = gcm_decrypt_avx512,
- .finish = gcm_finish_avx512,
- .tag = gcm_tag,
-};
-
-size_t aesni_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], uint64_t *Xi);
-size_t aesni_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
- const void *key, unsigned char ivec[16], uint64_t *Xi);
-void aesni_encrypt(const unsigned char *in, unsigned char *out, void *ks);
-void aesni_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
- size_t blocks, void *ks, const unsigned char *iv);
-
-void gcm_init_avx(__uint128_t Htable[16], uint64_t Xi[2]);
-void gcm_gmult_avx(uint64_t Xi[2], const __uint128_t Htable[16]);
-void gcm_ghash_avx(uint64_t Xi[2], const __uint128_t Htable[16], const void *in,
- size_t len);
-
static void
-gcm_init_aesni(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
+gcm_init_op(struct ossl_gcm_context *ctx, const void *key, size_t keylen)
{
- aesni_encrypt(ctx->gcm.H.c, ctx->gcm.H.c, &ctx->aes_ks);
+ AES_encrypt(ctx->gcm.H.c, ctx->gcm.H.c, &ctx->aes_ks);
#if BYTE_ORDER == LITTLE_ENDIAN
ctx->gcm.H.u[0] = bswap64(ctx->gcm.H.u[0]);
ctx->gcm.H.u[1] = bswap64(ctx->gcm.H.u[1]);
#endif
- gcm_init_avx(ctx->gcm.Htable, ctx->gcm.H.u);
+ GCM_init(ctx->gcm.Htable, ctx->gcm.H.u);
}
static void
-gcm_setiv_aesni(struct ossl_gcm_context *ctx, const unsigned char *iv,
+gcm_setiv_op(struct ossl_gcm_context *ctx, const unsigned char *iv,
size_t len)
{
uint32_t ctr;
@@ -269,7 +196,7 @@ gcm_setiv_aesni(struct ossl_gcm_context *ctx, const unsigned char *iv,
ctx->gcm.Xi.u[0] = 0;
ctx->gcm.Xi.u[1] = 0;
- aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c, &ctx->aes_ks);
+ AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c, &ctx->aes_ks);
ctr++;
#if BYTE_ORDER == LITTLE_ENDIAN
@@ -280,7 +207,7 @@ gcm_setiv_aesni(struct ossl_gcm_context *ctx, const unsigned char *iv,
}
static int
-gcm_aad_aesni(struct ossl_gcm_context *ctx, const unsigned char *aad,
+gcm_aad_op(struct ossl_gcm_context *ctx, const unsigned char *aad,
size_t len)
{
size_t i;
@@ -303,14 +230,14 @@ gcm_aad_aesni(struct ossl_gcm_context *ctx, const unsigned char *aad,
n = (n + 1) % 16;
}
if (n == 0)
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
else {
ctx->gcm.ares = n;
return 0;
}
}
if ((i = (len & (size_t)-AES_BLOCK_LEN))) {
- gcm_ghash_avx(ctx->gcm.Xi.u, ctx->gcm.Htable, aad, i);
+ GCM_ghash(ctx->gcm.Xi.u, ctx->gcm.Htable, aad, i);
aad += i;
len -= i;
}
@@ -341,7 +268,7 @@ gcm_encrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
if (ctx->gcm.ares) {
/* First call to encrypt finalizes GHASH(AAD) */
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
ctx->gcm.ares = 0;
}
@@ -354,7 +281,7 @@ gcm_encrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
n = mres % 16;
for (i = 0; i < len; ++i) {
if (n == 0) {
- aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
+ AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
&ctx->aes_ks);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
@@ -366,7 +293,7 @@ gcm_encrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
ctx->gcm.Xi.c[n] ^= out[i] = in[i] ^ ctx->gcm.EKi.c[n];
mres = n = (n + 1) % 16;
if (n == 0)
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
}
ctx->gcm.mres = mres;
@@ -390,7 +317,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
if (ctx->gcm.ares) {
/* First call to encrypt finalizes GHASH(AAD) */
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
ctx->gcm.ares = 0;
}
@@ -408,7 +335,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
n = (n + 1) % 16;
}
if (n == 0) {
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
mres = 0;
} else {
ctx->gcm.mres = n;
@@ -418,7 +345,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
if ((i = (len & (size_t)-16))) {
size_t j = i / 16;
- aesni_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
+ AES_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
ctr += (unsigned int)j;
#if BYTE_ORDER == LITTLE_ENDIAN
ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -430,12 +357,12 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
while (j--) {
for (i = 0; i < 16; ++i)
ctx->gcm.Xi.c[i] ^= out[i];
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
out += 16;
}
}
if (len) {
- aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
+ AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -453,7 +380,7 @@ gcm_encrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
}
static int
-gcm_encrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
+gcm_encrypt_op(struct ossl_gcm_context *ctx, const unsigned char *in,
unsigned char *out, size_t len)
{
size_t bulk = 0, res;
@@ -463,7 +390,7 @@ gcm_encrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
if ((error = gcm_encrypt(ctx, in, out, res)) != 0)
return error;
- bulk = aesni_gcm_encrypt(in + res, out + res, len - res,
+ bulk = AES_gcm_encrypt(in + res, out + res, len - res,
&ctx->aes_ks, ctx->gcm.Yi.c, ctx->gcm.Xi.u);
ctx->gcm.len.u[1] += bulk;
bulk += res;
@@ -492,7 +419,7 @@ gcm_decrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
if (ctx->gcm.ares) {
/* First call to encrypt finalizes GHASH(AAD) */
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
ctx->gcm.ares = 0;
}
@@ -506,7 +433,7 @@ gcm_decrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
for (i = 0; i < len; ++i) {
uint8_t c;
if (n == 0) {
- aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
+ AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c,
&ctx->aes_ks);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
@@ -520,7 +447,7 @@ gcm_decrypt(struct ossl_gcm_context *ctx, const unsigned char *in,
ctx->gcm.Xi.c[n] ^= c;
mres = n = (n + 1) % 16;
if (n == 0)
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
}
ctx->gcm.mres = mres;
@@ -544,7 +471,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
if (ctx->gcm.ares) {
/* First call to decrypt finalizes GHASH(AAD) */
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
ctx->gcm.ares = 0;
}
@@ -564,7 +491,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
n = (n + 1) % 16;
}
if (n == 0) {
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
mres = 0;
} else {
ctx->gcm.mres = n;
@@ -578,12 +505,12 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
size_t k;
for (k = 0; k < 16; ++k)
ctx->gcm.Xi.c[k] ^= in[k];
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
in += 16;
}
j = i / 16;
in -= i;
- aesni_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
+ AES_ctr32_encrypt_blocks(in, out, j, &ctx->aes_ks, ctx->gcm.Yi.c);
ctr += (unsigned int)j;
#if BYTE_ORDER == LITTLE_ENDIAN
ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -595,7 +522,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
len -= i;
}
if (len) {
- aesni_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
+ AES_encrypt(ctx->gcm.Yi.c, ctx->gcm.EKi.c, &ctx->aes_ks);
++ctr;
#if BYTE_ORDER == LITTLE_ENDIAN
ctx->gcm.Yi.d[3] = bswap32(ctr);
@@ -615,7 +542,7 @@ gcm_decrypt_ctr32(struct ossl_gcm_context *ctx, const unsigned char *in,
}
static int
-gcm_decrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
+gcm_decrypt_op(struct ossl_gcm_context *ctx, const unsigned char *in,
unsigned char *out, size_t len)
{
size_t bulk = 0, res;
@@ -625,7 +552,7 @@ gcm_decrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
if ((error = gcm_decrypt(ctx, in, out, res)) != 0)
return error;
- bulk = aesni_gcm_decrypt(in + res, out + res, len - res, &ctx->aes_ks,
+ bulk = AES_gcm_decrypt(in + res, out + res, len - res, &ctx->aes_ks,
ctx->gcm.Yi.c, ctx->gcm.Xi.u);
ctx->gcm.len.u[1] += bulk;
bulk += res;
@@ -637,14 +564,14 @@ gcm_decrypt_aesni(struct ossl_gcm_context *ctx, const unsigned char *in,
}
static int
-gcm_finish_aesni(struct ossl_gcm_context *ctx, const unsigned char *tag,
+gcm_finish_op(struct ossl_gcm_context *ctx, const unsigned char *tag,
size_t len)
{
uint64_t alen = ctx->gcm.len.u[0] << 3;
uint64_t clen = ctx->gcm.len.u[1] << 3;
if (ctx->gcm.mres || ctx->gcm.ares)
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
#if BYTE_ORDER == LITTLE_ENDIAN
alen = bswap64(alen);
@@ -653,7 +580,7 @@ gcm_finish_aesni(struct ossl_gcm_context *ctx, const unsigned char *tag,
ctx->gcm.Xi.u[0] ^= alen;
ctx->gcm.Xi.u[1] ^= clen;
- gcm_gmult_avx(ctx->gcm.Xi.u, ctx->gcm.Htable);
+ GCM_gmult(ctx->gcm.Xi.u, ctx->gcm.Htable);
ctx->gcm.Xi.u[0] ^= ctx->gcm.EK0.u[0];
ctx->gcm.Xi.u[1] ^= ctx->gcm.EK0.u[1];
@@ -663,40 +590,26 @@ gcm_finish_aesni(struct ossl_gcm_context *ctx, const unsigned char *tag,
return 0;
}
-static const struct ossl_aes_gcm_ops gcm_ops_aesni = {
- .init = gcm_init_aesni,
- .setiv = gcm_setiv_aesni,
- .aad = gcm_aad_aesni,
- .encrypt = gcm_encrypt_aesni,
- .decrypt = gcm_decrypt_aesni,
- .finish = gcm_finish_aesni,
- .tag = gcm_tag,
+static const struct ossl_aes_gcm_ops gcm_ops = {
+ .init = gcm_init_op,
+ .setiv = gcm_setiv_op,
+ .aad = gcm_aad_op,
+ .encrypt = gcm_encrypt_op,
+ .decrypt = gcm_decrypt_op,
+ .finish = gcm_finish_op,
+ .tag = gcm_tag_op,
};
-int ossl_aes_gcm_setkey_aesni(const unsigned char *key, int klen, void *_ctx);
-
-int
-ossl_aes_gcm_setkey_aesni(const unsigned char *key, int klen,
- void *_ctx)
-{
- struct ossl_gcm_context *ctx;
-
- ctx = _ctx;
- ctx->ops = &gcm_ops_aesni;
- gcm_init(ctx, key, klen);
- return (0);
-}
-
-int ossl_aes_gcm_setkey_avx512(const unsigned char *key, int klen, void *_ctx);
+int ossl_aes_gcm_setkey(const unsigned char *key, int klen, void *_ctx);
int
-ossl_aes_gcm_setkey_avx512(const unsigned char *key, int klen,
+ossl_aes_gcm_setkey(const unsigned char *key, int klen,
void *_ctx)
{
struct ossl_gcm_context *ctx;
ctx = _ctx;
- ctx->ops = &gcm_ops_avx512;
+ ctx->ops = &gcm_ops;
gcm_init(ctx, key, klen);
return (0);
}
diff --git a/sys/crypto/openssl/ossl_ppc.c b/sys/crypto/openssl/ossl_ppc.c
index 0951745c4b43..980211f46a76 100644
--- a/sys/crypto/openssl/ossl_ppc.c
+++ b/sys/crypto/openssl/ossl_ppc.c
@@ -38,9 +38,12 @@ unsigned int OPENSSL_ppccap_P = 0;
ossl_cipher_setkey_t aes_p8_set_encrypt_key;
ossl_cipher_setkey_t aes_p8_set_decrypt_key;
+
ossl_cipher_setkey_t vpaes_set_encrypt_key;
ossl_cipher_setkey_t vpaes_set_decrypt_key;
+ossl_cipher_setkey_t ossl_aes_gcm_setkey;
+
void
ossl_cpuid(struct ossl_softc *sc)
{
@@ -75,7 +78,11 @@ ossl_cpuid(struct ossl_softc *sc)
ossl_cipher_aes_cbc.set_encrypt_key = aes_p8_set_encrypt_key;
ossl_cipher_aes_cbc.set_decrypt_key = aes_p8_set_decrypt_key;
sc->has_aes = true;
- } else if (OPENSSL_ppccap_P & PPC_ALTIVEC) {
+
+ ossl_cipher_aes_gcm.set_encrypt_key = ossl_aes_gcm_setkey;
+ ossl_cipher_aes_gcm.set_decrypt_key = ossl_aes_gcm_setkey;
+ sc->has_aes_gcm = true;
+ } else if (OPENSSL_ppccap_P & PPC_ALTIVEC) {
ossl_cipher_aes_cbc.set_encrypt_key = vpaes_set_encrypt_key;
ossl_cipher_aes_cbc.set_decrypt_key = vpaes_set_decrypt_key;
sc->has_aes = true;
diff --git a/sys/crypto/openssl/ossl_sha256.c b/sys/crypto/openssl/ossl_sha256.c
index 4613a9409b44..50cb9739d114 100644
--- a/sys/crypto/openssl/ossl_sha256.c
+++ b/sys/crypto/openssl/ossl_sha256.c
@@ -74,11 +74,11 @@ ossl_sha256_init(void *c_)
unsigned int nn; \
switch ((c)->md_len) \
{ case SHA224_DIGEST_LENGTH: \
- for (nn=0;nn<SHA224_DIGEST_LENGTH/4;nn++) \
+ for (nn=0; nn < SHA224_DIGEST_LENGTH / 4; nn++) \
{ ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
break; \
case SHA256_DIGEST_LENGTH: \
- for (nn=0;nn<SHA256_DIGEST_LENGTH/4;nn++) \
+ for (nn=0; nn < SHA256_DIGEST_LENGTH / 4; nn++) \
{ ll=(c)->h[nn]; (void)HOST_l2c(ll,(s)); } \
break; \
default: \
diff --git a/sys/crypto/openssl/ossl_x86.c b/sys/crypto/openssl/ossl_x86.c
index 9161eaf39beb..bf034f05b783 100644
--- a/sys/crypto/openssl/ossl_x86.c
+++ b/sys/crypto/openssl/ossl_x86.c
@@ -56,7 +56,7 @@ ossl_cipher_setkey_t aesni_set_decrypt_key;
#ifdef __amd64__
int ossl_vaes_vpclmulqdq_capable(void);
-ossl_cipher_setkey_t ossl_aes_gcm_setkey_aesni;
+ossl_cipher_setkey_t ossl_aes_gcm_setkey;
ossl_cipher_setkey_t ossl_aes_gcm_setkey_avx512;
#endif
@@ -141,8 +141,8 @@ ossl_cpuid(struct ossl_softc *sc)
} else if ((cpu_feature2 &
(CPUID2_AVX | CPUID2_PCLMULQDQ | CPUID2_MOVBE)) ==
(CPUID2_AVX | CPUID2_PCLMULQDQ | CPUID2_MOVBE)) {
- ossl_cipher_aes_gcm.set_encrypt_key = ossl_aes_gcm_setkey_aesni;
- ossl_cipher_aes_gcm.set_decrypt_key = ossl_aes_gcm_setkey_aesni;
+ ossl_cipher_aes_gcm.set_encrypt_key = ossl_aes_gcm_setkey;
+ ossl_cipher_aes_gcm.set_decrypt_key = ossl_aes_gcm_setkey;
sc->has_aes_gcm = true;
} else {
sc->has_aes_gcm = false;
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 733c440f5ee3..a26cf8161294 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -459,12 +459,11 @@ DB_SHOW_COMMAND(proc, db_show_proc)
db_printf("??? (%#x)\n", p->p_state);
}
if (p->p_ucred != NULL) {
- db_printf(" uid: %d gids: ", p->p_ucred->cr_uid);
- for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
- db_printf("%d", p->p_ucred->cr_groups[i]);
- if (i < (p->p_ucred->cr_ngroups - 1))
- db_printf(", ");
- }
+ db_printf(" uid: %d gid: %d supp gids: ",
+ p->p_ucred->cr_uid, p->p_ucred->cr_gid);
+ for (i = 0; i < p->p_ucred->cr_ngroups; i++)
+ db_printf(i == 0 ? "%d" : ", %d",
+ p->p_ucred->cr_groups[i]);
db_printf("\n");
}
if (p->p_pptr != NULL)
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index a2159b12876f..3f0a7b40245d 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -4,6 +4,10 @@
* Copyright (c) 2000, 2001 Michael Smith
* Copyright (c) 2000 BSDi
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -98,6 +102,11 @@ struct acpi_interface {
int num;
};
+struct acpi_wake_prep_context {
+ struct acpi_softc *sc;
+ enum power_stype stype;
+};
+
static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
/* Global mutex for locking access to the ACPI subsystem. */
@@ -107,8 +116,9 @@ struct callout acpi_sleep_timer;
/* Bitmap of device quirks. */
int acpi_quirks;
-/* Supported sleep states. */
-static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
+/* Supported sleep states and types. */
+static bool acpi_supported_stypes[POWER_STYPE_COUNT];
+static bool acpi_supported_sstates[ACPI_S_STATE_COUNT];
static void acpi_lookup(void *arg, const char *name, device_t *dev);
static int acpi_modevent(struct module *mod, int event, void *junk);
@@ -165,23 +175,32 @@ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
void *context, void **status);
static void acpi_sleep_enable(void *arg);
static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
-static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
+static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc,
+ enum power_stype stype);
static void acpi_shutdown_final(void *arg, int howto);
static void acpi_enable_fixed_events(struct acpi_softc *sc);
static void acpi_resync_clock(struct acpi_softc *sc);
-static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
-static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
-static int acpi_wake_prep_walk(int sstate);
+static int acpi_wake_sleep_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype);
+static int acpi_wake_run_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype);
+static int acpi_wake_prep_walk(struct acpi_softc *sc, enum power_stype stype);
static int acpi_wake_sysctl_walk(device_t dev);
static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
-static void acpi_system_eventhandler_sleep(void *arg, int state);
-static void acpi_system_eventhandler_wakeup(void *arg, int state);
-static int acpi_sname2sstate(const char *sname);
-static const char *acpi_sstate2sname(int sstate);
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_system_eventhandler_sleep(void *arg,
+ enum power_stype stype);
+static void acpi_system_eventhandler_wakeup(void *arg,
+ enum power_stype stype);
+static enum power_stype acpi_sstate_to_stype(int sstate);
+static int acpi_sname_to_sstate(const char *sname);
+static const char *acpi_sstate_to_sname(int sstate);
+static int acpi_suspend_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_stype_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
-static int acpi_pm_func(u_long cmd, void *arg, ...);
+static int acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype);
+static int acpi_pm_func(u_long cmd, void *arg, enum power_stype stype);
static void acpi_enable_pcie(void);
static void acpi_reset_interfaces(device_t dev);
@@ -472,6 +491,7 @@ acpi_attach(device_t dev)
UINT32 flags;
UINT8 TypeA, TypeB;
char *env;
+ enum power_stype stype;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -584,31 +604,35 @@ acpi_attach(device_t dev)
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "power_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
+ &sc->acpi_power_button_stype, 0, acpi_stype_sysctl, "A",
"Power button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
+ &sc->acpi_sleep_button_stype, 0, acpi_stype_sysctl, "A",
"Sleep button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "lid_switch_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
- "Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when close the Lid.");
+ &sc->acpi_lid_switch_stype, 0, acpi_stype_sysctl, "A",
+	    "Lid ACPI sleep state. Set to s2idle or s2mem if you want to "
+	    "suspend your laptop when the lid is closed.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "standby_state",
- CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ NULL, 0, acpi_suspend_state_sysctl, "A",
+ "Current ACPI suspend state. This sysctl is deprecated; you probably "
+ "want to use kern.power.suspend instead.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "suspend_state",
+ OID_AUTO, "standby_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A",
+	    "ACPI Sx state to use when entering standby (S1 or S2).");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
"sleep delay in seconds");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
+ OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0,
+ "Use S4BIOS when hibernating.");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
@@ -654,31 +678,38 @@ acpi_attach(device_t dev)
sc->acpi_s4bios = 1;
#endif
- /* Probe all supported sleep states. */
- acpi_sleep_states[ACPI_STATE_S0] = TRUE;
- for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
+ /*
+ * Probe all supported ACPI sleep states. Awake (S0) is always supported.
+ */
+ acpi_supported_sstates[ACPI_STATE_S0] = TRUE;
+ acpi_supported_stypes[POWER_STYPE_AWAKE] = true;
+ for (state = ACPI_STATE_S1; state <= ACPI_STATE_S5; state++)
if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
__DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
- ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
- acpi_sleep_states[state] = TRUE;
+ ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) {
+ acpi_supported_sstates[state] = TRUE;
+ acpi_supported_stypes[acpi_sstate_to_stype(state)] = true;
+ }
/*
- * Dispatch the default sleep state to devices. The lid switch is set
+ * Dispatch the default sleep type to devices. The lid switch is set
* to UNKNOWN by default to avoid surprising users.
*/
- sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
- ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
- sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
- sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
- ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
- sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
- ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
-
- /* Pick the first valid sleep state for the sleep button default. */
- sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
- for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
- if (acpi_sleep_states[state]) {
- sc->acpi_sleep_button_sx = state;
+ sc->acpi_power_button_stype = acpi_supported_stypes[POWER_STYPE_POWEROFF] ?
+ POWER_STYPE_POWEROFF : POWER_STYPE_UNKNOWN;
+ sc->acpi_lid_switch_stype = POWER_STYPE_UNKNOWN;
+
+ sc->acpi_standby_sx = ACPI_STATE_UNKNOWN;
+ if (acpi_supported_sstates[ACPI_STATE_S1])
+ sc->acpi_standby_sx = ACPI_STATE_S1;
+ else if (acpi_supported_sstates[ACPI_STATE_S2])
+ sc->acpi_standby_sx = ACPI_STATE_S2;
+
+ /* Pick the first valid sleep type for the sleep button default. */
+ sc->acpi_sleep_button_stype = POWER_STYPE_UNKNOWN;
+ for (stype = POWER_STYPE_STANDBY; stype <= POWER_STYPE_HIBERNATE; stype++)
+ if (acpi_supported_stypes[stype]) {
+ sc->acpi_sleep_button_stype = stype;
break;
}
@@ -703,7 +734,7 @@ acpi_attach(device_t dev)
/* Flag our initial states. */
sc->acpi_enabled = TRUE;
- sc->acpi_sstate = ACPI_STATE_S0;
+ sc->acpi_stype = POWER_STYPE_AWAKE;
sc->acpi_sleep_disabled = TRUE;
/* Create the control device */
@@ -715,7 +746,8 @@ acpi_attach(device_t dev)
goto out;
/* Register ACPI again to pass the correct argument of pm_func. */
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc,
+ acpi_supported_stypes);
acpi_platform_osc(dev);
@@ -741,6 +773,58 @@ acpi_attach(device_t dev)
return_VALUE (error);
}
+static int
+acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype)
+{
+ switch (stype) {
+ case POWER_STYPE_AWAKE:
+ return (ACPI_STATE_S0);
+ case POWER_STYPE_STANDBY:
+ return (sc->acpi_standby_sx);
+ case POWER_STYPE_SUSPEND_TO_MEM:
+ return (ACPI_STATE_S3);
+ case POWER_STYPE_HIBERNATE:
+ return (ACPI_STATE_S4);
+ case POWER_STYPE_POWEROFF:
+ return (ACPI_STATE_S5);
+ case POWER_STYPE_SUSPEND_TO_IDLE:
+ case POWER_STYPE_COUNT:
+ case POWER_STYPE_UNKNOWN:
+ return (ACPI_STATE_UNKNOWN);
+ }
+ return (ACPI_STATE_UNKNOWN);
+}
+
+/*
+ * XXX It would be nice if we didn't need this function, but we'd need
+ * acpi_EnterSleepState and acpi_ReqSleepState to take actual ACPI S-states,
+ * which isn't possible right now because suspend-to-idle (which is not an
+ * ACPI S-state and does not map to one) will be implemented here.
+ *
+ * In the future, we should make much of the logic in these functions generic
+ * so that suspend-to-idle can work on non-ACPI builds, and then have
+ * acpi_EnterSleepState and acpi_ReqSleepState take ACPI S-states again.
+ */
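+/*
+ * Note that S1 and S2 both collapse to POWER_STYPE_STANDBY here; the reverse
+ * mapping in acpi_stype_to_sstate() uses sc->acpi_standby_sx to choose
+ * between them.
+ */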
+static enum power_stype
+acpi_sstate_to_stype(int sstate)
+{
+ switch (sstate) {
+ case ACPI_STATE_S0:
+ return (POWER_STYPE_AWAKE);
+ case ACPI_STATE_S1:
+ case ACPI_STATE_S2:
+ return (POWER_STYPE_STANDBY);
+ case ACPI_STATE_S3:
+ return (POWER_STYPE_SUSPEND_TO_MEM);
+ case ACPI_STATE_S4:
+ return (POWER_STYPE_HIBERNATE);
+ case ACPI_STATE_S5:
+ return (POWER_STYPE_POWEROFF);
+ }
+ return (POWER_STYPE_UNKNOWN);
+}
+
static void
acpi_set_power_children(device_t dev, int state)
{
@@ -793,6 +877,7 @@ acpi_resume(device_t dev)
static int
acpi_shutdown(device_t dev)
{
+ struct acpi_softc *sc = device_get_softc(dev);
bus_topo_assert();
@@ -803,7 +888,7 @@ acpi_shutdown(device_t dev)
* Enable any GPEs that are able to power-on the system (i.e., RTC).
* Also, disable any that are not valid for this state (most).
*/
- acpi_wake_prep_walk(ACPI_STATE_S5);
+ acpi_wake_prep_walk(sc, POWER_STYPE_POWEROFF);
return (0);
}
@@ -2036,7 +2121,7 @@ acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
* Note illegal _S0D is evaluated because some systems expect this.
*/
sc = device_get_softc(bus);
- snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
+ snprintf(sxd, sizeof(sxd), "_S%dD", acpi_stype_to_sstate(sc, sc->acpi_stype));
status = acpi_GetInteger(handle, sxd, dstate);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
device_printf(dev, "failed to get %s on %s: %s\n", sxd,
@@ -3134,9 +3219,9 @@ acpi_sleep_force_task(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
- device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
- sc->acpi_next_sstate);
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
+ device_printf(sc->acpi_dev, "force sleep state %s failed\n",
+ power_stype_to_name(sc->acpi_next_stype));
}
static void
@@ -3163,24 +3248,24 @@ acpi_sleep_force(void *arg)
* acks are in.
*/
int
-acpi_ReqSleepState(struct acpi_softc *sc, int state)
+acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
{
#if defined(__amd64__) || defined(__i386__)
struct apm_clone_data *clone;
ACPI_STATUS status;
- if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
+	if (stype <= POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
return (EINVAL);
- if (!acpi_sleep_states[state])
+ if (!acpi_supported_stypes[stype])
return (EOPNOTSUPP);
/*
* If a reboot/shutdown/suspend request is already in progress or
* suspend is blocked due to an upcoming shutdown, just return.
*/
- if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
+ if (rebooting || sc->acpi_next_stype != POWER_STYPE_AWAKE ||
+ suspend_blocked)
return (0);
- }
/* Wait until sleep is enabled. */
while (sc->acpi_sleep_disabled) {
@@ -3189,12 +3274,12 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
ACPI_LOCK(acpi);
- sc->acpi_next_sstate = state;
+ sc->acpi_next_stype = stype;
/* S5 (soft-off) should be entered directly with no waiting. */
- if (state == ACPI_STATE_S5) {
+ if (stype == POWER_STYPE_POWEROFF) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, state);
+ status = acpi_EnterSleepState(sc, stype);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3210,7 +3295,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
/* If devd(8) is not running, immediately enter the sleep state. */
if (!devctl_process_running()) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, state);
+ status = acpi_EnterSleepState(sc, stype);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3225,7 +3310,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
ACPI_UNLOCK(acpi);
/* Now notify devd(8) also. */
- acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
+ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, stype);
return (0);
#else
@@ -3248,17 +3333,17 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
struct acpi_softc *sc;
int ret, sleeping;
- /* If no pending sleep state, return an error. */
+ /* If no pending sleep type, return an error. */
ACPI_LOCK(acpi);
sc = clone->acpi_sc;
- if (sc->acpi_next_sstate == 0) {
+ if (sc->acpi_next_stype == POWER_STYPE_AWAKE) {
ACPI_UNLOCK(acpi);
return (ENXIO);
}
/* Caller wants to abort suspend process. */
if (error) {
- sc->acpi_next_sstate = 0;
+ sc->acpi_next_stype = POWER_STYPE_AWAKE;
callout_stop(&sc->susp_force_to);
device_printf(sc->acpi_dev,
"listener on %s cancelled the pending suspend\n",
@@ -3288,7 +3373,7 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
ACPI_UNLOCK(acpi);
ret = 0;
if (sleeping) {
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
ret = ENODEV;
}
return (ret);
@@ -3345,24 +3430,27 @@ enum acpi_sleep_state {
* Currently we support S1-S5 but S4 is only S4BIOS
*/
static ACPI_STATUS
-acpi_EnterSleepState(struct acpi_softc *sc, int state)
+acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
{
register_t intr;
ACPI_STATUS status;
ACPI_EVENT_STATUS power_button_status;
enum acpi_sleep_state slp_state;
+ int acpi_sstate;
int sleep_result;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
- if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
+ if (stype <= POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
return_ACPI_STATUS (AE_BAD_PARAMETER);
- if (!acpi_sleep_states[state]) {
- device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
- state);
+ if (!acpi_supported_stypes[stype]) {
+ device_printf(sc->acpi_dev, "Sleep type %s not supported on this "
+ "platform\n", power_stype_to_name(stype));
return (AE_SUPPORT);
}
+ acpi_sstate = acpi_stype_to_sstate(sc, stype);
+
/* Re-entry once we're suspending is not allowed. */
status = acpi_sleep_disable(sc);
if (ACPI_FAILURE(status)) {
@@ -3371,7 +3459,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
return (status);
}
- if (state == ACPI_STATE_S5) {
+ if (stype == POWER_STYPE_POWEROFF) {
/*
* Shut down cleanly and power off. This will call us back through the
* shutdown handlers.
@@ -3380,10 +3468,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
return_ACPI_STATUS (AE_OK);
}
- EVENTHANDLER_INVOKE(power_suspend_early);
+ EVENTHANDLER_INVOKE(power_suspend_early, stype);
stop_all_proc();
suspend_all_fs();
- EVENTHANDLER_INVOKE(power_suspend);
+ EVENTHANDLER_INVOKE(power_suspend, stype);
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
@@ -3399,16 +3487,16 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
#endif
/*
- * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
+ * Be sure to hold bus topology lock across DEVICE_SUSPEND/RESUME.
*/
bus_topo_lock();
slp_state = ACPI_SS_NONE;
- sc->acpi_sstate = state;
+ sc->acpi_stype = stype;
/* Enable any GPEs as appropriate and requested by the user. */
- acpi_wake_prep_walk(state);
+ acpi_wake_prep_walk(sc, stype);
slp_state = ACPI_SS_GPE_SET;
/*
@@ -3425,7 +3513,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
}
slp_state = ACPI_SS_DEV_SUSPEND;
- status = AcpiEnterSleepStatePrep(state);
+ status = AcpiEnterSleepStatePrep(acpi_sstate);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
AcpiFormatException(status));
@@ -3438,9 +3526,9 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
suspendclock();
intr = intr_disable();
- if (state != ACPI_STATE_S1) {
- sleep_result = acpi_sleep_machdep(sc, state);
- acpi_wakeup_machdep(sc, state, sleep_result, 0);
+ if (stype != POWER_STYPE_STANDBY) {
+ sleep_result = acpi_sleep_machdep(sc, acpi_sstate);
+ acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 0);
/*
* XXX According to ACPI specification SCI_EN bit should be restored
@@ -3451,10 +3539,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
* This hack is picked up from Linux, which claims that it follows
* Windows behavior.
*/
- if (sleep_result == 1 && state != ACPI_STATE_S4)
+ if (sleep_result == 1 && stype != POWER_STYPE_HIBERNATE)
AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
- if (sleep_result == 1 && state == ACPI_STATE_S3) {
+ if (sleep_result == 1 && stype == POWER_STYPE_SUSPEND_TO_MEM) {
/*
* Prevent mis-interpretation of the wakeup by power button
* as a request for power off.
@@ -3480,20 +3568,20 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
intr_restore(intr);
/* call acpi_wakeup_machdep() again with interrupt enabled */
- acpi_wakeup_machdep(sc, state, sleep_result, 1);
+ acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 1);
- AcpiLeaveSleepStatePrep(state);
+ AcpiLeaveSleepStatePrep(acpi_sstate);
if (sleep_result == -1)
goto backout;
- /* Re-enable ACPI hardware on wakeup from sleep state 4. */
- if (state == ACPI_STATE_S4)
+ /* Re-enable ACPI hardware on wakeup from hibernate. */
+ if (stype == POWER_STYPE_HIBERNATE)
AcpiEnable();
} else {
- status = AcpiEnterSleepState(state);
+ status = AcpiEnterSleepState(acpi_sstate);
intr_restore(intr);
- AcpiLeaveSleepStatePrep(state);
+ AcpiLeaveSleepStatePrep(acpi_sstate);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
AcpiFormatException(status));
@@ -3510,13 +3598,13 @@ backout:
if (slp_state >= ACPI_SS_SLP_PREP)
resumeclock();
if (slp_state >= ACPI_SS_GPE_SET) {
- acpi_wake_prep_walk(state);
- sc->acpi_sstate = ACPI_STATE_S0;
+ acpi_wake_prep_walk(sc, stype);
+ sc->acpi_stype = POWER_STYPE_AWAKE;
}
if (slp_state >= ACPI_SS_DEV_SUSPEND)
DEVICE_RESUME(root_bus);
if (slp_state >= ACPI_SS_SLP_PREP)
- AcpiLeaveSleepState(state);
+ AcpiLeaveSleepState(acpi_sstate);
if (slp_state >= ACPI_SS_SLEPT) {
#if defined(__i386__) || defined(__amd64__)
/* NB: we are still using ACPI timecounter at this point. */
@@ -3525,7 +3613,7 @@ backout:
acpi_resync_clock(sc);
acpi_enable_fixed_events(sc);
}
- sc->acpi_next_sstate = 0;
+ sc->acpi_next_stype = POWER_STYPE_AWAKE;
bus_topo_unlock();
@@ -3544,14 +3632,14 @@ backout:
resume_all_fs();
resume_all_proc();
- EVENTHANDLER_INVOKE(power_resume);
+ EVENTHANDLER_INVOKE(power_resume, stype);
/* Allow another sleep request after a while. */
callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
/* Run /etc/rc.resume after we are back. */
if (devctl_process_running())
- acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
+ acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, stype);
return_ACPI_STATUS (status);
}
@@ -3602,8 +3690,10 @@ acpi_wake_set_enable(device_t dev, int enable)
}
static int
-acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
+acpi_wake_sleep_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype)
{
+ int sstate;
struct acpi_prw_data prw;
device_t dev;
@@ -3612,6 +3702,8 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
return (ENXIO);
dev = acpi_get_device(handle);
+ sstate = acpi_stype_to_sstate(sc, stype);
+
/*
* The destination sleep state must be less than (i.e., higher power)
* or equal to the value specified by _PRW. If this GPE cannot be
@@ -3622,22 +3714,24 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
if (sstate > prw.lowest_wake) {
AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
if (bootverbose)
- device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
- acpi_name(handle), sstate);
+ device_printf(dev, "wake_prep disabled wake for %s (%s)\n",
+ acpi_name(handle), power_stype_to_name(stype));
} else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
acpi_pwr_wake_enable(handle, 1);
acpi_SetInteger(handle, "_PSW", 1);
if (bootverbose)
- device_printf(dev, "wake_prep enabled for %s (S%d)\n",
- acpi_name(handle), sstate);
+ device_printf(dev, "wake_prep enabled for %s (%s)\n",
+ acpi_name(handle), power_stype_to_name(stype));
}
return (0);
}
static int
-acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
+acpi_wake_run_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype)
{
+ int sstate;
struct acpi_prw_data prw;
device_t dev;
@@ -3651,6 +3745,8 @@ acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
return (0);
+ sstate = acpi_stype_to_sstate(sc, stype);
+
/*
* If this GPE couldn't be enabled for the previous sleep state, it was
* disabled before going to sleep so re-enable it. If it was enabled,
@@ -3674,26 +3770,29 @@ acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
static ACPI_STATUS
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
- int sstate;
+ struct acpi_wake_prep_context *ctx = context;
/* If suspending, run the sleep prep function, otherwise wake. */
- sstate = *(int *)context;
if (AcpiGbl_SystemAwakeAndRunning)
- acpi_wake_sleep_prep(handle, sstate);
+ acpi_wake_sleep_prep(ctx->sc, handle, ctx->stype);
else
- acpi_wake_run_prep(handle, sstate);
+ acpi_wake_run_prep(ctx->sc, handle, ctx->stype);
return (AE_OK);
}
/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
static int
-acpi_wake_prep_walk(int sstate)
+acpi_wake_prep_walk(struct acpi_softc *sc, enum power_stype stype)
{
ACPI_HANDLE sb_handle;
+ struct acpi_wake_prep_context ctx = {
+ .sc = sc,
+ .stype = stype,
+ };
if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
- acpi_wake_prep, NULL, &sstate, NULL);
+ acpi_wake_prep, NULL, &ctx, NULL);
return (0);
}
@@ -3852,31 +3951,35 @@ out:
/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
static void
-acpi_system_eventhandler_sleep(void *arg, int state)
+acpi_system_eventhandler_sleep(void *arg, enum power_stype stype)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
int ret;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
/* Check if button action is disabled or unknown. */
- if (state == ACPI_STATE_UNKNOWN)
+	if (stype == POWER_STYPE_UNKNOWN)
return;
- /* Request that the system prepare to enter the given suspend state. */
- ret = acpi_ReqSleepState(sc, state);
+	/* Request that the system prepare to enter the given sleep type. */
+ ret = acpi_ReqSleepState(sc, stype);
if (ret != 0)
device_printf(sc->acpi_dev,
- "request to enter state S%d failed (err %d)\n", state, ret);
+ "request to enter state %s failed (err %d)\n",
+ power_stype_to_name(stype), ret);
return_VOID;
}
static void
-acpi_system_eventhandler_wakeup(void *arg, int state)
+acpi_system_eventhandler_wakeup(void *arg, enum power_stype stype)
{
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
/* Currently, nothing to do for wakeup. */
@@ -3890,14 +3993,14 @@ static void
acpi_invoke_sleep_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, *(enum power_stype *)context);
}
static void
acpi_invoke_wake_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, *(enum power_stype *)context);
}
UINT32
@@ -3913,7 +4016,7 @@ acpi_event_power_button_sleep(void *context)
#if defined(__amd64__) || defined(__i386__)
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
#else
shutdown_nice(RB_POWEROFF);
@@ -3930,7 +4033,7 @@ acpi_event_power_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_power_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3943,7 +4046,7 @@ acpi_event_sleep_button_sleep(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3956,7 +4059,7 @@ acpi_event_sleep_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -4152,7 +4255,8 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
{
struct acpi_softc *sc;
struct acpi_ioctl_hook *hp;
- int error, state;
+ int error;
+ int sstate;
error = 0;
hp = NULL;
@@ -4182,9 +4286,9 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
/* Core system ioctls. */
switch (cmd) {
case ACPIIO_REQSLPSTATE:
- state = *(int *)addr;
- if (state != ACPI_STATE_S5)
- return (acpi_ReqSleepState(sc, state));
+ sstate = *(int *)addr;
+ if (sstate != ACPI_STATE_S5)
+ return (acpi_ReqSleepState(sc, acpi_sstate_to_stype(sstate)));
device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
error = EOPNOTSUPP;
break;
@@ -4193,12 +4297,12 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
error = acpi_AckSleepState(sc->acpi_clone, error);
break;
case ACPIIO_SETSLPSTATE: /* DEPRECATED */
- state = *(int *)addr;
- if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
+ sstate = *(int *)addr;
+ if (sstate < ACPI_STATE_S0 || sstate > ACPI_STATE_S5)
return (EINVAL);
- if (!acpi_sleep_states[state])
+ if (!acpi_supported_sstates[sstate])
return (EOPNOTSUPP);
- if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
+ if (ACPI_FAILURE(acpi_SetSleepState(sc, acpi_sstate_to_stype(sstate))))
error = ENXIO;
break;
default:
@@ -4210,7 +4314,7 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
}
static int
-acpi_sname2sstate(const char *sname)
+acpi_sname_to_sstate(const char *sname)
{
int sstate;
@@ -4225,14 +4329,15 @@ acpi_sname2sstate(const char *sname)
}
static const char *
-acpi_sstate2sname(int sstate)
+acpi_sstate_to_sname(int state)
{
- static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
+ static const char *snames[ACPI_S_STATE_COUNT] = {"S0", "S1", "S2", "S3",
+ "S4", "S5"};
- if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
- return (snames[sstate]);
- else if (sstate == ACPI_STATE_UNKNOWN)
+ if (state == ACPI_STATE_UNKNOWN)
return ("NONE");
+ if (state >= ACPI_STATE_S0 && state < ACPI_S_STATE_COUNT)
+ return (snames[state]);
return (NULL);
}
@@ -4245,8 +4350,8 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
- if (acpi_sleep_states[state])
- sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
+ if (acpi_supported_sstates[state])
+ sbuf_printf(&sb, "%s ", acpi_sstate_to_sname(state));
sbuf_trim(&sb);
sbuf_finish(&sb);
error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
@@ -4255,26 +4360,89 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
}
static int
+acpi_suspend_state_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ struct acpi_softc *sc = oidp->oid_arg1;
+ enum power_stype new_stype;
+ enum power_stype old_stype = power_suspend_stype;
+ int old_sstate = acpi_stype_to_sstate(sc, old_stype);
+ int new_sstate;
+
+ strlcpy(name, acpi_sstate_to_sname(old_sstate), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_sstate = acpi_sname_to_sstate(name);
+ if (new_sstate < 0)
+ return (EINVAL);
+ new_stype = acpi_sstate_to_stype(new_sstate);
+ if (acpi_supported_stypes[new_stype] == false)
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ power_suspend_stype = new_stype;
+ return (err);
+}
+
+static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
char sleep_state[10];
- int error, new_state, old_state;
+ int error;
+ int new_sstate, old_sstate;
- old_state = *(int *)oidp->oid_arg1;
- strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
+ old_sstate = *(int *)oidp->oid_arg1;
+ strlcpy(sleep_state, acpi_sstate_to_sname(old_sstate), sizeof(sleep_state));
error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
if (error == 0 && req->newptr != NULL) {
- new_state = acpi_sname2sstate(sleep_state);
- if (new_state < ACPI_STATE_S1)
+ new_sstate = acpi_sname_to_sstate(sleep_state);
+ if (new_sstate < 0)
return (EINVAL);
- if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
+ if (new_sstate < ACPI_S_STATE_COUNT &&
+ !acpi_supported_sstates[new_sstate])
return (EOPNOTSUPP);
- if (new_state != old_state)
- *(int *)oidp->oid_arg1 = new_state;
+ if (new_sstate != old_sstate)
+ *(int *)oidp->oid_arg1 = new_sstate;
}
return (error);
}
+static int
+acpi_stype_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ int sstate;
+ enum power_stype new_stype, old_stype;
+
+ old_stype = *(enum power_stype *)oidp->oid_arg1;
+ strlcpy(name, power_stype_to_name(old_stype), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_stype = power_name_to_stype(name);
+ if (new_stype == POWER_STYPE_UNKNOWN) {
+ sstate = acpi_sname_to_sstate(name);
+ if (sstate < 0)
+ return (EINVAL);
+ printf("warning: this sysctl expects a sleep type, but an ACPI S-state has "
+ "been passed to it. This functionality is deprecated; see acpi(4).\n");
+ MPASS(sstate < ACPI_S_STATE_COUNT);
+ if (acpi_supported_sstates[sstate] == false)
+ return (EOPNOTSUPP);
+ new_stype = acpi_sstate_to_stype(sstate);
+ }
+
+ if (acpi_supported_stypes[new_stype] == false)
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ *(enum power_stype *)oidp->oid_arg1 = new_stype;
+ return (0);
+}
+
/* Inform devctl(4) when we receive a Notify. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
@@ -4621,12 +4789,10 @@ acpi_reset_interfaces(device_t dev)
}
static int
-acpi_pm_func(u_long cmd, void *arg, ...)
+acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
{
- int state, acpi_state;
int error;
struct acpi_softc *sc;
- va_list ap;
error = 0;
switch (cmd) {
@@ -4636,27 +4802,7 @@ acpi_pm_func(u_long cmd, void *arg, ...)
error = EINVAL;
goto out;
}
-
- va_start(ap, arg);
- state = va_arg(ap, int);
- va_end(ap);
-
- switch (state) {
- case POWER_SLEEP_STATE_STANDBY:
- acpi_state = sc->acpi_standby_sx;
- break;
- case POWER_SLEEP_STATE_SUSPEND:
- acpi_state = sc->acpi_suspend_sx;
- break;
- case POWER_SLEEP_STATE_HIBERNATE:
- acpi_state = ACPI_STATE_S4;
- break;
- default:
- error = EINVAL;
- goto out;
- }
-
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, stype)))
error = ENXIO;
break;
default:
@@ -4674,7 +4820,8 @@ acpi_pm_register(void *arg)
if (!cold || resource_disabled("acpi", 0))
return;
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL,
+ acpi_supported_stypes);
}
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
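The new handlers above all follow the same sysctl_handle_string(9) round
trip: copy the current value into a stack buffer, let sysctl_handle_string()
overwrite the buffer on a write, return early on error or a read-only
request, then validate the new value before committing it. A minimal sketch
of the pattern, reusing the power_stype conversions from this change;
example_stype is a hypothetical variable standing in for power_suspend_stype:

static enum power_stype example_stype;	/* hypothetical backing variable */

static int
example_stype_sysctl(SYSCTL_HANDLER_ARGS)
{
	char name[10];
	enum power_stype new_stype;
	int error;

	/* Publish the current value; a write replaces the buffer contents. */
	strlcpy(name, power_stype_to_name(example_stype), sizeof(name));
	error = sysctl_handle_string(oidp, name, sizeof(name), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Validate before committing, so a failed write changes nothing. */
	new_stype = power_name_to_stype(name);
	if (new_stype == POWER_STYPE_UNKNOWN)
		return (EINVAL);
	example_stype = new_stype;
	return (0);
}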
diff --git a/sys/dev/acpica/acpi_apei.c b/sys/dev/acpica/acpi_apei.c
index 9cfd46c97430..624c81ad1b4f 100644
--- a/sys/dev/acpica/acpi_apei.c
+++ b/sys/dev/acpica/acpi_apei.c
@@ -754,7 +754,7 @@ apei_detach(device_t dev)
apei_nmi = NULL;
apei_nmi_nges = NULL;
if (sc->nges.swi_ih != NULL) {
- swi_remove(&sc->nges.swi_ih);
+ swi_remove(sc->nges.swi_ih);
sc->nges.swi_ih = NULL;
}
if (acpi_get_handle(dev) != NULL) {
diff --git a/sys/dev/acpica/acpi_lid.c b/sys/dev/acpica/acpi_lid.c
index 142791f7282a..fb8755d9f0fe 100644
--- a/sys/dev/acpica/acpi_lid.c
+++ b/sys/dev/acpica/acpi_lid.c
@@ -235,9 +235,9 @@ acpi_lid_notify_status_changed(void *arg)
sc->lid_status ? "opened" : "closed");
if (sc->lid_status == 0)
- EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_sx);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_stype);
else
- EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_sx);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_stype);
out:
ACPI_SERIAL_END(lid);
diff --git a/sys/dev/acpica/acpi_timer.c b/sys/dev/acpica/acpi_timer.c
index 3d51a4211b80..b20912e2f5fb 100644
--- a/sys/dev/acpica/acpi_timer.c
+++ b/sys/dev/acpica/acpi_timer.c
@@ -34,6 +34,7 @@
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>
+#include <sys/power.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -69,8 +70,10 @@ bool acpi_timer_disabled = false;
static void acpi_timer_identify(driver_t *driver, device_t parent);
static int acpi_timer_probe(device_t dev);
static int acpi_timer_attach(device_t dev);
-static void acpi_timer_resume_handler(struct timecounter *);
-static void acpi_timer_suspend_handler(struct timecounter *);
+static void acpi_timer_resume_handler(struct timecounter *,
+ enum power_stype);
+static void acpi_timer_suspend_handler(struct timecounter *,
+ enum power_stype);
static u_int acpi_timer_get_timecount(struct timecounter *tc);
static u_int acpi_timer_get_timecount_safe(struct timecounter *tc);
static int acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS);
@@ -235,7 +238,7 @@ acpi_timer_attach(device_t dev)
}
static void
-acpi_timer_resume_handler(struct timecounter *newtc)
+acpi_timer_resume_handler(struct timecounter *newtc, enum power_stype stype)
{
struct timecounter *tc;
@@ -251,7 +254,7 @@ acpi_timer_resume_handler(struct timecounter *newtc)
}
static void
-acpi_timer_suspend_handler(struct timecounter *newtc)
+acpi_timer_suspend_handler(struct timecounter *newtc, enum power_stype stype)
{
struct timecounter *tc;
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 6887f080311d..71d8e46ab310 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -40,6 +40,7 @@
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/power.h>
#include <sys/selinfo.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -53,20 +54,19 @@ struct acpi_softc {
struct cdev *acpi_dev_t;
int acpi_enabled;
- int acpi_sstate;
+ enum power_stype acpi_stype;
int acpi_sleep_disabled;
struct sysctl_ctx_list acpi_sysctl_ctx;
struct sysctl_oid *acpi_sysctl_tree;
- int acpi_power_button_sx;
- int acpi_sleep_button_sx;
- int acpi_lid_switch_sx;
+ enum power_stype acpi_power_button_stype;
+ enum power_stype acpi_sleep_button_stype;
+ enum power_stype acpi_lid_switch_stype;
int acpi_standby_sx;
- int acpi_suspend_sx;
+ int acpi_s4bios;
int acpi_sleep_delay;
- int acpi_s4bios;
int acpi_do_disable;
int acpi_verbose;
int acpi_handle_reboot;
@@ -74,7 +74,7 @@ struct acpi_softc {
vm_offset_t acpi_wakeaddr;
vm_paddr_t acpi_wakephys;
- int acpi_next_sstate; /* Next suspend Sx state. */
+ enum power_stype acpi_next_stype; /* Next suspend sleep type. */
struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */
STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */
struct callout susp_force_to; /* Force suspend if no acks. */
@@ -411,7 +411,7 @@ ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid,
uint32_t *caps_out, bool query);
ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber);
ACPI_STATUS acpi_SetIntrModel(int model);
-int acpi_ReqSleepState(struct acpi_softc *sc, int state);
+int acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype);
int acpi_AckSleepState(struct apm_clone_data *clone, int error);
ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state);
int acpi_wake_set_enable(device_t dev, int enable);
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index f29d803e99a8..2b4cb37275a6 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -195,6 +195,7 @@ static const struct {
{0x1f3f8086, 0x00, "Intel Avoton (RAID)", 0},
{0x23a38086, 0x00, "Intel Coleto Creek", 0},
{0x31e38086, 0x00, "Intel Gemini Lake", 0},
+ {0x4b638086, 0x00, "Intel Elkhart Lake", 0},
{0x5ae38086, 0x00, "Intel Apollo Lake", 0},
{0x7ae28086, 0x00, "Intel Alder Lake", 0},
{0x8c028086, 0x00, "Intel Lynx Point", 0},
@@ -466,28 +467,6 @@ ahci_ata_probe(device_t dev)
}
static int
-ahci_pci_read_msix_bars(device_t dev, uint8_t *table_bar, uint8_t *pba_bar)
-{
- int cap_offset = 0, ret;
- uint32_t val;
-
- if ((table_bar == NULL) || (pba_bar == NULL))
- return (EINVAL);
-
- ret = pci_find_cap(dev, PCIY_MSIX, &cap_offset);
- if (ret != 0)
- return (EINVAL);
-
- val = pci_read_config(dev, cap_offset + PCIR_MSIX_TABLE, 4);
- *table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
-
- val = pci_read_config(dev, cap_offset + PCIR_MSIX_PBA, 4);
- *pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
-
- return (0);
-}
-
-static int
ahci_pci_attach(device_t dev)
{
struct ahci_controller *ctlr = device_get_softc(dev);
@@ -495,7 +474,6 @@ ahci_pci_attach(device_t dev)
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
int msi_count, msix_count;
- uint8_t table_bar = 0, pba_bar = 0;
uint32_t caps, pi;
msi_count = pci_msi_count(dev);
@@ -583,20 +561,11 @@ ahci_pci_attach(device_t dev)
if (ctlr->quirks & AHCI_Q_NOMSIX)
msix_count = 0;
- /* Read MSI-x BAR IDs if supported */
- if (msix_count > 0) {
- error = ahci_pci_read_msix_bars(dev, &table_bar, &pba_bar);
- if (error == 0) {
- ctlr->r_msix_tab_rid = table_bar;
- ctlr->r_msix_pba_rid = pba_bar;
- } else {
- /* Failed to read BARs, disable MSI-x */
- msix_count = 0;
- }
- }
-
/* Allocate resources for MSI-x table and PBA */
if (msix_count > 0) {
+ ctlr->r_msix_tab_rid = pci_msix_table_bar(dev);
+ ctlr->r_msix_pba_rid = pci_msix_pba_bar(dev);
+
/*
* Allocate new MSI-x table only if not
* allocated before.
@@ -607,8 +576,8 @@ ahci_pci_attach(device_t dev)
ctlr->r_msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&ctlr->r_msix_tab_rid, RF_ACTIVE);
if (ctlr->r_msix_table == NULL) {
- ahci_free_mem(dev);
- return (ENXIO);
+ msix_count = 0;
+ goto no_msix;
}
}
@@ -623,12 +592,12 @@ ahci_pci_attach(device_t dev)
ctlr->r_msix_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&ctlr->r_msix_pba_rid, RF_ACTIVE);
if (ctlr->r_msix_pba == NULL) {
- ahci_free_mem(dev);
- return (ENXIO);
+ msix_count = 0;
}
}
}
+no_msix:
pci_enable_busmaster(dev);
/* Reset controller */
if ((error = ahci_pci_ctlr_reset(dev)) != 0) {
diff --git a/sys/dev/aic7xxx/aic79xx.c b/sys/dev/aic7xxx/aic79xx.c
index 2b5015b20e41..cee45fa5cc8a 100644
--- a/sys/dev/aic7xxx/aic79xx.c
+++ b/sys/dev/aic7xxx/aic79xx.c
@@ -7788,8 +7788,8 @@ ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
}
if (role != ROLE_TARGET) {
- for (;i < maxtarget; i++) {
- for (j = minlun;j < maxlun; j++) {
+ for (; i < maxtarget; i++) {
+ for (j = minlun; j < maxlun; j++) {
u_int scbid;
u_int tcl;
diff --git a/sys/dev/aic7xxx/aic7xxx.c b/sys/dev/aic7xxx/aic7xxx.c
index c09876e9f589..18f68b806948 100644
--- a/sys/dev/aic7xxx/aic7xxx.c
+++ b/sys/dev/aic7xxx/aic7xxx.c
@@ -5903,8 +5903,8 @@ ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
}
if (role != ROLE_TARGET) {
- for (;i < maxtarget; i++) {
- for (j = minlun;j < maxlun; j++) {
+ for (; i < maxtarget; i++) {
+ for (j = minlun; j < maxlun; j++) {
u_int scbid;
u_int tcl;
diff --git a/sys/dev/amdgpio/amdgpio.c b/sys/dev/amdgpio/amdgpio.c
index f39006d95805..20589ff71b0b 100644
--- a/sys/dev/amdgpio/amdgpio.c
+++ b/sys/dev/amdgpio/amdgpio.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2018 Advanced Micro Devices
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -51,11 +55,11 @@
#include <dev/acpica/acpivar.h>
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "amdgpio.h"
static struct resource_spec amdgpio_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
@@ -196,7 +200,7 @@ static int
amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
struct amdgpio_softc *sc;
- uint32_t reg, val, allowed;
+ uint32_t reg, val;
sc = device_get_softc(dev);
@@ -204,18 +208,19 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
if (!amdgpio_valid_pin(sc, pin))
return (EINVAL);
- allowed = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT;
+ if ((flags & ~AMDGPIO_DEFAULT_CAPS) != 0) {
+ device_printf(dev, "disallowed flags (0x%x) trying to be set "
+ "(allowed is 0x%x)\n", flags, AMDGPIO_DEFAULT_CAPS);
+ return (EINVAL);
+ }
- /*
- * Only directtion flag allowed
- */
- if (flags & ~allowed)
+ /* Either input or output must be selected. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == 0)
return (EINVAL);
- /*
- * Not both directions simultaneously
- */
- if ((flags & allowed) == allowed)
+ /* Not both directions simultaneously. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) ==
+ (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT))
return (EINVAL);
/* Set the GPIO mode and state */
@@ -224,16 +229,21 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
reg = AMDGPIO_PIN_REGISTER(pin);
val = amdgpio_read_4(sc, reg);
- if (flags & GPIO_PIN_INPUT) {
+ if ((flags & GPIO_PIN_INPUT) != 0)
val &= ~BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_INPUT;
- } else {
+ else
val |= BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_OUTPUT;
- }
+
+ val &= ~(BIT(PULL_DOWN_ENABLE_OFF) | BIT(PULL_UP_ENABLE_OFF));
+
+ if ((flags & GPIO_PIN_PULLDOWN) != 0)
+ val |= BIT(PULL_DOWN_ENABLE_OFF);
+ if ((flags & GPIO_PIN_PULLUP) != 0)
+ val |= BIT(PULL_UP_ENABLE_OFF);
amdgpio_write_4(sc, reg, val);
+ sc->sc_gpio_pins[pin].gp_flags = flags;
dprintf("pin %d flags 0x%x val 0x%x gp_flags 0x%x\n",
pin, flags, val, sc->sc_gpio_pins[pin].gp_flags);
@@ -359,11 +369,73 @@ amdgpio_probe(device_t dev)
return (rv);
}
+static void
+amdgpio_eoi_locked(struct amdgpio_softc *sc)
+{
+ uint32_t master_reg = amdgpio_read_4(sc, WAKE_INT_MASTER_REG);
+
+ AMDGPIO_ASSERT_LOCKED(sc);
+ master_reg |= EOI_MASK;
+ amdgpio_write_4(sc, WAKE_INT_MASTER_REG, master_reg);
+}
+
+static void
+amdgpio_eoi(struct amdgpio_softc *sc)
+{
+ AMDGPIO_LOCK(sc);
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+}
+
+static int
+amdgpio_intr_filter(void *arg)
+{
+ struct amdgpio_softc *sc = arg;
+ int off;
+ uint32_t reg;
+
+ /* We can lock in the filter routine as it is MTX_SPIN. */
+ AMDGPIO_LOCK(sc);
+
+ /*
+ * TODO Instead of just reading the registers of all pins, we should
+ * read WAKE_INT_STATUS_REG0/1. A bit set in here denotes a group of
+ * 4 pins where at least one has an interrupt for us. Then we can just
+ * iterate over those 4 pins.
+ *
+ * See GPIO_Interrupt_Status_Index_0 in BKDG.
+ */
+ for (size_t pin = 0; pin < AMD_GPIO_PINS_EXPOSED; pin++) {
+ off = AMDGPIO_PIN_REGISTER(pin);
+ reg = amdgpio_read_4(sc, off);
+ if ((reg & UNSERVICED_INTERRUPT_MASK) == 0)
+ continue;
+ /*
+ * Must write 1's to wake/interrupt status bits to clear them.
+ * We can do this simply by writing back to the register.
+ */
+ amdgpio_write_4(sc, off, reg);
+ }
+
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+
+ return (FILTER_HANDLED);
+}
+
+static void
+amdgpio_intr_handler(void *arg)
+{
+ /* TODO */
+}
+
static int
amdgpio_attach(device_t dev)
{
struct amdgpio_softc *sc;
- int i, pin, bank;
+ int i, pin, bank, reg;
+ uint32_t flags;
sc = device_get_softc(dev);
sc->sc_dev = dev;
@@ -386,6 +458,14 @@ amdgpio_attach(device_t dev)
sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);
+ /* Set up interrupt handler. */
+ if (bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE,
+ amdgpio_intr_filter, amdgpio_intr_handler, sc, &sc->sc_intr_handle)
+ != 0) {
+ device_printf(dev, "couldn't set up interrupt\n");
+ goto err_intr;
+ }
+
/* Initialize all possible pins to be Invalid */
for (i = 0; i < AMD_GPIO_PINS_MAX ; i++) {
snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME,
@@ -395,7 +475,12 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[i].gp_flags = 0;
}
- /* Initialize only driver exposed pins with appropriate capabilities */
+ /*
+ * Initialize only driver exposed pins with appropriate capabilities.
+ *
+ * XXX Also mask and disable interrupts on all pins, since we don't
+ * support them at the moment.
+ */
for (i = 0; i < AMD_GPIO_PINS_EXPOSED ; i++) {
pin = kernzp_pins[i].pin_num;
bank = pin/AMD_GPIO_PINS_PER_BANK;
@@ -406,19 +491,28 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[pin].gp_flags =
amdgpio_is_pin_output(sc, pin) ?
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT;
+
+ reg = AMDGPIO_PIN_REGISTER(pin);
+ flags = amdgpio_read_4(sc, reg);
+ flags &= ~(1 << INTERRUPT_ENABLE_OFF);
+ flags &= ~(1 << INTERRUPT_MASK_OFF);
+ amdgpio_write_4(sc, reg, flags);
}
+ amdgpio_eoi(sc);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "could not attach gpiobus\n");
goto err_bus;
}
+ bus_attach_children(dev);
return (0);
err_bus:
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
+err_intr:
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
-
err_rsrc:
AMDGPIO_LOCK_DESTROY(sc);
@@ -433,7 +527,8 @@ amdgpio_detach(device_t dev)
if (sc->sc_busdev)
gpiobus_detach_bus(dev);
-
+ if (sc->sc_intr_handle)
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
AMDGPIO_LOCK_DESTROY(sc);
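The filter above relies on the controller's write-1-to-clear status
convention: writing a pin register's value back acknowledges exactly the
status bits that were latched, while ordinary read/write configuration bits
are rewritten unchanged. A self-contained sketch of that convention with a
simulated register (bit positions follow INTERRUPT_STS_OFF and WAKE_STS_OFF
from amdgpio.h):

#include <assert.h>
#include <stdint.h>

#define INTERRUPT_STS	(1u << 28)	/* INTERRUPT_STS_OFF */
#define WAKE_STS	(1u << 29)	/* WAKE_STS_OFF */
#define STATUS_MASK	(INTERRUPT_STS | WAKE_STS)

/* Model a W1C register: writing 1 to a status bit clears it. */
static uint32_t
w1c_write(uint32_t reg, uint32_t val)
{
	uint32_t sts = (reg & STATUS_MASK) & ~(val & STATUS_MASK);

	return ((val & ~STATUS_MASK) | sts);
}

int
main(void)
{
	uint32_t reg = 0x7 | INTERRUPT_STS;	/* config bits + pending IRQ */

	reg = w1c_write(reg, reg);	/* write the value back to ack */
	assert(reg == 0x7);		/* status cleared, config intact */
	return (0);
}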
diff --git a/sys/dev/amdgpio/amdgpio.h b/sys/dev/amdgpio/amdgpio.h
index aca3039bfc98..3743eba23e17 100644
--- a/sys/dev/amdgpio/amdgpio.h
+++ b/sys/dev/amdgpio/amdgpio.h
@@ -50,7 +50,8 @@
AMD_GPIO_PINS_BANK1 + \
AMD_GPIO_PINS_BANK2 + \
AMD_GPIO_PINS_BANK3)
-#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)
+#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+ GPIO_PIN_PULLDOWN | GPIO_PIN_PULLUP)
/* Register related macros */
#define AMDGPIO_PIN_REGISTER(pin) (pin * 4)
@@ -84,6 +85,9 @@
#define INTERRUPT_STS_OFF 28
#define WAKE_STS_OFF 29
+#define UNSERVICED_INTERRUPT_MASK \
+ ((1 << INTERRUPT_STS_OFF) | (1 << WAKE_STS_OFF))
+
#define DB_TMR_OUT_MASK 0xFUL
#define DB_CNTRL_MASK 0x3UL
#define ACTIVE_LEVEL_MASK 0x3UL
@@ -316,12 +320,13 @@ struct amdgpio_softc {
int sc_npins;
int sc_ngroups;
struct mtx sc_mtx;
- struct resource *sc_res[AMD_GPIO_NUM_PIN_BANK + 1];
+ struct resource *sc_res[2];
bus_space_tag_t sc_bst;
bus_space_handle_t sc_bsh;
struct gpio_pin sc_gpio_pins[AMD_GPIO_PINS_MAX];
const struct pin_info *sc_pin_info;
const struct amd_pingroup *sc_groups;
+ void *sc_intr_handle;
};
struct amdgpio_sysctl {
diff --git a/sys/dev/ath/ath_rate/sample/sample.c b/sys/dev/ath/ath_rate/sample/sample.c
index 291d1ec64ed7..79bf08678249 100644
--- a/sys/dev/ath/ath_rate/sample/sample.c
+++ b/sys/dev/ath/ath_rate/sample/sample.c
@@ -179,7 +179,7 @@ ath_rate_sample_find_min_pktlength(struct ath_softc *sc,
const struct txschedule *sched = &sn->sched[rix0];
int max_pkt_length = 65530; // ATH_AGGR_MAXSIZE
// Note: this may not be true in all cases; need to check?
- int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
// Note: not great, but good enough..
int idx = is_ht40 ? MCS_HT40 : MCS_HT20;
@@ -979,7 +979,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt;
- int is_ht40 = (an->an_node.ni_chw == IEEE80211_STA_RX_BW_40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
int pct;
if (!IS_RATE_DEFINED(sn, rix0))
@@ -1365,7 +1365,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
continue;
printf(" %d %s/%d", dot11rate(rt, rix), dot11rate_label(rt, rix),
calc_usecs_unicast_packet(sc, 1600, rix, 0,0,
- (ni->ni_chw == IEEE80211_STA_RX_BW_40)));
+ (ni->ni_chw == NET80211_STA_RX_BW_40)));
}
printf("\n");
}
@@ -1396,7 +1396,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
- (ni->ni_chw == IEEE80211_STA_RX_BW_40));
+ (ni->ni_chw == NET80211_STA_RX_BW_40));
sn->stats[y][rix].average_tx_time =
sn->stats[y][rix].perfect_tx_time;
}
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 934024ddfbcf..1304b597c545 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -924,6 +924,9 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
| IEEE80211_C_PMGT /* Station side power mgmt */
| IEEE80211_C_SWSLEEP
;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Query the hal to figure out h/w crypto support.
*/
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index 1559b66a7c7d..9ac591c14943 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -971,6 +971,12 @@ ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_handoff_hw(sc, txq, bf);
}
+/*
+ * Set up a frame for encryption.
+ *
+ * If this fails, a non-zero error is returned. The mbuf
+ * must be freed by the caller.
+ */
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
@@ -1547,6 +1553,10 @@ ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
*
* Note that this may cause the mbuf to be reallocated, so
* m0 may not be valid.
+ *
+ * If there's a problem then the mbuf is freed and an error
+ * is returned. The ath_buf then needs to be freed by the
+ * caller.
*/
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
@@ -1588,6 +1598,10 @@ ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
*/
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
&pktlen, &keyix)) {
@@ -2069,9 +2083,8 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* This also sets up the DMA map; crypto; frame parameters, etc */
r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
-
if (r != 0)
- goto done;
+ return (r);
/* At this point m0 could have changed! */
m0 = bf->bf_m;
@@ -2128,7 +2141,6 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, txq, bf);
#endif
-done:
return 0;
}
@@ -2201,6 +2213,10 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
* for QoS frames.
*/
+ /* Allocate a seqno, but only if A-MPDU isn't running. */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni,
m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
@@ -2981,6 +2997,8 @@ ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
ATH_TX_LOCK_ASSERT(sc);
+ /* TODO: can this use ieee80211_output_seqno_assign() now? */
+
/*
* Is it a QOS NULL Data frame? Give it a sequence number from
* the default TID (IEEE80211_NONQOS_TID.)
diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c
index e7ee029fecf0..f42058bacb0d 100644
--- a/sys/dev/ath/if_ath_tx_ht.c
+++ b/sys/dev/ath/if_ath_tx_ht.c
@@ -283,7 +283,7 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
if (IS_HT_RATE(rate)) {
rc[i].flags |= ATH_RC_HT_FLAG;
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw == NET80211_STA_RX_BW_40)
rc[i].flags |= ATH_RC_CW40_FLAG;
/*
@@ -295,13 +295,13 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
* and doesn't return the fractional part, so
* we are always "out" by some amount.
*/
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_40 &&
ieee80211_ht_check_tx_shortgi_40(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
}
- if (ni->ni_chw == IEEE80211_STA_RX_BW_20 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_20 &&
ieee80211_ht_check_tx_shortgi_20(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c
index 290156ff11ca..6bc4bd33e162 100644
--- a/sys/dev/axgbe/if_axgbe_pci.c
+++ b/sys/dev/axgbe/if_axgbe_pci.c
@@ -2415,7 +2415,8 @@ axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_OPACKETS:
return (pstats->txframecount_gb);
case IFCOUNTER_OERRORS:
- return (pstats->txframecount_gb - pstats->txframecount_g);
+ return (if_get_counter_default(ifp, cnt) +
+ pstats->txframecount_gb - pstats->txframecount_g);
case IFCOUNTER_IBYTES:
return (pstats->rxoctetcount_gb);
case IFCOUNTER_OBYTES:
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 16bfce5338a7..6cf39e035ea6 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -1221,7 +1221,7 @@ bce_attach(device_t dev)
sc->bce_bc_ver[j++] = '.';
}
- /* Check if any management firwmare is enabled. */
+ /* Check if any management firmware is enabled. */
val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
diff --git a/sys/dev/bhnd/cores/chipc/chipc_gpio.c b/sys/dev/bhnd/cores/chipc/chipc_gpio.c
index a110bdda5fa7..429de0fc1fd8 100644
--- a/sys/dev/bhnd/cores/chipc/chipc_gpio.c
+++ b/sys/dev/bhnd/cores/chipc/chipc_gpio.c
@@ -173,11 +173,13 @@ chipc_gpio_attach(device_t dev)
if (CC_GPIO_QUIRK(sc, NO_GPIOC)) {
sc->gpiobus = NULL;
} else {
- if ((sc->gpiobus = gpiobus_attach_bus(dev)) == NULL) {
+ if ((sc->gpiobus = gpiobus_add_bus(dev)) == NULL) {
device_printf(dev, "failed to attach gpiobus\n");
error = ENXIO;
goto failed;
}
+
+ bus_attach_children(dev);
}
/* Register as the bus GPIO provider */
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
index 7dd555cfaadb..9e7f4614d9f9 100644
--- a/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
@@ -714,7 +714,7 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- return hwrm_send_message(softc, &req, sizeof(req));
+ return hwrm_send_message(softc, &req, req_len);
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
diff --git a/sys/dev/bnxt/bnxt_en/if_bnxt.c b/sys/dev/bnxt/bnxt_en/if_bnxt.c
index feac3ce54a29..471e26a4b252 100644
--- a/sys/dev/bnxt/bnxt_en/if_bnxt.c
+++ b/sys/dev/bnxt/bnxt_en/if_bnxt.c
@@ -48,6 +48,7 @@
#include <net/ethernet.h>
#include <net/iflib.h>
+#define WANT_NATIVE_PCI_GET_SLOT
#include <linux/pci.h>
#include <linux/kmod.h>
#include <linux/module.h>
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.c b/sys/dev/bnxt/bnxt_re/qplib_res.c
index 69661c67708c..f527af031176 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_res.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.c
@@ -875,7 +875,7 @@ int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
dpi->umdbr = umaddr;
switch (type) {
case BNXT_QPLIB_DPI_TYPE_KERNEL:
- /* priviledged dbr was already mapped just initialize it. */
+ /* privileged dbr was already mapped just initialize it. */
dpi->umdbr = dpit->ucreg.bar_base +
dpit->ucreg.offset + bit_num * PAGE_SIZE;
dpi->dbr = dpit->priv_db;
@@ -1150,7 +1150,7 @@ int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
}
ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
if (!ucreg->bar_reg) {
- dev_err(&res->pdev->dev, "priviledged dpi map failed!\n");
+ dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
return -ENOMEM;
}
diff --git a/sys/dev/bwi/if_bwi.c b/sys/dev/bwi/if_bwi.c
index 1087ca813d65..85146d4c4010 100644
--- a/sys/dev/bwi/if_bwi.c
+++ b/sys/dev/bwi/if_bwi.c
@@ -498,6 +498,9 @@ bwi_attach(struct bwi_softc *sc)
IEEE80211_C_BGSCAN |
IEEE80211_C_MONITOR;
ic->ic_opmode = IEEE80211_M_STA;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_ifattach(ic);
ic->ic_headroom = sizeof(struct bwi_txbuf_hdr);
@@ -1361,6 +1364,7 @@ bwi_start_locked(struct bwi_softc *sc)
(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 &&
ieee80211_crypto_encap(ni, m) == NULL) {
if_inc_counter(ni->ni_vap->iv_ifp,
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index 38bf6f5d31a3..ec9d56661034 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -774,6 +774,7 @@ bwn_attach_post(struct bwn_softc *sc)
;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
/* Determine the NVRAM variable containing our MAC address */
core_unit = bhnd_get_core_unit(sc->sc_dev);
@@ -999,6 +1000,7 @@ bwn_start(struct bwn_softc *sc)
continue;
}
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c
index 9253b17a259d..deaabaaaa1fc 100644
--- a/sys/dev/cpuctl/cpuctl.c
+++ b/sys/dev/cpuctl/cpuctl.c
@@ -344,7 +344,7 @@ ucode_intel_load_rv(void *arg)
d = arg;
if (PCPU_GET(cpuid) == d->cpu)
- d->ret = ucode_intel_load(d->ptr, true, NULL, NULL);
+ d->ret = ucode_intel_load(d->ptr, SAFE, NULL, NULL);
}
static int
@@ -402,19 +402,20 @@ out:
* its workings.
*/
static void
-amd_ucode_wrmsr(void *ucode_ptr)
+amd_ucode_wrmsr(void *arg)
{
+ struct ucode_update_data *d = arg;
uint32_t tmp[4];
- wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)ucode_ptr);
+ if (PCPU_GET(cpuid) == d->cpu)
+ d->ret = wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)d->ptr);
do_cpuid(0, tmp);
}
static int
update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
- void *ptr;
- int ret;
+ struct ucode_update_data d = { .cpu = cpu };
if (args->size == 0 || args->data == NULL) {
DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
@@ -430,18 +431,17 @@ update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
* malloc(9) always returns the pointer aligned at least on
* the size of the allocation.
*/
- ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
- if (copyin(args->data, ptr, args->size) != 0) {
+ d.ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
+ if (copyin(args->data, d.ptr, args->size) != 0) {
DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
- __LINE__, args->data, ptr, args->size);
+ __LINE__, args->data, d.ptr, args->size);
- ret = EFAULT;
+ d.ret = EFAULT;
goto fail;
}
- smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, ptr);
- ret = 0;
+ smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, &d);
fail:
- free(ptr, M_CPUCTL);
- return (ret);
+ free(d.ptr, M_CPUCTL);
+ return (d.ret);
}
static int
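The reworked AMD path uses a common smp_rendezvous(9) idiom: every CPU runs
the callback, but only the CPU named in the shared cookie does the work and
records the result, so the caller gets a per-target status without any extra
synchronization. A condensed sketch, assuming a cookie layout like the one
update_amd() passes (the real struct ucode_update_data may differ) and a
hypothetical do_target_work():

struct example_update_data {
	void	*ptr;	/* payload prepared by the caller */
	int	 cpu;	/* target CPU id */
	int	 ret;	/* written only by the target CPU */
};

static void
example_rendezvous_cb(void *arg)
{
	struct example_update_data *d = arg;

	if (PCPU_GET(cpuid) != d->cpu)
		return;		/* all other CPUs just pass through */
	d->ret = do_target_work(d->ptr);	/* hypothetical */
}

/* Caller: d.ret is valid once smp_rendezvous(NULL, cb, NULL, &d) returns. */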
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index d3820245837a..55f09fefb7e3 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -319,7 +318,7 @@ struct port_info {
char lockname[16];
unsigned long flags;
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
int8_t mdio_addr;
uint8_t port_type;
uint8_t mod_type;
@@ -413,6 +412,24 @@ enum {
NUM_CPL_COOKIES = 8 /* Limited by M_COOKIE. Do not increase. */
};
+/*
+ * Crypto replies use the low bit in the 64-bit cookie of CPL_FW6_PLD as a
+ * CPL cookie to identify the sender/receiver.
+ */
+enum {
+ CPL_FW6_COOKIE_CCR = 0,
+ CPL_FW6_COOKIE_KTLS,
+
+ NUM_CPL_FW6_COOKIES = 2 /* Low bits of cookie value. */
+};
+
+_Static_assert(powerof2(NUM_CPL_FW6_COOKIES),
+ "NUM_CPL_FW6_COOKIES must be a power of 2");
+
+#define CPL_FW6_COOKIE_MASK (NUM_CPL_FW6_COOKIES - 1)
+
+#define CPL_FW6_PLD_COOKIE(cpl) (be64toh((cpl)->data[1]) & ~CPL_FW6_COOKIE_MASK)
+
struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
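With the low cookie bit reserved as a receiver tag, a CPL_FW6_PLD reply can
be demultiplexed with a single switch; the remaining bits carry the sender's
opaque value, which is what CPL_FW6_PLD_COOKIE() recovers. A sketch under
that assumption (both handler functions are hypothetical):

static void
example_fw6_demux(const struct cpl_fw6_pld *cpl)
{
	uint64_t cookie = be64toh(cpl->data[1]);

	switch (cookie & CPL_FW6_COOKIE_MASK) {
	case CPL_FW6_COOKIE_CCR:
		ccr_reply(cookie & ~CPL_FW6_COOKIE_MASK);	/* hypothetical */
		break;
	case CPL_FW6_COOKIE_KTLS:
		ktls_reply(cookie & ~CPL_FW6_COOKIE_MASK);	/* hypothetical */
		break;
	}
}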
@@ -477,6 +494,7 @@ struct sge_eq {
uint8_t doorbells;
uint8_t port_id; /* port_id of the port associated with the eq */
uint8_t tx_chan; /* tx channel used by the eq */
+ uint8_t hw_port; /* hw port used by the eq */
struct mtx eq_lock;
struct tx_desc *desc; /* KVA of descriptor ring */
@@ -640,12 +658,26 @@ struct sge_txq {
uint64_t kern_tls_full;
uint64_t kern_tls_octets;
uint64_t kern_tls_waste;
- uint64_t kern_tls_options;
uint64_t kern_tls_header;
- uint64_t kern_tls_fin;
uint64_t kern_tls_fin_short;
uint64_t kern_tls_cbc;
uint64_t kern_tls_gcm;
+ union {
+ struct {
+ /* T6 only. */
+ uint64_t kern_tls_options;
+ uint64_t kern_tls_fin;
+ };
+ struct {
+ /* T7 only. */
+ uint64_t kern_tls_ghash_received;
+ uint64_t kern_tls_ghash_requested;
+ uint64_t kern_tls_lso;
+ uint64_t kern_tls_partial_ghash;
+ uint64_t kern_tls_splitmode;
+ uint64_t kern_tls_trailer;
+ };
+ };
/* stats for not-that-common events */
@@ -769,6 +801,16 @@ struct sge_ofld_txq {
counter_u64_t tx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);
+static inline int
+ofld_txq_group(int val, int mask)
+{
+ const uint32_t ngroup = 1 << bitcount32(mask);
+ const int mshift = ffs(mask) - 1;
+ const uint32_t gmask = ngroup - 1;
+
+ return (val >> mshift & gmask);
+}
+
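ofld_txq_group() treats mask as a field selector: ngroup is 2 raised to the
number of set bits, mshift is the offset of the mask's lowest set bit
(ffs() is 1-based, hence the -1), and for a contiguous mask the return value
is the field of val that the mask covers. Worked through for a hypothetical
mask of 0x30:

/*
 * ofld_txq_group(0x25, 0x30):
 *	bitcount32(0x30) == 2	->  ngroup = 1 << 2 = 4, gmask = 3
 *	ffs(0x30) == 5		->  mshift = 4
 *	(0x25 >> 4) & 3 == 2	->  result is 2
 */
MPASS(ofld_txq_group(0x25, 0x30) == 2);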
#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
/* Items used by the driver rx ithread are in this cacheline. */
@@ -836,6 +878,7 @@ struct sge_nm_txq {
} __aligned(CACHE_LINE_SIZE);
struct sge {
+ int nctrlq; /* total # of control queues */
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx queues */
int nofldrxq; /* total # of TOE rx queues */
@@ -937,7 +980,8 @@ struct adapter {
struct taskqueue *tq[MAX_NPORTS]; /* General purpose taskqueues */
struct port_info *port[MAX_NPORTS];
- uint8_t chan_map[MAX_NCHAN]; /* channel -> port */
+ uint8_t chan_map[MAX_NCHAN]; /* tx_chan -> port_id */
+ uint8_t port_map[MAX_NPORTS]; /* hw_port -> port_id */
CXGBE_LIST_HEAD(, clip_entry) *clip_table;
TAILQ_HEAD(, clip_entry) clip_pending; /* these need hw update. */
@@ -959,9 +1003,12 @@ struct adapter {
vmem_t *key_map;
struct tls_tunables tlst;
+ vmem_t *pbl_arena;
+ vmem_t *stag_arena;
+
uint8_t doorbells;
int offload_map; /* port_id's with IFCAP_TOE enabled */
- int bt_map; /* tx_chan's with BASE-T */
+ int bt_map; /* hw_port's that are BASE-T */
int active_ulds; /* ULDs activated on this adapter */
int flags;
int debug_flags;
@@ -988,6 +1035,7 @@ struct adapter {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -1409,6 +1457,14 @@ void t6_ktls_modunload(void);
int t6_ktls_try(if_t, struct socket *, struct ktls_session *);
int t6_ktls_parse_pkt(struct mbuf *);
int t6_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
+
+/* t7_kern_tls.c */
+int t7_tls_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
+ struct m_snd_tag **);
+void t7_ktls_modload(void);
+void t7_ktls_modunload(void);
+int t7_ktls_parse_pkt(struct mbuf *);
+int t7_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
#endif
/* t4_keyctx.c */
@@ -1536,6 +1592,27 @@ int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbu
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);
+/* t4_tpt.c */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_WRITE_MEM_DMA_LEN \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_sgl), 16)
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_WRITE_MEM_INLINE_LEN(len) \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + \
+ roundup((len), T4_ULPTX_MIN_IO), 16)
+
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+uint32_t t4_stag_alloc(struct adapter *, int);
+void t4_stag_free(struct adapter *, uint32_t, int);
+void t4_init_tpt(struct adapter *);
+void t4_free_tpt(struct adapter *);
+void t4_write_mem_dma_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, vm_paddr_t, uint64_t);
+void t4_write_mem_inline_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, void *, uint64_t);
+
static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 6e80ce40648b..6b36832a7464 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +31,15 @@
#include "t4_hw.h"
+#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC0 | F_EDC0 | \
+ F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+ F_CPL_SWITCH | F_SGE | F_ULP_TX | F_SF)
+
+#define GLBL_T7_INTR_MASK (F_CIM | F_MPS | F_PL | F_T7_PCIE | F_T7_MC0 | \
+ F_T7_EDC0 | F_T7_EDC1 | F_T7_LE | F_T7_TP | \
+ F_T7_MA | F_T7_PM_TX | F_T7_PM_RX | F_T7_ULP_RX | \
+ F_T7_CPL_SWITCH | F_T7_SGE | F_T7_ULP_TX | F_SF)
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -77,6 +85,18 @@ enum {
FEC_MODULE = 1 << 6, /* FEC suggested by the cable/transceiver. */
};
+enum {
+ ULP_T10DIF_ISCSI = 1 << 0,
+ ULP_T10DIF_FCOE = 1 << 1
+};
+
+enum {
+ ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_INLINE_TLS = 1 << 1,
+ ULP_CRYPTO_INLINE_IPSEC = 1 << 2,
+ ULP_CRYPTO_OFLD_OVER_IPSEC_INLINE = 1 << 4
+};
+
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
struct port_stats {
@@ -230,6 +250,15 @@ struct tp_cpl_stats {
struct tp_rdma_stats {
u32 rqe_dfr_pkt;
u32 rqe_dfr_mod;
+ u32 pkts_in[MAX_NCHAN];
+ u64 bytes_in[MAX_NCHAN];
+ /*
+ * When reading RDMA stats, the addresses of the RDMA_IN and RDMA_OUT
+ * counters differ by 4 * u32; this padding lets both be read at once.
+ */
+ u32 padding[4];
+ u32 pkts_out[MAX_NCHAN];
+ u64 bytes_out[MAX_NCHAN];
};
struct sge_params {
@@ -259,7 +288,10 @@ struct tp_params {
uint32_t max_rx_pdu;
uint32_t max_tx_pdu;
bool rx_pkt_encap;
+ uint8_t lb_mode;
+ uint8_t lb_nchan;
+ int8_t ipsecidx_shift;
int8_t fcoe_shift;
int8_t port_shift;
int8_t vnic_shift;
@@ -270,6 +302,9 @@ struct tp_params {
int8_t macmatch_shift;
int8_t matchtype_shift;
int8_t frag_shift;
+ int8_t roce_shift;
+ int8_t synonly_shift;
+ int8_t tcpflags_shift;
};
/* Use same modulation queue as the tx channel. */
@@ -285,6 +320,22 @@ struct vpd_params {
u8 md[MD_LEN + 1];
};
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int mps;
@@ -308,8 +359,11 @@ struct chip_params {
u8 pm_stats_cnt;
u8 cng_ch_bits_log; /* congestion channel map bits width */
u8 nsched_cls;
+ u8 cim_num_ibq;
u8 cim_num_obq;
- u8 filter_opt_len;
+ u8 filter_opt_len; /* number of bits for optional fields */
+ u8 filter_num_opt; /* number of optional fields */
+ u8 sge_ctxt_size;
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
@@ -360,6 +414,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp; /* PF-only */
struct vpd_params vpd;
+ struct pf_resources pfres; /* PF-only */
struct pci_params pci;
struct devlog_params devlog; /* PF-only */
struct rss_params rss; /* VF-only */
@@ -399,12 +454,13 @@ struct adapter_params {
unsigned int ofldq_wr_cred;
unsigned int eo_wr_cred;
- unsigned int max_ordird_qp;
- unsigned int max_ird_adapter;
+ unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
+ unsigned int max_ird_adapter; /* Max read depth per adapter */
/* These values are for all ports (8b/port, upto 4 ports) */
uint32_t mps_bg_map; /* MPS rx buffer group map */
uint32_t tp_ch_map; /* TPCHMAP from firmware */
+ uint32_t tx_tp_ch_map; /* TX_TPCHMAP from firmware */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
@@ -412,11 +468,15 @@ struct adapter_params {
bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */
unsigned int max_pkts_per_eth_tx_pkts_wr;
uint8_t nsched_cls; /* # of usable sched classes per port */
+
+ uint8_t ncores;
+ uint32_t tid_qid_sel_mask; /* TID based QID selection mask */
};
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
+#define CHELSIO_T7 0x7
/*
* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -509,10 +569,11 @@ static inline int is_hashfilter(const struct adapter *adap)
static inline int is_ktls(const struct adapter *adap)
{
- return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW;
+ return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW ||
+ adap->params.chipid == CHELSIO_T7;
}
-static inline int chip_id(struct adapter *adap)
+static inline int chip_id(const struct adapter *adap)
{
return adap->params.chipid;
}
@@ -537,6 +598,11 @@ static inline int is_t6(struct adapter *adap)
return adap->params.chipid == CHELSIO_T6;
}
+static inline int is_t7(struct adapter *adap)
+{
+ return adap->params.chipid == CHELSIO_T7;
+}
+
static inline int is_fpga(struct adapter *adap)
{
return adap->params.fpga;
@@ -641,7 +707,7 @@ int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int si
int t4_load_boot(struct adapter *adap, u8 *boot_data,
unsigned int boot_addr, unsigned int size);
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end);
-int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr);
@@ -655,9 +721,10 @@ int t4_init_hw(struct adapter *adapter, u32 fw_params);
const struct chip_params *t4_get_chip_params(int chipid);
int t4_prep_adapter(struct adapter *adapter, u32 *buf);
int t4_shutdown_adapter(struct adapter *adapter);
-int t4_init_devlog_params(struct adapter *adapter, int fw_attach);
+int t4_init_devlog_ncores_params(struct adapter *adapter, int fw_attach);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_width(const struct adapter *adap, int filter_field);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id);
void t4_fatal_err(struct adapter *adapter, bool fw_error);
@@ -665,6 +732,7 @@ int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -691,19 +759,60 @@ void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok);
int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp);
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[]);
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres);
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr);
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+
+static inline void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size,
+ u16 *thres)
+{
+ t4_read_cimq_cfg_core(adap, 0, base, size, thres);
+}
+
+static inline int t4_read_cim_ibq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_ibq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_read_cim_obq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_obq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_cim_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_read_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_write(struct adapter *adap, unsigned int addr,
+ unsigned int n, const unsigned int *valp)
+{
+ return t4_cim_write_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_read_la(struct adapter *adap, u32 *la_buf, u32 *wrptr)
+{
+ return t4_cim_read_la_core(adap, 0, la_buf, wrptr);
+}
+
int t4_get_flash_params(struct adapter *adapter);
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach);
@@ -919,6 +1028,8 @@ int t4_configure_ringbb(struct adapter *adap);
int t4_configure_add_smac(struct adapter *adap);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp);
static inline int t4vf_query_params(struct adapter *adapter,
unsigned int nparams, const u32 *params,
@@ -969,8 +1080,8 @@ port_top_speed(const struct port_info *pi)
sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
static inline void *
-mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
- uint64_t mask, uint64_t val)
+mk_set_tcb_field_ulp_with_rpl(struct adapter *sc, void *cur, int tid,
+ uint16_t word, uint64_t mask, uint64_t val, const int qid)
{
struct ulp_txpkt *ulpmc;
struct ulptx_idata *ulpsc;
@@ -989,8 +1100,21 @@ mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htobe16(F_NO_REPLY);
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+
+ if (qid == -1) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+ } else {
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ req->word_cookie = htobe16(V_WORD(word) |
+ V_COOKIE(CPL_COOKIE_TOM));
+ }
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1006,4 +1130,11 @@ mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
return (ulpsc + 1);
}
+
+static inline void *
+mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
+ uint64_t mask, uint64_t val)
+{
+ return (mk_set_tcb_field_ulp_with_rpl(sc, cur, tid, word, mask, val, -1));
+}
#endif /* __CHELSIO_COMMON_H */
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 07940a44f66e..eb7ea9acc108 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -246,6 +245,8 @@ struct port_tx_state {
u32
t4_port_reg(struct adapter *adap, u8 port, u32 reg)
{
+ if (chip_id(adap) > CHELSIO_T6)
+ return T7_PORT_REG(port, reg);
if (chip_id(adap) > CHELSIO_T4)
return T5_PORT_REG(port, reg);
return PORT_REG(port, reg);
@@ -268,8 +269,10 @@ read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
int i;
- for_each_port(sc, i)
- read_tx_state_one(sc, i, &tx_state[i]);
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] != 0xff)
+ read_tx_state_one(sc, i, &tx_state[i]);
+ }
}
static void
@@ -279,7 +282,9 @@ check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
uint64_t tx_frames, rx_pause;
int i;
- for_each_port(sc, i) {
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] == 0xff)
+ continue;
rx_pause = tx_state[i].rx_pause;
tx_frames = tx_state[i].tx_frames;
read_tx_state_one(sc, i, &tx_state[i]); /* update */
@@ -351,7 +356,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
return -EINVAL;
if (adap->flags & IS_VF) {
- if (is_t6(adap))
+ if (chip_id(adap) >= CHELSIO_T6)
data_reg = FW_T6VF_MBDATA_BASE_ADDR;
else
data_reg = FW_T4VF_MBDATA_BASE_ADDR;
@@ -508,9 +513,8 @@ failed:
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
- sleep_ok, FW_CMD_MAX_TIMEOUT);
-
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
+ sleep_ok, FW_CMD_MAX_TIMEOUT);
}
static int t4_edc_err_read(struct adapter *adap, int idx)
@@ -799,6 +803,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
case CHELSIO_T5:
case CHELSIO_T6:
+ case CHELSIO_T7:
if (adapter->flags & IS_VF)
return FW_T4VF_REGMAP_SIZE;
return T5_REGMAP_SIZE;
@@ -2639,6 +2644,638 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
+ static const unsigned int t7_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x115c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11d0,
+ 0x11fc, 0x1278,
+ 0x1280, 0x1368,
+ 0x1700, 0x172c,
+ 0x173c, 0x1760,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3044,
+ 0x3060, 0x3064,
+ 0x30a4, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35e0, 0x35ec,
+ 0x3600, 0x37fc,
+ 0x3804, 0x3818,
+ 0x3880, 0x388c,
+ 0x3900, 0x3904,
+ 0x3910, 0x3978,
+ 0x3980, 0x399c,
+ 0x4700, 0x4720,
+ 0x4728, 0x475c,
+ 0x480c, 0x4814,
+ 0x4890, 0x489c,
+ 0x48a4, 0x48ac,
+ 0x48b8, 0x48c4,
+ 0x4900, 0x4924,
+ 0x4ffc, 0x4ffc,
+ 0x5500, 0x5624,
+ 0x56c4, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f04,
+ 0x5f0c, 0x5f10,
+ 0x5f20, 0x5f88,
+ 0x5f90, 0x5fd8,
+ 0x6000, 0x6020,
+ 0x6028, 0x6030,
+ 0x6044, 0x609c,
+ 0x60a8, 0x60ac,
+ 0x60b8, 0x60ec,
+ 0x6100, 0x6104,
+ 0x6118, 0x611c,
+ 0x6150, 0x6150,
+ 0x6180, 0x61b8,
+ 0x7700, 0x77a8,
+ 0x77b0, 0x7888,
+ 0x78cc, 0x7970,
+ 0x7b00, 0x7b00,
+ 0x7b08, 0x7b0c,
+ 0x7b24, 0x7b84,
+ 0x7b8c, 0x7c2c,
+ 0x7c34, 0x7c40,
+ 0x7c48, 0x7c68,
+ 0x7c70, 0x7c7c,
+ 0x7d00, 0x7ddc,
+ 0x7de4, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e74,
+ 0x7e80, 0x7ee0,
+ 0x7ee8, 0x7f0c,
+ 0x7f20, 0x7f5c,
+ 0x8dc0, 0x8de8,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e30,
+ 0x8e7c, 0x8ee8,
+ 0x8f88, 0x8f88,
+ 0x8f90, 0x8fb0,
+ 0x8fb8, 0x9058,
+ 0x9074, 0x90f8,
+ 0x9100, 0x912c,
+ 0x9138, 0x9188,
+ 0x9400, 0x9414,
+ 0x9430, 0x9440,
+ 0x9454, 0x9454,
+ 0x945c, 0x947c,
+ 0x9498, 0x94b8,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9804,
+ 0x9854, 0x9854,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xa000, 0xa06c,
+ 0xa080, 0xa0ec,
+ 0xa100, 0xa16c,
+ 0xa180, 0xa1ec,
+ 0xa200, 0xa26c,
+ 0xa280, 0xa2ec,
+ 0xa300, 0xa36c,
+ 0xa380, 0xa458,
+ 0xa460, 0xa4f8,
+ 0xd000, 0xd03c,
+ 0xd100, 0xd134,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xe00c,
+ 0xf000, 0xf008,
+ 0xf010, 0xf06c,
+ 0x11000, 0x11014,
+ 0x11048, 0x11120,
+ 0x11130, 0x11144,
+ 0x11174, 0x11178,
+ 0x11190, 0x111a0,
+ 0x111e4, 0x112f0,
+ 0x11300, 0x1133c,
+ 0x11408, 0x1146c,
+ 0x12000, 0x12004,
+ 0x12060, 0x122c4,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191a0,
+ 0x191ac, 0x191c8,
+ 0x191d0, 0x191e4,
+ 0x19250, 0x19250,
+ 0x19258, 0x19268,
+ 0x19278, 0x19278,
+ 0x19280, 0x192b0,
+ 0x192bc, 0x192f0,
+ 0x19300, 0x19308,
+ 0x19310, 0x19318,
+ 0x19320, 0x19328,
+ 0x19330, 0x19330,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x1947c,
+ 0x19488, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cdc,
+ 0x19ce4, 0x19cf8,
+ 0x19d00, 0x19d30,
+ 0x19d50, 0x19d80,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19de0,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fbc, 0x19fbc,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a108,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
+ 0x1a1fc, 0x1a29c,
+ 0x1a2a8, 0x1a2b8,
+ 0x1a2c0, 0x1a388,
+ 0x1a398, 0x1a3ac,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e4,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e4,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae4,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee4,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e4,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e4,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae4,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee4,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30038,
+ 0x30100, 0x3017c,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301e0,
+ 0x30200, 0x30344,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3065c,
+ 0x30800, 0x30848,
+ 0x30850, 0x308a8,
+ 0x308b8, 0x308c0,
+ 0x308cc, 0x308dc,
+ 0x30900, 0x30904,
+ 0x3090c, 0x30914,
+ 0x3091c, 0x30928,
+ 0x30930, 0x3093c,
+ 0x30944, 0x30948,
+ 0x30954, 0x30974,
+ 0x3097c, 0x30980,
+ 0x30a00, 0x30a20,
+ 0x30a38, 0x30a3c,
+ 0x30a50, 0x30a50,
+ 0x30a80, 0x30a80,
+ 0x30a88, 0x30aa8,
+ 0x30ab0, 0x30ab4,
+ 0x30ac8, 0x30ad4,
+ 0x30b28, 0x30b84,
+ 0x30b98, 0x30bb8,
+ 0x30c98, 0x30d14,
+ 0x31000, 0x31020,
+ 0x31038, 0x3103c,
+ 0x31050, 0x31050,
+ 0x31080, 0x31080,
+ 0x31088, 0x310a8,
+ 0x310b0, 0x310b4,
+ 0x310c8, 0x310d4,
+ 0x31128, 0x31184,
+ 0x31198, 0x311b8,
+ 0x32000, 0x32038,
+ 0x32100, 0x3217c,
+ 0x32190, 0x321a0,
+ 0x321a8, 0x321b8,
+ 0x321c4, 0x321c8,
+ 0x321d0, 0x321e0,
+ 0x32200, 0x32344,
+ 0x32400, 0x324b4,
+ 0x324c0, 0x3252c,
+ 0x32540, 0x3265c,
+ 0x32800, 0x32848,
+ 0x32850, 0x328a8,
+ 0x328b8, 0x328c0,
+ 0x328cc, 0x328dc,
+ 0x32900, 0x32904,
+ 0x3290c, 0x32914,
+ 0x3291c, 0x32928,
+ 0x32930, 0x3293c,
+ 0x32944, 0x32948,
+ 0x32954, 0x32974,
+ 0x3297c, 0x32980,
+ 0x32a00, 0x32a20,
+ 0x32a38, 0x32a3c,
+ 0x32a50, 0x32a50,
+ 0x32a80, 0x32a80,
+ 0x32a88, 0x32aa8,
+ 0x32ab0, 0x32ab4,
+ 0x32ac8, 0x32ad4,
+ 0x32b28, 0x32b84,
+ 0x32b98, 0x32bb8,
+ 0x32c98, 0x32d14,
+ 0x33000, 0x33020,
+ 0x33038, 0x3303c,
+ 0x33050, 0x33050,
+ 0x33080, 0x33080,
+ 0x33088, 0x330a8,
+ 0x330b0, 0x330b4,
+ 0x330c8, 0x330d4,
+ 0x33128, 0x33184,
+ 0x33198, 0x331b8,
+ 0x34000, 0x34038,
+ 0x34100, 0x3417c,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341e0,
+ 0x34200, 0x34344,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3465c,
+ 0x34800, 0x34848,
+ 0x34850, 0x348a8,
+ 0x348b8, 0x348c0,
+ 0x348cc, 0x348dc,
+ 0x34900, 0x34904,
+ 0x3490c, 0x34914,
+ 0x3491c, 0x34928,
+ 0x34930, 0x3493c,
+ 0x34944, 0x34948,
+ 0x34954, 0x34974,
+ 0x3497c, 0x34980,
+ 0x34a00, 0x34a20,
+ 0x34a38, 0x34a3c,
+ 0x34a50, 0x34a50,
+ 0x34a80, 0x34a80,
+ 0x34a88, 0x34aa8,
+ 0x34ab0, 0x34ab4,
+ 0x34ac8, 0x34ad4,
+ 0x34b28, 0x34b84,
+ 0x34b98, 0x34bb8,
+ 0x34c98, 0x34d14,
+ 0x35000, 0x35020,
+ 0x35038, 0x3503c,
+ 0x35050, 0x35050,
+ 0x35080, 0x35080,
+ 0x35088, 0x350a8,
+ 0x350b0, 0x350b4,
+ 0x350c8, 0x350d4,
+ 0x35128, 0x35184,
+ 0x35198, 0x351b8,
+ 0x36000, 0x36038,
+ 0x36100, 0x3617c,
+ 0x36190, 0x361a0,
+ 0x361a8, 0x361b8,
+ 0x361c4, 0x361c8,
+ 0x361d0, 0x361e0,
+ 0x36200, 0x36344,
+ 0x36400, 0x364b4,
+ 0x364c0, 0x3652c,
+ 0x36540, 0x3665c,
+ 0x36800, 0x36848,
+ 0x36850, 0x368a8,
+ 0x368b8, 0x368c0,
+ 0x368cc, 0x368dc,
+ 0x36900, 0x36904,
+ 0x3690c, 0x36914,
+ 0x3691c, 0x36928,
+ 0x36930, 0x3693c,
+ 0x36944, 0x36948,
+ 0x36954, 0x36974,
+ 0x3697c, 0x36980,
+ 0x36a00, 0x36a20,
+ 0x36a38, 0x36a3c,
+ 0x36a50, 0x36a50,
+ 0x36a80, 0x36a80,
+ 0x36a88, 0x36aa8,
+ 0x36ab0, 0x36ab4,
+ 0x36ac8, 0x36ad4,
+ 0x36b28, 0x36b84,
+ 0x36b98, 0x36bb8,
+ 0x36c98, 0x36d14,
+ 0x37000, 0x37020,
+ 0x37038, 0x3703c,
+ 0x37050, 0x37050,
+ 0x37080, 0x37080,
+ 0x37088, 0x370a8,
+ 0x370b0, 0x370b4,
+ 0x370c8, 0x370d4,
+ 0x37128, 0x37184,
+ 0x37198, 0x371b8,
+ 0x38000, 0x380b0,
+ 0x380b8, 0x38130,
+ 0x38140, 0x38140,
+ 0x38150, 0x38154,
+ 0x38160, 0x381c4,
+ 0x381f0, 0x38204,
+ 0x3820c, 0x38214,
+ 0x3821c, 0x3822c,
+ 0x38244, 0x38244,
+ 0x38254, 0x38274,
+ 0x3827c, 0x38280,
+ 0x38300, 0x38304,
+ 0x3830c, 0x38314,
+ 0x3831c, 0x3832c,
+ 0x38344, 0x38344,
+ 0x38354, 0x38374,
+ 0x3837c, 0x38380,
+ 0x38400, 0x38424,
+ 0x38438, 0x3843c,
+ 0x38480, 0x38480,
+ 0x384a8, 0x384a8,
+ 0x384b0, 0x384b4,
+ 0x384c8, 0x38514,
+ 0x38600, 0x3860c,
+ 0x3861c, 0x38624,
+ 0x38900, 0x38924,
+ 0x38938, 0x3893c,
+ 0x38980, 0x38980,
+ 0x389a8, 0x389a8,
+ 0x389b0, 0x389b4,
+ 0x389c8, 0x38a14,
+ 0x38b00, 0x38b0c,
+ 0x38b1c, 0x38b24,
+ 0x38e00, 0x38e00,
+ 0x38e18, 0x38e20,
+ 0x38e38, 0x38e40,
+ 0x38e58, 0x38e60,
+ 0x38e78, 0x38e80,
+ 0x38e98, 0x38ea0,
+ 0x38eb8, 0x38ec0,
+ 0x38ed8, 0x38ee0,
+ 0x38ef8, 0x38f08,
+ 0x38f10, 0x38f2c,
+ 0x38f80, 0x38ffc,
+ 0x39080, 0x39080,
+ 0x39088, 0x39090,
+ 0x39100, 0x39108,
+ 0x39120, 0x39128,
+ 0x39140, 0x39148,
+ 0x39160, 0x39168,
+ 0x39180, 0x39188,
+ 0x391a0, 0x391a8,
+ 0x391c0, 0x391c8,
+ 0x391e0, 0x391e8,
+ 0x39200, 0x39200,
+ 0x39208, 0x39240,
+ 0x39300, 0x39300,
+ 0x39308, 0x39340,
+ 0x39400, 0x39400,
+ 0x39408, 0x39440,
+ 0x39500, 0x39500,
+ 0x39508, 0x39540,
+ 0x39600, 0x39600,
+ 0x39608, 0x39640,
+ 0x39700, 0x39700,
+ 0x39708, 0x39740,
+ 0x39800, 0x39800,
+ 0x39808, 0x39840,
+ 0x39900, 0x39900,
+ 0x39908, 0x39940,
+ 0x39a00, 0x39a04,
+ 0x39a10, 0x39a14,
+ 0x39a1c, 0x39aa8,
+ 0x39b00, 0x39ecc,
+ 0x3a000, 0x3a004,
+ 0x3a050, 0x3a084,
+ 0x3a090, 0x3a09c,
+ 0x3e000, 0x3e020,
+ 0x3e03c, 0x3e05c,
+ 0x3e100, 0x3e120,
+ 0x3e13c, 0x3e15c,
+ 0x3e200, 0x3e220,
+ 0x3e23c, 0x3e25c,
+ 0x3e300, 0x3e320,
+ 0x3e33c, 0x3e35c,
+ 0x3f000, 0x3f034,
+ 0x3f100, 0x3f130,
+ 0x3f200, 0x3f218,
+ 0x44000, 0x44014,
+ 0x44020, 0x44028,
+ 0x44030, 0x44030,
+ 0x44100, 0x44114,
+ 0x44120, 0x44128,
+ 0x44130, 0x44130,
+ 0x44200, 0x44214,
+ 0x44220, 0x44228,
+ 0x44230, 0x44230,
+ 0x44300, 0x44314,
+ 0x44320, 0x44328,
+ 0x44330, 0x44330,
+ 0x44400, 0x44414,
+ 0x44420, 0x44428,
+ 0x44430, 0x44430,
+ 0x44500, 0x44514,
+ 0x44520, 0x44528,
+ 0x44530, 0x44530,
+ 0x44714, 0x44718,
+ 0x44730, 0x44730,
+ 0x447c0, 0x447c0,
+ 0x447f0, 0x447f0,
+ 0x447f8, 0x447fc,
+ 0x45000, 0x45014,
+ 0x45020, 0x45028,
+ 0x45030, 0x45030,
+ 0x45100, 0x45114,
+ 0x45120, 0x45128,
+ 0x45130, 0x45130,
+ 0x45200, 0x45214,
+ 0x45220, 0x45228,
+ 0x45230, 0x45230,
+ 0x45300, 0x45314,
+ 0x45320, 0x45328,
+ 0x45330, 0x45330,
+ 0x45400, 0x45414,
+ 0x45420, 0x45428,
+ 0x45430, 0x45430,
+ 0x45500, 0x45514,
+ 0x45520, 0x45528,
+ 0x45530, 0x45530,
+ 0x45714, 0x45718,
+ 0x45730, 0x45730,
+ 0x457c0, 0x457c0,
+ 0x457f0, 0x457f0,
+ 0x457f8, 0x457fc,
+ 0x46000, 0x46010,
+ 0x46020, 0x46034,
+ 0x46040, 0x46050,
+ 0x46060, 0x46088,
+ 0x47000, 0x4709c,
+ 0x470c0, 0x470d4,
+ 0x47100, 0x471a8,
+ 0x471b0, 0x471e8,
+ 0x47200, 0x47210,
+ 0x4721c, 0x47230,
+ 0x47238, 0x47238,
+ 0x47240, 0x472ac,
+ 0x472d0, 0x472f4,
+ 0x47300, 0x47310,
+ 0x47318, 0x47348,
+ 0x47350, 0x47354,
+ 0x47380, 0x47388,
+ 0x47390, 0x47394,
+ 0x47400, 0x47448,
+ 0x47450, 0x47458,
+ 0x47500, 0x4751c,
+ 0x47530, 0x4754c,
+ 0x47560, 0x4757c,
+ 0x47590, 0x475ac,
+ 0x47600, 0x47630,
+ 0x47640, 0x47644,
+ 0x47660, 0x4769c,
+ 0x47700, 0x47710,
+ 0x47740, 0x47750,
+ 0x4775c, 0x4779c,
+ 0x477b0, 0x477bc,
+ 0x477c4, 0x477c8,
+ 0x477d4, 0x477fc,
+ 0x48000, 0x48004,
+ 0x48018, 0x4801c,
+ 0x49304, 0x493f0,
+ 0x49400, 0x49410,
+ 0x49460, 0x494f4,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50404,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c04,
+ 0x51000, 0x51020,
+ 0x51028, 0x510c4,
+ 0x51104, 0x51108,
+ 0x51200, 0x51274,
+ 0x51300, 0x51324,
+ 0x51400, 0x51548,
+ 0x51550, 0x51554,
+ 0x5155c, 0x51584,
+ 0x5158c, 0x515c8,
+ 0x515f0, 0x515f4,
+ 0x58000, 0x58004,
+ 0x58018, 0x5801c,
+ 0x59304, 0x593f0,
+ 0x59400, 0x59410,
+ 0x59460, 0x594f4,
+ };
+
u32 *buf_end = (u32 *)(buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -2679,6 +3316,16 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
}
break;
+ case CHELSIO_T7:
+ if (adap->flags & IS_VF) {
+ reg_ranges = t6vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
+ } else {
+ reg_ranges = t7_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t7_reg_ranges);
+ }
+ break;
+
default:
CH_ERR(adap,
"Unsupported chip version %d\n", chip_version);
@@ -3086,6 +3733,56 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
return 0;
}
+/* Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters */
+static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
+ [FLASH_LOC_EXP_ROM] = { 0, 6 },
+ [FLASH_LOC_IBFT] = { 6, 1 },
+ [FLASH_LOC_BOOTCFG] = { 7, 1 },
+ [FLASH_LOC_FW] = { 8, 16 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
+ [FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 30, 1 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_CUDBG] = { 32, 32 },
+ [FLASH_LOC_BOOT_AREA] = { 0, 8 }, /* Spans complete Boot Area */
+ [FLASH_LOC_END] = { 64, 0 },
+};
+
+/* Flash Layout {start sector, # of sectors} for T7 adapters */
+static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
+ [FLASH_LOC_VPD] = { 0, 1 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
+ [FLASH_LOC_FW] = { 2, 29 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_EXP_ROM] = { 32, 15 },
+ [FLASH_LOC_IBFT] = { 47, 1 },
+ [FLASH_LOC_BOOTCFG] = { 48, 1 },
+ [FLASH_LOC_DPU_BOOT] = { 49, 13 },
+ [FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 63, 1 },
+ [FLASH_LOC_VPD_BACKUP] = { 64, 1 },
+ [FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
+ [FLASH_LOC_FW_BACKUP] = { 66, 29 },
+ [FLASH_LOC_CFG_BACK] = { 95, 1 },
+ [FLASH_LOC_CUDBG] = { 96, 48 },
+ [FLASH_LOC_CHIP_DUMP] = { 144, 48 },
+ [FLASH_LOC_DPU_AREA] = { 192, 64 },
+ [FLASH_LOC_BOOT_AREA] = { 32, 17 }, /* Spans complete UEFI/PXE Boot Area */
+ [FLASH_LOC_END] = { 256, 0 },
+};
+
+int
+t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp)
+{
+ const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
+ &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];
+
+ if (lenp != NULL)
+ *lenp = FLASH_MAX_SIZE(l->nsecs);
+ return (FLASH_START(l->start_sec));
+}
+
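A quick illustration, assuming the usual 64KB flash sector size for these
controllers (so FLASH_START(sec) and FLASH_MAX_SIZE(nsecs) are taken to
expand to sec * 64KB and nsecs * 64KB):

	unsigned int fw_len;
	int fw_start = t4_flash_loc_start(adap, FLASH_LOC_FW, &fw_len);

Under that assumption the lookup yields start 0x80000 / length 1MB on T4-T6
({ 8, 16 } above) and start 0x20000 / length 29 * 64KB on T7 ({ 2, 29 }).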
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -3116,13 +3813,16 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
int lock, u32 *valp)
{
int ret;
+ uint32_t op;
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
return -EBUSY;
- t4_write_reg(adapter, A_SF_OP,
- V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
+ if (chip_id(adapter) >= CHELSIO_T7)
+ op |= F_QUADREADDISABLE;
+ t4_write_reg(adapter, A_SF_OP, op);
ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, A_SF_DATA);
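The S_/M_/V_/G_/F_ tokens used throughout this file follow cxgbe's generated
register-header convention. A minimal sketch for a hypothetical two-bit field
FOO at bit position 1 (the real shifts and masks come from the register
headers, not from here):

	#define S_FOO    1                        /* bit position of the field */
	#define M_FOO    0x3U                     /* field mask, before shifting */
	#define V_FOO(x) ((x) << S_FOO)           /* place a value into the field */
	#define G_FOO(x) (((x) >> S_FOO) & M_FOO) /* extract it from a register */
	#define F_BAR    (1U << 0)                /* single-bit fields get F_ flags */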
@@ -3294,9 +3994,10 @@ unlock:
*/
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3308,8 +4009,10 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
{
- return t4_read_flash(adapter, FLASH_FW_START,
- sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t),
+ (uint32_t *)hdr, 1);
}
/**
@@ -3321,9 +4024,11 @@ int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
*/
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP,
+ NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3335,9 +4040,10 @@ int t4_get_bs_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, tp_microcode_ver),
- 1, vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0);
}
/**
@@ -3359,10 +4065,10 @@ int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
sizeof(u32))];
int ret;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);
- ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
- ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
- 0);
+ ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
if (ret)
return ret;
@@ -3520,16 +4226,20 @@ int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
* File is stored, or an error if the device FLASH is too small to contain
* a Firmware Configuration File.
*/
-int t4_flash_cfg_addr(struct adapter *adapter)
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ if (adapter->params.sf_size < cfg_start + len)
return -ENOSPC;
-
- return FLASH_CFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (cfg_start);
}
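A minimal caller sketch for the new signature (the length out-parameter is
optional; a negative return still means the flash cannot hold a config file):

	unsigned int cfg_len;
	int cfg_addr;

	cfg_addr = t4_flash_cfg_addr(adapter, &cfg_len);
	if (cfg_addr < 0)
		return (cfg_addr);	/* flash too small for a config file */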
/*
@@ -3547,7 +4257,8 @@ static int t4_fw_matches_chip(struct adapter *adap,
*/
if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
(is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
- (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
+ (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
+ (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
return 1;
CH_ERR(adap,
@@ -3572,20 +4283,15 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
u8 first_page[SF_PAGE_SIZE];
const u32 *p = (const u32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_start_sec;
unsigned int fw_start;
unsigned int fw_size;
+ enum t4_flash_loc loc;
- if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
- fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
- fw_start = FLASH_FWBOOTSTRAP_START;
- fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
- } else {
- fw_start_sec = FLASH_FW_START_SEC;
- fw_start = FLASH_FW_START;
- fw_size = FLASH_FW_MAX_SIZE;
- }
+ loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
+ FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
+ fw_start = t4_flash_loc_start(adap, loc, &fw_size);
+ fw_start_sec = fw_start / SF_SEC_SIZE;
if (!size) {
CH_ERR(adap, "FW image has no data\n");
@@ -3618,7 +4324,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
}
- i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
@@ -3672,7 +4378,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.param[0].mnem =
cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
- c.param[0].val = (__force __be32)op;
+ c.param[0].val = cpu_to_be32(op);
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
@@ -3922,15 +4628,12 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* speed and let the firmware pick one.
*/
fec |= FW_PORT_CAP32_FORCE_FEC;
- if (speed & FW_PORT_CAP32_SPEED_100G) {
+ if (speed & FW_PORT_CAP32_SPEED_25G) {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_NO_FEC;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
} else {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
}
} else {
@@ -3948,12 +4651,9 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* the potential top speed. Request the best
* FEC at that speed instead.
*/
- if (speed & FW_PORT_CAP32_SPEED_100G) {
- if (fec == FW_PORT_CAP32_FEC_BASER_RS)
- fec = FW_PORT_CAP32_FEC_RS;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
- if (fec == FW_PORT_CAP32_FEC_RS)
- fec = FW_PORT_CAP32_FEC_BASER_RS;
+ if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
+ fec == FW_PORT_CAP32_FEC_BASER_RS) {
+ fec = FW_PORT_CAP32_FEC_RS;
}
}
} else {
@@ -4925,6 +5625,15 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = mps_trc_intr_details,
.actions = NULL,
};
+ static const struct intr_info t7_mps_trc_intr_info = {
+ .name = "T7_MPS_TRC_INT_CAUSE",
+ .cause_reg = A_T7_MPS_TRC_INT_CAUSE,
+ .enable_reg = A_T7_MPS_TRC_INT_ENABLE,
+ .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
+ .flags = 0,
+ .details = mps_trc_intr_details,
+ .actions = NULL,
+ };
static const struct intr_details mps_stat_sram_intr_details[] = {
{ 0xffffffff, "MPS statistics SRAM parity error" },
{ 0 }
@@ -4998,7 +5707,10 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
fatal = false;
fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
+ else
+ fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
@@ -5225,7 +5937,7 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
@@ -5234,10 +5946,29 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
+ } else {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
+ ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+ ii.flags = 0;
+ ii.details = mac_intr_details;
+ ii.actions = NULL;
}
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- if (chip_id(adap) >= CHELSIO_T5) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (chip_id(adap) >= CHELSIO_T5) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
@@ -5249,7 +5980,17 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
- if (chip_id(adap) >= CHELSIO_T6) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (is_t6(adap)) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
@@ -5346,13 +6087,42 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, "CIM" },
{ 0 }
};
- static const struct intr_info pl_perr_cause = {
+ static const struct intr_details t7_pl_intr_details[] = {
+ { F_T7_MC1, "MC1" },
+ { F_T7_ULP_TX, "ULP TX" },
+ { F_T7_SGE, "SGE" },
+ { F_T7_CPL_SWITCH, "CPL Switch" },
+ { F_T7_ULP_RX, "ULP RX" },
+ { F_T7_PM_RX, "PM RX" },
+ { F_T7_PM_TX, "PM TX" },
+ { F_T7_MA, "MA" },
+ { F_T7_TP, "TP" },
+ { F_T7_LE, "LE" },
+ { F_T7_EDC1, "EDC1" },
+ { F_T7_EDC0, "EDC0" },
+ { F_T7_MC0, "MC0" },
+ { F_T7_PCIE, "PCIE" },
+ { F_MAC3, "MAC3" },
+ { F_MAC2, "MAC2" },
+ { F_MAC1, "MAC1" },
+ { F_MAC0, "MAC0" },
+ { F_SMB, "SMB" },
+ { F_PL, "PL" },
+ { F_NCSI, "NC-SI" },
+ { F_MPS, "MPS" },
+ { F_DBG, "DBG" },
+ { F_I2CM, "I2CM" },
+ { F_MI, "MI" },
+ { F_CIM, "CIM" },
+ { 0 }
+ };
+ struct intr_info pl_perr_cause = {
.name = "PL_PERR_CAUSE",
.cause_reg = A_PL_PERR_CAUSE,
.enable_reg = A_PL_PERR_ENABLE,
.fatal = 0xffffffff,
- .flags = 0,
- .details = pl_intr_details,
+ .flags = NONFATAL_IF_DISABLED,
+ .details = NULL,
.actions = NULL,
};
static const struct intr_action pl_intr_action[] = {
@@ -5381,17 +6151,53 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, -1, cim_intr_handler },
{ 0 }
};
- static const struct intr_info pl_intr_info = {
+ static const struct intr_action t7_pl_intr_action[] = {
+ { F_T7_ULP_TX, -1, ulptx_intr_handler },
+ { F_T7_SGE, -1, sge_intr_handler },
+ { F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
+ { F_T7_ULP_RX, -1, ulprx_intr_handler },
+ { F_T7_PM_RX, -1, pmrx_intr_handler},
+ { F_T7_PM_TX, -1, pmtx_intr_handler},
+ { F_T7_MA, -1, ma_intr_handler },
+ { F_T7_TP, -1, tp_intr_handler },
+ { F_T7_LE, -1, le_intr_handler },
+ { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
+ { F_T7_EDC0, MEM_EDC0, mem_intr_handler },
+ { F_T7_MC1, MEM_MC1, mem_intr_handler },
+ { F_T7_MC0, MEM_MC0, mem_intr_handler },
+ { F_T7_PCIE, -1, pcie_intr_handler },
+ { F_MAC3, 3, mac_intr_handler},
+ { F_MAC2, 2, mac_intr_handler},
+ { F_MAC1, 1, mac_intr_handler},
+ { F_MAC0, 0, mac_intr_handler},
+ { F_SMB, -1, smb_intr_handler},
+ { F_PL, -1, plpl_intr_handler },
+ { F_NCSI, -1, ncsi_intr_handler},
+ { F_MPS, -1, mps_intr_handler },
+ { F_CIM, -1, cim_intr_handler },
+ { 0 }
+ };
+ struct intr_info pl_intr_info = {
.name = "PL_INT_CAUSE",
.cause_reg = A_PL_INT_CAUSE,
.enable_reg = A_PL_INT_ENABLE,
.fatal = 0,
.flags = 0,
- .details = pl_intr_details,
- .actions = pl_intr_action,
+ .details = NULL,
+ .actions = NULL,
};
u32 perr;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ pl_perr_cause.details = t7_pl_intr_details;
+ pl_intr_info.details = t7_pl_intr_details;
+ pl_intr_info.actions = t7_pl_intr_action;
+ } else {
+ pl_perr_cause.details = pl_intr_details;
+ pl_intr_info.details = pl_intr_details;
+ pl_intr_info.actions = pl_intr_action;
+ }
+
perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
if (verbose || perr != 0) {
t4_show_intr_info(adap, &pl_perr_cause, perr);
@@ -5421,19 +6227,20 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
*/
void t4_intr_enable(struct adapter *adap)
{
- u32 val = 0;
+ u32 mask, val;
if (chip_id(adap) <= CHELSIO_T5)
- val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
+ F_DBFIFO_LP_INT;
else
val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
- F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
- F_EGRESS_SIZE_ERR;
- t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
+ mask = val;
+ t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
@@ -6184,6 +6991,11 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
{
t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
sleep_ok);
+
+ if (chip_id(adap) >= CHELSIO_T7)
+ /* read RDMA stats IN and OUT for all ports at once */
+ t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0,
+ sleep_ok);
}
/**
@@ -6564,16 +7376,24 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
int idx, int enable)
{
- int i, ofst = idx * 4;
+ int i, ofst;
+ u32 match_ctl_a, match_ctl_b;
u32 data_reg, mask_reg, cfg;
u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
if (idx < 0 || idx >= NTRACE)
return -EINVAL;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ } else {
+ match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ }
+
if (tp == NULL || !enable) {
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
- enable ? en : 0);
+ t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0);
return 0;
}
@@ -6610,22 +7430,20 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
return -EINVAL;
/* stop the tracer we'll be changing */
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
+ t4_set_reg_field(adap, match_ctl_a, en, 0);
- idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
- data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
- mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
+ ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
+ data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
+ mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
t4_write_reg(adap, data_reg, tp->data[i]);
t4_write_reg(adap, mask_reg, ~tp->mask[i]);
}
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
- V_TFCAPTUREMAX(tp->snap_len) |
+ t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) |
V_TFMINPKTSIZE(tp->min_len));
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
- V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
- (is_t4(adap) ?
+ t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) |
+ V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ?
V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
@@ -6645,11 +7463,16 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
int *enabled)
{
u32 ctla, ctlb;
- int i, ofst = idx * 4;
+ int i, ofst;
u32 data_reg, mask_reg;
- ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
- ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ } else {
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ }
if (is_t4(adap)) {
*enabled = !!(ctla & F_TFEN);
@@ -6676,6 +7499,37 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
}
/**
+ * t4_set_trace_rss_control - configure the trace rss control register
+ * @adap: the adapter
+ * @chan: the channel number for RSS control
+ * @qid: queue number
+ *
+ * Configures the MPS tracing RSS control parameter for specified
+ * @chan channel and @qid queue number.
+ */
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
+{
+ u32 mps_trc_rss_control;
+
+ switch (chip_id(adap)) {
+ case CHELSIO_T4:
+ mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T7:
+ default:
+ mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ }
+
+ t4_write_reg(adap, mps_trc_rss_control,
+ V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
+}
+
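A caller sketch (the channel and queue id are assumed to be valid for the
adapter's configuration):

	/* Steer packets captured by the MPS tracer on channel 0 to RX queue 123. */
	t4_set_trace_rss_control(adap, 0, 123);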
+/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
@@ -6696,6 +7550,8 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
else {
t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
A_PM_TX_DBG_DATA, data, 2,
+ chip_id(adap) >= CHELSIO_T7 ?
+ A_T7_PM_TX_DBG_STAT_MSB :
A_PM_TX_DBG_STAT_MSB);
cycles[i] = (((u64)data[0] << 32) | data[1]);
}
@@ -6730,6 +7586,25 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
+ * t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
+ * @adap: the adapter
+ * @stats: where to store the statistics
+ *
+ * Returns performance statistics of PMRX cache.
+ */
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
+{
+ u8 i, j;
+
+ for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
+ t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
+ stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
+ t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
+ &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
+ }
+}
+
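Each config index read above produces three 32-bit values (a count plus a
64-bit debug statistic split across two words), so the caller's buffer must
hold T7_PM_RX_CACHE_NSTATS entries. A minimal sketch:

	u32 cache_stats[T7_PM_RX_CACHE_NSTATS];

	t4_pmrx_cache_get_stats(adap, cache_stats);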
+/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
@@ -6762,11 +7637,24 @@ static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
const u32 n = adap->params.nports;
const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
- if (n == 1)
- return idx == 0 ? all_chan : 0;
- if (n == 2 && chip_id(adap) <= CHELSIO_T5)
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ switch (adap->params.tp.lb_mode) {
+ case 0:
+ if (n == 1)
+ return (all_chan);
+ if (n == 2 && chip_id(adap) <= CHELSIO_T5)
+ return (3 << (2 * idx));
+ return (1 << idx);
+ case 1:
+ MPASS(n == 1);
+ return (all_chan);
+ case 2:
+ MPASS(n <= 2);
+ return (3 << (2 * idx));
+ default:
+ CH_ERR(adap, "Unsupported LB mode %d\n",
+ adap->params.tp.lb_mode);
+ return (0);
+ }
}
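For example, with lb_mode 2 a two-port adapter maps port index 1 to channels
2 and 3 (3 << (2 * 1) == 0xc), while in the default mode 0 a four-port
adapter gets one channel per port (port index 2 -> 1 << 2 == 0x4).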
/*
@@ -6784,6 +7672,8 @@ static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
*/
static unsigned int t4_get_tx_c_chan(struct adapter *adap, int idx)
{
+ if (adap->params.tx_tp_ch_map != UINT32_MAX)
+ return (adap->params.tx_tp_ch_map >> (8 * idx)) & 0xff;
return idx;
}
@@ -6856,79 +7746,89 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- struct port_info *pi = adap->port[idx];
- u32 bgmap = pi->mps_bg_map;
- u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
+ struct port_info *pi;
+ int port_id, tx_chan;
+ u32 bgmap, stat_ctl;
+
+ port_id = adap->port_map[idx];
+	MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
#define GET_STAT(name) \
t4_read_reg64(adap, \
- t4_port_reg(adap, pi->tx_chan, A_MPS_PORT_STAT_##name##_L));
-#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L));
+ memset(p, 0, sizeof(*p));
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ p->tx_pause += GET_STAT(TX_PORT_PAUSE);
+ p->tx_octets += GET_STAT(TX_PORT_BYTES);
+ p->tx_frames += GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames += GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 += GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop += GET_STAT(TX_PORT_DROP);
+ p->tx_ppp0 += GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 += GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 += GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 += GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 += GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 += GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 += GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 += GET_STAT(TX_PORT_PPP7);
+
+ p->rx_pause += GET_STAT(RX_PORT_PAUSE);
+ p->rx_octets += GET_STAT(RX_PORT_BYTES);
+ p->rx_frames += GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt += GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 += GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_ppp0 += GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 += GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 += GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 += GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 += GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 += GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 += GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 += GET_STAT(RX_PORT_PPP7);
+ if (!is_t6(adap)) {
+ MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
+ p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR);
+ }
+ }
+#undef GET_STAT
- p->tx_pause = GET_STAT(TX_PORT_PAUSE);
- p->tx_octets = GET_STAT(TX_PORT_BYTES);
- p->tx_frames = GET_STAT(TX_PORT_FRAMES);
- p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
- p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
- p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
- p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
- p->tx_frames_64 = GET_STAT(TX_PORT_64B);
- p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
- p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
- p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
- p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
- p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
- p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
- p->tx_drop = GET_STAT(TX_PORT_DROP);
- p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
- p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
- p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
- p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
- p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
- p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
- p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
- p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (is_t6(adap) && pi->fcs_reg != -1)
+ p->rx_fcs_err = t4_read_reg64(adap,
+ t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base;
if (chip_id(adap) >= CHELSIO_T5) {
+ stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
if (stat_ctl & F_COUNTPAUSESTATTX) {
p->tx_frames -= p->tx_pause;
p->tx_octets -= p->tx_pause * 64;
}
if (stat_ctl & F_COUNTPAUSEMCTX)
p->tx_mcast_frames -= p->tx_pause;
- }
-
- p->rx_pause = GET_STAT(RX_PORT_PAUSE);
- p->rx_octets = GET_STAT(RX_PORT_BYTES);
- p->rx_frames = GET_STAT(RX_PORT_FRAMES);
- p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
- p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
- p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
- p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
- p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
- p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
- p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
- p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
- p->rx_frames_64 = GET_STAT(RX_PORT_64B);
- p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
- p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
- p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
- p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
- p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
- p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
- p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
- p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
- p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
- p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
- p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
- p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
- p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
- p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
-
- if (pi->fcs_reg != -1)
- p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) - pi->fcs_base;
-
- if (chip_id(adap) >= CHELSIO_T5) {
if (stat_ctl & F_COUNTPAUSESTATRX) {
p->rx_frames -= p->rx_pause;
p->rx_octets -= p->rx_pause * 64;
@@ -6937,6 +7837,8 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_mcast_frames -= p->rx_pause;
}
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ bgmap = pi->mps_bg_map;
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -6945,8 +7847,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
#undef GET_STAT_COM
}
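With the T7 loopback modes a port can own several TP channels (lb_nchan), so
the counters are now accumulated over tx_chan .. tx_chan + lb_nchan - 1
rather than read from a single channel; on pre-T7 hardware lb_nchan is 1 and
the sums reduce to the old single reads.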
@@ -7016,10 +7916,14 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
}
if (addr) {
@@ -7056,8 +7960,10 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
if (is_t4(adap))
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- else
+ else if (chip_id(adap) < CHELSIO_T7)
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ else
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
if (!enable) {
t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
@@ -7348,6 +8254,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
break;
case CHELSIO_T6:
+ case CHELSIO_T7:
sge_idma_decode = (const char * const *)t6_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
break;
@@ -8964,7 +9871,7 @@ static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(pi->tx_chan));
+ V_FW_PORT_CMD_PORTID(pi->hw_port));
action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
FW_PORT_ACTION_GET_PORT_INFO;
cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
@@ -8996,16 +9903,12 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
(action == FW_PORT_ACTION_GET_PORT_INFO ||
action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- int i;
- int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
- struct port_info *pi = NULL;
-
- for_each_port(adap, i) {
- pi = adap2pinfo(adap, i);
- if (pi->tx_chan == chan)
- break;
- }
+ int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ int port_id = adap->port_map[hw_port];
+ struct port_info *pi;
+ MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
PORT_LOCK(pi);
handle_port_info(pi, p, action, &mod_changed, &link_changed);
PORT_UNLOCK(pi);
@@ -9159,14 +10062,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
/* If we didn't recognize the FLASH part, that's no real issue: the
- * Hardware/Software contract says that Hardware will _*ALWAYS*_
- * use a FLASH part which is at least 4MB in size and has 64KB
- * sectors. The unrecognized FLASH part is likely to be much larger
- * than 4MB, but that's all we really need.
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
+ * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
+ * size, depending on the board.
*/
if (size == 0) {
- CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
- size = 1 << 22;
+ size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
+ CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
+ flashid, size);
+ size <<= 20;
}
/*
@@ -9212,11 +10116,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 15,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ,
.filter_opt_len = FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9227,11 +10134,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO | F_DBTYPE,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9242,15 +10152,36 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = T6_PM_NSTATS,
.cng_ch_bits_log = 3,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 256,
.vfcount = 256,
.sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = T6_RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE_T6,
},
+ {
+ /* T7 */
+ .nchan = NCHAN,
+ .pm_stats_cnt = T6_PM_NSTATS,
+ .cng_ch_bits_log = 2,
+ .nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ_T7,
+ .cim_num_obq = CIM_NUM_OBQ_T7,
+ .filter_opt_len = T7_FILTER_OPT_LEN,
+ .filter_num_opt = S_T7_FT_LAST + 1,
+ .mps_rplc_size = 256,
+ .vfcount = 256,
+ .sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE_T7,
+ .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+ .rss_nentries = T7_RSS_NENTRIES,
+ .cim_la_size = CIMLA_SIZE_T6,
+ },
};
chipid -= CHELSIO_T4;
@@ -9466,14 +10397,11 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
}
/**
- * t4_init_devlog_params - initialize adapter->params.devlog
+ * t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
* @adap: the adapter
* @fw_attach: whether we can talk to the firmware
- *
- * Initialize various fields of the adapter's Firmware Device Log
- * Parameters structure.
*/
-int t4_init_devlog_params(struct adapter *adap, int fw_attach)
+int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
struct devlog_params *dparams = &adap->params.devlog;
u32 pf_dparams;
@@ -9487,12 +10415,15 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
*/
pf_dparams =
t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
- if (pf_dparams) {
- unsigned int nentries, nentries128;
+ if (pf_dparams && pf_dparams != UINT32_MAX) {
+ unsigned int nentries, nentries128, ncore_shift;
+
+ ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
+ G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
+ adap->params.ncores = 1 << ncore_shift;
dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
-
nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
nentries = (nentries128 + 1) * 128;
dparams->size = nentries * sizeof(struct fw_devlog_e);
@@ -9503,6 +10434,7 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
/*
* For any failing returns ...
*/
+ adap->params.ncores = 1;
memset(dparams, 0, sizeof *dparams);
/*
@@ -9624,21 +10556,28 @@ int t4_init_sge_params(struct adapter *adapter)
/* Convert the LE's hardware hash mask to a shorter filter mask. */
static inline uint16_t
-hashmask_to_filtermask(uint64_t hashmask, uint16_t filter_mode)
+hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
- int i;
+ int first, last, i;
uint16_t filter_mask;
- uint64_t mask; /* field mask */
+ uint64_t mask; /* field mask */
+
+ if (chip_id(adap) >= CHELSIO_T7) {
+ first = S_T7_FT_FIRST;
+ last = S_T7_FT_LAST;
+ } else {
+ first = S_FT_FIRST;
+ last = S_FT_LAST;
+ }
- filter_mask = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (filter_mask = 0, i = first; i <= last; i++) {
if ((filter_mode & (1 << i)) == 0)
continue;
- mask = (1 << width[i]) - 1;
+ mask = (1 << t4_filter_field_width(adap, i)) - 1;
if ((hashmask & mask) == mask)
filter_mask |= 1 << i;
- hashmask >>= width[i];
+ hashmask >>= t4_filter_field_width(adap, i);
}
return (filter_mask);
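Worked example: suppose the filter mode enables only VLAN (17 bits wide) and
PROTOCOL (8 bits). The hash mask is consumed field by field from the lowest
enabled selection bit upward, so hashmask bits 16:0 cover VLAN and bits
24:17 cover PROTOCOL; with hashmask == 0x1ffff only VLAN is fully masked and
the returned filter_mask has just the VLAN selection bit set.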
@@ -9681,7 +10620,15 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
hash_mask |= (u64)v << 32;
}
- tpp->filter_mask = hashmask_to_filtermask(hash_mask,
+ if (chip_id(adap) >= CHELSIO_T7) {
+ /*
+		 * This parameter predates T7, so T7+ firmware should
+		 * always support this query.
+ */
+ CH_WARN(adap, "query for filter mode/mask failed: %d\n",
+ rc);
+ }
+ tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask,
tpp->filter_mode);
t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
@@ -9696,16 +10643,37 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
- tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
- tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
- tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
- tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
- tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
- tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
- tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX);
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION);
+ tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE);
+ tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY);
+ tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS);
+ } else {
+ tpp->ipsecidx_shift = -1;
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ tpp->roce_shift = -1;
+ tpp->synonly_shift = -1;
+ tpp->tcpflags_shift = -1;
+ }
}
/**
@@ -9725,11 +10693,21 @@ int t4_init_tp_params(struct adapter *adap)
read_filter_mode_and_ingress_config(adap);
+ tpp->rx_pkt_encap = false;
+ tpp->lb_mode = 0;
+ tpp->lb_nchan = 1;
if (chip_id(adap) > CHELSIO_T5) {
v = t4_read_reg(adap, A_TP_OUT_CONFIG);
tpp->rx_pkt_encap = v & F_CRXPKTENC;
- } else
- tpp->rx_pkt_encap = false;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
+ tpp->lb_mode = G_T7_LB_MODE(v);
+ if (tpp->lb_mode == 1)
+ tpp->lb_nchan = 4;
+ else if (tpp->lb_mode == 2)
+ tpp->lb_nchan = 2;
+ }
+ }
rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
@@ -9750,6 +10728,53 @@ int t4_init_tp_params(struct adapter *adap)
}
/**
+ * t4_filter_field_width - returns the width of a filter field
+ * @adap: the adapter
+ * @filter_field: the filter field whose width is being requested
+ *
+ *	Return the width of a filter field within the Compressed
+ *	Filter Tuple. The filter field is specified via its selection bit
+ *	within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_width(const struct adapter *adap, int filter_field)
+{
+ const int nopt = adap->chip_params->filter_num_opt;
+ static const uint8_t width_t7[] = {
+ W_FT_IPSECIDX,
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION,
+ W_FT_ROCE,
+ W_FT_SYNONLY,
+ W_FT_TCPFLAGS
+ };
+ static const uint8_t width_t4[] = {
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION
+ };
+ const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;
+
+ if (filter_field < 0 || filter_field >= nopt)
+ return (0);
+ return (width[filter_field]);
+}
+
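A sketch of how this pairs with the filter mode (filter_mode here stands for
the TP filter mode value read elsewhere in this file) to compute the total
width of the enabled tuple fields:

	int i, total = 0;

	for (i = 0; i < adap->chip_params->filter_num_opt; i++) {
		if (filter_mode & (1 << i))
			total += t4_filter_field_width(adap, i);
	}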
+/**
* t4_filter_field_shift - calculate filter field shift
* @adap: the adapter
* @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
@@ -9767,6 +10792,56 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
if ((filter_mode & filter_sel) == 0)
return -1;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_IPSECIDX:
+ field_shift += W_FT_IPSECIDX;
+ break;
+ case F_T7_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_T7_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_T7_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_T7_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_T7_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_T7_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_T7_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_T7_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_T7_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_T7_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ case F_ROCE:
+ field_shift += W_FT_ROCE;
+ break;
+ case F_SYNONLY:
+ field_shift += W_FT_SYNONLY;
+ break;
+ case F_TCPFLAGS:
+ field_shift += W_FT_TCPFLAGS;
+ break;
+ }
+ }
+ return field_shift;
+ }
+
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
case F_FCOE:
@@ -9818,11 +10893,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
} while ((adap->params.portvec & (1 << j)) == 0);
}
+ p->hw_port = j;
p->tx_chan = t4_get_tx_c_chan(adap, j);
p->rx_chan = t4_get_rx_c_chan(adap, j);
p->mps_bg_map = t4_get_mps_bg_map(adap, j);
p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
- p->lport = j;
if (!(adap->flags & IS_VF) ||
adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
@@ -9851,232 +10926,321 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
return 0;
}
+static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_IBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+ if (thres)
+ *thres = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
+}
+
+static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_OBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+}
+
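Note the unit change in the two helpers above: T4-T6 encode CIM queue base
and size in 256-byte units while T7 uses 512-byte units, so a G_CIMQSIZE()
value of 4 describes a 1KB queue on T6 but a 2KB queue on T7.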
/**
- * t4_read_cimq_cfg - read CIM queue configuration
+ * t4_read_cimq_cfg_core - read CIM queue configuration on specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @base: holds the queue base addresses in bytes
* @size: holds the queue sizes in bytes
* @thres: holds the queue full thresholds in bytes
*
* Returns the current configuration of the CIM queues, starting with
- * the IBQs, then the OBQs.
+ * the IBQs, then the OBQs, on a specific @coreid.
*/
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres)
{
- unsigned int i, v;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int i;
- for (i = 0; i < CIM_NUM_IBQ; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
- }
- for (i = 0; i < cim_num_obq; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- }
+ for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
+ t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);
+
+ for (i = 0; i < cim_num_obq; i++, base++, size++)
+ t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
+}
+
+static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ int ret, attempts;
+ unsigned int v;
+
+ /* It might take 3-10ms before the IBQ debug read access is allowed.
+	 * Wait for up to 1 second with a delay of 1 usec.
+ */
+ attempts = 1000000;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
+ else
+ v = V_IBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, v | F_IBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+ attempts, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ return 0;
}
/**
- * t4_read_cim_ibq - read the contents of a CIM inbound queue
+ * t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
+ *	a specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ * to the capacity of @data on a specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err, attempts;
- unsigned int addr;
- const unsigned int nwords = CIM_IBQ_SIZE * 4;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ u16 i, addr, nwords;
+ int ret;
- if (qid > 5 || (n & 3))
+ if (qid > (cim_num_ibq - 1) || (n & 3))
return -EINVAL;
- addr = qid * nwords;
+ t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
+	addr >>= sizeof(u16);	/* bytes -> 32-bit words; sizeof(u16) == 2 */
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- /* It might take 3-10ms before the IBQ debug read access is allowed.
- * Wait for 1 Sec with a delay of 1 usec.
- */
- attempts = 1000000;
-
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
- F_IBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
- attempts, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
return i;
}
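A caller sketch, with the buffer sized from CIM_IBQ_SIZE as in the pre-T7
code (nitems() is the FreeBSD array-count macro; qid is assumed valid):

	u32 buf[CIM_IBQ_SIZE * 4];
	int n;

	n = t4_read_cim_ibq_core(adap, 0 /* core */, qid, buf, nitems(buf));
	if (n < 0)
		return (n);	/* otherwise n words were read */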
+static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ unsigned int v;
+ int ret;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
+ else
+ v = V_OBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, v | F_OBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ return 0;
+}
+
/**
- * t4_read_cim_obq - read the contents of a CIM outbound queue
+ * t4_read_cim_obq_core - read the contents of a CIM outbound queue on
+ *	a specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ *	to the capacity of @data on a specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err;
- unsigned int addr, v, nwords;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ u16 i, addr, nwords;
+ int ret;
if ((qid > (cim_num_obq - 1)) || (n & 3))
return -EINVAL;
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(qid));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-
- addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
- nwords = G_CIMQSIZE(v) * 64; /* same */
+ t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
+ addr >>= sizeof(u16);
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
- F_OBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
- 2, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
- return i;
+ return i;
}
-enum {
- CIM_QCTL_BASE = 0,
- CIM_CTL_BASE = 0x2000,
- CIM_PBT_ADDR_BASE = 0x2800,
- CIM_PBT_LRF_BASE = 0x3000,
- CIM_PBT_DATA_BASE = 0x3800
-};
-
/**
- * t4_cim_read - read a block from CIM internal address space
+ * t4_cim_read_core - read a block from CIM internal address space
+ * of a control register group on a specific core.
* @adap: the adapter
+ * @group: the control register group to select for read
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to read
* @valp: where to store the result
*
- * Reads a block of 4-byte words from the CIM intenal address space.
+ * Reads a block of 4-byte words from the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp)
{
+ unsigned int hostbusy, v = 0;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
if (!ret)
*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
}
+
return ret;
}
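
Existing pre-T7 call sites can presumably keep their old shape through a
thin wrapper; a sketch under the assumption that group 0 / core 0 is the
right legacy selection (on chips before T7 the function ignores both, since
the extra select bits are only written when chip_id(adap) > CHELSIO_T6):

/* Sketch, not from the diff: legacy whole-adapter read via the new API. */
static inline int
example_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
    unsigned int *valp)
{
	return (t4_cim_read_core(adap, 0, 0, addr, n, valp));
}
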
/**
- * t4_cim_write - write a block into CIM internal address space
+ * t4_cim_write_core - write a block into CIM internal address space
+ * of a control register group on a specific core.
* @adap: the adapter
+ * @group: the control register group to select for write
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to write
* @valp: set of values to write
*
- * Writes a block of 4-byte words into the CIM intenal address space.
+ * Writes a block of 4-byte words into the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp)
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp)
{
+ unsigned int hostbusy, v;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
+ V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ v = F_HOSTWRITE;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
}
- return ret;
-}
-static int t4_cim_write1(struct adapter *adap, unsigned int addr,
- unsigned int val)
-{
- return t4_cim_write(adap, addr, 1, &val);
-}
-
-/**
- * t4_cim_ctl_read - read a block from CIM control region
- * @adap: the adapter
- * @addr: the start address within the CIM control region
- * @n: number of words to read
- * @valp: where to store the result
- *
- * Reads a block of 4-byte words from the CIM control region.
- */
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
-{
- return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
+ return ret;
}
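
The old single-word helper t4_cim_write1() is gone and its callers now pass
a stack variable to t4_cim_write_core() directly, as t4_cim_read_la_core()
below does; if a convenience wrapper were still wanted, it could be a
one-liner (the name here is illustrative):

/* Illustrative per-core successor to the removed t4_cim_write1(). */
static int
example_cim_write1(struct adapter *adap, u8 group, u8 coreid,
    unsigned int addr, unsigned int val)
{
	return (t4_cim_write_core(adap, group, coreid, addr, 1, &val));
}
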
/**
- * t4_cim_read_la - read CIM LA capture buffer
+ * t4_cim_read_la_core - read CIM LA capture buffer on specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @la_buf: where to store the LA data
* @wrptr: the HW write pointer within the capture buffer
*
- * Reads the contents of the CIM LA buffer with the most recent entry at
- * the end of the returned data and with the entry at @wrptr first.
- * We try to leave the LA in the running state we find it in.
+ * Reads the contents of the CIM LA buffer on a specific @coreid
+ * with the most recent entry at the end of the returned data
+ * and with the entry at @wrptr first. We try to leave the LA
+ * in the running state we find it in.
*/
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr)
{
- int i, ret;
unsigned int cfg, val, idx;
+ int i, ret;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
if (ret)
return ret;
if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+ val = 0;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
return ret;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
if (ret)
goto restart;
@@ -10085,25 +11249,28 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
*wrptr = idx;
for (i = 0; i < adap->params.cim_la_size; i++) {
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+ val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
if (val & F_UPDBGLARDEN) {
ret = -ETIMEDOUT;
break;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
+ &la_buf[i]);
if (ret)
break;
/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
* identify the 32-bit portion of the full 312-bit data
*/
- if (is_t6(adap) && (idx & 0xf) >= 9)
+ if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
idx = (idx & 0xff0) + 0x10;
else
idx++;
@@ -10112,11 +11279,15 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
}
restart:
if (cfg & F_UPDBGLAEN) {
- int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- cfg & ~F_UPDBGLARDEN);
+ int r;
+
+ val = cfg & ~F_UPDBGLARDEN;
+ r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (!ret)
ret = r;
}
+
return ret;
}
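
A hedged usage sketch: the caller supplies a buffer of
adap->params.cim_la_size 32-bit words (CIMLA_SIZE or CIMLA_SIZE_T6 in
t4_hw.h), and since the function fixes the register group at 1, only the
core is selectable. The allocation pattern below is assumed, not taken from
the diff:

/* Sketch: capture the CIM LA of one uP core. */
static int
example_read_la(struct adapter *adap, u8 coreid)
{
	u32 *la_buf, wrptr;
	int ret;

	la_buf = malloc(adap->params.cim_la_size * sizeof(u32), M_DEVBUF,
	    M_NOWAIT);
	if (la_buf == NULL)
		return (-ENOMEM);
	ret = t4_cim_read_la_core(adap, coreid, la_buf, &wrptr);
	/* On success, la_buf ends with the most recent LA entry. */
	free(la_buf, M_DEVBUF);
	return (ret);
}
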
@@ -10403,25 +11574,20 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbp
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_cfg_addr(adap);
+ cfg_addr = t4_flash_cfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- CH_ERR(adap, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
/*
@@ -10432,15 +11598,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 1);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10644,25 +11807,25 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
pcir_data_t *pcir_header;
int ret, addr;
uint16_t device_id;
- unsigned int i;
- unsigned int boot_sector = (boot_addr * 1024 );
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int i, start, len;
+ unsigned int boot_sector = boot_addr * 1024;
/*
- * Make sure the boot image does not encroach on the firmware region
+ * Make sure the boot image does not exceed its available space.
*/
- if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
- CH_ERR(adap, "boot image encroaching on firmware region\n");
+ len = 0;
+ start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
+ if (boot_sector + size > start + len) {
+ CH_ERR(adap, "boot data is larger than available BOOT area\n");
return -EFBIG;
}
/*
* The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
* and Boot configuration data sections. These 3 boot sections span
- * sectors 0 to 7 in flash and live right before the FW image location.
+ * the entire FLASH_LOC_BOOT_AREA.
*/
- i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
- sf_sec_size);
+ i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
(boot_sector >> 16) + i - 1);
@@ -10765,40 +11928,39 @@ out:
* is stored, or an error if the device FLASH is too small to contain
* a OptionROM Configuration.
*/
-static int t4_flash_bootcfg_addr(struct adapter *adapter)
+static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+ if (adapter->params.sf_size < start + len)
return -ENOSPC;
-
- return FLASH_BOOTCFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (start);
}
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_bootcfg_addr(adap);
+ cfg_addr = t4_flash_bootcfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_BOOTCFG_MAX_SIZE) {
- CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
- FLASH_BOOTCFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
@@ -10810,15 +11972,12 @@ int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 0);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10844,19 +12003,20 @@ out:
*/
int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
int i, nbits, rc;
uint32_t param, val;
uint16_t fmode, fmask;
const int maxbits = adap->chip_params->filter_opt_len;
+ const int nopt = adap->chip_params->filter_num_opt;
+ int width;
if (mode != -1 || mask != -1) {
if (mode != -1) {
fmode = mode;
nbits = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (i = 0; i < nopt; i++) {
if (fmode & (1 << i))
- nbits += width[i];
+ nbits += t4_filter_field_width(adap, i);
}
if (nbits > maxbits) {
CH_ERR(adap, "optional fields in the filter "
@@ -10867,17 +12027,20 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
}
/*
- * Hardware wants the bits to be maxed out. Keep
+ * Hardware < T7 wants the bits to be maxed out. Keep
* setting them until there's no room for more.
*/
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
- if (fmode & (1 << i))
- continue;
- if (nbits + width[i] <= maxbits) {
- fmode |= 1 << i;
- nbits += width[i];
- if (nbits == maxbits)
- break;
+ if (chip_id(adap) < CHELSIO_T7) {
+ for (i = 0; i < nopt; i++) {
+ if (fmode & (1 << i))
+ continue;
+ width = t4_filter_field_width(adap, i);
+ if (nbits + width <= maxbits) {
+ fmode |= 1 << i;
+ nbits += width;
+ if (nbits == maxbits)
+ break;
+ }
}
}
@@ -10936,21 +12099,26 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
*/
void t4_clr_port_stats(struct adapter *adap, int idx)
{
- unsigned int i;
- u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
- u32 port_base_addr;
+ struct port_info *pi;
+ int i, port_id, tx_chan;
+ u32 bgmap, port_base_addr;
- if (is_t4(adap))
- port_base_addr = PORT_BASE(idx);
- else
- port_base_addr = T5_PORT_BASE(idx);
-
- for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
- for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
+ port_id = adap->port_map[idx];
+ MPASS(port_id >= 0 && port_id <= adap->params.nports);
+ pi = adap->port[port_id];
+
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ port_base_addr = t4_port_reg(adap, tx_chan, 0);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ }
+ bgmap = pi->mps_bg_map;
for (i = 0; i < 4; i++)
if (bgmap & (1 << i)) {
t4_write_reg(adap,
@@ -11078,6 +12246,8 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ if (chip_id(adap) > CHELSIO_T6)
+ data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
}
return ret;
}
@@ -11099,9 +12269,12 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type cty
t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
- if (!ret)
+ if (!ret) {
for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
*data++ = t4_read_reg(adap, i);
+ if (chip_id(adap) > CHELSIO_T6)
+ *data++ = t4_read_reg(adap, i);
+ }
return ret;
}
diff --git a/sys/dev/cxgbe/common/t4_hw.h b/sys/dev/cxgbe/common/t4_hw.h
index 79ec690cd5e6..09bd9ac9e637 100644
--- a/sys/dev/cxgbe/common/t4_hw.h
+++ b/sys/dev/cxgbe/common/t4_hw.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,30 +41,36 @@ enum {
EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
T6_RSS_NENTRIES = 4096,
+ T7_RSS_NENTRIES = 16384,
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
NTX_SCHED = 8, /* # of HW Tx scheduling queues */
PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7,
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MAX_PM_NSTATS = 7,
+ T7_PM_RX_CACHE_NSTATS = 27, /* # of PM Rx Cache stats in T7 */
MBOX_LEN = 64, /* mailbox size in bytes */
NTRACE = 4, /* # of tracing filters */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width of optional components */
T5_FILTER_OPT_LEN = 40,
+ T7_FILTER_OPT_LEN = 63,
NWOL_PAT = 8, /* # of WoL patterns */
WOL_PAT_LEN = 128, /* length of WoL patterns */
UDBS_SEG_SIZE = 128, /* Segment size of BAR2 doorbells */
UDBS_SEG_SHIFT = 7, /* log2(UDBS_SEG_SIZE) */
UDBS_DB_OFFSET = 8, /* offset of the 4B doorbell in a segment */
UDBS_WR_OFFSET = 64, /* offset of the work request in a segment */
+ MAX_UP_CORES = 8, /* Max # of uP cores that can be enabled */
};
enum {
CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_IBQ_T7 = 16, /* # of CIM IBQs for T7 */
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
+ CIM_NUM_OBQ_T7 = 16, /* # of CIM OBQs for T7 adapter */
CIMLA_SIZE = 256 * 8, /* 256 rows * ceil(235/32) 32-bit words */
CIMLA_SIZE_T6 = 256 * 10, /* 256 rows * ceil(311/32) 32-bit words */
CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
@@ -91,6 +96,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_CTXT_SIZE = 24, /* size of SGE context */
+ SGE_CTXT_SIZE_T7 = 28, /* size of SGE context for T7 */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
@@ -161,6 +167,18 @@ struct rsp_ctrl {
#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+#define S_ARM_QTYPE 11
+#define M_ARM_QTYPE 1
+#define V_ARM_QTYPE(x) ((x) << S_ARM_QTYPE)
+
+#define S_ARM_PIDX 0
+#define M_ARM_PIDX 0x7ffU
+#define V_ARM_PIDX(x) ((x) << S_ARM_PIDX)
+
+#define S_ARM_CIDXINC 0
+#define M_ARM_CIDXINC 0x7ffU
+#define V_ARM_CIDXINC(x) ((x) << S_ARM_CIDXINC)
+
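These follow the header's S_ (shift) / M_ (mask) / V_ (insert) idiom: a
value is composed with the V_ macros and, where no G_ macro is provided,
decomposed with S_/M_ by hand. Purely illustrative, since the doorbell code
that consumes these fields is not part of this diff:

/* Compose an arm value and pull the PIDX back out. */
u32 v = V_ARM_QTYPE(1) | V_ARM_PIDX(0x123);
u32 pidx = (v >> S_ARM_PIDX) & M_ARM_PIDX;	/* 0x123 */
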
/* # of pages a pagepod can hold without needing another pagepod */
#define PPOD_PAGES 4U
@@ -206,95 +224,116 @@ struct pagepod {
*/
#define FLASH_START(start) ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+#define FLASH_MIN_SIZE FLASH_START(32)
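
With the 64 KB flash sectors implied by the boot_sector >> 16 arithmetic in
t4_load_boot(), this new floor works out to a fixed size:

/* Assuming SF_SEC_SIZE == 64 KB: FLASH_START(32) = 32 * 65536 = 2 MiB. */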
-enum {
+enum t4_flash_loc {
/*
* Various Expansion-ROM boot images, etc.
*/
- FLASH_EXP_ROM_START_SEC = 0,
- FLASH_EXP_ROM_NSECS = 6,
- FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
- FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+ FLASH_LOC_EXP_ROM = 0,
/*
* iSCSI Boot Firmware Table (iBFT) and other driver-related
* parameters ...
*/
- FLASH_IBFT_START_SEC = 6,
- FLASH_IBFT_NSECS = 1,
- FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
- FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+ FLASH_LOC_IBFT,
/*
* Boot configuration data.
*/
- FLASH_BOOTCFG_START_SEC = 7,
- FLASH_BOOTCFG_NSECS = 1,
- FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
- FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+ FLASH_LOC_BOOTCFG,
/*
* Location of firmware image in FLASH.
*/
- FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 16,
- FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
- FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ FLASH_LOC_FW,
/*
* Location of bootstrap firmware image in FLASH.
*/
- FLASH_FWBOOTSTRAP_START_SEC = 27,
- FLASH_FWBOOTSTRAP_NSECS = 1,
- FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
- FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+ FLASH_LOC_FWBOOTSTRAP,
/*
* iSCSI persistent/crash information.
*/
- FLASH_ISCSI_CRASH_START_SEC = 29,
- FLASH_ISCSI_CRASH_NSECS = 1,
- FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
- FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+ FLASH_LOC_ISCSI_CRASH,
/*
* FCoE persistent/crash information.
*/
- FLASH_FCOE_CRASH_START_SEC = 30,
- FLASH_FCOE_CRASH_NSECS = 1,
- FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
- FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+ FLASH_LOC_FCOE_CRASH,
/*
* Location of Firmware Configuration File in FLASH.
*/
- FLASH_CFG_START_SEC = 31,
- FLASH_CFG_NSECS = 1,
- FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
- FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+ FLASH_LOC_CFG,
+
+ /*
+ * CUDBG chip dump.
+ */
+ FLASH_LOC_CUDBG,
+
+ /*
+ * FW chip dump.
+ */
+ FLASH_LOC_CHIP_DUMP,
+
+ /*
+ * DPU boot information store.
+ */
+ FLASH_LOC_DPU_BOOT,
+
+ /*
+ * DPU persistent information store.
+ */
+ FLASH_LOC_DPU_AREA,
/*
- * We don't support FLASH devices which can't support the full
- * standard set of sections which we need for normal operations.
+ * VPD location.
*/
- FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+ FLASH_LOC_VPD,
/*
- * Sectors 32-63 for CUDBG.
+ * Backup init/vpd.
*/
- FLASH_CUDBG_START_SEC = 32,
- FLASH_CUDBG_NSECS = 32,
- FLASH_CUDBG_START = FLASH_START(FLASH_CUDBG_START_SEC),
- FLASH_CUDBG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CUDBG_NSECS),
+ FLASH_LOC_VPD_BACKUP,
/*
- * Size of defined FLASH regions.
+ * Backup firmware image.
*/
- FLASH_END_SEC = 64,
+ FLASH_LOC_FW_BACKUP,
+
+ /*
+ * Backup bootstrap firmware image.
+ */
+ FLASH_LOC_FWBOOTSTRAP_BACKUP,
+
+ /*
+ * Backup Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_LOC_CFG_BACK,
+
+ /*
+ * Helper to retrieve info that spans the entire Boot-related area.
+ */
+ FLASH_LOC_BOOT_AREA,
+
+ /*
+ * Helper to determine the minimum standard set of sections needed for
+ * normal operations.
+ */
+ FLASH_LOC_MIN_SIZE,
+
+ /*
+ * End of FLASH regions.
+ */
+ FLASH_LOC_END
};
-#undef FLASH_START
-#undef FLASH_MAX_SIZE
+struct t4_flash_loc_entry {
+ u16 start_sec;
+ u16 nsecs;
+};
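
The fixed per-region constants are replaced by enum t4_flash_loc plus this
entry type, so the layout presumably lives in per-chip tables consulted by
t4_flash_loc_start(); a sketch under that assumption, using the sector
numbers from the deleted pre-T7 constants:

/* Sketch only: a legacy-layout table; real tables are per chip generation. */
static const struct t4_flash_loc_entry example_flash_map[FLASH_LOC_END] = {
	[FLASH_LOC_EXP_ROM]	= { .start_sec = 0,  .nsecs = 6 },
	[FLASH_LOC_IBFT]	= { .start_sec = 6,  .nsecs = 1 },
	[FLASH_LOC_BOOTCFG]	= { .start_sec = 7,  .nsecs = 1 },
	[FLASH_LOC_FW]		= { .start_sec = 8,  .nsecs = 16 },
	[FLASH_LOC_FWBOOTSTRAP]	= { .start_sec = 27, .nsecs = 1 },
	[FLASH_LOC_CFG]		= { .start_sec = 31, .nsecs = 1 },
};
/*
 * t4_flash_loc_start(adap, loc, lenp) would then return
 * FLASH_START(e->start_sec) and report FLASH_MAX_SIZE(e->nsecs) via *lenp.
 */
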
#define S_SGE_TIMESTAMP 0
#define M_SGE_TIMESTAMP 0xfffffffffffffffULL
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index d356d0d99f36..0d12ccf2e910 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +29,7 @@
#ifndef T4_MSG_H
#define T4_MSG_H
-enum {
+enum cpl_opcodes {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
@@ -68,13 +67,16 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_RTE_DELETE_RPL = 0x27,
CPL_RTE_WRITE_RPL = 0x28,
+ CPL_ROCE_FW_NOTIFY = 0x28,
CPL_RX_URG_PKT = 0x29,
CPL_TAG_WRITE_RPL = 0x2A,
+ CPL_RDMA_ASYNC_EVENT = 0x2A,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_RX_URG_NOTIFY = 0x2C,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_TX_DATA_ACK = 0x2F,
+ CPL_RDMA_INV_REQ = 0x2F,
CPL_RX_PHYS_ADDR = 0x30,
CPL_PCMD_READ_RPL = 0x31,
@@ -107,19 +109,30 @@ enum {
CPL_RX_DATA_DIF = 0x4B,
CPL_ERR_NOTIFY = 0x4D,
CPL_RX_TLS_CMP = 0x4E,
+ CPL_T6_TX_DATA_ACK = 0x4F,
CPL_RDMA_READ_REQ = 0x60,
CPL_RX_ISCSI_DIF = 0x60,
+ CPL_RDMA_CQE_EXT = 0x61,
+ CPL_RDMA_CQE_FW_EXT = 0x62,
+ CPL_RDMA_CQE_ERR_EXT = 0x63,
+ CPL_TX_DATA_ACK_XT = 0x64,
+ CPL_ROCE_CQE = 0x68,
+ CPL_ROCE_CQE_FW = 0x69,
+ CPL_ROCE_CQE_ERR = 0x6A,
+
+ CPL_SACK_REQ = 0x70,
CPL_SET_LE_REQ = 0x80,
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
CPL_TX_TLS_PDU = 0x88,
CPL_TX_TLS_SFO = 0x89,
-
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
+ CPL_RCB_UPD = 0x8C,
+ CPL_SGE_FLR_FLUSH = 0xA0,
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -138,15 +151,27 @@ enum {
CPL_TLS_DATA = 0xB1,
CPL_ISCSI_DATA = 0xB2,
CPL_FCOE_DATA = 0xB3,
+ CPL_NVMT_DATA = 0xB4,
+ CPL_NVMT_CMP = 0xB5,
+ CPL_NVMT_CMP_IMM = 0xB6,
+ CPL_NVMT_CMP_SRQ = 0xB7,
+ CPL_ROCE_ACK_NAK_REQ = 0xBC,
+ CPL_ROCE_ACK_NAK = 0xBD,
CPL_FW4_MSG = 0xC0,
CPL_FW4_PLD = 0xC1,
+ CPL_RDMA_CQE_SRQ = 0xC2,
+ CPL_ACCELERATOR_ACK = 0xC4,
CPL_FW4_ACK = 0xC3,
+ CPL_RX_PKT_IPSEC = 0xC6,
CPL_SRQ_TABLE_RPL = 0xCC,
+ CPL_TX_DATA_REQ = 0xCF,
+
CPL_RX_PHYS_DSGL = 0xD0,
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_ACCELERATOR_HDR = 0xE8,
CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -233,6 +258,8 @@ enum {
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
ULP_MODE_TLS = 8,
+ ULP_MODE_RDMA_V2 = 10,
+ ULP_MODE_NVMET = 11,
};
enum {
@@ -325,9 +352,14 @@ union opcode_tid {
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command */
-#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+#define GET_TID(cmd) (G_TID(be32toh(OPCODE_TID(cmd))))
#define GET_OPCODE(cmd) ((cmd)->ot.opcode)
+
+/*
+ * Note that this driver splits the 14b opaque atid into an 11b atid and a 3b
+ * cookie that is used to demux replies for shared CPLs.
+ */
/* partitioning of TID fields that also carry a queue id */
#define S_TID_TID 0
#define M_TID_TID 0x7ff
@@ -717,7 +749,7 @@ struct cpl_pass_establish {
struct cpl_pass_accept_req {
RSS_HDR
union opcode_tid ot;
- __be16 rsvd;
+ __be16 ipsecen_outiphdrlen;
__be16 len;
__be32 hdr_len;
__be16 vlan;
@@ -775,6 +807,155 @@ struct cpl_pass_accept_req {
#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+struct cpl_t7_pass_accept_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 ipsecen_to_outiphdrlen;
+ __be16 length;
+ __be32 ethhdrlen_to_rxchannel;
+ __be16 vlantag;
+ __be16 interface_to_mac_ix;
+ __be32 tos_ptid;
+ __be16 tcpmss;
+ __u8 tcpwsc;
+ __u8 tcptmstp_to_tcpunkn;
+};
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_IPSECEN \
+ V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 10
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 0x3
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 14
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 8
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 0x3f
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+#define G_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 9
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define F_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH \
+ V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0x1ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX) & M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TOS 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_TOS 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_TOS(x) ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TOS)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TOS(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TOS) & M_CPL_T7_PASS_ACCEPT_REQ_TOS)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_PTID 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_PTID 0xffffff
+#define V_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_PTID)
+#define G_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_PTID) & M_CPL_T7_PASS_ACCEPT_REQ_PTID)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 7
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 6
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPSACK \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPECN 5
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPECN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPECN) & M_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPECN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 4
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(1U)
+
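Decoding the packed request words follows the same convention as GET_TID
above (byte-swap with be32toh, then extract with the G_ macros); a sketch
with the field semantics assumed from the names:

/* Illustrative decode of a received cpl_t7_pass_accept_req. */
static void
example_decode(const struct cpl_t7_pass_accept_req *cpl)
{
	u32 hdr = be32toh(cpl->ethhdrlen_to_rxchannel);
	u32 tp = be32toh(cpl->tos_ptid);
	u32 ethhdrlen, iphdrlen, ptid;

	ethhdrlen = G_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(hdr);
	iphdrlen = G_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(hdr);
	ptid = G_CPL_T7_PASS_ACCEPT_REQ_PTID(tp);
	(void)ethhdrlen; (void)iphdrlen; (void)ptid;
}
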
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -810,6 +991,7 @@ struct cpl_act_open_req {
#define M_FILTER_TUPLE 0xFFFFFFFFFF
#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -843,6 +1025,26 @@ struct cpl_t6_act_open_req {
#define V_AOPEN_FCOEMASK(x) ((x) << S_AOPEN_FCOEMASK)
#define F_AOPEN_FCOEMASK V_AOPEN_FCOEMASK(1U)
+struct cpl_t7_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_T7_FILTER_TUPLE 1
+#define M_T7_FILTER_TUPLE 0x7FFFFFFFFFFFFFFFULL
+#define V_T7_FILTER_TUPLE(x) ((x) << S_T7_FILTER_TUPLE)
+#define G_T7_FILTER_TUPLE(x) (((x) >> S_T7_FILTER_TUPLE) & M_T7_FILTER_TUPLE)
+
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -889,6 +1091,23 @@ struct cpl_t6_act_open_req6 {
__be32 opt3;
};
+struct cpl_t7_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
struct cpl_act_open_rpl {
RSS_HDR
union opcode_tid ot;
@@ -921,8 +1140,7 @@ struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
- __u8 rsvd;
- __u8 cookie;
+ __be16 cookie;
};
/* cpl_get_tcb.reply_ctrl fields */
@@ -931,10 +1149,20 @@ struct cpl_get_tcb {
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
+#define S_T7_QUEUENO 0
+#define M_T7_QUEUENO 0xFFF
+#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
+#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
+
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+#define S_T7_REPLY_CHAN 12
+#define M_T7_REPLY_CHAN 0x7
+#define V_T7_REPLY_CHAN(x) ((x) << S_T7_REPLY_CHAN)
+#define G_T7_REPLY_CHAN(x) (((x) >> S_T7_REPLY_CHAN) & M_T7_REPLY_CHAN)
+
#define S_NO_REPLY 15
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
#define F_NO_REPLY V_NO_REPLY(1U)
@@ -1018,6 +1246,40 @@ struct cpl_close_listsvr_req {
#define V_LISTSVR_IPV6(x) ((x) << S_LISTSVR_IPV6)
#define F_LISTSVR_IPV6 V_LISTSVR_IPV6(1U)
+struct cpl_t7_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 noreply_to_queue;
+ __be16 r2;
+};
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 15
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 14
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6) & M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0xfff
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+
struct cpl_close_listsvr_rpl {
RSS_HDR
union opcode_tid ot;
@@ -1250,6 +1512,71 @@ struct cpl_tx_data_ack {
__be32 snd_una;
};
+struct cpl_tx_data_ack_xt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 extinfoh[2];
+ __be32 extinfol[2];
+};
+
+struct cpl_tx_data_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+};
+
+#define S_CPL_TX_DATA_REQ_TID 0
+#define M_CPL_TX_DATA_REQ_TID 0xffffff
+#define V_CPL_TX_DATA_REQ_TID(x) ((x) << S_CPL_TX_DATA_REQ_TID)
+#define G_CPL_TX_DATA_REQ_TID(x) \
+ (((x) >> S_CPL_TX_DATA_REQ_TID) & M_CPL_TX_DATA_REQ_TID)
+
+struct cpl_sack_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 block1[2];
+ __be32 block2[2];
+ __be32 block3[2];
+};
+
+struct cpl_sge_flr_flush {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 cookievalue_cookiesel;
+};
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIEVALUE 4
+#define M_CPL_SGE_FLR_FLUSH_COOKIEVALUE 0x3ff
+#define V_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+#define G_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIEVALUE) & \
+ M_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIESEL 0
+#define M_CPL_SGE_FLR_FLUSH_COOKIESEL 0xf
+#define V_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIESEL)
+#define G_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIESEL) & M_CPL_SGE_FLR_FLUSH_COOKIESEL)
+
struct cpl_wr_ack { /* XXX */
RSS_HDR
union opcode_tid ot;
@@ -1271,8 +1598,6 @@ struct cpl_tx_pkt {
struct cpl_tx_pkt_core c;
};
-#define cpl_tx_pkt_xt cpl_tx_pkt
-
/* cpl_tx_pkt_core.ctrl0 fields */
#define S_TXPKT_VF 0
#define M_TXPKT_VF 0xFF
@@ -1404,6 +1729,261 @@ struct cpl_tx_pkt {
#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
+struct cpl_tx_pkt_xt {
+ WR_HDR;
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be32 ctrl1;
+ __be32 ctrl2;
+};
+
+/* cpl_tx_pkt_xt.core.ctrl0 fields */
+#define S_CPL_TX_PKT_XT_OPCODE 24
+#define M_CPL_TX_PKT_XT_OPCODE 0xff
+#define V_CPL_TX_PKT_XT_OPCODE(x) ((x) << S_CPL_TX_PKT_XT_OPCODE)
+#define G_CPL_TX_PKT_XT_OPCODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OPCODE) & M_CPL_TX_PKT_XT_OPCODE)
+
+#define S_CPL_TX_PKT_XT_TIMESTAMP 23
+#define M_CPL_TX_PKT_XT_TIMESTAMP 0x1
+#define V_CPL_TX_PKT_XT_TIMESTAMP(x) ((x) << S_CPL_TX_PKT_XT_TIMESTAMP)
+#define G_CPL_TX_PKT_XT_TIMESTAMP(x) \
+ (((x) >> S_CPL_TX_PKT_XT_TIMESTAMP) & M_CPL_TX_PKT_XT_TIMESTAMP)
+#define F_CPL_TX_PKT_XT_TIMESTAMP V_CPL_TX_PKT_XT_TIMESTAMP(1U)
+
+#define S_CPL_TX_PKT_XT_STATDISABLE 22
+#define M_CPL_TX_PKT_XT_STATDISABLE 0x1
+#define V_CPL_TX_PKT_XT_STATDISABLE(x) ((x) << S_CPL_TX_PKT_XT_STATDISABLE)
+#define G_CPL_TX_PKT_XT_STATDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATDISABLE) & M_CPL_TX_PKT_XT_STATDISABLE)
+#define F_CPL_TX_PKT_XT_STATDISABLE V_CPL_TX_PKT_XT_STATDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_FCSDIS 21
+#define M_CPL_TX_PKT_XT_FCSDIS 0x1
+#define V_CPL_TX_PKT_XT_FCSDIS(x) ((x) << S_CPL_TX_PKT_XT_FCSDIS)
+#define G_CPL_TX_PKT_XT_FCSDIS(x) \
+ (((x) >> S_CPL_TX_PKT_XT_FCSDIS) & M_CPL_TX_PKT_XT_FCSDIS)
+#define F_CPL_TX_PKT_XT_FCSDIS V_CPL_TX_PKT_XT_FCSDIS(1U)
+
+#define S_CPL_TX_PKT_XT_STATSPECIAL 20
+#define M_CPL_TX_PKT_XT_STATSPECIAL 0x1
+#define V_CPL_TX_PKT_XT_STATSPECIAL(x) ((x) << S_CPL_TX_PKT_XT_STATSPECIAL)
+#define G_CPL_TX_PKT_XT_STATSPECIAL(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATSPECIAL) & M_CPL_TX_PKT_XT_STATSPECIAL)
+#define F_CPL_TX_PKT_XT_STATSPECIAL V_CPL_TX_PKT_XT_STATSPECIAL(1U)
+
+#define S_CPL_TX_PKT_XT_INTERFACE 16
+#define M_CPL_TX_PKT_XT_INTERFACE 0xf
+#define V_CPL_TX_PKT_XT_INTERFACE(x) ((x) << S_CPL_TX_PKT_XT_INTERFACE)
+#define G_CPL_TX_PKT_XT_INTERFACE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_INTERFACE) & M_CPL_TX_PKT_XT_INTERFACE)
+
+#define S_CPL_TX_PKT_XT_OVLAN 15
+#define M_CPL_TX_PKT_XT_OVLAN 0x1
+#define V_CPL_TX_PKT_XT_OVLAN(x) ((x) << S_CPL_TX_PKT_XT_OVLAN)
+#define G_CPL_TX_PKT_XT_OVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLAN) & M_CPL_TX_PKT_XT_OVLAN)
+#define F_CPL_TX_PKT_XT_OVLAN V_CPL_TX_PKT_XT_OVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_OVLANIDX 12
+#define M_CPL_TX_PKT_XT_OVLANIDX 0x7
+#define V_CPL_TX_PKT_XT_OVLANIDX(x) ((x) << S_CPL_TX_PKT_XT_OVLANIDX)
+#define G_CPL_TX_PKT_XT_OVLANIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLANIDX) & M_CPL_TX_PKT_XT_OVLANIDX)
+
+#define S_CPL_TX_PKT_XT_VFVALID 11
+#define M_CPL_TX_PKT_XT_VFVALID 0x1
+#define V_CPL_TX_PKT_XT_VFVALID(x) ((x) << S_CPL_TX_PKT_XT_VFVALID)
+#define G_CPL_TX_PKT_XT_VFVALID(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VFVALID) & M_CPL_TX_PKT_XT_VFVALID)
+#define F_CPL_TX_PKT_XT_VFVALID V_CPL_TX_PKT_XT_VFVALID(1U)
+
+#define S_CPL_TX_PKT_XT_PF 8
+#define M_CPL_TX_PKT_XT_PF 0x7
+#define V_CPL_TX_PKT_XT_PF(x) ((x) << S_CPL_TX_PKT_XT_PF)
+#define G_CPL_TX_PKT_XT_PF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_PF) & M_CPL_TX_PKT_XT_PF)
+
+#define S_CPL_TX_PKT_XT_VF 0
+#define M_CPL_TX_PKT_XT_VF 0xff
+#define V_CPL_TX_PKT_XT_VF(x) ((x) << S_CPL_TX_PKT_XT_VF)
+#define G_CPL_TX_PKT_XT_VF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VF) & M_CPL_TX_PKT_XT_VF)
+
+/* cpl_tx_pkt_xt.core.ctrl1 fields */
+#define S_CPL_TX_PKT_XT_L4CHKDISABLE 31
+#define M_CPL_TX_PKT_XT_L4CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L4CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L4CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L4CHKDISABLE) & M_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L4CHKDISABLE V_CPL_TX_PKT_XT_L4CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_L3CHKDISABLE 30
+#define M_CPL_TX_PKT_XT_L3CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L3CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L3CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L3CHKDISABLE) & M_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L3CHKDISABLE V_CPL_TX_PKT_XT_L3CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_OUTL4CHKEN 29
+#define M_CPL_TX_PKT_XT_OUTL4CHKEN 0x1
+#define V_CPL_TX_PKT_XT_OUTL4CHKEN(x) ((x) << S_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define G_CPL_TX_PKT_XT_OUTL4CHKEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OUTL4CHKEN) & M_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define F_CPL_TX_PKT_XT_OUTL4CHKEN V_CPL_TX_PKT_XT_OUTL4CHKEN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLAN 28
+#define M_CPL_TX_PKT_XT_IVLAN 0x1
+#define V_CPL_TX_PKT_XT_IVLAN(x) ((x) << S_CPL_TX_PKT_XT_IVLAN)
+#define G_CPL_TX_PKT_XT_IVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLAN) & M_CPL_TX_PKT_XT_IVLAN)
+#define F_CPL_TX_PKT_XT_IVLAN V_CPL_TX_PKT_XT_IVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLANTAG 12
+#define M_CPL_TX_PKT_XT_IVLANTAG 0xffff
+#define V_CPL_TX_PKT_XT_IVLANTAG(x) ((x) << S_CPL_TX_PKT_XT_IVLANTAG)
+#define G_CPL_TX_PKT_XT_IVLANTAG(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLANTAG) & M_CPL_TX_PKT_XT_IVLANTAG)
+
+#define S_CPL_TX_PKT_XT_CHKTYPE 8
+#define M_CPL_TX_PKT_XT_CHKTYPE 0xf
+#define V_CPL_TX_PKT_XT_CHKTYPE(x) ((x) << S_CPL_TX_PKT_XT_CHKTYPE)
+#define G_CPL_TX_PKT_XT_CHKTYPE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKTYPE) & M_CPL_TX_PKT_XT_CHKTYPE)
+
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0xff
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+
+#define S_CPL_TX_PKT_XT_ETHHDRLEN 0
+#define M_CPL_TX_PKT_XT_ETHHDRLEN 0xff
+#define V_CPL_TX_PKT_XT_ETHHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_ETHHDRLEN)
+#define G_CPL_TX_PKT_XT_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ETHHDRLEN) & M_CPL_TX_PKT_XT_ETHHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKINSMODE 6
+#define M_CPL_TX_PKT_XT_ROCECHKINSMODE 0x3
+#define V_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKINSMODE)
+#define G_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKINSMODE) & M_CPL_TX_PKT_XT_ROCECHKINSMODE)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0x3f
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+
+/* cpl_tx_pkt_xt.core.ctrl2 fields */
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 30
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 0x3
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+
+#define S_CPL_TX_PKT_XT_CHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_CHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTARTOFFSET) & M_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPHDRLEN 20
+#define M_CPL_TX_PKT_XT_IPHDRLEN 0xfff
+#define V_CPL_TX_PKT_XT_IPHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_IPHDRLEN)
+#define G_CPL_TX_PKT_XT_IPHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPHDRLEN) & M_CPL_TX_PKT_XT_IPHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET) & \
+ M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_CHKSTOPOFFSET 12
+#define M_CPL_TX_PKT_XT_CHKSTOPOFFSET 0xff
+#define V_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTOPOFFSET) & M_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPSECIDX 0
+#define M_CPL_TX_PKT_XT_IPSECIDX 0xfff
+#define V_CPL_TX_PKT_XT_IPSECIDX(x) ((x) << S_CPL_TX_PKT_XT_IPSECIDX)
+#define G_CPL_TX_PKT_XT_IPSECIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPSECIDX) & M_CPL_TX_PKT_XT_IPSECIDX)
+
+#define S_CPL_TX_TNL_LSO_BTH_OPCODE 24
+#define M_CPL_TX_TNL_LSO_BTH_OPCODE 0xff
+#define V_CPL_TX_TNL_LSO_BTH_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_BTH_OPCODE)
+#define G_CPL_TX_TNL_LSO_BTH_OPCODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_BTH_OPCODE) & \
+ M_CPL_TX_TNL_LSO_BTH_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0
+#define M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0xffffff
+#define V_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+#define G_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN) & \
+ M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+
+#define S_CPL_TX_TNL_LSO_MSS_TVER 8
+#define M_CPL_TX_TNL_LSO_MSS_TVER 0xf
+#define V_CPL_TX_TNL_LSO_MSS_TVER(x) ((x) << S_CPL_TX_TNL_LSO_MSS_TVER)
+#define G_CPL_TX_TNL_LSO_MSS_TVER(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_TVER) & M_CPL_TX_TNL_LSO_MSS_TVER)
+
+#define S_CPL_TX_TNL_LSO_MSS_M 7
+#define M_CPL_TX_TNL_LSO_MSS_M 0x1
+#define V_CPL_TX_TNL_LSO_MSS_M(x) ((x) << S_CPL_TX_TNL_LSO_MSS_M)
+#define G_CPL_TX_TNL_LSO_MSS_M(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_M) & M_CPL_TX_TNL_LSO_MSS_M)
+
+#define S_CPL_TX_TNL_LSO_MSS_PMTU 4
+#define M_CPL_TX_TNL_LSO_MSS_PMTU 0x7
+#define V_CPL_TX_TNL_LSO_MSS_PMTU(x) ((x) << S_CPL_TX_TNL_LSO_MSS_PMTU)
+#define G_CPL_TX_TNL_LSO_MSS_PMTU(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_PMTU) & M_CPL_TX_TNL_LSO_MSS_PMTU)
+
+#define S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 3
+#define M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 0x1
+#define V_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ ((x) << S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+#define G_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR) & M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+
+#define S_CPL_TX_TNL_LSO_MSS_ACKREQ 1
+#define M_CPL_TX_TNL_LSO_MSS_ACKREQ 0x3
+#define V_CPL_TX_TNL_LSO_MSS_ACKREQ(x) ((x) << S_CPL_TX_TNL_LSO_MSS_ACKREQ)
+#define G_CPL_TX_TNL_LSO_MSS_ACKREQ(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_ACKREQ) & M_CPL_TX_TNL_LSO_MSS_ACKREQ)
+
+#define S_CPL_TX_TNL_LSO_MSS_SE 0
+#define M_CPL_TX_TNL_LSO_MSS_SE 0x1
+#define V_CPL_TX_TNL_LSO_MSS_SE(x) ((x) << S_CPL_TX_TNL_LSO_MSS_SE)
+#define G_CPL_TX_TNL_LSO_MSS_SE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_SE) & M_CPL_TX_TNL_LSO_MSS_SE)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
__be16 ipid_ofst;
@@ -1600,6 +2180,100 @@ struct cpl_tx_data_iso {
(((x) >> S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) & \
M_CPL_TX_DATA_ISO_SEGLEN_OFFSET)
+struct cpl_t7_tx_data_iso {
+ __be32 op_to_scsi;
+ __u8 nvme_tcp_pkd;
+ __u8 ahs;
+ __be16 mpdu;
+ __be32 burst;
+ __be32 size;
+ __be32 num_pi_bytes_seglen_offset;
+ __be32 datasn_offset;
+ __be32 buffer_offset;
+ __be32 reserved3;
+};
+
+#define S_CPL_T7_TX_DATA_ISO_OPCODE 24
+#define M_CPL_T7_TX_DATA_ISO_OPCODE 0xff
+#define V_CPL_T7_TX_DATA_ISO_OPCODE(x) ((x) << S_CPL_T7_TX_DATA_ISO_OPCODE)
+#define G_CPL_T7_TX_DATA_ISO_OPCODE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_OPCODE) & M_CPL_T7_TX_DATA_ISO_OPCODE)
+
+#define S_CPL_T7_TX_DATA_ISO_FIRST 23
+#define M_CPL_T7_TX_DATA_ISO_FIRST 0x1
+#define V_CPL_T7_TX_DATA_ISO_FIRST(x) ((x) << S_CPL_T7_TX_DATA_ISO_FIRST)
+#define G_CPL_T7_TX_DATA_ISO_FIRST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_FIRST) & M_CPL_T7_TX_DATA_ISO_FIRST)
+#define F_CPL_T7_TX_DATA_ISO_FIRST V_CPL_T7_TX_DATA_ISO_FIRST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_LAST 22
+#define M_CPL_T7_TX_DATA_ISO_LAST 0x1
+#define V_CPL_T7_TX_DATA_ISO_LAST(x) ((x) << S_CPL_T7_TX_DATA_ISO_LAST)
+#define G_CPL_T7_TX_DATA_ISO_LAST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_LAST) & M_CPL_T7_TX_DATA_ISO_LAST)
+#define F_CPL_T7_TX_DATA_ISO_LAST V_CPL_T7_TX_DATA_ISO_LAST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_CPLHDRLEN 21
+#define M_CPL_T7_TX_DATA_ISO_CPLHDRLEN 0x1
+#define V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define G_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_CPLHDRLEN) & M_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define F_CPL_T7_TX_DATA_ISO_CPLHDRLEN V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_HDRCRC 20
+#define M_CPL_T7_TX_DATA_ISO_HDRCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_HDRCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define G_CPL_T7_TX_DATA_ISO_HDRCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_HDRCRC) & M_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define F_CPL_T7_TX_DATA_ISO_HDRCRC V_CPL_T7_TX_DATA_ISO_HDRCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_PLDCRC 19
+#define M_CPL_T7_TX_DATA_ISO_PLDCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_PLDCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define G_CPL_T7_TX_DATA_ISO_PLDCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_PLDCRC) & M_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define F_CPL_T7_TX_DATA_ISO_PLDCRC V_CPL_T7_TX_DATA_ISO_PLDCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_IMMEDIATE 18
+#define M_CPL_T7_TX_DATA_ISO_IMMEDIATE 0x1
+#define V_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define G_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_IMMEDIATE) & M_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define F_CPL_T7_TX_DATA_ISO_IMMEDIATE \
+ V_CPL_T7_TX_DATA_ISO_IMMEDIATE(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_SCSI 16
+#define M_CPL_T7_TX_DATA_ISO_SCSI 0x3
+#define V_CPL_T7_TX_DATA_ISO_SCSI(x) ((x) << S_CPL_T7_TX_DATA_ISO_SCSI)
+#define G_CPL_T7_TX_DATA_ISO_SCSI(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_SCSI) & M_CPL_T7_TX_DATA_ISO_SCSI)
+
+#define S_CPL_T7_TX_DATA_ISO_NVME_TCP 0
+#define M_CPL_T7_TX_DATA_ISO_NVME_TCP 0x1
+#define V_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define G_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NVME_TCP) & M_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define F_CPL_T7_TX_DATA_ISO_NVME_TCP \
+ V_CPL_T7_TX_DATA_ISO_NVME_TCP(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_NUMPIBYTES 24
+#define M_CPL_T7_TX_DATA_ISO_NUMPIBYTES 0xff
+#define V_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+#define G_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NUMPIBYTES) & M_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+
+#define S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0
+#define M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0xffffff
+#define V_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+#define G_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET) & \
+ M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+
struct cpl_iscsi_hdr {
RSS_HDR
union opcode_tid ot;
@@ -2324,6 +2998,18 @@ struct cpl_l2t_write_req {
#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
#define F_L2T_W_NOREPLY V_L2T_W_NOREPLY(1U)
+
+/* cpl_l2t_write_req.vlan fields */
+#define S_L2T_VLANTAG 0
+#define M_L2T_VLANTAG 0xFFF
+#define V_L2T_VLANTAG(x) ((x) << S_L2T_VLANTAG)
+#define G_L2T_VLANTAG(x) (((x) >> S_L2T_VLANTAG) & M_L2T_VLANTAG)
+
+#define S_L2T_VLANPRIO 13
+#define M_L2T_VLANPRIO 0x7
+#define V_L2T_VLANPRIO(x) ((x) << S_L2T_VLANPRIO)
+#define G_L2T_VLANPRIO(x) (((x) >> S_L2T_VLANPRIO) & M_L2T_VLANPRIO)
+
#define CPL_L2T_VLAN_NONE 0xfff
struct cpl_l2t_write_rpl {
@@ -2400,6 +3086,175 @@ struct cpl_srq_table_rpl {
#define V_SRQT_IDX(x) ((x) << S_SRQT_IDX)
#define G_SRQT_IDX(x) (((x) >> S_SRQT_IDX) & M_SRQT_IDX)
+struct cpl_t7_srq_table_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_index;
+ __be16 srqlimit_pkd;
+ __be16 cqid;
+ __be16 xdid;
+ __be16 pdid;
+ __be32 quelen_quebase;
+ __be32 curmsn_maxmsn;
+};
+
+#define S_CPL_T7_SRQ_TABLE_REQ_NOREPLY 31
+#define M_CPL_T7_SRQ_TABLE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define G_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_NOREPLY) & M_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define F_CPL_T7_SRQ_TABLE_REQ_NOREPLY \
+ V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_WRITE 30
+#define M_CPL_T7_SRQ_TABLE_REQ_WRITE 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_WRITE(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define G_CPL_T7_SRQ_TABLE_REQ_WRITE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_WRITE) & M_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define F_CPL_T7_SRQ_TABLE_REQ_WRITE V_CPL_T7_SRQ_TABLE_REQ_WRITE(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INCR 28
+#define M_CPL_T7_SRQ_TABLE_REQ_INCR 0x3
+#define V_CPL_T7_SRQ_TABLE_REQ_INCR(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INCR)
+#define G_CPL_T7_SRQ_TABLE_REQ_INCR(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INCR) & M_CPL_T7_SRQ_TABLE_REQ_INCR)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_OVER 24
+#define M_CPL_T7_SRQ_TABLE_REQ_OVER 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_OVER(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_OVER)
+#define G_CPL_T7_SRQ_TABLE_REQ_OVER(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_OVER) & M_CPL_T7_SRQ_TABLE_REQ_OVER)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 23
+#define M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define G_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD) & M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define F_CPL_T7_SRQ_TABLE_REQ_LIMITUPD V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_REQ_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_REQ_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INDEX)
+#define G_CPL_T7_SRQ_TABLE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INDEX) & M_CPL_T7_SRQ_TABLE_REQ_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_REQ_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUELEN) & M_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUEBASE) & M_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_REQ_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_CURMSN) & M_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_MAXMSN) & M_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+
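+/*
+ * Editorial sketch (not part of the hardware definition): the S_/M_/V_/F_
+ * macros above build the big-endian noreply_to_index word of a request.
+ * "req" and "idx" are hypothetical:
+ *
+ *	req->noreply_to_index = htobe32(F_CPL_T7_SRQ_TABLE_REQ_NOREPLY |
+ *	    F_CPL_T7_SRQ_TABLE_REQ_WRITE |
+ *	    V_CPL_T7_SRQ_TABLE_REQ_INDEX(idx));
+ */
+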
+struct cpl_t7_srq_table_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 status_index;
+ __be16 srqlimit_pkd;
+ __be16 cqid;
+ __be16 xdid;
+ __be16 pdid;
+ __be32 quelen_quebase;
+ __be32 curmsn_maxmsn;
+};
+
+#define S_CPL_T7_SRQ_TABLE_RPL_STATUS 24
+#define M_CPL_T7_SRQ_TABLE_RPL_STATUS 0xff
+#define V_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_STATUS)
+#define G_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_STATUS) & M_CPL_T7_SRQ_TABLE_RPL_STATUS)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_RPL_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_RPL_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_RPL_INDEX)
+#define G_CPL_T7_SRQ_TABLE_RPL_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_INDEX) & M_CPL_T7_SRQ_TABLE_RPL_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_RPL_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUELEN) & M_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUEBASE) & M_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_RPL_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_CURMSN) & M_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_MAXMSN) & M_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+
+struct cpl_rdma_async_event {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 EventInfo;
+};
+
+#define S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 16
+#define M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 0xf
+#define V_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ ((x) << S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+#define G_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE) & \
+ M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+
+#define S_CPL_RDMA_ASYNC_EVENT_INDEX 0
+#define M_CPL_RDMA_ASYNC_EVENT_INDEX 0xffff
+#define V_CPL_RDMA_ASYNC_EVENT_INDEX(x) ((x) << S_CPL_RDMA_ASYNC_EVENT_INDEX)
+#define G_CPL_RDMA_ASYNC_EVENT_INDEX(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_INDEX) & M_CPL_RDMA_ASYNC_EVENT_INDEX)
+
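+/*
+ * Editorial sketch: the G_ accessors above split the EventInfo word of a
+ * received cpl_rdma_async_event.  "rpl" is hypothetical:
+ *
+ *	type = G_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(be32toh(rpl->EventInfo));
+ *	index = G_CPL_RDMA_ASYNC_EVENT_INDEX(be32toh(rpl->EventInfo));
+ */
+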
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2479,6 +3334,118 @@ struct cpl_smt_read_rpl {
#define V_SMTW_VF_VLD(x) ((x) << S_SMTW_VF_VLD)
#define F_SMTW_VF_VLD V_SMTW_VF_VLD(1U)
+struct cpl_t7_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_mtu;
+ union smt_write_req {
+ struct smt_write_req_pfvf {
+ __be64 tagvalue;
+ __be32 pfvf_smac_hi;
+ __be32 smac_lo;
+ __be64 tagext;
+ } pfvf;
+ struct smt_write_req_ipv4 {
+ __be32 srcipv4;
+ __be32 destipv4;
+ } ipv4;
+ struct smt_write_req_ipv6 {
+ __be64 ipv6ms;
+ __be64 ipv6ls;
+ } ipv6;
+ } u;
+};
+
+#define S_CPL_T7_SMT_WRITE_REQ_NOREPLY 31
+#define M_CPL_T7_SMT_WRITE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define G_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_NOREPLY) & M_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define F_CPL_T7_SMT_WRITE_REQ_NOREPLY \
+ V_CPL_T7_SMT_WRITE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGINSERT 30
+#define M_CPL_T7_SMT_WRITE_REQ_TAGINSERT 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGINSERT) & \
+ M_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define F_CPL_T7_SMT_WRITE_REQ_TAGINSERT \
+ V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGTYPE 28
+#define M_CPL_T7_SMT_WRITE_REQ_TAGTYPE 0x3
+#define V_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGTYPE) & M_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+
+#define S_CPL_T7_SMT_WRITE_REQ_INDEX 20
+#define M_CPL_T7_SMT_WRITE_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_WRITE_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_INDEX)
+#define G_CPL_T7_SMT_WRITE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_INDEX) & M_CPL_T7_SMT_WRITE_REQ_INDEX)
+
+#define S_CPL_T7_SMT_WRITE_REQ_OVLAN 16
+#define M_CPL_T7_SMT_WRITE_REQ_OVLAN 0xf
+#define V_CPL_T7_SMT_WRITE_REQ_OVLAN(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_OVLAN)
+#define G_CPL_T7_SMT_WRITE_REQ_OVLAN(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_OVLAN) & M_CPL_T7_SMT_WRITE_REQ_OVLAN)
+
+#define S_CPL_T7_SMT_WRITE_REQ_IPSEC 14
+#define M_CPL_T7_SMT_WRITE_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define G_CPL_T7_SMT_WRITE_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_IPSEC) & M_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define F_CPL_T7_SMT_WRITE_REQ_IPSEC V_CPL_T7_SMT_WRITE_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_MTU 0
+#define M_CPL_T7_SMT_WRITE_REQ_MTU 0x3fff
+#define V_CPL_T7_SMT_WRITE_REQ_MTU(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_MTU)
+#define G_CPL_T7_SMT_WRITE_REQ_MTU(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_MTU) & M_CPL_T7_SMT_WRITE_REQ_MTU)
+
+#define S_CPL_T7_SMT_WRITE_REQ_PFVF 16
+#define M_CPL_T7_SMT_WRITE_REQ_PFVF 0xfff
+#define V_CPL_T7_SMT_WRITE_REQ_PFVF(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_PFVF)
+#define G_CPL_T7_SMT_WRITE_REQ_PFVF(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_PFVF) & M_CPL_T7_SMT_WRITE_REQ_PFVF)
+
+#define S_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0
+#define M_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0xffff
+#define V_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+#define G_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_SMAC_HI) & M_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+
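+/*
+ * Editorial sketch: for a PF/VF entry the union's pfvf member is used and
+ * the 48-bit source MAC is split across pfvf_smac_hi and smac_lo.  "req",
+ * "pfvf" and "smac" (a 64-bit holder for the MAC) are hypothetical:
+ *
+ *	req->u.pfvf.pfvf_smac_hi = htobe32(V_CPL_T7_SMT_WRITE_REQ_PFVF(pfvf) |
+ *	    V_CPL_T7_SMT_WRITE_REQ_SMAC_HI(smac >> 32));
+ *	req->u.pfvf.smac_lo = htobe32(smac & 0xffffffff);
+ */
+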
+struct cpl_t7_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 index_to_ipsecidx;
+};
+
+#define S_CPL_T7_SMT_READ_REQ_INDEX 20
+#define M_CPL_T7_SMT_READ_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_READ_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_READ_REQ_INDEX)
+#define G_CPL_T7_SMT_READ_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_INDEX) & M_CPL_T7_SMT_READ_REQ_INDEX)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSEC 14
+#define M_CPL_T7_SMT_READ_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_READ_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_READ_REQ_IPSEC)
+#define G_CPL_T7_SMT_READ_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSEC) & M_CPL_T7_SMT_READ_REQ_IPSEC)
+#define F_CPL_T7_SMT_READ_REQ_IPSEC V_CPL_T7_SMT_READ_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSECIDX 0
+#define M_CPL_T7_SMT_READ_REQ_IPSECIDX 0x1fff
+#define V_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ ((x) << S_CPL_T7_SMT_READ_REQ_IPSECIDX)
+#define G_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSECIDX) & M_CPL_T7_SMT_READ_REQ_IPSECIDX)
+
struct cpl_tag_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2611,6 +3578,352 @@ struct cpl_pkt_notify {
#define V_NTFY_T5_ETHHDR_LEN(x) ((x) << S_NTFY_T5_ETHHDR_LEN)
#define G_NTFY_T5_ETHHDR_LEN(x) (((x) >> S_NTFY_T5_ETHHDR_LEN) & M_NTFY_T5_ETHHDR_LEN)
+struct cpl_t7_pkt_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r1;
+ __be16 length;
+ __be32 ethhdrlen_to_macindex;
+ __be32 lineinfo;
+};
+
+#define S_CPL_T7_PKT_NOTIFY_ETHHDRLEN 24
+#define M_CPL_T7_PKT_NOTIFY_ETHHDRLEN 0xff
+#define V_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_ETHHDRLEN) & M_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_IPHDRLEN 18
+#define M_CPL_T7_PKT_NOTIFY_IPHDRLEN 0x3f
+#define V_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) ((x) << S_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_IPHDRLEN) & M_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_TCPHDRLEN 14
+#define M_CPL_T7_PKT_NOTIFY_TCPHDRLEN 0xf
+#define V_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_TCPHDRLEN) & M_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_INTERFACE 10
+#define M_CPL_T7_PKT_NOTIFY_INTERFACE 0xf
+#define V_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_INTERFACE)
+#define G_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_INTERFACE) & M_CPL_T7_PKT_NOTIFY_INTERFACE)
+
+#define S_CPL_T7_PKT_NOTIFY_MACINDEX 0
+#define M_CPL_T7_PKT_NOTIFY_MACINDEX 0x1ff
+#define V_CPL_T7_PKT_NOTIFY_MACINDEX(x) ((x) << S_CPL_T7_PKT_NOTIFY_MACINDEX)
+#define G_CPL_T7_PKT_NOTIFY_MACINDEX(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_MACINDEX) & M_CPL_T7_PKT_NOTIFY_MACINDEX)
+
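+/*
+ * Editorial sketch: the parsed header lengths all live in the single
+ * ethhdrlen_to_macindex word and come out with the G_ macros above.
+ * "cpl" and "w" are hypothetical:
+ *
+ *	w = be32toh(cpl->ethhdrlen_to_macindex);
+ *	ethlen = G_CPL_T7_PKT_NOTIFY_ETHHDRLEN(w);
+ *	iplen = G_CPL_T7_PKT_NOTIFY_IPHDRLEN(w);
+ *	tcplen = G_CPL_T7_PKT_NOTIFY_TCPHDRLEN(w);
+ */
+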
+struct cpl_rdma_cqe {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_RSSCTRL 16
+#define M_CPL_RDMA_CQE_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_RSSCTRL)
+#define G_CPL_RDMA_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_RSSCTRL) & M_CPL_RDMA_CQE_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_CQID 0
+#define M_CPL_RDMA_CQE_CQID 0xffff
+#define V_CPL_RDMA_CQE_CQID(x) ((x) << S_CPL_RDMA_CQE_CQID)
+#define G_CPL_RDMA_CQE_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQID) & M_CPL_RDMA_CQE_CQID)
+
+#define S_CPL_RDMA_CQE_TID 8
+#define M_CPL_RDMA_CQE_TID 0xfffff
+#define V_CPL_RDMA_CQE_TID(x) ((x) << S_CPL_RDMA_CQE_TID)
+#define G_CPL_RDMA_CQE_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_TID) & M_CPL_RDMA_CQE_TID)
+
+#define S_CPL_RDMA_CQE_FLITCNT 0
+#define M_CPL_RDMA_CQE_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_FLITCNT)
+#define G_CPL_RDMA_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FLITCNT) & M_CPL_RDMA_CQE_FLITCNT)
+
+#define S_CPL_RDMA_CQE_QPID 12
+#define M_CPL_RDMA_CQE_QPID 0xfffff
+#define V_CPL_RDMA_CQE_QPID(x) ((x) << S_CPL_RDMA_CQE_QPID)
+#define G_CPL_RDMA_CQE_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_QPID) & M_CPL_RDMA_CQE_QPID)
+
+#define S_CPL_RDMA_CQE_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_GENERATION_BIT) & M_CPL_RDMA_CQE_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_GENERATION_BIT V_CPL_RDMA_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_STATUS 5
+#define M_CPL_RDMA_CQE_STATUS 0x1f
+#define V_CPL_RDMA_CQE_STATUS(x) ((x) << S_CPL_RDMA_CQE_STATUS)
+#define G_CPL_RDMA_CQE_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_STATUS) & M_CPL_RDMA_CQE_STATUS)
+
+#define S_CPL_RDMA_CQE_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_CQE_TYPE)
+#define G_CPL_RDMA_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQE_TYPE) & M_CPL_RDMA_CQE_CQE_TYPE)
+#define F_CPL_RDMA_CQE_CQE_TYPE V_CPL_RDMA_CQE_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_WR_TYPE 0
+#define M_CPL_RDMA_CQE_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_WR_TYPE)
+#define G_CPL_RDMA_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_WR_TYPE) & M_CPL_RDMA_CQE_WR_TYPE)
+
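+/*
+ * Editorial sketch: qpid_to_wr_type packs the QPID, generation bit,
+ * status, CQE type and WR type into one word; a consumer splits it with
+ * the accessors above.  "cqe" and "w" are hypothetical:
+ *
+ *	w = be32toh(cqe->qpid_to_wr_type);
+ *	qpid = G_CPL_RDMA_CQE_QPID(w);
+ *	gen = (w & F_CPL_RDMA_CQE_GENERATION_BIT) != 0;
+ *	status = G_CPL_RDMA_CQE_STATUS(w);
+ */
+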
+struct cpl_rdma_cqe_srq {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 r3;
+ __be32 rqe;
+};
+
+#define S_CPL_RDMA_CQE_SRQ_OPCODE 24
+#define M_CPL_RDMA_CQE_SRQ_OPCODE 0xff
+#define V_CPL_RDMA_CQE_SRQ_OPCODE(x) ((x) << S_CPL_RDMA_CQE_SRQ_OPCODE)
+#define G_CPL_RDMA_CQE_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_OPCODE) & M_CPL_RDMA_CQE_SRQ_OPCODE)
+
+#define S_CPL_RDMA_CQE_SRQ_RSSCTRL 16
+#define M_CPL_RDMA_CQE_SRQ_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_SRQ_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_SRQ_RSSCTRL)
+#define G_CPL_RDMA_CQE_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_RSSCTRL) & M_CPL_RDMA_CQE_SRQ_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_SRQ_CQID 0
+#define M_CPL_RDMA_CQE_SRQ_CQID 0xffff
+#define V_CPL_RDMA_CQE_SRQ_CQID(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQID)
+#define G_CPL_RDMA_CQE_SRQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQID) & M_CPL_RDMA_CQE_SRQ_CQID)
+
+#define S_CPL_RDMA_CQE_SRQ_TID 8
+#define M_CPL_RDMA_CQE_SRQ_TID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_TID(x) ((x) << S_CPL_RDMA_CQE_SRQ_TID)
+#define G_CPL_RDMA_CQE_SRQ_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_TID) & M_CPL_RDMA_CQE_SRQ_TID)
+
+#define S_CPL_RDMA_CQE_SRQ_FLITCNT 0
+#define M_CPL_RDMA_CQE_SRQ_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_SRQ_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_SRQ_FLITCNT)
+#define G_CPL_RDMA_CQE_SRQ_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_FLITCNT) & M_CPL_RDMA_CQE_SRQ_FLITCNT)
+
+#define S_CPL_RDMA_CQE_SRQ_QPID 12
+#define M_CPL_RDMA_CQE_SRQ_QPID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_QPID(x) ((x) << S_CPL_RDMA_CQE_SRQ_QPID)
+#define G_CPL_RDMA_CQE_SRQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_QPID) & M_CPL_RDMA_CQE_SRQ_QPID)
+
+#define S_CPL_RDMA_CQE_SRQ_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_SRQ_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_SRQ_GENERATION_BIT \
+ V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_STATUS 5
+#define M_CPL_RDMA_CQE_SRQ_STATUS 0x1f
+#define V_CPL_RDMA_CQE_SRQ_STATUS(x) ((x) << S_CPL_RDMA_CQE_SRQ_STATUS)
+#define G_CPL_RDMA_CQE_SRQ_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_STATUS) & M_CPL_RDMA_CQE_SRQ_STATUS)
+
+#define S_CPL_RDMA_CQE_SRQ_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_SRQ_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQE_TYPE) & M_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define F_CPL_RDMA_CQE_SRQ_CQE_TYPE V_CPL_RDMA_CQE_SRQ_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_WR_TYPE 0
+#define M_CPL_RDMA_CQE_SRQ_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_SRQ_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_WR_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_WR_TYPE) & M_CPL_RDMA_CQE_SRQ_WR_TYPE)
+
+struct cpl_rdma_cqe_read_rsp {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_READ_RSP_RSSCTRL 16
+#define M_CPL_RDMA_CQE_READ_RSP_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+#define G_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_RSSCTRL) & \
+ M_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQID 0
+#define M_CPL_RDMA_CQE_READ_RSP_CQID 0xffff
+#define V_CPL_RDMA_CQE_READ_RSP_CQID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_CQID)
+#define G_CPL_RDMA_CQE_READ_RSP_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQID) & M_CPL_RDMA_CQE_READ_RSP_CQID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_TID 8
+#define M_CPL_RDMA_CQE_READ_RSP_TID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_TID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_TID)
+#define G_CPL_RDMA_CQE_READ_RSP_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_TID) & M_CPL_RDMA_CQE_READ_RSP_TID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_FLITCNT 0
+#define M_CPL_RDMA_CQE_READ_RSP_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+#define G_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_FLITCNT) & \
+ M_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+
+#define S_CPL_RDMA_CQE_READ_RSP_QPID 12
+#define M_CPL_RDMA_CQE_READ_RSP_QPID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_QPID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_QPID)
+#define G_CPL_RDMA_CQE_READ_RSP_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_QPID) & M_CPL_RDMA_CQE_READ_RSP_QPID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT \
+ V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_STATUS 5
+#define M_CPL_RDMA_CQE_READ_RSP_STATUS 0x1f
+#define V_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_STATUS)
+#define G_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_STATUS) & M_CPL_RDMA_CQE_READ_RSP_STATUS)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define F_CPL_RDMA_CQE_READ_RSP_CQE_TYPE V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0
+#define M_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_WR_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+
+struct cpl_rdma_cqe_err {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_ERR_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_ERR_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_RSSCTRL) & M_CPL_RDMA_CQE_ERR_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_CQID 0
+#define M_CPL_RDMA_CQE_ERR_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_CQID)
+#define G_CPL_RDMA_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQID) & M_CPL_RDMA_CQE_ERR_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_TID 8
+#define M_CPL_RDMA_CQE_ERR_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_TID)
+#define G_CPL_RDMA_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_TID) & M_CPL_RDMA_CQE_ERR_TID)
+
+#define S_CPL_RDMA_CQE_ERR_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_ERR_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_FLITCNT) & M_CPL_RDMA_CQE_ERR_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_QPID 12
+#define M_CPL_RDMA_CQE_ERR_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_QPID)
+#define G_CPL_RDMA_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_QPID) & M_CPL_RDMA_CQE_ERR_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_STATUS(x) ((x) << S_CPL_RDMA_CQE_ERR_STATUS)
+#define G_CPL_RDMA_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_STATUS) & M_CPL_RDMA_CQE_ERR_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQE_TYPE) & M_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_CQE_TYPE V_CPL_RDMA_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_WR_TYPE) & M_CPL_RDMA_CQE_ERR_WR_TYPE)
+
+struct cpl_rdma_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 srq_pkd;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_READ_REQ_SRQ 0
+#define M_CPL_RDMA_READ_REQ_SRQ 0xfff
+#define V_CPL_RDMA_READ_REQ_SRQ(x) ((x) << S_CPL_RDMA_READ_REQ_SRQ)
+#define G_CPL_RDMA_READ_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_READ_REQ_SRQ) & M_CPL_RDMA_READ_REQ_SRQ)
+
struct cpl_rdma_terminate {
RSS_HDR
union opcode_tid ot;
@@ -2618,6 +3931,404 @@ struct cpl_rdma_terminate {
__be16 len;
};
+struct cpl_rdma_atomic_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 opcode_srq;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_REQ_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_REQ_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_REQ_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_OPCODE)
+#define G_CPL_RDMA_ATOMIC_REQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_OPCODE) & M_CPL_RDMA_ATOMIC_REQ_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_REQ_SRQ 0
+#define M_CPL_RDMA_ATOMIC_REQ_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_REQ_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_SRQ)
+#define G_CPL_RDMA_ATOMIC_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_SRQ) & M_CPL_RDMA_ATOMIC_REQ_SRQ)
+
+struct cpl_rdma_atomic_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 opcode_srq;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_RPL_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_RPL_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_RPL_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_OPCODE)
+#define G_CPL_RDMA_ATOMIC_RPL_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_OPCODE) & M_CPL_RDMA_ATOMIC_RPL_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_RPL_SRQ 0
+#define M_CPL_RDMA_ATOMIC_RPL_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_RPL_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_SRQ)
+#define G_CPL_RDMA_ATOMIC_RPL_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_SRQ) & M_CPL_RDMA_ATOMIC_RPL_SRQ)
+
+struct cpl_rdma_imm_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r;
+ __be16 Length;
+};
+
+struct cpl_rdma_imm_data_se {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r;
+ __be16 Length;
+};
+
+struct cpl_rdma_inv_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 stag;
+ __be32 cqid_pdid_hi;
+ __be32 pdid_lo_qpid;
+};
+
+#define S_CPL_RDMA_INV_REQ_CQID 8
+#define M_CPL_RDMA_INV_REQ_CQID 0xfffff
+#define V_CPL_RDMA_INV_REQ_CQID(x) ((x) << S_CPL_RDMA_INV_REQ_CQID)
+#define G_CPL_RDMA_INV_REQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_CQID) & M_CPL_RDMA_INV_REQ_CQID)
+
+#define S_CPL_RDMA_INV_REQ_PDID_HI 0
+#define M_CPL_RDMA_INV_REQ_PDID_HI 0xff
+#define V_CPL_RDMA_INV_REQ_PDID_HI(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_HI)
+#define G_CPL_RDMA_INV_REQ_PDID_HI(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_HI) & M_CPL_RDMA_INV_REQ_PDID_HI)
+
+#define S_CPL_RDMA_INV_REQ_PDID_LO 20
+#define M_CPL_RDMA_INV_REQ_PDID_LO 0xfff
+#define V_CPL_RDMA_INV_REQ_PDID_LO(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_LO)
+#define G_CPL_RDMA_INV_REQ_PDID_LO(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_LO) & M_CPL_RDMA_INV_REQ_PDID_LO)
+
+#define S_CPL_RDMA_INV_REQ_QPID 0
+#define M_CPL_RDMA_INV_REQ_QPID 0xfffff
+#define V_CPL_RDMA_INV_REQ_QPID(x) ((x) << S_CPL_RDMA_INV_REQ_QPID)
+#define G_CPL_RDMA_INV_REQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_QPID) & M_CPL_RDMA_INV_REQ_QPID)
+
+struct cpl_rdma_cqe_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_EXT_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_RSSCTRL) & M_CPL_RDMA_CQE_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_EXT_CQID 0
+#define M_CPL_RDMA_CQE_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_EXT_CQID)
+#define G_CPL_RDMA_CQE_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQID) & M_CPL_RDMA_CQE_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_EXT_TID 8
+#define M_CPL_RDMA_CQE_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_EXT_TID)
+#define G_CPL_RDMA_CQE_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_TID) & M_CPL_RDMA_CQE_EXT_TID)
+
+#define S_CPL_RDMA_CQE_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_EXT_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_FLITCNT) & M_CPL_RDMA_CQE_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_EXT_QPID 12
+#define M_CPL_RDMA_CQE_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_EXT_QPID)
+#define G_CPL_RDMA_CQE_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_QPID) & M_CPL_RDMA_CQE_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_EXT_STATUS)
+#define G_CPL_RDMA_CQE_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_STATUS) & M_CPL_RDMA_CQE_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_EXT_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_EXT_CQE_TYPE V_CPL_RDMA_CQE_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE) & M_CPL_RDMA_CQE_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_EXT_SE 31
+#define M_CPL_RDMA_CQE_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_EXT_SE)
+#define G_CPL_RDMA_CQE_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SE) & M_CPL_RDMA_CQE_EXT_SE)
+#define F_CPL_RDMA_CQE_EXT_SE V_CPL_RDMA_CQE_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT) & M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_EXT_SRQ)
+#define G_CPL_RDMA_CQE_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SRQ) & M_CPL_RDMA_CQE_EXT_SRQ)
+
+struct cpl_rdma_cqe_fw_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_FW_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_FW_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_RSSCTRL) & M_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQID 0
+#define M_CPL_RDMA_CQE_FW_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_FW_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_CQID)
+#define G_CPL_RDMA_CQE_FW_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQID) & M_CPL_RDMA_CQE_FW_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_TID 8
+#define M_CPL_RDMA_CQE_FW_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_TID)
+#define G_CPL_RDMA_CQE_FW_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_TID) & M_CPL_RDMA_CQE_FW_EXT_TID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_FW_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_FLITCNT) & M_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_QPID 12
+#define M_CPL_RDMA_CQE_FW_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_QPID)
+#define G_CPL_RDMA_CQE_FW_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_QPID) & M_CPL_RDMA_CQE_FW_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_FW_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_FW_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_STATUS)
+#define G_CPL_RDMA_CQE_FW_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_STATUS) & M_CPL_RDMA_CQE_FW_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_FW_EXT_CQE_TYPE V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE) & M_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SE 31
+#define M_CPL_RDMA_CQE_FW_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SE)
+#define G_CPL_RDMA_CQE_FW_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SE) & M_CPL_RDMA_CQE_FW_EXT_SE)
+#define F_CPL_RDMA_CQE_FW_EXT_SE V_CPL_RDMA_CQE_FW_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_FW_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_FW_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SRQ)
+#define G_CPL_RDMA_CQE_FW_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SRQ) & M_CPL_RDMA_CQE_FW_EXT_SRQ)
+
+struct cpl_rdma_cqe_err_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL) & M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQID 0
+#define M_CPL_RDMA_CQE_ERR_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQID)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQID) & M_CPL_RDMA_CQE_ERR_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_TID 8
+#define M_CPL_RDMA_CQE_ERR_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_TID)
+#define G_CPL_RDMA_CQE_ERR_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_TID) & M_CPL_RDMA_CQE_ERR_EXT_TID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_FLITCNT) & M_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_QPID 12
+#define M_CPL_RDMA_CQE_ERR_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_QPID)
+#define G_CPL_RDMA_CQE_ERR_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_QPID) & M_CPL_RDMA_CQE_ERR_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_STATUS)
+#define G_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_STATUS) & M_CPL_RDMA_CQE_ERR_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE) & M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SE 31
+#define M_CPL_RDMA_CQE_ERR_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SE)
+#define G_CPL_RDMA_CQE_ERR_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SE) & M_CPL_RDMA_CQE_ERR_EXT_SE)
+#define F_CPL_RDMA_CQE_ERR_EXT_SE V_CPL_RDMA_CQE_ERR_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_ERR_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_ERR_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SRQ)
+#define G_CPL_RDMA_CQE_ERR_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SRQ) & M_CPL_RDMA_CQE_ERR_EXT_SRQ)
+
struct cpl_set_le_req {
WR_HDR;
union opcode_tid ot;
@@ -2630,6 +4341,13 @@ struct cpl_set_le_req {
};
/* cpl_set_le_req.reply_ctrl additional fields */
+#define S_LE_REQ_RXCHANNEL 14
+#define M_LE_REQ_RXCHANNEL 0x1
+#define V_LE_REQ_RXCHANNEL(x) ((x) << S_LE_REQ_RXCHANNEL)
+#define G_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_LE_REQ_RXCHANNEL) & M_LE_REQ_RXCHANNEL)
+#define F_LE_REQ_RXCHANNEL V_LE_REQ_RXCHANNEL(1U)
+
#define S_LE_REQ_IP6 13
#define V_LE_REQ_IP6(x) ((x) << S_LE_REQ_IP6)
#define F_LE_REQ_IP6 V_LE_REQ_IP6(1U)
@@ -2659,6 +4377,80 @@ struct cpl_set_le_req {
#define V_LE_REQCMD(x) ((x) << S_LE_REQCMD)
#define G_LE_REQCMD(x) (((x) >> S_LE_REQCMD) & M_LE_REQCMD)
+struct cpl_t7_set_le_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_channel;
+ __be32 mask1[2];
+ __be32 mask0[2];
+ __be32 value1[2];
+ __be32 value0[2];
+};
+
+#define S_CPL_T7_SET_LE_REQ_INDEX 0
+#define M_CPL_T7_SET_LE_REQ_INDEX 0xffffff
+#define V_CPL_T7_SET_LE_REQ_INDEX(x) ((x) << S_CPL_T7_SET_LE_REQ_INDEX)
+#define G_CPL_T7_SET_LE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_INDEX) & M_CPL_T7_SET_LE_REQ_INDEX)
+
+#define S_CPL_T7_SET_LE_REQ_NOREPLY 31
+#define M_CPL_T7_SET_LE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SET_LE_REQ_NOREPLY(x) ((x) << S_CPL_T7_SET_LE_REQ_NOREPLY)
+#define G_CPL_T7_SET_LE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_NOREPLY) & M_CPL_T7_SET_LE_REQ_NOREPLY)
+#define F_CPL_T7_SET_LE_REQ_NOREPLY V_CPL_T7_SET_LE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SET_LE_REQ_RXCHANNEL 28
+#define M_CPL_T7_SET_LE_REQ_RXCHANNEL 0x7
+#define V_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_SET_LE_REQ_RXCHANNEL)
+#define G_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_RXCHANNEL) & M_CPL_T7_SET_LE_REQ_RXCHANNEL)
+
+#define S_CPL_T7_SET_LE_REQ_QUEUE 16
+#define M_CPL_T7_SET_LE_REQ_QUEUE 0xfff
+#define V_CPL_T7_SET_LE_REQ_QUEUE(x) ((x) << S_CPL_T7_SET_LE_REQ_QUEUE)
+#define G_CPL_T7_SET_LE_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_QUEUE) & M_CPL_T7_SET_LE_REQ_QUEUE)
+
+#define S_CPL_T7_SET_LE_REQ_REQCMD 12
+#define M_CPL_T7_SET_LE_REQ_REQCMD 0xf
+#define V_CPL_T7_SET_LE_REQ_REQCMD(x) ((x) << S_CPL_T7_SET_LE_REQ_REQCMD)
+#define G_CPL_T7_SET_LE_REQ_REQCMD(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQCMD) & M_CPL_T7_SET_LE_REQ_REQCMD)
+
+#define S_CPL_T7_SET_LE_REQ_REQSIZE 9
+#define M_CPL_T7_SET_LE_REQ_REQSIZE 0x7
+#define V_CPL_T7_SET_LE_REQ_REQSIZE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQSIZE)
+#define G_CPL_T7_SET_LE_REQ_REQSIZE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQSIZE) & M_CPL_T7_SET_LE_REQ_REQSIZE)
+
+#define S_CPL_T7_SET_LE_REQ_MORE 8
+#define M_CPL_T7_SET_LE_REQ_MORE 0x1
+#define V_CPL_T7_SET_LE_REQ_MORE(x) ((x) << S_CPL_T7_SET_LE_REQ_MORE)
+#define G_CPL_T7_SET_LE_REQ_MORE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_MORE) & M_CPL_T7_SET_LE_REQ_MORE)
+#define F_CPL_T7_SET_LE_REQ_MORE V_CPL_T7_SET_LE_REQ_MORE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_OFFSET 5
+#define M_CPL_T7_SET_LE_REQ_OFFSET 0x7
+#define V_CPL_T7_SET_LE_REQ_OFFSET(x) ((x) << S_CPL_T7_SET_LE_REQ_OFFSET)
+#define G_CPL_T7_SET_LE_REQ_OFFSET(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_OFFSET) & M_CPL_T7_SET_LE_REQ_OFFSET)
+
+#define S_CPL_T7_SET_LE_REQ_REQTYPE 4
+#define M_CPL_T7_SET_LE_REQ_REQTYPE 0x1
+#define V_CPL_T7_SET_LE_REQ_REQTYPE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQTYPE)
+#define G_CPL_T7_SET_LE_REQ_REQTYPE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQTYPE) & M_CPL_T7_SET_LE_REQ_REQTYPE)
+#define F_CPL_T7_SET_LE_REQ_REQTYPE V_CPL_T7_SET_LE_REQ_REQTYPE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_CHANNEL 0
+#define M_CPL_T7_SET_LE_REQ_CHANNEL 0x3
+#define V_CPL_T7_SET_LE_REQ_CHANNEL(x) ((x) << S_CPL_T7_SET_LE_REQ_CHANNEL)
+#define G_CPL_T7_SET_LE_REQ_CHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_CHANNEL) & M_CPL_T7_SET_LE_REQ_CHANNEL)
+
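+/*
+ * Editorial sketch: building the noreply_to_channel word of a
+ * cpl_t7_set_le_req; the command, target queue and reply control share
+ * the word.  "req", "qid" and "cmd" are hypothetical:
+ *
+ *	req->noreply_to_channel = htobe32(F_CPL_T7_SET_LE_REQ_NOREPLY |
+ *	    V_CPL_T7_SET_LE_REQ_QUEUE(qid) |
+ *	    V_CPL_T7_SET_LE_REQ_REQCMD(cmd));
+ */
+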
struct cpl_set_le_rpl {
RSS_HDR
union opcode_tid ot;
@@ -2710,6 +4502,7 @@ enum {
FW_TYPE_WRERR_RPL = 5,
FW_TYPE_PI_ERR = 6,
FW_TYPE_TLS_KEY = 7,
+ FW_TYPE_IPSEC_SA = 8,
};
struct cpl_fw2_pld {
@@ -2811,6 +4604,8 @@ enum {
FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
FW6_TYPE_WRERR_RPL = FW_TYPE_WRERR_RPL,
FW6_TYPE_PI_ERR = FW_TYPE_PI_ERR,
+ FW6_TYPE_TLS_KEY = FW_TYPE_TLS_KEY,
+ FW6_TYPE_IPSEC_SA = FW_TYPE_IPSEC_SA,
NUM_FW6_TYPES
};
@@ -2932,6 +4727,10 @@ struct ulp_mem_io {
#define M_ULP_MEMIO_DATA_LEN 0x1F
#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+#define S_T7_ULP_MEMIO_DATA_LEN 0
+#define M_T7_ULP_MEMIO_DATA_LEN 0x7FF
+#define V_T7_ULP_MEMIO_DATA_LEN(x) ((x) << S_T7_ULP_MEMIO_DATA_LEN)
+
/* ULP_TXPKT field values */
enum {
ULP_TXPKT_DEST_TP = 0,
@@ -2960,11 +4759,25 @@ struct ulp_txpkt {
(((x) >> S_ULP_TXPKT_CHANNELID) & M_ULP_TXPKT_CHANNELID)
#define F_ULP_TXPKT_CHANNELID V_ULP_TXPKT_CHANNELID(1U)
+#define S_T7_ULP_TXPKT_CHANNELID 22
+#define M_T7_ULP_TXPKT_CHANNELID 0x3
+#define V_T7_ULP_TXPKT_CHANNELID(x) ((x) << S_T7_ULP_TXPKT_CHANNELID)
+#define G_T7_ULP_TXPKT_CHANNELID(x) \
+ (((x) >> S_T7_ULP_TXPKT_CHANNELID) & M_T7_ULP_TXPKT_CHANNELID)
+#define F_T7_ULP_TXPKT_CHANNELID V_T7_ULP_TXPKT_CHANNELID(1U)
+
/* ulp_txpkt.cmd_dest fields */
#define S_ULP_TXPKT_DEST 16
#define M_ULP_TXPKT_DEST 0x3
#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+#define S_ULP_TXPKT_CMDMORE 15
+#define M_ULP_TXPKT_CMDMORE 0x1
+#define V_ULP_TXPKT_CMDMORE(x) ((x) << S_ULP_TXPKT_CMDMORE)
+#define G_ULP_TXPKT_CMDMORE(x) \
+ (((x) >> S_ULP_TXPKT_CMDMORE) & M_ULP_TXPKT_CMDMORE)
+#define F_ULP_TXPKT_CMDMORE V_ULP_TXPKT_CMDMORE(1U)
+
#define S_ULP_TXPKT_FID 4
#define M_ULP_TXPKT_FID 0x7ff
#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
@@ -2978,13 +4791,15 @@ enum cpl_tx_tnl_lso_type {
TX_TNL_TYPE_NVGRE,
TX_TNL_TYPE_VXLAN,
TX_TNL_TYPE_GENEVE,
+ TX_TNL_TYPE_IPSEC,
};
struct cpl_tx_tnl_lso {
__be32 op_to_IpIdSplitOut;
__be16 IpIdOffsetOut;
__be16 UdpLenSetOut_to_TnlHdrLen;
- __be64 r1;
+ __be32 ipsecen_to_rocev2;
+ __be32 roce_eth;
__be32 Flow_to_TcpHdrLen;
__be16 IpIdOffset;
__be16 IpIdSplit_to_Mss;
@@ -3098,6 +4913,68 @@ struct cpl_tx_tnl_lso {
#define G_CPL_TX_TNL_LSO_TNLHDRLEN(x) \
(((x) >> S_CPL_TX_TNL_LSO_TNLHDRLEN) & M_CPL_TX_TNL_LSO_TNLHDRLEN)
+#define S_CPL_TX_TNL_LSO_IPSECEN 31
+#define M_CPL_TX_TNL_LSO_IPSECEN 0x1
+#define V_CPL_TX_TNL_LSO_IPSECEN(x) ((x) << S_CPL_TX_TNL_LSO_IPSECEN)
+#define G_CPL_TX_TNL_LSO_IPSECEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECEN) & M_CPL_TX_TNL_LSO_IPSECEN)
+#define F_CPL_TX_TNL_LSO_IPSECEN V_CPL_TX_TNL_LSO_IPSECEN(1U)
+
+#define S_CPL_TX_TNL_LSO_ENCAPDIS 30
+#define M_CPL_TX_TNL_LSO_ENCAPDIS 0x1
+#define V_CPL_TX_TNL_LSO_ENCAPDIS(x) ((x) << S_CPL_TX_TNL_LSO_ENCAPDIS)
+#define G_CPL_TX_TNL_LSO_ENCAPDIS(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ENCAPDIS) & M_CPL_TX_TNL_LSO_ENCAPDIS)
+#define F_CPL_TX_TNL_LSO_ENCAPDIS V_CPL_TX_TNL_LSO_ENCAPDIS(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECMODE 29
+#define M_CPL_TX_TNL_LSO_IPSECMODE 0x1
+#define V_CPL_TX_TNL_LSO_IPSECMODE(x) ((x) << S_CPL_TX_TNL_LSO_IPSECMODE)
+#define G_CPL_TX_TNL_LSO_IPSECMODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECMODE) & M_CPL_TX_TNL_LSO_IPSECMODE)
+#define F_CPL_TX_TNL_LSO_IPSECMODE V_CPL_TX_TNL_LSO_IPSECMODE(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPV6 28
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPV6 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPV6) & M_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPV6 V_CPL_TX_TNL_LSO_IPSECTNLIPV6(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 20
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 0xff
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 19
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT \
+ V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(1U)
+
+#define S_CPL_TX_TNL_LSO_ROCEV2 18
+#define M_CPL_TX_TNL_LSO_ROCEV2 0x1
+#define V_CPL_TX_TNL_LSO_ROCEV2(x) ((x) << S_CPL_TX_TNL_LSO_ROCEV2)
+#define G_CPL_TX_TNL_LSO_ROCEV2(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ROCEV2) & M_CPL_TX_TNL_LSO_ROCEV2)
+#define F_CPL_TX_TNL_LSO_ROCEV2 V_CPL_TX_TNL_LSO_ROCEV2(1U)
+
+#define S_CPL_TX_TNL_LSO_UDPCHKUPDOUT 17
+#define M_CPL_TX_TNL_LSO_UDPCHKUPDOUT 0x1
+#define V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define G_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_UDPCHKUPDOUT) & M_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define F_CPL_TX_TNL_LSO_UDPCHKUPDOUT V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(1U)
+
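+/*
+ * Editorial sketch: the word that replaced r1 carries the IPsec and
+ * RoCEv2 controls; a tunnel-mode IPsec segment might be described as
+ * below.  "lso" and "tnliplen" are hypothetical and the exact IPSECMODE
+ * encoding is an assumption:
+ *
+ *	lso->ipsecen_to_rocev2 = htobe32(F_CPL_TX_TNL_LSO_IPSECEN |
+ *	    F_CPL_TX_TNL_LSO_IPSECMODE |
+ *	    V_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(tnliplen));
+ */
+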
#define S_CPL_TX_TNL_LSO_FLOW 21
#define M_CPL_TX_TNL_LSO_FLOW 0x1
#define V_CPL_TX_TNL_LSO_FLOW(x) ((x) << S_CPL_TX_TNL_LSO_FLOW)
@@ -3180,6 +5057,12 @@ struct cpl_rx_mps_pkt {
#define G_CPL_RX_MPS_PKT_TYPE(x) \
(((x) >> S_CPL_RX_MPS_PKT_TYPE) & M_CPL_RX_MPS_PKT_TYPE)
+#define S_CPL_RX_MPS_PKT_LENGTH 0
+#define M_CPL_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_RX_MPS_PKT_LENGTH)
+#define G_CPL_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_RX_MPS_PKT_LENGTH) & M_CPL_RX_MPS_PKT_LENGTH)
+
/*
* Values for CPL_RX_MPS_PKT_TYPE, a bit-wise orthogonal field.
*/
@@ -3188,6 +5071,88 @@ struct cpl_rx_mps_pkt {
#define X_CPL_RX_MPS_PKT_TYPE_QFC (1 << 2)
#define X_CPL_RX_MPS_PKT_TYPE_PTP (1 << 3)
+struct cpl_t7_rx_mps_pkt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 length_pkd;
+};
+
+#define S_CPL_T7_RX_MPS_PKT_TYPE 20
+#define M_CPL_T7_RX_MPS_PKT_TYPE 0xf
+#define V_CPL_T7_RX_MPS_PKT_TYPE(x) ((x) << S_CPL_T7_RX_MPS_PKT_TYPE)
+#define G_CPL_T7_RX_MPS_PKT_TYPE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TYPE) & M_CPL_T7_RX_MPS_PKT_TYPE)
+
+#define S_CPL_T7_RX_MPS_PKT_INTERFACE 16
+#define M_CPL_T7_RX_MPS_PKT_INTERFACE 0xf
+#define V_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_INTERFACE)
+#define G_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_INTERFACE) & M_CPL_T7_RX_MPS_PKT_INTERFACE)
+
+#define S_CPL_T7_RX_MPS_PKT_TRUNCATED 7
+#define M_CPL_T7_RX_MPS_PKT_TRUNCATED 0x1
+#define V_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define G_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TRUNCATED) & M_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define F_CPL_T7_RX_MPS_PKT_TRUNCATED V_CPL_T7_RX_MPS_PKT_TRUNCATED(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_PKTERR 6
+#define M_CPL_T7_RX_MPS_PKT_PKTERR 0x1
+#define V_CPL_T7_RX_MPS_PKT_PKTERR(x) ((x) << S_CPL_T7_RX_MPS_PKT_PKTERR)
+#define G_CPL_T7_RX_MPS_PKT_PKTERR(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_PKTERR) & M_CPL_T7_RX_MPS_PKT_PKTERR)
+#define F_CPL_T7_RX_MPS_PKT_PKTERR V_CPL_T7_RX_MPS_PKT_PKTERR(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_LENGTH 0
+#define M_CPL_T7_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_T7_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_T7_RX_MPS_PKT_LENGTH)
+#define G_CPL_T7_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_LENGTH) & M_CPL_T7_RX_MPS_PKT_LENGTH)
+
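+/*
+ * Editorial sketch: the packet type and the explicit length added for T7
+ * both come out of length_pkd.  "cpl" is hypothetical:
+ *
+ *	type = G_CPL_T7_RX_MPS_PKT_TYPE(be32toh(cpl->length_pkd));
+ *	len = G_CPL_T7_RX_MPS_PKT_LENGTH(be32toh(cpl->length_pkd));
+ */
+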
+struct cpl_tx_tls_pdu {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 pldlen_pkd;
+ __be32 customtype_customprotover;
+ __be32 r2_lo;
+ __be32 scmd0[2];
+ __be32 scmd1[2];
+};
+
+#define S_CPL_TX_TLS_PDU_DATATYPE 20
+#define M_CPL_TX_TLS_PDU_DATATYPE 0xf
+#define V_CPL_TX_TLS_PDU_DATATYPE(x) ((x) << S_CPL_TX_TLS_PDU_DATATYPE)
+#define G_CPL_TX_TLS_PDU_DATATYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_DATATYPE) & M_CPL_TX_TLS_PDU_DATATYPE)
+
+#define S_CPL_TX_TLS_PDU_CPLLEN 16
+#define M_CPL_TX_TLS_PDU_CPLLEN 0xf
+#define V_CPL_TX_TLS_PDU_CPLLEN(x) ((x) << S_CPL_TX_TLS_PDU_CPLLEN)
+#define G_CPL_TX_TLS_PDU_CPLLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CPLLEN) & M_CPL_TX_TLS_PDU_CPLLEN)
+
+#define S_CPL_TX_TLS_PDU_PLDLEN 0
+#define M_CPL_TX_TLS_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_PDU_PLDLEN(x) ((x) << S_CPL_TX_TLS_PDU_PLDLEN)
+#define G_CPL_TX_TLS_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_PLDLEN) & M_CPL_TX_TLS_PDU_PLDLEN)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMTYPE 24
+#define M_CPL_TX_TLS_PDU_CUSTOMTYPE 0xff
+#define V_CPL_TX_TLS_PDU_CUSTOMTYPE(x) ((x) << S_CPL_TX_TLS_PDU_CUSTOMTYPE)
+#define G_CPL_TX_TLS_PDU_CUSTOMTYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMTYPE) & M_CPL_TX_TLS_PDU_CUSTOMTYPE)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMPROTOVER 8
+#define M_CPL_TX_TLS_PDU_CUSTOMPROTOVER 0xffff
+#define V_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ ((x) << S_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+#define G_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMPROTOVER) & \
+ M_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+
struct cpl_tx_tls_sfo {
__be32 op_to_seg_len;
__be32 pld_len;
@@ -3223,6 +5188,12 @@ struct cpl_tx_tls_sfo {
#define G_CPL_TX_TLS_SFO_SEG_LEN(x) \
(((x) >> S_CPL_TX_TLS_SFO_SEG_LEN) & M_CPL_TX_TLS_SFO_SEG_LEN)
+#define S_CPL_TX_TLS_SFO_PLDLEN 0
+#define M_CPL_TX_TLS_SFO_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_SFO_PLDLEN(x) ((x) << S_CPL_TX_TLS_SFO_PLDLEN)
+#define G_CPL_TX_TLS_SFO_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_SFO_PLDLEN) & M_CPL_TX_TLS_SFO_PLDLEN)
+
#define S_CPL_TX_TLS_SFO_TYPE 24
#define M_CPL_TX_TLS_SFO_TYPE 0xff
#define V_CPL_TX_TLS_SFO_TYPE(x) ((x) << S_CPL_TX_TLS_SFO_TYPE)
@@ -3454,6 +5425,119 @@ struct cpl_rx_tls_cmp {
#define G_SCMD_HDR_LEN(x) \
(((x) >> S_SCMD_HDR_LEN) & M_SCMD_HDR_LEN)
+struct cpl_rx_pkt_ipsec {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 vlan;
+ __be16 length;
+ __be32 rxchannel_to_ethhdrlen;
+ __be32 iphdrlen_to_rxerror;
+ __be64 timestamp;
+};
+
+#define S_CPL_RX_PKT_IPSEC_OPCODE 24
+#define M_CPL_RX_PKT_IPSEC_OPCODE 0xff
+#define V_CPL_RX_PKT_IPSEC_OPCODE(x) ((x) << S_CPL_RX_PKT_IPSEC_OPCODE)
+#define G_CPL_RX_PKT_IPSEC_OPCODE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OPCODE) & M_CPL_RX_PKT_IPSEC_OPCODE)
+
+#define S_CPL_RX_PKT_IPSEC_IPFRAG 23
+#define M_CPL_RX_PKT_IPSEC_IPFRAG 0x1
+#define V_CPL_RX_PKT_IPSEC_IPFRAG(x) ((x) << S_CPL_RX_PKT_IPSEC_IPFRAG)
+#define G_CPL_RX_PKT_IPSEC_IPFRAG(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPFRAG) & M_CPL_RX_PKT_IPSEC_IPFRAG)
+#define F_CPL_RX_PKT_IPSEC_IPFRAG V_CPL_RX_PKT_IPSEC_IPFRAG(1U)
+
+#define S_CPL_RX_PKT_IPSEC_VLAN_EX 22
+#define M_CPL_RX_PKT_IPSEC_VLAN_EX 0x1
+#define V_CPL_RX_PKT_IPSEC_VLAN_EX(x) ((x) << S_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define G_CPL_RX_PKT_IPSEC_VLAN_EX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_VLAN_EX) & M_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define F_CPL_RX_PKT_IPSEC_VLAN_EX V_CPL_RX_PKT_IPSEC_VLAN_EX(1U)
+
+#define S_CPL_RX_PKT_IPSEC_IPMI 21
+#define M_CPL_RX_PKT_IPSEC_IPMI 0x1
+#define V_CPL_RX_PKT_IPSEC_IPMI(x) ((x) << S_CPL_RX_PKT_IPSEC_IPMI)
+#define G_CPL_RX_PKT_IPSEC_IPMI(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPMI) & M_CPL_RX_PKT_IPSEC_IPMI)
+#define F_CPL_RX_PKT_IPSEC_IPMI V_CPL_RX_PKT_IPSEC_IPMI(1U)
+
+#define S_CPL_RX_PKT_IPSEC_INTERFACE 16
+#define M_CPL_RX_PKT_IPSEC_INTERFACE 0xf
+#define V_CPL_RX_PKT_IPSEC_INTERFACE(x) ((x) << S_CPL_RX_PKT_IPSEC_INTERFACE)
+#define G_CPL_RX_PKT_IPSEC_INTERFACE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_INTERFACE) & M_CPL_RX_PKT_IPSEC_INTERFACE)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECEXTERR 12
+#define M_CPL_RX_PKT_IPSEC_IPSECEXTERR 0xf
+#define V_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+#define G_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECEXTERR) & M_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECTYPE 10
+#define M_CPL_RX_PKT_IPSEC_IPSECTYPE 0x3
+#define V_CPL_RX_PKT_IPSEC_IPSECTYPE(x) ((x) << S_CPL_RX_PKT_IPSEC_IPSECTYPE)
+#define G_CPL_RX_PKT_IPSEC_IPSECTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECTYPE) & M_CPL_RX_PKT_IPSEC_IPSECTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN) & M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXCHANNEL 28
+#define M_CPL_RX_PKT_IPSEC_RXCHANNEL 0xf
+#define V_CPL_RX_PKT_IPSEC_RXCHANNEL(x) ((x) << S_CPL_RX_PKT_IPSEC_RXCHANNEL)
+#define G_CPL_RX_PKT_IPSEC_RXCHANNEL(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXCHANNEL) & M_CPL_RX_PKT_IPSEC_RXCHANNEL)
+
+#define S_CPL_RX_PKT_IPSEC_FLAGS 20
+#define M_CPL_RX_PKT_IPSEC_FLAGS 0xff
+#define V_CPL_RX_PKT_IPSEC_FLAGS(x) ((x) << S_CPL_RX_PKT_IPSEC_FLAGS)
+#define G_CPL_RX_PKT_IPSEC_FLAGS(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_FLAGS) & M_CPL_RX_PKT_IPSEC_FLAGS)
+
+#define S_CPL_RX_PKT_IPSEC_MACMATCHTYPE 17
+#define M_CPL_RX_PKT_IPSEC_MACMATCHTYPE 0x7
+#define V_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+#define G_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACMATCHTYPE) & \
+ M_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_MACINDEX 8
+#define M_CPL_RX_PKT_IPSEC_MACINDEX 0x1ff
+#define V_CPL_RX_PKT_IPSEC_MACINDEX(x) ((x) << S_CPL_RX_PKT_IPSEC_MACINDEX)
+#define G_CPL_RX_PKT_IPSEC_MACINDEX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACINDEX) & M_CPL_RX_PKT_IPSEC_MACINDEX)
+
+#define S_CPL_RX_PKT_IPSEC_ETHHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_ETHHDRLEN 0xff
+#define V_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_ETHHDRLEN) & M_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_IPHDRLEN 22
+#define M_CPL_RX_PKT_IPSEC_IPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_IPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_IPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_IPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPHDRLEN) & M_CPL_RX_PKT_IPSEC_IPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_TCPHDRLEN 16
+#define M_CPL_RX_PKT_IPSEC_TCPHDRLEN 0x3f
+#define V_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_TCPHDRLEN) & M_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXERROR 0
+#define M_CPL_RX_PKT_IPSEC_RXERROR 0xffff
+#define V_CPL_RX_PKT_IPSEC_RXERROR(x) ((x) << S_CPL_RX_PKT_IPSEC_RXERROR)
+#define G_CPL_RX_PKT_IPSEC_RXERROR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXERROR) & M_CPL_RX_PKT_IPSEC_RXERROR)
+
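+/*
+ * Editorial sketch: a receive path would decode the two packed words of
+ * a cpl_rx_pkt_ipsec with the accessors above.  "cpl" is hypothetical:
+ *
+ *	ethlen = G_CPL_RX_PKT_IPSEC_ETHHDRLEN(be32toh(cpl->rxchannel_to_ethhdrlen));
+ *	rxerr = G_CPL_RX_PKT_IPSEC_RXERROR(be32toh(cpl->iphdrlen_to_rxerror));
+ */
+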
struct cpl_tx_sec_pdu {
__be32 op_ivinsrtofst;
__be32 pldlen;
@@ -3478,6 +5562,13 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_RXCHID) & M_CPL_TX_SEC_PDU_RXCHID)
#define F_CPL_TX_SEC_PDU_RXCHID V_CPL_TX_SEC_PDU_RXCHID(1U)
+#define S_T7_CPL_TX_SEC_PDU_RXCHID 22
+#define M_T7_CPL_TX_SEC_PDU_RXCHID 0x3
+#define V_T7_CPL_TX_SEC_PDU_RXCHID(x) ((x) << S_T7_CPL_TX_SEC_PDU_RXCHID)
+#define G_T7_CPL_TX_SEC_PDU_RXCHID(x) \
+ (((x) >> S_T7_CPL_TX_SEC_PDU_RXCHID) & M_T7_CPL_TX_SEC_PDU_RXCHID)
+#define F_T7_CPL_TX_SEC_PDU_RXCHID V_T7_CPL_TX_SEC_PDU_RXCHID(1U)
+
/* Ack Follows */
#define S_CPL_TX_SEC_PDU_ACKFOLLOWS 21
#define M_CPL_TX_SEC_PDU_ACKFOLLOWS 0x1
@@ -3501,6 +5592,13 @@ struct cpl_tx_sec_pdu {
#define G_CPL_TX_SEC_PDU_CPLLEN(x) \
(((x) >> S_CPL_TX_SEC_PDU_CPLLEN) & M_CPL_TX_SEC_PDU_CPLLEN)
+#define S_CPL_TX_SEC_PDU_ACKNEXT 15
+#define M_CPL_TX_SEC_PDU_ACKNEXT 0x1
+#define V_CPL_TX_SEC_PDU_ACKNEXT(x) ((x) << S_CPL_TX_SEC_PDU_ACKNEXT)
+#define G_CPL_TX_SEC_PDU_ACKNEXT(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_ACKNEXT) & M_CPL_TX_SEC_PDU_ACKNEXT)
+#define F_CPL_TX_SEC_PDU_ACKNEXT V_CPL_TX_SEC_PDU_ACKNEXT(1U)
+
/* PlaceHolder */
#define S_CPL_TX_SEC_PDU_PLACEHOLDER 10
#define M_CPL_TX_SEC_PDU_PLACEHOLDER 0x1
@@ -3517,6 +5615,12 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_IVINSRTOFST) & \
M_CPL_TX_SEC_PDU_IVINSRTOFST)
+#define S_CPL_TX_SEC_PDU_PLDLEN 0
+#define M_CPL_TX_SEC_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_SEC_PDU_PLDLEN(x) ((x) << S_CPL_TX_SEC_PDU_PLDLEN)
+#define G_CPL_TX_SEC_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_PLDLEN) & M_CPL_TX_SEC_PDU_PLDLEN)
+
/* AadStartOffset: Offset in bytes for AAD start from
* the first byte following
* the pkt headers (0-255
@@ -3666,6 +5770,62 @@ struct cpl_rx_phys_dsgl {
(((x) >> S_CPL_RX_PHYS_DSGL_NOOFSGENTR) & \
M_CPL_RX_PHYS_DSGL_NOOFSGENTR)
+struct cpl_t7_rx_phys_dsgl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 PhysAddrFields_lo_to_NumSGE;
+ __be32 RSSCopy[2];
+};
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0xffffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 16
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 0xffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 11
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR) & M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define F_CPL_T7_RX_PHYS_DSGL_NUMSGEERR V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 10
+#define M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE) & \
+ M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE \
+ V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_SPLITMODE 9
+#define M_CPL_T7_RX_PHYS_DSGL_SPLITMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_SPLITMODE) & M_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_SPLITMODE \
+ V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGE 0
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGE 0x1ff
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGE) & M_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+
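The T7 variant packs the SGE count and the mode/error bits into PhysAddrFields_lo_to_NumSGE. A minimal sketch of composing that control word with the V_ macros above, assuming the caller has already reduced addr_lo to its 16-bit slice; the function name and values are illustrative only:

	/* Sketch: fill the T7 DSGL control word for an nsge-entry list. */
	static void
	t7_dsgl_ctrl(struct cpl_t7_rx_phys_dsgl *d, uint32_t addr_lo, u_int nsge)
	{
		d->PhysAddrFields_lo_to_NumSGE = htobe32(
		    V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(addr_lo) |
		    V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(0) |
		    V_CPL_T7_RX_PHYS_DSGL_NUMSGE(nsge));
	}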
/* CPL_TX_TLS_ACK */
struct cpl_tx_tls_ack {
__be32 op_to_Rsvd2;
@@ -3679,12 +5839,11 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_OPCODE(x) \
(((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
-#define S_CPL_TX_TLS_ACK_RSVD1 23
-#define M_CPL_TX_TLS_ACK_RSVD1 0x1
-#define V_CPL_TX_TLS_ACK_RSVD1(x) ((x) << S_CPL_TX_TLS_ACK_RSVD1)
-#define G_CPL_TX_TLS_ACK_RSVD1(x) \
- (((x) >> S_CPL_TX_TLS_ACK_RSVD1) & M_CPL_TX_TLS_ACK_RSVD1)
-#define F_CPL_TX_TLS_ACK_RSVD1 V_CPL_TX_TLS_ACK_RSVD1(1U)
+#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
+#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
+#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
+#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
+ (((x) >> S_T7_CPL_TX_TLS_ACK_RXCHID) & M_T7_CPL_TX_TLS_ACK_RXCHID)
#define S_CPL_TX_TLS_ACK_RXCHID 22
#define M_CPL_TX_TLS_ACK_RXCHID 0x1
@@ -3740,4 +5899,822 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_RSVD2(x) \
(((x) >> S_CPL_TX_TLS_ACK_RSVD2) & M_CPL_TX_TLS_ACK_RSVD2)
+#define S_CPL_TX_TLS_ACK_PLDLEN 0
+#define M_CPL_TX_TLS_ACK_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_ACK_PLDLEN(x) ((x) << S_CPL_TX_TLS_ACK_PLDLEN)
+#define G_CPL_TX_TLS_ACK_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_ACK_PLDLEN) & M_CPL_TX_TLS_ACK_PLDLEN)
+
+struct cpl_rcb_upd {
+ __be32 op_to_tid;
+ __be32 opcode_psn;
+ __u8 nodata_to_cnprepclr;
+ __u8 r0;
+ __be16 wrptr;
+ __be32 length;
+};
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_TID 0
+#define M_CPL_RCB_UPD_TID 0xffffff
+#define V_CPL_RCB_UPD_TID(x) ((x) << S_CPL_RCB_UPD_TID)
+#define G_CPL_RCB_UPD_TID(x) \
+ (((x) >> S_CPL_RCB_UPD_TID) & M_CPL_RCB_UPD_TID)
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_PSN 0
+#define M_CPL_RCB_UPD_PSN 0xffffff
+#define V_CPL_RCB_UPD_PSN(x) ((x) << S_CPL_RCB_UPD_PSN)
+#define G_CPL_RCB_UPD_PSN(x) \
+ (((x) >> S_CPL_RCB_UPD_PSN) & M_CPL_RCB_UPD_PSN)
+
+#define S_CPL_RCB_UPD_NODATA 7
+#define M_CPL_RCB_UPD_NODATA 0x1
+#define V_CPL_RCB_UPD_NODATA(x) ((x) << S_CPL_RCB_UPD_NODATA)
+#define G_CPL_RCB_UPD_NODATA(x) \
+ (((x) >> S_CPL_RCB_UPD_NODATA) & M_CPL_RCB_UPD_NODATA)
+#define F_CPL_RCB_UPD_NODATA V_CPL_RCB_UPD_NODATA(1U)
+
+#define S_CPL_RCB_UPD_RTTSTAMP 6
+#define M_CPL_RCB_UPD_RTTSTAMP 0x1
+#define V_CPL_RCB_UPD_RTTSTAMP(x) ((x) << S_CPL_RCB_UPD_RTTSTAMP)
+#define G_CPL_RCB_UPD_RTTSTAMP(x) \
+ (((x) >> S_CPL_RCB_UPD_RTTSTAMP) & M_CPL_RCB_UPD_RTTSTAMP)
+#define F_CPL_RCB_UPD_RTTSTAMP V_CPL_RCB_UPD_RTTSTAMP(1U)
+
+#define S_CPL_RCB_UPD_ECNREPCLR 5
+#define M_CPL_RCB_UPD_ECNREPCLR 0x1
+#define V_CPL_RCB_UPD_ECNREPCLR(x) ((x) << S_CPL_RCB_UPD_ECNREPCLR)
+#define G_CPL_RCB_UPD_ECNREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_ECNREPCLR) & M_CPL_RCB_UPD_ECNREPCLR)
+#define F_CPL_RCB_UPD_ECNREPCLR V_CPL_RCB_UPD_ECNREPCLR(1U)
+
+#define S_CPL_RCB_UPD_NAKSEQCLR 4
+#define M_CPL_RCB_UPD_NAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_NAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_NAKSEQCLR)
+#define G_CPL_RCB_UPD_NAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_NAKSEQCLR) & M_CPL_RCB_UPD_NAKSEQCLR)
+#define F_CPL_RCB_UPD_NAKSEQCLR V_CPL_RCB_UPD_NAKSEQCLR(1U)
+
+#define S_CPL_RCB_UPD_QPERRSET 3
+#define M_CPL_RCB_UPD_QPERRSET 0x1
+#define V_CPL_RCB_UPD_QPERRSET(x) ((x) << S_CPL_RCB_UPD_QPERRSET)
+#define G_CPL_RCB_UPD_QPERRSET(x) \
+ (((x) >> S_CPL_RCB_UPD_QPERRSET) & M_CPL_RCB_UPD_QPERRSET)
+#define F_CPL_RCB_UPD_QPERRSET V_CPL_RCB_UPD_QPERRSET(1U)
+
+#define S_CPL_RCB_UPD_RRQUPDEN 2
+#define M_CPL_RCB_UPD_RRQUPDEN 0x1
+#define V_CPL_RCB_UPD_RRQUPDEN(x) ((x) << S_CPL_RCB_UPD_RRQUPDEN)
+#define G_CPL_RCB_UPD_RRQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RRQUPDEN) & M_CPL_RCB_UPD_RRQUPDEN)
+#define F_CPL_RCB_UPD_RRQUPDEN V_CPL_RCB_UPD_RRQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_RQUPDEN 1
+#define M_CPL_RCB_UPD_RQUPDEN 0x1
+#define V_CPL_RCB_UPD_RQUPDEN(x) ((x) << S_CPL_RCB_UPD_RQUPDEN)
+#define G_CPL_RCB_UPD_RQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RQUPDEN) & M_CPL_RCB_UPD_RQUPDEN)
+#define F_CPL_RCB_UPD_RQUPDEN V_CPL_RCB_UPD_RQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_CNPREPCLR 0
+#define M_CPL_RCB_UPD_CNPREPCLR 0x1
+#define V_CPL_RCB_UPD_CNPREPCLR(x) ((x) << S_CPL_RCB_UPD_CNPREPCLR)
+#define G_CPL_RCB_UPD_CNPREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_CNPREPCLR) & M_CPL_RCB_UPD_CNPREPCLR)
+#define F_CPL_RCB_UPD_CNPREPCLR V_CPL_RCB_UPD_CNPREPCLR(1U)
+
+#define S_CPL_RCB_UPD_RSPNAKSEQCLR 7
+#define M_CPL_RCB_UPD_RSPNAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_RSPNAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define G_CPL_RCB_UPD_RSPNAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_RSPNAKSEQCLR) & M_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define F_CPL_RCB_UPD_RSPNAKSEQCLR V_CPL_RCB_UPD_RSPNAKSEQCLR(1U)
+
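The RCB update flags live in the single nodata_to_cnprepclr byte (note RSPNAKSEQCLR reuses bit 7, presumably the responder-side reading of the same octet). A minimal sketch of setting two requester-side flags; being one byte, no endianness conversion applies:

	/* Sketch: request an RTT timestamp and clear the NAK sequence. */
	static void
	rcb_upd_flags(struct cpl_rcb_upd *cpl)
	{
		cpl->nodata_to_cnprepclr = F_CPL_RCB_UPD_RTTSTAMP |
		    F_CPL_RCB_UPD_NAKSEQCLR;
	}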
+struct cpl_roce_fw_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 type_pkd;
+};
+
+#define S_CPL_ROCE_FW_NOTIFY_OPCODE 24
+#define M_CPL_ROCE_FW_NOTIFY_OPCODE 0xff
+#define V_CPL_ROCE_FW_NOTIFY_OPCODE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_OPCODE)
+#define G_CPL_ROCE_FW_NOTIFY_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_OPCODE) & M_CPL_ROCE_FW_NOTIFY_OPCODE)
+
+#define S_CPL_ROCE_FW_NOTIFY_TID 0
+#define M_CPL_ROCE_FW_NOTIFY_TID 0xffffff
+#define V_CPL_ROCE_FW_NOTIFY_TID(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TID)
+#define G_CPL_ROCE_FW_NOTIFY_TID(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TID) & M_CPL_ROCE_FW_NOTIFY_TID)
+
+#define S_CPL_ROCE_FW_NOTIFY_TYPE 28
+#define M_CPL_ROCE_FW_NOTIFY_TYPE 0xf
+#define V_CPL_ROCE_FW_NOTIFY_TYPE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TYPE)
+#define G_CPL_ROCE_FW_NOTIFY_TYPE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TYPE) & M_CPL_ROCE_FW_NOTIFY_TYPE)
+
+struct cpl_roce_ack_nak_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_msn_hi;
+ __be32 msn_lo_pkd;
+};
+
+#define S_CPL_ROCE_ACK_NAK_REQ_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_REQ_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TID 0
+#define M_CPL_ROCE_ACK_NAK_REQ_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TID)
+#define G_CPL_ROCE_ACK_NAK_REQ_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TID) & M_CPL_ROCE_ACK_NAK_REQ_TID)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_REQ_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TYPE)
+#define G_CPL_ROCE_ACK_NAK_REQ_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TYPE) & M_CPL_ROCE_ACK_NAK_REQ_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_REQ_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_STATUS)
+#define G_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_STATUS) & M_CPL_ROCE_ACK_NAK_REQ_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_PSN 8
+#define M_CPL_ROCE_ACK_NAK_REQ_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_PSN)
+#define G_CPL_ROCE_ACK_NAK_REQ_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_PSN) & M_CPL_ROCE_ACK_NAK_REQ_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_HI) & M_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_LO 16
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_LO 0xffff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_LO) & M_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+
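The MSN is split across two words, 8 high bits in psn_msn_hi and 16 low bits in msn_lo_pkd. A minimal sketch of reassembling it; the HI-as-upper-bits ordering is assumed from the field names:

	/* Sketch: reassemble the 24-bit MSN from its two halves. */
	static uint32_t
	roce_req_msn(const struct cpl_roce_ack_nak_req *cpl)
	{
		uint32_t hi = G_CPL_ROCE_ACK_NAK_REQ_MSN_HI(be32toh(cpl->psn_msn_hi));
		uint32_t lo = G_CPL_ROCE_ACK_NAK_REQ_MSN_LO(be32toh(cpl->msn_lo_pkd));

		return ((hi << 16) | lo);
	}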
+struct cpl_roce_ack_nak {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_rtt_hi;
+ __be32 rtt_lo_to_rttbad;
+};
+
+#define S_CPL_ROCE_ACK_NAK_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_OPCODE) & M_CPL_ROCE_ACK_NAK_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_TID 0
+#define M_CPL_ROCE_ACK_NAK_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_TID)
+#define G_CPL_ROCE_ACK_NAK_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TID) & M_CPL_ROCE_ACK_NAK_TID)
+
+#define S_CPL_ROCE_ACK_NAK_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_TYPE)
+#define G_CPL_ROCE_ACK_NAK_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TYPE) & M_CPL_ROCE_ACK_NAK_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_STATUS(x) ((x) << S_CPL_ROCE_ACK_NAK_STATUS)
+#define G_CPL_ROCE_ACK_NAK_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_STATUS) & M_CPL_ROCE_ACK_NAK_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_WIRE_OPCODE) & M_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_PSN 8
+#define M_CPL_ROCE_ACK_NAK_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_PSN)
+#define G_CPL_ROCE_ACK_NAK_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_PSN) & M_CPL_ROCE_ACK_NAK_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_HI 0
+#define M_CPL_ROCE_ACK_NAK_RTT_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_HI(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_HI)
+#define G_CPL_ROCE_ACK_NAK_RTT_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_HI) & M_CPL_ROCE_ACK_NAK_RTT_HI)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_LO 24
+#define M_CPL_ROCE_ACK_NAK_RTT_LO 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_LO(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_LO)
+#define G_CPL_ROCE_ACK_NAK_RTT_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_LO) & M_CPL_ROCE_ACK_NAK_RTT_LO)
+
+#define S_CPL_ROCE_ACK_NAK_RTTVALID 23
+#define M_CPL_ROCE_ACK_NAK_RTTVALID 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTVALID(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTVALID)
+#define G_CPL_ROCE_ACK_NAK_RTTVALID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTVALID) & M_CPL_ROCE_ACK_NAK_RTTVALID)
+#define F_CPL_ROCE_ACK_NAK_RTTVALID V_CPL_ROCE_ACK_NAK_RTTVALID(1U)
+
+#define S_CPL_ROCE_ACK_NAK_RTTBAD 22
+#define M_CPL_ROCE_ACK_NAK_RTTBAD 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTBAD(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTBAD)
+#define G_CPL_ROCE_ACK_NAK_RTTBAD(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTBAD) & M_CPL_ROCE_ACK_NAK_RTTBAD)
+#define F_CPL_ROCE_ACK_NAK_RTTBAD V_CPL_ROCE_ACK_NAK_RTTBAD(1U)
+
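The RTT sample is likewise split (8 bits in psn_rtt_hi, 8 more at the top of rtt_lo_to_rttbad) and gated by the RTTVALID/RTTBAD flags. A minimal sketch, with the HI/LO ordering again assumed from the names:

	/* Sketch: recover the 16-bit RTT sample; returns 0 if unusable. */
	static int
	roce_ack_rtt(const struct cpl_roce_ack_nak *cpl, u_int *rtt)
	{
		uint32_t lo_w = be32toh(cpl->rtt_lo_to_rttbad);

		if (!G_CPL_ROCE_ACK_NAK_RTTVALID(lo_w) ||
		    G_CPL_ROCE_ACK_NAK_RTTBAD(lo_w))
			return (0);
		*rtt = (G_CPL_ROCE_ACK_NAK_RTT_HI(be32toh(cpl->psn_rtt_hi)) << 8) |
		    G_CPL_ROCE_ACK_NAK_RTT_LO(lo_w);
		return (1);
	}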
+struct cpl_roce_cqe {
+ __be16 op_rssctrl;
+ __be16 cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_OPCODE 8
+#define M_CPL_ROCE_CQE_OPCODE 0xff
+#define V_CPL_ROCE_CQE_OPCODE(x) ((x) << S_CPL_ROCE_CQE_OPCODE)
+#define G_CPL_ROCE_CQE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_OPCODE) & M_CPL_ROCE_CQE_OPCODE)
+
+#define S_CPL_ROCE_CQE_RSSCTRL 0
+#define M_CPL_ROCE_CQE_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_RSSCTRL)
+#define G_CPL_ROCE_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_RSSCTRL) & M_CPL_ROCE_CQE_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_TID 8
+#define M_CPL_ROCE_CQE_TID 0xfffff
+#define V_CPL_ROCE_CQE_TID(x) ((x) << S_CPL_ROCE_CQE_TID)
+#define G_CPL_ROCE_CQE_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_TID) & M_CPL_ROCE_CQE_TID)
+
+#define S_CPL_ROCE_CQE_FLITCNT 0
+#define M_CPL_ROCE_CQE_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FLITCNT)
+#define G_CPL_ROCE_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FLITCNT) & M_CPL_ROCE_CQE_FLITCNT)
+
+#define S_CPL_ROCE_CQE_QPID 12
+#define M_CPL_ROCE_CQE_QPID 0xfffff
+#define V_CPL_ROCE_CQE_QPID(x) ((x) << S_CPL_ROCE_CQE_QPID)
+#define G_CPL_ROCE_CQE_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_QPID) & M_CPL_ROCE_CQE_QPID)
+
+#define S_CPL_ROCE_CQE_EXTMODE 11
+#define M_CPL_ROCE_CQE_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
+#define G_CPL_ROCE_CQE_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
+#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_GENERATION_BIT) & M_CPL_ROCE_CQE_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_GENERATION_BIT V_CPL_ROCE_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_STATUS 5
+#define M_CPL_ROCE_CQE_STATUS 0x1f
+#define V_CPL_ROCE_CQE_STATUS(x) ((x) << S_CPL_ROCE_CQE_STATUS)
+#define G_CPL_ROCE_CQE_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_STATUS) & M_CPL_ROCE_CQE_STATUS)
+
+#define S_CPL_ROCE_CQE_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_CQE_TYPE)
+#define G_CPL_ROCE_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_CQE_TYPE) & M_CPL_ROCE_CQE_CQE_TYPE)
+#define F_CPL_ROCE_CQE_CQE_TYPE V_CPL_ROCE_CQE_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE 0
+#define M_CPL_ROCE_CQE_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE)
+#define G_CPL_ROCE_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE) & M_CPL_ROCE_CQE_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_SE 31
+#define M_CPL_ROCE_CQE_SE 0x1
+#define V_CPL_ROCE_CQE_SE(x) ((x) << S_CPL_ROCE_CQE_SE)
+#define G_CPL_ROCE_CQE_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_SE) & M_CPL_ROCE_CQE_SE)
+#define F_CPL_ROCE_CQE_SE V_CPL_ROCE_CQE_SE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_WR_TYPE_EXT(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE_EXT) & M_CPL_ROCE_CQE_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_SRQ 0
+#define M_CPL_ROCE_CQE_SRQ 0xfff
+#define V_CPL_ROCE_CQE_SRQ(x) ((x) << S_CPL_ROCE_CQE_SRQ)
+#define G_CPL_ROCE_CQE_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_SRQ) & M_CPL_ROCE_CQE_SRQ)
+
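A minimal sketch of cracking the qpid_to_wr_type word of a native CQE with the G_ macros above; the printf is illustrative, and no meaning is asserted here for the two CQE_TYPE values:

	/* Sketch: decode the per-CQE control word. */
	static void
	roce_cqe_decode(const struct cpl_roce_cqe *cqe)
	{
		uint32_t w = be32toh(cqe->qpid_to_wr_type);

		printf("qpid %u status %u cqe_type %u wr_type %u len %u\n",
		    G_CPL_ROCE_CQE_QPID(w), G_CPL_ROCE_CQE_STATUS(w),
		    G_CPL_ROCE_CQE_CQE_TYPE(w), G_CPL_ROCE_CQE_WR_TYPE(w),
		    be32toh(cqe->length));
	}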
+struct cpl_roce_cqe_fw {
+ __be32 op_to_cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_FW_OPCODE 24
+#define M_CPL_ROCE_CQE_FW_OPCODE 0xff
+#define V_CPL_ROCE_CQE_FW_OPCODE(x) ((x) << S_CPL_ROCE_CQE_FW_OPCODE)
+#define G_CPL_ROCE_CQE_FW_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_OPCODE) & M_CPL_ROCE_CQE_FW_OPCODE)
+
+#define S_CPL_ROCE_CQE_FW_RSSCTRL 16
+#define M_CPL_ROCE_CQE_FW_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_FW_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_FW_RSSCTRL)
+#define G_CPL_ROCE_CQE_FW_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_RSSCTRL) & M_CPL_ROCE_CQE_FW_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_FW_CQID 0
+#define M_CPL_ROCE_CQE_FW_CQID 0xffff
+#define V_CPL_ROCE_CQE_FW_CQID(x) ((x) << S_CPL_ROCE_CQE_FW_CQID)
+#define G_CPL_ROCE_CQE_FW_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQID) & M_CPL_ROCE_CQE_FW_CQID)
+
+#define S_CPL_ROCE_CQE_FW_TID 8
+#define M_CPL_ROCE_CQE_FW_TID 0xfffff
+#define V_CPL_ROCE_CQE_FW_TID(x) ((x) << S_CPL_ROCE_CQE_FW_TID)
+#define G_CPL_ROCE_CQE_FW_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_TID) & M_CPL_ROCE_CQE_FW_TID)
+
+#define S_CPL_ROCE_CQE_FW_FLITCNT 0
+#define M_CPL_ROCE_CQE_FW_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FW_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FW_FLITCNT)
+#define G_CPL_ROCE_CQE_FW_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_FLITCNT) & M_CPL_ROCE_CQE_FW_FLITCNT)
+
+#define S_CPL_ROCE_CQE_FW_QPID 12
+#define M_CPL_ROCE_CQE_FW_QPID 0xfffff
+#define V_CPL_ROCE_CQE_FW_QPID(x) ((x) << S_CPL_ROCE_CQE_FW_QPID)
+#define G_CPL_ROCE_CQE_FW_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_QPID) & M_CPL_ROCE_CQE_FW_QPID)
+
+#define S_CPL_ROCE_CQE_FW_EXTMODE 11
+#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
+#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
+#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_FW_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_FW_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_FW_GENERATION_BIT V_CPL_ROCE_CQE_FW_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_FW_STATUS 5
+#define M_CPL_ROCE_CQE_FW_STATUS 0x1f
+#define V_CPL_ROCE_CQE_FW_STATUS(x) ((x) << S_CPL_ROCE_CQE_FW_STATUS)
+#define G_CPL_ROCE_CQE_FW_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_STATUS) & M_CPL_ROCE_CQE_FW_STATUS)
+
+#define S_CPL_ROCE_CQE_FW_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_FW_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_FW_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define G_CPL_ROCE_CQE_FW_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQE_TYPE) & M_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define F_CPL_ROCE_CQE_FW_CQE_TYPE V_CPL_ROCE_CQE_FW_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE 0
+#define M_CPL_ROCE_CQE_FW_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_FW_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE) & M_CPL_ROCE_CQE_FW_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_FW_SE 31
+#define M_CPL_ROCE_CQE_FW_SE 0x1
+#define V_CPL_ROCE_CQE_FW_SE(x) ((x) << S_CPL_ROCE_CQE_FW_SE)
+#define G_CPL_ROCE_CQE_FW_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SE) & M_CPL_ROCE_CQE_FW_SE)
+#define F_CPL_ROCE_CQE_FW_SE V_CPL_ROCE_CQE_FW_SE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_FW_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE_EXT) & M_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_FW_SRQ 0
+#define M_CPL_ROCE_CQE_FW_SRQ 0xfff
+#define V_CPL_ROCE_CQE_FW_SRQ(x) ((x) << S_CPL_ROCE_CQE_FW_SRQ)
+#define G_CPL_ROCE_CQE_FW_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SRQ) & M_CPL_ROCE_CQE_FW_SRQ)
+
+struct cpl_roce_cqe_err {
+ __be32 op_to_CQID;
+ __be32 Tid_FlitCnt;
+ __be32 QPID_to_WR_type;
+ __be32 Length;
+ __be32 TAG;
+ __be32 MSN;
+ __be32 SE_to_SRQ;
+ __be32 RQE;
+ __be32 ExtInfoMS[2];
+ __be32 ExtInfoLS[2];
+};
+
+#define S_CPL_ROCE_CQE_ERR_OPCODE 24
+#define M_CPL_ROCE_CQE_ERR_OPCODE 0xff
+#define V_CPL_ROCE_CQE_ERR_OPCODE(x) ((x) << S_CPL_ROCE_CQE_ERR_OPCODE)
+#define G_CPL_ROCE_CQE_ERR_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_OPCODE) & M_CPL_ROCE_CQE_ERR_OPCODE)
+
+#define S_CPL_ROCE_CQE_ERR_RSSCTRL 16
+#define M_CPL_ROCE_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_ERR_RSSCTRL)
+#define G_CPL_ROCE_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_RSSCTRL) & M_CPL_ROCE_CQE_ERR_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_ERR_CQID 0
+#define M_CPL_ROCE_CQE_ERR_CQID 0xffff
+#define V_CPL_ROCE_CQE_ERR_CQID(x) ((x) << S_CPL_ROCE_CQE_ERR_CQID)
+#define G_CPL_ROCE_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQID) & M_CPL_ROCE_CQE_ERR_CQID)
+
+#define S_CPL_ROCE_CQE_ERR_TID 8
+#define M_CPL_ROCE_CQE_ERR_TID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_TID(x) ((x) << S_CPL_ROCE_CQE_ERR_TID)
+#define G_CPL_ROCE_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_TID) & M_CPL_ROCE_CQE_ERR_TID)
+
+#define S_CPL_ROCE_CQE_ERR_FLITCNT 0
+#define M_CPL_ROCE_CQE_ERR_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_ERR_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_ERR_FLITCNT)
+#define G_CPL_ROCE_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_FLITCNT) & M_CPL_ROCE_CQE_ERR_FLITCNT)
+
+#define S_CPL_ROCE_CQE_ERR_QPID 12
+#define M_CPL_ROCE_CQE_ERR_QPID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_QPID(x) ((x) << S_CPL_ROCE_CQE_ERR_QPID)
+#define G_CPL_ROCE_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_QPID) & M_CPL_ROCE_CQE_ERR_QPID)
+
+#define S_CPL_ROCE_CQE_ERR_EXTMODE 11
+#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
+#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
+#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_ERR_GENERATION_BIT \
+ V_CPL_ROCE_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_ERR_STATUS 5
+#define M_CPL_ROCE_CQE_ERR_STATUS 0x1f
+#define V_CPL_ROCE_CQE_ERR_STATUS(x) ((x) << S_CPL_ROCE_CQE_ERR_STATUS)
+#define G_CPL_ROCE_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_STATUS) & M_CPL_ROCE_CQE_ERR_STATUS)
+
+#define S_CPL_ROCE_CQE_ERR_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define G_CPL_ROCE_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQE_TYPE) & M_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define F_CPL_ROCE_CQE_ERR_CQE_TYPE V_CPL_ROCE_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE 0
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE) & M_CPL_ROCE_CQE_ERR_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_ERR_SE 31
+#define M_CPL_ROCE_CQE_ERR_SE 0x1
+#define V_CPL_ROCE_CQE_ERR_SE(x) ((x) << S_CPL_ROCE_CQE_ERR_SE)
+#define G_CPL_ROCE_CQE_ERR_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SE) & M_CPL_ROCE_CQE_ERR_SE)
+#define F_CPL_ROCE_CQE_ERR_SE V_CPL_ROCE_CQE_ERR_SE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT) & M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_ERR_SRQ 0
+#define M_CPL_ROCE_CQE_ERR_SRQ 0xfff
+#define V_CPL_ROCE_CQE_ERR_SRQ(x) ((x) << S_CPL_ROCE_CQE_ERR_SRQ)
+#define G_CPL_ROCE_CQE_ERR_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SRQ) & M_CPL_ROCE_CQE_ERR_SRQ)
+
+struct cpl_accelerator_hdr {
+ __be16 op_accelerator_id;
+ __be16 rxchid_payload_to_inner_cpl_length_ack;
+ __be32 inner_cpl_length_payload_status_loc;
+};
+
+#define S_CPL_ACCELERATOR_HDR_OPCODE 8
+#define M_CPL_ACCELERATOR_HDR_OPCODE 0xff
+#define V_CPL_ACCELERATOR_HDR_OPCODE(x) ((x) << S_CPL_ACCELERATOR_HDR_OPCODE)
+#define G_CPL_ACCELERATOR_HDR_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_OPCODE) & M_CPL_ACCELERATOR_HDR_OPCODE)
+
+#define S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 14
+#define M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 12
+#define M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_ACK 10
+#define M_CPL_ACCELERATOR_HDR_RXCHID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_ACK 8
+#define M_CPL_ACCELERATOR_HDR_DESTID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_ACK)
+#define G_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 24
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_STATUS_LOC 22
+#define M_CPL_ACCELERATOR_HDR_STATUS_LOC 0x3
+#define V_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_STATUS_LOC)
+#define G_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_STATUS_LOC) & \
+ M_CPL_ACCELERATOR_HDR_STATUS_LOC)
+
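A minimal sketch of splitting an accelerator header into its routing fields, using only the macros above; note the 16-bit words take be16toh() and the 32-bit word be32toh():

	/* Sketch: decode the accelerator routing/length fields. */
	static void
	accel_hdr_decode(const struct cpl_accelerator_hdr *h)
	{
		uint16_t w16 = be16toh(h->rxchid_payload_to_inner_cpl_length_ack);
		uint32_t w32 = be32toh(h->inner_cpl_length_payload_status_loc);

		printf("accel id %u rxch %u inner len %u status_loc %u\n",
		    G_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(be16toh(h->op_accelerator_id)),
		    G_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(w16),
		    G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(w32),
		    G_CPL_ACCELERATOR_HDR_STATUS_LOC(w32));
	}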
+struct cpl_accelerator_ack {
+ RSS_HDR
+ __be16 op_accelerator_id;
+ __be16 r0;
+ __be32 status;
+ __be64 r1;
+ __be64 r2;
+};
+
+#define S_CPL_ACCELERATOR_ACK_OPCODE 8
+#define M_CPL_ACCELERATOR_ACK_OPCODE 0xff
+#define V_CPL_ACCELERATOR_ACK_OPCODE(x) ((x) << S_CPL_ACCELERATOR_ACK_OPCODE)
+#define G_CPL_ACCELERATOR_ACK_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_OPCODE) & M_CPL_ACCELERATOR_ACK_OPCODE)
+
+#define S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+
+struct cpl_nvmt_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r0;
+ __be16 length;
+ __be32 seq;
+ __be32 status_pkd;
+};
+
+#define S_CPL_NVMT_DATA_OPCODE 24
+#define M_CPL_NVMT_DATA_OPCODE 0xff
+#define V_CPL_NVMT_DATA_OPCODE(x) ((x) << S_CPL_NVMT_DATA_OPCODE)
+#define G_CPL_NVMT_DATA_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_DATA_OPCODE) & M_CPL_NVMT_DATA_OPCODE)
+
+#define S_CPL_NVMT_DATA_TID 0
+#define M_CPL_NVMT_DATA_TID 0xffffff
+#define V_CPL_NVMT_DATA_TID(x) ((x) << S_CPL_NVMT_DATA_TID)
+#define G_CPL_NVMT_DATA_TID(x) \
+ (((x) >> S_CPL_NVMT_DATA_TID) & M_CPL_NVMT_DATA_TID)
+
+#define S_CPL_NVMT_DATA_STATUS 0
+#define M_CPL_NVMT_DATA_STATUS 0xff
+#define V_CPL_NVMT_DATA_STATUS(x) ((x) << S_CPL_NVMT_DATA_STATUS)
+#define G_CPL_NVMT_DATA_STATUS(x) \
+ (((x) >> S_CPL_NVMT_DATA_STATUS) & M_CPL_NVMT_DATA_STATUS)
+
+struct cpl_nvmt_cmp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 crch;
+ __be16 length;
+ __be32 seq;
+ __u8 t10status;
+ __u8 status;
+ __be16 crcl;
+};
+
+#define S_CPL_NVMT_CMP_OPCODE 24
+#define M_CPL_NVMT_CMP_OPCODE 0xff
+#define V_CPL_NVMT_CMP_OPCODE(x) ((x) << S_CPL_NVMT_CMP_OPCODE)
+#define G_CPL_NVMT_CMP_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_OPCODE) & M_CPL_NVMT_CMP_OPCODE)
+
+#define S_CPL_NVMT_CMP_TID 0
+#define M_CPL_NVMT_CMP_TID 0xffffff
+#define V_CPL_NVMT_CMP_TID(x) ((x) << S_CPL_NVMT_CMP_TID)
+#define G_CPL_NVMT_CMP_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_TID) & M_CPL_NVMT_CMP_TID)
+
+struct cpl_nvmt_cmp_imm {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 r1;
+};
+
+#define S_CPL_NVMT_CMP_IMM_OPCODE 24
+#define M_CPL_NVMT_CMP_IMM_OPCODE 0xff
+#define V_CPL_NVMT_CMP_IMM_OPCODE(x) ((x) << S_CPL_NVMT_CMP_IMM_OPCODE)
+#define G_CPL_NVMT_CMP_IMM_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPCODE) & M_CPL_NVMT_CMP_IMM_OPCODE)
+
+#define S_CPL_NVMT_CMP_IMM_RSSCTRL 16
+#define M_CPL_NVMT_CMP_IMM_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_IMM_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_IMM_RSSCTRL)
+#define G_CPL_NVMT_CMP_IMM_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_RSSCTRL) & M_CPL_NVMT_CMP_IMM_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_IMM_CQID 0
+#define M_CPL_NVMT_CMP_IMM_CQID 0xffff
+#define V_CPL_NVMT_CMP_IMM_CQID(x) ((x) << S_CPL_NVMT_CMP_IMM_CQID)
+#define G_CPL_NVMT_CMP_IMM_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_CQID) & M_CPL_NVMT_CMP_IMM_CQID)
+
+#define S_CPL_NVMT_CMP_IMM_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_IMM_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_IMM_GENERATION_BIT \
+ V_CPL_NVMT_CMP_IMM_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_IMM_TID 8
+#define M_CPL_NVMT_CMP_IMM_TID 0xfffff
+#define V_CPL_NVMT_CMP_IMM_TID(x) ((x) << S_CPL_NVMT_CMP_IMM_TID)
+#define G_CPL_NVMT_CMP_IMM_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_TID) & M_CPL_NVMT_CMP_IMM_TID)
+
+#define S_CPL_NVMT_CMP_IMM_OPRQINC 0
+#define M_CPL_NVMT_CMP_IMM_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_IMM_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_IMM_OPRQINC)
+#define G_CPL_NVMT_CMP_IMM_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPRQINC) & M_CPL_NVMT_CMP_IMM_OPRQINC)
+
+struct cpl_nvmt_cmp_srq {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 rqe;
+};
+
+#define S_CPL_NVMT_CMP_SRQ_OPCODE 24
+#define M_CPL_NVMT_CMP_SRQ_OPCODE 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPCODE(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPCODE)
+#define G_CPL_NVMT_CMP_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPCODE) & M_CPL_NVMT_CMP_SRQ_OPCODE)
+
+#define S_CPL_NVMT_CMP_SRQ_RSSCTRL 16
+#define M_CPL_NVMT_CMP_SRQ_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_SRQ_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_SRQ_RSSCTRL)
+#define G_CPL_NVMT_CMP_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_RSSCTRL) & M_CPL_NVMT_CMP_SRQ_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_SRQ_CQID 0
+#define M_CPL_NVMT_CMP_SRQ_CQID 0xffff
+#define V_CPL_NVMT_CMP_SRQ_CQID(x) ((x) << S_CPL_NVMT_CMP_SRQ_CQID)
+#define G_CPL_NVMT_CMP_SRQ_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_CQID) & M_CPL_NVMT_CMP_SRQ_CQID)
+
+#define S_CPL_NVMT_CMP_SRQ_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_SRQ_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_SRQ_GENERATION_BIT \
+ V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_SRQ_TID 8
+#define M_CPL_NVMT_CMP_SRQ_TID 0xfffff
+#define V_CPL_NVMT_CMP_SRQ_TID(x) ((x) << S_CPL_NVMT_CMP_SRQ_TID)
+#define G_CPL_NVMT_CMP_SRQ_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_TID) & M_CPL_NVMT_CMP_SRQ_TID)
+
+#define S_CPL_NVMT_CMP_SRQ_OPRQINC 0
+#define M_CPL_NVMT_CMP_SRQ_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPRQINC)
+#define G_CPL_NVMT_CMP_SRQ_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPRQINC) & M_CPL_NVMT_CMP_SRQ_OPRQINC)
+
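The NVMe-TCP completions carry the received CRC as two 16-bit halves (crch/crcl). A minimal sketch of gluing them back together; crch as the upper half is an assumption taken from the names:

	/* Sketch: reassemble the 32-bit CRC from a cpl_nvmt_cmp. */
	static uint32_t
	nvmt_cmp_crc(const struct cpl_nvmt_cmp *cpl)
	{
		return (((uint32_t)be16toh(cpl->crch) << 16) | be16toh(cpl->crcl));
	}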
#endif /* T4_MSG_H */
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index e3b2a29b2ea9..8f500ec0fbdd 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2013, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2013, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Wed Jan 27 10:57:51 IST 2016 */
-/* Directory name: t4_reg.txt, Changeset: */
-/* Directory name: t5_reg.txt, Changeset: 6936:7f6342b03d61 */
-/* Directory name: t6_reg.txt, Changeset: 4191:ce3ccd95c109 */
+/* Generation Date : Thu Sep 11 05:25:56 PM IST 2025 */
+/* Directory name: t4_reg.txt, Date: Not specified */
+/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
+/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
+/* Directory name: t7_reg.txt, Changeset: 5945:1487219ecb20 */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -285,9 +285,6 @@
#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
-#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
-#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
-
#define PCIE_PF_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define NUM_PCIE_PF_INT_INSTANCES 8
@@ -459,9 +456,6 @@
#define LE_DB_DBGI_REQ_MASK_T6(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
#define NUM_LE_DB_DBGI_REQ_MASK_T6_INSTANCES 11
-#define LE_DB_DBGI_RSP_DATA_T6(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
-#define NUM_LE_DB_DBGI_RSP_DATA_T6_INSTANCES 11
-
#define LE_DB_ACTIVE_MASK_IPV6_T6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
#define NUM_LE_DB_ACTIVE_MASK_IPV6_T6_INSTANCES 8
@@ -501,12 +495,175 @@
#define CIM_CTL_MAILBOX_VFN_CTL_T6(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T6_INSTANCES 256
+#define T7_MYPORT_BASE 0x2e000
+#define T7_MYPORT_REG(reg_addr) (T7_MYPORT_BASE + (reg_addr))
+
+#define T7_PORT0_BASE 0x30000
+#define T7_PORT0_REG(reg_addr) (T7_PORT0_BASE + (reg_addr))
+
+#define T7_PORT1_BASE 0x32000
+#define T7_PORT1_REG(reg_addr) (T7_PORT1_BASE + (reg_addr))
+
+#define T7_PORT2_BASE 0x34000
+#define T7_PORT2_REG(reg_addr) (T7_PORT2_BASE + (reg_addr))
+
+#define T7_PORT3_BASE 0x36000
+#define T7_PORT3_REG(reg_addr) (T7_PORT3_BASE + (reg_addr))
+
+#define T7_PORT_STRIDE 0x2000
+#define T7_PORT_BASE(idx) (T7_PORT0_BASE + (idx) * T7_PORT_STRIDE)
+#define T7_PORT_REG(idx, reg) (T7_PORT_BASE(idx) + (reg))
+
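T7 keeps the stride-based per-port window scheme, now 0x2000 apart. A minimal sketch of a port-relative read using the driver's t4_read_reg() accessor; struct adapter is the cxgbe softc and the wrapper name is illustrative:

	/* Sketch: read offset `reg` inside T7 port `idx`'s register window. */
	static uint32_t
	t7_port_read(struct adapter *sc, u_int idx, uint32_t reg)
	{
		return (t4_read_reg(sc, T7_PORT_REG(idx, reg)));
	}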
+#define PCIE_MEM_ACCESS_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_MEM_ACCESS_T7_INSTANCES 16
+
+#define PCIE_T7_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T7_CMD_INSTANCES 1
+
+#define PCIE_T5_ARM_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_ARM_INSTANCES 1
+
+#define PCIE_JBOF_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_JBOF_INSTANCES 16
+
+#define PCIE_EMUADRRMAP_REG(reg_addr, idx) ((reg_addr) + (idx) * 32)
+#define NUM_PCIE_EMUADRRMAP_INSTANCES 3
+
+#define CIM_GFT_MASK(idx) (A_CIM_GFT_MASK + (idx) * 4)
+#define NUM_CIM_GFT_MASK_INSTANCES 4
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_A(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_A + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_A_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_B(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_B + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_B_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_RUNT_CTL(idx) (A_T7_MPS_TRC_FILTER_RUNT_CTL + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_RUNT_CTL_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_DROP(idx) (A_T7_MPS_TRC_FILTER_DROP + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_DROP_INSTANCES 8
+
+#define MPS_TRC_FILTER4_MATCH(idx) (A_MPS_TRC_FILTER4_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER4_DONT_CARE(idx) (A_MPS_TRC_FILTER4_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER5_MATCH(idx) (A_MPS_TRC_FILTER5_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER5_DONT_CARE(idx) (A_MPS_TRC_FILTER5_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER6_MATCH(idx) (A_MPS_TRC_FILTER6_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER6_DONT_CARE(idx) (A_MPS_TRC_FILTER6_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER7_MATCH(idx) (A_MPS_TRC_FILTER7_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER7_DONT_CARE(idx) (A_MPS_TRC_FILTER7_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_DONT_CARE_INSTANCES 28
+
+#define LE_DB_DBGI_REQ_DATA_T7(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_T7_INSTANCES 13
+
+#define LE_DB_DBGI_REQ_MASK_T7(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_MASK_T7_INSTANCES 13
+
+#define LE_DB_ACTIVE_MASK_IPV6_T7(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV6_T7_INSTANCES 8
+
+#define LE_HASH_MASK_GEN_IPV4T7(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4T7_INSTANCES 8
+
+#define T7_LE_HASH_MASK_GEN_IPV6T5(idx) (A_T7_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4)
+#define NUM_T7_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 8
+
+#define LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7_INSTANCES 8
+
+#define TLS_TX_CH_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_INSTANCES 6
+
+#define TLS_TX_CH_IND_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_IND_INSTANCES 6
+
+#define ARM_CPU_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_ARM_CPU_INSTANCES 4
+
+#define ARM_CCIM_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIM_INSTANCES 4
+
+#define ARM_CCIS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIS_INSTANCES 5
+
+#define ARM_CCI_EVNTBUS(idx) (A_ARM_CCI_EVNTBUS + (idx) * 4)
+#define NUM_ARM_CCI_EVNTBUS_INSTANCES 5
+
+#define ARM_ARM_CFG1(idx) (A_ARM_ARM_CFG1 + (idx) * 4)
+#define NUM_ARM_ARM_CFG1_INSTANCES 2
+
+#define ARM_ARM_CFG2(idx) (A_ARM_ARM_CFG2 + (idx) * 4)
+#define NUM_ARM_ARM_CFG2_INSTANCES 2
+
+#define ARM_MSG_REG(reg_addr, idx) ((reg_addr) + (idx) * 48)
+#define NUM_ARM_MSG_INSTANCES 4
+
+#define ARM_MSG_PCIE_MESSAGE2AXI_CFG4(idx) (A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 + (idx) * 4)
+#define NUM_ARM_MSG_PCIE_MESSAGE2AXI_CFG4_INSTANCES 2
+
+#define MC_CE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_UE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_P_BIST_USER_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_P_BIST_USER_INSTANCES 36
+
+#define HMA_H_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_HMA_H_BIST_STATUS_INSTANCES 18
+
+#define GCACHE_P_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_GCACHE_P_BIST_STATUS_INSTANCES 18
+
+#define CIM_CTL_MAILBOX_VF_STATUS_T7(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VF_STATUS_T7_INSTANCES 8
+
+#define CIM_CTL_MAILBOX_VFN_CTL_T7(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T7_INSTANCES 256
+
+#define CIM_CTL_TID_MAP_EN(idx) (A_CIM_CTL_TID_MAP_EN + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_EN_INSTANCES 8
+
+#define CIM_CTL_TID_MAP_CORE(idx) (A_CIM_CTL_TID_MAP_CORE + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_CORE_INSTANCES 8
+
+#define CIM_CTL_CRYPTO_KEY_DATA(idx) (A_CIM_CTL_CRYPTO_KEY_DATA + (idx) * 4)
+#define NUM_CIM_CTL_CRYPTO_KEY_DATA_INSTANCES 17
+
+#define CIM_CTL_FLOWID_OP_VALID(idx) (A_CIM_CTL_FLOWID_OP_VALID + (idx) * 4)
+#define NUM_CIM_CTL_FLOWID_OP_VALID_INSTANCES 8
+
+#define CIM_CTL_SLV_REG(reg_addr, idx) ((reg_addr) + (idx) * 1024)
+#define NUM_CIM_CTL_SLV_INSTANCES 7
+
#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_T7_STRIDE (MC_T71_BASE_ADDR - MC_T70_BASE_ADDR)
+#define MC_T7_REG(reg, idx) (reg + MC_T7_STRIDE * idx)
+
/* registers for module SGE */
#define SGE_BASE_ADDR 0x1000
@@ -637,6 +794,24 @@
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+#define S_NUMOFFID 19
+#define M_NUMOFFID 0x7U
+#define V_NUMOFFID(x) ((x) << S_NUMOFFID)
+#define G_NUMOFFID(x) (((x) >> S_NUMOFFID) & M_NUMOFFID)
+
+#define S_INGHINTENABLE2 16
+#define V_INGHINTENABLE2(x) ((x) << S_INGHINTENABLE2)
+#define F_INGHINTENABLE2 V_INGHINTENABLE2(1U)
+
+#define S_INGHINTENABLE3 3
+#define V_INGHINTENABLE3(x) ((x) << S_INGHINTENABLE3)
+#define F_INGHINTENABLE3 V_INGHINTENABLE3(1U)
+
+#define S_TF_MODE 1
+#define M_TF_MODE 0x3U
+#define V_TF_MODE(x) ((x) << S_TF_MODE)
+#define G_TF_MODE(x) (((x) >> S_TF_MODE) & M_TF_MODE)
+
#define A_SGE_HOST_PAGE_SIZE 0x100c
#define S_HOSTPAGESIZEPF7 28
@@ -792,6 +967,16 @@
#define V_WR_ERROR_OPCODE(x) ((x) << S_WR_ERROR_OPCODE)
#define G_WR_ERROR_OPCODE(x) (((x) >> S_WR_ERROR_OPCODE) & M_WR_ERROR_OPCODE)
+#define S_WR_SENDPATH_ERROR_OPCODE 16
+#define M_WR_SENDPATH_ERROR_OPCODE 0xffU
+#define V_WR_SENDPATH_ERROR_OPCODE(x) ((x) << S_WR_SENDPATH_ERROR_OPCODE)
+#define G_WR_SENDPATH_ERROR_OPCODE(x) (((x) >> S_WR_SENDPATH_ERROR_OPCODE) & M_WR_SENDPATH_ERROR_OPCODE)
+
+#define S_WR_SENDPATH_OPCODE 8
+#define M_WR_SENDPATH_OPCODE 0xffU
+#define V_WR_SENDPATH_OPCODE(x) ((x) << S_WR_SENDPATH_OPCODE)
+#define G_WR_SENDPATH_OPCODE(x) (((x) >> S_WR_SENDPATH_OPCODE) & M_WR_SENDPATH_OPCODE)
+
#define A_SGE_PERR_INJECT 0x1020
#define S_MEMSEL 1
@@ -941,6 +1126,22 @@
#define V_PERR_PC_REQ(x) ((x) << S_PERR_PC_REQ)
#define F_PERR_PC_REQ V_PERR_PC_REQ(1U)
+#define S_PERR_HEADERSPLIT_FIFO3 28
+#define V_PERR_HEADERSPLIT_FIFO3(x) ((x) << S_PERR_HEADERSPLIT_FIFO3)
+#define F_PERR_HEADERSPLIT_FIFO3 V_PERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO2 27
+#define V_PERR_HEADERSPLIT_FIFO2(x) ((x) << S_PERR_HEADERSPLIT_FIFO2)
+#define F_PERR_HEADERSPLIT_FIFO2 V_PERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_PERR_PAYLOAD_FIFO3 26
+#define V_PERR_PAYLOAD_FIFO3(x) ((x) << S_PERR_PAYLOAD_FIFO3)
+#define F_PERR_PAYLOAD_FIFO3 V_PERR_PAYLOAD_FIFO3(1U)
+
+#define S_PERR_PAYLOAD_FIFO2 25
+#define V_PERR_PAYLOAD_FIFO2(x) ((x) << S_PERR_PAYLOAD_FIFO2)
+#define F_PERR_PAYLOAD_FIFO2 V_PERR_PAYLOAD_FIFO2(1U)
+
#define A_SGE_INT_ENABLE1 0x1028
#define A_SGE_PERR_ENABLE1 0x102c
#define A_SGE_INT_CAUSE2 0x1030
@@ -1105,6 +1306,22 @@
#define V_PERR_DB_FIFO(x) ((x) << S_PERR_DB_FIFO)
#define F_PERR_DB_FIFO V_PERR_DB_FIFO(1U)
+#define S_TF_FIFO_PERR 24
+#define V_TF_FIFO_PERR(x) ((x) << S_TF_FIFO_PERR)
+#define F_TF_FIFO_PERR V_TF_FIFO_PERR(1U)
+
+#define S_PERR_ISW_IDMA3_FIFO 15
+#define V_PERR_ISW_IDMA3_FIFO(x) ((x) << S_PERR_ISW_IDMA3_FIFO)
+#define F_PERR_ISW_IDMA3_FIFO V_PERR_ISW_IDMA3_FIFO(1U)
+
+#define S_PERR_ISW_IDMA2_FIFO 13
+#define V_PERR_ISW_IDMA2_FIFO(x) ((x) << S_PERR_ISW_IDMA2_FIFO)
+#define F_PERR_ISW_IDMA2_FIFO V_PERR_ISW_IDMA2_FIFO(1U)
+
+#define S_SGE_IPP_FIFO_PERR 5
+#define V_SGE_IPP_FIFO_PERR(x) ((x) << S_SGE_IPP_FIFO_PERR)
+#define F_SGE_IPP_FIFO_PERR V_SGE_IPP_FIFO_PERR(1U)
+
#define A_SGE_INT_ENABLE2 0x1034
#define A_SGE_PERR_ENABLE2 0x1038
#define A_SGE_INT_CAUSE3 0x103c
@@ -1259,110 +1476,20 @@
#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
#define A_SGE_FL_BUFFER_SIZE1 0x1048
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE2 0x104c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE3 0x1050
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE4 0x1054
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE5 0x1058
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE6 0x105c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE7 0x1060
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE8 0x1064
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE9 0x1068
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE10 0x106c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE11 0x1070
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE12 0x1074
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE13 0x1078
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE14 0x107c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE15 0x1080
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_DBQ_CTXT_BADDR 0x1084
#define S_BASEADDR 3
@@ -1426,6 +1553,10 @@
#define V_NULLPTREN(x) ((x) << S_NULLPTREN)
#define F_NULLPTREN V_NULLPTREN(1U)
+#define S_HDRSTARTFLQ4K 1
+#define V_HDRSTARTFLQ4K(x) ((x) << S_HDRSTARTFLQ4K)
+#define F_HDRSTARTFLQ4K V_HDRSTARTFLQ4K(1U)
+
#define A_SGE_CONM_CTRL 0x1094
#define S_EGRTHRESHOLD 8
@@ -2243,6 +2374,34 @@
#define V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(x) ((x) << S_PERR_IDMA_SWITCH_OUTPUT_FIFO0)
#define F_PERR_IDMA_SWITCH_OUTPUT_FIFO0 V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(1U)
+#define S_PERR_POINTER_HDR_FIFO3 10
+#define V_PERR_POINTER_HDR_FIFO3(x) ((x) << S_PERR_POINTER_HDR_FIFO3)
+#define F_PERR_POINTER_HDR_FIFO3 V_PERR_POINTER_HDR_FIFO3(1U)
+
+#define S_PERR_POINTER_HDR_FIFO2 9
+#define V_PERR_POINTER_HDR_FIFO2(x) ((x) << S_PERR_POINTER_HDR_FIFO2)
+#define F_PERR_POINTER_HDR_FIFO2 V_PERR_POINTER_HDR_FIFO2(1U)
+
+#define S_PERR_POINTER_DATA_FIFO3 8
+#define V_PERR_POINTER_DATA_FIFO3(x) ((x) << S_PERR_POINTER_DATA_FIFO3)
+#define F_PERR_POINTER_DATA_FIFO3 V_PERR_POINTER_DATA_FIFO3(1U)
+
+#define S_PERR_POINTER_DATA_FIFO2 7
+#define V_PERR_POINTER_DATA_FIFO2(x) ((x) << S_PERR_POINTER_DATA_FIFO2)
+#define F_PERR_POINTER_DATA_FIFO2 V_PERR_POINTER_DATA_FIFO2(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO3 3
+#define V_PERR_IDMA2IMSG_FIFO3(x) ((x) << S_PERR_IDMA2IMSG_FIFO3)
+#define F_PERR_IDMA2IMSG_FIFO3 V_PERR_IDMA2IMSG_FIFO3(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO2 2
+#define V_PERR_IDMA2IMSG_FIFO2(x) ((x) << S_PERR_IDMA2IMSG_FIFO2)
+#define F_PERR_IDMA2IMSG_FIFO2 V_PERR_IDMA2IMSG_FIFO2(1U)
+
+#define S_PERR_HINT_DELAY_FIFO 0
+#define V_PERR_HINT_DELAY_FIFO(x) ((x) << S_PERR_HINT_DELAY_FIFO)
+#define F_PERR_HINT_DELAY_FIFO V_PERR_HINT_DELAY_FIFO(1U)
+
#define A_SGE_INT_ENABLE5 0x1110
#define A_SGE_PERR_ENABLE5 0x1114
#define A_SGE_DBFIFO_STATUS2 0x1118
@@ -2359,6 +2518,46 @@
#define V_TX_COALESCE_PRI(x) ((x) << S_TX_COALESCE_PRI)
#define F_TX_COALESCE_PRI V_TX_COALESCE_PRI(1U)
+#define S_HINT_SGE_SEL 31
+#define V_HINT_SGE_SEL(x) ((x) << S_HINT_SGE_SEL)
+#define F_HINT_SGE_SEL V_HINT_SGE_SEL(1U)
+
+#define S_HINT_SEL 30
+#define V_HINT_SEL(x) ((x) << S_HINT_SEL)
+#define F_HINT_SEL V_HINT_SEL(1U)
+
+#define S_HINT_DISABLE 29
+#define V_HINT_DISABLE(x) ((x) << S_HINT_DISABLE)
+#define F_HINT_DISABLE V_HINT_DISABLE(1U)
+
+#define S_RXCPLMODE_ISCSI 28
+#define V_RXCPLMODE_ISCSI(x) ((x) << S_RXCPLMODE_ISCSI)
+#define F_RXCPLMODE_ISCSI V_RXCPLMODE_ISCSI(1U)
+
+#define S_RXCPLMODE_NVMT 27
+#define V_RXCPLMODE_NVMT(x) ((x) << S_RXCPLMODE_NVMT)
+#define F_RXCPLMODE_NVMT V_RXCPLMODE_NVMT(1U)
+
+#define S_WRE_REPLAY_INORDER 26
+#define V_WRE_REPLAY_INORDER(x) ((x) << S_WRE_REPLAY_INORDER)
+#define F_WRE_REPLAY_INORDER V_WRE_REPLAY_INORDER(1U)
+
+#define S_ETH2XEN 25
+#define V_ETH2XEN(x) ((x) << S_ETH2XEN)
+#define F_ETH2XEN V_ETH2XEN(1U)
+
+#define S_ARMDBENDDIS 24
+#define V_ARMDBENDDIS(x) ((x) << S_ARMDBENDDIS)
+#define F_ARMDBENDDIS V_ARMDBENDDIS(1U)
+
+#define S_PACKPADT7 23
+#define V_PACKPADT7(x) ((x) << S_PACKPADT7)
+#define F_PACKPADT7 V_PACKPADT7(1U)
+
+#define S_WRE_UPFLCREDIT 22
+#define V_WRE_UPFLCREDIT(x) ((x) << S_WRE_UPFLCREDIT)
+#define F_WRE_UPFLCREDIT V_WRE_UPFLCREDIT(1U)
+
#define A_SGE_DEEP_SLEEP 0x1128
#define S_IDMA1_SLEEP_STATUS 11
@@ -2493,6 +2692,42 @@
#define V_FATAL_DEQ(x) ((x) << S_FATAL_DEQ)
#define F_FATAL_DEQ V_FATAL_DEQ(1U)
+#define S_FATAL_DEQ0_DRDY 29
+#define M_FATAL_DEQ0_DRDY 0x7U
+#define V_FATAL_DEQ0_DRDY(x) ((x) << S_FATAL_DEQ0_DRDY)
+#define G_FATAL_DEQ0_DRDY(x) (((x) >> S_FATAL_DEQ0_DRDY) & M_FATAL_DEQ0_DRDY)
+
+#define S_FATAL_OUT0_DRDY 26
+#define M_FATAL_OUT0_DRDY 0x7U
+#define V_FATAL_OUT0_DRDY(x) ((x) << S_FATAL_OUT0_DRDY)
+#define G_FATAL_OUT0_DRDY(x) (((x) >> S_FATAL_OUT0_DRDY) & M_FATAL_OUT0_DRDY)
+
+#define S_IMSG_DBG3_STUCK 25
+#define V_IMSG_DBG3_STUCK(x) ((x) << S_IMSG_DBG3_STUCK)
+#define F_IMSG_DBG3_STUCK V_IMSG_DBG3_STUCK(1U)
+
+#define S_IMSG_DBG2_STUCK 24
+#define V_IMSG_DBG2_STUCK(x) ((x) << S_IMSG_DBG2_STUCK)
+#define F_IMSG_DBG2_STUCK V_IMSG_DBG2_STUCK(1U)
+
+#define S_IMSG_DBG1_STUCK 23
+#define V_IMSG_DBG1_STUCK(x) ((x) << S_IMSG_DBG1_STUCK)
+#define F_IMSG_DBG1_STUCK V_IMSG_DBG1_STUCK(1U)
+
+#define S_IMSG_DBG0_STUCK 22
+#define V_IMSG_DBG0_STUCK(x) ((x) << S_IMSG_DBG0_STUCK)
+#define F_IMSG_DBG0_STUCK V_IMSG_DBG0_STUCK(1U)
+
+#define S_FATAL_DEQ1_DRDY 3
+#define M_FATAL_DEQ1_DRDY 0x3U
+#define V_FATAL_DEQ1_DRDY(x) ((x) << S_FATAL_DEQ1_DRDY)
+#define G_FATAL_DEQ1_DRDY(x) (((x) >> S_FATAL_DEQ1_DRDY) & M_FATAL_DEQ1_DRDY)
+
+#define S_FATAL_OUT1_DRDY 1
+#define M_FATAL_OUT1_DRDY 0x3U
+#define V_FATAL_OUT1_DRDY(x) ((x) << S_FATAL_OUT1_DRDY)
+#define G_FATAL_OUT1_DRDY(x) (((x) >> S_FATAL_OUT1_DRDY) & M_FATAL_OUT1_DRDY)
+
#define A_SGE_DOORBELL_THROTTLE_THRESHOLD 0x112c
#define S_THROTTLE_THRESHOLD_FL 16
@@ -2612,6 +2847,55 @@
#define V_DBPTBUFRSV0(x) ((x) << S_DBPTBUFRSV0)
#define G_DBPTBUFRSV0(x) (((x) >> S_DBPTBUFRSV0) & M_DBPTBUFRSV0)
+#define A_SGE_TBUF_CONTROL0 0x114c
+#define A_SGE_TBUF_CONTROL1 0x1150
+
+#define S_DBPTBUFRSV3 9
+#define M_DBPTBUFRSV3 0x1ffU
+#define V_DBPTBUFRSV3(x) ((x) << S_DBPTBUFRSV3)
+#define G_DBPTBUFRSV3(x) (((x) >> S_DBPTBUFRSV3) & M_DBPTBUFRSV3)
+
+#define S_DBPTBUFRSV2 0
+#define M_DBPTBUFRSV2 0x1ffU
+#define V_DBPTBUFRSV2(x) ((x) << S_DBPTBUFRSV2)
+#define G_DBPTBUFRSV2(x) (((x) >> S_DBPTBUFRSV2) & M_DBPTBUFRSV2)
+
+#define A_SGE_TBUF_CONTROL2 0x1154
+
+#define S_DBPTBUFRSV5 9
+#define M_DBPTBUFRSV5 0x1ffU
+#define V_DBPTBUFRSV5(x) ((x) << S_DBPTBUFRSV5)
+#define G_DBPTBUFRSV5(x) (((x) >> S_DBPTBUFRSV5) & M_DBPTBUFRSV5)
+
+#define S_DBPTBUFRSV4 0
+#define M_DBPTBUFRSV4 0x1ffU
+#define V_DBPTBUFRSV4(x) ((x) << S_DBPTBUFRSV4)
+#define G_DBPTBUFRSV4(x) (((x) >> S_DBPTBUFRSV4) & M_DBPTBUFRSV4)
+
+#define A_SGE_TBUF_CONTROL3 0x1158
+
+#define S_DBPTBUFRSV7 9
+#define M_DBPTBUFRSV7 0x1ffU
+#define V_DBPTBUFRSV7(x) ((x) << S_DBPTBUFRSV7)
+#define G_DBPTBUFRSV7(x) (((x) >> S_DBPTBUFRSV7) & M_DBPTBUFRSV7)
+
+#define S_DBPTBUFRSV6 0
+#define M_DBPTBUFRSV6 0x1ffU
+#define V_DBPTBUFRSV6(x) ((x) << S_DBPTBUFRSV6)
+#define G_DBPTBUFRSV6(x) (((x) >> S_DBPTBUFRSV6) & M_DBPTBUFRSV6)
+
+#define A_SGE_TBUF_CONTROL4 0x115c
+
+#define S_DBPTBUFRSV9 9
+#define M_DBPTBUFRSV9 0x1ffU
+#define V_DBPTBUFRSV9(x) ((x) << S_DBPTBUFRSV9)
+#define G_DBPTBUFRSV9(x) (((x) >> S_DBPTBUFRSV9) & M_DBPTBUFRSV9)
+
+#define S_DBPTBUFRSV8 0
+#define M_DBPTBUFRSV8 0x1ffU
+#define V_DBPTBUFRSV8(x) ((x) << S_DBPTBUFRSV8)
+#define G_DBPTBUFRSV8(x) (((x) >> S_DBPTBUFRSV8) & M_DBPTBUFRSV8)
+
#define A_SGE_PC0_REQ_BIST_CMD 0x1180
#define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
#define A_SGE_PC1_REQ_BIST_CMD 0x1190
@@ -2620,6 +2904,113 @@
#define A_SGE_PC0_RSP_BIST_ERROR_CNT 0x11a4
#define A_SGE_PC1_RSP_BIST_CMD 0x11b0
#define A_SGE_PC1_RSP_BIST_ERROR_CNT 0x11b4
+#define A_SGE_DBQ_TIMER_THRESH0 0x11b8
+
+#define S_TXTIMETH3 24
+#define M_TXTIMETH3 0x3fU
+#define V_TXTIMETH3(x) ((x) << S_TXTIMETH3)
+#define G_TXTIMETH3(x) (((x) >> S_TXTIMETH3) & M_TXTIMETH3)
+
+#define S_TXTIMETH2 16
+#define M_TXTIMETH2 0x3fU
+#define V_TXTIMETH2(x) ((x) << S_TXTIMETH2)
+#define G_TXTIMETH2(x) (((x) >> S_TXTIMETH2) & M_TXTIMETH2)
+
+#define S_TXTIMETH1 8
+#define M_TXTIMETH1 0x3fU
+#define V_TXTIMETH1(x) ((x) << S_TXTIMETH1)
+#define G_TXTIMETH1(x) (((x) >> S_TXTIMETH1) & M_TXTIMETH1)
+
+#define S_TXTIMETH0 0
+#define M_TXTIMETH0 0x3fU
+#define V_TXTIMETH0(x) ((x) << S_TXTIMETH0)
+#define G_TXTIMETH0(x) (((x) >> S_TXTIMETH0) & M_TXTIMETH0)
+
+#define A_SGE_DBQ_TIMER_THRESH1 0x11bc
+
+#define S_TXTIMETH7 24
+#define M_TXTIMETH7 0x3fU
+#define V_TXTIMETH7(x) ((x) << S_TXTIMETH7)
+#define G_TXTIMETH7(x) (((x) >> S_TXTIMETH7) & M_TXTIMETH7)
+
+#define S_TXTIMETH6 16
+#define M_TXTIMETH6 0x3fU
+#define V_TXTIMETH6(x) ((x) << S_TXTIMETH6)
+#define G_TXTIMETH6(x) (((x) >> S_TXTIMETH6) & M_TXTIMETH6)
+
+#define S_TXTIMETH5 8
+#define M_TXTIMETH5 0x3fU
+#define V_TXTIMETH5(x) ((x) << S_TXTIMETH5)
+#define G_TXTIMETH5(x) (((x) >> S_TXTIMETH5) & M_TXTIMETH5)
+
+#define S_TXTIMETH4 0
+#define M_TXTIMETH4 0x3fU
+#define V_TXTIMETH4(x) ((x) << S_TXTIMETH4)
+#define G_TXTIMETH4(x) (((x) >> S_TXTIMETH4) & M_TXTIMETH4)
+
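Each threshold register packs four 6-bit timer values. A minimal sketch of loading the first four with the V_ macros; t4_write_reg() is the driver accessor and the threshold values are illustrative:

	/* Sketch: program tx doorbell-queue timer thresholds 0-3. */
	static void
	set_dbq_thresh(struct adapter *sc, u_int t0, u_int t1, u_int t2, u_int t3)
	{
		t4_write_reg(sc, A_SGE_DBQ_TIMER_THRESH0,
		    V_TXTIMETH0(t0) | V_TXTIMETH1(t1) |
		    V_TXTIMETH2(t2) | V_TXTIMETH3(t3));
	}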
+#define A_SGE_DBQ_TIMER_CONFIG 0x11c0
+
+#define S_DBQ_TIMER_OP 0
+#define M_DBQ_TIMER_OP 0xffU
+#define V_DBQ_TIMER_OP(x) ((x) << S_DBQ_TIMER_OP)
+#define G_DBQ_TIMER_OP(x) (((x) >> S_DBQ_TIMER_OP) & M_DBQ_TIMER_OP)
+
+#define A_SGE_DBQ_TIMER_DBG 0x11c4
+
+#define S_DBQ_TIMER_CMD 31
+#define V_DBQ_TIMER_CMD(x) ((x) << S_DBQ_TIMER_CMD)
+#define F_DBQ_TIMER_CMD V_DBQ_TIMER_CMD(1U)
+
+#define S_DBQ_TIMER_INDEX 24
+#define M_DBQ_TIMER_INDEX 0x3fU
+#define V_DBQ_TIMER_INDEX(x) ((x) << S_DBQ_TIMER_INDEX)
+#define G_DBQ_TIMER_INDEX(x) (((x) >> S_DBQ_TIMER_INDEX) & M_DBQ_TIMER_INDEX)
+
+#define S_DBQ_TIMER_QCNT 0
+#define M_DBQ_TIMER_QCNT 0x1ffffU
+#define V_DBQ_TIMER_QCNT(x) ((x) << S_DBQ_TIMER_QCNT)
+#define G_DBQ_TIMER_QCNT(x) (((x) >> S_DBQ_TIMER_QCNT) & M_DBQ_TIMER_QCNT)
+
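A_SGE_DBQ_TIMER_DBG pairs a one-shot command bit with a 6-bit index and a 17-bit count. A hedged sketch of the usual write-then-poll pattern for such debug registers; the self-clearing handshake is an assumption drawn from the field names, not something this header documents:

/* Sketch under assumptions: CMD kicks off a lookup for the given index
 * and is assumed to self-clear once the QCNT field is valid. */
static u_int
read_dbq_timer_qcnt(struct adapter *sc, u_int idx)
{
	uint32_t v;

	t4_write_reg(sc, A_SGE_DBQ_TIMER_DBG,
	    F_DBQ_TIMER_CMD | V_DBQ_TIMER_INDEX(idx & M_DBQ_TIMER_INDEX));
	do {
		v = t4_read_reg(sc, A_SGE_DBQ_TIMER_DBG);
	} while (v & F_DBQ_TIMER_CMD);
	return (G_DBQ_TIMER_QCNT(v));
}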
+#define A_SGE_INT_CAUSE8 0x11c8
+
+#define S_TRACE_RXPERR 8
+#define V_TRACE_RXPERR(x) ((x) << S_TRACE_RXPERR)
+#define F_TRACE_RXPERR V_TRACE_RXPERR(1U)
+
+#define S_U3_RXPERR 7
+#define V_U3_RXPERR(x) ((x) << S_U3_RXPERR)
+#define F_U3_RXPERR V_U3_RXPERR(1U)
+
+#define S_U2_RXPERR 6
+#define V_U2_RXPERR(x) ((x) << S_U2_RXPERR)
+#define F_U2_RXPERR V_U2_RXPERR(1U)
+
+#define S_U1_RXPERR 5
+#define V_U1_RXPERR(x) ((x) << S_U1_RXPERR)
+#define F_U1_RXPERR V_U1_RXPERR(1U)
+
+#define S_U0_RXPERR 4
+#define V_U0_RXPERR(x) ((x) << S_U0_RXPERR)
+#define F_U0_RXPERR V_U0_RXPERR(1U)
+
+#define S_T3_RXPERR 3
+#define V_T3_RXPERR(x) ((x) << S_T3_RXPERR)
+#define F_T3_RXPERR V_T3_RXPERR(1U)
+
+#define S_T2_RXPERR 2
+#define V_T2_RXPERR(x) ((x) << S_T2_RXPERR)
+#define F_T2_RXPERR V_T2_RXPERR(1U)
+
+#define S_T1_RXPERR 1
+#define V_T1_RXPERR(x) ((x) << S_T1_RXPERR)
+#define F_T1_RXPERR V_T1_RXPERR(1U)
+
+#define S_T0_RXPERR 0
+#define V_T0_RXPERR(x) ((x) << S_T0_RXPERR)
+#define F_T0_RXPERR V_T0_RXPERR(1U)
+
+#define A_SGE_INT_ENABLE8 0x11cc
+#define A_SGE_PERR_ENABLE8 0x11d0
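The F_ constants above make the cause/enable pair cheap to service. A sketch of handling the new cause register, assuming the write-one-to-clear semantics that the driver's other SGE cause registers use:

/* Sketch: report and clear latched RX parity errors.  Assumes the
 * usual write-one-to-clear semantics of SGE interrupt cause registers. */
static void
sge_intr_cause8(struct adapter *sc)
{
	uint32_t cause = t4_read_reg(sc, A_SGE_INT_CAUSE8);

	if (cause & (F_T0_RXPERR | F_T1_RXPERR | F_T2_RXPERR | F_T3_RXPERR |
	    F_U0_RXPERR | F_U1_RXPERR | F_U2_RXPERR | F_U3_RXPERR |
	    F_TRACE_RXPERR))
		device_printf(sc->dev, "SGE RX parity error, cause 0x%08x\n",
		    cause);
	t4_write_reg(sc, A_SGE_INT_CAUSE8, cause);	/* clear */
}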
#define A_SGE_CTXT_CMD 0x11fc
#define S_BUSY 31
@@ -2648,6 +3039,17 @@
#define A_SGE_CTXT_DATA4 0x1210
#define A_SGE_CTXT_DATA5 0x1214
#define A_SGE_CTXT_DATA6 0x1218
+
+#define S_DATA_UNUSED 7
+#define M_DATA_UNUSED 0x1ffffffU
+#define V_DATA_UNUSED(x) ((x) << S_DATA_UNUSED)
+#define G_DATA_UNUSED(x) (((x) >> S_DATA_UNUSED) & M_DATA_UNUSED)
+
+#define S_DATA6 0
+#define M_DATA6 0x7fU
+#define V_DATA6(x) ((x) << S_DATA6)
+#define G_DATA6(x) (((x) >> S_DATA6) & M_DATA6)
+
#define A_SGE_CTXT_DATA7 0x121c
#define A_SGE_CTXT_MASK0 0x1220
#define A_SGE_CTXT_MASK1 0x1224
@@ -2656,6 +3058,17 @@
#define A_SGE_CTXT_MASK4 0x1230
#define A_SGE_CTXT_MASK5 0x1234
#define A_SGE_CTXT_MASK6 0x1238
+
+#define S_MASK_UNUSED 7
+#define M_MASK_UNUSED 0x1ffffffU
+#define V_MASK_UNUSED(x) ((x) << S_MASK_UNUSED)
+#define G_MASK_UNUSED(x) (((x) >> S_MASK_UNUSED) & M_MASK_UNUSED)
+
+#define S_MASK 0
+#define M_MASK 0x7fU
+#define V_MASK(x) ((x) << S_MASK)
+#define G_MASK(x) (((x) >> S_MASK) & M_MASK)
+
#define A_SGE_CTXT_MASK7 0x123c
#define A_SGE_QBASE_MAP0 0x1240
@@ -2674,6 +3087,10 @@
#define V_INGRESS0_SIZE(x) ((x) << S_INGRESS0_SIZE)
#define G_INGRESS0_SIZE(x) (((x) >> S_INGRESS0_SIZE) & M_INGRESS0_SIZE)
+#define S_DESTINATION 31
+#define V_DESTINATION(x) ((x) << S_DESTINATION)
+#define F_DESTINATION V_DESTINATION(1U)
+
#define A_SGE_QBASE_MAP1 0x1244
#define S_EGRESS0_BASE 0
@@ -2719,6 +3136,10 @@
#define V_FLMTHRESH(x) ((x) << S_FLMTHRESH)
#define G_FLMTHRESH(x) (((x) >> S_FLMTHRESH) & M_FLMTHRESH)
+#define S_CONENMIDDLE 7
+#define V_CONENMIDDLE(x) ((x) << S_CONENMIDDLE)
+#define F_CONENMIDDLE V_CONENMIDDLE(1U)
+
#define A_SGE_DEBUG_CONM 0x1258
#define S_MPS_CH_CNG 16
@@ -2745,6 +3166,16 @@
#define V_LAST_QID(x) ((x) << S_LAST_QID)
#define G_LAST_QID(x) (((x) >> S_LAST_QID) & M_LAST_QID)
+#define S_CH_CNG 16
+#define M_CH_CNG 0xffffU
+#define V_CH_CNG(x) ((x) << S_CH_CNG)
+#define G_CH_CNG(x) (((x) >> S_CH_CNG) & M_CH_CNG)
+
+#define S_CH_SEL 14
+#define M_CH_SEL 0x3U
+#define V_CH_SEL(x) ((x) << S_CH_SEL)
+#define G_CH_SEL(x) (((x) >> S_CH_SEL) & M_CH_SEL)
+
#define A_SGE_DBG_QUEUE_STAT0_CTRL 0x125c
#define S_IMSG_GTS_SEL 18
@@ -2766,6 +3197,7 @@
#define A_SGE_DBG_BAR2_PKT_CNT 0x126c
#define A_SGE_DBG_DB_PKT_CNT 0x1270
#define A_SGE_DBG_GTS_PKT_CNT 0x1274
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_16 0x1278
#define A_SGE_DEBUG_DATA_HIGH_INDEX_0 0x1280
#define S_CIM_WM 24
@@ -3965,6 +4397,352 @@
#define V_VFWCOFFSET(x) ((x) << S_VFWCOFFSET)
#define G_VFWCOFFSET(x) (((x) >> S_VFWCOFFSET) & M_VFWCOFFSET)
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_17 0x1340
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_18 0x1344
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_19 0x1348
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_20 0x134c
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_21 0x1350
+#define A_SGE_DEBUG_DATA_LOW_INDEX_16 0x1354
+#define A_SGE_DEBUG_DATA_LOW_INDEX_17 0x1358
+#define A_SGE_DEBUG_DATA_LOW_INDEX_18 0x135c
+#define A_SGE_INT_CAUSE7 0x1360
+
+#define S_HINT_FIFO_FULL 25
+#define V_HINT_FIFO_FULL(x) ((x) << S_HINT_FIFO_FULL)
+#define F_HINT_FIFO_FULL V_HINT_FIFO_FULL(1U)
+
+#define S_CERR_HINT_DELAY_FIFO 24
+#define V_CERR_HINT_DELAY_FIFO(x) ((x) << S_CERR_HINT_DELAY_FIFO)
+#define F_CERR_HINT_DELAY_FIFO V_CERR_HINT_DELAY_FIFO(1U)
+
+#define S_COAL_TIMER_FIFO_PERR 23
+#define V_COAL_TIMER_FIFO_PERR(x) ((x) << S_COAL_TIMER_FIFO_PERR)
+#define F_COAL_TIMER_FIFO_PERR V_COAL_TIMER_FIFO_PERR(1U)
+
+#define S_CMP_FIFO_PERR 22
+#define V_CMP_FIFO_PERR(x) ((x) << S_CMP_FIFO_PERR)
+#define F_CMP_FIFO_PERR V_CMP_FIFO_PERR(1U)
+
+#define S_SGE_IPP_FIFO_CERR 21
+#define V_SGE_IPP_FIFO_CERR(x) ((x) << S_SGE_IPP_FIFO_CERR)
+#define F_SGE_IPP_FIFO_CERR V_SGE_IPP_FIFO_CERR(1U)
+
+#define S_CERR_ING_CTXT_CACHE 20
+#define V_CERR_ING_CTXT_CACHE(x) ((x) << S_CERR_ING_CTXT_CACHE)
+#define F_CERR_ING_CTXT_CACHE V_CERR_ING_CTXT_CACHE(1U)
+
+#define S_IMSG_CNTX_PERR 19
+#define V_IMSG_CNTX_PERR(x) ((x) << S_IMSG_CNTX_PERR)
+#define F_IMSG_CNTX_PERR V_IMSG_CNTX_PERR(1U)
+
+#define S_PD_FIFO_PERR 18
+#define V_PD_FIFO_PERR(x) ((x) << S_PD_FIFO_PERR)
+#define F_PD_FIFO_PERR V_PD_FIFO_PERR(1U)
+
+#define S_IMSG_512_FIFO_PERR 17
+#define V_IMSG_512_FIFO_PERR(x) ((x) << S_IMSG_512_FIFO_PERR)
+#define F_IMSG_512_FIFO_PERR V_IMSG_512_FIFO_PERR(1U)
+
+#define S_CPLSW_FIFO_PERR 16
+#define V_CPLSW_FIFO_PERR(x) ((x) << S_CPLSW_FIFO_PERR)
+#define F_CPLSW_FIFO_PERR V_CPLSW_FIFO_PERR(1U)
+
+#define S_IMSG_FIFO_PERR 15
+#define V_IMSG_FIFO_PERR(x) ((x) << S_IMSG_FIFO_PERR)
+#define F_IMSG_FIFO_PERR V_IMSG_FIFO_PERR(1U)
+
+#define S_CERR_ITP_EVR 14
+#define V_CERR_ITP_EVR(x) ((x) << S_CERR_ITP_EVR)
+#define F_CERR_ITP_EVR V_CERR_ITP_EVR(1U)
+
+#define S_CERR_CONM_SRAM 13
+#define V_CERR_CONM_SRAM(x) ((x) << S_CERR_CONM_SRAM)
+#define F_CERR_CONM_SRAM V_CERR_CONM_SRAM(1U)
+
+#define S_CERR_EGR_CTXT_CACHE 12
+#define V_CERR_EGR_CTXT_CACHE(x) ((x) << S_CERR_EGR_CTXT_CACHE)
+#define F_CERR_EGR_CTXT_CACHE V_CERR_EGR_CTXT_CACHE(1U)
+
+#define S_CERR_FLM_CNTXMEM 11
+#define V_CERR_FLM_CNTXMEM(x) ((x) << S_CERR_FLM_CNTXMEM)
+#define F_CERR_FLM_CNTXMEM V_CERR_FLM_CNTXMEM(1U)
+
+#define S_CERR_FUNC_QBASE 10
+#define V_CERR_FUNC_QBASE(x) ((x) << S_CERR_FUNC_QBASE)
+#define F_CERR_FUNC_QBASE V_CERR_FUNC_QBASE(1U)
+
+#define S_IMSG_CNTX_CERR 9
+#define V_IMSG_CNTX_CERR(x) ((x) << S_IMSG_CNTX_CERR)
+#define F_IMSG_CNTX_CERR V_IMSG_CNTX_CERR(1U)
+
+#define S_PD_FIFO_CERR 8
+#define V_PD_FIFO_CERR(x) ((x) << S_PD_FIFO_CERR)
+#define F_PD_FIFO_CERR V_PD_FIFO_CERR(1U)
+
+#define S_IMSG_512_FIFO_CERR 7
+#define V_IMSG_512_FIFO_CERR(x) ((x) << S_IMSG_512_FIFO_CERR)
+#define F_IMSG_512_FIFO_CERR V_IMSG_512_FIFO_CERR(1U)
+
+#define S_CPLSW_FIFO_CERR 6
+#define V_CPLSW_FIFO_CERR(x) ((x) << S_CPLSW_FIFO_CERR)
+#define F_CPLSW_FIFO_CERR V_CPLSW_FIFO_CERR(1U)
+
+#define S_IMSG_FIFO_CERR 5
+#define V_IMSG_FIFO_CERR(x) ((x) << S_IMSG_FIFO_CERR)
+#define F_IMSG_FIFO_CERR V_IMSG_FIFO_CERR(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO3 4
+#define V_CERR_HEADERSPLIT_FIFO3(x) ((x) << S_CERR_HEADERSPLIT_FIFO3)
+#define F_CERR_HEADERSPLIT_FIFO3 V_CERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO2 3
+#define V_CERR_HEADERSPLIT_FIFO2(x) ((x) << S_CERR_HEADERSPLIT_FIFO2)
+#define F_CERR_HEADERSPLIT_FIFO2 V_CERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO1 2
+#define V_CERR_HEADERSPLIT_FIFO1(x) ((x) << S_CERR_HEADERSPLIT_FIFO1)
+#define F_CERR_HEADERSPLIT_FIFO1 V_CERR_HEADERSPLIT_FIFO1(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO0 1
+#define V_CERR_HEADERSPLIT_FIFO0(x) ((x) << S_CERR_HEADERSPLIT_FIFO0)
+#define F_CERR_HEADERSPLIT_FIFO0 V_CERR_HEADERSPLIT_FIFO0(1U)
+
+#define S_CERR_FLM_L1CACHE 0
+#define V_CERR_FLM_L1CACHE(x) ((x) << S_CERR_FLM_L1CACHE)
+#define F_CERR_FLM_L1CACHE V_CERR_FLM_L1CACHE(1U)
+
+#define A_SGE_INT_ENABLE7 0x1364
+#define A_SGE_PERR_ENABLE7 0x1368
+#define A_SGE_ING_COMP_COAL_CFG 0x1700
+
+#define S_USE_PTP_TIMER 27
+#define V_USE_PTP_TIMER(x) ((x) << S_USE_PTP_TIMER)
+#define F_USE_PTP_TIMER V_USE_PTP_TIMER(1U)
+
+#define S_IMSG_SET_OFLOW_ALL_ENTRIES_43060 26
+#define V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(x) ((x) << S_IMSG_SET_OFLOW_ALL_ENTRIES_43060)
+#define F_IMSG_SET_OFLOW_ALL_ENTRIES_43060 V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(1U)
+
+#define S_IMSG_STUCK_INDIRECT_QUEUE_42907 25
+#define V_IMSG_STUCK_INDIRECT_QUEUE_42907(x) ((x) << S_IMSG_STUCK_INDIRECT_QUEUE_42907)
+#define F_IMSG_STUCK_INDIRECT_QUEUE_42907 V_IMSG_STUCK_INDIRECT_QUEUE_42907(1U)
+
+#define S_COMP_COAL_PIDX_INCR 24
+#define V_COMP_COAL_PIDX_INCR(x) ((x) << S_COMP_COAL_PIDX_INCR)
+#define F_COMP_COAL_PIDX_INCR V_COMP_COAL_PIDX_INCR(1U)
+
+#define S_COMP_COAL_TIMER_CNT 16
+#define M_COMP_COAL_TIMER_CNT 0xffU
+#define V_COMP_COAL_TIMER_CNT(x) ((x) << S_COMP_COAL_TIMER_CNT)
+#define G_COMP_COAL_TIMER_CNT(x) (((x) >> S_COMP_COAL_TIMER_CNT) & M_COMP_COAL_TIMER_CNT)
+
+#define S_COMP_COAL_CNTR_TH 8
+#define M_COMP_COAL_CNTR_TH 0xffU
+#define V_COMP_COAL_CNTR_TH(x) ((x) << S_COMP_COAL_CNTR_TH)
+#define G_COMP_COAL_CNTR_TH(x) (((x) >> S_COMP_COAL_CNTR_TH) & M_COMP_COAL_CNTR_TH)
+
+#define S_COMP_COAL_OPCODE 0
+#define M_COMP_COAL_OPCODE 0xffU
+#define V_COMP_COAL_OPCODE(x) ((x) << S_COMP_COAL_OPCODE)
+#define G_COMP_COAL_OPCODE(x) (((x) >> S_COMP_COAL_OPCODE) & M_COMP_COAL_OPCODE)
+
+#define A_SGE_ING_IMSG_DBG 0x1704
+
+#define S_STUCK_CTR_TH 1
+#define M_STUCK_CTR_TH 0xffU
+#define V_STUCK_CTR_TH(x) ((x) << S_STUCK_CTR_TH)
+#define G_STUCK_CTR_TH(x) (((x) >> S_STUCK_CTR_TH) & M_STUCK_CTR_TH)
+
+#define S_STUCK_INT_EN 0
+#define V_STUCK_INT_EN(x) ((x) << S_STUCK_INT_EN)
+#define F_STUCK_INT_EN V_STUCK_INT_EN(1U)
+
+#define A_SGE_ING_IMSG_RSP0_DBG 0x1708
+
+#define S_IDMA1_QID 16
+#define M_IDMA1_QID 0xffffU
+#define V_IDMA1_QID(x) ((x) << S_IDMA1_QID)
+#define G_IDMA1_QID(x) (((x) >> S_IDMA1_QID) & M_IDMA1_QID)
+
+#define S_IDMA0_QID 0
+#define M_IDMA0_QID 0xffffU
+#define V_IDMA0_QID(x) ((x) << S_IDMA0_QID)
+#define G_IDMA0_QID(x) (((x) >> S_IDMA0_QID) & M_IDMA0_QID)
+
+#define A_SGE_ING_IMSG_RSP1_DBG 0x170c
+
+#define S_IDMA3_QID 16
+#define M_IDMA3_QID 0xffffU
+#define V_IDMA3_QID(x) ((x) << S_IDMA3_QID)
+#define G_IDMA3_QID(x) (((x) >> S_IDMA3_QID) & M_IDMA3_QID)
+
+#define S_IDMA2_QID 0
+#define M_IDMA2_QID 0xffffU
+#define V_IDMA2_QID(x) ((x) << S_IDMA2_QID)
+#define G_IDMA2_QID(x) (((x) >> S_IDMA2_QID) & M_IDMA2_QID)
+
+#define A_SGE_LB_MODE 0x1710
+
+#define S_LB_MODE 0
+#define M_LB_MODE 0x3U
+#define V_LB_MODE(x) ((x) << S_LB_MODE)
+#define G_LB_MODE(x) (((x) >> S_LB_MODE) & M_LB_MODE)
+
+#define A_SGE_IMSG_QUESCENT 0x1714
+
+#define S_IMSG_QUESCENT 0
+#define V_IMSG_QUESCENT(x) ((x) << S_IMSG_QUESCENT)
+#define F_IMSG_QUESCENT V_IMSG_QUESCENT(1U)
+
+#define A_SGE_LA_CTRL 0x1718
+
+#define S_LA_GLOBAL_EN 8
+#define V_LA_GLOBAL_EN(x) ((x) << S_LA_GLOBAL_EN)
+#define F_LA_GLOBAL_EN V_LA_GLOBAL_EN(1U)
+
+#define S_PTP_TIMESTAMP_SEL 7
+#define V_PTP_TIMESTAMP_SEL(x) ((x) << S_PTP_TIMESTAMP_SEL)
+#define F_PTP_TIMESTAMP_SEL V_PTP_TIMESTAMP_SEL(1U)
+
+#define S_CIM2SGE_ID_CHK_VLD 6
+#define V_CIM2SGE_ID_CHK_VLD(x) ((x) << S_CIM2SGE_ID_CHK_VLD)
+#define F_CIM2SGE_ID_CHK_VLD V_CIM2SGE_ID_CHK_VLD(1U)
+
+#define S_CPLSW_ID_CHK_VLD 5
+#define V_CPLSW_ID_CHK_VLD(x) ((x) << S_CPLSW_ID_CHK_VLD)
+#define F_CPLSW_ID_CHK_VLD V_CPLSW_ID_CHK_VLD(1U)
+
+#define S_FLM_ID_CHK_VLD 4
+#define V_FLM_ID_CHK_VLD(x) ((x) << S_FLM_ID_CHK_VLD)
+#define F_FLM_ID_CHK_VLD V_FLM_ID_CHK_VLD(1U)
+
+#define S_IQ_DBP_ID_CHK_VLD 3
+#define V_IQ_DBP_ID_CHK_VLD(x) ((x) << S_IQ_DBP_ID_CHK_VLD)
+#define F_IQ_DBP_ID_CHK_VLD V_IQ_DBP_ID_CHK_VLD(1U)
+
+#define S_UP_OBQ_ID_CHK_VLD 2
+#define V_UP_OBQ_ID_CHK_VLD(x) ((x) << S_UP_OBQ_ID_CHK_VLD)
+#define F_UP_OBQ_ID_CHK_VLD V_UP_OBQ_ID_CHK_VLD(1U)
+
+#define S_CIM_ID_CHK_VLD 1
+#define V_CIM_ID_CHK_VLD(x) ((x) << S_CIM_ID_CHK_VLD)
+#define F_CIM_ID_CHK_VLD V_CIM_ID_CHK_VLD(1U)
+
+#define S_DBP_ID_CHK_VLD 0
+#define V_DBP_ID_CHK_VLD(x) ((x) << S_DBP_ID_CHK_VLD)
+#define F_DBP_ID_CHK_VLD V_DBP_ID_CHK_VLD(1U)
+
+#define A_SGE_LA_CTRL_EQID_LOW 0x171c
+
+#define S_EQ_ID_CHK_LOW 0
+#define M_EQ_ID_CHK_LOW 0x1ffffU
+#define V_EQ_ID_CHK_LOW(x) ((x) << S_EQ_ID_CHK_LOW)
+#define G_EQ_ID_CHK_LOW(x) (((x) >> S_EQ_ID_CHK_LOW) & M_EQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_EQID_HIGH 0x1720
+
+#define S_EQ_ID_CHK_HIGH 0
+#define M_EQ_ID_CHK_HIGH 0x1ffffU
+#define V_EQ_ID_CHK_HIGH(x) ((x) << S_EQ_ID_CHK_HIGH)
+#define G_EQ_ID_CHK_HIGH(x) (((x) >> S_EQ_ID_CHK_HIGH) & M_EQ_ID_CHK_HIGH)
+
+#define A_SGE_LA_CTRL_IQID 0x1724
+
+#define S_IQ_ID_CHK_HIGH 16
+#define M_IQ_ID_CHK_HIGH 0xffffU
+#define V_IQ_ID_CHK_HIGH(x) ((x) << S_IQ_ID_CHK_HIGH)
+#define G_IQ_ID_CHK_HIGH(x) (((x) >> S_IQ_ID_CHK_HIGH) & M_IQ_ID_CHK_HIGH)
+
+#define S_IQ_ID_CHK_LOW 0
+#define M_IQ_ID_CHK_LOW 0xffffU
+#define V_IQ_ID_CHK_LOW(x) ((x) << S_IQ_ID_CHK_LOW)
+#define G_IQ_ID_CHK_LOW(x) (((x) >> S_IQ_ID_CHK_LOW) & M_IQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_LOW 0x1728
+
+#define S_TID_CHK_LOW 0
+#define M_TID_CHK_LOW 0xffffffU
+#define V_TID_CHK_LOW(x) ((x) << S_TID_CHK_LOW)
+#define G_TID_CHK_LOW(x) (((x) >> S_TID_CHK_LOW) & M_TID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_HIGH 0x172c
+
+#define S_TID_CHK_HIGH 0
+#define M_TID_CHK_HIGH 0xffffffU
+#define V_TID_CHK_HIGH(x) ((x) << S_TID_CHK_HIGH)
+#define G_TID_CHK_HIGH(x) (((x) >> S_TID_CHK_HIGH) & M_TID_CHK_HIGH)
+
+#define A_SGE_CFG_TP_ERR 0x173c
+
+#define S_TP_ERR_STATUS_CH3 30
+#define M_TP_ERR_STATUS_CH3 0x3U
+#define V_TP_ERR_STATUS_CH3(x) ((x) << S_TP_ERR_STATUS_CH3)
+#define G_TP_ERR_STATUS_CH3(x) (((x) >> S_TP_ERR_STATUS_CH3) & M_TP_ERR_STATUS_CH3)
+
+#define S_TP_ERR_STATUS_CH2 28
+#define M_TP_ERR_STATUS_CH2 0x3U
+#define V_TP_ERR_STATUS_CH2(x) ((x) << S_TP_ERR_STATUS_CH2)
+#define G_TP_ERR_STATUS_CH2(x) (((x) >> S_TP_ERR_STATUS_CH2) & M_TP_ERR_STATUS_CH2)
+
+#define S_TP_ERR_STATUS_CH1 26
+#define M_TP_ERR_STATUS_CH1 0x3U
+#define V_TP_ERR_STATUS_CH1(x) ((x) << S_TP_ERR_STATUS_CH1)
+#define G_TP_ERR_STATUS_CH1(x) (((x) >> S_TP_ERR_STATUS_CH1) & M_TP_ERR_STATUS_CH1)
+
+#define S_TP_ERR_STATUS_CH0 24
+#define M_TP_ERR_STATUS_CH0 0x3U
+#define V_TP_ERR_STATUS_CH0(x) ((x) << S_TP_ERR_STATUS_CH0)
+#define G_TP_ERR_STATUS_CH0(x) (((x) >> S_TP_ERR_STATUS_CH0) & M_TP_ERR_STATUS_CH0)
+
+#define S_CPL0_SIZE 16
+#define M_CPL0_SIZE 0xffU
+#define V_CPL0_SIZE(x) ((x) << S_CPL0_SIZE)
+#define G_CPL0_SIZE(x) (((x) >> S_CPL0_SIZE) & M_CPL0_SIZE)
+
+#define S_CPL1_SIZE 8
+#define M_CPL1_SIZE 0xffU
+#define V_CPL1_SIZE(x) ((x) << S_CPL1_SIZE)
+#define G_CPL1_SIZE(x) (((x) >> S_CPL1_SIZE) & M_CPL1_SIZE)
+
+#define S_SIZE_LATCH_CLR 3
+#define V_SIZE_LATCH_CLR(x) ((x) << S_SIZE_LATCH_CLR)
+#define F_SIZE_LATCH_CLR V_SIZE_LATCH_CLR(1U)
+
+#define S_EXT_LATCH_CLR 2
+#define V_EXT_LATCH_CLR(x) ((x) << S_EXT_LATCH_CLR)
+#define F_EXT_LATCH_CLR V_EXT_LATCH_CLR(1U)
+
+#define S_EXT_CHANGE_42875 1
+#define V_EXT_CHANGE_42875(x) ((x) << S_EXT_CHANGE_42875)
+#define F_EXT_CHANGE_42875 V_EXT_CHANGE_42875(1U)
+
+#define S_SIZE_CHANGE_42913 0
+#define V_SIZE_CHANGE_42913(x) ((x) << S_SIZE_CHANGE_42913)
+#define F_SIZE_CHANGE_42913 V_SIZE_CHANGE_42913(1U)
+
+#define A_SGE_CHNL0_CTX_ERROR_COUNT_PER_TID 0x1740
+#define A_SGE_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1744
+#define A_SGE_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1748
+#define A_SGE_CHNL3_CTX_ERROR_COUNT_PER_TID 0x174c
+#define A_SGE_CTX_ACC_CH0 0x1750
+
+#define S_RDMA_INV_HANDLING 24
+#define M_RDMA_INV_HANDLING 0x3U
+#define V_RDMA_INV_HANDLING(x) ((x) << S_RDMA_INV_HANDLING)
+#define G_RDMA_INV_HANDLING(x) (((x) >> S_RDMA_INV_HANDLING) & M_RDMA_INV_HANDLING)
+
+#define S_T7_TERMINATE_STATUS_EN 23
+#define V_T7_TERMINATE_STATUS_EN(x) ((x) << S_T7_TERMINATE_STATUS_EN)
+#define F_T7_TERMINATE_STATUS_EN V_T7_TERMINATE_STATUS_EN(1U)
+
+#define S_T7_DISABLE 22
+#define V_T7_DISABLE(x) ((x) << S_T7_DISABLE)
+#define F_T7_DISABLE V_T7_DISABLE(1U)
+
+#define A_SGE_CTX_ACC_CH1 0x1754
+#define A_SGE_CTX_ACC_CH2 0x1758
+#define A_SGE_CTX_ACC_CH3 0x175c
+#define A_SGE_CTX_BASE 0x1760
#define A_SGE_LA_RDPTR_0 0x1800
#define A_SGE_LA_RDDATA_0 0x1804
#define A_SGE_LA_WRPTR_0 0x1808
@@ -4296,6 +5074,11 @@
#define A_PCIE_INT_CAUSE 0x3004
#define A_PCIE_PERR_ENABLE 0x3008
+
+#define S_TGTTAGQCLIENT1PERR 29
+#define V_TGTTAGQCLIENT1PERR(x) ((x) << S_TGTTAGQCLIENT1PERR)
+#define F_TGTTAGQCLIENT1PERR V_TGTTAGQCLIENT1PERR(1U)
+
#define A_PCIE_PERR_INJECT 0x300c
#define S_IDE 0
@@ -4582,10 +5365,6 @@
#define V_LINKREQRSTPCIECRSTMODE(x) ((x) << S_LINKREQRSTPCIECRSTMODE)
#define F_LINKREQRSTPCIECRSTMODE V_LINKREQRSTPCIECRSTMODE(1U)
-#define S_T6_PIOSTOPEN 31
-#define V_T6_PIOSTOPEN(x) ((x) << S_T6_PIOSTOPEN)
-#define F_T6_PIOSTOPEN V_T6_PIOSTOPEN(1U)
-
#define A_PCIE_DMA_CTRL 0x3018
#define S_LITTLEENDIAN 7
@@ -4618,6 +5397,14 @@
#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
#define G_T6_TOTMAXTAG(x) (((x) >> S_T6_TOTMAXTAG) & M_T6_TOTMAXTAG)
+#define S_REG_VDM_ONLY 17
+#define V_REG_VDM_ONLY(x) ((x) << S_REG_VDM_ONLY)
+#define F_REG_VDM_ONLY V_REG_VDM_ONLY(1U)
+
+#define S_MULT_REQID_SUP 16
+#define V_MULT_REQID_SUP(x) ((x) << S_MULT_REQID_SUP)
+#define F_MULT_REQID_SUP V_MULT_REQID_SUP(1U)
+
#define A_PCIE_DMA_CFG 0x301c
#define S_MAXPYLDSIZE 28
@@ -4668,6 +5455,10 @@
#define V_DMADCASTFIRSTONLY(x) ((x) << S_DMADCASTFIRSTONLY)
#define F_DMADCASTFIRSTONLY V_DMADCASTFIRSTONLY(1U)
+#define S_ARMDCASTFIRSTONLY 7
+#define V_ARMDCASTFIRSTONLY(x) ((x) << S_ARMDCASTFIRSTONLY)
+#define F_ARMDCASTFIRSTONLY V_ARMDCASTFIRSTONLY(1U)
+
#define A_PCIE_DMA_STAT 0x3020
#define S_STATEREQ 28
@@ -4748,7 +5539,157 @@
#define G_PERSTTIMER(x) (((x) >> S_PERSTTIMER) & M_PERSTTIMER)
#define A_PCIE_CFG7 0x302c
+#define A_PCIE_INT_ENABLE_EXT 0x3030
+
+#define S_TCAMRSPERR 31
+#define V_TCAMRSPERR(x) ((x) << S_TCAMRSPERR)
+#define F_TCAMRSPERR V_TCAMRSPERR(1U)
+
+#define S_IPFORMQPERR 30
+#define V_IPFORMQPERR(x) ((x) << S_IPFORMQPERR)
+#define F_IPFORMQPERR V_IPFORMQPERR(1U)
+
+#define S_IPFORMQCERR 29
+#define V_IPFORMQCERR(x) ((x) << S_IPFORMQCERR)
+#define F_IPFORMQCERR V_IPFORMQCERR(1U)
+
+#define S_TRGT1GRPCERR 28
+#define V_TRGT1GRPCERR(x) ((x) << S_TRGT1GRPCERR)
+#define F_TRGT1GRPCERR V_TRGT1GRPCERR(1U)
+
+#define S_IPSOTCERR 27
+#define V_IPSOTCERR(x) ((x) << S_IPSOTCERR)
+#define F_IPSOTCERR V_IPSOTCERR(1U)
+
+#define S_IPRETRYCERR 26
+#define V_IPRETRYCERR(x) ((x) << S_IPRETRYCERR)
+#define F_IPRETRYCERR V_IPRETRYCERR(1U)
+
+#define S_IPRXDATAGRPCERR 25
+#define V_IPRXDATAGRPCERR(x) ((x) << S_IPRXDATAGRPCERR)
+#define F_IPRXDATAGRPCERR V_IPRXDATAGRPCERR(1U)
+
+#define S_IPRXHDRGRPCERR 24
+#define V_IPRXHDRGRPCERR(x) ((x) << S_IPRXHDRGRPCERR)
+#define F_IPRXHDRGRPCERR V_IPRXHDRGRPCERR(1U)
+
+#define S_A0ARBRSPORDFIFOPERR 19
+#define V_A0ARBRSPORDFIFOPERR(x) ((x) << S_A0ARBRSPORDFIFOPERR)
+#define F_A0ARBRSPORDFIFOPERR V_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_HRSPCERR 18
+#define V_HRSPCERR(x) ((x) << S_HRSPCERR)
+#define F_HRSPCERR V_HRSPCERR(1U)
+
+#define S_HREQRDCERR 17
+#define V_HREQRDCERR(x) ((x) << S_HREQRDCERR)
+#define F_HREQRDCERR V_HREQRDCERR(1U)
+
+#define S_HREQWRCERR 16
+#define V_HREQWRCERR(x) ((x) << S_HREQWRCERR)
+#define F_HREQWRCERR V_HREQWRCERR(1U)
+
+#define S_DRSPCERR 15
+#define V_DRSPCERR(x) ((x) << S_DRSPCERR)
+#define F_DRSPCERR V_DRSPCERR(1U)
+
+#define S_DREQRDCERR 14
+#define V_DREQRDCERR(x) ((x) << S_DREQRDCERR)
+#define F_DREQRDCERR V_DREQRDCERR(1U)
+
+#define S_DREQWRCERR 13
+#define V_DREQWRCERR(x) ((x) << S_DREQWRCERR)
+#define F_DREQWRCERR V_DREQWRCERR(1U)
+
+#define S_CRSPCERR 12
+#define V_CRSPCERR(x) ((x) << S_CRSPCERR)
+#define F_CRSPCERR V_CRSPCERR(1U)
+
+#define S_ARSPPERR 11
+#define V_ARSPPERR(x) ((x) << S_ARSPPERR)
+#define F_ARSPPERR V_ARSPPERR(1U)
+
+#define S_AREQRDPERR 10
+#define V_AREQRDPERR(x) ((x) << S_AREQRDPERR)
+#define F_AREQRDPERR V_AREQRDPERR(1U)
+
+#define S_AREQWRPERR 9
+#define V_AREQWRPERR(x) ((x) << S_AREQWRPERR)
+#define F_AREQWRPERR V_AREQWRPERR(1U)
+
+#define S_PIOREQGRPCERR 8
+#define V_PIOREQGRPCERR(x) ((x) << S_PIOREQGRPCERR)
+#define F_PIOREQGRPCERR V_PIOREQGRPCERR(1U)
+
+#define S_ARSPCERR 7
+#define V_ARSPCERR(x) ((x) << S_ARSPCERR)
+#define F_ARSPCERR V_ARSPCERR(1U)
+
+#define S_AREQRDCERR 6
+#define V_AREQRDCERR(x) ((x) << S_AREQRDCERR)
+#define F_AREQRDCERR V_AREQRDCERR(1U)
+
+#define S_AREQWRCERR 5
+#define V_AREQWRCERR(x) ((x) << S_AREQWRCERR)
+#define F_AREQWRCERR V_AREQWRCERR(1U)
+
+#define S_MARSPPERR 4
+#define V_MARSPPERR(x) ((x) << S_MARSPPERR)
+#define F_MARSPPERR V_MARSPPERR(1U)
+
+#define S_INICMAWDATAORDPERR 3
+#define V_INICMAWDATAORDPERR(x) ((x) << S_INICMAWDATAORDPERR)
+#define F_INICMAWDATAORDPERR V_INICMAWDATAORDPERR(1U)
+
+#define S_EMUPERR 2
+#define V_EMUPERR(x) ((x) << S_EMUPERR)
+#define F_EMUPERR V_EMUPERR(1U)
+
+#define S_ERRSPPERR 1
+#define V_ERRSPPERR(x) ((x) << S_ERRSPPERR)
+#define F_ERRSPPERR V_ERRSPPERR(1U)
+
+#define S_MSTGRPCERR 0
+#define V_MSTGRPCERR(x) ((x) << S_MSTGRPCERR)
+#define F_MSTGRPCERR V_MSTGRPCERR(1U)
+
+#define A_PCIE_INT_ENABLE_X8 0x3034
+
+#define S_X8TGTGRPPERR 23
+#define V_X8TGTGRPPERR(x) ((x) << S_X8TGTGRPPERR)
+#define F_X8TGTGRPPERR V_X8TGTGRPPERR(1U)
+
+#define S_X8IPSOTPERR 22
+#define V_X8IPSOTPERR(x) ((x) << S_X8IPSOTPERR)
+#define F_X8IPSOTPERR V_X8IPSOTPERR(1U)
+
+#define S_X8IPRETRYPERR 21
+#define V_X8IPRETRYPERR(x) ((x) << S_X8IPRETRYPERR)
+#define F_X8IPRETRYPERR V_X8IPRETRYPERR(1U)
+
+#define S_X8IPRXDATAGRPPERR 20
+#define V_X8IPRXDATAGRPPERR(x) ((x) << S_X8IPRXDATAGRPPERR)
+#define F_X8IPRXDATAGRPPERR V_X8IPRXDATAGRPPERR(1U)
+
+#define S_X8IPRXHDRGRPPERR 19
+#define V_X8IPRXHDRGRPPERR(x) ((x) << S_X8IPRXHDRGRPPERR)
+#define F_X8IPRXHDRGRPPERR V_X8IPRXHDRGRPPERR(1U)
+
+#define S_X8IPCORECERR 3
+#define V_X8IPCORECERR(x) ((x) << S_X8IPCORECERR)
+#define F_X8IPCORECERR V_X8IPCORECERR(1U)
+
+#define S_X8MSTGRPPERR 2
+#define V_X8MSTGRPPERR(x) ((x) << S_X8MSTGRPPERR)
+#define F_X8MSTGRPPERR V_X8MSTGRPPERR(1U)
+
+#define S_X8MSTGRPCERR 1
+#define V_X8MSTGRPCERR(x) ((x) << S_X8MSTGRPCERR)
+#define F_X8MSTGRPCERR V_X8MSTGRPCERR(1U)
+
+#define A_PCIE_INT_CAUSE_EXT 0x3038
#define A_PCIE_CMD_CTRL 0x303c
+#define A_PCIE_INT_CAUSE_X8 0x303c
#define A_PCIE_CMD_CFG 0x3040
#define S_MAXRSPCNT 16
@@ -4761,6 +5702,40 @@
#define V_MAXREQCNT(x) ((x) << S_MAXREQCNT)
#define G_MAXREQCNT(x) (((x) >> S_MAXREQCNT) & M_MAXREQCNT)
+#define A_PCIE_PERR_ENABLE_EXT 0x3040
+
+#define S_T7_ARSPPERR 18
+#define V_T7_ARSPPERR(x) ((x) << S_T7_ARSPPERR)
+#define F_T7_ARSPPERR V_T7_ARSPPERR(1U)
+
+#define S_T7_AREQRDPERR 17
+#define V_T7_AREQRDPERR(x) ((x) << S_T7_AREQRDPERR)
+#define F_T7_AREQRDPERR V_T7_AREQRDPERR(1U)
+
+#define S_T7_AREQWRPERR 16
+#define V_T7_AREQWRPERR(x) ((x) << S_T7_AREQWRPERR)
+#define F_T7_AREQWRPERR V_T7_AREQWRPERR(1U)
+
+#define S_T7_A0ARBRSPORDFIFOPERR 15
+#define V_T7_A0ARBRSPORDFIFOPERR(x) ((x) << S_T7_A0ARBRSPORDFIFOPERR)
+#define F_T7_A0ARBRSPORDFIFOPERR V_T7_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_T7_MARSPPERR 14
+#define V_T7_MARSPPERR(x) ((x) << S_T7_MARSPPERR)
+#define F_T7_MARSPPERR V_T7_MARSPPERR(1U)
+
+#define S_T7_INICMAWDATAORDPERR 13
+#define V_T7_INICMAWDATAORDPERR(x) ((x) << S_T7_INICMAWDATAORDPERR)
+#define F_T7_INICMAWDATAORDPERR V_T7_INICMAWDATAORDPERR(1U)
+
+#define S_T7_EMUPERR 12
+#define V_T7_EMUPERR(x) ((x) << S_T7_EMUPERR)
+#define F_T7_EMUPERR V_T7_EMUPERR(1U)
+
+#define S_T7_ERRSPPERR 11
+#define V_T7_ERRSPPERR(x) ((x) << S_T7_ERRSPPERR)
+#define F_T7_ERRSPPERR V_T7_ERRSPPERR(1U)
+
#define A_PCIE_CMD_STAT 0x3044
#define S_RSPCNT 16
@@ -4773,6 +5748,32 @@
#define V_REQCNT(x) ((x) << S_REQCNT)
#define G_REQCNT(x) (((x) >> S_REQCNT) & M_REQCNT)
+#define A_PCIE_PERR_ENABLE_X8 0x3044
+
+#define S_T7_X8TGTGRPPERR 28
+#define V_T7_X8TGTGRPPERR(x) ((x) << S_T7_X8TGTGRPPERR)
+#define F_T7_X8TGTGRPPERR V_T7_X8TGTGRPPERR(1U)
+
+#define S_T7_X8IPSOTPERR 27
+#define V_T7_X8IPSOTPERR(x) ((x) << S_T7_X8IPSOTPERR)
+#define F_T7_X8IPSOTPERR V_T7_X8IPSOTPERR(1U)
+
+#define S_T7_X8IPRETRYPERR 26
+#define V_T7_X8IPRETRYPERR(x) ((x) << S_T7_X8IPRETRYPERR)
+#define F_T7_X8IPRETRYPERR V_T7_X8IPRETRYPERR(1U)
+
+#define S_T7_X8IPRXDATAGRPPERR 25
+#define V_T7_X8IPRXDATAGRPPERR(x) ((x) << S_T7_X8IPRXDATAGRPPERR)
+#define F_T7_X8IPRXDATAGRPPERR V_T7_X8IPRXDATAGRPPERR(1U)
+
+#define S_T7_X8IPRXHDRGRPPERR 24
+#define V_T7_X8IPRXHDRGRPPERR(x) ((x) << S_T7_X8IPRXHDRGRPPERR)
+#define F_T7_X8IPRXHDRGRPPERR V_T7_X8IPRXHDRGRPPERR(1U)
+
+#define S_T7_X8MSTGRPPERR 0
+#define V_T7_X8MSTGRPPERR(x) ((x) << S_T7_X8MSTGRPPERR)
+#define F_T7_X8MSTGRPPERR V_T7_X8MSTGRPPERR(1U)
+
#define A_PCIE_HMA_CTRL 0x3050
#define S_IPLTSSM 12
@@ -4889,9 +5890,9 @@
#define V_T6_ENABLE(x) ((x) << S_T6_ENABLE)
#define F_T6_ENABLE V_T6_ENABLE(1U)
-#define S_T6_AI 30
-#define V_T6_AI(x) ((x) << S_T6_AI)
-#define F_T6_AI V_T6_AI(1U)
+#define S_T6_1_AI 30
+#define V_T6_1_AI(x) ((x) << S_T6_1_AI)
+#define F_T6_1_AI V_T6_1_AI(1U)
#define S_T6_CS2 29
#define V_T6_CS2(x) ((x) << S_T6_CS2)
@@ -4936,6 +5937,7 @@
#define V_MEMOFST(x) ((x) << S_MEMOFST)
#define G_MEMOFST(x) (((x) >> S_MEMOFST) & M_MEMOFST)
+#define A_T7_PCIE_MAILBOX_BASE_WIN 0x30a4
#define A_PCIE_MAILBOX_BASE_WIN 0x30a8
#define S_MBOXPCIEOFST 6
@@ -4953,7 +5955,21 @@
#define V_MBOXWIN(x) ((x) << S_MBOXWIN)
#define G_MBOXWIN(x) (((x) >> S_MBOXWIN) & M_MBOXWIN)
+#define A_PCIE_MAILBOX_OFFSET0 0x30a8
+
+#define S_MEMOFST0 3
+#define M_MEMOFST0 0x1fffffffU
+#define V_MEMOFST0(x) ((x) << S_MEMOFST0)
+#define G_MEMOFST0(x) (((x) >> S_MEMOFST0) & M_MEMOFST0)
+
#define A_PCIE_MAILBOX_OFFSET 0x30ac
+#define A_PCIE_MAILBOX_OFFSET1 0x30ac
+
+#define S_MEMOFST1 0
+#define M_MEMOFST1 0xfU
+#define V_MEMOFST1(x) ((x) << S_MEMOFST1)
+#define G_MEMOFST1(x) (((x) >> S_MEMOFST1) & M_MEMOFST1)
+
#define A_PCIE_MA_CTRL 0x30b0
#define S_MA_TAGFREE 29
@@ -5098,6 +6114,11 @@
#define V_STATIC_SPARE3(x) ((x) << S_STATIC_SPARE3)
#define G_STATIC_SPARE3(x) (((x) >> S_STATIC_SPARE3) & M_STATIC_SPARE3)
+#define S_T7_STATIC_SPARE3 0
+#define M_T7_STATIC_SPARE3 0x7fffU
+#define V_T7_STATIC_SPARE3(x) ((x) << S_T7_STATIC_SPARE3)
+#define G_T7_STATIC_SPARE3(x) (((x) >> S_T7_STATIC_SPARE3) & M_T7_STATIC_SPARE3)
+
#define A_PCIE_DBG_INDIR_REQ 0x30ec
#define S_DBGENABLE 31
@@ -5173,6 +6194,17 @@
#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
#define A_PCIE_PF_INT_CFG 0x3140
+
+#define S_T7_VECNUM 12
+#define M_T7_VECNUM 0x7ffU
+#define V_T7_VECNUM(x) ((x) << S_T7_VECNUM)
+#define G_T7_VECNUM(x) (((x) >> S_T7_VECNUM) & M_T7_VECNUM)
+
+#define S_T7_VECBASE 0
+#define M_T7_VECBASE 0xfffU
+#define V_T7_VECBASE(x) ((x) << S_T7_VECBASE)
+#define G_T7_VECBASE(x) (((x) >> S_T7_VECBASE) & M_T7_VECBASE)
+
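The T7_ prefix marks fields whose placement or width changed from earlier chips; here the PF interrupt vector base and count widen to 12 and 11 bits. A sketch of composing the T7 layout, with everything beyond the masks above assumed:

/* Sketch: program a PF's interrupt vector window using the widened
 * T7 fields.  Layout taken from the masks above; all else assumed. */
static void
set_pf_int_cfg(struct adapter *sc, u_int vecbase, u_int vecnum)
{
	t4_write_reg(sc, A_PCIE_PF_INT_CFG,
	    V_T7_VECBASE(vecbase & M_T7_VECBASE) |
	    V_T7_VECNUM(vecnum & M_T7_VECNUM));
}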
#define A_PCIE_PF_INT_CFG2 0x3144
#define A_PCIE_VF_INT_CFG 0x3180
#define A_PCIE_VF_INT_CFG2 0x3184
@@ -5198,6 +6230,20 @@
#define A_PCIE_VF_MSIX_EN_1 0x35c4
#define A_PCIE_VF_MSIX_EN_2 0x35c8
#define A_PCIE_VF_MSIX_EN_3 0x35cc
+#define A_PCIE_FID_PASID 0x35e0
+#define A_PCIE_FID_VFID_CTL 0x35e4
+
+#define S_T7_WRITE 0
+#define V_T7_WRITE(x) ((x) << S_T7_WRITE)
+#define F_T7_WRITE V_T7_WRITE(1U)
+
+#define A_T7_PCIE_FID_VFID_SEL 0x35e8
+
+#define S_T7_ADDR 2
+#define M_T7_ADDR 0x1fffU
+#define V_T7_ADDR(x) ((x) << S_T7_ADDR)
+#define G_T7_ADDR(x) (((x) >> S_T7_ADDR) & M_T7_ADDR)
+
#define A_PCIE_FID_VFID_SEL 0x35ec
#define S_FID_VFID_SEL_SELECT 0
@@ -5205,6 +6251,17 @@
#define V_FID_VFID_SEL_SELECT(x) ((x) << S_FID_VFID_SEL_SELECT)
#define G_FID_VFID_SEL_SELECT(x) (((x) >> S_FID_VFID_SEL_SELECT) & M_FID_VFID_SEL_SELECT)
+#define A_T7_PCIE_FID_VFID 0x35ec
+
+#define S_FID_VFID_NVMEGROUPEN 29
+#define V_FID_VFID_NVMEGROUPEN(x) ((x) << S_FID_VFID_NVMEGROUPEN)
+#define F_FID_VFID_NVMEGROUPEN V_FID_VFID_NVMEGROUPEN(1U)
+
+#define S_FID_VFID_GROUPSEL 25
+#define M_FID_VFID_GROUPSEL 0xfU
+#define V_FID_VFID_GROUPSEL(x) ((x) << S_FID_VFID_GROUPSEL)
+#define G_FID_VFID_GROUPSEL(x) (((x) >> S_FID_VFID_GROUPSEL) & M_FID_VFID_GROUPSEL)
+
#define A_PCIE_FID_VFID 0x3600
#define S_FID_VFID_SELECT 30
@@ -5264,6 +6321,227 @@
#define V_T6_FID_VFID_RVF(x) ((x) << S_T6_FID_VFID_RVF)
#define G_T6_FID_VFID_RVF(x) (((x) >> S_T6_FID_VFID_RVF) & M_T6_FID_VFID_RVF)
+#define A_PCIE_JBOF_NVME_HIGH_DW_START_ADDR 0x3600
+#define A_PCIE_JBOF_NVME_LOW_DW_START_ADDR 0x3604
+#define A_PCIE_JBOF_NVME_LENGTH 0x3608
+
+#define S_NVMEDISABLE 31
+#define V_NVMEDISABLE(x) ((x) << S_NVMEDISABLE)
+#define F_NVMEDISABLE V_NVMEDISABLE(1U)
+
+#define S_NVMELENGTH 0
+#define M_NVMELENGTH 0x3fffffffU
+#define V_NVMELENGTH(x) ((x) << S_NVMELENGTH)
+#define G_NVMELENGTH(x) (((x) >> S_NVMELENGTH) & M_NVMELENGTH)
+
+#define A_PCIE_JBOF_NVME_GROUP 0x360c
+
+#define S_NVMEGROUPSEL 0
+#define M_NVMEGROUPSEL 0xfU
+#define V_NVMEGROUPSEL(x) ((x) << S_NVMEGROUPSEL)
+#define G_NVMEGROUPSEL(x) (((x) >> S_NVMEGROUPSEL) & M_NVMEGROUPSEL)
+
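A_PCIE_JBOF_NVME_LENGTH folds a disable flag into the top bit above a 30-bit length. A sketch of programming one NVMe window; the high/low dword split and the sense of NVMEDISABLE are inferred from the register and field names only:

/* Sketch, semantics inferred from names only: point a JBOF NVMe
 * window at a 64-bit bus address, or park it disabled. */
static void
set_jbof_nvme_window(struct adapter *sc, uint64_t addr, uint32_t len,
    bool enable)
{
	t4_write_reg(sc, A_PCIE_JBOF_NVME_HIGH_DW_START_ADDR, addr >> 32);
	t4_write_reg(sc, A_PCIE_JBOF_NVME_LOW_DW_START_ADDR,
	    addr & 0xffffffffU);
	t4_write_reg(sc, A_PCIE_JBOF_NVME_LENGTH,
	    (enable ? 0 : F_NVMEDISABLE) | V_NVMELENGTH(len & M_NVMELENGTH));
}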
+#define A_T7_PCIE_MEM_ACCESS_BASE_WIN 0x3700
+#define A_PCIE_MEM_ACCESS_BASE_WIN1 0x3704
+
+#define S_PCIEOFST1 0
+#define M_PCIEOFST1 0xffU
+#define V_PCIEOFST1(x) ((x) << S_PCIEOFST1)
+#define G_PCIEOFST1(x) (((x) >> S_PCIEOFST1) & M_PCIEOFST1)
+
+#define A_PCIE_MEM_ACCESS_OFFSET0 0x3708
+#define A_PCIE_MEM_ACCESS_OFFSET1 0x370c
+#define A_PCIE_PTM_EP_EXT_STROBE 0x3804
+
+#define S_PTM_AUTO_UPDATE 1
+#define V_PTM_AUTO_UPDATE(x) ((x) << S_PTM_AUTO_UPDATE)
+#define F_PTM_AUTO_UPDATE V_PTM_AUTO_UPDATE(1U)
+
+#define S_PTM_EXT_STROBE 0
+#define V_PTM_EXT_STROBE(x) ((x) << S_PTM_EXT_STROBE)
+#define F_PTM_EXT_STROBE V_PTM_EXT_STROBE(1U)
+
+#define A_PCIE_PTM_EP_EXT_TIME0 0x3808
+#define A_PCIE_PTM_EP_EXT_TIME1 0x380c
+#define A_PCIE_PTM_MAN_UPD_PULSE 0x3810
+
+#define S_PTM_MAN_UPD_PULSE 0
+#define V_PTM_MAN_UPD_PULSE(x) ((x) << S_PTM_MAN_UPD_PULSE)
+#define F_PTM_MAN_UPD_PULSE V_PTM_MAN_UPD_PULSE(1U)
+
+#define A_PCIE_SWAP_DATA_B2L_X16 0x3814
+#define A_PCIE_PCIE_RC_RST 0x3818
+
+#define S_PERST 0
+#define V_PERST(x) ((x) << S_PERST)
+#define F_PERST V_PERST(1U)
+
+#define A_PCIE_PCIE_LN_CLKSEL 0x3880
+
+#define S_DS8_SEL 30
+#define M_DS8_SEL 0x3U
+#define V_DS8_SEL(x) ((x) << S_DS8_SEL)
+#define G_DS8_SEL(x) (((x) >> S_DS8_SEL) & M_DS8_SEL)
+
+#define S_DS7_SEL 28
+#define M_DS7_SEL 0x3U
+#define V_DS7_SEL(x) ((x) << S_DS7_SEL)
+#define G_DS7_SEL(x) (((x) >> S_DS7_SEL) & M_DS7_SEL)
+
+#define S_DS6_SEL 26
+#define M_DS6_SEL 0x3U
+#define V_DS6_SEL(x) ((x) << S_DS6_SEL)
+#define G_DS6_SEL(x) (((x) >> S_DS6_SEL) & M_DS6_SEL)
+
+#define S_DS5_SEL 24
+#define M_DS5_SEL 0x3U
+#define V_DS5_SEL(x) ((x) << S_DS5_SEL)
+#define G_DS5_SEL(x) (((x) >> S_DS5_SEL) & M_DS5_SEL)
+
+#define S_DS4_SEL 22
+#define M_DS4_SEL 0x3U
+#define V_DS4_SEL(x) ((x) << S_DS4_SEL)
+#define G_DS4_SEL(x) (((x) >> S_DS4_SEL) & M_DS4_SEL)
+
+#define S_DS3_SEL 20
+#define M_DS3_SEL 0x3U
+#define V_DS3_SEL(x) ((x) << S_DS3_SEL)
+#define G_DS3_SEL(x) (((x) >> S_DS3_SEL) & M_DS3_SEL)
+
+#define S_DS2_SEL 18
+#define M_DS2_SEL 0x3U
+#define V_DS2_SEL(x) ((x) << S_DS2_SEL)
+#define G_DS2_SEL(x) (((x) >> S_DS2_SEL) & M_DS2_SEL)
+
+#define S_DS1_SEL 16
+#define M_DS1_SEL 0x3U
+#define V_DS1_SEL(x) ((x) << S_DS1_SEL)
+#define G_DS1_SEL(x) (((x) >> S_DS1_SEL) & M_DS1_SEL)
+
+#define S_LN14_SEL 14
+#define M_LN14_SEL 0x3U
+#define V_LN14_SEL(x) ((x) << S_LN14_SEL)
+#define G_LN14_SEL(x) (((x) >> S_LN14_SEL) & M_LN14_SEL)
+
+#define S_LN12_SEL 12
+#define M_LN12_SEL 0x3U
+#define V_LN12_SEL(x) ((x) << S_LN12_SEL)
+#define G_LN12_SEL(x) (((x) >> S_LN12_SEL) & M_LN12_SEL)
+
+#define S_LN10_SEL 10
+#define M_LN10_SEL 0x3U
+#define V_LN10_SEL(x) ((x) << S_LN10_SEL)
+#define G_LN10_SEL(x) (((x) >> S_LN10_SEL) & M_LN10_SEL)
+
+#define S_LN8_SEL 8
+#define M_LN8_SEL 0x3U
+#define V_LN8_SEL(x) ((x) << S_LN8_SEL)
+#define G_LN8_SEL(x) (((x) >> S_LN8_SEL) & M_LN8_SEL)
+
+#define S_LN6_SEL 6
+#define M_LN6_SEL 0x3U
+#define V_LN6_SEL(x) ((x) << S_LN6_SEL)
+#define G_LN6_SEL(x) (((x) >> S_LN6_SEL) & M_LN6_SEL)
+
+#define S_LN4_SEL 4
+#define M_LN4_SEL 0x3U
+#define V_LN4_SEL(x) ((x) << S_LN4_SEL)
+#define G_LN4_SEL(x) (((x) >> S_LN4_SEL) & M_LN4_SEL)
+
+#define S_LN2_SEL 2
+#define M_LN2_SEL 0x3U
+#define V_LN2_SEL(x) ((x) << S_LN2_SEL)
+#define G_LN2_SEL(x) (((x) >> S_LN2_SEL) & M_LN2_SEL)
+
+#define S_LN0_SEL 0
+#define M_LN0_SEL 0x3U
+#define V_LN0_SEL(x) ((x) << S_LN0_SEL)
+#define G_LN0_SEL(x) (((x) >> S_LN0_SEL) & M_LN0_SEL)
+
+#define A_PCIE_PCIE_MSIX_EN 0x3884
+
+#define S_MSIX_ENABLE 0
+#define M_MSIX_ENABLE 0xffU
+#define V_MSIX_ENABLE(x) ((x) << S_MSIX_ENABLE)
+#define G_MSIX_ENABLE(x) (((x) >> S_MSIX_ENABLE) & M_MSIX_ENABLE)
+
+#define A_PCIE_LFSR_WRCTRL 0x3888
+
+#define S_WR_LFSR_CMP_DATA 16
+#define M_WR_LFSR_CMP_DATA 0xffffU
+#define V_WR_LFSR_CMP_DATA(x) ((x) << S_WR_LFSR_CMP_DATA)
+#define G_WR_LFSR_CMP_DATA(x) (((x) >> S_WR_LFSR_CMP_DATA) & M_WR_LFSR_CMP_DATA)
+
+#define S_WR_LFSR_RSVD 2
+#define M_WR_LFSR_RSVD 0x3fffU
+#define V_WR_LFSR_RSVD(x) ((x) << S_WR_LFSR_RSVD)
+#define G_WR_LFSR_RSVD(x) (((x) >> S_WR_LFSR_RSVD) & M_WR_LFSR_RSVD)
+
+#define S_WR_LFSR_EN 1
+#define V_WR_LFSR_EN(x) ((x) << S_WR_LFSR_EN)
+#define F_WR_LFSR_EN V_WR_LFSR_EN(1U)
+
+#define S_WR_LFSR_START 0
+#define V_WR_LFSR_START(x) ((x) << S_WR_LFSR_START)
+#define F_WR_LFSR_START V_WR_LFSR_START(1U)
+
+#define A_PCIE_LFSR_RDCTRL 0x388c
+
+#define S_CMD_LFSR_CMP_DATA 24
+#define M_CMD_LFSR_CMP_DATA 0xffU
+#define V_CMD_LFSR_CMP_DATA(x) ((x) << S_CMD_LFSR_CMP_DATA)
+#define G_CMD_LFSR_CMP_DATA(x) (((x) >> S_CMD_LFSR_CMP_DATA) & M_CMD_LFSR_CMP_DATA)
+
+#define S_RD_LFSR_CMD_DATA 16
+#define M_RD_LFSR_CMD_DATA 0xffU
+#define V_RD_LFSR_CMD_DATA(x) ((x) << S_RD_LFSR_CMD_DATA)
+#define G_RD_LFSR_CMD_DATA(x) (((x) >> S_RD_LFSR_CMD_DATA) & M_RD_LFSR_CMD_DATA)
+
+#define S_RD_LFSR_RSVD 10
+#define M_RD_LFSR_RSVD 0x3fU
+#define V_RD_LFSR_RSVD(x) ((x) << S_RD_LFSR_RSVD)
+#define G_RD_LFSR_RSVD(x) (((x) >> S_RD_LFSR_RSVD) & M_RD_LFSR_RSVD)
+
+#define S_RD3_LFSR_EN 9
+#define V_RD3_LFSR_EN(x) ((x) << S_RD3_LFSR_EN)
+#define F_RD3_LFSR_EN V_RD3_LFSR_EN(1U)
+
+#define S_RD3_LFSR_START 8
+#define V_RD3_LFSR_START(x) ((x) << S_RD3_LFSR_START)
+#define F_RD3_LFSR_START V_RD3_LFSR_START(1U)
+
+#define S_RD2_LFSR_EN 7
+#define V_RD2_LFSR_EN(x) ((x) << S_RD2_LFSR_EN)
+#define F_RD2_LFSR_EN V_RD2_LFSR_EN(1U)
+
+#define S_RD2_LFSR_START 6
+#define V_RD2_LFSR_START(x) ((x) << S_RD2_LFSR_START)
+#define F_RD2_LFSR_START V_RD2_LFSR_START(1U)
+
+#define S_RD1_LFSR_EN 5
+#define V_RD1_LFSR_EN(x) ((x) << S_RD1_LFSR_EN)
+#define F_RD1_LFSR_EN V_RD1_LFSR_EN(1U)
+
+#define S_RD1_LFSR_START 4
+#define V_RD1_LFSR_START(x) ((x) << S_RD1_LFSR_START)
+#define F_RD1_LFSR_START V_RD1_LFSR_START(1U)
+
+#define S_RD0_LFSR_EN 3
+#define V_RD0_LFSR_EN(x) ((x) << S_RD0_LFSR_EN)
+#define F_RD0_LFSR_EN V_RD0_LFSR_EN(1U)
+
+#define S_RD0_LFSR_START 2
+#define V_RD0_LFSR_START(x) ((x) << S_RD0_LFSR_START)
+#define F_RD0_LFSR_START V_RD0_LFSR_START(1U)
+
+#define S_CMD_LFSR_EN 1
+#define V_CMD_LFSR_EN(x) ((x) << S_CMD_LFSR_EN)
+#define F_CMD_LFSR_EN V_CMD_LFSR_EN(1U)
+
+#define S_CMD_LFSR_START 0
+#define V_CMD_LFSR_START(x) ((x) << S_CMD_LFSR_START)
+#define F_CMD_LFSR_START V_CMD_LFSR_START(1U)
+
#define A_PCIE_FID 0x3900
#define S_PAD 11
@@ -5280,6 +6558,309 @@
#define V_FUNC(x) ((x) << S_FUNC)
#define G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
+#define A_PCIE_EMU_ADDR 0x3900
+
+#define S_EMU_ADDR 0
+#define M_EMU_ADDR 0x1ffU
+#define V_EMU_ADDR(x) ((x) << S_EMU_ADDR)
+#define G_EMU_ADDR(x) (((x) >> S_EMU_ADDR) & M_EMU_ADDR)
+
+#define A_PCIE_EMU_CFG 0x3904
+
+#define S_EMUENABLE 16
+#define V_EMUENABLE(x) ((x) << S_EMUENABLE)
+#define F_EMUENABLE V_EMUENABLE(1U)
+
+#define S_EMUTYPE 14
+#define M_EMUTYPE 0x3U
+#define V_EMUTYPE(x) ((x) << S_EMUTYPE)
+#define G_EMUTYPE(x) (((x) >> S_EMUTYPE) & M_EMUTYPE)
+
+#define S_BAR0TARGET 12
+#define M_BAR0TARGET 0x3U
+#define V_BAR0TARGET(x) ((x) << S_BAR0TARGET)
+#define G_BAR0TARGET(x) (((x) >> S_BAR0TARGET) & M_BAR0TARGET)
+
+#define S_BAR2TARGET 10
+#define M_BAR2TARGET 0x3U
+#define V_BAR2TARGET(x) ((x) << S_BAR2TARGET)
+#define G_BAR2TARGET(x) (((x) >> S_BAR2TARGET) & M_BAR2TARGET)
+
+#define S_BAR4TARGET 8
+#define M_BAR4TARGET 0x3U
+#define V_BAR4TARGET(x) ((x) << S_BAR4TARGET)
+#define G_BAR4TARGET(x) (((x) >> S_BAR4TARGET) & M_BAR4TARGET)
+
+#define S_RELEATIVEEMUID 0
+#define M_RELEATIVEEMUID 0xffU
+#define V_RELEATIVEEMUID(x) ((x) << S_RELEATIVEEMUID)
+#define G_RELEATIVEEMUID(x) (((x) >> S_RELEATIVEEMUID) & M_RELEATIVEEMUID)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET0_BAR0 0x3910
+
+#define S_T7_MEMOFST0 0
+#define M_T7_MEMOFST0 0xfffffffU
+#define V_T7_MEMOFST0(x) ((x) << S_T7_MEMOFST0)
+#define G_T7_MEMOFST0(x) (((x) >> S_T7_MEMOFST0) & M_T7_MEMOFST0)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG0_BAR0 0x3914
+
+#define S_SIZE0 0
+#define M_SIZE0 0x1fU
+#define V_SIZE0(x) ((x) << S_SIZE0)
+#define G_SIZE0(x) (((x) >> S_SIZE0) & M_SIZE0)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET1_BAR0 0x3918
+
+#define S_T7_MEMOFST1 0
+#define M_T7_MEMOFST1 0xfffffffU
+#define V_T7_MEMOFST1(x) ((x) << S_T7_MEMOFST1)
+#define G_T7_MEMOFST1(x) (((x) >> S_T7_MEMOFST1) & M_T7_MEMOFST1)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG1_BAR0 0x391c
+
+#define S_SIZE1 0
+#define M_SIZE1 0x1fU
+#define V_SIZE1(x) ((x) << S_SIZE1)
+#define G_SIZE1(x) (((x) >> S_SIZE1) & M_SIZE1)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET2_BAR0 0x3920
+
+#define S_MEMOFST2 0
+#define M_MEMOFST2 0xfffffffU
+#define V_MEMOFST2(x) ((x) << S_MEMOFST2)
+#define G_MEMOFST2(x) (((x) >> S_MEMOFST2) & M_MEMOFST2)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG2_BAR0 0x3924
+
+#define S_SIZE2 0
+#define M_SIZE2 0x1fU
+#define V_SIZE2(x) ((x) << S_SIZE2)
+#define G_SIZE2(x) (((x) >> S_SIZE2) & M_SIZE2)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET3_BAR0 0x3928
+
+#define S_MEMOFST3 0
+#define M_MEMOFST3 0xfffffffU
+#define V_MEMOFST3(x) ((x) << S_MEMOFST3)
+#define G_MEMOFST3(x) (((x) >> S_MEMOFST3) & M_MEMOFST3)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG3_BAR0 0x392c
+
+#define S_SIZE3 0
+#define M_SIZE3 0x1fU
+#define V_SIZE3(x) ((x) << S_SIZE3)
+#define G_SIZE3(x) (((x) >> S_SIZE3) & M_SIZE3)
+
+#define A_PCIE_TCAM_DATA 0x3970
+#define A_PCIE_TCAM_CTL 0x3974
+
+#define S_TCAMADDR 8
+#define M_TCAMADDR 0x3ffU
+#define V_TCAMADDR(x) ((x) << S_TCAMADDR)
+#define G_TCAMADDR(x) (((x) >> S_TCAMADDR) & M_TCAMADDR)
+
+#define S_CAMEN 0
+#define V_CAMEN(x) ((x) << S_CAMEN)
+#define F_CAMEN V_CAMEN(1U)
+
+#define A_PCIE_TCAM_DBG 0x3978
+
+#define S_CBPASS 24
+#define V_CBPASS(x) ((x) << S_CBPASS)
+#define F_CBPASS V_CBPASS(1U)
+
+#define S_CBBUSY 20
+#define V_CBBUSY(x) ((x) << S_CBBUSY)
+#define F_CBBUSY V_CBBUSY(1U)
+
+#define S_CBSTART 17
+#define V_CBSTART(x) ((x) << S_CBSTART)
+#define F_CBSTART V_CBSTART(1U)
+
+#define S_RSTCB 16
+#define V_RSTCB(x) ((x) << S_RSTCB)
+#define F_RSTCB V_RSTCB(1U)
+
+#define S_TCAM_DBG_DATA 0
+#define M_TCAM_DBG_DATA 0xffffU
+#define V_TCAM_DBG_DATA(x) ((x) << S_TCAM_DBG_DATA)
+#define G_TCAM_DBG_DATA(x) (((x) >> S_TCAM_DBG_DATA) & M_TCAM_DBG_DATA)
+
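The TCAM pair above looks like a classic indirect access window: data in one register, address plus a go bit in the other. A sketch of one write, with the sequencing entirely assumed:

/* Sketch only; the DATA-then-CTL ordering and CAMEN acting as the
 * trigger are assumptions drawn from the field names. */
static void
pcie_tcam_write(struct adapter *sc, u_int addr, uint32_t data)
{
	t4_write_reg(sc, A_PCIE_TCAM_DATA, data);
	t4_write_reg(sc, A_PCIE_TCAM_CTL,
	    V_TCAMADDR(addr & M_TCAMADDR) | F_CAMEN);
}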
+#define A_PCIE_TEST_CTRL0 0x3980
+#define A_PCIE_TEST_CTRL1 0x3984
+#define A_PCIE_TEST_CTRL2 0x3988
+#define A_PCIE_TEST_CTRL3 0x398c
+#define A_PCIE_TEST_STS0 0x3990
+#define A_PCIE_TEST_STS1 0x3994
+#define A_PCIE_TEST_STS2 0x3998
+#define A_PCIE_TEST_STS3 0x399c
+#define A_PCIE_X8_CORE_ACK_LATENCY_TIMER_REPLAY_TIMER 0x4700
+#define A_PCIE_X8_CORE_VENDOR_SPECIFIC_DLLP 0x4704
+#define A_PCIE_X8_CORE_PORT_FORCE_LINK 0x4708
+#define A_PCIE_X8_CORE_ACK_FREQUENCY_L0L1_ASPM_CONTROL 0x470c
+#define A_PCIE_X8_CORE_PORT_LINK_CONTROL 0x4710
+#define A_PCIE_X8_CORE_LANE_SKEW 0x4714
+#define A_PCIE_X8_CORE_SYMBOL_NUMBER 0x4718
+#define A_PCIE_X8_CORE_SYMBOL_TIMER_FILTER_MASK1 0x471c
+#define A_PCIE_X8_CORE_FILTER_MASK2 0x4720
+#define A_PCIE_X8_CORE_DEBUG_0 0x4728
+#define A_PCIE_X8_CORE_DEBUG_1 0x472c
+#define A_PCIE_X8_CORE_TRANSMIT_POSTED_FC_CREDIT_STATUS 0x4730
+#define A_PCIE_X8_CORE_TRANSMIT_NONPOSTED_FC_CREDIT_STATUS 0x4734
+#define A_PCIE_X8_CORE_TRANSMIT_COMPLETION_FC_CREDIT_STATUS 0x4738
+#define A_PCIE_X8_CORE_QUEUE_STATUS 0x473c
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_1 0x4740
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_2 0x4744
+#define A_PCIE_X8_CORE_VC0_POSTED_RECEIVE_QUEUE_CONTROL 0x4748
+#define A_PCIE_X8_CORE_VC0_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x474c
+#define A_PCIE_X8_CORE_VC0_COMPLETION_RECEIVE_QUEUE_CONTROL 0x4750
+#define A_PCIE_X8_CORE_VC1_POSTED_RECEIVE_QUEUE_CONTROL 0x4754
+#define A_PCIE_X8_CORE_VC1_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x4758
+#define A_PCIE_X8_CORE_VC1_COMPLETION_RECEIVE_QUEUE_CONTROL 0x475c
+#define A_PCIE_X8_CORE_LINK_WIDTH_SPEED_CHANGE 0x480c
+#define A_PCIE_X8_CORE_PHY_STATUS 0x4810
+#define A_PCIE_X8_CORE_PHY_CONTROL 0x4814
+#define A_PCIE_X8_CORE_GEN3_CONTROL 0x4890
+#define A_PCIE_X8_CORE_GEN3_EQ_FS_LF 0x4894
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_COEFF 0x4898
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_INDEX 0x489c
+#define A_PCIE_X8_CORE_GEN3_EQ_STATUS 0x48a4
+#define A_PCIE_X8_CORE_GEN3_EQ_CONTROL 0x48a8
+#define A_PCIE_X8_CORE_GEN3_EQ_DIRCHANGE_FEEDBACK 0x48ac
+#define A_PCIE_X8_CORE_PIPE_CONTROL 0x48b8
+#define A_PCIE_X8_CORE_DBI_RO_WE 0x48bc
+#define A_PCIE_X8_CFG_SPACE_REQ 0x48c0
+#define A_PCIE_X8_CFG_SPACE_DATA 0x48c4
+#define A_PCIE_X8_CFG_MPS_MRS 0x4900
+
+#define S_MRS 3
+#define M_MRS 0x7U
+#define V_MRS(x) ((x) << S_MRS)
+#define G_MRS(x) (((x) >> S_MRS) & M_MRS)
+
+#define S_T7_MPS 0
+#define M_T7_MPS 0x7U
+#define V_T7_MPS(x) ((x) << S_T7_MPS)
+#define G_T7_MPS(x) (((x) >> S_T7_MPS) & M_T7_MPS)
+
+#define A_PCIE_X8_CFG_ATTRIBUTES 0x4904
+
+#define S_T7_DCAEN 2
+#define V_T7_DCAEN(x) ((x) << S_T7_DCAEN)
+#define F_T7_DCAEN V_T7_DCAEN(1U)
+
+#define S_DCASTFITTRAONLEN 1
+#define V_DCASTFITTRAONLEN(x) ((x) << S_DCASTFITTRAONLEN)
+#define F_DCASTFITTRAONLEN V_DCASTFITTRAONLEN(1U)
+
+#define S_REQCTLDYNSTCLKEN 0
+#define V_REQCTLDYNSTCLKEN(x) ((x) << S_REQCTLDYNSTCLKEN)
+#define F_REQCTLDYNSTCLKEN V_REQCTLDYNSTCLKEN(1U)
+
+#define A_PCIE_X8_CFG_LTSSM 0x4908
+
+#define S_APP_LTSSM_ENABLE 0
+#define V_APP_LTSSM_ENABLE(x) ((x) << S_APP_LTSSM_ENABLE)
+#define F_APP_LTSSM_ENABLE V_APP_LTSSM_ENABLE(1U)
+
+#define A_PCIE_ARM_REQUESTER_ID_X8 0x490c
+
+#define S_A1_RSVD1 24
+#define M_A1_RSVD1 0xffU
+#define V_A1_RSVD1(x) ((x) << S_A1_RSVD1)
+#define G_A1_RSVD1(x) (((x) >> S_A1_RSVD1) & M_A1_RSVD1)
+
+#define S_A1_PRIMBUSNUMBER 16
+#define M_A1_PRIMBUSNUMBER 0xffU
+#define V_A1_PRIMBUSNUMBER(x) ((x) << S_A1_PRIMBUSNUMBER)
+#define G_A1_PRIMBUSNUMBER(x) (((x) >> S_A1_PRIMBUSNUMBER) & M_A1_PRIMBUSNUMBER)
+
+#define S_A1_REQUESTERID 0
+#define M_A1_REQUESTERID 0xffffU
+#define V_A1_REQUESTERID(x) ((x) << S_A1_REQUESTERID)
+#define G_A1_REQUESTERID(x) (((x) >> S_A1_REQUESTERID) & M_A1_REQUESTERID)
+
+#define A_PCIE_SWAP_DATA_B2L_X8 0x4910
+
+#define S_CFGRD_SWAP_EN 1
+#define V_CFGRD_SWAP_EN(x) ((x) << S_CFGRD_SWAP_EN)
+#define F_CFGRD_SWAP_EN V_CFGRD_SWAP_EN(1U)
+
+#define S_CFGWR_SWAP_EN 0
+#define V_CFGWR_SWAP_EN(x) ((x) << S_CFGWR_SWAP_EN)
+#define F_CFGWR_SWAP_EN V_CFGWR_SWAP_EN(1U)
+
+#define A_PCIE_PDEBUG_DATA0_X8 0x4914
+#define A_PCIE_PDEBUG_DATA1_X8 0x4918
+#define A_PCIE_PDEBUG_DATA2_X8 0x491c
+#define A_PCIE_PDEBUG_CTRL_X8 0x4920
+#define A_PCIE_PDEBUG_DATA_X8 0x4924
+#define A_PCIE_SPARE_REGISTER_SPACES_X8 0x4ffc
+#define A_PCIE_PIPE_LANE0_REG0 0x5500
+#define A_PCIE_PIPE_LANE0_REG1 0x5504
+#define A_PCIE_PIPE_LANE0_REG2 0x5508
+#define A_PCIE_PIPE_LANE0_REG3 0x550c
+#define A_PCIE_PIPE_LANE1_REG0 0x5510
+#define A_PCIE_PIPE_LANE1_REG1 0x5514
+#define A_PCIE_PIPE_LANE1_REG2 0x5518
+#define A_PCIE_PIPE_LANE1_REG3 0x551c
+#define A_PCIE_PIPE_LANE2_REG0 0x5520
+#define A_PCIE_PIPE_LANE2_REG1 0x5524
+#define A_PCIE_PIPE_LANE2_REG2 0x5528
+#define A_PCIE_PIPE_LANE2_REG3 0x552c
+#define A_PCIE_PIPE_LANE3_REG0 0x5530
+#define A_PCIE_PIPE_LANE3_REG1 0x5534
+#define A_PCIE_PIPE_LANE3_REG2 0x5538
+#define A_PCIE_PIPE_LANE3_REG3 0x553c
+#define A_PCIE_PIPE_LANE4_REG0 0x5540
+#define A_PCIE_PIPE_LANE4_REG1 0x5544
+#define A_PCIE_PIPE_LANE4_REG2 0x5548
+#define A_PCIE_PIPE_LANE4_REG3 0x554c
+#define A_PCIE_PIPE_LANE5_REG0 0x5550
+#define A_PCIE_PIPE_LANE5_REG1 0x5554
+#define A_PCIE_PIPE_LANE5_REG2 0x5558
+#define A_PCIE_PIPE_LANE5_REG3 0x555c
+#define A_PCIE_PIPE_LANE6_REG0 0x5560
+#define A_PCIE_PIPE_LANE6_REG1 0x5564
+#define A_PCIE_PIPE_LANE6_REG2 0x5568
+#define A_PCIE_PIPE_LANE6_REG3 0x556c
+#define A_PCIE_PIPE_LANE7_REG0 0x5570
+#define A_PCIE_PIPE_LANE7_REG1 0x5574
+#define A_PCIE_PIPE_LANE7_REG2 0x5578
+#define A_PCIE_PIPE_LANE7_REG3 0x557c
+#define A_PCIE_PIPE_LANE8_REG0 0x5580
+#define A_PCIE_PIPE_LANE8_REG1 0x5584
+#define A_PCIE_PIPE_LANE8_REG2 0x5588
+#define A_PCIE_PIPE_LANE8_REG3 0x558c
+#define A_PCIE_PIPE_LANE9_REG0 0x5590
+#define A_PCIE_PIPE_LANE9_REG1 0x5594
+#define A_PCIE_PIPE_LANE9_REG2 0x5598
+#define A_PCIE_PIPE_LANE9_REG3 0x559c
+#define A_PCIE_PIPE_LANE10_REG0 0x55a0
+#define A_PCIE_PIPE_LANE10_REG1 0x55a4
+#define A_PCIE_PIPE_LANE10_REG2 0x55a8
+#define A_PCIE_PIPE_LANE10_REG3 0x55ac
+#define A_PCIE_PIPE_LANE11_REG0 0x55b0
+#define A_PCIE_PIPE_LANE11_REG1 0x55b4
+#define A_PCIE_PIPE_LANE11_REG2 0x55b8
+#define A_PCIE_PIPE_LANE11_REG3 0x55bc
+#define A_PCIE_PIPE_LANE12_REG0 0x55c0
+#define A_PCIE_PIPE_LANE12_REG1 0x55c4
+#define A_PCIE_PIPE_LANE12_REG2 0x55c8
+#define A_PCIE_PIPE_LANE12_REG3 0x55cc
+#define A_PCIE_PIPE_LANE13_REG0 0x55d0
+#define A_PCIE_PIPE_LANE13_REG1 0x55d4
+#define A_PCIE_PIPE_LANE13_REG2 0x55d8
+#define A_PCIE_PIPE_LANE13_REG3 0x55dc
+#define A_PCIE_PIPE_LANE14_REG0 0x55e0
+#define A_PCIE_PIPE_LANE14_REG1 0x55e4
+#define A_PCIE_PIPE_LANE14_REG2 0x55e8
+#define A_PCIE_PIPE_LANE14_REG3 0x55ec
+#define A_PCIE_PIPE_LANE15_REG0 0x55f0
+#define A_PCIE_PIPE_LANE15_REG1 0x55f4
+#define A_PCIE_PIPE_LANE15_REG2 0x55f8
+#define A_PCIE_PIPE_LANE15_REG3 0x55fc
#define A_PCIE_COOKIE_STAT 0x5600
#define S_COOKIEB 16
@@ -5346,6 +6927,30 @@
#define V_T6_RCVDPIOREQCOOKIE(x) ((x) << S_T6_RCVDPIOREQCOOKIE)
#define G_T6_RCVDPIOREQCOOKIE(x) (((x) >> S_T6_RCVDPIOREQCOOKIE) & M_T6_RCVDPIOREQCOOKIE)
+#define A_T7_PCIE_VC0_CDTS0 0x56c4
+
+#define S_T7_CPLD0 16
+#define M_T7_CPLD0 0xffffU
+#define V_T7_CPLD0(x) ((x) << S_T7_CPLD0)
+#define G_T7_CPLD0(x) (((x) >> S_T7_CPLD0) & M_T7_CPLD0)
+
+#define S_T7_CPLH0 0
+#define M_T7_CPLH0 0xfffU
+#define V_T7_CPLH0(x) ((x) << S_T7_CPLH0)
+#define G_T7_CPLH0(x) (((x) >> S_T7_CPLH0) & M_T7_CPLH0)
+
+#define A_T7_PCIE_VC0_CDTS1 0x56c8
+
+#define S_T7_PD0 16
+#define M_T7_PD0 0xffffU
+#define V_T7_PD0(x) ((x) << S_T7_PD0)
+#define G_T7_PD0(x) (((x) >> S_T7_PD0) & M_T7_PD0)
+
+#define S_T7_PH0 0
+#define M_T7_PH0 0xfffU
+#define V_T7_PH0(x) ((x) << S_T7_PH0)
+#define G_T7_PH0(x) (((x) >> S_T7_PH0) & M_T7_PH0)
+
#define A_PCIE_VC0_CDTS0 0x56cc
#define S_CPLD0 20
@@ -5363,6 +6968,18 @@
#define V_PD0(x) ((x) << S_PD0)
#define G_PD0(x) (((x) >> S_PD0) & M_PD0)
+#define A_PCIE_VC0_CDTS2 0x56cc
+
+#define S_T7_NPD0 16
+#define M_T7_NPD0 0xffffU
+#define V_T7_NPD0(x) ((x) << S_T7_NPD0)
+#define G_T7_NPD0(x) (((x) >> S_T7_NPD0) & M_T7_NPD0)
+
+#define S_T7_NPH0 0
+#define M_T7_NPH0 0xfffU
+#define V_T7_NPH0(x) ((x) << S_T7_NPH0)
+#define G_T7_NPH0(x) (((x) >> S_T7_NPH0) & M_T7_NPH0)
+
#define A_PCIE_VC0_CDTS1 0x56d0
#define S_CPLH0 20
@@ -5380,6 +6997,7 @@
#define V_NPD0(x) ((x) << S_NPD0)
#define G_NPD0(x) (((x) >> S_NPD0) & M_NPD0)
+#define A_T7_PCIE_VC1_CDTS0 0x56d0
#define A_PCIE_VC1_CDTS0 0x56d4
#define S_CPLD1 20
@@ -5397,6 +7015,7 @@
#define V_PD1(x) ((x) << S_PD1)
#define G_PD1(x) (((x) >> S_PD1) & M_PD1)
+#define A_T7_PCIE_VC1_CDTS1 0x56d4
#define A_PCIE_VC1_CDTS1 0x56d8
#define S_CPLH1 20
@@ -5414,6 +7033,7 @@
#define V_NPD1(x) ((x) << S_NPD1)
#define G_NPD1(x) (((x) >> S_NPD1) & M_NPD1)
+#define A_PCIE_VC1_CDTS2 0x56d8
#define A_PCIE_FLR_PF_STATUS 0x56dc
#define A_PCIE_FLR_VF0_STATUS 0x56e0
#define A_PCIE_FLR_VF1_STATUS 0x56e4
@@ -5916,6 +7536,11 @@
#define V_DISABLE_SCRAMBLER(x) ((x) << S_DISABLE_SCRAMBLER)
#define F_DISABLE_SCRAMBLER V_DISABLE_SCRAMBLER(1U)
+#define S_RATE_SHADOW_SEL 24
+#define M_RATE_SHADOW_SEL 0x3U
+#define V_RATE_SHADOW_SEL(x) ((x) << S_RATE_SHADOW_SEL)
+#define G_RATE_SHADOW_SEL(x) (((x) >> S_RATE_SHADOW_SEL) & M_RATE_SHADOW_SEL)
+
#define A_PCIE_CORE_GEN3_EQ_FS_LF 0x5894
#define S_FULL_SWING 6
@@ -6347,6 +7972,35 @@
#define V_RDSOPCNT(x) ((x) << S_RDSOPCNT)
#define G_RDSOPCNT(x) (((x) >> S_RDSOPCNT) & M_RDSOPCNT)
+#define S_DMA_COOKIECNT 24
+#define M_DMA_COOKIECNT 0xfU
+#define V_DMA_COOKIECNT(x) ((x) << S_DMA_COOKIECNT)
+#define G_DMA_COOKIECNT(x) (((x) >> S_DMA_COOKIECNT) & M_DMA_COOKIECNT)
+
+#define S_DMA_RDSEQNUMUPDCNT 20
+#define M_DMA_RDSEQNUMUPDCNT 0xfU
+#define V_DMA_RDSEQNUMUPDCNT(x) ((x) << S_DMA_RDSEQNUMUPDCNT)
+#define G_DMA_RDSEQNUMUPDCNT(x) (((x) >> S_DMA_RDSEQNUMUPDCNT) & M_DMA_RDSEQNUMUPDCNT)
+
+#define S_DMA_SIREQCNT 16
+#define M_DMA_SIREQCNT 0xfU
+#define V_DMA_SIREQCNT(x) ((x) << S_DMA_SIREQCNT)
+#define G_DMA_SIREQCNT(x) (((x) >> S_DMA_SIREQCNT) & M_DMA_SIREQCNT)
+
+#define S_DMA_WREOPMATCHSOP 12
+#define V_DMA_WREOPMATCHSOP(x) ((x) << S_DMA_WREOPMATCHSOP)
+#define F_DMA_WREOPMATCHSOP V_DMA_WREOPMATCHSOP(1U)
+
+#define S_DMA_WRSOPCNT 8
+#define M_DMA_WRSOPCNT 0xfU
+#define V_DMA_WRSOPCNT(x) ((x) << S_DMA_WRSOPCNT)
+#define G_DMA_WRSOPCNT(x) (((x) >> S_DMA_WRSOPCNT) & M_DMA_WRSOPCNT)
+
+#define S_DMA_RDSOPCNT 0
+#define M_DMA_RDSOPCNT 0xffU
+#define V_DMA_RDSOPCNT(x) ((x) << S_DMA_RDSOPCNT)
+#define G_DMA_RDSOPCNT(x) (((x) >> S_DMA_RDSOPCNT) & M_DMA_RDSOPCNT)
+
#define A_PCIE_T5_DMA_STAT3 0x594c
#define S_ATMREQSOPCNT 24
@@ -6372,6 +8026,29 @@
#define V_RSPSOPCNT(x) ((x) << S_RSPSOPCNT)
#define G_RSPSOPCNT(x) (((x) >> S_RSPSOPCNT) & M_RSPSOPCNT)
+#define S_DMA_ATMREQSOPCNT 24
+#define M_DMA_ATMREQSOPCNT 0xffU
+#define V_DMA_ATMREQSOPCNT(x) ((x) << S_DMA_ATMREQSOPCNT)
+#define G_DMA_ATMREQSOPCNT(x) (((x) >> S_DMA_ATMREQSOPCNT) & M_DMA_ATMREQSOPCNT)
+
+#define S_DMA_ATMEOPMATCHSOP 17
+#define V_DMA_ATMEOPMATCHSOP(x) ((x) << S_DMA_ATMEOPMATCHSOP)
+#define F_DMA_ATMEOPMATCHSOP V_DMA_ATMEOPMATCHSOP(1U)
+
+#define S_DMA_RSPEOPMATCHSOP 16
+#define V_DMA_RSPEOPMATCHSOP(x) ((x) << S_DMA_RSPEOPMATCHSOP)
+#define F_DMA_RSPEOPMATCHSOP V_DMA_RSPEOPMATCHSOP(1U)
+
+#define S_DMA_RSPERRCNT 8
+#define M_DMA_RSPERRCNT 0xffU
+#define V_DMA_RSPERRCNT(x) ((x) << S_DMA_RSPERRCNT)
+#define G_DMA_RSPERRCNT(x) (((x) >> S_DMA_RSPERRCNT) & M_DMA_RSPERRCNT)
+
+#define S_DMA_RSPSOPCNT 0
+#define M_DMA_RSPSOPCNT 0xffU
+#define V_DMA_RSPSOPCNT(x) ((x) << S_DMA_RSPSOPCNT)
+#define G_DMA_RSPSOPCNT(x) (((x) >> S_DMA_RSPSOPCNT) & M_DMA_RSPSOPCNT)
+
#define A_PCIE_CORE_OUTBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5960
#define S_OP0H 24
@@ -6507,11 +8184,6 @@
#define V_T6_USECMDPOOL(x) ((x) << S_T6_USECMDPOOL)
#define F_T6_USECMDPOOL V_T6_USECMDPOOL(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
-
#define A_PCIE_T5_CMD_STAT 0x5984
#define S_T5_STAT_RSPCNT 20
@@ -6558,6 +8230,21 @@
#define A_PCIE_T5_CMD_STAT2 0x5988
#define A_PCIE_T5_CMD_STAT3 0x598c
+
+#define S_CMD_RSPEOPMATCHSOP 16
+#define V_CMD_RSPEOPMATCHSOP(x) ((x) << S_CMD_RSPEOPMATCHSOP)
+#define F_CMD_RSPEOPMATCHSOP V_CMD_RSPEOPMATCHSOP(1U)
+
+#define S_CMD_RSPERRCNT 8
+#define M_CMD_RSPERRCNT 0xffU
+#define V_CMD_RSPERRCNT(x) ((x) << S_CMD_RSPERRCNT)
+#define G_CMD_RSPERRCNT(x) (((x) >> S_CMD_RSPERRCNT) & M_CMD_RSPERRCNT)
+
+#define S_CMD_RSPSOPCNT 0
+#define M_CMD_RSPSOPCNT 0xffU
+#define V_CMD_RSPSOPCNT(x) ((x) << S_CMD_RSPSOPCNT)
+#define G_CMD_RSPSOPCNT(x) (((x) >> S_CMD_RSPSOPCNT) & M_CMD_RSPSOPCNT)
+
#define A_PCIE_CORE_PCI_EXPRESS_TAGS_ALLOCATION 0x5990
#define S_OC0T 24
@@ -6868,14 +8555,14 @@
#define V_T6_T5_HMA_MAXRSPCNT(x) ((x) << S_T6_T5_HMA_MAXRSPCNT)
#define G_T6_T5_HMA_MAXRSPCNT(x) (((x) >> S_T6_T5_HMA_MAXRSPCNT) & M_T6_T5_HMA_MAXRSPCNT)
-#define S_T6_SEQCHKDIS 8
-#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS)
-#define F_T6_SEQCHKDIS V_T6_SEQCHKDIS(1U)
+#define S_T5_HMA_SEQCHKDIS 8
+#define V_T5_HMA_SEQCHKDIS(x) ((x) << S_T5_HMA_SEQCHKDIS)
+#define F_T5_HMA_SEQCHKDIS V_T5_HMA_SEQCHKDIS(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+#define S_T5_MINTAG 0
+#define M_T5_MINTAG 0xffU
+#define V_T5_MINTAG(x) ((x) << S_T5_MINTAG)
+#define G_T5_MINTAG(x) (((x) >> S_T5_MINTAG) & M_T5_MINTAG)
#define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4
@@ -6992,6 +8679,31 @@
#define F_CRSI V_CRSI(1U)
#define A_PCIE_T5_HMA_STAT2 0x59b8
+
+#define S_HMA_COOKIECNT 24
+#define M_HMA_COOKIECNT 0xfU
+#define V_HMA_COOKIECNT(x) ((x) << S_HMA_COOKIECNT)
+#define G_HMA_COOKIECNT(x) (((x) >> S_HMA_COOKIECNT) & M_HMA_COOKIECNT)
+
+#define S_HMA_RDSEQNUMUPDCNT 20
+#define M_HMA_RDSEQNUMUPDCNT 0xfU
+#define V_HMA_RDSEQNUMUPDCNT(x) ((x) << S_HMA_RDSEQNUMUPDCNT)
+#define G_HMA_RDSEQNUMUPDCNT(x) (((x) >> S_HMA_RDSEQNUMUPDCNT) & M_HMA_RDSEQNUMUPDCNT)
+
+#define S_HMA_WREOPMATCHSOP 12
+#define V_HMA_WREOPMATCHSOP(x) ((x) << S_HMA_WREOPMATCHSOP)
+#define F_HMA_WREOPMATCHSOP V_HMA_WREOPMATCHSOP(1U)
+
+#define S_HMA_WRSOPCNT 8
+#define M_HMA_WRSOPCNT 0xfU
+#define V_HMA_WRSOPCNT(x) ((x) << S_HMA_WRSOPCNT)
+#define G_HMA_WRSOPCNT(x) (((x) >> S_HMA_WRSOPCNT) & M_HMA_WRSOPCNT)
+
+#define S_HMA_RDSOPCNT 0
+#define M_HMA_RDSOPCNT 0xffU
+#define V_HMA_RDSOPCNT(x) ((x) << S_HMA_RDSOPCNT)
+#define G_HMA_RDSOPCNT(x) (((x) >> S_HMA_RDSOPCNT) & M_HMA_RDSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_STATUS 0x59bc
#define S_PTOM 31
@@ -7035,6 +8747,21 @@
#define F_PMC7 V_PMC7(1U)
#define A_PCIE_T5_HMA_STAT3 0x59bc
+
+#define S_HMA_RSPEOPMATCHSOP 16
+#define V_HMA_RSPEOPMATCHSOP(x) ((x) << S_HMA_RSPEOPMATCHSOP)
+#define F_HMA_RSPEOPMATCHSOP V_HMA_RSPEOPMATCHSOP(1U)
+
+#define S_HMA_RSPERRCNT 8
+#define M_HMA_RSPERRCNT 0xffU
+#define V_HMA_RSPERRCNT(x) ((x) << S_HMA_RSPERRCNT)
+#define G_HMA_RSPERRCNT(x) (((x) >> S_HMA_RSPERRCNT) & M_HMA_RSPERRCNT)
+
+#define S_HMA_RSPSOPCNT 0
+#define M_HMA_RSPSOPCNT 0xffU
+#define V_HMA_RSPSOPCNT(x) ((x) << S_HMA_RSPSOPCNT)
+#define G_HMA_RSPSOPCNT(x) (((x) >> S_HMA_RSPSOPCNT) & M_HMA_RSPSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_ERROR_SEVERITY 0x59c0
#define S_PTOS 31
@@ -7187,6 +8914,14 @@
#define V_STI_SLEEPREQ(x) ((x) << S_STI_SLEEPREQ)
#define F_STI_SLEEPREQ V_STI_SLEEPREQ(1U)
+#define S_ARM_STATIC_CGEN 28
+#define V_ARM_STATIC_CGEN(x) ((x) << S_ARM_STATIC_CGEN)
+#define F_ARM_STATIC_CGEN V_ARM_STATIC_CGEN(1U)
+
+#define S_ARM_DYNAMIC_CGEN 27
+#define V_ARM_DYNAMIC_CGEN(x) ((x) << S_ARM_DYNAMIC_CGEN)
+#define F_ARM_DYNAMIC_CGEN V_ARM_DYNAMIC_CGEN(1U)
+
#define A_PCIE_CORE_ENDPOINT_INTERRUPT_ENABLE 0x59c4
#define S_PTOI 31
@@ -7521,6 +9256,14 @@
#define V_PIOCPL_VDMTXDATAPERR(x) ((x) << S_PIOCPL_VDMTXDATAPERR)
#define F_PIOCPL_VDMTXDATAPERR V_PIOCPL_VDMTXDATAPERR(1U)
+#define S_TGT1_MEM_PERR 28
+#define V_TGT1_MEM_PERR(x) ((x) << S_TGT1_MEM_PERR)
+#define F_TGT1_MEM_PERR V_TGT1_MEM_PERR(1U)
+
+#define S_TGT2_MEM_PERR 27
+#define V_TGT2_MEM_PERR(x) ((x) << S_TGT2_MEM_PERR)
+#define F_TGT2_MEM_PERR V_TGT2_MEM_PERR(1U)
+
#define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4
#define A_PCIE_RSP_ERR_INT_LOG_EN 0x59d4
@@ -7622,6 +9365,16 @@
#define V_T6_REQVFID(x) ((x) << S_T6_REQVFID)
#define G_T6_REQVFID(x) (((x) >> S_T6_REQVFID) & M_T6_REQVFID)
+#define S_LOGADDR10B 9
+#define M_LOGADDR10B 0x3ffU
+#define V_LOGADDR10B(x) ((x) << S_LOGADDR10B)
+#define G_LOGADDR10B(x) (((x) >> S_LOGADDR10B) & M_LOGADDR10B)
+
+#define S_LOGREQVFID 0
+#define M_LOGREQVFID 0x1ffU
+#define V_LOGREQVFID(x) ((x) << S_LOGREQVFID)
+#define G_LOGREQVFID(x) (((x) >> S_LOGREQVFID) & M_LOGREQVFID)
+
#define A_PCIE_CHANGESET 0x59fc
#define A_PCIE_REVISION 0x5a00
#define A_PCIE_PDEBUG_INDEX 0x5a04
@@ -7646,6 +9399,16 @@
#define V_T6_PDEBUGSELL(x) ((x) << S_T6_PDEBUGSELL)
#define G_T6_PDEBUGSELL(x) (((x) >> S_T6_PDEBUGSELL) & M_T6_PDEBUGSELL)
+#define S_T7_1_PDEBUGSELH 16
+#define M_T7_1_PDEBUGSELH 0xffU
+#define V_T7_1_PDEBUGSELH(x) ((x) << S_T7_1_PDEBUGSELH)
+#define G_T7_1_PDEBUGSELH(x) (((x) >> S_T7_1_PDEBUGSELH) & M_T7_1_PDEBUGSELH)
+
+#define S_T7_1_PDEBUGSELL 0
+#define M_T7_1_PDEBUGSELL 0xffU
+#define V_T7_1_PDEBUGSELL(x) ((x) << S_T7_1_PDEBUGSELL)
+#define G_T7_1_PDEBUGSELL(x) (((x) >> S_T7_1_PDEBUGSELL) & M_T7_1_PDEBUGSELL)
+
#define A_PCIE_PDEBUG_DATA_HIGH 0x5a08
#define A_PCIE_PDEBUG_DATA_LOW 0x5a0c
#define A_PCIE_CDEBUG_INDEX 0x5a10
@@ -8468,6 +10231,21 @@
#define A_PCIE_PHY_INDIR_DATA 0x5bf4
#define A_PCIE_STATIC_SPARE1 0x5bf8
#define A_PCIE_STATIC_SPARE2 0x5bfc
+
+#define S_X8_SW_EN 30
+#define V_X8_SW_EN(x) ((x) << S_X8_SW_EN)
+#define F_X8_SW_EN V_X8_SW_EN(1U)
+
+#define S_SWITCHCFG 28
+#define M_SWITCHCFG 0x3U
+#define V_SWITCHCFG(x) ((x) << S_SWITCHCFG)
+#define G_SWITCHCFG(x) (((x) >> S_SWITCHCFG) & M_SWITCHCFG)
+
+#define S_STATIC_SPARE2 0
+#define M_STATIC_SPARE2 0xfffffffU
+#define V_STATIC_SPARE2(x) ((x) << S_STATIC_SPARE2)
+#define G_STATIC_SPARE2(x) (((x) >> S_STATIC_SPARE2) & M_STATIC_SPARE2)
+
#define A_PCIE_KDOORBELL_GTS_PF_BASE_LEN 0x5c10
#define S_KDB_PF_LEN 24
@@ -8872,9 +10650,13 @@
#define A_PCIE_FLR_VF6_STATUS 0x5e78
#define A_PCIE_FLR_VF7_STATUS 0x5e7c
#define A_T6_PCIE_BUS_MST_STAT_4 0x5e80
+#define A_T7_PCIE_BUS_MST_STAT_4 0x5e80
#define A_T6_PCIE_BUS_MST_STAT_5 0x5e84
+#define A_T7_PCIE_BUS_MST_STAT_5 0x5e84
#define A_T6_PCIE_BUS_MST_STAT_6 0x5e88
+#define A_T7_PCIE_BUS_MST_STAT_6 0x5e88
#define A_T6_PCIE_BUS_MST_STAT_7 0x5e8c
+#define A_T7_PCIE_BUS_MST_STAT_7 0x5e8c
#define A_PCIE_BUS_MST_STAT_8 0x5e90
#define S_BUSMST_263_256 0
@@ -8895,9 +10677,13 @@
#define G_DATAFREECNT(x) (((x) >> S_DATAFREECNT) & M_DATAFREECNT)
#define A_T6_PCIE_RSP_ERR_STAT_4 0x5ea0
+#define A_T7_PCIE_RSP_ERR_STAT_4 0x5ea0
#define A_T6_PCIE_RSP_ERR_STAT_5 0x5ea4
+#define A_T7_PCIE_RSP_ERR_STAT_5 0x5ea4
#define A_T6_PCIE_RSP_ERR_STAT_6 0x5ea8
+#define A_T7_PCIE_RSP_ERR_STAT_6 0x5ea8
#define A_T6_PCIE_RSP_ERR_STAT_7 0x5eac
+#define A_T7_PCIE_RSP_ERR_STAT_7 0x5eac
#define A_PCIE_RSP_ERR_STAT_8 0x5eb0
#define S_RSPERR_263_256 0
@@ -9025,6 +10811,1028 @@
#define A_PCIE_DEBUG_ADDR_RANGE1 0x5ee0
#define A_PCIE_DEBUG_ADDR_RANGE2 0x5ef0
#define A_PCIE_DEBUG_ADDR_RANGE_CNT 0x5f00
+#define A_PCIE_PHY_PGM_LOAD_CTRL 0x5f04
+
+#define S_HSS_PMLD_ACC_EN 31
+#define V_HSS_PMLD_ACC_EN(x) ((x) << S_HSS_PMLD_ACC_EN)
+#define F_HSS_PMLD_ACC_EN V_HSS_PMLD_ACC_EN(1U)
+
+#define S_HSS_PMRDWR_ADDR 0
+#define M_HSS_PMRDWR_ADDR 0x3ffffU
+#define V_HSS_PMRDWR_ADDR(x) ((x) << S_HSS_PMRDWR_ADDR)
+#define G_HSS_PMRDWR_ADDR(x) (((x) >> S_HSS_PMRDWR_ADDR) & M_HSS_PMRDWR_ADDR)
+
+#define A_PCIE_PHY_PGM_LOAD_DATA 0x5f08
+#define A_PCIE_HSS_CFG 0x5f0c
+
+#define S_HSS_PCS_AGGREGATION_MODE 30
+#define M_HSS_PCS_AGGREGATION_MODE 0x3U
+#define V_HSS_PCS_AGGREGATION_MODE(x) ((x) << S_HSS_PCS_AGGREGATION_MODE)
+#define G_HSS_PCS_AGGREGATION_MODE(x) (((x) >> S_HSS_PCS_AGGREGATION_MODE) & M_HSS_PCS_AGGREGATION_MODE)
+
+#define S_HSS_PCS_FURCATE_MODE 28
+#define M_HSS_PCS_FURCATE_MODE 0x3U
+#define V_HSS_PCS_FURCATE_MODE(x) ((x) << S_HSS_PCS_FURCATE_MODE)
+#define G_HSS_PCS_FURCATE_MODE(x) (((x) >> S_HSS_PCS_FURCATE_MODE) & M_HSS_PCS_FURCATE_MODE)
+
+#define S_HSS_PCS_PCLK_ON_IN_P2 27
+#define V_HSS_PCS_PCLK_ON_IN_P2(x) ((x) << S_HSS_PCS_PCLK_ON_IN_P2)
+#define F_HSS_PCS_PCLK_ON_IN_P2 V_HSS_PCS_PCLK_ON_IN_P2(1U)
+
+#define S_HSS0_PHY_CTRL_REFCLK 17
+#define M_HSS0_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS0_PHY_CTRL_REFCLK(x) ((x) << S_HSS0_PHY_CTRL_REFCLK)
+#define G_HSS0_PHY_CTRL_REFCLK(x) (((x) >> S_HSS0_PHY_CTRL_REFCLK) & M_HSS0_PHY_CTRL_REFCLK)
+
+#define S_HSS1_PHY_CTRL_REFCLK 12
+#define M_HSS1_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS1_PHY_CTRL_REFCLK(x) ((x) << S_HSS1_PHY_CTRL_REFCLK)
+#define G_HSS1_PHY_CTRL_REFCLK(x) (((x) >> S_HSS1_PHY_CTRL_REFCLK) & M_HSS1_PHY_CTRL_REFCLK)
+
+#define S_HSS0_PHY_REXT_MASTER 11
+#define V_HSS0_PHY_REXT_MASTER(x) ((x) << S_HSS0_PHY_REXT_MASTER)
+#define F_HSS0_PHY_REXT_MASTER V_HSS0_PHY_REXT_MASTER(1U)
+
+#define S_HSS1_PHY_REXT_MASTER 10
+#define V_HSS1_PHY_REXT_MASTER(x) ((x) << S_HSS1_PHY_REXT_MASTER)
+#define F_HSS1_PHY_REXT_MASTER V_HSS1_PHY_REXT_MASTER(1U)
+
+#define S_HSS0_PHY_CTRL_VDDA_SEL 9
+#define V_HSS0_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDA_SEL)
+#define F_HSS0_PHY_CTRL_VDDA_SEL V_HSS0_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS0_PHY_CTRL_VDDHA_SEL 8
+#define V_HSS0_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDHA_SEL)
+#define F_HSS0_PHY_CTRL_VDDHA_SEL V_HSS0_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDA_SEL 7
+#define V_HSS1_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDA_SEL)
+#define F_HSS1_PHY_CTRL_VDDA_SEL V_HSS1_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDHA_SEL 6
+#define V_HSS1_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDHA_SEL)
+#define F_HSS1_PHY_CTRL_VDDHA_SEL V_HSS1_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_CPU_MEMPSACK 5
+#define V_HSS1_CPU_MEMPSACK(x) ((x) << S_HSS1_CPU_MEMPSACK)
+#define F_HSS1_CPU_MEMPSACK V_HSS1_CPU_MEMPSACK(1U)
+
+#define S_HSS0_CPU_MEMPSACK 3
+#define V_HSS0_CPU_MEMPSACK(x) ((x) << S_HSS0_CPU_MEMPSACK)
+#define F_HSS0_CPU_MEMPSACK V_HSS0_CPU_MEMPSACK(1U)
+
+#define S_HSS1_CPU_MEMACK 4
+#define V_HSS1_CPU_MEMACK(x) ((x) << S_HSS1_CPU_MEMACK)
+#define F_HSS1_CPU_MEMACK V_HSS1_CPU_MEMACK(1U)
+
+#define S_HSS0_CPU_MEMACK 2
+#define V_HSS0_CPU_MEMACK(x) ((x) << S_HSS0_CPU_MEMACK)
+#define F_HSS0_CPU_MEMACK V_HSS0_CPU_MEMACK(1U)
+
+#define S_HSS_PM_IS_ROM 1
+#define V_HSS_PM_IS_ROM(x) ((x) << S_HSS_PM_IS_ROM)
+#define F_HSS_PM_IS_ROM V_HSS_PM_IS_ROM(1U)
+
+#define A_PCIE_HSS_RST 0x5f10
+
+#define S_HSS_RST_CTRL_BY_FW 31
+#define V_HSS_RST_CTRL_BY_FW(x) ((x) << S_HSS_RST_CTRL_BY_FW)
+#define F_HSS_RST_CTRL_BY_FW V_HSS_RST_CTRL_BY_FW(1U)
+
+#define S_HSS_PIPE0_RESET_N 30
+#define V_HSS_PIPE0_RESET_N(x) ((x) << S_HSS_PIPE0_RESET_N)
+#define F_HSS_PIPE0_RESET_N V_HSS_PIPE0_RESET_N(1U)
+
+#define S_HSS0_POR_N 29
+#define V_HSS0_POR_N(x) ((x) << S_HSS0_POR_N)
+#define F_HSS0_POR_N V_HSS0_POR_N(1U)
+
+#define S_HSS1_POR_N 28
+#define V_HSS1_POR_N(x) ((x) << S_HSS1_POR_N)
+#define F_HSS1_POR_N V_HSS1_POR_N(1U)
+
+#define S_HSS0_CPU_RESET 27
+#define V_HSS0_CPU_RESET(x) ((x) << S_HSS0_CPU_RESET)
+#define F_HSS0_CPU_RESET V_HSS0_CPU_RESET(1U)
+
+#define S_HSS1_CPU_RESET 26
+#define V_HSS1_CPU_RESET(x) ((x) << S_HSS1_CPU_RESET)
+#define F_HSS1_CPU_RESET V_HSS1_CPU_RESET(1U)
+
+#define S_HSS_PCS_POR_N 25
+#define V_HSS_PCS_POR_N(x) ((x) << S_HSS_PCS_POR_N)
+#define F_HSS_PCS_POR_N V_HSS_PCS_POR_N(1U)
+
+#define S_SW_CRST_ 24
+#define V_SW_CRST_(x) ((x) << S_SW_CRST_)
+#define F_SW_CRST_ V_SW_CRST_(1U)
+
+#define S_SW_PCIECRST_ 23
+#define V_SW_PCIECRST_(x) ((x) << S_SW_PCIECRST_)
+#define F_SW_PCIECRST_ V_SW_PCIECRST_(1U)
+
+#define S_SW_PCIEPIPERST_ 22
+#define V_SW_PCIEPIPERST_(x) ((x) << S_SW_PCIEPIPERST_)
+#define F_SW_PCIEPIPERST_ V_SW_PCIEPIPERST_(1U)
+
+#define S_SW_PCIEPHYRST_ 21
+#define V_SW_PCIEPHYRST_(x) ((x) << S_SW_PCIEPHYRST_)
+#define F_SW_PCIEPHYRST_ V_SW_PCIEPHYRST_(1U)
+
+#define S_HSS1_ERR_O 3
+#define V_HSS1_ERR_O(x) ((x) << S_HSS1_ERR_O)
+#define F_HSS1_ERR_O V_HSS1_ERR_O(1U)
+
+#define S_HSS0_ERR_O 2
+#define V_HSS0_ERR_O(x) ((x) << S_HSS0_ERR_O)
+#define F_HSS0_ERR_O V_HSS0_ERR_O(1U)
+
+#define S_HSS1_PLL_LOCK 1
+#define V_HSS1_PLL_LOCK(x) ((x) << S_HSS1_PLL_LOCK)
+#define F_HSS1_PLL_LOCK V_HSS1_PLL_LOCK(1U)
+
+#define S_HSS0_PLL_LOCK 0
+#define V_HSS0_PLL_LOCK(x) ((x) << S_HSS0_PLL_LOCK)
+#define F_HSS0_PLL_LOCK V_HSS0_PLL_LOCK(1U)
+
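The low bits of A_PCIE_HSS_RST are status. A hedged sketch of waiting for
both SerDes PLLs to report lock after release from reset; the retry count
and delay are illustrative, not taken from the hardware spec:

	uint32_t locked = F_HSS0_PLL_LOCK | F_HSS1_PLL_LOCK;
	int tries;

	for (tries = 0; tries < 1000; tries++) {
		if ((t4_read_reg(sc, A_PCIE_HSS_RST) & locked) == locked)
			break;		/* both PLLs locked */
		DELAY(10);
	}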
+#define A_PCIE_T5_ARM_CFG 0x5f20
+
+#define S_T5_ARM_MAXREQCNT 20
+#define M_T5_ARM_MAXREQCNT 0x7fU
+#define V_T5_ARM_MAXREQCNT(x) ((x) << S_T5_ARM_MAXREQCNT)
+#define G_T5_ARM_MAXREQCNT(x) (((x) >> S_T5_ARM_MAXREQCNT) & M_T5_ARM_MAXREQCNT)
+
+#define S_T5_ARM_MAXRDREQSIZE 17
+#define M_T5_ARM_MAXRDREQSIZE 0x7U
+#define V_T5_ARM_MAXRDREQSIZE(x) ((x) << S_T5_ARM_MAXRDREQSIZE)
+#define G_T5_ARM_MAXRDREQSIZE(x) (((x) >> S_T5_ARM_MAXRDREQSIZE) & M_T5_ARM_MAXRDREQSIZE)
+
+#define S_T5_ARM_MAXRSPCNT 9
+#define M_T5_ARM_MAXRSPCNT 0xffU
+#define V_T5_ARM_MAXRSPCNT(x) ((x) << S_T5_ARM_MAXRSPCNT)
+#define G_T5_ARM_MAXRSPCNT(x) (((x) >> S_T5_ARM_MAXRSPCNT) & M_T5_ARM_MAXRSPCNT)
+
+#define A_PCIE_T5_ARM_STAT 0x5f24
+
+#define S_ARM_RESPCNT 20
+#define M_ARM_RESPCNT 0x1ffU
+#define V_ARM_RESPCNT(x) ((x) << S_ARM_RESPCNT)
+#define G_ARM_RESPCNT(x) (((x) >> S_ARM_RESPCNT) & M_ARM_RESPCNT)
+
+#define S_ARM_RDREQCNT 12
+#define M_ARM_RDREQCNT 0x3fU
+#define V_ARM_RDREQCNT(x) ((x) << S_ARM_RDREQCNT)
+#define G_ARM_RDREQCNT(x) (((x) >> S_ARM_RDREQCNT) & M_ARM_RDREQCNT)
+
+#define S_ARM_WRREQCNT 0
+#define M_ARM_WRREQCNT 0x1ffU
+#define V_ARM_WRREQCNT(x) ((x) << S_ARM_WRREQCNT)
+#define G_ARM_WRREQCNT(x) (((x) >> S_ARM_WRREQCNT) & M_ARM_WRREQCNT)
+
+#define A_PCIE_T5_ARM_STAT2 0x5f28
+
+#define S_ARM_COOKIECNT 24
+#define M_ARM_COOKIECNT 0xfU
+#define V_ARM_COOKIECNT(x) ((x) << S_ARM_COOKIECNT)
+#define G_ARM_COOKIECNT(x) (((x) >> S_ARM_COOKIECNT) & M_ARM_COOKIECNT)
+
+#define S_ARM_RDSEQNUMUPDCNT 20
+#define M_ARM_RDSEQNUMUPDCNT 0xfU
+#define V_ARM_RDSEQNUMUPDCNT(x) ((x) << S_ARM_RDSEQNUMUPDCNT)
+#define G_ARM_RDSEQNUMUPDCNT(x) (((x) >> S_ARM_RDSEQNUMUPDCNT) & M_ARM_RDSEQNUMUPDCNT)
+
+#define S_ARM_SIREQCNT 16
+#define M_ARM_SIREQCNT 0xfU
+#define V_ARM_SIREQCNT(x) ((x) << S_ARM_SIREQCNT)
+#define G_ARM_SIREQCNT(x) (((x) >> S_ARM_SIREQCNT) & M_ARM_SIREQCNT)
+
+#define S_ARM_WREOPMATCHSOP 12
+#define V_ARM_WREOPMATCHSOP(x) ((x) << S_ARM_WREOPMATCHSOP)
+#define F_ARM_WREOPMATCHSOP V_ARM_WREOPMATCHSOP(1U)
+
+#define S_ARM_WRSOPCNT 8
+#define M_ARM_WRSOPCNT 0xfU
+#define V_ARM_WRSOPCNT(x) ((x) << S_ARM_WRSOPCNT)
+#define G_ARM_WRSOPCNT(x) (((x) >> S_ARM_WRSOPCNT) & M_ARM_WRSOPCNT)
+
+#define S_ARM_RDSOPCNT 0
+#define M_ARM_RDSOPCNT 0xffU
+#define V_ARM_RDSOPCNT(x) ((x) << S_ARM_RDSOPCNT)
+#define G_ARM_RDSOPCNT(x) (((x) >> S_ARM_RDSOPCNT) & M_ARM_RDSOPCNT)
+
+#define A_PCIE_T5_ARM_STAT3 0x5f2c
+
+#define S_ARM_ATMREQSOPCNT 24
+#define M_ARM_ATMREQSOPCNT 0xffU
+#define V_ARM_ATMREQSOPCNT(x) ((x) << S_ARM_ATMREQSOPCNT)
+#define G_ARM_ATMREQSOPCNT(x) (((x) >> S_ARM_ATMREQSOPCNT) & M_ARM_ATMREQSOPCNT)
+
+#define S_ARM_ATMEOPMATCHSOP 17
+#define V_ARM_ATMEOPMATCHSOP(x) ((x) << S_ARM_ATMEOPMATCHSOP)
+#define F_ARM_ATMEOPMATCHSOP V_ARM_ATMEOPMATCHSOP(1U)
+
+#define S_ARM_RSPEOPMATCHSOP 16
+#define V_ARM_RSPEOPMATCHSOP(x) ((x) << S_ARM_RSPEOPMATCHSOP)
+#define F_ARM_RSPEOPMATCHSOP V_ARM_RSPEOPMATCHSOP(1U)
+
+#define S_ARM_RSPERRCNT 8
+#define M_ARM_RSPERRCNT 0xffU
+#define V_ARM_RSPERRCNT(x) ((x) << S_ARM_RSPERRCNT)
+#define G_ARM_RSPERRCNT(x) (((x) >> S_ARM_RSPERRCNT) & M_ARM_RSPERRCNT)
+
+#define S_ARM_RSPSOPCNT 0
+#define M_ARM_RSPSOPCNT 0xffU
+#define V_ARM_RSPSOPCNT(x) ((x) << S_ARM_RSPSOPCNT)
+#define G_ARM_RSPSOPCNT(x) (((x) >> S_ARM_RSPSOPCNT) & M_ARM_RSPSOPCNT)
+
+#define A_PCIE_ARM_REQUESTER_ID 0x5f30
+
+#define S_A0_RSVD1 24
+#define M_A0_RSVD1 0xffU
+#define V_A0_RSVD1(x) ((x) << S_A0_RSVD1)
+#define G_A0_RSVD1(x) (((x) >> S_A0_RSVD1) & M_A0_RSVD1)
+
+#define S_A0_PRIMBUSNUMBER 16
+#define M_A0_PRIMBUSNUMBER 0xffU
+#define V_A0_PRIMBUSNUMBER(x) ((x) << S_A0_PRIMBUSNUMBER)
+#define G_A0_PRIMBUSNUMBER(x) (((x) >> S_A0_PRIMBUSNUMBER) & M_A0_PRIMBUSNUMBER)
+
+#define S_A0_REQUESTERID 0
+#define M_A0_REQUESTERID 0xffffU
+#define V_A0_REQUESTERID(x) ((x) << S_A0_REQUESTERID)
+#define G_A0_REQUESTERID(x) (((x) >> S_A0_REQUESTERID) & M_A0_REQUESTERID)
+
+#define A_PCIE_SWITCH_CFG_SPACE_REQ0 0x5f34
+
+#define S_REQ0ENABLE 31
+#define V_REQ0ENABLE(x) ((x) << S_REQ0ENABLE)
+#define F_REQ0ENABLE V_REQ0ENABLE(1U)
+
+#define S_RDREQ0TYPE 19
+#define V_RDREQ0TYPE(x) ((x) << S_RDREQ0TYPE)
+#define F_RDREQ0TYPE V_RDREQ0TYPE(1U)
+
+#define S_BYTEENABLE0 15
+#define M_BYTEENABLE0 0xfU
+#define V_BYTEENABLE0(x) ((x) << S_BYTEENABLE0)
+#define G_BYTEENABLE0(x) (((x) >> S_BYTEENABLE0) & M_BYTEENABLE0)
+
+#define S_REGADDR0 0
+#define M_REGADDR0 0x7fffU
+#define V_REGADDR0(x) ((x) << S_REGADDR0)
+#define G_REGADDR0(x) (((x) >> S_REGADDR0) & M_REGADDR0)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA0 0x5f38
+#define A_PCIE_SWITCH_CFG_SPACE_REQ1 0x5f3c
+
+#define S_REQ1ENABLE 31
+#define V_REQ1ENABLE(x) ((x) << S_REQ1ENABLE)
+#define F_REQ1ENABLE V_REQ1ENABLE(1U)
+
+#define S_RDREQ1TYPE 26
+#define M_RDREQ1TYPE 0xfU
+#define V_RDREQ1TYPE(x) ((x) << S_RDREQ1TYPE)
+#define G_RDREQ1TYPE(x) (((x) >> S_RDREQ1TYPE) & M_RDREQ1TYPE)
+
+#define S_BYTEENABLE1 15
+#define M_BYTEENABLE1 0x7ffU
+#define V_BYTEENABLE1(x) ((x) << S_BYTEENABLE1)
+#define G_BYTEENABLE1(x) (((x) >> S_BYTEENABLE1) & M_BYTEENABLE1)
+
+#define S_REGADDR1 0
+#define M_REGADDR1 0x7fffU
+#define V_REGADDR1(x) ((x) << S_REGADDR1)
+#define G_REGADDR1(x) (((x) >> S_REGADDR1) & M_REGADDR1)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA1 0x5f40
+#define A_PCIE_SWITCH_CFG_SPACE_REQ2 0x5f44
+
+#define S_REQ2ENABLE 31
+#define V_REQ2ENABLE(x) ((x) << S_REQ2ENABLE)
+#define F_REQ2ENABLE V_REQ2ENABLE(1U)
+
+#define S_RDREQ2TYPE 26
+#define M_RDREQ2TYPE 0xfU
+#define V_RDREQ2TYPE(x) ((x) << S_RDREQ2TYPE)
+#define G_RDREQ2TYPE(x) (((x) >> S_RDREQ2TYPE) & M_RDREQ2TYPE)
+
+#define S_BYTEENABLE2 15
+#define M_BYTEENABLE2 0x7ffU
+#define V_BYTEENABLE2(x) ((x) << S_BYTEENABLE2)
+#define G_BYTEENABLE2(x) (((x) >> S_BYTEENABLE2) & M_BYTEENABLE2)
+
+#define S_REGADDR2 0
+#define M_REGADDR2 0x7fffU
+#define V_REGADDR2(x) ((x) << S_REGADDR2)
+#define G_REGADDR2(x) (((x) >> S_REGADDR2) & M_REGADDR2)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA2 0x5f48
+#define A_PCIE_SWITCH_CFG_SPACE_REQ3 0x5f4c
+
+#define S_REQ3ENABLE 31
+#define V_REQ3ENABLE(x) ((x) << S_REQ3ENABLE)
+#define F_REQ3ENABLE V_REQ3ENABLE(1U)
+
+#define S_RDREQ3TYPE 26
+#define M_RDREQ3TYPE 0xfU
+#define V_RDREQ3TYPE(x) ((x) << S_RDREQ3TYPE)
+#define G_RDREQ3TYPE(x) (((x) >> S_RDREQ3TYPE) & M_RDREQ3TYPE)
+
+#define S_BYTEENABLE3 15
+#define M_BYTEENABLE3 0x7ffU
+#define V_BYTEENABLE3(x) ((x) << S_BYTEENABLE3)
+#define G_BYTEENABLE3(x) (((x) >> S_BYTEENABLE3) & M_BYTEENABLE3)
+
+#define S_REGADDR3 0
+#define M_REGADDR3 0x7fffU
+#define V_REGADDR3(x) ((x) << S_REGADDR3)
+#define G_REGADDR3(x) (((x) >> S_REGADDR3) & M_REGADDR3)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA3 0x5f50
+#define A_PCIE_SWITCH_CFG_SPACE_REQ4 0x5f54
+
+#define S_REQ4ENABLE 31
+#define V_REQ4ENABLE(x) ((x) << S_REQ4ENABLE)
+#define F_REQ4ENABLE V_REQ4ENABLE(1U)
+
+#define S_RDREQ4TYPE 26
+#define M_RDREQ4TYPE 0xfU
+#define V_RDREQ4TYPE(x) ((x) << S_RDREQ4TYPE)
+#define G_RDREQ4TYPE(x) (((x) >> S_RDREQ4TYPE) & M_RDREQ4TYPE)
+
+#define S_BYTEENABLE4 15
+#define M_BYTEENABLE4 0x7ffU
+#define V_BYTEENABLE4(x) ((x) << S_BYTEENABLE4)
+#define G_BYTEENABLE4(x) (((x) >> S_BYTEENABLE4) & M_BYTEENABLE4)
+
+#define S_REGADDR4 0
+#define M_REGADDR4 0x7fffU
+#define V_REGADDR4(x) ((x) << S_REGADDR4)
+#define G_REGADDR4(x) (((x) >> S_REGADDR4) & M_REGADDR4)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA4 0x5f58
+#define A_PCIE_SWITCH_CFG_SPACE_REQ5 0x5f5c
+
+#define S_REQ5ENABLE 31
+#define V_REQ5ENABLE(x) ((x) << S_REQ5ENABLE)
+#define F_REQ5ENABLE V_REQ5ENABLE(1U)
+
+#define S_RDREQ5TYPE 26
+#define M_RDREQ5TYPE 0xfU
+#define V_RDREQ5TYPE(x) ((x) << S_RDREQ5TYPE)
+#define G_RDREQ5TYPE(x) (((x) >> S_RDREQ5TYPE) & M_RDREQ5TYPE)
+
+#define S_BYTEENABLE5 15
+#define M_BYTEENABLE5 0x7ffU
+#define V_BYTEENABLE5(x) ((x) << S_BYTEENABLE5)
+#define G_BYTEENABLE5(x) (((x) >> S_BYTEENABLE5) & M_BYTEENABLE5)
+
+#define S_REGADDR5 0
+#define M_REGADDR5 0x7fffU
+#define V_REGADDR5(x) ((x) << S_REGADDR5)
+#define G_REGADDR5(x) (((x) >> S_REGADDR5) & M_REGADDR5)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA5 0x5f60
+#define A_PCIE_SWITCH_CFG_SPACE_REQ6 0x5f64
+
+#define S_REQ6ENABLE 31
+#define V_REQ6ENABLE(x) ((x) << S_REQ6ENABLE)
+#define F_REQ6ENABLE V_REQ6ENABLE(1U)
+
+#define S_RDREQ6TYPE 26
+#define M_RDREQ6TYPE 0xfU
+#define V_RDREQ6TYPE(x) ((x) << S_RDREQ6TYPE)
+#define G_RDREQ6TYPE(x) (((x) >> S_RDREQ6TYPE) & M_RDREQ6TYPE)
+
+#define S_BYTEENABLE6 15
+#define M_BYTEENABLE6 0x7ffU
+#define V_BYTEENABLE6(x) ((x) << S_BYTEENABLE6)
+#define G_BYTEENABLE6(x) (((x) >> S_BYTEENABLE6) & M_BYTEENABLE6)
+
+#define S_REGADDR6 0
+#define M_REGADDR6 0x7fffU
+#define V_REGADDR6(x) ((x) << S_REGADDR6)
+#define G_REGADDR6(x) (((x) >> S_REGADDR6) & M_REGADDR6)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA6 0x5f68
+#define A_PCIE_SWITCH_CFG_SPACE_REQ7 0x5f6c
+
+#define S_REQ7ENABLE 31
+#define V_REQ7ENABLE(x) ((x) << S_REQ7ENABLE)
+#define F_REQ7ENABLE V_REQ7ENABLE(1U)
+
+#define S_RDREQ7TYPE 26
+#define M_RDREQ7TYPE 0xfU
+#define V_RDREQ7TYPE(x) ((x) << S_RDREQ7TYPE)
+#define G_RDREQ7TYPE(x) (((x) >> S_RDREQ7TYPE) & M_RDREQ7TYPE)
+
+#define S_BYTEENABLE7 15
+#define M_BYTEENABLE7 0x7ffU
+#define V_BYTEENABLE7(x) ((x) << S_BYTEENABLE7)
+#define G_BYTEENABLE7(x) (((x) >> S_BYTEENABLE7) & M_BYTEENABLE7)
+
+#define S_REGADDR7 0
+#define M_REGADDR7 0x7fffU
+#define V_REGADDR7(x) ((x) << S_REGADDR7)
+#define G_REGADDR7(x) (((x) >> S_REGADDR7) & M_REGADDR7)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA7 0x5f70
+#define A_PCIE_SWITCH_CFG_SPACE_REQ8 0x5f74
+
+#define S_REQ8ENABLE 31
+#define V_REQ8ENABLE(x) ((x) << S_REQ8ENABLE)
+#define F_REQ8ENABLE V_REQ8ENABLE(1U)
+
+#define S_RDREQ8TYPE 26
+#define M_RDREQ8TYPE 0xfU
+#define V_RDREQ8TYPE(x) ((x) << S_RDREQ8TYPE)
+#define G_RDREQ8TYPE(x) (((x) >> S_RDREQ8TYPE) & M_RDREQ8TYPE)
+
+#define S_BYTEENABLE8 15
+#define M_BYTEENABLE8 0x7ffU
+#define V_BYTEENABLE8(x) ((x) << S_BYTEENABLE8)
+#define G_BYTEENABLE8(x) (((x) >> S_BYTEENABLE8) & M_BYTEENABLE8)
+
+#define S_REGADDR8 0
+#define M_REGADDR8 0x7fffU
+#define V_REGADDR8(x) ((x) << S_REGADDR8)
+#define G_REGADDR8(x) (((x) >> S_REGADDR8) & M_REGADDR8)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA8 0x5f78
+#define A_PCIE_SNPS_G5_PHY_CR_REQ 0x5f7c
+
+#define S_REGSEL 31
+#define V_REGSEL(x) ((x) << S_REGSEL)
+#define F_REGSEL V_REGSEL(1U)
+
+#define S_RDENABLE 30
+#define V_RDENABLE(x) ((x) << S_RDENABLE)
+#define F_RDENABLE V_RDENABLE(1U)
+
+#define S_WRENABLE 29
+#define V_WRENABLE(x) ((x) << S_WRENABLE)
+#define F_WRENABLE V_WRENABLE(1U)
+
+#define S_AUTOINCRVAL 21
+#define M_AUTOINCRVAL 0x3U
+#define V_AUTOINCRVAL(x) ((x) << S_AUTOINCRVAL)
+#define G_AUTOINCRVAL(x) (((x) >> S_AUTOINCRVAL) & M_AUTOINCRVAL)
+
+#define S_AUTOINCR 20
+#define V_AUTOINCR(x) ((x) << S_AUTOINCR)
+#define F_AUTOINCR V_AUTOINCR(1U)
+
+#define S_PHYSEL 16
+#define M_PHYSEL 0xfU
+#define V_PHYSEL(x) ((x) << S_PHYSEL)
+#define G_PHYSEL(x) (((x) >> S_PHYSEL) & M_PHYSEL)
+
+#define S_T7_REGADDR 0
+#define M_T7_REGADDR 0xffffU
+#define V_T7_REGADDR(x) ((x) << S_T7_REGADDR)
+#define G_T7_REGADDR(x) (((x) >> S_T7_REGADDR) & M_T7_REGADDR)
+
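A_PCIE_SNPS_G5_PHY_CR_REQ and A_PCIE_SNPS_G5_PHY_CR_DATA form an indirect
window into the SerDes PHY register space. A sketch of a single read,
assuming the request completes before the data register is sampled (a real
driver would poll a busy/done indication that this header does not name):

	t4_write_reg(sc, A_PCIE_SNPS_G5_PHY_CR_REQ,
	    F_REGSEL | F_RDENABLE | V_PHYSEL(phy) | V_T7_REGADDR(addr));
	uint32_t data = t4_read_reg(sc, A_PCIE_SNPS_G5_PHY_CR_DATA);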
+#define A_PCIE_SNPS_G5_PHY_CR_DATA 0x5f80
+#define A_PCIE_SNPS_G5_PHY_SRAM_CFG 0x5f84
+
+#define S_PHY3_SRAM_BOOTLOAD_BYPASS 27
+#define V_PHY3_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY3_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY3_SRAM_BOOTLOAD_BYPASS V_PHY3_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY3_SRAM_BYPASS 26
+#define V_PHY3_SRAM_BYPASS(x) ((x) << S_PHY3_SRAM_BYPASS)
+#define F_PHY3_SRAM_BYPASS V_PHY3_SRAM_BYPASS(1U)
+
+#define S_PHY3_SRAM_ECC_EN 25
+#define V_PHY3_SRAM_ECC_EN(x) ((x) << S_PHY3_SRAM_ECC_EN)
+#define F_PHY3_SRAM_ECC_EN V_PHY3_SRAM_ECC_EN(1U)
+
+#define S_PHY3_SRAM_EXT_LD_DONE 24
+#define V_PHY3_SRAM_EXT_LD_DONE(x) ((x) << S_PHY3_SRAM_EXT_LD_DONE)
+#define F_PHY3_SRAM_EXT_LD_DONE V_PHY3_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY2_SRAM_BOOTLOAD_BYPASS 19
+#define V_PHY2_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY2_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY2_SRAM_BOOTLOAD_BYPASS V_PHY2_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY2_SRAM_BYPASS 18
+#define V_PHY2_SRAM_BYPASS(x) ((x) << S_PHY2_SRAM_BYPASS)
+#define F_PHY2_SRAM_BYPASS V_PHY2_SRAM_BYPASS(1U)
+
+#define S_PHY2_SRAM_ECC_EN 17
+#define V_PHY2_SRAM_ECC_EN(x) ((x) << S_PHY2_SRAM_ECC_EN)
+#define F_PHY2_SRAM_ECC_EN V_PHY2_SRAM_ECC_EN(1U)
+
+#define S_PHY2_SRAM_EXT_LD_DONE 16
+#define V_PHY2_SRAM_EXT_LD_DONE(x) ((x) << S_PHY2_SRAM_EXT_LD_DONE)
+#define F_PHY2_SRAM_EXT_LD_DONE V_PHY2_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY1_SRAM_BOOTLOAD_BYPASS 11
+#define V_PHY1_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY1_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY1_SRAM_BOOTLOAD_BYPASS V_PHY1_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY1_SRAM_BYPASS 10
+#define V_PHY1_SRAM_BYPASS(x) ((x) << S_PHY1_SRAM_BYPASS)
+#define F_PHY1_SRAM_BYPASS V_PHY1_SRAM_BYPASS(1U)
+
+#define S_PHY1_SRAM_ECC_EN 9
+#define V_PHY1_SRAM_ECC_EN(x) ((x) << S_PHY1_SRAM_ECC_EN)
+#define F_PHY1_SRAM_ECC_EN V_PHY1_SRAM_ECC_EN(1U)
+
+#define S_PHY1_SRAM_EXT_LD_DONE 8
+#define V_PHY1_SRAM_EXT_LD_DONE(x) ((x) << S_PHY1_SRAM_EXT_LD_DONE)
+#define F_PHY1_SRAM_EXT_LD_DONE V_PHY1_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY_CR_PARA_SEL 4
+#define M_PHY_CR_PARA_SEL 0xfU
+#define V_PHY_CR_PARA_SEL(x) ((x) << S_PHY_CR_PARA_SEL)
+#define G_PHY_CR_PARA_SEL(x) (((x) >> S_PHY_CR_PARA_SEL) & M_PHY_CR_PARA_SEL)
+
+#define S_PHY0_SRAM_BOOTLOAD_BYPASS 3
+#define V_PHY0_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY0_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY0_SRAM_BOOTLOAD_BYPASS V_PHY0_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY0_SRAM_BYPASS 2
+#define V_PHY0_SRAM_BYPASS(x) ((x) << S_PHY0_SRAM_BYPASS)
+#define F_PHY0_SRAM_BYPASS V_PHY0_SRAM_BYPASS(1U)
+
+#define S_PHY0_SRAM_ECC_EN 1
+#define V_PHY0_SRAM_ECC_EN(x) ((x) << S_PHY0_SRAM_ECC_EN)
+#define F_PHY0_SRAM_ECC_EN V_PHY0_SRAM_ECC_EN(1U)
+
+#define S_PHY0_SRAM_EXT_LD_DONE 0
+#define V_PHY0_SRAM_EXT_LD_DONE(x) ((x) << S_PHY0_SRAM_EXT_LD_DONE)
+#define F_PHY0_SRAM_EXT_LD_DONE V_PHY0_SRAM_EXT_LD_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_SRAM_STS 0x5f88
+
+#define S_PHY3_SRAM_INIT_DONE 3
+#define V_PHY3_SRAM_INIT_DONE(x) ((x) << S_PHY3_SRAM_INIT_DONE)
+#define F_PHY3_SRAM_INIT_DONE V_PHY3_SRAM_INIT_DONE(1U)
+
+#define S_PHY2_SRAM_INIT_DONE 2
+#define V_PHY2_SRAM_INIT_DONE(x) ((x) << S_PHY2_SRAM_INIT_DONE)
+#define F_PHY2_SRAM_INIT_DONE V_PHY2_SRAM_INIT_DONE(1U)
+
+#define S_PHY1_SRAM_INIT_DONE 1
+#define V_PHY1_SRAM_INIT_DONE(x) ((x) << S_PHY1_SRAM_INIT_DONE)
+#define F_PHY1_SRAM_INIT_DONE V_PHY1_SRAM_INIT_DONE(1U)
+
+#define S_PHY0_SRAM_INIT_DONE 0
+#define V_PHY0_SRAM_INIT_DONE(x) ((x) << S_PHY0_SRAM_INIT_DONE)
+#define F_PHY0_SRAM_INIT_DONE V_PHY0_SRAM_INIT_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_TO_3 0x5f90
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_DATA 0x5f94
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_1_DATA 0x5f98
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_2_DATA 0x5f9c
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_3_DATA 0x5fa0
+#define A_PCIE_SNPS_G5_PHY_DEFAULTS 0x5fa4
+#define A_PCIE_SNPS_G5_PHY_0_VALUES 0x5fa8
+
+#define S_RX_TERM_OFFSET 28
+#define V_RX_TERM_OFFSET(x) ((x) << S_RX_TERM_OFFSET)
+#define F_RX_TERM_OFFSET V_RX_TERM_OFFSET(1U)
+
+#define S_REFB_RAW_CLK_DIV2_EN 27
+#define V_REFB_RAW_CLK_DIV2_EN(x) ((x) << S_REFB_RAW_CLK_DIV2_EN)
+#define F_REFB_RAW_CLK_DIV2_EN V_REFB_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFB_RANGE 23
+#define M_REFB_RANGE 0xfU
+#define V_REFB_RANGE(x) ((x) << S_REFB_RANGE)
+#define G_REFB_RANGE(x) (((x) >> S_REFB_RANGE) & M_REFB_RANGE)
+
+#define S_REFB_LANE_CLK_EN 22
+#define V_REFB_LANE_CLK_EN(x) ((x) << S_REFB_LANE_CLK_EN)
+#define F_REFB_LANE_CLK_EN V_REFB_LANE_CLK_EN(1U)
+
+#define S_REFB_CLK_DIV2_EN 21
+#define V_REFB_CLK_DIV2_EN(x) ((x) << S_REFB_CLK_DIV2_EN)
+#define F_REFB_CLK_DIV2_EN V_REFB_CLK_DIV2_EN(1U)
+
+#define S_REFA_RAW_CLK_DIV2_EN 20
+#define V_REFA_RAW_CLK_DIV2_EN(x) ((x) << S_REFA_RAW_CLK_DIV2_EN)
+#define F_REFA_RAW_CLK_DIV2_EN V_REFA_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFA_RANGE 16
+#define M_REFA_RANGE 0xfU
+#define V_REFA_RANGE(x) ((x) << S_REFA_RANGE)
+#define G_REFA_RANGE(x) (((x) >> S_REFA_RANGE) & M_REFA_RANGE)
+
+#define S_REFA_LANE_CLK_EN 15
+#define V_REFA_LANE_CLK_EN(x) ((x) << S_REFA_LANE_CLK_EN)
+#define F_REFA_LANE_CLK_EN V_REFA_LANE_CLK_EN(1U)
+
+#define S_REFA_CLK_DIV2_EN 14
+#define V_REFA_CLK_DIV2_EN(x) ((x) << S_REFA_CLK_DIV2_EN)
+#define F_REFA_CLK_DIV2_EN V_REFA_CLK_DIV2_EN(1U)
+
+#define S_NOMINAL_VPH_SEL 10
+#define M_NOMINAL_VPH_SEL 0x3U
+#define V_NOMINAL_VPH_SEL(x) ((x) << S_NOMINAL_VPH_SEL)
+#define G_NOMINAL_VPH_SEL(x) (((x) >> S_NOMINAL_VPH_SEL) & M_NOMINAL_VPH_SEL)
+
+#define S_NOMINAL_VP_SEL 8
+#define M_NOMINAL_VP_SEL 0x3U
+#define V_NOMINAL_VP_SEL(x) ((x) << S_NOMINAL_VP_SEL)
+#define G_NOMINAL_VP_SEL(x) (((x) >> S_NOMINAL_VP_SEL) & M_NOMINAL_VP_SEL)
+
+#define S_MPLLB_WORD_CLK_EN 7
+#define V_MPLLB_WORD_CLK_EN(x) ((x) << S_MPLLB_WORD_CLK_EN)
+#define F_MPLLB_WORD_CLK_EN V_MPLLB_WORD_CLK_EN(1U)
+
+#define S_MPLLB_SSC_EN 6
+#define V_MPLLB_SSC_EN(x) ((x) << S_MPLLB_SSC_EN)
+#define F_MPLLB_SSC_EN V_MPLLB_SSC_EN(1U)
+
+#define S_MPLLB_SHORT_LOCK_EN 5
+#define V_MPLLB_SHORT_LOCK_EN(x) ((x) << S_MPLLB_SHORT_LOCK_EN)
+#define F_MPLLB_SHORT_LOCK_EN V_MPLLB_SHORT_LOCK_EN(1U)
+
+#define S_MPLLB_FORCE_EN 4
+#define V_MPLLB_FORCE_EN(x) ((x) << S_MPLLB_FORCE_EN)
+#define F_MPLLB_FORCE_EN V_MPLLB_FORCE_EN(1U)
+
+#define S_MPLLA_WORD_CLK_EN 3
+#define V_MPLLA_WORD_CLK_EN(x) ((x) << S_MPLLA_WORD_CLK_EN)
+#define F_MPLLA_WORD_CLK_EN V_MPLLA_WORD_CLK_EN(1U)
+
+#define S_MPLLA_SSC_EN 2
+#define V_MPLLA_SSC_EN(x) ((x) << S_MPLLA_SSC_EN)
+#define F_MPLLA_SSC_EN V_MPLLA_SSC_EN(1U)
+
+#define S_MPLLA_SHORT_LOCK_EN 1
+#define V_MPLLA_SHORT_LOCK_EN(x) ((x) << S_MPLLA_SHORT_LOCK_EN)
+#define F_MPLLA_SHORT_LOCK_EN V_MPLLA_SHORT_LOCK_EN(1U)
+
+#define S_MPLLA_FORCE_EN 0
+#define V_MPLLA_FORCE_EN(x) ((x) << S_MPLLA_FORCE_EN)
+#define F_MPLLA_FORCE_EN V_MPLLA_FORCE_EN(1U)
+
+#define A_PCIE_SNPS_G5_PHY_1_VALUES 0x5fac
+
+#define S_REF_ALT1_CLK_M 13
+#define V_REF_ALT1_CLK_M(x) ((x) << S_REF_ALT1_CLK_M)
+#define F_REF_ALT1_CLK_M V_REF_ALT1_CLK_M(1U)
+
+#define S_REF_ALT1_CLK_P 12
+#define V_REF_ALT1_CLK_P(x) ((x) << S_REF_ALT1_CLK_P)
+#define F_REF_ALT1_CLK_P V_REF_ALT1_CLK_P(1U)
+
+#define A_PCIE_SNPS_G5_PHY_2_VALUES 0x5fb0
+#define A_PCIE_SNPS_G5_PHY_3_VALUES 0x5fb4
+#define A_PCIE_SNPS_G5_PHY_0_RX_LANEPLL_BYPASS_MODE 0x5fb8
+
+#define S_T7_LANE3 15
+#define M_T7_LANE3 0x1fU
+#define V_T7_LANE3(x) ((x) << S_T7_LANE3)
+#define G_T7_LANE3(x) (((x) >> S_T7_LANE3) & M_T7_LANE3)
+
+#define S_T7_LANE2 10
+#define M_T7_LANE2 0x1fU
+#define V_T7_LANE2(x) ((x) << S_T7_LANE2)
+#define G_T7_LANE2(x) (((x) >> S_T7_LANE2) & M_T7_LANE2)
+
+#define S_T7_LANE1 5
+#define M_T7_LANE1 0x1fU
+#define V_T7_LANE1(x) ((x) << S_T7_LANE1)
+#define G_T7_LANE1(x) (((x) >> S_T7_LANE1) & M_T7_LANE1)
+
+#define S_T7_LANE0 0
+#define M_T7_LANE0 0x1fU
+#define V_T7_LANE0(x) ((x) << S_T7_LANE0)
+#define G_T7_LANE0(x) (((x) >> S_T7_LANE0) & M_T7_LANE0)
+
+#define A_PCIE_SNPS_G5_PHY_1_RX_LANEPLL_BYPASS_MODE 0x5fbc
+#define A_PCIE_SNPS_G5_PHY_2_RX_LANEPLL_BYPASS_MODE 0x5fc0
+#define A_PCIE_SNPS_G5_PHY_3_RX_LANEPLL_BYPASS_MODE 0x5fc4
+#define A_PCIE_SNPS_G5_PHY_0_1_RX_LANEPLL_SRC_SEL 0x5fc8
+
+#define S_LANE7_LANEPLL_SRC_SEL 28
+#define M_LANE7_LANEPLL_SRC_SEL 0xfU
+#define V_LANE7_LANEPLL_SRC_SEL(x) ((x) << S_LANE7_LANEPLL_SRC_SEL)
+#define G_LANE7_LANEPLL_SRC_SEL(x) (((x) >> S_LANE7_LANEPLL_SRC_SEL) & M_LANE7_LANEPLL_SRC_SEL)
+
+#define S_LANE6_LANEPLL_SRC_SEL 24
+#define M_LANE6_LANEPLL_SRC_SEL 0xfU
+#define V_LANE6_LANEPLL_SRC_SEL(x) ((x) << S_LANE6_LANEPLL_SRC_SEL)
+#define G_LANE6_LANEPLL_SRC_SEL(x) (((x) >> S_LANE6_LANEPLL_SRC_SEL) & M_LANE6_LANEPLL_SRC_SEL)
+
+#define S_LANE5_LANEPLL_SRC_SEL 20
+#define M_LANE5_LANEPLL_SRC_SEL 0xfU
+#define V_LANE5_LANEPLL_SRC_SEL(x) ((x) << S_LANE5_LANEPLL_SRC_SEL)
+#define G_LANE5_LANEPLL_SRC_SEL(x) (((x) >> S_LANE5_LANEPLL_SRC_SEL) & M_LANE5_LANEPLL_SRC_SEL)
+
+#define S_LANE4_LANEPLL_SRC_SEL 16
+#define M_LANE4_LANEPLL_SRC_SEL 0xfU
+#define V_LANE4_LANEPLL_SRC_SEL(x) ((x) << S_LANE4_LANEPLL_SRC_SEL)
+#define G_LANE4_LANEPLL_SRC_SEL(x) (((x) >> S_LANE4_LANEPLL_SRC_SEL) & M_LANE4_LANEPLL_SRC_SEL)
+
+#define S_LANE3_LANEPLL_SRC_SEL 12
+#define M_LANE3_LANEPLL_SRC_SEL 0xfU
+#define V_LANE3_LANEPLL_SRC_SEL(x) ((x) << S_LANE3_LANEPLL_SRC_SEL)
+#define G_LANE3_LANEPLL_SRC_SEL(x) (((x) >> S_LANE3_LANEPLL_SRC_SEL) & M_LANE3_LANEPLL_SRC_SEL)
+
+#define S_LANE2_LANEPLL_SRC_SEL 8
+#define M_LANE2_LANEPLL_SRC_SEL 0xfU
+#define V_LANE2_LANEPLL_SRC_SEL(x) ((x) << S_LANE2_LANEPLL_SRC_SEL)
+#define G_LANE2_LANEPLL_SRC_SEL(x) (((x) >> S_LANE2_LANEPLL_SRC_SEL) & M_LANE2_LANEPLL_SRC_SEL)
+
+#define S_LANE1_LANEPLL_SRC_SEL 4
+#define M_LANE1_LANEPLL_SRC_SEL 0xfU
+#define V_LANE1_LANEPLL_SRC_SEL(x) ((x) << S_LANE1_LANEPLL_SRC_SEL)
+#define G_LANE1_LANEPLL_SRC_SEL(x) (((x) >> S_LANE1_LANEPLL_SRC_SEL) & M_LANE1_LANEPLL_SRC_SEL)
+
+#define S_LANE0_LANEPLL_SRC_SEL 0
+#define M_LANE0_LANEPLL_SRC_SEL 0xfU
+#define V_LANE0_LANEPLL_SRC_SEL(x) ((x) << S_LANE0_LANEPLL_SRC_SEL)
+#define G_LANE0_LANEPLL_SRC_SEL(x) (((x) >> S_LANE0_LANEPLL_SRC_SEL) & M_LANE0_LANEPLL_SRC_SEL)
+
+#define A_PCIE_SNPS_G5_PHY_2_3_RX_LANEPLL_SRC_SEL 0x5fcc
+#define A_PCIE_SNPS_G5_PHY_RX_DECERR 0x5fd0
+
+#define S_LANE15_REC_OVRD_8B10B_DECERR 30
+#define M_LANE15_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE15_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE15_REC_OVRD_8B10B_DECERR)
+#define G_LANE15_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE15_REC_OVRD_8B10B_DECERR) & M_LANE15_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE14_REC_OVRD_8B10B_DECERR 28
+#define M_LANE14_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE14_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE14_REC_OVRD_8B10B_DECERR)
+#define G_LANE14_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE14_REC_OVRD_8B10B_DECERR) & M_LANE14_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE13_REC_OVRD_8B10B_DECERR 26
+#define M_LANE13_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE13_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE13_REC_OVRD_8B10B_DECERR)
+#define G_LANE13_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE13_REC_OVRD_8B10B_DECERR) & M_LANE13_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE12_REC_OVRD_8B10B_DECERR 24
+#define M_LANE12_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE12_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE12_REC_OVRD_8B10B_DECERR)
+#define G_LANE12_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE12_REC_OVRD_8B10B_DECERR) & M_LANE12_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE11_REC_OVRD_8B10B_DECERR 22
+#define M_LANE11_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE11_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE11_REC_OVRD_8B10B_DECERR)
+#define G_LANE11_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE11_REC_OVRD_8B10B_DECERR) & M_LANE11_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE10_REC_OVRD_8B10B_DECERR 20
+#define M_LANE10_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE10_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE10_REC_OVRD_8B10B_DECERR)
+#define G_LANE10_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE10_REC_OVRD_8B10B_DECERR) & M_LANE10_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE9_REC_OVRD_8B10B_DECERR 18
+#define M_LANE9_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE9_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE9_REC_OVRD_8B10B_DECERR)
+#define G_LANE9_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE9_REC_OVRD_8B10B_DECERR) & M_LANE9_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE8_REC_OVRD_8B10B_DECERR 16
+#define M_LANE8_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE8_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE8_REC_OVRD_8B10B_DECERR)
+#define G_LANE8_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE8_REC_OVRD_8B10B_DECERR) & M_LANE8_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE7_REC_OVRD_8B10B_DECERR 14
+#define M_LANE7_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE7_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE7_REC_OVRD_8B10B_DECERR)
+#define G_LANE7_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE7_REC_OVRD_8B10B_DECERR) & M_LANE7_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE6_REC_OVRD_8B10B_DECERR 12
+#define M_LANE6_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE6_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE6_REC_OVRD_8B10B_DECERR)
+#define G_LANE6_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE6_REC_OVRD_8B10B_DECERR) & M_LANE6_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE5_REC_OVRD_8B10B_DECERR 10
+#define M_LANE5_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE5_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE5_REC_OVRD_8B10B_DECERR)
+#define G_LANE5_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE5_REC_OVRD_8B10B_DECERR) & M_LANE5_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE4_REC_OVRD_8B10B_DECERR 8
+#define M_LANE4_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE4_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE4_REC_OVRD_8B10B_DECERR)
+#define G_LANE4_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE4_REC_OVRD_8B10B_DECERR) & M_LANE4_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE3_REC_OVRD_8B10B_DECERR 6
+#define M_LANE3_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE3_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE3_REC_OVRD_8B10B_DECERR)
+#define G_LANE3_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE3_REC_OVRD_8B10B_DECERR) & M_LANE3_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE2_REC_OVRD_8B10B_DECERR 4
+#define M_LANE2_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE2_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE2_REC_OVRD_8B10B_DECERR)
+#define G_LANE2_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE2_REC_OVRD_8B10B_DECERR) & M_LANE2_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE1_REC_OVRD_8B10B_DECERR 2
+#define M_LANE1_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE1_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE1_REC_OVRD_8B10B_DECERR)
+#define G_LANE1_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE1_REC_OVRD_8B10B_DECERR) & M_LANE1_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE0_REC_OVRD_8B10B_DECERR 0
+#define M_LANE0_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE0_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE0_REC_OVRD_8B10B_DECERR)
+#define G_LANE0_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE0_REC_OVRD_8B10B_DECERR) & M_LANE0_REC_OVRD_8B10B_DECERR)
+
+#define A_PCIE_SNPS_G5_PHY_TX2RX_LOOPBK_REC_OVRD_EN 0x5fd4
+
+#define S_LANE15_REC_OVRD_EN 31
+#define V_LANE15_REC_OVRD_EN(x) ((x) << S_LANE15_REC_OVRD_EN)
+#define F_LANE15_REC_OVRD_EN V_LANE15_REC_OVRD_EN(1U)
+
+#define S_LANE14_REC_OVRD_EN 30
+#define V_LANE14_REC_OVRD_EN(x) ((x) << S_LANE14_REC_OVRD_EN)
+#define F_LANE14_REC_OVRD_EN V_LANE14_REC_OVRD_EN(1U)
+
+#define S_LANE13_REC_OVRD_EN 29
+#define V_LANE13_REC_OVRD_EN(x) ((x) << S_LANE13_REC_OVRD_EN)
+#define F_LANE13_REC_OVRD_EN V_LANE13_REC_OVRD_EN(1U)
+
+#define S_LANE12_REC_OVRD_EN 28
+#define V_LANE12_REC_OVRD_EN(x) ((x) << S_LANE12_REC_OVRD_EN)
+#define F_LANE12_REC_OVRD_EN V_LANE12_REC_OVRD_EN(1U)
+
+#define S_LANE11_REC_OVRD_EN 27
+#define V_LANE11_REC_OVRD_EN(x) ((x) << S_LANE11_REC_OVRD_EN)
+#define F_LANE11_REC_OVRD_EN V_LANE11_REC_OVRD_EN(1U)
+
+#define S_LANE10_REC_OVRD_EN 26
+#define V_LANE10_REC_OVRD_EN(x) ((x) << S_LANE10_REC_OVRD_EN)
+#define F_LANE10_REC_OVRD_EN V_LANE10_REC_OVRD_EN(1U)
+
+#define S_LANE9_REC_OVRD_EN 25
+#define V_LANE9_REC_OVRD_EN(x) ((x) << S_LANE9_REC_OVRD_EN)
+#define F_LANE9_REC_OVRD_EN V_LANE9_REC_OVRD_EN(1U)
+
+#define S_LANE8_REC_OVRD_EN 24
+#define V_LANE8_REC_OVRD_EN(x) ((x) << S_LANE8_REC_OVRD_EN)
+#define F_LANE8_REC_OVRD_EN V_LANE8_REC_OVRD_EN(1U)
+
+#define S_LANE7_REC_OVRD_EN 23
+#define V_LANE7_REC_OVRD_EN(x) ((x) << S_LANE7_REC_OVRD_EN)
+#define F_LANE7_REC_OVRD_EN V_LANE7_REC_OVRD_EN(1U)
+
+#define S_LANE6_REC_OVRD_EN 22
+#define V_LANE6_REC_OVRD_EN(x) ((x) << S_LANE6_REC_OVRD_EN)
+#define F_LANE6_REC_OVRD_EN V_LANE6_REC_OVRD_EN(1U)
+
+#define S_LANE5_REC_OVRD_EN 21
+#define V_LANE5_REC_OVRD_EN(x) ((x) << S_LANE5_REC_OVRD_EN)
+#define F_LANE5_REC_OVRD_EN V_LANE5_REC_OVRD_EN(1U)
+
+#define S_LANE4_REC_OVRD_EN 20
+#define V_LANE4_REC_OVRD_EN(x) ((x) << S_LANE4_REC_OVRD_EN)
+#define F_LANE4_REC_OVRD_EN V_LANE4_REC_OVRD_EN(1U)
+
+#define S_LANE3_REC_OVRD_EN 19
+#define V_LANE3_REC_OVRD_EN(x) ((x) << S_LANE3_REC_OVRD_EN)
+#define F_LANE3_REC_OVRD_EN V_LANE3_REC_OVRD_EN(1U)
+
+#define S_LANE2_REC_OVRD_EN 18
+#define V_LANE2_REC_OVRD_EN(x) ((x) << S_LANE2_REC_OVRD_EN)
+#define F_LANE2_REC_OVRD_EN V_LANE2_REC_OVRD_EN(1U)
+
+#define S_LANE1_REC_OVRD_EN 17
+#define V_LANE1_REC_OVRD_EN(x) ((x) << S_LANE1_REC_OVRD_EN)
+#define F_LANE1_REC_OVRD_EN V_LANE1_REC_OVRD_EN(1U)
+
+#define S_LANE0_REC_OVRD_EN 16
+#define V_LANE0_REC_OVRD_EN(x) ((x) << S_LANE0_REC_OVRD_EN)
+#define F_LANE0_REC_OVRD_EN V_LANE0_REC_OVRD_EN(1U)
+
+#define S_LANE15_TX2RX_LOOPBK 15
+#define V_LANE15_TX2RX_LOOPBK(x) ((x) << S_LANE15_TX2RX_LOOPBK)
+#define F_LANE15_TX2RX_LOOPBK V_LANE15_TX2RX_LOOPBK(1U)
+
+#define S_LANE14_TX2RX_LOOPBK 14
+#define V_LANE14_TX2RX_LOOPBK(x) ((x) << S_LANE14_TX2RX_LOOPBK)
+#define F_LANE14_TX2RX_LOOPBK V_LANE14_TX2RX_LOOPBK(1U)
+
+#define S_LANE13_TX2RX_LOOPBK 13
+#define V_LANE13_TX2RX_LOOPBK(x) ((x) << S_LANE13_TX2RX_LOOPBK)
+#define F_LANE13_TX2RX_LOOPBK V_LANE13_TX2RX_LOOPBK(1U)
+
+#define S_LANE12_TX2RX_LOOPBK 12
+#define V_LANE12_TX2RX_LOOPBK(x) ((x) << S_LANE12_TX2RX_LOOPBK)
+#define F_LANE12_TX2RX_LOOPBK V_LANE12_TX2RX_LOOPBK(1U)
+
+#define S_LANE11_TX2RX_LOOPBK 11
+#define V_LANE11_TX2RX_LOOPBK(x) ((x) << S_LANE11_TX2RX_LOOPBK)
+#define F_LANE11_TX2RX_LOOPBK V_LANE11_TX2RX_LOOPBK(1U)
+
+#define S_LANE10_TX2RX_LOOPBK 10
+#define V_LANE10_TX2RX_LOOPBK(x) ((x) << S_LANE10_TX2RX_LOOPBK)
+#define F_LANE10_TX2RX_LOOPBK V_LANE10_TX2RX_LOOPBK(1U)
+
+#define S_LANE9_TX2RX_LOOPBK 9
+#define V_LANE9_TX2RX_LOOPBK(x) ((x) << S_LANE9_TX2RX_LOOPBK)
+#define F_LANE9_TX2RX_LOOPBK V_LANE9_TX2RX_LOOPBK(1U)
+
+#define S_LANE8_TX2RX_LOOPBK 8
+#define V_LANE8_TX2RX_LOOPBK(x) ((x) << S_LANE8_TX2RX_LOOPBK)
+#define F_LANE8_TX2RX_LOOPBK V_LANE8_TX2RX_LOOPBK(1U)
+
+#define S_LANE7_TX2RX_LOOPBK 7
+#define V_LANE7_TX2RX_LOOPBK(x) ((x) << S_LANE7_TX2RX_LOOPBK)
+#define F_LANE7_TX2RX_LOOPBK V_LANE7_TX2RX_LOOPBK(1U)
+
+#define S_LANE6_TX2RX_LOOPBK 6
+#define V_LANE6_TX2RX_LOOPBK(x) ((x) << S_LANE6_TX2RX_LOOPBK)
+#define F_LANE6_TX2RX_LOOPBK V_LANE6_TX2RX_LOOPBK(1U)
+
+#define S_LANE5_TX2RX_LOOPBK 5
+#define V_LANE5_TX2RX_LOOPBK(x) ((x) << S_LANE5_TX2RX_LOOPBK)
+#define F_LANE5_TX2RX_LOOPBK V_LANE5_TX2RX_LOOPBK(1U)
+
+#define S_LANE4_TX2RX_LOOPBK 4
+#define V_LANE4_TX2RX_LOOPBK(x) ((x) << S_LANE4_TX2RX_LOOPBK)
+#define F_LANE4_TX2RX_LOOPBK V_LANE4_TX2RX_LOOPBK(1U)
+
+#define S_LANE3_TX2RX_LOOPBK 3
+#define V_LANE3_TX2RX_LOOPBK(x) ((x) << S_LANE3_TX2RX_LOOPBK)
+#define F_LANE3_TX2RX_LOOPBK V_LANE3_TX2RX_LOOPBK(1U)
+
+#define S_LANE2_TX2RX_LOOPBK 2
+#define V_LANE2_TX2RX_LOOPBK(x) ((x) << S_LANE2_TX2RX_LOOPBK)
+#define F_LANE2_TX2RX_LOOPBK V_LANE2_TX2RX_LOOPBK(1U)
+
+#define S_LANE1_TX2RX_LOOPBK 1
+#define V_LANE1_TX2RX_LOOPBK(x) ((x) << S_LANE1_TX2RX_LOOPBK)
+#define F_LANE1_TX2RX_LOOPBK V_LANE1_TX2RX_LOOPBK(1U)
+
+#define S_LANE0_TX2RX_LOOPBK 0
+#define V_LANE0_TX2RX_LOOPBK(x) ((x) << S_LANE0_TX2RX_LOOPBK)
+#define F_LANE0_TX2RX_LOOPBK V_LANE0_TX2RX_LOOPBK(1U)
+
+#define A_PCIE_PHY_TX_DISABLE_UPCS_PIPE_CONFIG 0x5fd8
+
+#define S_UPCS_PIPE_CONFIG 16
+#define M_UPCS_PIPE_CONFIG 0xffffU
+#define V_UPCS_PIPE_CONFIG(x) ((x) << S_UPCS_PIPE_CONFIG)
+#define G_UPCS_PIPE_CONFIG(x) (((x) >> S_UPCS_PIPE_CONFIG) & M_UPCS_PIPE_CONFIG)
+
+#define S_TX15_DISABLE 15
+#define V_TX15_DISABLE(x) ((x) << S_TX15_DISABLE)
+#define F_TX15_DISABLE V_TX15_DISABLE(1U)
+
+#define S_TX14_DISABLE 14
+#define V_TX14_DISABLE(x) ((x) << S_TX14_DISABLE)
+#define F_TX14_DISABLE V_TX14_DISABLE(1U)
+
+#define S_TX13_DISABLE 13
+#define V_TX13_DISABLE(x) ((x) << S_TX13_DISABLE)
+#define F_TX13_DISABLE V_TX13_DISABLE(1U)
+
+#define S_TX12_DISABLE 12
+#define V_TX12_DISABLE(x) ((x) << S_TX12_DISABLE)
+#define F_TX12_DISABLE V_TX12_DISABLE(1U)
+
+#define S_TX11_DISABLE 11
+#define V_TX11_DISABLE(x) ((x) << S_TX11_DISABLE)
+#define F_TX11_DISABLE V_TX11_DISABLE(1U)
+
+#define S_TX10_DISABLE 10
+#define V_TX10_DISABLE(x) ((x) << S_TX10_DISABLE)
+#define F_TX10_DISABLE V_TX10_DISABLE(1U)
+
+#define S_TX9_DISABLE 9
+#define V_TX9_DISABLE(x) ((x) << S_TX9_DISABLE)
+#define F_TX9_DISABLE V_TX9_DISABLE(1U)
+
+#define S_TX8_DISABLE 8
+#define V_TX8_DISABLE(x) ((x) << S_TX8_DISABLE)
+#define F_TX8_DISABLE V_TX8_DISABLE(1U)
+
+#define S_TX7_DISABLE 7
+#define V_TX7_DISABLE(x) ((x) << S_TX7_DISABLE)
+#define F_TX7_DISABLE V_TX7_DISABLE(1U)
+
+#define S_TX6_DISABLE 6
+#define V_TX6_DISABLE(x) ((x) << S_TX6_DISABLE)
+#define F_TX6_DISABLE V_TX6_DISABLE(1U)
+
+#define S_TX5_DISABLE 5
+#define V_TX5_DISABLE(x) ((x) << S_TX5_DISABLE)
+#define F_TX5_DISABLE V_TX5_DISABLE(1U)
+
+#define S_TX4_DISABLE 4
+#define V_TX4_DISABLE(x) ((x) << S_TX4_DISABLE)
+#define F_TX4_DISABLE V_TX4_DISABLE(1U)
+
+#define S_TX3_DISABLE 3
+#define V_TX3_DISABLE(x) ((x) << S_TX3_DISABLE)
+#define F_TX3_DISABLE V_TX3_DISABLE(1U)
+
+#define S_TX2_DISABLE 2
+#define V_TX2_DISABLE(x) ((x) << S_TX2_DISABLE)
+#define F_TX2_DISABLE V_TX2_DISABLE(1U)
+
+#define S_TX1_DISABLE 1
+#define V_TX1_DISABLE(x) ((x) << S_TX1_DISABLE)
+#define F_TX1_DISABLE V_TX1_DISABLE(1U)
+
+#define S_TX0_DISABLE 0
+#define V_TX0_DISABLE(x) ((x) << S_TX0_DISABLE)
+#define F_TX0_DISABLE V_TX0_DISABLE(1U)
+
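Because the same register carries the UPCS pipe configuration in its upper
half, the per-lane TX disables want a read-modify-write. A sketch for
taking one transmitter down:

	uint32_t v = t4_read_reg(sc, A_PCIE_PHY_TX_DISABLE_UPCS_PIPE_CONFIG);
	t4_write_reg(sc, A_PCIE_PHY_TX_DISABLE_UPCS_PIPE_CONFIG,
	    v | F_TX3_DISABLE);		/* disable lane 3 TX */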
#define A_PCIE_PDEBUG_REG_0X0 0x0
#define A_PCIE_PDEBUG_REG_0X1 0x1
#define A_PCIE_PDEBUG_REG_0X2 0x2
@@ -11668,6 +14476,40 @@
#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+#define A_DBG_GPIO_OUT 0x6010
+
+#define S_GPIO23_OUT_VAL 23
+#define V_GPIO23_OUT_VAL(x) ((x) << S_GPIO23_OUT_VAL)
+#define F_GPIO23_OUT_VAL V_GPIO23_OUT_VAL(1U)
+
+#define S_GPIO22_OUT_VAL 22
+#define V_GPIO22_OUT_VAL(x) ((x) << S_GPIO22_OUT_VAL)
+#define F_GPIO22_OUT_VAL V_GPIO22_OUT_VAL(1U)
+
+#define S_GPIO21_OUT_VAL 21
+#define V_GPIO21_OUT_VAL(x) ((x) << S_GPIO21_OUT_VAL)
+#define F_GPIO21_OUT_VAL V_GPIO21_OUT_VAL(1U)
+
+#define S_GPIO20_OUT_VAL 20
+#define V_GPIO20_OUT_VAL(x) ((x) << S_GPIO20_OUT_VAL)
+#define F_GPIO20_OUT_VAL V_GPIO20_OUT_VAL(1U)
+
+#define S_T7_GPIO19_OUT_VAL 19
+#define V_T7_GPIO19_OUT_VAL(x) ((x) << S_T7_GPIO19_OUT_VAL)
+#define F_T7_GPIO19_OUT_VAL V_T7_GPIO19_OUT_VAL(1U)
+
+#define S_T7_GPIO18_OUT_VAL 18
+#define V_T7_GPIO18_OUT_VAL(x) ((x) << S_T7_GPIO18_OUT_VAL)
+#define F_T7_GPIO18_OUT_VAL V_T7_GPIO18_OUT_VAL(1U)
+
+#define S_T7_GPIO17_OUT_VAL 17
+#define V_T7_GPIO17_OUT_VAL(x) ((x) << S_T7_GPIO17_OUT_VAL)
+#define F_T7_GPIO17_OUT_VAL V_T7_GPIO17_OUT_VAL(1U)
+
+#define S_T7_GPIO16_OUT_VAL 16
+#define V_T7_GPIO16_OUT_VAL(x) ((x) << S_T7_GPIO16_OUT_VAL)
+#define F_T7_GPIO16_OUT_VAL V_T7_GPIO16_OUT_VAL(1U)
+
#define A_DBG_GPIO_IN 0x6014
#define S_GPIO15_CHG_DET 31
@@ -11798,6 +14640,38 @@
#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
#define F_GPIO0_IN V_GPIO0_IN(1U)
+#define S_GPIO23_IN 23
+#define V_GPIO23_IN(x) ((x) << S_GPIO23_IN)
+#define F_GPIO23_IN V_GPIO23_IN(1U)
+
+#define S_GPIO22_IN 22
+#define V_GPIO22_IN(x) ((x) << S_GPIO22_IN)
+#define F_GPIO22_IN V_GPIO22_IN(1U)
+
+#define S_GPIO21_IN 21
+#define V_GPIO21_IN(x) ((x) << S_GPIO21_IN)
+#define F_GPIO21_IN V_GPIO21_IN(1U)
+
+#define S_GPIO20_IN 20
+#define V_GPIO20_IN(x) ((x) << S_GPIO20_IN)
+#define F_GPIO20_IN V_GPIO20_IN(1U)
+
+#define S_T7_GPIO19_IN 19
+#define V_T7_GPIO19_IN(x) ((x) << S_T7_GPIO19_IN)
+#define F_T7_GPIO19_IN V_T7_GPIO19_IN(1U)
+
+#define S_T7_GPIO18_IN 18
+#define V_T7_GPIO18_IN(x) ((x) << S_T7_GPIO18_IN)
+#define F_T7_GPIO18_IN V_T7_GPIO18_IN(1U)
+
+#define S_T7_GPIO17_IN 17
+#define V_T7_GPIO17_IN(x) ((x) << S_T7_GPIO17_IN)
+#define F_T7_GPIO17_IN V_T7_GPIO17_IN(1U)
+
+#define S_T7_GPIO16_IN 16
+#define V_T7_GPIO16_IN(x) ((x) << S_T7_GPIO16_IN)
+#define F_T7_GPIO16_IN V_T7_GPIO16_IN(1U)
+
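The T7 additions extend the GPIO input status into bits 23:16. Sampling a
pin is a single masked read, e.g. for GPIO23:

	bool hi = (t4_read_reg(sc, A_DBG_GPIO_IN) & F_GPIO23_IN) != 0;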
#define A_DBG_INT_ENABLE 0x6018
#define S_IBM_FDL_FAIL_INT_ENBL 25
@@ -11920,6 +14794,58 @@
#define V_GPIO16(x) ((x) << S_GPIO16)
#define F_GPIO16 V_GPIO16(1U)
+#define S_USBFIFOPARERR 12
+#define V_USBFIFOPARERR(x) ((x) << S_USBFIFOPARERR)
+#define F_USBFIFOPARERR V_USBFIFOPARERR(1U)
+
+#define S_T7_IBM_FDL_FAIL_INT_ENBL 11
+#define V_T7_IBM_FDL_FAIL_INT_ENBL(x) ((x) << S_T7_IBM_FDL_FAIL_INT_ENBL)
+#define F_T7_IBM_FDL_FAIL_INT_ENBL V_T7_IBM_FDL_FAIL_INT_ENBL(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_ENBL 10
+#define V_T7_PLL_LOCK_LOST_INT_ENBL(x) ((x) << S_T7_PLL_LOCK_LOST_INT_ENBL)
+#define F_T7_PLL_LOCK_LOST_INT_ENBL V_T7_PLL_LOCK_LOST_INT_ENBL(1U)
+
+#define S_M1_LOCK 9
+#define V_M1_LOCK(x) ((x) << S_M1_LOCK)
+#define F_M1_LOCK V_M1_LOCK(1U)
+
+#define S_T7_PCIE_LOCK 8
+#define V_T7_PCIE_LOCK(x) ((x) << S_T7_PCIE_LOCK)
+#define F_T7_PCIE_LOCK V_T7_PCIE_LOCK(1U)
+
+#define S_T7_U_LOCK 7
+#define V_T7_U_LOCK(x) ((x) << S_T7_U_LOCK)
+#define F_T7_U_LOCK V_T7_U_LOCK(1U)
+
+#define S_MAC_LOCK 6
+#define V_MAC_LOCK(x) ((x) << S_MAC_LOCK)
+#define F_MAC_LOCK V_MAC_LOCK(1U)
+
+#define S_ARM_LOCK 5
+#define V_ARM_LOCK(x) ((x) << S_ARM_LOCK)
+#define F_ARM_LOCK V_ARM_LOCK(1U)
+
+#define S_M0_LOCK 4
+#define V_M0_LOCK(x) ((x) << S_M0_LOCK)
+#define F_M0_LOCK V_M0_LOCK(1U)
+
+#define S_XGPBUS_LOCK 3
+#define V_XGPBUS_LOCK(x) ((x) << S_XGPBUS_LOCK)
+#define F_XGPBUS_LOCK V_XGPBUS_LOCK(1U)
+
+#define S_XGPHY_LOCK 2
+#define V_XGPHY_LOCK(x) ((x) << S_XGPHY_LOCK)
+#define F_XGPHY_LOCK V_XGPHY_LOCK(1U)
+
+#define S_USB_LOCK 1
+#define V_USB_LOCK(x) ((x) << S_USB_LOCK)
+#define F_USB_LOCK V_USB_LOCK(1U)
+
+#define S_T7_C_LOCK 0
+#define V_T7_C_LOCK(x) ((x) << S_T7_C_LOCK)
+#define F_T7_C_LOCK V_T7_C_LOCK(1U)
+
#define A_DBG_INT_CAUSE 0x601c
#define S_IBM_FDL_FAIL_INT_CAUSE 25
@@ -11938,6 +14864,14 @@
#define V_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_PLL_LOCK_LOST_INT_CAUSE)
#define F_PLL_LOCK_LOST_INT_CAUSE V_PLL_LOCK_LOST_INT_CAUSE(1U)
+#define S_T7_IBM_FDL_FAIL_INT_CAUSE 11
+#define V_T7_IBM_FDL_FAIL_INT_CAUSE(x) ((x) << S_T7_IBM_FDL_FAIL_INT_CAUSE)
+#define F_T7_IBM_FDL_FAIL_INT_CAUSE V_T7_IBM_FDL_FAIL_INT_CAUSE(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_CAUSE 10
+#define V_T7_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_T7_PLL_LOCK_LOST_INT_CAUSE)
+#define F_T7_PLL_LOCK_LOST_INT_CAUSE V_T7_PLL_LOCK_LOST_INT_CAUSE(1U)
+
#define A_DBG_DBG0_RST_VALUE 0x6020
#define S_DEBUGDATA 0
@@ -11977,6 +14911,10 @@
#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+#define S_INIC_MODE_EN 0
+#define V_INIC_MODE_EN(x) ((x) << S_INIC_MODE_EN)
+#define F_INIC_MODE_EN V_INIC_MODE_EN(1U)
+
#define A_DBG_PLL_LOCK 0x602c
#define S_PLL_P_LOCK 20
@@ -12003,6 +14941,38 @@
#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+#define S_T7_PLL_M_LOCK 9
+#define V_T7_PLL_M_LOCK(x) ((x) << S_T7_PLL_M_LOCK)
+#define F_T7_PLL_M_LOCK V_T7_PLL_M_LOCK(1U)
+
+#define S_PLL_PCIE_LOCK 8
+#define V_PLL_PCIE_LOCK(x) ((x) << S_PLL_PCIE_LOCK)
+#define F_PLL_PCIE_LOCK V_PLL_PCIE_LOCK(1U)
+
+#define S_T7_PLL_U_LOCK 7
+#define V_T7_PLL_U_LOCK(x) ((x) << S_T7_PLL_U_LOCK)
+#define F_T7_PLL_U_LOCK V_T7_PLL_U_LOCK(1U)
+
+#define S_PLL_MAC_LOCK 6
+#define V_PLL_MAC_LOCK(x) ((x) << S_PLL_MAC_LOCK)
+#define F_PLL_MAC_LOCK V_PLL_MAC_LOCK(1U)
+
+#define S_PLL_ARM_LOCK 5
+#define V_PLL_ARM_LOCK(x) ((x) << S_PLL_ARM_LOCK)
+#define F_PLL_ARM_LOCK V_PLL_ARM_LOCK(1U)
+
+#define S_PLL_XGPBUS_LOCK 3
+#define V_PLL_XGPBUS_LOCK(x) ((x) << S_PLL_XGPBUS_LOCK)
+#define F_PLL_XGPBUS_LOCK V_PLL_XGPBUS_LOCK(1U)
+
+#define S_PLL_XGPHY_LOCK 2
+#define V_PLL_XGPHY_LOCK(x) ((x) << S_PLL_XGPHY_LOCK)
+#define F_PLL_XGPHY_LOCK V_PLL_XGPHY_LOCK(1U)
+
+#define S_PLL_USB_LOCK 1
+#define V_PLL_USB_LOCK(x) ((x) << S_PLL_USB_LOCK)
+#define F_PLL_USB_LOCK V_PLL_USB_LOCK(1U)
+
#define A_DBG_GPIO_ACT_LOW 0x6030
#define S_P_LOCK_ACT_LOW 21
@@ -12109,6 +15079,48 @@
#define V_GPIO16_ACT_LOW(x) ((x) << S_GPIO16_ACT_LOW)
#define F_GPIO16_ACT_LOW V_GPIO16_ACT_LOW(1U)
+#define A_DBG_PLL_LOCK_ACT_LOW 0x6030
+
+#define S_M1_LOCK_ACT_LOW 9
+#define V_M1_LOCK_ACT_LOW(x) ((x) << S_M1_LOCK_ACT_LOW)
+#define F_M1_LOCK_ACT_LOW V_M1_LOCK_ACT_LOW(1U)
+
+#define S_PCIE_LOCK_ACT_LOW 8
+#define V_PCIE_LOCK_ACT_LOW(x) ((x) << S_PCIE_LOCK_ACT_LOW)
+#define F_PCIE_LOCK_ACT_LOW V_PCIE_LOCK_ACT_LOW(1U)
+
+#define S_T7_U_LOCK_ACT_LOW 7
+#define V_T7_U_LOCK_ACT_LOW(x) ((x) << S_T7_U_LOCK_ACT_LOW)
+#define F_T7_U_LOCK_ACT_LOW V_T7_U_LOCK_ACT_LOW(1U)
+
+#define S_MAC_LOCK_ACT_LOW 6
+#define V_MAC_LOCK_ACT_LOW(x) ((x) << S_MAC_LOCK_ACT_LOW)
+#define F_MAC_LOCK_ACT_LOW V_MAC_LOCK_ACT_LOW(1U)
+
+#define S_ARM_LOCK_ACT_LOW 5
+#define V_ARM_LOCK_ACT_LOW(x) ((x) << S_ARM_LOCK_ACT_LOW)
+#define F_ARM_LOCK_ACT_LOW V_ARM_LOCK_ACT_LOW(1U)
+
+#define S_M0_LOCK_ACT_LOW 4
+#define V_M0_LOCK_ACT_LOW(x) ((x) << S_M0_LOCK_ACT_LOW)
+#define F_M0_LOCK_ACT_LOW V_M0_LOCK_ACT_LOW(1U)
+
+#define S_XGPBUS_LOCK_ACT_LOW 3
+#define V_XGPBUS_LOCK_ACT_LOW(x) ((x) << S_XGPBUS_LOCK_ACT_LOW)
+#define F_XGPBUS_LOCK_ACT_LOW V_XGPBUS_LOCK_ACT_LOW(1U)
+
+#define S_XGPHY_LOCK_ACT_LOW 2
+#define V_XGPHY_LOCK_ACT_LOW(x) ((x) << S_XGPHY_LOCK_ACT_LOW)
+#define F_XGPHY_LOCK_ACT_LOW V_XGPHY_LOCK_ACT_LOW(1U)
+
+#define S_USB_LOCK_ACT_LOW 1
+#define V_USB_LOCK_ACT_LOW(x) ((x) << S_USB_LOCK_ACT_LOW)
+#define F_USB_LOCK_ACT_LOW V_USB_LOCK_ACT_LOW(1U)
+
+#define S_T7_C_LOCK_ACT_LOW 0
+#define V_T7_C_LOCK_ACT_LOW(x) ((x) << S_T7_C_LOCK_ACT_LOW)
+#define F_T7_C_LOCK_ACT_LOW V_T7_C_LOCK_ACT_LOW(1U)
+
#define A_DBG_EFUSE_BYTE0_3 0x6034
#define A_DBG_EFUSE_BYTE4_7 0x6038
#define A_DBG_EFUSE_BYTE8_11 0x603c
@@ -12140,6 +15152,32 @@
#define V_STATIC_U_PLL_TUNE(x) ((x) << S_STATIC_U_PLL_TUNE)
#define G_STATIC_U_PLL_TUNE(x) (((x) >> S_STATIC_U_PLL_TUNE) & M_STATIC_U_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF1 0x6044
+
+#define S_STATIC_U_PLL_RANGE 22
+#define M_STATIC_U_PLL_RANGE 0x7U
+#define V_STATIC_U_PLL_RANGE(x) ((x) << S_STATIC_U_PLL_RANGE)
+#define G_STATIC_U_PLL_RANGE(x) (((x) >> S_STATIC_U_PLL_RANGE) & M_STATIC_U_PLL_RANGE)
+
+#define S_STATIC_U_PLL_DIVQ 17
+#define M_STATIC_U_PLL_DIVQ 0x1fU
+#define V_STATIC_U_PLL_DIVQ(x) ((x) << S_STATIC_U_PLL_DIVQ)
+#define G_STATIC_U_PLL_DIVQ(x) (((x) >> S_STATIC_U_PLL_DIVQ) & M_STATIC_U_PLL_DIVQ)
+
+#define S_STATIC_U_PLL_DIVFI 8
+#define M_STATIC_U_PLL_DIVFI 0x1ffU
+#define V_STATIC_U_PLL_DIVFI(x) ((x) << S_STATIC_U_PLL_DIVFI)
+#define G_STATIC_U_PLL_DIVFI(x) (((x) >> S_STATIC_U_PLL_DIVFI) & M_STATIC_U_PLL_DIVFI)
+
+#define S_STATIC_U_PLL_DIVR 2
+#define M_STATIC_U_PLL_DIVR 0x3fU
+#define V_STATIC_U_PLL_DIVR(x) ((x) << S_STATIC_U_PLL_DIVR)
+#define G_STATIC_U_PLL_DIVR(x) (((x) >> S_STATIC_U_PLL_DIVR) & M_STATIC_U_PLL_DIVR)
+
+#define S_T7_1_STATIC_U_PLL_BYPASS 1
+#define V_T7_1_STATIC_U_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_U_PLL_BYPASS)
+#define F_T7_1_STATIC_U_PLL_BYPASS V_T7_1_STATIC_U_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_C_PLL_CONF 0x6048
#define S_STATIC_C_PLL_MULT 23
@@ -12167,6 +15205,26 @@
#define V_STATIC_C_PLL_TUNE(x) ((x) << S_STATIC_C_PLL_TUNE)
#define G_STATIC_C_PLL_TUNE(x) (((x) >> S_STATIC_C_PLL_TUNE) & M_STATIC_C_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF2 0x6048
+
+#define S_STATIC_U_PLL_SSMF 5
+#define M_STATIC_U_PLL_SSMF 0xfU
+#define V_STATIC_U_PLL_SSMF(x) ((x) << S_STATIC_U_PLL_SSMF)
+#define G_STATIC_U_PLL_SSMF(x) (((x) >> S_STATIC_U_PLL_SSMF) & M_STATIC_U_PLL_SSMF)
+
+#define S_STATIC_U_PLL_SSMD 2
+#define M_STATIC_U_PLL_SSMD 0x7U
+#define V_STATIC_U_PLL_SSMD(x) ((x) << S_STATIC_U_PLL_SSMD)
+#define G_STATIC_U_PLL_SSMD(x) (((x) >> S_STATIC_U_PLL_SSMD) & M_STATIC_U_PLL_SSMD)
+
+#define S_STATIC_U_PLL_SSDS 1
+#define V_STATIC_U_PLL_SSDS(x) ((x) << S_STATIC_U_PLL_SSDS)
+#define F_STATIC_U_PLL_SSDS V_STATIC_U_PLL_SSDS(1U)
+
+#define S_STATIC_U_PLL_SSE 0
+#define V_STATIC_U_PLL_SSE(x) ((x) << S_STATIC_U_PLL_SSE)
+#define F_STATIC_U_PLL_SSE V_STATIC_U_PLL_SSE(1U)
+
#define A_DBG_STATIC_M_PLL_CONF 0x604c
#define S_STATIC_M_PLL_MULT 23
@@ -12194,6 +15252,32 @@
#define V_STATIC_M_PLL_TUNE(x) ((x) << S_STATIC_M_PLL_TUNE)
#define G_STATIC_M_PLL_TUNE(x) (((x) >> S_STATIC_M_PLL_TUNE) & M_STATIC_M_PLL_TUNE)
+#define A_T7_DBG_STATIC_C_PLL_CONF1 0x604c
+
+#define S_STATIC_C_PLL_RANGE 22
+#define M_STATIC_C_PLL_RANGE 0x7U
+#define V_STATIC_C_PLL_RANGE(x) ((x) << S_STATIC_C_PLL_RANGE)
+#define G_STATIC_C_PLL_RANGE(x) (((x) >> S_STATIC_C_PLL_RANGE) & M_STATIC_C_PLL_RANGE)
+
+#define S_STATIC_C_PLL_DIVQ 17
+#define M_STATIC_C_PLL_DIVQ 0x1fU
+#define V_STATIC_C_PLL_DIVQ(x) ((x) << S_STATIC_C_PLL_DIVQ)
+#define G_STATIC_C_PLL_DIVQ(x) (((x) >> S_STATIC_C_PLL_DIVQ) & M_STATIC_C_PLL_DIVQ)
+
+#define S_STATIC_C_PLL_DIVFI 8
+#define M_STATIC_C_PLL_DIVFI 0x1ffU
+#define V_STATIC_C_PLL_DIVFI(x) ((x) << S_STATIC_C_PLL_DIVFI)
+#define G_STATIC_C_PLL_DIVFI(x) (((x) >> S_STATIC_C_PLL_DIVFI) & M_STATIC_C_PLL_DIVFI)
+
+#define S_STATIC_C_PLL_DIVR 2
+#define M_STATIC_C_PLL_DIVR 0x3fU
+#define V_STATIC_C_PLL_DIVR(x) ((x) << S_STATIC_C_PLL_DIVR)
+#define G_STATIC_C_PLL_DIVR(x) (((x) >> S_STATIC_C_PLL_DIVR) & M_STATIC_C_PLL_DIVR)
+
+#define S_T7_1_STATIC_C_PLL_BYPASS 1
+#define V_T7_1_STATIC_C_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_C_PLL_BYPASS)
+#define F_T7_1_STATIC_C_PLL_BYPASS V_T7_1_STATIC_C_PLL_BYPASS(1U)
+
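The T7 PLL configuration layout replaces the older MULT/TUNE encoding with
explicit RANGE/DIVQ/DIVFI/DIVR divider fields. Decoding is plain field
extraction; how the dividers combine into an output frequency is device
specific and not stated in this header:

	uint32_t conf = t4_read_reg(sc, A_T7_DBG_STATIC_C_PLL_CONF1);
	unsigned int divr = G_STATIC_C_PLL_DIVR(conf);
	unsigned int divfi = G_STATIC_C_PLL_DIVFI(conf);
	unsigned int divq = G_STATIC_C_PLL_DIVQ(conf);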
#define A_DBG_STATIC_KX_PLL_CONF 0x6050
#define S_STATIC_KX_PLL_C 21
@@ -12226,6 +15310,26 @@
#define V_STATIC_KX_PLL_P(x) ((x) << S_STATIC_KX_PLL_P)
#define G_STATIC_KX_PLL_P(x) (((x) >> S_STATIC_KX_PLL_P) & M_STATIC_KX_PLL_P)
+#define A_T7_DBG_STATIC_C_PLL_CONF2 0x6050
+
+#define S_STATIC_C_PLL_SSMF 5
+#define M_STATIC_C_PLL_SSMF 0xfU
+#define V_STATIC_C_PLL_SSMF(x) ((x) << S_STATIC_C_PLL_SSMF)
+#define G_STATIC_C_PLL_SSMF(x) (((x) >> S_STATIC_C_PLL_SSMF) & M_STATIC_C_PLL_SSMF)
+
+#define S_STATIC_C_PLL_SSMD 2
+#define M_STATIC_C_PLL_SSMD 0x7U
+#define V_STATIC_C_PLL_SSMD(x) ((x) << S_STATIC_C_PLL_SSMD)
+#define G_STATIC_C_PLL_SSMD(x) (((x) >> S_STATIC_C_PLL_SSMD) & M_STATIC_C_PLL_SSMD)
+
+#define S_STATIC_C_PLL_SSDS 1
+#define V_STATIC_C_PLL_SSDS(x) ((x) << S_STATIC_C_PLL_SSDS)
+#define F_STATIC_C_PLL_SSDS V_STATIC_C_PLL_SSDS(1U)
+
+#define S_STATIC_C_PLL_SSE 0
+#define V_STATIC_C_PLL_SSE(x) ((x) << S_STATIC_C_PLL_SSE)
+#define F_STATIC_C_PLL_SSE V_STATIC_C_PLL_SSE(1U)
+
#define A_DBG_STATIC_KR_PLL_CONF 0x6054
#define S_STATIC_KR_PLL_C 21
@@ -12258,6 +15362,38 @@
#define V_STATIC_KR_PLL_P(x) ((x) << S_STATIC_KR_PLL_P)
#define G_STATIC_KR_PLL_P(x) (((x) >> S_STATIC_KR_PLL_P) & M_STATIC_KR_PLL_P)
+#define A_DBG_STATIC_PLL_DFS_CONF 0x6054
+
+#define S_STATIC_U_DFS_ACK 23
+#define V_STATIC_U_DFS_ACK(x) ((x) << S_STATIC_U_DFS_ACK)
+#define F_STATIC_U_DFS_ACK V_STATIC_U_DFS_ACK(1U)
+
+#define S_STATIC_C_DFS_ACK 22
+#define V_STATIC_C_DFS_ACK(x) ((x) << S_STATIC_C_DFS_ACK)
+#define F_STATIC_C_DFS_ACK V_STATIC_C_DFS_ACK(1U)
+
+#define S_STATIC_U_DFS_DIVFI 13
+#define M_STATIC_U_DFS_DIVFI 0x1ffU
+#define V_STATIC_U_DFS_DIVFI(x) ((x) << S_STATIC_U_DFS_DIVFI)
+#define G_STATIC_U_DFS_DIVFI(x) (((x) >> S_STATIC_U_DFS_DIVFI) & M_STATIC_U_DFS_DIVFI)
+
+#define S_STATIC_U_DFS_NEWDIV 12
+#define V_STATIC_U_DFS_NEWDIV(x) ((x) << S_STATIC_U_DFS_NEWDIV)
+#define F_STATIC_U_DFS_NEWDIV V_STATIC_U_DFS_NEWDIV(1U)
+
+#define S_T7_STATIC_U_DFS_ENABLE 11
+#define V_T7_STATIC_U_DFS_ENABLE(x) ((x) << S_T7_STATIC_U_DFS_ENABLE)
+#define F_T7_STATIC_U_DFS_ENABLE V_T7_STATIC_U_DFS_ENABLE(1U)
+
+#define S_STATIC_C_DFS_DIVFI 2
+#define M_STATIC_C_DFS_DIVFI 0x1ffU
+#define V_STATIC_C_DFS_DIVFI(x) ((x) << S_STATIC_C_DFS_DIVFI)
+#define G_STATIC_C_DFS_DIVFI(x) (((x) >> S_STATIC_C_DFS_DIVFI) & M_STATIC_C_DFS_DIVFI)
+
+#define S_STATIC_C_DFS_NEWDIV 1
+#define V_STATIC_C_DFS_NEWDIV(x) ((x) << S_STATIC_C_DFS_NEWDIV)
+#define F_STATIC_C_DFS_NEWDIV V_STATIC_C_DFS_NEWDIV(1U)
+
#define A_DBG_EXTRA_STATIC_BITS_CONF 0x6058
#define S_STATIC_M_PLL_RESET 30
@@ -12343,6 +15479,14 @@
#define V_PSRO_SEL(x) ((x) << S_PSRO_SEL)
#define G_PSRO_SEL(x) (((x) >> S_PSRO_SEL) & M_PSRO_SEL)
+#define S_T7_STATIC_LVDS_CLKOUT_EN 21
+#define V_T7_STATIC_LVDS_CLKOUT_EN(x) ((x) << S_T7_STATIC_LVDS_CLKOUT_EN)
+#define F_T7_STATIC_LVDS_CLKOUT_EN V_T7_STATIC_LVDS_CLKOUT_EN(1U)
+
+#define S_T7_EXPHYCLK_SEL_EN 16
+#define V_T7_EXPHYCLK_SEL_EN(x) ((x) << S_T7_EXPHYCLK_SEL_EN)
+#define F_T7_EXPHYCLK_SEL_EN V_T7_EXPHYCLK_SEL_EN(1U)
+
#define A_DBG_STATIC_OCLK_MUXSEL_CONF 0x605c
#define S_M_OCLK_MUXSEL 12
@@ -12467,16 +15611,6 @@
#define V_T5_RD_ADDR0(x) ((x) << S_T5_RD_ADDR0)
#define G_T5_RD_ADDR0(x) (((x) >> S_T5_RD_ADDR0) & M_T5_RD_ADDR0)
-#define S_T6_RD_ADDR1 11
-#define M_T6_RD_ADDR1 0x1ffU
-#define V_T6_RD_ADDR1(x) ((x) << S_T6_RD_ADDR1)
-#define G_T6_RD_ADDR1(x) (((x) >> S_T6_RD_ADDR1) & M_T6_RD_ADDR1)
-
-#define S_T6_RD_ADDR0 2
-#define M_T6_RD_ADDR0 0x1ffU
-#define V_T6_RD_ADDR0(x) ((x) << S_T6_RD_ADDR0)
-#define G_T6_RD_ADDR0(x) (((x) >> S_T6_RD_ADDR0) & M_T6_RD_ADDR0)
-
#define A_DBG_TRACE_WRADDR 0x6090
#define S_WR_POINTER_ADDR1 16
@@ -12499,16 +15633,6 @@
#define V_T5_WR_POINTER_ADDR0(x) ((x) << S_T5_WR_POINTER_ADDR0)
#define G_T5_WR_POINTER_ADDR0(x) (((x) >> S_T5_WR_POINTER_ADDR0) & M_T5_WR_POINTER_ADDR0)
-#define S_T6_WR_POINTER_ADDR1 16
-#define M_T6_WR_POINTER_ADDR1 0x1ffU
-#define V_T6_WR_POINTER_ADDR1(x) ((x) << S_T6_WR_POINTER_ADDR1)
-#define G_T6_WR_POINTER_ADDR1(x) (((x) >> S_T6_WR_POINTER_ADDR1) & M_T6_WR_POINTER_ADDR1)
-
-#define S_T6_WR_POINTER_ADDR0 0
-#define M_T6_WR_POINTER_ADDR0 0x1ffU
-#define V_T6_WR_POINTER_ADDR0(x) ((x) << S_T6_WR_POINTER_ADDR0)
-#define G_T6_WR_POINTER_ADDR0(x) (((x) >> S_T6_WR_POINTER_ADDR0) & M_T6_WR_POINTER_ADDR0)
-
#define A_DBG_TRACE0_DATA_OUT 0x6094
#define A_DBG_TRACE1_DATA_OUT 0x6098
#define A_DBG_FUSE_SENSE_DONE 0x609c
@@ -12575,7 +15699,52 @@
#define V_T6_TVSENSE_RST(x) ((x) << S_T6_TVSENSE_RST)
#define F_T6_TVSENSE_RST V_T6_TVSENSE_RST(1U)
+#define A_DBG_PVT_EN1 0x60a8
+
+#define S_PVT_TRIMO 18
+#define M_PVT_TRIMO 0x3fU
+#define V_PVT_TRIMO(x) ((x) << S_PVT_TRIMO)
+#define G_PVT_TRIMO(x) (((x) >> S_PVT_TRIMO) & M_PVT_TRIMO)
+
+#define S_PVT_TRIMG 13
+#define M_PVT_TRIMG 0x1fU
+#define V_PVT_TRIMG(x) ((x) << S_PVT_TRIMG)
+#define G_PVT_TRIMG(x) (((x) >> S_PVT_TRIMG) & M_PVT_TRIMG)
+
+#define S_PVT_VSAMPLE 12
+#define V_PVT_VSAMPLE(x) ((x) << S_PVT_VSAMPLE)
+#define F_PVT_VSAMPLE V_PVT_VSAMPLE(1U)
+
+#define S_PVT_PSAMPLE 10
+#define M_PVT_PSAMPLE 0x3U
+#define V_PVT_PSAMPLE(x) ((x) << S_PVT_PSAMPLE)
+#define G_PVT_PSAMPLE(x) (((x) >> S_PVT_PSAMPLE) & M_PVT_PSAMPLE)
+
+#define S_PVT_ENA 9
+#define V_PVT_ENA(x) ((x) << S_PVT_ENA)
+#define F_PVT_ENA V_PVT_ENA(1U)
+
+#define S_PVT_RESET 8
+#define V_PVT_RESET(x) ((x) << S_PVT_RESET)
+#define F_PVT_RESET V_PVT_RESET(1U)
+
+#define S_PVT_DIV 0
+#define M_PVT_DIV 0xffU
+#define V_PVT_DIV(x) ((x) << S_PVT_DIV)
+#define G_PVT_DIV(x) (((x) >> S_PVT_DIV) & M_PVT_DIV)
+
#define A_DBG_CUST_EFUSE_OUT_EN 0x60ac
+#define A_DBG_PVT_EN2 0x60ac
+
+#define S_PVT_DATA_OUT 1
+#define M_PVT_DATA_OUT 0x3ffU
+#define V_PVT_DATA_OUT(x) ((x) << S_PVT_DATA_OUT)
+#define G_PVT_DATA_OUT(x) (((x) >> S_PVT_DATA_OUT) & M_PVT_DATA_OUT)
+
+#define S_PVT_DATA_VALID 0
+#define V_PVT_DATA_VALID(x) ((x) << S_PVT_DATA_VALID)
+#define F_PVT_DATA_VALID V_PVT_DATA_VALID(1U)
+
#define A_DBG_CUST_EFUSE_SEL1_EN 0x60b0
#define A_DBG_CUST_EFUSE_SEL2_EN 0x60b4
@@ -12638,6 +15807,36 @@
#define V_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_STATIC_M_PLL_FFSLEWRATE)
#define G_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_M_PLL_FFSLEWRATE) & M_STATIC_M_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_M0_PLL_CONF1 0x60b8
+
+#define S_STATIC_M0_PLL_RANGE 22
+#define M_STATIC_M0_PLL_RANGE 0x7U
+#define V_STATIC_M0_PLL_RANGE(x) ((x) << S_STATIC_M0_PLL_RANGE)
+#define G_STATIC_M0_PLL_RANGE(x) (((x) >> S_STATIC_M0_PLL_RANGE) & M_STATIC_M0_PLL_RANGE)
+
+#define S_STATIC_M0_PLL_DIVQ 17
+#define M_STATIC_M0_PLL_DIVQ 0x1fU
+#define V_STATIC_M0_PLL_DIVQ(x) ((x) << S_STATIC_M0_PLL_DIVQ)
+#define G_STATIC_M0_PLL_DIVQ(x) (((x) >> S_STATIC_M0_PLL_DIVQ) & M_STATIC_M0_PLL_DIVQ)
+
+#define S_STATIC_M0_PLL_DIVFI 8
+#define M_STATIC_M0_PLL_DIVFI 0x1ffU
+#define V_STATIC_M0_PLL_DIVFI(x) ((x) << S_STATIC_M0_PLL_DIVFI)
+#define G_STATIC_M0_PLL_DIVFI(x) (((x) >> S_STATIC_M0_PLL_DIVFI) & M_STATIC_M0_PLL_DIVFI)
+
+#define S_STATIC_M0_PLL_DIVR 2
+#define M_STATIC_M0_PLL_DIVR 0x3fU
+#define V_STATIC_M0_PLL_DIVR(x) ((x) << S_STATIC_M0_PLL_DIVR)
+#define G_STATIC_M0_PLL_DIVR(x) (((x) >> S_STATIC_M0_PLL_DIVR) & M_STATIC_M0_PLL_DIVR)
+
+#define S_STATIC_M0_PLL_BYPASS 1
+#define V_STATIC_M0_PLL_BYPASS(x) ((x) << S_STATIC_M0_PLL_BYPASS)
+#define F_STATIC_M0_PLL_BYPASS V_STATIC_M0_PLL_BYPASS(1U)
+
+#define S_STATIC_M0_PLL_RESET 0
+#define V_STATIC_M0_PLL_RESET(x) ((x) << S_STATIC_M0_PLL_RESET)
+#define F_STATIC_M0_PLL_RESET V_STATIC_M0_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF2 0x60bc
#define S_T5_STATIC_M_PLL_DCO_BYPASS 23
@@ -12715,6 +15914,50 @@
#define V_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_STATIC_M_PLL_LOCKTUNE)
#define G_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_STATIC_M_PLL_LOCKTUNE) & M_STATIC_M_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M0_PLL_CONF2 0x60bc
+
+#define S_T7_STATIC_SWMC1RST_ 14
+#define V_T7_STATIC_SWMC1RST_(x) ((x) << S_T7_STATIC_SWMC1RST_)
+#define F_T7_STATIC_SWMC1RST_ V_T7_STATIC_SWMC1RST_(1U)
+
+#define S_T7_STATIC_SWMC1CFGRST_ 13
+#define V_T7_STATIC_SWMC1CFGRST_(x) ((x) << S_T7_STATIC_SWMC1CFGRST_)
+#define F_T7_STATIC_SWMC1CFGRST_ V_T7_STATIC_SWMC1CFGRST_(1U)
+
+#define S_T7_STATIC_PHY0RECRST_ 12
+#define V_T7_STATIC_PHY0RECRST_(x) ((x) << S_T7_STATIC_PHY0RECRST_)
+#define F_T7_STATIC_PHY0RECRST_ V_T7_STATIC_PHY0RECRST_(1U)
+
+#define S_T7_STATIC_PHY1RECRST_ 11
+#define V_T7_STATIC_PHY1RECRST_(x) ((x) << S_T7_STATIC_PHY1RECRST_)
+#define F_T7_STATIC_PHY1RECRST_ V_T7_STATIC_PHY1RECRST_(1U)
+
+#define S_T7_STATIC_SWMC0RST_ 10
+#define V_T7_STATIC_SWMC0RST_(x) ((x) << S_T7_STATIC_SWMC0RST_)
+#define F_T7_STATIC_SWMC0RST_ V_T7_STATIC_SWMC0RST_(1U)
+
+#define S_T7_STATIC_SWMC0CFGRST_ 9
+#define V_T7_STATIC_SWMC0CFGRST_(x) ((x) << S_T7_STATIC_SWMC0CFGRST_)
+#define F_T7_STATIC_SWMC0CFGRST_ V_T7_STATIC_SWMC0CFGRST_(1U)
+
+#define S_STATIC_M0_PLL_SSMF 5
+#define M_STATIC_M0_PLL_SSMF 0xfU
+#define V_STATIC_M0_PLL_SSMF(x) ((x) << S_STATIC_M0_PLL_SSMF)
+#define G_STATIC_M0_PLL_SSMF(x) (((x) >> S_STATIC_M0_PLL_SSMF) & M_STATIC_M0_PLL_SSMF)
+
+#define S_STATIC_M0_PLL_SSMD 2
+#define M_STATIC_M0_PLL_SSMD 0x7U
+#define V_STATIC_M0_PLL_SSMD(x) ((x) << S_STATIC_M0_PLL_SSMD)
+#define G_STATIC_M0_PLL_SSMD(x) (((x) >> S_STATIC_M0_PLL_SSMD) & M_STATIC_M0_PLL_SSMD)
+
+#define S_STATIC_M0_PLL_SSDS 1
+#define V_STATIC_M0_PLL_SSDS(x) ((x) << S_STATIC_M0_PLL_SSDS)
+#define F_STATIC_M0_PLL_SSDS V_STATIC_M0_PLL_SSDS(1U)
+
+#define S_STATIC_M0_PLL_SSE 0
+#define V_STATIC_M0_PLL_SSE(x) ((x) << S_STATIC_M0_PLL_SSE)
+#define F_STATIC_M0_PLL_SSE V_STATIC_M0_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF3 0x60c0
#define S_T5_STATIC_M_PLL_MULTPRE 30
@@ -12778,8 +16021,58 @@
#define V_T6_STATIC_M_PLL_RANGEA(x) ((x) << S_T6_STATIC_M_PLL_RANGEA)
#define G_T6_STATIC_M_PLL_RANGEA(x) (((x) >> S_T6_STATIC_M_PLL_RANGEA) & M_T6_STATIC_M_PLL_RANGEA)
+#define A_DBG_STATIC_MAC_PLL_CONF1 0x60c0
+
+#define S_STATIC_MAC_PLL_RANGE 22
+#define M_STATIC_MAC_PLL_RANGE 0x7U
+#define V_STATIC_MAC_PLL_RANGE(x) ((x) << S_STATIC_MAC_PLL_RANGE)
+#define G_STATIC_MAC_PLL_RANGE(x) (((x) >> S_STATIC_MAC_PLL_RANGE) & M_STATIC_MAC_PLL_RANGE)
+
+#define S_STATIC_MAC_PLL_DIVQ 17
+#define M_STATIC_MAC_PLL_DIVQ 0x1fU
+#define V_STATIC_MAC_PLL_DIVQ(x) ((x) << S_STATIC_MAC_PLL_DIVQ)
+#define G_STATIC_MAC_PLL_DIVQ(x) (((x) >> S_STATIC_MAC_PLL_DIVQ) & M_STATIC_MAC_PLL_DIVQ)
+
+#define S_STATIC_MAC_PLL_DIVFI 8
+#define M_STATIC_MAC_PLL_DIVFI 0x1ffU
+#define V_STATIC_MAC_PLL_DIVFI(x) ((x) << S_STATIC_MAC_PLL_DIVFI)
+#define G_STATIC_MAC_PLL_DIVFI(x) (((x) >> S_STATIC_MAC_PLL_DIVFI) & M_STATIC_MAC_PLL_DIVFI)
+
+#define S_STATIC_MAC_PLL_DIVR 2
+#define M_STATIC_MAC_PLL_DIVR 0x3fU
+#define V_STATIC_MAC_PLL_DIVR(x) ((x) << S_STATIC_MAC_PLL_DIVR)
+#define G_STATIC_MAC_PLL_DIVR(x) (((x) >> S_STATIC_MAC_PLL_DIVR) & M_STATIC_MAC_PLL_DIVR)
+
+#define S_STATIC_MAC_PLL_BYPASS 1
+#define V_STATIC_MAC_PLL_BYPASS(x) ((x) << S_STATIC_MAC_PLL_BYPASS)
+#define F_STATIC_MAC_PLL_BYPASS V_STATIC_MAC_PLL_BYPASS(1U)
+
+#define S_STATIC_MAC_PLL_RESET 0
+#define V_STATIC_MAC_PLL_RESET(x) ((x) << S_STATIC_MAC_PLL_RESET)
+#define F_STATIC_MAC_PLL_RESET V_STATIC_MAC_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF4 0x60c4
#define A_DBG_STATIC_M_PLL_CONF4 0x60c4
+#define A_DBG_STATIC_MAC_PLL_CONF2 0x60c4
+
+#define S_STATIC_MAC_PLL_SSMF 5
+#define M_STATIC_MAC_PLL_SSMF 0xfU
+#define V_STATIC_MAC_PLL_SSMF(x) ((x) << S_STATIC_MAC_PLL_SSMF)
+#define G_STATIC_MAC_PLL_SSMF(x) (((x) >> S_STATIC_MAC_PLL_SSMF) & M_STATIC_MAC_PLL_SSMF)
+
+#define S_STATIC_MAC_PLL_SSMD 2
+#define M_STATIC_MAC_PLL_SSMD 0x7U
+#define V_STATIC_MAC_PLL_SSMD(x) ((x) << S_STATIC_MAC_PLL_SSMD)
+#define G_STATIC_MAC_PLL_SSMD(x) (((x) >> S_STATIC_MAC_PLL_SSMD) & M_STATIC_MAC_PLL_SSMD)
+
+#define S_STATIC_MAC_PLL_SSDS 1
+#define V_STATIC_MAC_PLL_SSDS(x) ((x) << S_STATIC_MAC_PLL_SSDS)
+#define F_STATIC_MAC_PLL_SSDS V_STATIC_MAC_PLL_SSDS(1U)
+
+#define S_STATIC_MAC_PLL_SSE 0
+#define V_STATIC_MAC_PLL_SSE(x) ((x) << S_STATIC_MAC_PLL_SSE)
+#define F_STATIC_MAC_PLL_SSE V_STATIC_MAC_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF5 0x60c8
#define S_T5_STATIC_M_PLL_VCVTUNE 24
@@ -12835,6 +16128,36 @@
#define V_T6_STATIC_M_PLL_MULT(x) ((x) << S_T6_STATIC_M_PLL_MULT)
#define G_T6_STATIC_M_PLL_MULT(x) (((x) >> S_T6_STATIC_M_PLL_MULT) & M_T6_STATIC_M_PLL_MULT)
+#define A_DBG_STATIC_ARM_PLL_CONF1 0x60c8
+
+#define S_STATIC_ARM_PLL_RANGE 22
+#define M_STATIC_ARM_PLL_RANGE 0x7U
+#define V_STATIC_ARM_PLL_RANGE(x) ((x) << S_STATIC_ARM_PLL_RANGE)
+#define G_STATIC_ARM_PLL_RANGE(x) (((x) >> S_STATIC_ARM_PLL_RANGE) & M_STATIC_ARM_PLL_RANGE)
+
+#define S_STATIC_ARM_PLL_DIVQ 17
+#define M_STATIC_ARM_PLL_DIVQ 0x1fU
+#define V_STATIC_ARM_PLL_DIVQ(x) ((x) << S_STATIC_ARM_PLL_DIVQ)
+#define G_STATIC_ARM_PLL_DIVQ(x) (((x) >> S_STATIC_ARM_PLL_DIVQ) & M_STATIC_ARM_PLL_DIVQ)
+
+#define S_STATIC_ARM_PLL_DIVFI 8
+#define M_STATIC_ARM_PLL_DIVFI 0x1ffU
+#define V_STATIC_ARM_PLL_DIVFI(x) ((x) << S_STATIC_ARM_PLL_DIVFI)
+#define G_STATIC_ARM_PLL_DIVFI(x) (((x) >> S_STATIC_ARM_PLL_DIVFI) & M_STATIC_ARM_PLL_DIVFI)
+
+#define S_STATIC_ARM_PLL_DIVR 2
+#define M_STATIC_ARM_PLL_DIVR 0x3fU
+#define V_STATIC_ARM_PLL_DIVR(x) ((x) << S_STATIC_ARM_PLL_DIVR)
+#define G_STATIC_ARM_PLL_DIVR(x) (((x) >> S_STATIC_ARM_PLL_DIVR) & M_STATIC_ARM_PLL_DIVR)
+
+#define S_STATIC_ARM_PLL_BYPASS 1
+#define V_STATIC_ARM_PLL_BYPASS(x) ((x) << S_STATIC_ARM_PLL_BYPASS)
+#define F_STATIC_ARM_PLL_BYPASS V_STATIC_ARM_PLL_BYPASS(1U)
+
+#define S_STATIC_ARM_PLL_RESET 0
+#define V_STATIC_ARM_PLL_RESET(x) ((x) << S_STATIC_ARM_PLL_RESET)
+#define F_STATIC_ARM_PLL_RESET V_STATIC_ARM_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF6 0x60cc
#define S_T5_STATIC_PHY0RECRST_ 5
@@ -12913,6 +16236,26 @@
#define V_STATIC_SWMC1CFGRST_(x) ((x) << S_STATIC_SWMC1CFGRST_)
#define F_STATIC_SWMC1CFGRST_ V_STATIC_SWMC1CFGRST_(1U)
+#define A_DBG_STATIC_ARM_PLL_CONF2 0x60cc
+
+#define S_STATIC_ARM_PLL_SSMF 5
+#define M_STATIC_ARM_PLL_SSMF 0xfU
+#define V_STATIC_ARM_PLL_SSMF(x) ((x) << S_STATIC_ARM_PLL_SSMF)
+#define G_STATIC_ARM_PLL_SSMF(x) (((x) >> S_STATIC_ARM_PLL_SSMF) & M_STATIC_ARM_PLL_SSMF)
+
+#define S_STATIC_ARM_PLL_SSMD 2
+#define M_STATIC_ARM_PLL_SSMD 0x7U
+#define V_STATIC_ARM_PLL_SSMD(x) ((x) << S_STATIC_ARM_PLL_SSMD)
+#define G_STATIC_ARM_PLL_SSMD(x) (((x) >> S_STATIC_ARM_PLL_SSMD) & M_STATIC_ARM_PLL_SSMD)
+
+#define S_STATIC_ARM_PLL_SSDS 1
+#define V_STATIC_ARM_PLL_SSDS(x) ((x) << S_STATIC_ARM_PLL_SSDS)
+#define F_STATIC_ARM_PLL_SSDS V_STATIC_ARM_PLL_SSDS(1U)
+
+#define S_STATIC_ARM_PLL_SSE 0
+#define V_STATIC_ARM_PLL_SSE(x) ((x) << S_STATIC_ARM_PLL_SSE)
+#define F_STATIC_ARM_PLL_SSE V_STATIC_ARM_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF1 0x60d0
#define S_T5_STATIC_C_PLL_MULTFRAC 8
@@ -12937,6 +16280,36 @@
#define V_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_STATIC_C_PLL_FFSLEWRATE)
#define G_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_C_PLL_FFSLEWRATE) & M_STATIC_C_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_USB_PLL_CONF1 0x60d0
+
+#define S_STATIC_USB_PLL_RANGE 22
+#define M_STATIC_USB_PLL_RANGE 0x7U
+#define V_STATIC_USB_PLL_RANGE(x) ((x) << S_STATIC_USB_PLL_RANGE)
+#define G_STATIC_USB_PLL_RANGE(x) (((x) >> S_STATIC_USB_PLL_RANGE) & M_STATIC_USB_PLL_RANGE)
+
+#define S_STATIC_USB_PLL_DIVQ 17
+#define M_STATIC_USB_PLL_DIVQ 0x1fU
+#define V_STATIC_USB_PLL_DIVQ(x) ((x) << S_STATIC_USB_PLL_DIVQ)
+#define G_STATIC_USB_PLL_DIVQ(x) (((x) >> S_STATIC_USB_PLL_DIVQ) & M_STATIC_USB_PLL_DIVQ)
+
+#define S_STATIC_USB_PLL_DIVFI 8
+#define M_STATIC_USB_PLL_DIVFI 0x1ffU
+#define V_STATIC_USB_PLL_DIVFI(x) ((x) << S_STATIC_USB_PLL_DIVFI)
+#define G_STATIC_USB_PLL_DIVFI(x) (((x) >> S_STATIC_USB_PLL_DIVFI) & M_STATIC_USB_PLL_DIVFI)
+
+#define S_STATIC_USB_PLL_DIVR 2
+#define M_STATIC_USB_PLL_DIVR 0x3fU
+#define V_STATIC_USB_PLL_DIVR(x) ((x) << S_STATIC_USB_PLL_DIVR)
+#define G_STATIC_USB_PLL_DIVR(x) (((x) >> S_STATIC_USB_PLL_DIVR) & M_STATIC_USB_PLL_DIVR)
+
+#define S_STATIC_USB_PLL_BYPASS 1
+#define V_STATIC_USB_PLL_BYPASS(x) ((x) << S_STATIC_USB_PLL_BYPASS)
+#define F_STATIC_USB_PLL_BYPASS V_STATIC_USB_PLL_BYPASS(1U)
+
+#define S_STATIC_USB_PLL_RESET 0
+#define V_STATIC_USB_PLL_RESET(x) ((x) << S_STATIC_USB_PLL_RESET)
+#define F_STATIC_USB_PLL_RESET V_STATIC_USB_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF2 0x60d4
#define S_T5_STATIC_C_PLL_DCO_BYPASS 23
@@ -13019,6 +16392,26 @@
#define V_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_STATIC_C_PLL_LOCKTUNE)
#define G_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_STATIC_C_PLL_LOCKTUNE) & M_STATIC_C_PLL_LOCKTUNE)
+#define A_DBG_STATIC_USB_PLL_CONF2 0x60d4
+
+#define S_STATIC_USB_PLL_SSMF 5
+#define M_STATIC_USB_PLL_SSMF 0xfU
+#define V_STATIC_USB_PLL_SSMF(x) ((x) << S_STATIC_USB_PLL_SSMF)
+#define G_STATIC_USB_PLL_SSMF(x) (((x) >> S_STATIC_USB_PLL_SSMF) & M_STATIC_USB_PLL_SSMF)
+
+#define S_STATIC_USB_PLL_SSMD 2
+#define M_STATIC_USB_PLL_SSMD 0x7U
+#define V_STATIC_USB_PLL_SSMD(x) ((x) << S_STATIC_USB_PLL_SSMD)
+#define G_STATIC_USB_PLL_SSMD(x) (((x) >> S_STATIC_USB_PLL_SSMD) & M_STATIC_USB_PLL_SSMD)
+
+#define S_STATIC_USB_PLL_SSDS 1
+#define V_STATIC_USB_PLL_SSDS(x) ((x) << S_STATIC_USB_PLL_SSDS)
+#define F_STATIC_USB_PLL_SSDS V_STATIC_USB_PLL_SSDS(1U)
+
+#define S_STATIC_USB_PLL_SSE 0
+#define V_STATIC_USB_PLL_SSE(x) ((x) << S_STATIC_USB_PLL_SSE)
+#define F_STATIC_USB_PLL_SSE V_STATIC_USB_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF3 0x60d8
#define S_T5_STATIC_C_PLL_MULTPRE 30
@@ -13082,8 +16475,58 @@
#define V_T6_STATIC_C_PLL_RANGEA(x) ((x) << S_T6_STATIC_C_PLL_RANGEA)
#define G_T6_STATIC_C_PLL_RANGEA(x) (((x) >> S_T6_STATIC_C_PLL_RANGEA) & M_T6_STATIC_C_PLL_RANGEA)
+#define A_DBG_STATIC_XGPHY_PLL_CONF1 0x60d8
+
+#define S_STATIC_XGPHY_PLL_RANGE 22
+#define M_STATIC_XGPHY_PLL_RANGE 0x7U
+#define V_STATIC_XGPHY_PLL_RANGE(x) ((x) << S_STATIC_XGPHY_PLL_RANGE)
+#define G_STATIC_XGPHY_PLL_RANGE(x) (((x) >> S_STATIC_XGPHY_PLL_RANGE) & M_STATIC_XGPHY_PLL_RANGE)
+
+#define S_STATIC_XGPHY_PLL_DIVQ 17
+#define M_STATIC_XGPHY_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPHY_PLL_DIVQ(x) ((x) << S_STATIC_XGPHY_PLL_DIVQ)
+#define G_STATIC_XGPHY_PLL_DIVQ(x) (((x) >> S_STATIC_XGPHY_PLL_DIVQ) & M_STATIC_XGPHY_PLL_DIVQ)
+
+#define S_STATIC_XGPHY_PLL_DIVFI 8
+#define M_STATIC_XGPHY_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPHY_PLL_DIVFI(x) ((x) << S_STATIC_XGPHY_PLL_DIVFI)
+#define G_STATIC_XGPHY_PLL_DIVFI(x) (((x) >> S_STATIC_XGPHY_PLL_DIVFI) & M_STATIC_XGPHY_PLL_DIVFI)
+
+#define S_STATIC_XGPHY_PLL_DIVR 2
+#define M_STATIC_XGPHY_PLL_DIVR 0x3fU
+#define V_STATIC_XGPHY_PLL_DIVR(x) ((x) << S_STATIC_XGPHY_PLL_DIVR)
+#define G_STATIC_XGPHY_PLL_DIVR(x) (((x) >> S_STATIC_XGPHY_PLL_DIVR) & M_STATIC_XGPHY_PLL_DIVR)
+
+#define S_STATIC_XGPHY_PLL_BYPASS 1
+#define V_STATIC_XGPHY_PLL_BYPASS(x) ((x) << S_STATIC_XGPHY_PLL_BYPASS)
+#define F_STATIC_XGPHY_PLL_BYPASS V_STATIC_XGPHY_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPHY_PLL_RESET 0
+#define V_STATIC_XGPHY_PLL_RESET(x) ((x) << S_STATIC_XGPHY_PLL_RESET)
+#define F_STATIC_XGPHY_PLL_RESET V_STATIC_XGPHY_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF4 0x60dc
#define A_DBG_STATIC_C_PLL_CONF4 0x60dc
+#define A_DBG_STATIC_XGPHY_PLL_CONF2 0x60dc
+
+#define S_STATIC_XGPHY_PLL_SSMF 5
+#define M_STATIC_XGPHY_PLL_SSMF 0xfU
+#define V_STATIC_XGPHY_PLL_SSMF(x) ((x) << S_STATIC_XGPHY_PLL_SSMF)
+#define G_STATIC_XGPHY_PLL_SSMF(x) (((x) >> S_STATIC_XGPHY_PLL_SSMF) & M_STATIC_XGPHY_PLL_SSMF)
+
+#define S_STATIC_XGPHY_PLL_SSMD 2
+#define M_STATIC_XGPHY_PLL_SSMD 0x7U
+#define V_STATIC_XGPHY_PLL_SSMD(x) ((x) << S_STATIC_XGPHY_PLL_SSMD)
+#define G_STATIC_XGPHY_PLL_SSMD(x) (((x) >> S_STATIC_XGPHY_PLL_SSMD) & M_STATIC_XGPHY_PLL_SSMD)
+
+#define S_STATIC_XGPHY_PLL_SSDS 1
+#define V_STATIC_XGPHY_PLL_SSDS(x) ((x) << S_STATIC_XGPHY_PLL_SSDS)
+#define F_STATIC_XGPHY_PLL_SSDS V_STATIC_XGPHY_PLL_SSDS(1U)
+
+#define S_STATIC_XGPHY_PLL_SSE 0
+#define V_STATIC_XGPHY_PLL_SSE(x) ((x) << S_STATIC_XGPHY_PLL_SSE)
+#define F_STATIC_XGPHY_PLL_SSE V_STATIC_XGPHY_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF5 0x60e0
#define S_T5_STATIC_C_PLL_VCVTUNE 22
@@ -13140,6 +16583,40 @@
#define V_T6_STATIC_C_PLL_MULT(x) ((x) << S_T6_STATIC_C_PLL_MULT)
#define G_T6_STATIC_C_PLL_MULT(x) (((x) >> S_T6_STATIC_C_PLL_MULT) & M_T6_STATIC_C_PLL_MULT)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF1 0x60e0
+
+#define S_STATIC_XGPBUS_SWRST_ 25
+#define V_STATIC_XGPBUS_SWRST_(x) ((x) << S_STATIC_XGPBUS_SWRST_)
+#define F_STATIC_XGPBUS_SWRST_ V_STATIC_XGPBUS_SWRST_(1U)
+
+#define S_STATIC_XGPBUS_PLL_RANGE 22
+#define M_STATIC_XGPBUS_PLL_RANGE 0x7U
+#define V_STATIC_XGPBUS_PLL_RANGE(x) ((x) << S_STATIC_XGPBUS_PLL_RANGE)
+#define G_STATIC_XGPBUS_PLL_RANGE(x) (((x) >> S_STATIC_XGPBUS_PLL_RANGE) & M_STATIC_XGPBUS_PLL_RANGE)
+
+#define S_STATIC_XGPBUS_PLL_DIVQ 17
+#define M_STATIC_XGPBUS_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPBUS_PLL_DIVQ(x) ((x) << S_STATIC_XGPBUS_PLL_DIVQ)
+#define G_STATIC_XGPBUS_PLL_DIVQ(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVQ) & M_STATIC_XGPBUS_PLL_DIVQ)
+
+#define S_STATIC_XGPBUS_PLL_DIVFI 8
+#define M_STATIC_XGPBUS_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPBUS_PLL_DIVFI(x) ((x) << S_STATIC_XGPBUS_PLL_DIVFI)
+#define G_STATIC_XGPBUS_PLL_DIVFI(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVFI) & M_STATIC_XGPBUS_PLL_DIVFI)
+
+#define S_STATIC_XGPBUS_PLL_DIVR 2
+#define M_STATIC_XGPBUS_PLL_DIVR 0x3fU
+#define V_STATIC_XGPBUS_PLL_DIVR(x) ((x) << S_STATIC_XGPBUS_PLL_DIVR)
+#define G_STATIC_XGPBUS_PLL_DIVR(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVR) & M_STATIC_XGPBUS_PLL_DIVR)
+
+#define S_STATIC_XGPBUS_PLL_BYPASS 1
+#define V_STATIC_XGPBUS_PLL_BYPASS(x) ((x) << S_STATIC_XGPBUS_PLL_BYPASS)
+#define F_STATIC_XGPBUS_PLL_BYPASS V_STATIC_XGPBUS_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPBUS_PLL_RESET 0
+#define V_STATIC_XGPBUS_PLL_RESET(x) ((x) << S_STATIC_XGPBUS_PLL_RESET)
+#define F_STATIC_XGPBUS_PLL_RESET V_STATIC_XGPBUS_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF1 0x60e4
#define S_T5_STATIC_U_PLL_MULTFRAC 8
@@ -13164,6 +16641,26 @@
#define V_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_STATIC_U_PLL_FFSLEWRATE)
#define G_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_U_PLL_FFSLEWRATE) & M_STATIC_U_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF2 0x60e4
+
+#define S_STATIC_XGPBUS_PLL_SSMF 5
+#define M_STATIC_XGPBUS_PLL_SSMF 0xfU
+#define V_STATIC_XGPBUS_PLL_SSMF(x) ((x) << S_STATIC_XGPBUS_PLL_SSMF)
+#define G_STATIC_XGPBUS_PLL_SSMF(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMF) & M_STATIC_XGPBUS_PLL_SSMF)
+
+#define S_STATIC_XGPBUS_PLL_SSMD 2
+#define M_STATIC_XGPBUS_PLL_SSMD 0x7U
+#define V_STATIC_XGPBUS_PLL_SSMD(x) ((x) << S_STATIC_XGPBUS_PLL_SSMD)
+#define G_STATIC_XGPBUS_PLL_SSMD(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMD) & M_STATIC_XGPBUS_PLL_SSMD)
+
+#define S_STATIC_XGPBUS_PLL_SSDS 1
+#define V_STATIC_XGPBUS_PLL_SSDS(x) ((x) << S_STATIC_XGPBUS_PLL_SSDS)
+#define F_STATIC_XGPBUS_PLL_SSDS V_STATIC_XGPBUS_PLL_SSDS(1U)
+
+#define S_STATIC_XGPBUS_PLL_SSE 0
+#define V_STATIC_XGPBUS_PLL_SSE(x) ((x) << S_STATIC_XGPBUS_PLL_SSE)
+#define F_STATIC_XGPBUS_PLL_SSE V_STATIC_XGPBUS_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF2 0x60e8
#define S_T5_STATIC_U_PLL_DCO_BYPASS 23
@@ -13246,6 +16743,36 @@
#define V_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_STATIC_U_PLL_LOCKTUNE)
#define G_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_STATIC_U_PLL_LOCKTUNE) & M_STATIC_U_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M1_PLL_CONF1 0x60e8
+
+#define S_STATIC_M1_PLL_RANGE 22
+#define M_STATIC_M1_PLL_RANGE 0x7U
+#define V_STATIC_M1_PLL_RANGE(x) ((x) << S_STATIC_M1_PLL_RANGE)
+#define G_STATIC_M1_PLL_RANGE(x) (((x) >> S_STATIC_M1_PLL_RANGE) & M_STATIC_M1_PLL_RANGE)
+
+#define S_STATIC_M1_PLL_DIVQ 17
+#define M_STATIC_M1_PLL_DIVQ 0x1fU
+#define V_STATIC_M1_PLL_DIVQ(x) ((x) << S_STATIC_M1_PLL_DIVQ)
+#define G_STATIC_M1_PLL_DIVQ(x) (((x) >> S_STATIC_M1_PLL_DIVQ) & M_STATIC_M1_PLL_DIVQ)
+
+#define S_STATIC_M1_PLL_DIVFI 8
+#define M_STATIC_M1_PLL_DIVFI 0x1ffU
+#define V_STATIC_M1_PLL_DIVFI(x) ((x) << S_STATIC_M1_PLL_DIVFI)
+#define G_STATIC_M1_PLL_DIVFI(x) (((x) >> S_STATIC_M1_PLL_DIVFI) & M_STATIC_M1_PLL_DIVFI)
+
+#define S_STATIC_M1_PLL_DIVR 2
+#define M_STATIC_M1_PLL_DIVR 0x3fU
+#define V_STATIC_M1_PLL_DIVR(x) ((x) << S_STATIC_M1_PLL_DIVR)
+#define G_STATIC_M1_PLL_DIVR(x) (((x) >> S_STATIC_M1_PLL_DIVR) & M_STATIC_M1_PLL_DIVR)
+
+#define S_STATIC_M1_PLL_BYPASS 1
+#define V_STATIC_M1_PLL_BYPASS(x) ((x) << S_STATIC_M1_PLL_BYPASS)
+#define F_STATIC_M1_PLL_BYPASS V_STATIC_M1_PLL_BYPASS(1U)
+
+#define S_STATIC_M1_PLL_RESET 0
+#define V_STATIC_M1_PLL_RESET(x) ((x) << S_STATIC_M1_PLL_RESET)
+#define F_STATIC_M1_PLL_RESET V_STATIC_M1_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF3 0x60ec
#define S_T5_STATIC_U_PLL_MULTPRE 30
@@ -13309,6 +16836,26 @@
#define V_T6_STATIC_U_PLL_RANGEA(x) ((x) << S_T6_STATIC_U_PLL_RANGEA)
#define G_T6_STATIC_U_PLL_RANGEA(x) (((x) >> S_T6_STATIC_U_PLL_RANGEA) & M_T6_STATIC_U_PLL_RANGEA)
+#define A_DBG_STATIC_M1_PLL_CONF2 0x60ec
+
+#define S_STATIC_M1_PLL_SSMF 5
+#define M_STATIC_M1_PLL_SSMF 0xfU
+#define V_STATIC_M1_PLL_SSMF(x) ((x) << S_STATIC_M1_PLL_SSMF)
+#define G_STATIC_M1_PLL_SSMF(x) (((x) >> S_STATIC_M1_PLL_SSMF) & M_STATIC_M1_PLL_SSMF)
+
+#define S_STATIC_M1_PLL_SSMD 2
+#define M_STATIC_M1_PLL_SSMD 0x7U
+#define V_STATIC_M1_PLL_SSMD(x) ((x) << S_STATIC_M1_PLL_SSMD)
+#define G_STATIC_M1_PLL_SSMD(x) (((x) >> S_STATIC_M1_PLL_SSMD) & M_STATIC_M1_PLL_SSMD)
+
+#define S_STATIC_M1_PLL_SSDS 1
+#define V_STATIC_M1_PLL_SSDS(x) ((x) << S_STATIC_M1_PLL_SSDS)
+#define F_STATIC_M1_PLL_SSDS V_STATIC_M1_PLL_SSDS(1U)
+
+#define S_STATIC_M1_PLL_SSE 0
+#define V_STATIC_M1_PLL_SSE(x) ((x) << S_STATIC_M1_PLL_SSE)
+#define F_STATIC_M1_PLL_SSE V_STATIC_M1_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_T5_STATIC_U_PLL_CONF5 0x60f4
@@ -13557,6 +17104,104 @@
#define V_GPIO19_OUT_VAL(x) ((x) << S_GPIO19_OUT_VAL)
#define F_GPIO19_OUT_VAL V_GPIO19_OUT_VAL(1U)
+#define A_DBG_GPIO_OEN 0x6100
+
+#define S_GPIO23_OEN 23
+#define V_GPIO23_OEN(x) ((x) << S_GPIO23_OEN)
+#define F_GPIO23_OEN V_GPIO23_OEN(1U)
+
+#define S_GPIO22_OEN 22
+#define V_GPIO22_OEN(x) ((x) << S_GPIO22_OEN)
+#define F_GPIO22_OEN V_GPIO22_OEN(1U)
+
+#define S_GPIO21_OEN 21
+#define V_GPIO21_OEN(x) ((x) << S_GPIO21_OEN)
+#define F_GPIO21_OEN V_GPIO21_OEN(1U)
+
+#define S_GPIO20_OEN 20
+#define V_GPIO20_OEN(x) ((x) << S_GPIO20_OEN)
+#define F_GPIO20_OEN V_GPIO20_OEN(1U)
+
+#define S_T7_GPIO19_OEN 19
+#define V_T7_GPIO19_OEN(x) ((x) << S_T7_GPIO19_OEN)
+#define F_T7_GPIO19_OEN V_T7_GPIO19_OEN(1U)
+
+#define S_T7_GPIO18_OEN 18
+#define V_T7_GPIO18_OEN(x) ((x) << S_T7_GPIO18_OEN)
+#define F_T7_GPIO18_OEN V_T7_GPIO18_OEN(1U)
+
+#define S_T7_GPIO17_OEN 17
+#define V_T7_GPIO17_OEN(x) ((x) << S_T7_GPIO17_OEN)
+#define F_T7_GPIO17_OEN V_T7_GPIO17_OEN(1U)
+
+#define S_T7_GPIO16_OEN 16
+#define V_T7_GPIO16_OEN(x) ((x) << S_T7_GPIO16_OEN)
+#define F_T7_GPIO16_OEN V_T7_GPIO16_OEN(1U)
+
+#define S_T7_GPIO15_OEN 15
+#define V_T7_GPIO15_OEN(x) ((x) << S_T7_GPIO15_OEN)
+#define F_T7_GPIO15_OEN V_T7_GPIO15_OEN(1U)
+
+#define S_T7_GPIO14_OEN 14
+#define V_T7_GPIO14_OEN(x) ((x) << S_T7_GPIO14_OEN)
+#define F_T7_GPIO14_OEN V_T7_GPIO14_OEN(1U)
+
+#define S_T7_GPIO13_OEN 13
+#define V_T7_GPIO13_OEN(x) ((x) << S_T7_GPIO13_OEN)
+#define F_T7_GPIO13_OEN V_T7_GPIO13_OEN(1U)
+
+#define S_T7_GPIO12_OEN 12
+#define V_T7_GPIO12_OEN(x) ((x) << S_T7_GPIO12_OEN)
+#define F_T7_GPIO12_OEN V_T7_GPIO12_OEN(1U)
+
+#define S_T7_GPIO11_OEN 11
+#define V_T7_GPIO11_OEN(x) ((x) << S_T7_GPIO11_OEN)
+#define F_T7_GPIO11_OEN V_T7_GPIO11_OEN(1U)
+
+#define S_T7_GPIO10_OEN 10
+#define V_T7_GPIO10_OEN(x) ((x) << S_T7_GPIO10_OEN)
+#define F_T7_GPIO10_OEN V_T7_GPIO10_OEN(1U)
+
+#define S_T7_GPIO9_OEN 9
+#define V_T7_GPIO9_OEN(x) ((x) << S_T7_GPIO9_OEN)
+#define F_T7_GPIO9_OEN V_T7_GPIO9_OEN(1U)
+
+#define S_T7_GPIO8_OEN 8
+#define V_T7_GPIO8_OEN(x) ((x) << S_T7_GPIO8_OEN)
+#define F_T7_GPIO8_OEN V_T7_GPIO8_OEN(1U)
+
+#define S_T7_GPIO7_OEN 7
+#define V_T7_GPIO7_OEN(x) ((x) << S_T7_GPIO7_OEN)
+#define F_T7_GPIO7_OEN V_T7_GPIO7_OEN(1U)
+
+#define S_T7_GPIO6_OEN 6
+#define V_T7_GPIO6_OEN(x) ((x) << S_T7_GPIO6_OEN)
+#define F_T7_GPIO6_OEN V_T7_GPIO6_OEN(1U)
+
+#define S_T7_GPIO5_OEN 5
+#define V_T7_GPIO5_OEN(x) ((x) << S_T7_GPIO5_OEN)
+#define F_T7_GPIO5_OEN V_T7_GPIO5_OEN(1U)
+
+#define S_T7_GPIO4_OEN 4
+#define V_T7_GPIO4_OEN(x) ((x) << S_T7_GPIO4_OEN)
+#define F_T7_GPIO4_OEN V_T7_GPIO4_OEN(1U)
+
+#define S_T7_GPIO3_OEN 3
+#define V_T7_GPIO3_OEN(x) ((x) << S_T7_GPIO3_OEN)
+#define F_T7_GPIO3_OEN V_T7_GPIO3_OEN(1U)
+
+#define S_T7_GPIO2_OEN 2
+#define V_T7_GPIO2_OEN(x) ((x) << S_T7_GPIO2_OEN)
+#define F_T7_GPIO2_OEN V_T7_GPIO2_OEN(1U)
+
+#define S_T7_GPIO1_OEN 1
+#define V_T7_GPIO1_OEN(x) ((x) << S_T7_GPIO1_OEN)
+#define F_T7_GPIO1_OEN V_T7_GPIO1_OEN(1U)
+
+#define S_T7_GPIO0_OEN 0
+#define V_T7_GPIO0_OEN(x) ((x) << S_T7_GPIO0_OEN)
+#define F_T7_GPIO0_OEN V_T7_GPIO0_OEN(1U)
+
#define A_DBG_PVT_REG_UPDATE_CTL 0x6104
#define S_FAST_UPDATE 8
@@ -13605,6 +17250,104 @@
#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN)
#define F_GPIO16_IN V_GPIO16_IN(1U)
+#define A_DBG_GPIO_CHG_DET 0x6104
+
+#define S_GPIO23_CHG_DET 23
+#define V_GPIO23_CHG_DET(x) ((x) << S_GPIO23_CHG_DET)
+#define F_GPIO23_CHG_DET V_GPIO23_CHG_DET(1U)
+
+#define S_GPIO22_CHG_DET 22
+#define V_GPIO22_CHG_DET(x) ((x) << S_GPIO22_CHG_DET)
+#define F_GPIO22_CHG_DET V_GPIO22_CHG_DET(1U)
+
+#define S_GPIO21_CHG_DET 21
+#define V_GPIO21_CHG_DET(x) ((x) << S_GPIO21_CHG_DET)
+#define F_GPIO21_CHG_DET V_GPIO21_CHG_DET(1U)
+
+#define S_GPIO20_CHG_DET 20
+#define V_GPIO20_CHG_DET(x) ((x) << S_GPIO20_CHG_DET)
+#define F_GPIO20_CHG_DET V_GPIO20_CHG_DET(1U)
+
+#define S_T7_GPIO19_CHG_DET 19
+#define V_T7_GPIO19_CHG_DET(x) ((x) << S_T7_GPIO19_CHG_DET)
+#define F_T7_GPIO19_CHG_DET V_T7_GPIO19_CHG_DET(1U)
+
+#define S_T7_GPIO18_CHG_DET 18
+#define V_T7_GPIO18_CHG_DET(x) ((x) << S_T7_GPIO18_CHG_DET)
+#define F_T7_GPIO18_CHG_DET V_T7_GPIO18_CHG_DET(1U)
+
+#define S_T7_GPIO17_CHG_DET 17
+#define V_T7_GPIO17_CHG_DET(x) ((x) << S_T7_GPIO17_CHG_DET)
+#define F_T7_GPIO17_CHG_DET V_T7_GPIO17_CHG_DET(1U)
+
+#define S_T7_GPIO16_CHG_DET 16
+#define V_T7_GPIO16_CHG_DET(x) ((x) << S_T7_GPIO16_CHG_DET)
+#define F_T7_GPIO16_CHG_DET V_T7_GPIO16_CHG_DET(1U)
+
+#define S_T7_GPIO15_CHG_DET 15
+#define V_T7_GPIO15_CHG_DET(x) ((x) << S_T7_GPIO15_CHG_DET)
+#define F_T7_GPIO15_CHG_DET V_T7_GPIO15_CHG_DET(1U)
+
+#define S_T7_GPIO14_CHG_DET 14
+#define V_T7_GPIO14_CHG_DET(x) ((x) << S_T7_GPIO14_CHG_DET)
+#define F_T7_GPIO14_CHG_DET V_T7_GPIO14_CHG_DET(1U)
+
+#define S_T7_GPIO13_CHG_DET 13
+#define V_T7_GPIO13_CHG_DET(x) ((x) << S_T7_GPIO13_CHG_DET)
+#define F_T7_GPIO13_CHG_DET V_T7_GPIO13_CHG_DET(1U)
+
+#define S_T7_GPIO12_CHG_DET 12
+#define V_T7_GPIO12_CHG_DET(x) ((x) << S_T7_GPIO12_CHG_DET)
+#define F_T7_GPIO12_CHG_DET V_T7_GPIO12_CHG_DET(1U)
+
+#define S_T7_GPIO11_CHG_DET 11
+#define V_T7_GPIO11_CHG_DET(x) ((x) << S_T7_GPIO11_CHG_DET)
+#define F_T7_GPIO11_CHG_DET V_T7_GPIO11_CHG_DET(1U)
+
+#define S_T7_GPIO10_CHG_DET 10
+#define V_T7_GPIO10_CHG_DET(x) ((x) << S_T7_GPIO10_CHG_DET)
+#define F_T7_GPIO10_CHG_DET V_T7_GPIO10_CHG_DET(1U)
+
+#define S_T7_GPIO9_CHG_DET 9
+#define V_T7_GPIO9_CHG_DET(x) ((x) << S_T7_GPIO9_CHG_DET)
+#define F_T7_GPIO9_CHG_DET V_T7_GPIO9_CHG_DET(1U)
+
+#define S_T7_GPIO8_CHG_DET 8
+#define V_T7_GPIO8_CHG_DET(x) ((x) << S_T7_GPIO8_CHG_DET)
+#define F_T7_GPIO8_CHG_DET V_T7_GPIO8_CHG_DET(1U)
+
+#define S_T7_GPIO7_CHG_DET 7
+#define V_T7_GPIO7_CHG_DET(x) ((x) << S_T7_GPIO7_CHG_DET)
+#define F_T7_GPIO7_CHG_DET V_T7_GPIO7_CHG_DET(1U)
+
+#define S_T7_GPIO6_CHG_DET 6
+#define V_T7_GPIO6_CHG_DET(x) ((x) << S_T7_GPIO6_CHG_DET)
+#define F_T7_GPIO6_CHG_DET V_T7_GPIO6_CHG_DET(1U)
+
+#define S_T7_GPIO5_CHG_DET 5
+#define V_T7_GPIO5_CHG_DET(x) ((x) << S_T7_GPIO5_CHG_DET)
+#define F_T7_GPIO5_CHG_DET V_T7_GPIO5_CHG_DET(1U)
+
+#define S_T7_GPIO4_CHG_DET 4
+#define V_T7_GPIO4_CHG_DET(x) ((x) << S_T7_GPIO4_CHG_DET)
+#define F_T7_GPIO4_CHG_DET V_T7_GPIO4_CHG_DET(1U)
+
+#define S_T7_GPIO3_CHG_DET 3
+#define V_T7_GPIO3_CHG_DET(x) ((x) << S_T7_GPIO3_CHG_DET)
+#define F_T7_GPIO3_CHG_DET V_T7_GPIO3_CHG_DET(1U)
+
+#define S_T7_GPIO2_CHG_DET 2
+#define V_T7_GPIO2_CHG_DET(x) ((x) << S_T7_GPIO2_CHG_DET)
+#define F_T7_GPIO2_CHG_DET V_T7_GPIO2_CHG_DET(1U)
+
+#define S_T7_GPIO1_CHG_DET 1
+#define V_T7_GPIO1_CHG_DET(x) ((x) << S_T7_GPIO1_CHG_DET)
+#define F_T7_GPIO1_CHG_DET V_T7_GPIO1_CHG_DET(1U)
+
+#define S_T7_GPIO0_CHG_DET 0
+#define V_T7_GPIO0_CHG_DET(x) ((x) << S_T7_GPIO0_CHG_DET)
+#define F_T7_GPIO0_CHG_DET V_T7_GPIO0_CHG_DET(1U)
+
#define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108
#define S_LAST_MEASUREMENT_SELECT 8
@@ -13964,6 +17707,22 @@
#define V_GPIO0_PE_EN(x) ((x) << S_GPIO0_PE_EN)
#define F_GPIO0_PE_EN V_GPIO0_PE_EN(1U)
+#define S_GPIO23_PE_EN 23
+#define V_GPIO23_PE_EN(x) ((x) << S_GPIO23_PE_EN)
+#define F_GPIO23_PE_EN V_GPIO23_PE_EN(1U)
+
+#define S_GPIO22_PE_EN 22
+#define V_GPIO22_PE_EN(x) ((x) << S_GPIO22_PE_EN)
+#define F_GPIO22_PE_EN V_GPIO22_PE_EN(1U)
+
+#define S_GPIO21_PE_EN 21
+#define V_GPIO21_PE_EN(x) ((x) << S_GPIO21_PE_EN)
+#define F_GPIO21_PE_EN V_GPIO21_PE_EN(1U)
+
+#define S_GPIO20_PE_EN 20
+#define V_GPIO20_PE_EN(x) ((x) << S_GPIO20_PE_EN)
+#define F_GPIO20_PE_EN V_GPIO20_PE_EN(1U)
+
#define A_DBG_PVT_REG_THRESHOLD 0x611c
#define S_PVT_CALIBRATION_DONE 8
@@ -14084,6 +17843,22 @@
#define V_GPIO0_PS_EN(x) ((x) << S_GPIO0_PS_EN)
#define F_GPIO0_PS_EN V_GPIO0_PS_EN(1U)
+#define S_GPIO23_PS_EN 23
+#define V_GPIO23_PS_EN(x) ((x) << S_GPIO23_PS_EN)
+#define F_GPIO23_PS_EN V_GPIO23_PS_EN(1U)
+
+#define S_GPIO22_PS_EN 22
+#define V_GPIO22_PS_EN(x) ((x) << S_GPIO22_PS_EN)
+#define F_GPIO22_PS_EN V_GPIO22_PS_EN(1U)
+
+#define S_GPIO21_PS_EN 21
+#define V_GPIO21_PS_EN(x) ((x) << S_GPIO21_PS_EN)
+#define F_GPIO21_PS_EN V_GPIO21_PS_EN(1U)
+
+#define S_GPIO20_PS_EN 20
+#define V_GPIO20_PS_EN(x) ((x) << S_GPIO20_PS_EN)
+#define F_GPIO20_PS_EN V_GPIO20_PS_EN(1U)
+
#define A_DBG_PVT_REG_IN_TERMP 0x6120
#define S_REG_IN_TERMP_B 4
@@ -14254,6 +18029,17 @@
#define V_STATIC_U_PLL_VREGTUNE(x) ((x) << S_STATIC_U_PLL_VREGTUNE)
#define G_STATIC_U_PLL_VREGTUNE(x) (((x) >> S_STATIC_U_PLL_VREGTUNE) & M_STATIC_U_PLL_VREGTUNE)
+#define A_DBG_STATIC_PLL_LOCK_WAIT_CONF 0x6150
+
+#define S_STATIC_WAIT_LOCK 24
+#define V_STATIC_WAIT_LOCK(x) ((x) << S_STATIC_WAIT_LOCK)
+#define F_STATIC_WAIT_LOCK V_STATIC_WAIT_LOCK(1U)
+
+#define S_STATIC_LOCK_WAIT_TIME 0
+#define M_STATIC_LOCK_WAIT_TIME 0xffffffU
+#define V_STATIC_LOCK_WAIT_TIME(x) ((x) << S_STATIC_LOCK_WAIT_TIME)
+#define G_STATIC_LOCK_WAIT_TIME(x) (((x) >> S_STATIC_LOCK_WAIT_TIME) & M_STATIC_LOCK_WAIT_TIME)
+
#define A_DBG_STATIC_C_PLL_CONF6 0x6154
#define S_STATIC_C_PLL_VREGTUNE 0
@@ -14303,13 +18089,274 @@
#define A_DBG_CUST_EFUSE_BYTE24_27 0x6178
#define A_DBG_CUST_EFUSE_BYTE28_31 0x617c
#define A_DBG_CUST_EFUSE_BYTE32_35 0x6180
+#define A_DBG_GPIO_INT_ENABLE 0x6180
+
+#define S_GPIO23 23
+#define V_GPIO23(x) ((x) << S_GPIO23)
+#define F_GPIO23 V_GPIO23(1U)
+
+#define S_GPIO22 22
+#define V_GPIO22(x) ((x) << S_GPIO22)
+#define F_GPIO22 V_GPIO22(1U)
+
+#define S_GPIO21 21
+#define V_GPIO21(x) ((x) << S_GPIO21)
+#define F_GPIO21 V_GPIO21(1U)
+
+#define S_GPIO20 20
+#define V_GPIO20(x) ((x) << S_GPIO20)
+#define F_GPIO20 V_GPIO20(1U)
+
+#define S_T7_GPIO19 19
+#define V_T7_GPIO19(x) ((x) << S_T7_GPIO19)
+#define F_T7_GPIO19 V_T7_GPIO19(1U)
+
+#define S_T7_GPIO18 18
+#define V_T7_GPIO18(x) ((x) << S_T7_GPIO18)
+#define F_T7_GPIO18 V_T7_GPIO18(1U)
+
+#define S_T7_GPIO17 17
+#define V_T7_GPIO17(x) ((x) << S_T7_GPIO17)
+#define F_T7_GPIO17 V_T7_GPIO17(1U)
+
+#define S_T7_GPIO16 16
+#define V_T7_GPIO16(x) ((x) << S_T7_GPIO16)
+#define F_T7_GPIO16 V_T7_GPIO16(1U)
+
#define A_DBG_CUST_EFUSE_BYTE36_39 0x6184
+#define A_DBG_GPIO_INT_CAUSE 0x6184
#define A_DBG_CUST_EFUSE_BYTE40_43 0x6188
+#define A_T7_DBG_GPIO_ACT_LOW 0x6188
+
+#define S_GPIO23_ACT_LOW 23
+#define V_GPIO23_ACT_LOW(x) ((x) << S_GPIO23_ACT_LOW)
+#define F_GPIO23_ACT_LOW V_GPIO23_ACT_LOW(1U)
+
+#define S_GPIO22_ACT_LOW 22
+#define V_GPIO22_ACT_LOW(x) ((x) << S_GPIO22_ACT_LOW)
+#define F_GPIO22_ACT_LOW V_GPIO22_ACT_LOW(1U)
+
+#define S_GPIO21_ACT_LOW 21
+#define V_GPIO21_ACT_LOW(x) ((x) << S_GPIO21_ACT_LOW)
+#define F_GPIO21_ACT_LOW V_GPIO21_ACT_LOW(1U)
+
+#define S_GPIO20_ACT_LOW 20
+#define V_GPIO20_ACT_LOW(x) ((x) << S_GPIO20_ACT_LOW)
+#define F_GPIO20_ACT_LOW V_GPIO20_ACT_LOW(1U)
+
+#define S_T7_GPIO19_ACT_LOW 19
+#define V_T7_GPIO19_ACT_LOW(x) ((x) << S_T7_GPIO19_ACT_LOW)
+#define F_T7_GPIO19_ACT_LOW V_T7_GPIO19_ACT_LOW(1U)
+
+#define S_T7_GPIO18_ACT_LOW 18
+#define V_T7_GPIO18_ACT_LOW(x) ((x) << S_T7_GPIO18_ACT_LOW)
+#define F_T7_GPIO18_ACT_LOW V_T7_GPIO18_ACT_LOW(1U)
+
+#define S_T7_GPIO17_ACT_LOW 17
+#define V_T7_GPIO17_ACT_LOW(x) ((x) << S_T7_GPIO17_ACT_LOW)
+#define F_T7_GPIO17_ACT_LOW V_T7_GPIO17_ACT_LOW(1U)
+
+#define S_T7_GPIO16_ACT_LOW 16
+#define V_T7_GPIO16_ACT_LOW(x) ((x) << S_T7_GPIO16_ACT_LOW)
+#define F_T7_GPIO16_ACT_LOW V_T7_GPIO16_ACT_LOW(1U)
+
#define A_DBG_CUST_EFUSE_BYTE44_47 0x618c
+#define A_DBG_DDR_CAL 0x618c
+
+#define S_CAL_ENDC 9
+#define V_CAL_ENDC(x) ((x) << S_CAL_ENDC)
+#define F_CAL_ENDC V_CAL_ENDC(1U)
+
+#define S_CAL_MODE 8
+#define V_CAL_MODE(x) ((x) << S_CAL_MODE)
+#define F_CAL_MODE V_CAL_MODE(1U)
+
+#define S_CAL_REFSEL 7
+#define V_CAL_REFSEL(x) ((x) << S_CAL_REFSEL)
+#define F_CAL_REFSEL V_CAL_REFSEL(1U)
+
+#define S_PD 6
+#define V_PD(x) ((x) << S_PD)
+#define F_PD V_PD(1U)
+
+#define S_CAL_RST 5
+#define V_CAL_RST(x) ((x) << S_CAL_RST)
+#define F_CAL_RST V_CAL_RST(1U)
+
+#define S_CAL_READ 4
+#define V_CAL_READ(x) ((x) << S_CAL_READ)
+#define F_CAL_READ V_CAL_READ(1U)
+
+#define S_CAL_SC 3
+#define V_CAL_SC(x) ((x) << S_CAL_SC)
+#define F_CAL_SC V_CAL_SC(1U)
+
+#define S_CAL_LC 2
+#define V_CAL_LC(x) ((x) << S_CAL_LC)
+#define F_CAL_LC V_CAL_LC(1U)
+
+#define S_CAL_CCAL 1
+#define V_CAL_CCAL(x) ((x) << S_CAL_CCAL)
+#define F_CAL_CCAL V_CAL_CCAL(1U)
+
+#define S_CAL_RES 0
+#define V_CAL_RES(x) ((x) << S_CAL_RES)
+#define F_CAL_RES V_CAL_RES(1U)
+
#define A_DBG_CUST_EFUSE_BYTE48_51 0x6190
+#define A_DBG_EFUSE_CTL_0 0x6190
+
+#define S_EFUSE_CSB 31
+#define V_EFUSE_CSB(x) ((x) << S_EFUSE_CSB)
+#define F_EFUSE_CSB V_EFUSE_CSB(1U)
+
+#define S_EFUSE_STROBE 30
+#define V_EFUSE_STROBE(x) ((x) << S_EFUSE_STROBE)
+#define F_EFUSE_STROBE V_EFUSE_STROBE(1U)
+
+#define S_EFUSE_LOAD 29
+#define V_EFUSE_LOAD(x) ((x) << S_EFUSE_LOAD)
+#define F_EFUSE_LOAD V_EFUSE_LOAD(1U)
+
+#define S_EFUSE_PGENB 28
+#define V_EFUSE_PGENB(x) ((x) << S_EFUSE_PGENB)
+#define F_EFUSE_PGENB V_EFUSE_PGENB(1U)
+
+#define S_EFUSE_PS 27
+#define V_EFUSE_PS(x) ((x) << S_EFUSE_PS)
+#define F_EFUSE_PS V_EFUSE_PS(1U)
+
+#define S_EFUSE_MR 26
+#define V_EFUSE_MR(x) ((x) << S_EFUSE_MR)
+#define F_EFUSE_MR V_EFUSE_MR(1U)
+
+#define S_EFUSE_PD 25
+#define V_EFUSE_PD(x) ((x) << S_EFUSE_PD)
+#define F_EFUSE_PD V_EFUSE_PD(1U)
+
+#define S_EFUSE_RWL 24
+#define V_EFUSE_RWL(x) ((x) << S_EFUSE_RWL)
+#define F_EFUSE_RWL V_EFUSE_RWL(1U)
+
+#define S_EFUSE_RSB 23
+#define V_EFUSE_RSB(x) ((x) << S_EFUSE_RSB)
+#define F_EFUSE_RSB V_EFUSE_RSB(1U)
+
+#define S_EFUSE_TRCS 22
+#define V_EFUSE_TRCS(x) ((x) << S_EFUSE_TRCS)
+#define F_EFUSE_TRCS V_EFUSE_TRCS(1U)
+
+#define S_EFUSE_AT 20
+#define M_EFUSE_AT 0x3U
+#define V_EFUSE_AT(x) ((x) << S_EFUSE_AT)
+#define G_EFUSE_AT(x) (((x) >> S_EFUSE_AT) & M_EFUSE_AT)
+
+#define S_EFUSE_RD_STATE 16
+#define M_EFUSE_RD_STATE 0xfU
+#define V_EFUSE_RD_STATE(x) ((x) << S_EFUSE_RD_STATE)
+#define G_EFUSE_RD_STATE(x) (((x) >> S_EFUSE_RD_STATE) & M_EFUSE_RD_STATE)
+
+#define S_EFUSE_BUSY 15
+#define V_EFUSE_BUSY(x) ((x) << S_EFUSE_BUSY)
+#define F_EFUSE_BUSY V_EFUSE_BUSY(1U)
+
+#define S_EFUSE_WR_RD 13
+#define M_EFUSE_WR_RD 0x3U
+#define V_EFUSE_WR_RD(x) ((x) << S_EFUSE_WR_RD)
+#define G_EFUSE_WR_RD(x) (((x) >> S_EFUSE_WR_RD) & M_EFUSE_WR_RD)
+
+#define S_EFUSE_A 0
+#define M_EFUSE_A 0x7ffU
+#define V_EFUSE_A(x) ((x) << S_EFUSE_A)
+#define G_EFUSE_A(x) (((x) >> S_EFUSE_A) & M_EFUSE_A)
+
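/*
 * Editor's sketch, not part of the diff: single-bit F_ flags such as
 * F_EFUSE_BUSY above are typically tested directly against the register
 * word rather than extracted with a G_ macro.  Assuming the same
 * t4_read_reg() accessor and FreeBSD's DELAY() microsecond pause, a bounded
 * wait for the efuse controller to go idle could be written as:
 */
int i;
for (i = 0; i < 1000; i++) {
	if (!(t4_read_reg(sc, A_DBG_EFUSE_CTL_0) & F_EFUSE_BUSY))
		break;			/* controller is idle */
	DELAY(10);			/* 10us between polls */
}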
#define A_DBG_CUST_EFUSE_BYTE52_55 0x6194
+#define A_DBG_EFUSE_CTL_1 0x6194
#define A_DBG_CUST_EFUSE_BYTE56_59 0x6198
+#define A_DBG_EFUSE_RD_CTL 0x6198
+
+#define S_EFUSE_RD_ID 6
+#define M_EFUSE_RD_ID 0x3U
+#define V_EFUSE_RD_ID(x) ((x) << S_EFUSE_RD_ID)
+#define G_EFUSE_RD_ID(x) (((x) >> S_EFUSE_RD_ID) & M_EFUSE_RD_ID)
+
+#define S_EFUSE_RD_ADDR 0
+#define M_EFUSE_RD_ADDR 0x3fU
+#define V_EFUSE_RD_ADDR(x) ((x) << S_EFUSE_RD_ADDR)
+#define G_EFUSE_RD_ADDR(x) (((x) >> S_EFUSE_RD_ADDR) & M_EFUSE_RD_ADDR)
+
#define A_DBG_CUST_EFUSE_BYTE60_63 0x619c
+#define A_DBG_EFUSE_RD_DATA 0x619c
+#define A_DBG_EFUSE_TIME_0 0x61a0
+
+#define S_EFUSE_TIME_1 16
+#define M_EFUSE_TIME_1 0xffffU
+#define V_EFUSE_TIME_1(x) ((x) << S_EFUSE_TIME_1)
+#define G_EFUSE_TIME_1(x) (((x) >> S_EFUSE_TIME_1) & M_EFUSE_TIME_1)
+
+#define S_EFUSE_TIME_0 0
+#define M_EFUSE_TIME_0 0xffffU
+#define V_EFUSE_TIME_0(x) ((x) << S_EFUSE_TIME_0)
+#define G_EFUSE_TIME_0(x) (((x) >> S_EFUSE_TIME_0) & M_EFUSE_TIME_0)
+
+#define A_DBG_EFUSE_TIME_1 0x61a4
+
+#define S_EFUSE_TIME_3 16
+#define M_EFUSE_TIME_3 0xffffU
+#define V_EFUSE_TIME_3(x) ((x) << S_EFUSE_TIME_3)
+#define G_EFUSE_TIME_3(x) (((x) >> S_EFUSE_TIME_3) & M_EFUSE_TIME_3)
+
+#define S_EFUSE_TIME_2 0
+#define M_EFUSE_TIME_2 0xffffU
+#define V_EFUSE_TIME_2(x) ((x) << S_EFUSE_TIME_2)
+#define G_EFUSE_TIME_2(x) (((x) >> S_EFUSE_TIME_2) & M_EFUSE_TIME_2)
+
+#define A_DBG_EFUSE_TIME_2 0x61a8
+
+#define S_EFUSE_TIME_5 16
+#define M_EFUSE_TIME_5 0xffffU
+#define V_EFUSE_TIME_5(x) ((x) << S_EFUSE_TIME_5)
+#define G_EFUSE_TIME_5(x) (((x) >> S_EFUSE_TIME_5) & M_EFUSE_TIME_5)
+
+#define S_EFUSE_TIME_4 0
+#define M_EFUSE_TIME_4 0xffffU
+#define V_EFUSE_TIME_4(x) ((x) << S_EFUSE_TIME_4)
+#define G_EFUSE_TIME_4(x) (((x) >> S_EFUSE_TIME_4) & M_EFUSE_TIME_4)
+
+#define A_DBG_EFUSE_TIME_3 0x61ac
+
+#define S_EFUSE_TIME_7 16
+#define M_EFUSE_TIME_7 0xffffU
+#define V_EFUSE_TIME_7(x) ((x) << S_EFUSE_TIME_7)
+#define G_EFUSE_TIME_7(x) (((x) >> S_EFUSE_TIME_7) & M_EFUSE_TIME_7)
+
+#define S_EFUSE_TIME_6 0
+#define M_EFUSE_TIME_6 0xffffU
+#define V_EFUSE_TIME_6(x) ((x) << S_EFUSE_TIME_6)
+#define G_EFUSE_TIME_6(x) (((x) >> S_EFUSE_TIME_6) & M_EFUSE_TIME_6)
+
+#define A_DBG_VREF_CTL 0x61b0
+
+#define S_VREF_SEL_1 15
+#define V_VREF_SEL_1(x) ((x) << S_VREF_SEL_1)
+#define F_VREF_SEL_1 V_VREF_SEL_1(1U)
+
+#define S_VREF_R_1 8
+#define M_VREF_R_1 0x7fU
+#define V_VREF_R_1(x) ((x) << S_VREF_R_1)
+#define G_VREF_R_1(x) (((x) >> S_VREF_R_1) & M_VREF_R_1)
+
+#define S_VREF_SEL_0 7
+#define V_VREF_SEL_0(x) ((x) << S_VREF_SEL_0)
+#define F_VREF_SEL_0 V_VREF_SEL_0(1U)
+
+#define S_VREF_R_0 0
+#define M_VREF_R_0 0x7fU
+#define V_VREF_R_0(x) ((x) << S_VREF_R_0)
+#define G_VREF_R_0(x) (((x) >> S_VREF_R_0) & M_VREF_R_0)
+
+#define A_DBG_FPGA_EFUSE_CTL 0x61b4
+#define A_DBG_FPGA_EFUSE_DATA 0x61b8
/* registers for module MC */
#define MC_BASE_ADDR 0x6200
@@ -16048,31 +20095,91 @@
#define V_THRESHOLD0_EN(x) ((x) << S_THRESHOLD0_EN)
#define F_THRESHOLD0_EN V_THRESHOLD0_EN(1U)
+#define A_MA_CLIENT0_PR_THRESHOLD 0x7700
+
+#define S_T7_THRESHOLD1_EN 31
+#define V_T7_THRESHOLD1_EN(x) ((x) << S_T7_THRESHOLD1_EN)
+#define F_T7_THRESHOLD1_EN V_T7_THRESHOLD1_EN(1U)
+
+#define S_T7_THRESHOLD1 16
+#define M_T7_THRESHOLD1 0x7fffU
+#define V_T7_THRESHOLD1(x) ((x) << S_T7_THRESHOLD1)
+#define G_T7_THRESHOLD1(x) (((x) >> S_T7_THRESHOLD1) & M_T7_THRESHOLD1)
+
+#define S_T7_THRESHOLD0_EN 15
+#define V_T7_THRESHOLD0_EN(x) ((x) << S_T7_THRESHOLD0_EN)
+#define F_T7_THRESHOLD0_EN V_T7_THRESHOLD0_EN(1U)
+
+#define S_T7_THRESHOLD0 0
+#define M_T7_THRESHOLD0 0x7fffU
+#define V_T7_THRESHOLD0(x) ((x) << S_T7_THRESHOLD0)
+#define G_T7_THRESHOLD0(x) (((x) >> S_T7_THRESHOLD0) & M_T7_THRESHOLD0)
+
#define A_MA_CLIENT0_WR_LATENCY_THRESHOLD 0x7704
+#define A_MA_CLIENT0_CR_THRESHOLD 0x7704
+
+#define S_CREDITSHAPER_EN 31
+#define V_CREDITSHAPER_EN(x) ((x) << S_CREDITSHAPER_EN)
+#define F_CREDITSHAPER_EN V_CREDITSHAPER_EN(1U)
+
+#define S_CREDIT_MAX 16
+#define M_CREDIT_MAX 0xfffU
+#define V_CREDIT_MAX(x) ((x) << S_CREDIT_MAX)
+#define G_CREDIT_MAX(x) (((x) >> S_CREDIT_MAX) & M_CREDIT_MAX)
+
+#define S_CREDIT_VAL 0
+#define M_CREDIT_VAL 0xfffU
+#define V_CREDIT_VAL(x) ((x) << S_CREDIT_VAL)
+#define G_CREDIT_VAL(x) (((x) >> S_CREDIT_VAL) & M_CREDIT_VAL)
+
#define A_MA_CLIENT1_RD_LATENCY_THRESHOLD 0x7708
+#define A_MA_CLIENT1_PR_THRESHOLD 0x7708
#define A_MA_CLIENT1_WR_LATENCY_THRESHOLD 0x770c
+#define A_MA_CLIENT1_CR_THRESHOLD 0x770c
#define A_MA_CLIENT2_RD_LATENCY_THRESHOLD 0x7710
+#define A_MA_CLIENT2_PR_THRESHOLD 0x7710
#define A_MA_CLIENT2_WR_LATENCY_THRESHOLD 0x7714
+#define A_MA_CLIENT2_CR_THRESHOLD 0x7714
#define A_MA_CLIENT3_RD_LATENCY_THRESHOLD 0x7718
+#define A_MA_CLIENT3_PR_THRESHOLD 0x7718
#define A_MA_CLIENT3_WR_LATENCY_THRESHOLD 0x771c
+#define A_MA_CLIENT3_CR_THRESHOLD 0x771c
#define A_MA_CLIENT4_RD_LATENCY_THRESHOLD 0x7720
+#define A_MA_CLIENT4_PR_THRESHOLD 0x7720
#define A_MA_CLIENT4_WR_LATENCY_THRESHOLD 0x7724
+#define A_MA_CLIENT4_CR_THRESHOLD 0x7724
#define A_MA_CLIENT5_RD_LATENCY_THRESHOLD 0x7728
+#define A_MA_CLIENT5_PR_THRESHOLD 0x7728
#define A_MA_CLIENT5_WR_LATENCY_THRESHOLD 0x772c
+#define A_MA_CLIENT5_CR_THRESHOLD 0x772c
#define A_MA_CLIENT6_RD_LATENCY_THRESHOLD 0x7730
+#define A_MA_CLIENT6_PR_THRESHOLD 0x7730
#define A_MA_CLIENT6_WR_LATENCY_THRESHOLD 0x7734
+#define A_MA_CLIENT6_CR_THRESHOLD 0x7734
#define A_MA_CLIENT7_RD_LATENCY_THRESHOLD 0x7738
+#define A_MA_CLIENT7_PR_THRESHOLD 0x7738
#define A_MA_CLIENT7_WR_LATENCY_THRESHOLD 0x773c
+#define A_MA_CLIENT7_CR_THRESHOLD 0x773c
#define A_MA_CLIENT8_RD_LATENCY_THRESHOLD 0x7740
+#define A_MA_CLIENT8_PR_THRESHOLD 0x7740
#define A_MA_CLIENT8_WR_LATENCY_THRESHOLD 0x7744
+#define A_MA_CLIENT8_CR_THRESHOLD 0x7744
#define A_MA_CLIENT9_RD_LATENCY_THRESHOLD 0x7748
+#define A_MA_CLIENT9_PR_THRESHOLD 0x7748
#define A_MA_CLIENT9_WR_LATENCY_THRESHOLD 0x774c
+#define A_MA_CLIENT9_CR_THRESHOLD 0x774c
#define A_MA_CLIENT10_RD_LATENCY_THRESHOLD 0x7750
+#define A_MA_CLIENT10_PR_THRESHOLD 0x7750
#define A_MA_CLIENT10_WR_LATENCY_THRESHOLD 0x7754
+#define A_MA_CLIENT10_CR_THRESHOLD 0x7754
#define A_MA_CLIENT11_RD_LATENCY_THRESHOLD 0x7758
+#define A_MA_CLIENT11_PR_THRESHOLD 0x7758
#define A_MA_CLIENT11_WR_LATENCY_THRESHOLD 0x775c
+#define A_MA_CLIENT11_CR_THRESHOLD 0x775c
#define A_MA_CLIENT12_RD_LATENCY_THRESHOLD 0x7760
+#define A_MA_CLIENT12_PR_THRESHOLD 0x7760
#define A_MA_CLIENT12_WR_LATENCY_THRESHOLD 0x7764
+#define A_MA_CLIENT12_CR_THRESHOLD 0x7764
#define A_MA_SGE_TH0_DEBUG_CNT 0x7768
#define S_DBG_READ_DATA_CNT 24
@@ -16103,10 +20210,359 @@
#define A_MA_TP_TH1_DEBUG_CNT 0x7780
#define A_MA_LE_DEBUG_CNT 0x7784
#define A_MA_CIM_DEBUG_CNT 0x7788
+#define A_MA_CIM_TH0_DEBUG_CNT 0x7788
#define A_MA_PCIE_DEBUG_CNT 0x778c
#define A_MA_PMTX_DEBUG_CNT 0x7790
#define A_MA_PMRX_DEBUG_CNT 0x7794
#define A_MA_HMA_DEBUG_CNT 0x7798
+#define A_MA_COR_ERROR_ENABLE1 0x779c
+
+#define S_ARB4_COR_WRQUEUE_ERROR_EN 9
+#define V_ARB4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR_EN)
+#define F_ARB4_COR_WRQUEUE_ERROR_EN V_ARB4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR_EN 8
+#define V_ARB3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR_EN)
+#define F_ARB3_COR_WRQUEUE_ERROR_EN V_ARB3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR_EN 7
+#define V_ARB2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR_EN)
+#define F_ARB2_COR_WRQUEUE_ERROR_EN V_ARB2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR_EN 6
+#define V_ARB1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR_EN)
+#define F_ARB1_COR_WRQUEUE_ERROR_EN V_ARB1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR_EN 5
+#define V_ARB0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR_EN)
+#define F_ARB0_COR_WRQUEUE_ERROR_EN V_ARB0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR_EN 4
+#define V_ARB4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR_EN)
+#define F_ARB4_COR_RDQUEUE_ERROR_EN V_ARB4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR_EN 3
+#define V_ARB3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR_EN)
+#define F_ARB3_COR_RDQUEUE_ERROR_EN V_ARB3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR_EN 2
+#define V_ARB2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR_EN)
+#define F_ARB2_COR_RDQUEUE_ERROR_EN V_ARB2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR_EN 1
+#define V_ARB1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR_EN)
+#define F_ARB1_COR_RDQUEUE_ERROR_EN V_ARB1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR_EN 0
+#define V_ARB0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR_EN)
+#define F_ARB0_COR_RDQUEUE_ERROR_EN V_ARB0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS1 0x77a0
+
+#define S_ARB4_COR_WRQUEUE_ERROR 9
+#define V_ARB4_COR_WRQUEUE_ERROR(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR)
+#define F_ARB4_COR_WRQUEUE_ERROR V_ARB4_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR 8
+#define V_ARB3_COR_WRQUEUE_ERROR(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR)
+#define F_ARB3_COR_WRQUEUE_ERROR V_ARB3_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR 7
+#define V_ARB2_COR_WRQUEUE_ERROR(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR)
+#define F_ARB2_COR_WRQUEUE_ERROR V_ARB2_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR 6
+#define V_ARB1_COR_WRQUEUE_ERROR(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR)
+#define F_ARB1_COR_WRQUEUE_ERROR V_ARB1_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR 5
+#define V_ARB0_COR_WRQUEUE_ERROR(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR)
+#define F_ARB0_COR_WRQUEUE_ERROR V_ARB0_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR 4
+#define V_ARB4_COR_RDQUEUE_ERROR(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR)
+#define F_ARB4_COR_RDQUEUE_ERROR V_ARB4_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR 3
+#define V_ARB3_COR_RDQUEUE_ERROR(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR)
+#define F_ARB3_COR_RDQUEUE_ERROR V_ARB3_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR 2
+#define V_ARB2_COR_RDQUEUE_ERROR(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR)
+#define F_ARB2_COR_RDQUEUE_ERROR V_ARB2_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR 1
+#define V_ARB1_COR_RDQUEUE_ERROR(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR)
+#define F_ARB1_COR_RDQUEUE_ERROR V_ARB1_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR 0
+#define V_ARB0_COR_RDQUEUE_ERROR(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR)
+#define F_ARB0_COR_RDQUEUE_ERROR V_ARB0_COR_RDQUEUE_ERROR(1U)
+
+#define A_MA_DBG_CTL 0x77a4
+
+#define S_DATAH_SEL 20
+#define V_DATAH_SEL(x) ((x) << S_DATAH_SEL)
+#define F_DATAH_SEL V_DATAH_SEL(1U)
+
+#define S_EN_DBG 16
+#define V_EN_DBG(x) ((x) << S_EN_DBG)
+#define F_EN_DBG V_EN_DBG(1U)
+
+#define S_T7_SEL 0
+#define M_T7_SEL 0xffU
+#define V_T7_SEL(x) ((x) << S_T7_SEL)
+#define G_T7_SEL(x) (((x) >> S_T7_SEL) & M_T7_SEL)
+
+#define A_MA_DBG_DATA 0x77a8
+#define A_MA_COR_ERROR_ENABLE2 0x77b0
+
+#define S_CL14_COR_WRQUEUE_ERROR_EN 14
+#define V_CL14_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_WRQUEUE_ERROR_EN)
+#define F_CL14_COR_WRQUEUE_ERROR_EN V_CL14_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR_EN 13
+#define V_CL13_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_WRQUEUE_ERROR_EN)
+#define F_CL13_COR_WRQUEUE_ERROR_EN V_CL13_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR_EN 12
+#define V_CL12_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_WRQUEUE_ERROR_EN)
+#define F_CL12_COR_WRQUEUE_ERROR_EN V_CL12_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR_EN 11
+#define V_CL11_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_WRQUEUE_ERROR_EN)
+#define F_CL11_COR_WRQUEUE_ERROR_EN V_CL11_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR_EN 10
+#define V_CL10_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_WRQUEUE_ERROR_EN)
+#define F_CL10_COR_WRQUEUE_ERROR_EN V_CL10_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR_EN 9
+#define V_CL9_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_WRQUEUE_ERROR_EN)
+#define F_CL9_COR_WRQUEUE_ERROR_EN V_CL9_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR_EN 8
+#define V_CL8_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_WRQUEUE_ERROR_EN)
+#define F_CL8_COR_WRQUEUE_ERROR_EN V_CL8_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR_EN 7
+#define V_CL7_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_WRQUEUE_ERROR_EN)
+#define F_CL7_COR_WRQUEUE_ERROR_EN V_CL7_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR_EN 6
+#define V_CL6_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_WRQUEUE_ERROR_EN)
+#define F_CL6_COR_WRQUEUE_ERROR_EN V_CL6_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR_EN 5
+#define V_CL5_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_WRQUEUE_ERROR_EN)
+#define F_CL5_COR_WRQUEUE_ERROR_EN V_CL5_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR_EN 4
+#define V_CL4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_WRQUEUE_ERROR_EN)
+#define F_CL4_COR_WRQUEUE_ERROR_EN V_CL4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR_EN 3
+#define V_CL3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_WRQUEUE_ERROR_EN)
+#define F_CL3_COR_WRQUEUE_ERROR_EN V_CL3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR_EN 2
+#define V_CL2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_WRQUEUE_ERROR_EN)
+#define F_CL2_COR_WRQUEUE_ERROR_EN V_CL2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR_EN 1
+#define V_CL1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_WRQUEUE_ERROR_EN)
+#define F_CL1_COR_WRQUEUE_ERROR_EN V_CL1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR_EN 0
+#define V_CL0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_WRQUEUE_ERROR_EN)
+#define F_CL0_COR_WRQUEUE_ERROR_EN V_CL0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS2 0x77b4
+
+#define S_CL14_COR_WRQUEUE_ERROR 14
+#define V_CL14_COR_WRQUEUE_ERROR(x) ((x) << S_CL14_COR_WRQUEUE_ERROR)
+#define F_CL14_COR_WRQUEUE_ERROR V_CL14_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR 13
+#define V_CL13_COR_WRQUEUE_ERROR(x) ((x) << S_CL13_COR_WRQUEUE_ERROR)
+#define F_CL13_COR_WRQUEUE_ERROR V_CL13_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR 12
+#define V_CL12_COR_WRQUEUE_ERROR(x) ((x) << S_CL12_COR_WRQUEUE_ERROR)
+#define F_CL12_COR_WRQUEUE_ERROR V_CL12_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR 11
+#define V_CL11_COR_WRQUEUE_ERROR(x) ((x) << S_CL11_COR_WRQUEUE_ERROR)
+#define F_CL11_COR_WRQUEUE_ERROR V_CL11_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR 10
+#define V_CL10_COR_WRQUEUE_ERROR(x) ((x) << S_CL10_COR_WRQUEUE_ERROR)
+#define F_CL10_COR_WRQUEUE_ERROR V_CL10_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR 9
+#define V_CL9_COR_WRQUEUE_ERROR(x) ((x) << S_CL9_COR_WRQUEUE_ERROR)
+#define F_CL9_COR_WRQUEUE_ERROR V_CL9_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR 8
+#define V_CL8_COR_WRQUEUE_ERROR(x) ((x) << S_CL8_COR_WRQUEUE_ERROR)
+#define F_CL8_COR_WRQUEUE_ERROR V_CL8_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR 7
+#define V_CL7_COR_WRQUEUE_ERROR(x) ((x) << S_CL7_COR_WRQUEUE_ERROR)
+#define F_CL7_COR_WRQUEUE_ERROR V_CL7_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR 6
+#define V_CL6_COR_WRQUEUE_ERROR(x) ((x) << S_CL6_COR_WRQUEUE_ERROR)
+#define F_CL6_COR_WRQUEUE_ERROR V_CL6_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR 5
+#define V_CL5_COR_WRQUEUE_ERROR(x) ((x) << S_CL5_COR_WRQUEUE_ERROR)
+#define F_CL5_COR_WRQUEUE_ERROR V_CL5_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR 4
+#define V_CL4_COR_WRQUEUE_ERROR(x) ((x) << S_CL4_COR_WRQUEUE_ERROR)
+#define F_CL4_COR_WRQUEUE_ERROR V_CL4_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR 3
+#define V_CL3_COR_WRQUEUE_ERROR(x) ((x) << S_CL3_COR_WRQUEUE_ERROR)
+#define F_CL3_COR_WRQUEUE_ERROR V_CL3_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR 2
+#define V_CL2_COR_WRQUEUE_ERROR(x) ((x) << S_CL2_COR_WRQUEUE_ERROR)
+#define F_CL2_COR_WRQUEUE_ERROR V_CL2_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR 1
+#define V_CL1_COR_WRQUEUE_ERROR(x) ((x) << S_CL1_COR_WRQUEUE_ERROR)
+#define F_CL1_COR_WRQUEUE_ERROR V_CL1_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR 0
+#define V_CL0_COR_WRQUEUE_ERROR(x) ((x) << S_CL0_COR_WRQUEUE_ERROR)
+#define F_CL0_COR_WRQUEUE_ERROR V_CL0_COR_WRQUEUE_ERROR(1U)
+
+#define A_MA_COR_ERROR_ENABLE3 0x77b8
+
+#define S_CL14_COR_RDQUEUE_ERROR_EN 14
+#define V_CL14_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_RDQUEUE_ERROR_EN)
+#define F_CL14_COR_RDQUEUE_ERROR_EN V_CL14_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR_EN 13
+#define V_CL13_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_RDQUEUE_ERROR_EN)
+#define F_CL13_COR_RDQUEUE_ERROR_EN V_CL13_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR_EN 12
+#define V_CL12_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_RDQUEUE_ERROR_EN)
+#define F_CL12_COR_RDQUEUE_ERROR_EN V_CL12_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR_EN 11
+#define V_CL11_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_RDQUEUE_ERROR_EN)
+#define F_CL11_COR_RDQUEUE_ERROR_EN V_CL11_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR_EN 10
+#define V_CL10_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_RDQUEUE_ERROR_EN)
+#define F_CL10_COR_RDQUEUE_ERROR_EN V_CL10_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR_EN 9
+#define V_CL9_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_RDQUEUE_ERROR_EN)
+#define F_CL9_COR_RDQUEUE_ERROR_EN V_CL9_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR_EN 8
+#define V_CL8_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_RDQUEUE_ERROR_EN)
+#define F_CL8_COR_RDQUEUE_ERROR_EN V_CL8_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR_EN 7
+#define V_CL7_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_RDQUEUE_ERROR_EN)
+#define F_CL7_COR_RDQUEUE_ERROR_EN V_CL7_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR_EN 6
+#define V_CL6_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_RDQUEUE_ERROR_EN)
+#define F_CL6_COR_RDQUEUE_ERROR_EN V_CL6_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR_EN 5
+#define V_CL5_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_RDQUEUE_ERROR_EN)
+#define F_CL5_COR_RDQUEUE_ERROR_EN V_CL5_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR_EN 4
+#define V_CL4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_RDQUEUE_ERROR_EN)
+#define F_CL4_COR_RDQUEUE_ERROR_EN V_CL4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR_EN 3
+#define V_CL3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_RDQUEUE_ERROR_EN)
+#define F_CL3_COR_RDQUEUE_ERROR_EN V_CL3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR_EN 2
+#define V_CL2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_RDQUEUE_ERROR_EN)
+#define F_CL2_COR_RDQUEUE_ERROR_EN V_CL2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR_EN 1
+#define V_CL1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_RDQUEUE_ERROR_EN)
+#define F_CL1_COR_RDQUEUE_ERROR_EN V_CL1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR_EN 0
+#define V_CL0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_RDQUEUE_ERROR_EN)
+#define F_CL0_COR_RDQUEUE_ERROR_EN V_CL0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS3 0x77bc
+
+#define S_CL14_COR_RDQUEUE_ERROR 14
+#define V_CL14_COR_RDQUEUE_ERROR(x) ((x) << S_CL14_COR_RDQUEUE_ERROR)
+#define F_CL14_COR_RDQUEUE_ERROR V_CL14_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR 13
+#define V_CL13_COR_RDQUEUE_ERROR(x) ((x) << S_CL13_COR_RDQUEUE_ERROR)
+#define F_CL13_COR_RDQUEUE_ERROR V_CL13_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR 12
+#define V_CL12_COR_RDQUEUE_ERROR(x) ((x) << S_CL12_COR_RDQUEUE_ERROR)
+#define F_CL12_COR_RDQUEUE_ERROR V_CL12_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR 11
+#define V_CL11_COR_RDQUEUE_ERROR(x) ((x) << S_CL11_COR_RDQUEUE_ERROR)
+#define F_CL11_COR_RDQUEUE_ERROR V_CL11_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR 10
+#define V_CL10_COR_RDQUEUE_ERROR(x) ((x) << S_CL10_COR_RDQUEUE_ERROR)
+#define F_CL10_COR_RDQUEUE_ERROR V_CL10_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR 9
+#define V_CL9_COR_RDQUEUE_ERROR(x) ((x) << S_CL9_COR_RDQUEUE_ERROR)
+#define F_CL9_COR_RDQUEUE_ERROR V_CL9_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR 8
+#define V_CL8_COR_RDQUEUE_ERROR(x) ((x) << S_CL8_COR_RDQUEUE_ERROR)
+#define F_CL8_COR_RDQUEUE_ERROR V_CL8_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR 7
+#define V_CL7_COR_RDQUEUE_ERROR(x) ((x) << S_CL7_COR_RDQUEUE_ERROR)
+#define F_CL7_COR_RDQUEUE_ERROR V_CL7_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR 6
+#define V_CL6_COR_RDQUEUE_ERROR(x) ((x) << S_CL6_COR_RDQUEUE_ERROR)
+#define F_CL6_COR_RDQUEUE_ERROR V_CL6_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR 5
+#define V_CL5_COR_RDQUEUE_ERROR(x) ((x) << S_CL5_COR_RDQUEUE_ERROR)
+#define F_CL5_COR_RDQUEUE_ERROR V_CL5_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR 4
+#define V_CL4_COR_RDQUEUE_ERROR(x) ((x) << S_CL4_COR_RDQUEUE_ERROR)
+#define F_CL4_COR_RDQUEUE_ERROR V_CL4_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR 3
+#define V_CL3_COR_RDQUEUE_ERROR(x) ((x) << S_CL3_COR_RDQUEUE_ERROR)
+#define F_CL3_COR_RDQUEUE_ERROR V_CL3_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR 2
+#define V_CL2_COR_RDQUEUE_ERROR(x) ((x) << S_CL2_COR_RDQUEUE_ERROR)
+#define F_CL2_COR_RDQUEUE_ERROR V_CL2_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR 1
+#define V_CL1_COR_RDQUEUE_ERROR(x) ((x) << S_CL1_COR_RDQUEUE_ERROR)
+#define F_CL1_COR_RDQUEUE_ERROR V_CL1_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR 0
+#define V_CL0_COR_RDQUEUE_ERROR(x) ((x) << S_CL0_COR_RDQUEUE_ERROR)
+#define F_CL0_COR_RDQUEUE_ERROR V_CL0_COR_RDQUEUE_ERROR(1U)
+
#define A_MA_EDRAM0_BAR 0x77c0
#define S_EDRAM0_BASE 16
@@ -16119,6 +20575,16 @@
#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+#define S_T7_EDRAM0_BASE 16
+#define M_T7_EDRAM0_BASE 0xffffU
+#define V_T7_EDRAM0_BASE(x) ((x) << S_T7_EDRAM0_BASE)
+#define G_T7_EDRAM0_BASE(x) (((x) >> S_T7_EDRAM0_BASE) & M_T7_EDRAM0_BASE)
+
+#define S_T7_EDRAM0_SIZE 0
+#define M_T7_EDRAM0_SIZE 0xffffU
+#define V_T7_EDRAM0_SIZE(x) ((x) << S_T7_EDRAM0_SIZE)
+#define G_T7_EDRAM0_SIZE(x) (((x) >> S_T7_EDRAM0_SIZE) & M_T7_EDRAM0_SIZE)
+
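/*
 * Editor's sketch, not part of the diff: the MA BAR registers pack a base
 * and a size into a single word, and the T7 variants above widen both
 * fields to 16 bits (M_T7_EDRAM0_BASE/M_T7_EDRAM0_SIZE are 0xffff).  On the
 * assumption that the driver keeps scaling these values by 1 MB, as existing
 * G_EDRAM0_SIZE users do, decoding the T7 EDRAM0 window might look like:
 */
uint32_t bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
uint64_t base = (uint64_t)G_T7_EDRAM0_BASE(bar) << 20;	/* bytes, assuming MB units */
uint64_t size = (uint64_t)G_T7_EDRAM0_SIZE(bar) << 20;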
#define A_MA_EDRAM1_BAR 0x77c4
#define S_EDRAM1_BASE 16
@@ -16131,6 +20597,16 @@
#define V_EDRAM1_SIZE(x) ((x) << S_EDRAM1_SIZE)
#define G_EDRAM1_SIZE(x) (((x) >> S_EDRAM1_SIZE) & M_EDRAM1_SIZE)
+#define S_T7_EDRAM1_BASE 16
+#define M_T7_EDRAM1_BASE 0xffffU
+#define V_T7_EDRAM1_BASE(x) ((x) << S_T7_EDRAM1_BASE)
+#define G_T7_EDRAM1_BASE(x) (((x) >> S_T7_EDRAM1_BASE) & M_T7_EDRAM1_BASE)
+
+#define S_T7_EDRAM1_SIZE 0
+#define M_T7_EDRAM1_SIZE 0xffffU
+#define V_T7_EDRAM1_SIZE(x) ((x) << S_T7_EDRAM1_SIZE)
+#define G_T7_EDRAM1_SIZE(x) (((x) >> S_T7_EDRAM1_SIZE) & M_T7_EDRAM1_SIZE)
+
#define A_MA_EXT_MEMORY_BAR 0x77c8
#define S_EXT_MEM_BASE 16
@@ -16155,6 +20631,16 @@
#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+#define S_T7_EXT_MEM0_BASE 16
+#define M_T7_EXT_MEM0_BASE 0xffffU
+#define V_T7_EXT_MEM0_BASE(x) ((x) << S_T7_EXT_MEM0_BASE)
+#define G_T7_EXT_MEM0_BASE(x) (((x) >> S_T7_EXT_MEM0_BASE) & M_T7_EXT_MEM0_BASE)
+
+#define S_T7_EXT_MEM0_SIZE 0
+#define M_T7_EXT_MEM0_SIZE 0xffffU
+#define V_T7_EXT_MEM0_SIZE(x) ((x) << S_T7_EXT_MEM0_SIZE)
+#define G_T7_EXT_MEM0_SIZE(x) (((x) >> S_T7_EXT_MEM0_SIZE) & M_T7_EXT_MEM0_SIZE)
+
#define A_MA_HOST_MEMORY_BAR 0x77cc
#define S_HMA_BASE 16
@@ -16167,6 +20653,16 @@
#define V_HMA_SIZE(x) ((x) << S_HMA_SIZE)
#define G_HMA_SIZE(x) (((x) >> S_HMA_SIZE) & M_HMA_SIZE)
+#define S_HMATARGETBASE 16
+#define M_HMATARGETBASE 0xffffU
+#define V_HMATARGETBASE(x) ((x) << S_HMATARGETBASE)
+#define G_HMATARGETBASE(x) (((x) >> S_HMATARGETBASE) & M_HMATARGETBASE)
+
+#define S_T7_HMA_SIZE 0
+#define M_T7_HMA_SIZE 0xffffU
+#define V_T7_HMA_SIZE(x) ((x) << S_T7_HMA_SIZE)
+#define G_T7_HMA_SIZE(x) (((x) >> S_T7_HMA_SIZE) & M_T7_HMA_SIZE)
+
#define A_MA_EXT_MEM_PAGE_SIZE 0x77d0
#define S_BRC_MODE 2
@@ -16290,6 +20786,14 @@
#define V_MC_SPLIT(x) ((x) << S_MC_SPLIT)
#define F_MC_SPLIT V_MC_SPLIT(1U)
+#define S_EDC512 8
+#define V_EDC512(x) ((x) << S_EDC512)
+#define F_EDC512 V_EDC512(1U)
+
+#define S_MC_SPLIT_BOUNDARY 7
+#define V_MC_SPLIT_BOUNDARY(x) ((x) << S_MC_SPLIT_BOUNDARY)
+#define F_MC_SPLIT_BOUNDARY V_MC_SPLIT_BOUNDARY(1U)
+
#define A_MA_INT_ENABLE 0x77dc
#define S_MEM_PERR_INT_ENABLE 1
@@ -16475,6 +20979,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR_EN V_CL0_PAR_RDQUEUE_ERROR_EN(1U)
#define A_MA_PARITY_ERROR_ENABLE1 0x77f0
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR_EN 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR_EN V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR_EN V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR_EN V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR_EN V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR_EN V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR_EN 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR_EN V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR_EN 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR_EN V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR_EN 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR_EN V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR_EN 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR_EN V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR_EN 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR_EN V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR_EN 1
+#define V_T7_TP_DMARBT_PAR_ERROR_EN(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR_EN)
+#define F_T7_TP_DMARBT_PAR_ERROR_EN V_T7_TP_DMARBT_PAR_ERROR_EN(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR_EN 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR_EN(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR_EN)
+#define F_T7_LOGIC_FIFO_PAR_ERROR_EN V_T7_LOGIC_FIFO_PAR_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS 0x77f4
#define S_TP_DMARBT_PAR_ERROR 31
@@ -16606,6 +21159,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR V_CL0_PAR_RDQUEUE_ERROR(1U)
#define A_MA_PARITY_ERROR_STATUS1 0x77f4
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR V_T7_ARB4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR V_T7_ARB3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR V_T7_ARB2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR V_T7_ARB1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR V_T7_ARB0_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR V_T7_ARB4_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR V_T7_ARB3_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR V_T7_ARB2_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR V_T7_ARB1_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR V_T7_ARB0_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR 1
+#define V_T7_TP_DMARBT_PAR_ERROR(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR)
+#define F_T7_TP_DMARBT_PAR_ERROR V_T7_TP_DMARBT_PAR_ERROR(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR)
+#define F_T7_LOGIC_FIFO_PAR_ERROR V_T7_LOGIC_FIFO_PAR_ERROR(1U)
+
#define A_MA_SGE_PCIE_COHERANCY_CTRL 0x77f8
#define S_BONUS_REG 6
@@ -16653,6 +21255,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR_EN)
#define F_ARB4_PAR_RDQUEUE_ERROR_EN V_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR_EN 14
+#define V_CL14_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR_EN)
+#define F_CL14_PAR_WRQUEUE_ERROR_EN V_CL14_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR_EN 13
+#define V_CL13_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR_EN)
+#define F_CL13_PAR_WRQUEUE_ERROR_EN V_CL13_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR_EN 12
+#define V_CL12_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR_EN)
+#define F_CL12_PAR_WRQUEUE_ERROR_EN V_CL12_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR_EN 11
+#define V_CL11_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR_EN)
+#define F_CL11_PAR_WRQUEUE_ERROR_EN V_CL11_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR_EN V_T7_CL10_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR_EN V_T7_CL9_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR_EN V_T7_CL8_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR_EN V_T7_CL7_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR_EN 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR_EN V_T7_CL6_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR_EN 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR_EN V_T7_CL5_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR_EN 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR_EN V_T7_CL4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR_EN 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR_EN V_T7_CL3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR_EN 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR_EN V_T7_CL2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR_EN 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR_EN V_T7_CL1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR_EN 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR_EN V_T7_CL0_PAR_WRQUEUE_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS2 0x7804
#define S_ARB4_PAR_WRQUEUE_ERROR 1
@@ -16663,6 +21325,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR)
#define F_ARB4_PAR_RDQUEUE_ERROR V_ARB4_PAR_RDQUEUE_ERROR(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR 14
+#define V_CL14_PAR_WRQUEUE_ERROR(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR)
+#define F_CL14_PAR_WRQUEUE_ERROR V_CL14_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR 13
+#define V_CL13_PAR_WRQUEUE_ERROR(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR)
+#define F_CL13_PAR_WRQUEUE_ERROR V_CL13_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR 12
+#define V_CL12_PAR_WRQUEUE_ERROR(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR)
+#define F_CL12_PAR_WRQUEUE_ERROR V_CL12_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR 11
+#define V_CL11_PAR_WRQUEUE_ERROR(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR)
+#define F_CL11_PAR_WRQUEUE_ERROR V_CL11_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR V_T7_CL10_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR V_T7_CL9_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR V_T7_CL8_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR V_T7_CL7_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR V_T7_CL6_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR V_T7_CL5_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR V_T7_CL4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR V_T7_CL3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR V_T7_CL2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR V_T7_CL1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR V_T7_CL0_PAR_WRQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY1_BAR 0x7808
#define S_EXT_MEM1_BASE 16
@@ -16675,6 +21397,16 @@
#define V_EXT_MEM1_SIZE(x) ((x) << S_EXT_MEM1_SIZE)
#define G_EXT_MEM1_SIZE(x) (((x) >> S_EXT_MEM1_SIZE) & M_EXT_MEM1_SIZE)
+#define S_T7_EXT_MEM1_BASE 16
+#define M_T7_EXT_MEM1_BASE 0xffffU
+#define V_T7_EXT_MEM1_BASE(x) ((x) << S_T7_EXT_MEM1_BASE)
+#define G_T7_EXT_MEM1_BASE(x) (((x) >> S_T7_EXT_MEM1_BASE) & M_T7_EXT_MEM1_BASE)
+
+#define S_T7_EXT_MEM1_SIZE 0
+#define M_T7_EXT_MEM1_SIZE 0xffffU
+#define V_T7_EXT_MEM1_SIZE(x) ((x) << S_T7_EXT_MEM1_SIZE)
+#define G_T7_EXT_MEM1_SIZE(x) (((x) >> S_T7_EXT_MEM1_SIZE) & M_T7_EXT_MEM1_SIZE)
+
#define A_MA_PMTX_THROTTLE 0x780c
#define S_FL_ENABLE 31
@@ -16696,6 +21428,7 @@
#define A_MA_TP_TH1_WRDATA_CNT 0x782c
#define A_MA_LE_WRDATA_CNT 0x7830
#define A_MA_CIM_WRDATA_CNT 0x7834
+#define A_MA_CIM_TH0_WRDATA_CNT 0x7834
#define A_MA_PCIE_WRDATA_CNT 0x7838
#define A_MA_PMTX_WRDATA_CNT 0x783c
#define A_MA_PMRX_WRDATA_CNT 0x7840
@@ -16709,6 +21442,7 @@
#define A_MA_TP_TH1_RDDATA_CNT 0x7860
#define A_MA_LE_RDDATA_CNT 0x7864
#define A_MA_CIM_RDDATA_CNT 0x7868
+#define A_MA_CIM_TH0_RDDATA_CNT 0x7868
#define A_MA_PCIE_RDDATA_CNT 0x786c
#define A_MA_PMTX_RDDATA_CNT 0x7870
#define A_MA_PMRX_RDDATA_CNT 0x7874
@@ -16733,7 +21467,43 @@
#define F_DDR_MODE V_DDR_MODE(1U)
#define A_MA_EDRAM1_WRDATA_CNT1 0x7884
+#define A_MA_PARITY_ERROR_ENABLE3 0x7884
+
+#define S_CL14_PAR_RDQUEUE_ERROR_EN 14
+#define V_CL14_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR_EN)
+#define F_CL14_PAR_RDQUEUE_ERROR_EN V_CL14_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR_EN 13
+#define V_CL13_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR_EN)
+#define F_CL13_PAR_RDQUEUE_ERROR_EN V_CL13_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR_EN 12
+#define V_CL12_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR_EN)
+#define F_CL12_PAR_RDQUEUE_ERROR_EN V_CL12_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR_EN 11
+#define V_CL11_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR_EN)
+#define F_CL11_PAR_RDQUEUE_ERROR_EN V_CL11_PAR_RDQUEUE_ERROR_EN(1U)
+
#define A_MA_EDRAM1_WRDATA_CNT0 0x7888
+#define A_MA_PARITY_ERROR_STATUS3 0x7888
+
+#define S_CL14_PAR_RDQUEUE_ERROR 14
+#define V_CL14_PAR_RDQUEUE_ERROR(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR)
+#define F_CL14_PAR_RDQUEUE_ERROR V_CL14_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR 13
+#define V_CL13_PAR_RDQUEUE_ERROR(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR)
+#define F_CL13_PAR_RDQUEUE_ERROR V_CL13_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR 12
+#define V_CL12_PAR_RDQUEUE_ERROR(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR)
+#define F_CL12_PAR_RDQUEUE_ERROR V_CL12_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR 11
+#define V_CL11_PAR_RDQUEUE_ERROR(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR)
+#define F_CL11_PAR_RDQUEUE_ERROR V_CL11_PAR_RDQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY0_WRDATA_CNT1 0x788c
#define A_MA_EXT_MEMORY0_WRDATA_CNT0 0x7890
#define A_MA_HOST_MEMORY_WRDATA_CNT1 0x7894
@@ -16915,6 +21685,30 @@
#define V_FUTURE_DEXPANSION_WTE(x) ((x) << S_FUTURE_DEXPANSION_WTE)
#define G_FUTURE_DEXPANSION_WTE(x) (((x) >> S_FUTURE_DEXPANSION_WTE) & M_FUTURE_DEXPANSION_WTE)
+#define S_T7_FUTURE_CEXPANSION_WTE 31
+#define V_T7_FUTURE_CEXPANSION_WTE(x) ((x) << S_T7_FUTURE_CEXPANSION_WTE)
+#define F_T7_FUTURE_CEXPANSION_WTE V_T7_FUTURE_CEXPANSION_WTE(1U)
+
+#define S_CL14_WR_CMD_TO_EN 30
+#define V_CL14_WR_CMD_TO_EN(x) ((x) << S_CL14_WR_CMD_TO_EN)
+#define F_CL14_WR_CMD_TO_EN V_CL14_WR_CMD_TO_EN(1U)
+
+#define S_CL13_WR_CMD_TO_EN 29
+#define V_CL13_WR_CMD_TO_EN(x) ((x) << S_CL13_WR_CMD_TO_EN)
+#define F_CL13_WR_CMD_TO_EN V_CL13_WR_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTE 15
+#define V_T7_FUTURE_DEXPANSION_WTE(x) ((x) << S_T7_FUTURE_DEXPANSION_WTE)
+#define F_T7_FUTURE_DEXPANSION_WTE V_T7_FUTURE_DEXPANSION_WTE(1U)
+
+#define S_CL14_WR_DATA_TO_EN 14
+#define V_CL14_WR_DATA_TO_EN(x) ((x) << S_CL14_WR_DATA_TO_EN)
+#define F_CL14_WR_DATA_TO_EN V_CL14_WR_DATA_TO_EN(1U)
+
+#define S_CL13_WR_DATA_TO_EN 13
+#define V_CL13_WR_DATA_TO_EN(x) ((x) << S_CL13_WR_DATA_TO_EN)
+#define F_CL13_WR_DATA_TO_EN V_CL13_WR_DATA_TO_EN(1U)
+
#define A_MA_WRITE_TIMEOUT_ERROR_STATUS 0x78d8
#define S_CL12_WR_CMD_TO_ERROR 28
@@ -17031,6 +21825,30 @@
#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS)
#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS)
+#define S_T7_FUTURE_CEXPANSION_WTS 31
+#define V_T7_FUTURE_CEXPANSION_WTS(x) ((x) << S_T7_FUTURE_CEXPANSION_WTS)
+#define F_T7_FUTURE_CEXPANSION_WTS V_T7_FUTURE_CEXPANSION_WTS(1U)
+
+#define S_CL14_WR_CMD_TO_ERROR 30
+#define V_CL14_WR_CMD_TO_ERROR(x) ((x) << S_CL14_WR_CMD_TO_ERROR)
+#define F_CL14_WR_CMD_TO_ERROR V_CL14_WR_CMD_TO_ERROR(1U)
+
+#define S_CL13_WR_CMD_TO_ERROR 29
+#define V_CL13_WR_CMD_TO_ERROR(x) ((x) << S_CL13_WR_CMD_TO_ERROR)
+#define F_CL13_WR_CMD_TO_ERROR V_CL13_WR_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTS 15
+#define V_T7_FUTURE_DEXPANSION_WTS(x) ((x) << S_T7_FUTURE_DEXPANSION_WTS)
+#define F_T7_FUTURE_DEXPANSION_WTS V_T7_FUTURE_DEXPANSION_WTS(1U)
+
+#define S_CL14_WR_DATA_TO_ERROR 14
+#define V_CL14_WR_DATA_TO_ERROR(x) ((x) << S_CL14_WR_DATA_TO_ERROR)
+#define F_CL14_WR_DATA_TO_ERROR V_CL14_WR_DATA_TO_ERROR(1U)
+
+#define S_CL13_WR_DATA_TO_ERROR 13
+#define V_CL13_WR_DATA_TO_ERROR(x) ((x) << S_CL13_WR_DATA_TO_ERROR)
+#define F_CL13_WR_DATA_TO_ERROR V_CL13_WR_DATA_TO_ERROR(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_ENABLE 0x78dc
#define S_CL12_RD_CMD_TO_EN 28
@@ -17147,6 +21965,30 @@
#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE)
#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE)
+#define S_T7_FUTURE_CEXPANSION_RTE 31
+#define V_T7_FUTURE_CEXPANSION_RTE(x) ((x) << S_T7_FUTURE_CEXPANSION_RTE)
+#define F_T7_FUTURE_CEXPANSION_RTE V_T7_FUTURE_CEXPANSION_RTE(1U)
+
+#define S_CL14_RD_CMD_TO_EN 30
+#define V_CL14_RD_CMD_TO_EN(x) ((x) << S_CL14_RD_CMD_TO_EN)
+#define F_CL14_RD_CMD_TO_EN V_CL14_RD_CMD_TO_EN(1U)
+
+#define S_CL13_RD_CMD_TO_EN 29
+#define V_CL13_RD_CMD_TO_EN(x) ((x) << S_CL13_RD_CMD_TO_EN)
+#define F_CL13_RD_CMD_TO_EN V_CL13_RD_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTE 15
+#define V_T7_FUTURE_DEXPANSION_RTE(x) ((x) << S_T7_FUTURE_DEXPANSION_RTE)
+#define F_T7_FUTURE_DEXPANSION_RTE V_T7_FUTURE_DEXPANSION_RTE(1U)
+
+#define S_CL14_RD_DATA_TO_EN 14
+#define V_CL14_RD_DATA_TO_EN(x) ((x) << S_CL14_RD_DATA_TO_EN)
+#define F_CL14_RD_DATA_TO_EN V_CL14_RD_DATA_TO_EN(1U)
+
+#define S_CL13_RD_DATA_TO_EN 13
+#define V_CL13_RD_DATA_TO_EN(x) ((x) << S_CL13_RD_DATA_TO_EN)
+#define F_CL13_RD_DATA_TO_EN V_CL13_RD_DATA_TO_EN(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_STATUS 0x78e0
#define S_CL12_RD_CMD_TO_ERROR 28
@@ -17263,6 +22105,27 @@
#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS)
#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS)
+#define S_T7_FUTURE_CEXPANSION_RTS 31
+#define V_T7_FUTURE_CEXPANSION_RTS(x) ((x) << S_T7_FUTURE_CEXPANSION_RTS)
+#define F_T7_FUTURE_CEXPANSION_RTS V_T7_FUTURE_CEXPANSION_RTS(1U)
+
+#define S_CL14_RD_CMD_TO_ERROR 30
+#define V_CL14_RD_CMD_TO_ERROR(x) ((x) << S_CL14_RD_CMD_TO_ERROR)
+#define F_CL14_RD_CMD_TO_ERROR V_CL14_RD_CMD_TO_ERROR(1U)
+
+#define S_CL13_RD_CMD_TO_ERROR 29
+#define V_CL13_RD_CMD_TO_ERROR(x) ((x) << S_CL13_RD_CMD_TO_ERROR)
+#define F_CL13_RD_CMD_TO_ERROR V_CL13_RD_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTS 14
+#define M_T7_FUTURE_DEXPANSION_RTS 0x3U
+#define V_T7_FUTURE_DEXPANSION_RTS(x) ((x) << S_T7_FUTURE_DEXPANSION_RTS)
+#define G_T7_FUTURE_DEXPANSION_RTS(x) (((x) >> S_T7_FUTURE_DEXPANSION_RTS) & M_T7_FUTURE_DEXPANSION_RTS)
+
+#define S_CL13_RD_DATA_TO_ERROR 13
+#define V_CL13_RD_DATA_TO_ERROR(x) ((x) << S_CL13_RD_DATA_TO_ERROR)
+#define F_CL13_RD_DATA_TO_ERROR V_CL13_RD_DATA_TO_ERROR(1U)
+
#define A_MA_BKP_CNT_SEL 0x78e4
#define S_BKP_CNT_TYPE 30
@@ -17361,12 +22224,16 @@
#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE)
#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE)
-#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
+#define S_T7_FUTURE_DEXPANSION_IPE 14
+#define M_T7_FUTURE_DEXPANSION_IPE 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPE(x) ((x) << S_T7_FUTURE_DEXPANSION_IPE)
+#define G_T7_FUTURE_DEXPANSION_IPE(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPE) & M_T7_FUTURE_DEXPANSION_IPE)
-#define S_T5_FUTURE_DEXPANSION 13
-#define M_T5_FUTURE_DEXPANSION 0x7ffffU
-#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION)
-#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION)
+#define S_CL13_IF_PAR_EN 13
+#define V_CL13_IF_PAR_EN(x) ((x) << S_CL13_IF_PAR_EN)
+#define F_CL13_IF_PAR_EN V_CL13_IF_PAR_EN(1U)
+
+#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
#define S_CL12_IF_PAR_ERROR 12
#define V_CL12_IF_PAR_ERROR(x) ((x) << S_CL12_IF_PAR_ERROR)
@@ -17425,6 +22292,15 @@
#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS)
#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS)
+#define S_T7_FUTURE_DEXPANSION_IPS 14
+#define M_T7_FUTURE_DEXPANSION_IPS 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPS(x) ((x) << S_T7_FUTURE_DEXPANSION_IPS)
+#define G_T7_FUTURE_DEXPANSION_IPS(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPS) & M_T7_FUTURE_DEXPANSION_IPS)
+
+#define S_CL13_IF_PAR_ERROR 13
+#define V_CL13_IF_PAR_ERROR(x) ((x) << S_CL13_IF_PAR_ERROR)
+#define F_CL13_IF_PAR_ERROR V_CL13_IF_PAR_ERROR(1U)
+
#define A_MA_LOCAL_DEBUG_CFG 0x78f8
#define S_DEBUG_OR 15
@@ -17445,6 +22321,131 @@
#define G_DEBUGPAGE(x) (((x) >> S_DEBUGPAGE) & M_DEBUGPAGE)
#define A_MA_LOCAL_DEBUG_RPT 0x78fc
+#define A_MA_CLIENT13_PR_THRESHOLD 0x7900
+#define A_MA_CLIENT13_CR_THRESHOLD 0x7904
+#define A_MA_CRYPTO_DEBUG_CNT 0x7908
+#define A_MA_CRYPTO_WRDATA_CNT 0x790c
+#define A_MA_CRYPTO_RDDATA_CNT 0x7910
+#define A_MA_LOCAL_DEBUG_PERF_CFG 0x7914
+#define A_MA_LOCAL_DEBUG_PERF_RPT 0x7918
+#define A_MA_PCIE_THROTTLE 0x791c
+#define A_MA_CLIENT14_PR_THRESHOLD 0x7920
+#define A_MA_CLIENT14_CR_THRESHOLD 0x7924
+#define A_MA_CIM_TH1_DEBUG_CNT 0x7928
+#define A_MA_CIM_TH1_WRDATA_CNT 0x792c
+#define A_MA_CIM_TH1_RDDATA_CNT 0x7930
+#define A_MA_CIM_THREAD1_MAPPER 0x7934
+
+#define S_CIM_THREAD1_EN 0
+#define M_CIM_THREAD1_EN 0xffU
+#define V_CIM_THREAD1_EN(x) ((x) << S_CIM_THREAD1_EN)
+#define G_CIM_THREAD1_EN(x) (((x) >> S_CIM_THREAD1_EN) & M_CIM_THREAD1_EN)
+
+#define A_MA_PIO_CI_SGE_TH0_BASE 0x7938
+
+#define S_SGE_TH0_BASE 0
+#define M_SGE_TH0_BASE 0xffffU
+#define V_SGE_TH0_BASE(x) ((x) << S_SGE_TH0_BASE)
+#define G_SGE_TH0_BASE(x) (((x) >> S_SGE_TH0_BASE) & M_SGE_TH0_BASE)
+
+#define A_MA_PIO_CI_SGE_TH1_BASE 0x793c
+
+#define S_SGE_TH1_BASE 0
+#define M_SGE_TH1_BASE 0xffffU
+#define V_SGE_TH1_BASE(x) ((x) << S_SGE_TH1_BASE)
+#define G_SGE_TH1_BASE(x) (((x) >> S_SGE_TH1_BASE) & M_SGE_TH1_BASE)
+
+#define A_MA_PIO_CI_ULPTX_BASE 0x7940
+
+#define S_ULPTX_BASE 0
+#define M_ULPTX_BASE 0xffffU
+#define V_ULPTX_BASE(x) ((x) << S_ULPTX_BASE)
+#define G_ULPTX_BASE(x) (((x) >> S_ULPTX_BASE) & M_ULPTX_BASE)
+
+#define A_MA_PIO_CI_ULPRX_BASE 0x7944
+
+#define S_ULPRX_BASE 0
+#define M_ULPRX_BASE 0xffffU
+#define V_ULPRX_BASE(x) ((x) << S_ULPRX_BASE)
+#define G_ULPRX_BASE(x) (((x) >> S_ULPRX_BASE) & M_ULPRX_BASE)
+
+#define A_MA_PIO_CI_ULPTXRX_BASE 0x7948
+
+#define S_ULPTXRX_BASE 0
+#define M_ULPTXRX_BASE 0xffffU
+#define V_ULPTXRX_BASE(x) ((x) << S_ULPTXRX_BASE)
+#define G_ULPTXRX_BASE(x) (((x) >> S_ULPTXRX_BASE) & M_ULPTXRX_BASE)
+
+#define A_MA_PIO_CI_TP_TH0_BASE 0x794c
+
+#define S_TP_TH0_BASE 0
+#define M_TP_TH0_BASE 0xffffU
+#define V_TP_TH0_BASE(x) ((x) << S_TP_TH0_BASE)
+#define G_TP_TH0_BASE(x) (((x) >> S_TP_TH0_BASE) & M_TP_TH0_BASE)
+
+#define A_MA_PIO_CI_TP_TH1_BASE 0x7950
+
+#define S_TP_TH1_BASE 0
+#define M_TP_TH1_BASE 0xffffU
+#define V_TP_TH1_BASE(x) ((x) << S_TP_TH1_BASE)
+#define G_TP_TH1_BASE(x) (((x) >> S_TP_TH1_BASE) & M_TP_TH1_BASE)
+
+#define A_MA_PIO_CI_LE_BASE 0x7954
+
+#define S_LE_BASE 0
+#define M_LE_BASE 0xffffU
+#define V_LE_BASE(x) ((x) << S_LE_BASE)
+#define G_LE_BASE(x) (((x) >> S_LE_BASE) & M_LE_BASE)
+
+#define A_MA_PIO_CI_CIM_TH0_BASE 0x7958
+
+#define S_CIM_TH0_BASE 0
+#define M_CIM_TH0_BASE 0xffffU
+#define V_CIM_TH0_BASE(x) ((x) << S_CIM_TH0_BASE)
+#define G_CIM_TH0_BASE(x) (((x) >> S_CIM_TH0_BASE) & M_CIM_TH0_BASE)
+
+#define A_MA_PIO_CI_PCIE_BASE 0x795c
+
+#define S_PCIE_BASE 0
+#define M_PCIE_BASE 0xffffU
+#define V_PCIE_BASE(x) ((x) << S_PCIE_BASE)
+#define G_PCIE_BASE(x) (((x) >> S_PCIE_BASE) & M_PCIE_BASE)
+
+#define A_MA_PIO_CI_PMTX_BASE 0x7960
+
+#define S_PMTX_BASE 0
+#define M_PMTX_BASE 0xffffU
+#define V_PMTX_BASE(x) ((x) << S_PMTX_BASE)
+#define G_PMTX_BASE(x) (((x) >> S_PMTX_BASE) & M_PMTX_BASE)
+
+#define A_MA_PIO_CI_PMRX_BASE 0x7964
+
+#define S_PMRX_BASE 0
+#define M_PMRX_BASE 0xffffU
+#define V_PMRX_BASE(x) ((x) << S_PMRX_BASE)
+#define G_PMRX_BASE(x) (((x) >> S_PMRX_BASE) & M_PMRX_BASE)
+
+#define A_MA_PIO_CI_HMA_BASE 0x7968
+
+#define S_HMACLIENTBASE 0
+#define M_HMACLIENTBASE 0xffffU
+#define V_HMACLIENTBASE(x) ((x) << S_HMACLIENTBASE)
+#define G_HMACLIENTBASE(x) (((x) >> S_HMACLIENTBASE) & M_HMACLIENTBASE)
+
+#define A_MA_PIO_CI_CRYPTO_BASE 0x796c
+
+#define S_CRYPTO_BASE 0
+#define M_CRYPTO_BASE 0xffffU
+#define V_CRYPTO_BASE(x) ((x) << S_CRYPTO_BASE)
+#define G_CRYPTO_BASE(x) (((x) >> S_CRYPTO_BASE) & M_CRYPTO_BASE)
+
+#define A_MA_PIO_CI_CIM_TH1_BASE 0x7970
+
+#define S_CIM_TH1_BASE 0
+#define M_CIM_TH1_BASE 0xffffU
+#define V_CIM_TH1_BASE(x) ((x) << S_CIM_TH1_BASE)
+#define G_CIM_TH1_BASE(x) (((x) >> S_CIM_TH1_BASE) & M_CIM_TH1_BASE)
+
#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa000
#define S_CMDVLD0 31
@@ -20418,6 +25419,124 @@
#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
+#define A_T7_CIM_PERR_ENABLE 0x7b08
+
+#define S_T7_MA_CIM_INTFPERR 31
+#define V_T7_MA_CIM_INTFPERR(x) ((x) << S_T7_MA_CIM_INTFPERR)
+#define F_T7_MA_CIM_INTFPERR V_T7_MA_CIM_INTFPERR(1U)
+
+#define S_T7_MBHOSTPARERR 30
+#define V_T7_MBHOSTPARERR(x) ((x) << S_T7_MBHOSTPARERR)
+#define F_T7_MBHOSTPARERR V_T7_MBHOSTPARERR(1U)
+
+#define S_MAARBINVRSPTAG 29
+#define V_MAARBINVRSPTAG(x) ((x) << S_MAARBINVRSPTAG)
+#define F_MAARBINVRSPTAG V_MAARBINVRSPTAG(1U)
+
+#define S_MAARBFIFOPARERR 28
+#define V_MAARBFIFOPARERR(x) ((x) << S_MAARBFIFOPARERR)
+#define F_MAARBFIFOPARERR V_MAARBFIFOPARERR(1U)
+
+#define S_SEMSRAMPARERR 27
+#define V_SEMSRAMPARERR(x) ((x) << S_SEMSRAMPARERR)
+#define F_SEMSRAMPARERR V_SEMSRAMPARERR(1U)
+
+#define S_RSACPARERR 26
+#define V_RSACPARERR(x) ((x) << S_RSACPARERR)
+#define F_RSACPARERR V_RSACPARERR(1U)
+
+#define S_RSADPARERR 25
+#define V_RSADPARERR(x) ((x) << S_RSADPARERR)
+#define F_RSADPARERR V_RSADPARERR(1U)
+
+#define S_T7_PLCIM_MSTRSPDATAPARERR 24
+#define V_T7_PLCIM_MSTRSPDATAPARERR(x) ((x) << S_T7_PLCIM_MSTRSPDATAPARERR)
+#define F_T7_PLCIM_MSTRSPDATAPARERR V_T7_PLCIM_MSTRSPDATAPARERR(1U)
+
+#define S_T7_PCIE2CIMINTFPARERR 23
+#define V_T7_PCIE2CIMINTFPARERR(x) ((x) << S_T7_PCIE2CIMINTFPARERR)
+#define F_T7_PCIE2CIMINTFPARERR V_T7_PCIE2CIMINTFPARERR(1U)
+
+#define S_T7_NCSI2CIMINTFPARERR 22
+#define V_T7_NCSI2CIMINTFPARERR(x) ((x) << S_T7_NCSI2CIMINTFPARERR)
+#define F_T7_NCSI2CIMINTFPARERR V_T7_NCSI2CIMINTFPARERR(1U)
+
+#define S_T7_SGE2CIMINTFPARERR 21
+#define V_T7_SGE2CIMINTFPARERR(x) ((x) << S_T7_SGE2CIMINTFPARERR)
+#define F_T7_SGE2CIMINTFPARERR V_T7_SGE2CIMINTFPARERR(1U)
+
+#define S_T7_ULP2CIMINTFPARERR 20
+#define V_T7_ULP2CIMINTFPARERR(x) ((x) << S_T7_ULP2CIMINTFPARERR)
+#define F_T7_ULP2CIMINTFPARERR V_T7_ULP2CIMINTFPARERR(1U)
+
+#define S_T7_TP2CIMINTFPARERR 19
+#define V_T7_TP2CIMINTFPARERR(x) ((x) << S_T7_TP2CIMINTFPARERR)
+#define F_T7_TP2CIMINTFPARERR V_T7_TP2CIMINTFPARERR(1U)
+
+#define S_CORE7PARERR 18
+#define V_CORE7PARERR(x) ((x) << S_CORE7PARERR)
+#define F_CORE7PARERR V_CORE7PARERR(1U)
+
+#define S_CORE6PARERR 17
+#define V_CORE6PARERR(x) ((x) << S_CORE6PARERR)
+#define F_CORE6PARERR V_CORE6PARERR(1U)
+
+#define S_CORE5PARERR 16
+#define V_CORE5PARERR(x) ((x) << S_CORE5PARERR)
+#define F_CORE5PARERR V_CORE5PARERR(1U)
+
+#define S_CORE4PARERR 15
+#define V_CORE4PARERR(x) ((x) << S_CORE4PARERR)
+#define F_CORE4PARERR V_CORE4PARERR(1U)
+
+#define S_CORE3PARERR 14
+#define V_CORE3PARERR(x) ((x) << S_CORE3PARERR)
+#define F_CORE3PARERR V_CORE3PARERR(1U)
+
+#define S_CORE2PARERR 13
+#define V_CORE2PARERR(x) ((x) << S_CORE2PARERR)
+#define F_CORE2PARERR V_CORE2PARERR(1U)
+
+#define S_CORE1PARERR 12
+#define V_CORE1PARERR(x) ((x) << S_CORE1PARERR)
+#define F_CORE1PARERR V_CORE1PARERR(1U)
+
+#define S_GFTPARERR 10
+#define V_GFTPARERR(x) ((x) << S_GFTPARERR)
+#define F_GFTPARERR V_GFTPARERR(1U)
+
+#define S_MPSRSPDATAPARERR 9
+#define V_MPSRSPDATAPARERR(x) ((x) << S_MPSRSPDATAPARERR)
+#define F_MPSRSPDATAPARERR V_MPSRSPDATAPARERR(1U)
+
+#define S_ER_RSPDATAPARERR 8
+#define V_ER_RSPDATAPARERR(x) ((x) << S_ER_RSPDATAPARERR)
+#define F_ER_RSPDATAPARERR V_ER_RSPDATAPARERR(1U)
+
+#define S_FLOWFIFOPARERR 7
+#define V_FLOWFIFOPARERR(x) ((x) << S_FLOWFIFOPARERR)
+#define F_FLOWFIFOPARERR V_FLOWFIFOPARERR(1U)
+
+#define S_OBQSRAMPARERR 6
+#define V_OBQSRAMPARERR(x) ((x) << S_OBQSRAMPARERR)
+#define F_OBQSRAMPARERR V_OBQSRAMPARERR(1U)
+
+#define S_TIEQOUTPARERR 3
+#define V_TIEQOUTPARERR(x) ((x) << S_TIEQOUTPARERR)
+#define F_TIEQOUTPARERR V_TIEQOUTPARERR(1U)
+
+#define S_TIEQINPARERR 2
+#define V_TIEQINPARERR(x) ((x) << S_TIEQINPARERR)
+#define F_TIEQINPARERR V_TIEQINPARERR(1U)
+
+#define S_PIFRSPPARERR 1
+#define V_PIFRSPPARERR(x) ((x) << S_PIFRSPPARERR)
+#define F_PIFRSPPARERR V_PIFRSPPARERR(1U)
+
+#define S_PIFREQPARERR 0
+#define V_PIFREQPARERR(x) ((x) << S_PIFREQPARERR)
+#define F_PIFREQPARERR V_PIFREQPARERR(1U)
+
#define A_CIM_EEPROM_BASE_ADDR 0x7b0c
#define S_EEPROMBASEADDR 6
@@ -20425,6 +25544,7 @@
#define V_EEPROMBASEADDR(x) ((x) << S_EEPROMBASEADDR)
#define G_EEPROMBASEADDR(x) (((x) >> S_EEPROMBASEADDR) & M_EEPROMBASEADDR)
+#define A_CIM_PERR_CAUSE 0x7b0c
#define A_CIM_EEPROM_ADDR_SIZE 0x7b10
#define S_EEPROMADDRSIZE 4
@@ -20593,6 +25713,38 @@
#define V_IBQPCIEPARERR(x) ((x) << S_IBQPCIEPARERR)
#define F_IBQPCIEPARERR V_IBQPCIEPARERR(1U)
+#define S_CORE7ACCINT 22
+#define V_CORE7ACCINT(x) ((x) << S_CORE7ACCINT)
+#define F_CORE7ACCINT V_CORE7ACCINT(1U)
+
+#define S_CORE6ACCINT 21
+#define V_CORE6ACCINT(x) ((x) << S_CORE6ACCINT)
+#define F_CORE6ACCINT V_CORE6ACCINT(1U)
+
+#define S_CORE5ACCINT 20
+#define V_CORE5ACCINT(x) ((x) << S_CORE5ACCINT)
+#define F_CORE5ACCINT V_CORE5ACCINT(1U)
+
+#define S_CORE4ACCINT 19
+#define V_CORE4ACCINT(x) ((x) << S_CORE4ACCINT)
+#define F_CORE4ACCINT V_CORE4ACCINT(1U)
+
+#define S_CORE3ACCINT 18
+#define V_CORE3ACCINT(x) ((x) << S_CORE3ACCINT)
+#define F_CORE3ACCINT V_CORE3ACCINT(1U)
+
+#define S_CORE2ACCINT 17
+#define V_CORE2ACCINT(x) ((x) << S_CORE2ACCINT)
+#define F_CORE2ACCINT V_CORE2ACCINT(1U)
+
+#define S_CORE1ACCINT 16
+#define V_CORE1ACCINT(x) ((x) << S_CORE1ACCINT)
+#define F_CORE1ACCINT V_CORE1ACCINT(1U)
+
+#define S_PERRNONZERO 1
+#define V_PERRNONZERO(x) ((x) << S_PERRNONZERO)
+#define F_PERRNONZERO V_PERRNONZERO(1U)
+
#define A_CIM_HOST_INT_CAUSE 0x7b2c
#define S_TIEQOUTPARERRINT 20
@@ -20745,6 +25897,10 @@
#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+#define S_CONWRERRINTEN 31
+#define V_CONWRERRINTEN(x) ((x) << S_CONWRERRINTEN)
+#define F_CONWRERRINTEN V_CONWRERRINTEN(1U)
+
#define A_CIM_HOST_UPACC_INT_CAUSE 0x7b34
#define S_EEPROMWRINT 30
@@ -20871,12 +26027,32 @@
#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+#define S_CONWRERRINT 31
+#define V_CONWRERRINT(x) ((x) << S_CONWRERRINT)
+#define F_CONWRERRINT V_CONWRERRINT(1U)
+
#define A_CIM_UP_INT_ENABLE 0x7b38
#define S_MSTPLINTEN 4
#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+#define S_SEMINT 8
+#define V_SEMINT(x) ((x) << S_SEMINT)
+#define F_SEMINT V_SEMINT(1U)
+
+#define S_RSAINT 7
+#define V_RSAINT(x) ((x) << S_RSAINT)
+#define F_RSAINT V_RSAINT(1U)
+
+#define S_TRNGINT 6
+#define V_TRNGINT(x) ((x) << S_TRNGINT)
+#define F_TRNGINT V_TRNGINT(1U)
+
+#define S_PEERHALTINT 5
+#define V_PEERHALTINT(x) ((x) << S_PEERHALTINT)
+#define F_PEERHALTINT V_PEERHALTINT(1U)
+
#define A_CIM_UP_INT_CAUSE 0x7b3c
#define S_MSTPLINT 4
@@ -20900,6 +26076,33 @@
#define V_QUENUMSELECT(x) ((x) << S_QUENUMSELECT)
#define G_QUENUMSELECT(x) (((x) >> S_QUENUMSELECT) & M_QUENUMSELECT)
+#define S_MAPOFFSET 11
+#define M_MAPOFFSET 0x1fU
+#define V_MAPOFFSET(x) ((x) << S_MAPOFFSET)
+#define G_MAPOFFSET(x) (((x) >> S_MAPOFFSET) & M_MAPOFFSET)
+
+#define S_MAPSELECT 10
+#define V_MAPSELECT(x) ((x) << S_MAPSELECT)
+#define F_MAPSELECT V_MAPSELECT(1U)
+
+#define S_CORESELECT 6
+#define M_CORESELECT 0xfU
+#define V_CORESELECT(x) ((x) << S_CORESELECT)
+#define G_CORESELECT(x) (((x) >> S_CORESELECT) & M_CORESELECT)
+
+#define S_T7_OBQSELECT 5
+#define V_T7_OBQSELECT(x) ((x) << S_T7_OBQSELECT)
+#define F_T7_OBQSELECT V_T7_OBQSELECT(1U)
+
+#define S_T7_IBQSELECT 4
+#define V_T7_IBQSELECT(x) ((x) << S_T7_IBQSELECT)
+#define F_T7_IBQSELECT V_T7_IBQSELECT(1U)
+
+#define S_T7_QUENUMSELECT 0
+#define M_T7_QUENUMSELECT 0xfU
+#define V_T7_QUENUMSELECT(x) ((x) << S_T7_QUENUMSELECT)
+#define G_T7_QUENUMSELECT(x) (((x) >> S_T7_QUENUMSELECT) & M_T7_QUENUMSELECT)
+
#define A_CIM_QUEUE_CONFIG_CTRL 0x7b4c
#define S_CIMQSIZE 24
@@ -20940,6 +26143,29 @@
#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+#define S_T7_HOSTBUSY 31
+#define V_T7_HOSTBUSY(x) ((x) << S_T7_HOSTBUSY)
+#define F_T7_HOSTBUSY V_T7_HOSTBUSY(1U)
+
+#define S_T7_HOSTWRITE 30
+#define V_T7_HOSTWRITE(x) ((x) << S_T7_HOSTWRITE)
+#define F_T7_HOSTWRITE V_T7_HOSTWRITE(1U)
+
+#define S_HOSTGRPSEL 28
+#define M_HOSTGRPSEL 0x3U
+#define V_HOSTGRPSEL(x) ((x) << S_HOSTGRPSEL)
+#define G_HOSTGRPSEL(x) (((x) >> S_HOSTGRPSEL) & M_HOSTGRPSEL)
+
+#define S_HOSTCORESEL 24
+#define M_HOSTCORESEL 0xfU
+#define V_HOSTCORESEL(x) ((x) << S_HOSTCORESEL)
+#define G_HOSTCORESEL(x) (((x) >> S_HOSTCORESEL) & M_HOSTCORESEL)
+
+#define S_T7_HOSTADDR 0
+#define M_T7_HOSTADDR 0xffffffU
+#define V_T7_HOSTADDR(x) ((x) << S_T7_HOSTADDR)
+#define G_T7_HOSTADDR(x) (((x) >> S_T7_HOSTADDR) & M_T7_HOSTADDR)
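+
+/*
+ * Usage sketch (illustrative; assumes t4_read_reg()/t4_write_reg(), the
+ * standard cxgbe accessors, and that these T7 fields decode the CIM host
+ * access control register): a host access composes the V_ fields and
+ * then polls the busy flag, e.g.,
+ *
+ *	t4_write_reg(sc, A_CIM_HOST_ACC_CTRL, F_T7_HOSTWRITE |
+ *	    V_HOSTCORESEL(core) | V_T7_HOSTADDR(addr));
+ *	while (t4_read_reg(sc, A_CIM_HOST_ACC_CTRL) & F_T7_HOSTBUSY)
+ *		;
+ */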
+
#define A_CIM_HOST_ACC_DATA 0x7b54
#define A_CIM_CDEBUGDATA 0x7b58
@@ -20953,6 +26179,31 @@
#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+#define A_CIM_DEBUG_CFG 0x7b58
+
+#define S_OR_EN 20
+#define V_OR_EN(x) ((x) << S_OR_EN)
+#define F_OR_EN V_OR_EN(1U)
+
+#define S_USEL 19
+#define V_USEL(x) ((x) << S_USEL)
+#define F_USEL V_USEL(1U)
+
+#define S_HI 18
+#define V_HI(x) ((x) << S_HI)
+#define F_HI V_HI(1U)
+
+#define S_SELH 9
+#define M_SELH 0x1ffU
+#define V_SELH(x) ((x) << S_SELH)
+#define G_SELH(x) (((x) >> S_SELH) & M_SELH)
+
+#define S_SELL 0
+#define M_SELL 0x1ffU
+#define V_SELL(x) ((x) << S_SELL)
+#define G_SELL(x) (((x) >> S_SELL) & M_SELL)
+
+#define A_CIM_DEBUG_DATA 0x7b5c
#define A_CIM_IBQ_DBG_CFG 0x7b60
#define S_IBQDBGADDR 16
@@ -20972,6 +26223,25 @@
#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
#define F_IBQDBGEN V_IBQDBGEN(1U)
+#define S_IBQDBGCORE 28
+#define M_IBQDBGCORE 0xfU
+#define V_IBQDBGCORE(x) ((x) << S_IBQDBGCORE)
+#define G_IBQDBGCORE(x) (((x) >> S_IBQDBGCORE) & M_IBQDBGCORE)
+
+#define S_T7_IBQDBGADDR 12
+#define M_T7_IBQDBGADDR 0x1fffU
+#define V_T7_IBQDBGADDR(x) ((x) << S_T7_IBQDBGADDR)
+#define G_T7_IBQDBGADDR(x) (((x) >> S_T7_IBQDBGADDR) & M_T7_IBQDBGADDR)
+
+#define S_IBQDBGSTATE 4
+#define M_IBQDBGSTATE 0x3U
+#define V_IBQDBGSTATE(x) ((x) << S_IBQDBGSTATE)
+#define G_IBQDBGSTATE(x) (((x) >> S_IBQDBGSTATE) & M_IBQDBGSTATE)
+
+#define S_PERRADDRCLR 3
+#define V_PERRADDRCLR(x) ((x) << S_PERRADDRCLR)
+#define F_PERRADDRCLR V_PERRADDRCLR(1U)
+
#define A_CIM_OBQ_DBG_CFG 0x7b64
#define S_OBQDBGADDR 16
@@ -20991,6 +26261,21 @@
#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
#define F_OBQDBGEN V_OBQDBGEN(1U)
+#define S_OBQDBGCORE 28
+#define M_OBQDBGCORE 0xfU
+#define V_OBQDBGCORE(x) ((x) << S_OBQDBGCORE)
+#define G_OBQDBGCORE(x) (((x) >> S_OBQDBGCORE) & M_OBQDBGCORE)
+
+#define S_T7_OBQDBGADDR 12
+#define M_T7_OBQDBGADDR 0x1fffU
+#define V_T7_OBQDBGADDR(x) ((x) << S_T7_OBQDBGADDR)
+#define G_T7_OBQDBGADDR(x) (((x) >> S_T7_OBQDBGADDR) & M_T7_OBQDBGADDR)
+
+#define S_OBQDBGSTATE 4
+#define M_OBQDBGSTATE 0x3U
+#define V_OBQDBGSTATE(x) ((x) << S_OBQDBGSTATE)
+#define G_OBQDBGSTATE(x) (((x) >> S_OBQDBGSTATE) & M_OBQDBGSTATE)
+
#define A_CIM_IBQ_DBG_DATA 0x7b68
#define A_CIM_OBQ_DBG_DATA 0x7b6c
#define A_CIM_DEBUGCFG 0x7b70
@@ -21075,6 +26360,11 @@
#define V_ZONE_DST(x) ((x) << S_ZONE_DST)
#define G_ZONE_DST(x) (((x) >> S_ZONE_DST) & M_ZONE_DST)
+#define S_THREAD_ID 2
+#define M_THREAD_ID 0x7U
+#define V_THREAD_ID(x) ((x) << S_THREAD_ID)
+#define G_THREAD_ID(x) (((x) >> S_THREAD_ID) & M_THREAD_ID)
+
#define A_CIM_MEM_ZONE0_LEN 0x7b98
#define S_MEM_ZONE_LEN 4
@@ -21207,6 +26497,7 @@
#define G_DUPUACCMASK(x) (((x) >> S_DUPUACCMASK) & M_DUPUACCMASK)
#define A_CIM_PERR_INJECT 0x7c20
+#define A_CIM_FPGA_ROM_EFUSE_CMD 0x7c20
#define A_CIM_PERR_ENABLE 0x7c24
#define S_PERREN 0
@@ -21224,6 +26515,7 @@
#define V_T6_T5_PERREN(x) ((x) << S_T6_T5_PERREN)
#define G_T6_T5_PERREN(x) (((x) >> S_T6_T5_PERREN) & M_T6_T5_PERREN)
+#define A_CIM_FPGA_ROM_EFUSE_DATA 0x7c24
#define A_CIM_EEPROM_BUSY_BIT 0x7c28
#define S_EEPROMBUSY 0
@@ -21240,6 +26532,22 @@
#define V_SLOW_TIMER_ENABLE(x) ((x) << S_SLOW_TIMER_ENABLE)
#define F_SLOW_TIMER_ENABLE V_SLOW_TIMER_ENABLE(1U)
+#define S_FLASHWRPAGEMORE 5
+#define V_FLASHWRPAGEMORE(x) ((x) << S_FLASHWRPAGEMORE)
+#define F_FLASHWRPAGEMORE V_FLASHWRPAGEMORE(1U)
+
+#define S_FLASHWRENABLE 4
+#define V_FLASHWRENABLE(x) ((x) << S_FLASHWRENABLE)
+#define F_FLASHWRENABLE V_FLASHWRENABLE(1U)
+
+#define S_FLASHMOREENABLE 3
+#define V_FLASHMOREENABLE(x) ((x) << S_FLASHMOREENABLE)
+#define F_FLASHMOREENABLE V_FLASHMOREENABLE(1U)
+
+#define S_WR_RESP_ENABLE 2
+#define V_WR_RESP_ENABLE(x) ((x) << S_WR_RESP_ENABLE)
+#define F_WR_RESP_ENABLE V_WR_RESP_ENABLE(1U)
+
#define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30
#define S_UP_PO_SINGLE_OUTSTANDING 0
@@ -21271,6 +26579,18 @@
#define G_CIM_PCIE_PKT_ERR_CODE(x) (((x) >> S_CIM_PCIE_PKT_ERR_CODE) & M_CIM_PCIE_PKT_ERR_CODE)
#define A_CIM_IBQ_DBG_WAIT_COUNTER 0x7c40
+#define A_CIM_QUE_PERR_ADDR 0x7c40
+
+#define S_IBQPERRADDR 16
+#define M_IBQPERRADDR 0xfffU
+#define V_IBQPERRADDR(x) ((x) << S_IBQPERRADDR)
+#define G_IBQPERRADDR(x) (((x) >> S_IBQPERRADDR) & M_IBQPERRADDR)
+
+#define S_OBQPERRADDR 0
+#define M_OBQPERRADDR 0xfffU
+#define V_OBQPERRADDR(x) ((x) << S_OBQPERRADDR)
+#define G_OBQPERRADDR(x) (((x) >> S_OBQPERRADDR) & M_OBQPERRADDR)
+
#define A_CIM_PIO_UP_MST_CFG_SEL 0x7c44
#define S_PIO_UP_MST_CFG_SEL 0
@@ -21309,6 +26629,20 @@
#define V_PCIE_OBQ_IF_DISABLE(x) ((x) << S_PCIE_OBQ_IF_DISABLE)
#define F_PCIE_OBQ_IF_DISABLE V_PCIE_OBQ_IF_DISABLE(1U)
+#define S_ULP_OBQ_SIZE 8
+#define M_ULP_OBQ_SIZE 0x3U
+#define V_ULP_OBQ_SIZE(x) ((x) << S_ULP_OBQ_SIZE)
+#define G_ULP_OBQ_SIZE(x) (((x) >> S_ULP_OBQ_SIZE) & M_ULP_OBQ_SIZE)
+
+#define S_TP_IBQ_SIZE 6
+#define M_TP_IBQ_SIZE 0x3U
+#define V_TP_IBQ_SIZE(x) ((x) << S_TP_IBQ_SIZE)
+#define G_TP_IBQ_SIZE(x) (((x) >> S_TP_IBQ_SIZE) & M_TP_IBQ_SIZE)
+
+#define S_OBQ_EOM_ENABLE 5
+#define V_OBQ_EOM_ENABLE(x) ((x) << S_OBQ_EOM_ENABLE)
+#define F_OBQ_EOM_ENABLE V_OBQ_EOM_ENABLE(1U)
+
#define A_CIM_CGEN_GLOBAL 0x7c50
#define S_CGEN_GLOBAL 0
@@ -21321,6 +26655,77 @@
#define V_PIFDBGLA_DPSLP_EN(x) ((x) << S_PIFDBGLA_DPSLP_EN)
#define F_PIFDBGLA_DPSLP_EN V_PIFDBGLA_DPSLP_EN(1U)
+#define A_CIM_GFT_CMM_CONFIG 0x7c58
+
+#define S_GLFL 31
+#define V_GLFL(x) ((x) << S_GLFL)
+#define F_GLFL V_GLFL(1U)
+
+#define S_T7_WRCNTIDLE 16
+#define M_T7_WRCNTIDLE 0x7fffU
+#define V_T7_WRCNTIDLE(x) ((x) << S_T7_WRCNTIDLE)
+#define G_T7_WRCNTIDLE(x) (((x) >> S_T7_WRCNTIDLE) & M_T7_WRCNTIDLE)
+
+#define A_CIM_GFT_CONFIG 0x7c5c
+
+#define S_GFTMABASE 16
+#define M_GFTMABASE 0xffffU
+#define V_GFTMABASE(x) ((x) << S_GFTMABASE)
+#define G_GFTMABASE(x) (((x) >> S_GFTMABASE) & M_GFTMABASE)
+
+#define S_GFTHASHTBLSIZE 12
+#define M_GFTHASHTBLSIZE 0xfU
+#define V_GFTHASHTBLSIZE(x) ((x) << S_GFTHASHTBLSIZE)
+#define G_GFTHASHTBLSIZE(x) (((x) >> S_GFTHASHTBLSIZE) & M_GFTHASHTBLSIZE)
+
+#define S_GFTTCAMPRIORITY 11
+#define V_GFTTCAMPRIORITY(x) ((x) << S_GFTTCAMPRIORITY)
+#define F_GFTTCAMPRIORITY V_GFTTCAMPRIORITY(1U)
+
+#define S_GFTMATHREADID 8
+#define M_GFTMATHREADID 0x7U
+#define V_GFTMATHREADID(x) ((x) << S_GFTMATHREADID)
+#define G_GFTMATHREADID(x) (((x) >> S_GFTMATHREADID) & M_GFTMATHREADID)
+
+#define S_GFTTCAMINIT 7
+#define V_GFTTCAMINIT(x) ((x) << S_GFTTCAMINIT)
+#define F_GFTTCAMINIT V_GFTTCAMINIT(1U)
+
+#define S_GFTTCAMINITDONE 6
+#define V_GFTTCAMINITDONE(x) ((x) << S_GFTTCAMINITDONE)
+#define F_GFTTCAMINITDONE V_GFTTCAMINITDONE(1U)
+
+#define S_GFTTBLMODEEN 0
+#define V_GFTTBLMODEEN(x) ((x) << S_GFTTBLMODEEN)
+#define F_GFTTBLMODEEN V_GFTTBLMODEEN(1U)
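+
+/*
+ * Init sketch (illustrative; t4_set_reg_field() and t4_read_reg() are
+ * the standard cxgbe accessors): GFT TCAM initialization sets the init
+ * bit and waits for the hardware to report completion, e.g.,
+ *
+ *	t4_set_reg_field(sc, A_CIM_GFT_CONFIG, F_GFTTCAMINIT,
+ *	    F_GFTTCAMINIT);
+ *	while (!(t4_read_reg(sc, A_CIM_GFT_CONFIG) & F_GFTTCAMINITDONE))
+ *		;
+ */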
+
+#define A_CIM_TCAM_BIST_CTRL 0x7c60
+
+#define S_RST_CB 31
+#define V_RST_CB(x) ((x) << S_RST_CB)
+#define F_RST_CB V_RST_CB(1U)
+
+#define S_CB_START 0
+#define M_CB_START 0xfffffffU
+#define V_CB_START(x) ((x) << S_CB_START)
+#define G_CB_START(x) (((x) >> S_CB_START) & M_CB_START)
+
+#define A_CIM_TCAM_BIST_CB_PASS 0x7c64
+
+#define S_CB_PASS 0
+#define M_CB_PASS 0xfffffffU
+#define V_CB_PASS(x) ((x) << S_CB_PASS)
+#define G_CB_PASS(x) (((x) >> S_CB_PASS) & M_CB_PASS)
+
+#define A_CIM_TCAM_BIST_CB_BUSY 0x7c68
+
+#define S_CB_BUSY 0
+#define M_CB_BUSY 0xfffffffU
+#define V_CB_BUSY(x) ((x) << S_CB_BUSY)
+#define G_CB_BUSY(x) (((x) >> S_CB_BUSY) & M_CB_BUSY)
+
+#define A_CIM_GFT_MASK 0x7c70
+
/* registers for module TP */
#define TP_BASE_ADDR 0x7d00
@@ -21613,6 +27018,14 @@
#define V_CRXPKTXT(x) ((x) << S_CRXPKTXT)
#define F_CRXPKTXT V_CRXPKTXT(1U)
+#define S_ETOEBYPCSUMNOWAIT 15
+#define V_ETOEBYPCSUMNOWAIT(x) ((x) << S_ETOEBYPCSUMNOWAIT)
+#define F_ETOEBYPCSUMNOWAIT V_ETOEBYPCSUMNOWAIT(1U)
+
+#define S_ENICCSUMNOWAIT 14
+#define V_ENICCSUMNOWAIT(x) ((x) << S_ENICCSUMNOWAIT)
+#define F_ENICCSUMNOWAIT V_ENICCSUMNOWAIT(1U)
+
#define A_TP_GLOBAL_CONFIG 0x7d08
#define S_SYNCOOKIEPARAMS 26
@@ -21703,6 +27116,31 @@
#define V_ACTIVEFILTERCOUNTS(x) ((x) << S_ACTIVEFILTERCOUNTS)
#define F_ACTIVEFILTERCOUNTS V_ACTIVEFILTERCOUNTS(1U)
+#define S_RXSACKPARSE 31
+#define V_RXSACKPARSE(x) ((x) << S_RXSACKPARSE)
+#define F_RXSACKPARSE V_RXSACKPARSE(1U)
+
+#define S_RXSACKFWDMODE 29
+#define M_RXSACKFWDMODE 0x3U
+#define V_RXSACKFWDMODE(x) ((x) << S_RXSACKFWDMODE)
+#define G_RXSACKFWDMODE(x) (((x) >> S_RXSACKFWDMODE) & M_RXSACKFWDMODE)
+
+#define S_SRVRCHRSSEN 26
+#define V_SRVRCHRSSEN(x) ((x) << S_SRVRCHRSSEN)
+#define F_SRVRCHRSSEN V_SRVRCHRSSEN(1U)
+
+#define S_LBCHNDISTEN 23
+#define V_LBCHNDISTEN(x) ((x) << S_LBCHNDISTEN)
+#define F_LBCHNDISTEN V_LBCHNDISTEN(1U)
+
+#define S_ETHTNLLEN2X 20
+#define V_ETHTNLLEN2X(x) ((x) << S_ETHTNLLEN2X)
+#define F_ETHTNLLEN2X V_ETHTNLLEN2X(1U)
+
+#define S_EGLBCHNDISTEN 19
+#define V_EGLBCHNDISTEN(x) ((x) << S_EGLBCHNDISTEN)
+#define F_EGLBCHNDISTEN V_EGLBCHNDISTEN(1U)
+
#define A_TP_DB_CONFIG 0x7d0c
#define S_DBMAXOPCNT 24
@@ -21767,6 +27205,11 @@
#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+#define S_T7_PMRXNUMCHN 29
+#define M_T7_PMRXNUMCHN 0x7U
+#define V_T7_PMRXNUMCHN(x) ((x) << S_T7_PMRXNUMCHN)
+#define G_T7_PMRXNUMCHN(x) (((x) >> S_T7_PMRXNUMCHN) & M_T7_PMRXNUMCHN)
+
#define A_TP_PMM_TX_PAGE_SIZE 0x7d34
#define A_TP_PMM_TX_MAX_PAGE 0x7d38
@@ -21780,6 +27223,83 @@
#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+#define S_T7_PMTXNUMCHN 29
+#define M_T7_PMTXNUMCHN 0x7U
+#define V_T7_PMTXNUMCHN(x) ((x) << S_T7_PMTXNUMCHN)
+#define G_T7_PMTXNUMCHN(x) (((x) >> S_T7_PMTXNUMCHN) & M_T7_PMTXNUMCHN)
+
+#define A_TP_EXT_CONFIG 0x7d3c
+
+#define S_TNLERRORIPSECARW 29
+#define V_TNLERRORIPSECARW(x) ((x) << S_TNLERRORIPSECARW)
+#define F_TNLERRORIPSECARW V_TNLERRORIPSECARW(1U)
+
+#define S_TNLERRORIPSECICV 28
+#define V_TNLERRORIPSECICV(x) ((x) << S_TNLERRORIPSECICV)
+#define F_TNLERRORIPSECICV V_TNLERRORIPSECICV(1U)
+
+#define S_DROPERRORIPSECARW 25
+#define V_DROPERRORIPSECARW(x) ((x) << S_DROPERRORIPSECARW)
+#define F_DROPERRORIPSECARW V_DROPERRORIPSECARW(1U)
+
+#define S_DROPERRORIPSECICV 24
+#define V_DROPERRORIPSECICV(x) ((x) << S_DROPERRORIPSECICV)
+#define F_DROPERRORIPSECICV V_DROPERRORIPSECICV(1U)
+
+#define S_MIBRDMAROCEEN 19
+#define V_MIBRDMAROCEEN(x) ((x) << S_MIBRDMAROCEEN)
+#define F_MIBRDMAROCEEN V_MIBRDMAROCEEN(1U)
+
+#define S_MIBRDMAIWARPEN 18
+#define V_MIBRDMAIWARPEN(x) ((x) << S_MIBRDMAIWARPEN)
+#define F_MIBRDMAIWARPEN V_MIBRDMAIWARPEN(1U)
+
+#define S_BYPTXDATAACKALLEN 17
+#define V_BYPTXDATAACKALLEN(x) ((x) << S_BYPTXDATAACKALLEN)
+#define F_BYPTXDATAACKALLEN V_BYPTXDATAACKALLEN(1U)
+
+#define S_DATAACKEXTEN 16
+#define V_DATAACKEXTEN(x) ((x) << S_DATAACKEXTEN)
+#define F_DATAACKEXTEN V_DATAACKEXTEN(1U)
+
+#define S_MACMATCH11FWD 11
+#define V_MACMATCH11FWD(x) ((x) << S_MACMATCH11FWD)
+#define F_MACMATCH11FWD V_MACMATCH11FWD(1U)
+
+#define S_USERTMSTPEN 10
+#define V_USERTMSTPEN(x) ((x) << S_USERTMSTPEN)
+#define F_USERTMSTPEN V_USERTMSTPEN(1U)
+
+#define S_MMGRCACHEDIS 9
+#define V_MMGRCACHEDIS(x) ((x) << S_MMGRCACHEDIS)
+#define F_MMGRCACHEDIS V_MMGRCACHEDIS(1U)
+
+#define S_TXPKTPACKOUTUDPEN 8
+#define V_TXPKTPACKOUTUDPEN(x) ((x) << S_TXPKTPACKOUTUDPEN)
+#define F_TXPKTPACKOUTUDPEN V_TXPKTPACKOUTUDPEN(1U)
+
+#define S_IPSECROCECRCMODE 6
+#define M_IPSECROCECRCMODE 0x3U
+#define V_IPSECROCECRCMODE(x) ((x) << S_IPSECROCECRCMODE)
+#define G_IPSECROCECRCMODE(x) (((x) >> S_IPSECROCECRCMODE) & M_IPSECROCECRCMODE)
+
+#define S_IPSECIDXLOC 5
+#define V_IPSECIDXLOC(x) ((x) << S_IPSECIDXLOC)
+#define F_IPSECIDXLOC V_IPSECIDXLOC(1U)
+
+#define S_IPSECIDXCAPEN 4
+#define V_IPSECIDXCAPEN(x) ((x) << S_IPSECIDXCAPEN)
+#define F_IPSECIDXCAPEN V_IPSECIDXCAPEN(1U)
+
+#define S_IPSECOFEN 3
+#define V_IPSECOFEN(x) ((x) << S_IPSECOFEN)
+#define F_IPSECOFEN V_IPSECOFEN(1U)
+
+#define S_IPSECCFG 0
+#define M_IPSECCFG 0x7U
+#define V_IPSECCFG(x) ((x) << S_IPSECCFG)
+#define G_IPSECCFG(x) (((x) >> S_IPSECCFG) & M_IPSECCFG)
+
#define A_TP_TCP_OPTIONS 0x7d40
#define S_MTUDEFAULT 16
@@ -22615,10 +28135,6 @@
#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
-#define S_ENABLECBYP 21
-#define V_ENABLECBYP(x) ((x) << S_ENABLECBYP)
-#define F_ENABLECBYP V_ENABLECBYP(1U)
-
#define S_LIMITEDTRANSMIT 20
#define M_LIMITEDTRANSMIT 0xfU
#define V_LIMITEDTRANSMIT(x) ((x) << S_LIMITEDTRANSMIT)
@@ -22779,6 +28295,18 @@
#define V_ECNSYNECT(x) ((x) << S_ECNSYNECT)
#define F_ECNSYNECT V_ECNSYNECT(1U)
+#define A_TP_PARA_REG9 0x7d88
+
+#define S_PMMAXXFERLEN3 16
+#define M_PMMAXXFERLEN3 0xffffU
+#define V_PMMAXXFERLEN3(x) ((x) << S_PMMAXXFERLEN3)
+#define G_PMMAXXFERLEN3(x) (((x) >> S_PMMAXXFERLEN3) & M_PMMAXXFERLEN3)
+
+#define S_PMMAXXFERLEN2 0
+#define M_PMMAXXFERLEN2 0xffffU
+#define V_PMMAXXFERLEN2(x) ((x) << S_PMMAXXFERLEN2)
+#define G_PMMAXXFERLEN2(x) (((x) >> S_PMMAXXFERLEN2) & M_PMMAXXFERLEN2)
+
#define A_TP_ERR_CONFIG 0x7d8c
#define S_TNLERRORPING 30
@@ -22926,6 +28454,11 @@
#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
+#define S_ROCETIMERRESOLUTION 24
+#define M_ROCETIMERRESOLUTION 0xffU
+#define V_ROCETIMERRESOLUTION(x) ((x) << S_ROCETIMERRESOLUTION)
+#define G_ROCETIMERRESOLUTION(x) (((x) >> S_ROCETIMERRESOLUTION) & M_ROCETIMERRESOLUTION)
+
#define A_TP_MSL 0x7d94
#define S_MSL 0
@@ -23423,6 +28956,14 @@
#define V_FRMWRQUEMASK(x) ((x) << S_FRMWRQUEMASK)
#define G_FRMWRQUEMASK(x) (((x) >> S_FRMWRQUEMASK) & M_FRMWRQUEMASK)
+#define S_RRCPLOPT1SMSELEN 11
+#define V_RRCPLOPT1SMSELEN(x) ((x) << S_RRCPLOPT1SMSELEN)
+#define F_RRCPLOPT1SMSELEN V_RRCPLOPT1SMSELEN(1U)
+
+#define S_RRCPLOPT1BQEN 10
+#define V_RRCPLOPT1BQEN(x) ((x) << S_RRCPLOPT1BQEN)
+#define F_RRCPLOPT1BQEN V_RRCPLOPT1BQEN(1U)
+
#define A_TP_RSS_CONFIG_SYN 0x7dfc
#define A_TP_RSS_CONFIG_VRT 0x7e00
@@ -23595,6 +29136,69 @@
#define V_QUEUE(x) ((x) << S_QUEUE)
#define G_QUEUE(x) (((x) >> S_QUEUE) & M_QUEUE)
+#define S_T7_UPDVLD 19
+#define V_T7_UPDVLD(x) ((x) << S_T7_UPDVLD)
+#define F_T7_UPDVLD V_T7_UPDVLD(1U)
+
+#define S_T7_XOFF 18
+#define V_T7_XOFF(x) ((x) << S_T7_XOFF)
+#define F_T7_XOFF V_T7_XOFF(1U)
+
+#define S_T7_UPDCHN3 17
+#define V_T7_UPDCHN3(x) ((x) << S_T7_UPDCHN3)
+#define F_T7_UPDCHN3 V_T7_UPDCHN3(1U)
+
+#define S_T7_UPDCHN2 16
+#define V_T7_UPDCHN2(x) ((x) << S_T7_UPDCHN2)
+#define F_T7_UPDCHN2 V_T7_UPDCHN2(1U)
+
+#define S_T7_UPDCHN1 15
+#define V_T7_UPDCHN1(x) ((x) << S_T7_UPDCHN1)
+#define F_T7_UPDCHN1 V_T7_UPDCHN1(1U)
+
+#define S_T7_UPDCHN0 14
+#define V_T7_UPDCHN0(x) ((x) << S_T7_UPDCHN0)
+#define F_T7_UPDCHN0 V_T7_UPDCHN0(1U)
+
+#define S_T7_QUEUE 0
+#define M_T7_QUEUE 0x3fffU
+#define V_T7_QUEUE(x) ((x) << S_T7_QUEUE)
+#define G_T7_QUEUE(x) (((x) >> S_T7_QUEUE) & M_T7_QUEUE)
+
+#define A_TP_RSS_CONFIG_4CH 0x7e08
+
+#define S_BASEQIDEN 1
+#define V_BASEQIDEN(x) ((x) << S_BASEQIDEN)
+#define F_BASEQIDEN V_BASEQIDEN(1U)
+
+#define S_200GMODE 0
+#define V_200GMODE(x) ((x) << S_200GMODE)
+#define F_200GMODE V_200GMODE(1U)
+
+#define A_TP_RSS_CONFIG_SRAM 0x7e0c
+
+#define S_SRAMRDDIS 20
+#define V_SRAMRDDIS(x) ((x) << S_SRAMRDDIS)
+#define F_SRAMRDDIS V_SRAMRDDIS(1U)
+
+#define S_SRAMSTART 19
+#define V_SRAMSTART(x) ((x) << S_SRAMSTART)
+#define F_SRAMSTART V_SRAMSTART(1U)
+
+#define S_SRAMWRITE 18
+#define V_SRAMWRITE(x) ((x) << S_SRAMWRITE)
+#define F_SRAMWRITE V_SRAMWRITE(1U)
+
+#define S_SRAMSEL 16
+#define M_SRAMSEL 0x3U
+#define V_SRAMSEL(x) ((x) << S_SRAMSEL)
+#define G_SRAMSEL(x) (((x) >> S_SRAMSEL) & M_SRAMSEL)
+
+#define S_SRAMADDR 0
+#define M_SRAMADDR 0x3fffU
+#define V_SRAMADDR(x) ((x) << S_SRAMADDR)
+#define G_SRAMADDR(x) (((x) >> S_SRAMADDR) & M_SRAMADDR)
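+
+/*
+ * Usage sketch (illustrative; t4_write_reg() is the standard cxgbe
+ * accessor): an RSS SRAM write selects a bank and row and then starts
+ * the access, e.g.,
+ *
+ *	t4_write_reg(sc, A_TP_RSS_CONFIG_SRAM, F_SRAMSTART | F_SRAMWRITE |
+ *	    V_SRAMSEL(bank) | V_SRAMADDR(row));
+ *
+ * where bank and row are caller-chosen values within M_SRAMSEL and
+ * M_SRAMADDR.
+ */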
+
#define A_TP_LA_TABLE_0 0x7e10
#define S_VIRTPORT1TABLE 16
@@ -23621,6 +29225,18 @@
#define A_TP_TM_PIO_ADDR 0x7e18
#define A_TP_TM_PIO_DATA 0x7e1c
+#define A_TP_RX_MOD_CONFIG_CH3_CH2 0x7e20
+
+#define S_RXCHANNELWEIGHT3 8
+#define M_RXCHANNELWEIGHT3 0xffU
+#define V_RXCHANNELWEIGHT3(x) ((x) << S_RXCHANNELWEIGHT3)
+#define G_RXCHANNELWEIGHT3(x) (((x) >> S_RXCHANNELWEIGHT3) & M_RXCHANNELWEIGHT3)
+
+#define S_RXCHANNELWEIGHT2 0
+#define M_RXCHANNELWEIGHT2 0xffU
+#define V_RXCHANNELWEIGHT2(x) ((x) << S_RXCHANNELWEIGHT2)
+#define G_RXCHANNELWEIGHT2(x) (((x) >> S_RXCHANNELWEIGHT2) & M_RXCHANNELWEIGHT2)
+
#define A_TP_MOD_CONFIG 0x7e24
#define S_RXCHANNELWEIGHT1 24
@@ -23887,6 +29503,30 @@
#define V_SRQTABLEPERR(x) ((x) << S_SRQTABLEPERR)
#define F_SRQTABLEPERR V_SRQTABLEPERR(1U)
+#define S_TPCERR 5
+#define V_TPCERR(x) ((x) << S_TPCERR)
+#define F_TPCERR V_TPCERR(1U)
+
+#define S_OTHERPERR 4
+#define V_OTHERPERR(x) ((x) << S_OTHERPERR)
+#define F_OTHERPERR V_OTHERPERR(1U)
+
+#define S_TPEING1PERR 3
+#define V_TPEING1PERR(x) ((x) << S_TPEING1PERR)
+#define F_TPEING1PERR V_TPEING1PERR(1U)
+
+#define S_TPEING0PERR 2
+#define V_TPEING0PERR(x) ((x) << S_TPEING0PERR)
+#define F_TPEING0PERR V_TPEING0PERR(1U)
+
+#define S_TPEEGPERR 1
+#define V_TPEEGPERR(x) ((x) << S_TPEEGPERR)
+#define F_TPEEGPERR V_TPEEGPERR(1U)
+
+#define S_TPCPERR 0
+#define V_TPCPERR(x) ((x) << S_TPCPERR)
+#define F_TPCPERR V_TPCPERR(1U)
+
#define A_TP_INT_CAUSE 0x7e74
#define A_TP_PER_ENABLE 0x7e78
#define A_TP_FLM_FREE_PS_CNT 0x7e80
@@ -23907,6 +29547,11 @@
#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+#define S_T7_FREERXPAGECHN 28
+#define M_T7_FREERXPAGECHN 0x7U
+#define V_T7_FREERXPAGECHN(x) ((x) << S_T7_FREERXPAGECHN)
+#define G_T7_FREERXPAGECHN(x) (((x) >> S_T7_FREERXPAGECHN) & M_T7_FREERXPAGECHN)
+
#define A_TP_FLM_FREE_TX_CNT 0x7e88
#define S_FREETXPAGECHN 28
@@ -23919,6 +29564,11 @@
#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+#define S_T7_FREETXPAGECHN 28
+#define M_T7_FREETXPAGECHN 0x7U
+#define V_T7_FREETXPAGECHN(x) ((x) << S_T7_FREETXPAGECHN)
+#define G_T7_FREETXPAGECHN(x) (((x) >> S_T7_FREETXPAGECHN) & M_T7_FREETXPAGECHN)
+
#define A_TP_TM_HEAP_PUSH_CNT 0x7e8c
#define A_TP_TM_HEAP_POP_CNT 0x7e90
#define A_TP_TM_DACK_PUSH_CNT 0x7e94
@@ -24111,6 +29761,38 @@
#define V_COMMITLIMIT0(x) ((x) << S_COMMITLIMIT0)
#define G_COMMITLIMIT0(x) (((x) >> S_COMMITLIMIT0) & M_COMMITLIMIT0)
+#define S_RXCOMMITRESET3 7
+#define V_RXCOMMITRESET3(x) ((x) << S_RXCOMMITRESET3)
+#define F_RXCOMMITRESET3 V_RXCOMMITRESET3(1U)
+
+#define S_RXCOMMITRESET2 6
+#define V_RXCOMMITRESET2(x) ((x) << S_RXCOMMITRESET2)
+#define F_RXCOMMITRESET2 V_RXCOMMITRESET2(1U)
+
+#define S_T7_RXCOMMITRESET1 5
+#define V_T7_RXCOMMITRESET1(x) ((x) << S_T7_RXCOMMITRESET1)
+#define F_T7_RXCOMMITRESET1 V_T7_RXCOMMITRESET1(1U)
+
+#define S_T7_RXCOMMITRESET0 4
+#define V_T7_RXCOMMITRESET0(x) ((x) << S_T7_RXCOMMITRESET0)
+#define F_T7_RXCOMMITRESET0 V_T7_RXCOMMITRESET0(1U)
+
+#define S_RXFORCECONG3 3
+#define V_RXFORCECONG3(x) ((x) << S_RXFORCECONG3)
+#define F_RXFORCECONG3 V_RXFORCECONG3(1U)
+
+#define S_RXFORCECONG2 2
+#define V_RXFORCECONG2(x) ((x) << S_RXFORCECONG2)
+#define F_RXFORCECONG2 V_RXFORCECONG2(1U)
+
+#define S_T7_RXFORCECONG1 1
+#define V_T7_RXFORCECONG1(x) ((x) << S_T7_RXFORCECONG1)
+#define F_T7_RXFORCECONG1 V_T7_RXFORCECONG1(1U)
+
+#define S_T7_RXFORCECONG0 0
+#define V_T7_RXFORCECONG0(x) ((x) << S_T7_RXFORCECONG0)
+#define F_T7_RXFORCECONG0 V_T7_RXFORCECONG0(1U)
+
#define A_TP_TX_SCHED 0x7eb4
#define S_COMMITRESET3 31
@@ -24229,6 +29911,14 @@
#define V_RXMODXOFF0(x) ((x) << S_RXMODXOFF0)
#define F_RXMODXOFF0 V_RXMODXOFF0(1U)
+#define S_RXMODXOFF3 3
+#define V_RXMODXOFF3(x) ((x) << S_RXMODXOFF3)
+#define F_RXMODXOFF3 V_RXMODXOFF3(1U)
+
+#define S_RXMODXOFF2 2
+#define V_RXMODXOFF2(x) ((x) << S_RXMODXOFF2)
+#define F_RXMODXOFF2 V_RXMODXOFF2(1U)
+
#define A_TP_TX_ORATE 0x7ebc
#define S_OFDRATE3 24
@@ -24313,6 +30003,37 @@
#define A_TP_DBG_LA_DATAL 0x7ed8
#define A_TP_DBG_LA_DATAH 0x7edc
+#define A_TP_DBG_LA_FILTER 0x7ee0
+
+#define S_FILTERTID 12
+#define M_FILTERTID 0xfffffU
+#define V_FILTERTID(x) ((x) << S_FILTERTID)
+#define G_FILTERTID(x) (((x) >> S_FILTERTID) & M_FILTERTID)
+
+#define S_ENTIDFILTER 5
+#define V_ENTIDFILTER(x) ((x) << S_ENTIDFILTER)
+#define F_ENTIDFILTER V_ENTIDFILTER(1U)
+
+#define S_ENOFFLOAD 4
+#define V_ENOFFLOAD(x) ((x) << S_ENOFFLOAD)
+#define F_ENOFFLOAD V_ENOFFLOAD(1U)
+
+#define S_ENTUNNEL 3
+#define V_ENTUNNEL(x) ((x) << S_ENTUNNEL)
+#define F_ENTUNNEL V_ENTUNNEL(1U)
+
+#define S_ENI 2
+#define V_ENI(x) ((x) << S_ENI)
+#define F_ENI V_ENI(1U)
+
+#define S_ENC 1
+#define V_ENC(x) ((x) << S_ENC)
+#define F_ENC V_ENC(1U)
+
+#define S_ENE 0
+#define V_ENE(x) ((x) << S_ENE)
+#define F_ENE V_ENE(1U)
+
#define A_TP_PROTOCOL_CNTRL 0x7ee8
#define S_WRITEENABLE 31
@@ -24348,6 +30069,546 @@
#define V_PROTOCOLDATAFIELD(x) ((x) << S_PROTOCOLDATAFIELD)
#define G_PROTOCOLDATAFIELD(x) (((x) >> S_PROTOCOLDATAFIELD) & M_PROTOCOLDATAFIELD)
+#define A_TP_INIC_CTRL0 0x7f00
+#define A_TP_INIC_DBG 0x7f04
+#define A_TP_INIC_PERR_ENABLE 0x7f08
+
+#define S_INICMAC1_ERR 16
+#define M_INICMAC1_ERR 0x3fU
+#define V_INICMAC1_ERR(x) ((x) << S_INICMAC1_ERR)
+#define G_INICMAC1_ERR(x) (((x) >> S_INICMAC1_ERR) & M_INICMAC1_ERR)
+
+#define S_INICMAC0_ERR 0
+#define M_INICMAC0_ERR 0x3fU
+#define V_INICMAC0_ERR(x) ((x) << S_INICMAC0_ERR)
+#define G_INICMAC0_ERR(x) (((x) >> S_INICMAC0_ERR) & M_INICMAC0_ERR)
+
+#define A_TP_INIC_PERR_CAUSE 0x7f0c
+#define A_TP_PARA_REG10 0x7f20
+
+#define S_DIS39320FIX 20
+#define V_DIS39320FIX(x) ((x) << S_DIS39320FIX)
+#define F_DIS39320FIX V_DIS39320FIX(1U)
+
+#define S_IWARPMAXPDULEN 16
+#define M_IWARPMAXPDULEN 0xfU
+#define V_IWARPMAXPDULEN(x) ((x) << S_IWARPMAXPDULEN)
+#define G_IWARPMAXPDULEN(x) (((x) >> S_IWARPMAXPDULEN) & M_IWARPMAXPDULEN)
+
+#define S_TLSMAXRXDATA 0
+#define M_TLSMAXRXDATA 0xffffU
+#define V_TLSMAXRXDATA(x) ((x) << S_TLSMAXRXDATA)
+#define G_TLSMAXRXDATA(x) (((x) >> S_TLSMAXRXDATA) & M_TLSMAXRXDATA)
+
+#define A_TP_TCAM_BIST_CTRL 0x7f24
+#define A_TP_TCAM_BIST_CB_PASS 0x7f28
+#define A_TP_TCAM_BIST_CB_BUSY 0x7f2c
+#define A_TP_C_PERR_ENABLE 0x7f30
+
+#define S_DMXFIFOOVFL 26
+#define V_DMXFIFOOVFL(x) ((x) << S_DMXFIFOOVFL)
+#define F_DMXFIFOOVFL V_DMXFIFOOVFL(1U)
+
+#define S_URX2TPCDDPINTF 25
+#define V_URX2TPCDDPINTF(x) ((x) << S_URX2TPCDDPINTF)
+#define F_URX2TPCDDPINTF V_URX2TPCDDPINTF(1U)
+
+#define S_TPCDISPTOKENFIFO 24
+#define V_TPCDISPTOKENFIFO(x) ((x) << S_TPCDISPTOKENFIFO)
+#define F_TPCDISPTOKENFIFO V_TPCDISPTOKENFIFO(1U)
+
+#define S_TPCDISPCPLFIFO3 23
+#define V_TPCDISPCPLFIFO3(x) ((x) << S_TPCDISPCPLFIFO3)
+#define F_TPCDISPCPLFIFO3 V_TPCDISPCPLFIFO3(1U)
+
+#define S_TPCDISPCPLFIFO2 22
+#define V_TPCDISPCPLFIFO2(x) ((x) << S_TPCDISPCPLFIFO2)
+#define F_TPCDISPCPLFIFO2 V_TPCDISPCPLFIFO2(1U)
+
+#define S_TPCDISPCPLFIFO1 21
+#define V_TPCDISPCPLFIFO1(x) ((x) << S_TPCDISPCPLFIFO1)
+#define F_TPCDISPCPLFIFO1 V_TPCDISPCPLFIFO1(1U)
+
+#define S_TPCDISPCPLFIFO0 20
+#define V_TPCDISPCPLFIFO0(x) ((x) << S_TPCDISPCPLFIFO0)
+#define F_TPCDISPCPLFIFO0 V_TPCDISPCPLFIFO0(1U)
+
+#define S_URXPLDINTFCRC3 19
+#define V_URXPLDINTFCRC3(x) ((x) << S_URXPLDINTFCRC3)
+#define F_URXPLDINTFCRC3 V_URXPLDINTFCRC3(1U)
+
+#define S_URXPLDINTFCRC2 18
+#define V_URXPLDINTFCRC2(x) ((x) << S_URXPLDINTFCRC2)
+#define F_URXPLDINTFCRC2 V_URXPLDINTFCRC2(1U)
+
+#define S_URXPLDINTFCRC1 17
+#define V_URXPLDINTFCRC1(x) ((x) << S_URXPLDINTFCRC1)
+#define F_URXPLDINTFCRC1 V_URXPLDINTFCRC1(1U)
+
+#define S_URXPLDINTFCRC0 16
+#define V_URXPLDINTFCRC0(x) ((x) << S_URXPLDINTFCRC0)
+#define F_URXPLDINTFCRC0 V_URXPLDINTFCRC0(1U)
+
+#define S_DMXDBFIFO 15
+#define V_DMXDBFIFO(x) ((x) << S_DMXDBFIFO)
+#define F_DMXDBFIFO V_DMXDBFIFO(1U)
+
+#define S_DMXDBSRAM 14
+#define V_DMXDBSRAM(x) ((x) << S_DMXDBSRAM)
+#define F_DMXDBSRAM V_DMXDBSRAM(1U)
+
+#define S_DMXCPLFIFO 13
+#define V_DMXCPLFIFO(x) ((x) << S_DMXCPLFIFO)
+#define F_DMXCPLFIFO V_DMXCPLFIFO(1U)
+
+#define S_DMXCPLSRAM 12
+#define V_DMXCPLSRAM(x) ((x) << S_DMXCPLSRAM)
+#define F_DMXCPLSRAM V_DMXCPLSRAM(1U)
+
+#define S_DMXCSUMFIFO 11
+#define V_DMXCSUMFIFO(x) ((x) << S_DMXCSUMFIFO)
+#define F_DMXCSUMFIFO V_DMXCSUMFIFO(1U)
+
+#define S_DMXLENFIFO 10
+#define V_DMXLENFIFO(x) ((x) << S_DMXLENFIFO)
+#define F_DMXLENFIFO V_DMXLENFIFO(1U)
+
+#define S_DMXCHECKFIFO 9
+#define V_DMXCHECKFIFO(x) ((x) << S_DMXCHECKFIFO)
+#define F_DMXCHECKFIFO V_DMXCHECKFIFO(1U)
+
+#define S_DMXWINFIFO 8
+#define V_DMXWINFIFO(x) ((x) << S_DMXWINFIFO)
+#define F_DMXWINFIFO V_DMXWINFIFO(1U)
+
+#define S_EGTOKENFIFO 7
+#define V_EGTOKENFIFO(x) ((x) << S_EGTOKENFIFO)
+#define F_EGTOKENFIFO V_EGTOKENFIFO(1U)
+
+#define S_EGDATAFIFO 6
+#define V_EGDATAFIFO(x) ((x) << S_EGDATAFIFO)
+#define F_EGDATAFIFO V_EGDATAFIFO(1U)
+
+#define S_UTX2TPCINTF3 5
+#define V_UTX2TPCINTF3(x) ((x) << S_UTX2TPCINTF3)
+#define F_UTX2TPCINTF3 V_UTX2TPCINTF3(1U)
+
+#define S_UTX2TPCINTF2 4
+#define V_UTX2TPCINTF2(x) ((x) << S_UTX2TPCINTF2)
+#define F_UTX2TPCINTF2 V_UTX2TPCINTF2(1U)
+
+#define S_UTX2TPCINTF1 3
+#define V_UTX2TPCINTF1(x) ((x) << S_UTX2TPCINTF1)
+#define F_UTX2TPCINTF1 V_UTX2TPCINTF1(1U)
+
+#define S_UTX2TPCINTF0 2
+#define V_UTX2TPCINTF0(x) ((x) << S_UTX2TPCINTF0)
+#define F_UTX2TPCINTF0 V_UTX2TPCINTF0(1U)
+
+#define S_LBKTOKENFIFO 1
+#define V_LBKTOKENFIFO(x) ((x) << S_LBKTOKENFIFO)
+#define F_LBKTOKENFIFO V_LBKTOKENFIFO(1U)
+
+#define S_LBKDATAFIFO 0
+#define V_LBKDATAFIFO(x) ((x) << S_LBKDATAFIFO)
+#define F_LBKDATAFIFO V_LBKDATAFIFO(1U)
+
+#define A_TP_C_PERR_CAUSE 0x7f34
+#define A_TP_E_EG_PERR_ENABLE 0x7f38
+
+#define S_MPSLPBKTOKENFIFO 25
+#define V_MPSLPBKTOKENFIFO(x) ((x) << S_MPSLPBKTOKENFIFO)
+#define F_MPSLPBKTOKENFIFO V_MPSLPBKTOKENFIFO(1U)
+
+#define S_MPSMACTOKENFIFO 24
+#define V_MPSMACTOKENFIFO(x) ((x) << S_MPSMACTOKENFIFO)
+#define F_MPSMACTOKENFIFO V_MPSMACTOKENFIFO(1U)
+
+#define S_DISPIPSECFIFO3 23
+#define V_DISPIPSECFIFO3(x) ((x) << S_DISPIPSECFIFO3)
+#define F_DISPIPSECFIFO3 V_DISPIPSECFIFO3(1U)
+
+#define S_DISPTCPFIFO3 22
+#define V_DISPTCPFIFO3(x) ((x) << S_DISPTCPFIFO3)
+#define F_DISPTCPFIFO3 V_DISPTCPFIFO3(1U)
+
+#define S_DISPIPFIFO3 21
+#define V_DISPIPFIFO3(x) ((x) << S_DISPIPFIFO3)
+#define F_DISPIPFIFO3 V_DISPIPFIFO3(1U)
+
+#define S_DISPETHFIFO3 20
+#define V_DISPETHFIFO3(x) ((x) << S_DISPETHFIFO3)
+#define F_DISPETHFIFO3 V_DISPETHFIFO3(1U)
+
+#define S_DISPGREFIFO3 19
+#define V_DISPGREFIFO3(x) ((x) << S_DISPGREFIFO3)
+#define F_DISPGREFIFO3 V_DISPGREFIFO3(1U)
+
+#define S_DISPCPL5FIFO3 18
+#define V_DISPCPL5FIFO3(x) ((x) << S_DISPCPL5FIFO3)
+#define F_DISPCPL5FIFO3 V_DISPCPL5FIFO3(1U)
+
+#define S_DISPIPSECFIFO2 17
+#define V_DISPIPSECFIFO2(x) ((x) << S_DISPIPSECFIFO2)
+#define F_DISPIPSECFIFO2 V_DISPIPSECFIFO2(1U)
+
+#define S_DISPTCPFIFO2 16
+#define V_DISPTCPFIFO2(x) ((x) << S_DISPTCPFIFO2)
+#define F_DISPTCPFIFO2 V_DISPTCPFIFO2(1U)
+
+#define S_DISPIPFIFO2 15
+#define V_DISPIPFIFO2(x) ((x) << S_DISPIPFIFO2)
+#define F_DISPIPFIFO2 V_DISPIPFIFO2(1U)
+
+#define S_DISPETHFIFO2 14
+#define V_DISPETHFIFO2(x) ((x) << S_DISPETHFIFO2)
+#define F_DISPETHFIFO2 V_DISPETHFIFO2(1U)
+
+#define S_DISPGREFIFO2 13
+#define V_DISPGREFIFO2(x) ((x) << S_DISPGREFIFO2)
+#define F_DISPGREFIFO2 V_DISPGREFIFO2(1U)
+
+#define S_DISPCPL5FIFO2 12
+#define V_DISPCPL5FIFO2(x) ((x) << S_DISPCPL5FIFO2)
+#define F_DISPCPL5FIFO2 V_DISPCPL5FIFO2(1U)
+
+#define S_DISPIPSECFIFO1 11
+#define V_DISPIPSECFIFO1(x) ((x) << S_DISPIPSECFIFO1)
+#define F_DISPIPSECFIFO1 V_DISPIPSECFIFO1(1U)
+
+#define S_DISPTCPFIFO1 10
+#define V_DISPTCPFIFO1(x) ((x) << S_DISPTCPFIFO1)
+#define F_DISPTCPFIFO1 V_DISPTCPFIFO1(1U)
+
+#define S_DISPIPFIFO1 9
+#define V_DISPIPFIFO1(x) ((x) << S_DISPIPFIFO1)
+#define F_DISPIPFIFO1 V_DISPIPFIFO1(1U)
+
+#define S_DISPETHFIFO1 8
+#define V_DISPETHFIFO1(x) ((x) << S_DISPETHFIFO1)
+#define F_DISPETHFIFO1 V_DISPETHFIFO1(1U)
+
+#define S_DISPGREFIFO1 7
+#define V_DISPGREFIFO1(x) ((x) << S_DISPGREFIFO1)
+#define F_DISPGREFIFO1 V_DISPGREFIFO1(1U)
+
+#define S_DISPCPL5FIFO1 6
+#define V_DISPCPL5FIFO1(x) ((x) << S_DISPCPL5FIFO1)
+#define F_DISPCPL5FIFO1 V_DISPCPL5FIFO1(1U)
+
+#define S_DISPIPSECFIFO0 5
+#define V_DISPIPSECFIFO0(x) ((x) << S_DISPIPSECFIFO0)
+#define F_DISPIPSECFIFO0 V_DISPIPSECFIFO0(1U)
+
+#define S_DISPTCPFIFO0 4
+#define V_DISPTCPFIFO0(x) ((x) << S_DISPTCPFIFO0)
+#define F_DISPTCPFIFO0 V_DISPTCPFIFO0(1U)
+
+#define S_DISPIPFIFO0 3
+#define V_DISPIPFIFO0(x) ((x) << S_DISPIPFIFO0)
+#define F_DISPIPFIFO0 V_DISPIPFIFO0(1U)
+
+#define S_DISPETHFIFO0 2
+#define V_DISPETHFIFO0(x) ((x) << S_DISPETHFIFO0)
+#define F_DISPETHFIFO0 V_DISPETHFIFO0(1U)
+
+#define S_DISPGREFIFO0 1
+#define V_DISPGREFIFO0(x) ((x) << S_DISPGREFIFO0)
+#define F_DISPGREFIFO0 V_DISPGREFIFO0(1U)
+
+#define S_DISPCPL5FIFO0 0
+#define V_DISPCPL5FIFO0(x) ((x) << S_DISPCPL5FIFO0)
+#define F_DISPCPL5FIFO0 V_DISPCPL5FIFO0(1U)
+
+#define A_TP_E_EG_PERR_CAUSE 0x7f3c
+#define A_TP_E_IN0_PERR_ENABLE 0x7f40
+
+#define S_DMXISSFIFO 30
+#define V_DMXISSFIFO(x) ((x) << S_DMXISSFIFO)
+#define F_DMXISSFIFO V_DMXISSFIFO(1U)
+
+#define S_DMXERRFIFO 29
+#define V_DMXERRFIFO(x) ((x) << S_DMXERRFIFO)
+#define F_DMXERRFIFO V_DMXERRFIFO(1U)
+
+#define S_DMXATTFIFO 28
+#define V_DMXATTFIFO(x) ((x) << S_DMXATTFIFO)
+#define F_DMXATTFIFO V_DMXATTFIFO(1U)
+
+#define S_DMXTCPFIFO 27
+#define V_DMXTCPFIFO(x) ((x) << S_DMXTCPFIFO)
+#define F_DMXTCPFIFO V_DMXTCPFIFO(1U)
+
+#define S_DMXMPAFIFO 26
+#define V_DMXMPAFIFO(x) ((x) << S_DMXMPAFIFO)
+#define F_DMXMPAFIFO V_DMXMPAFIFO(1U)
+
+#define S_DMXOPTFIFO 25
+#define V_DMXOPTFIFO(x) ((x) << S_DMXOPTFIFO)
+#define F_DMXOPTFIFO V_DMXOPTFIFO(1U)
+
+#define S_INGTOKENFIFO 24
+#define V_INGTOKENFIFO(x) ((x) << S_INGTOKENFIFO)
+#define F_INGTOKENFIFO V_INGTOKENFIFO(1U)
+
+#define S_DMXPLDCHKOVFL1 21
+#define V_DMXPLDCHKOVFL1(x) ((x) << S_DMXPLDCHKOVFL1)
+#define F_DMXPLDCHKOVFL1 V_DMXPLDCHKOVFL1(1U)
+
+#define S_DMXPLDCHKFIFO1 20
+#define V_DMXPLDCHKFIFO1(x) ((x) << S_DMXPLDCHKFIFO1)
+#define F_DMXPLDCHKFIFO1 V_DMXPLDCHKFIFO1(1U)
+
+#define S_DMXOPTFIFO1 19
+#define V_DMXOPTFIFO1(x) ((x) << S_DMXOPTFIFO1)
+#define F_DMXOPTFIFO1 V_DMXOPTFIFO1(1U)
+
+#define S_DMXMPAFIFO1 18
+#define V_DMXMPAFIFO1(x) ((x) << S_DMXMPAFIFO1)
+#define F_DMXMPAFIFO1 V_DMXMPAFIFO1(1U)
+
+#define S_DMXDBFIFO1 17
+#define V_DMXDBFIFO1(x) ((x) << S_DMXDBFIFO1)
+#define F_DMXDBFIFO1 V_DMXDBFIFO1(1U)
+
+#define S_DMXATTFIFO1 16
+#define V_DMXATTFIFO1(x) ((x) << S_DMXATTFIFO1)
+#define F_DMXATTFIFO1 V_DMXATTFIFO1(1U)
+
+#define S_DMXISSFIFO1 15
+#define V_DMXISSFIFO1(x) ((x) << S_DMXISSFIFO1)
+#define F_DMXISSFIFO1 V_DMXISSFIFO1(1U)
+
+#define S_DMXTCPFIFO1 14
+#define V_DMXTCPFIFO1(x) ((x) << S_DMXTCPFIFO1)
+#define F_DMXTCPFIFO1 V_DMXTCPFIFO1(1U)
+
+#define S_DMXERRFIFO1 13
+#define V_DMXERRFIFO1(x) ((x) << S_DMXERRFIFO1)
+#define F_DMXERRFIFO1 V_DMXERRFIFO1(1U)
+
+#define S_MPS2TPINTF1 12
+#define V_MPS2TPINTF1(x) ((x) << S_MPS2TPINTF1)
+#define F_MPS2TPINTF1 V_MPS2TPINTF1(1U)
+
+#define S_DMXPLDCHKOVFL0 9
+#define V_DMXPLDCHKOVFL0(x) ((x) << S_DMXPLDCHKOVFL0)
+#define F_DMXPLDCHKOVFL0 V_DMXPLDCHKOVFL0(1U)
+
+#define S_DMXPLDCHKFIFO0 8
+#define V_DMXPLDCHKFIFO0(x) ((x) << S_DMXPLDCHKFIFO0)
+#define F_DMXPLDCHKFIFO0 V_DMXPLDCHKFIFO0(1U)
+
+#define S_DMXOPTFIFO0 7
+#define V_DMXOPTFIFO0(x) ((x) << S_DMXOPTFIFO0)
+#define F_DMXOPTFIFO0 V_DMXOPTFIFO0(1U)
+
+#define S_DMXMPAFIFO0 6
+#define V_DMXMPAFIFO0(x) ((x) << S_DMXMPAFIFO0)
+#define F_DMXMPAFIFO0 V_DMXMPAFIFO0(1U)
+
+#define S_DMXDBFIFO0 5
+#define V_DMXDBFIFO0(x) ((x) << S_DMXDBFIFO0)
+#define F_DMXDBFIFO0 V_DMXDBFIFO0(1U)
+
+#define S_DMXATTFIFO0 4
+#define V_DMXATTFIFO0(x) ((x) << S_DMXATTFIFO0)
+#define F_DMXATTFIFO0 V_DMXATTFIFO0(1U)
+
+#define S_DMXISSFIFO0 3
+#define V_DMXISSFIFO0(x) ((x) << S_DMXISSFIFO0)
+#define F_DMXISSFIFO0 V_DMXISSFIFO0(1U)
+
+#define S_DMXTCPFIFO0 2
+#define V_DMXTCPFIFO0(x) ((x) << S_DMXTCPFIFO0)
+#define F_DMXTCPFIFO0 V_DMXTCPFIFO0(1U)
+
+#define S_DMXERRFIFO0 1
+#define V_DMXERRFIFO0(x) ((x) << S_DMXERRFIFO0)
+#define F_DMXERRFIFO0 V_DMXERRFIFO0(1U)
+
+#define S_MPS2TPINTF0 0
+#define V_MPS2TPINTF0(x) ((x) << S_MPS2TPINTF0)
+#define F_MPS2TPINTF0 V_MPS2TPINTF0(1U)
+
+#define A_TP_E_IN0_PERR_CAUSE 0x7f44
+#define A_TP_E_IN1_PERR_ENABLE 0x7f48
+
+#define S_DMXPLDCHKOVFL3 21
+#define V_DMXPLDCHKOVFL3(x) ((x) << S_DMXPLDCHKOVFL3)
+#define F_DMXPLDCHKOVFL3 V_DMXPLDCHKOVFL3(1U)
+
+#define S_DMXPLDCHKFIFO3 20
+#define V_DMXPLDCHKFIFO3(x) ((x) << S_DMXPLDCHKFIFO3)
+#define F_DMXPLDCHKFIFO3 V_DMXPLDCHKFIFO3(1U)
+
+#define S_DMXOPTFIFO3 19
+#define V_DMXOPTFIFO3(x) ((x) << S_DMXOPTFIFO3)
+#define F_DMXOPTFIFO3 V_DMXOPTFIFO3(1U)
+
+#define S_DMXMPAFIFO3 18
+#define V_DMXMPAFIFO3(x) ((x) << S_DMXMPAFIFO3)
+#define F_DMXMPAFIFO3 V_DMXMPAFIFO3(1U)
+
+#define S_DMXDBFIFO3 17
+#define V_DMXDBFIFO3(x) ((x) << S_DMXDBFIFO3)
+#define F_DMXDBFIFO3 V_DMXDBFIFO3(1U)
+
+#define S_DMXATTFIFO3 16
+#define V_DMXATTFIFO3(x) ((x) << S_DMXATTFIFO3)
+#define F_DMXATTFIFO3 V_DMXATTFIFO3(1U)
+
+#define S_DMXISSFIFO3 15
+#define V_DMXISSFIFO3(x) ((x) << S_DMXISSFIFO3)
+#define F_DMXISSFIFO3 V_DMXISSFIFO3(1U)
+
+#define S_DMXTCPFIFO3 14
+#define V_DMXTCPFIFO3(x) ((x) << S_DMXTCPFIFO3)
+#define F_DMXTCPFIFO3 V_DMXTCPFIFO3(1U)
+
+#define S_DMXERRFIFO3 13
+#define V_DMXERRFIFO3(x) ((x) << S_DMXERRFIFO3)
+#define F_DMXERRFIFO3 V_DMXERRFIFO3(1U)
+
+#define S_MPS2TPINTF3 12
+#define V_MPS2TPINTF3(x) ((x) << S_MPS2TPINTF3)
+#define F_MPS2TPINTF3 V_MPS2TPINTF3(1U)
+
+#define S_DMXPLDCHKOVFL2 9
+#define V_DMXPLDCHKOVFL2(x) ((x) << S_DMXPLDCHKOVFL2)
+#define F_DMXPLDCHKOVFL2 V_DMXPLDCHKOVFL2(1U)
+
+#define S_DMXPLDCHKFIFO2 8
+#define V_DMXPLDCHKFIFO2(x) ((x) << S_DMXPLDCHKFIFO2)
+#define F_DMXPLDCHKFIFO2 V_DMXPLDCHKFIFO2(1U)
+
+#define S_DMXOPTFIFO2 7
+#define V_DMXOPTFIFO2(x) ((x) << S_DMXOPTFIFO2)
+#define F_DMXOPTFIFO2 V_DMXOPTFIFO2(1U)
+
+#define S_DMXMPAFIFO2 6
+#define V_DMXMPAFIFO2(x) ((x) << S_DMXMPAFIFO2)
+#define F_DMXMPAFIFO2 V_DMXMPAFIFO2(1U)
+
+#define S_DMXDBFIFO2 5
+#define V_DMXDBFIFO2(x) ((x) << S_DMXDBFIFO2)
+#define F_DMXDBFIFO2 V_DMXDBFIFO2(1U)
+
+#define S_DMXATTFIFO2 4
+#define V_DMXATTFIFO2(x) ((x) << S_DMXATTFIFO2)
+#define F_DMXATTFIFO2 V_DMXATTFIFO2(1U)
+
+#define S_DMXISSFIFO2 3
+#define V_DMXISSFIFO2(x) ((x) << S_DMXISSFIFO2)
+#define F_DMXISSFIFO2 V_DMXISSFIFO2(1U)
+
+#define S_DMXTCPFIFO2 2
+#define V_DMXTCPFIFO2(x) ((x) << S_DMXTCPFIFO2)
+#define F_DMXTCPFIFO2 V_DMXTCPFIFO2(1U)
+
+#define S_DMXERRFIFO2 1
+#define V_DMXERRFIFO2(x) ((x) << S_DMXERRFIFO2)
+#define F_DMXERRFIFO2 V_DMXERRFIFO2(1U)
+
+#define S_MPS2TPINTF2 0
+#define V_MPS2TPINTF2(x) ((x) << S_MPS2TPINTF2)
+#define F_MPS2TPINTF2 V_MPS2TPINTF2(1U)
+
+#define A_TP_E_IN1_PERR_CAUSE 0x7f4c
+#define A_TP_O_PERR_ENABLE 0x7f50
+
+#define S_DMARBTPERR 31
+#define V_DMARBTPERR(x) ((x) << S_DMARBTPERR)
+#define F_DMARBTPERR V_DMARBTPERR(1U)
+
+#define S_MMGRCACHEDATASRAM 24
+#define V_MMGRCACHEDATASRAM(x) ((x) << S_MMGRCACHEDATASRAM)
+#define F_MMGRCACHEDATASRAM V_MMGRCACHEDATASRAM(1U)
+
+#define S_MMGRCACHETAGFIFO 23
+#define V_MMGRCACHETAGFIFO(x) ((x) << S_MMGRCACHETAGFIFO)
+#define F_MMGRCACHETAGFIFO V_MMGRCACHETAGFIFO(1U)
+
+#define S_TPPROTOSRAM 16
+#define V_TPPROTOSRAM(x) ((x) << S_TPPROTOSRAM)
+#define F_TPPROTOSRAM V_TPPROTOSRAM(1U)
+
+#define S_HSPSRAM 15
+#define V_HSPSRAM(x) ((x) << S_HSPSRAM)
+#define F_HSPSRAM V_HSPSRAM(1U)
+
+#define S_RATEGRPSRAM 14
+#define V_RATEGRPSRAM(x) ((x) << S_RATEGRPSRAM)
+#define F_RATEGRPSRAM V_RATEGRPSRAM(1U)
+
+#define S_TXFBSEQFIFO 13
+#define V_TXFBSEQFIFO(x) ((x) << S_TXFBSEQFIFO)
+#define F_TXFBSEQFIFO V_TXFBSEQFIFO(1U)
+
+#define S_CMDATASRAM 12
+#define V_CMDATASRAM(x) ((x) << S_CMDATASRAM)
+#define F_CMDATASRAM V_CMDATASRAM(1U)
+
+#define S_CMTAGFIFO 11
+#define V_CMTAGFIFO(x) ((x) << S_CMTAGFIFO)
+#define F_CMTAGFIFO V_CMTAGFIFO(1U)
+
+#define S_RFCOPFIFO 10
+#define V_RFCOPFIFO(x) ((x) << S_RFCOPFIFO)
+#define F_RFCOPFIFO V_RFCOPFIFO(1U)
+
+#define S_DELINVFIFO 9
+#define V_DELINVFIFO(x) ((x) << S_DELINVFIFO)
+#define F_DELINVFIFO V_DELINVFIFO(1U)
+
+#define S_RSSCFGSRAM 8
+#define V_RSSCFGSRAM(x) ((x) << S_RSSCFGSRAM)
+#define F_RSSCFGSRAM V_RSSCFGSRAM(1U)
+
+#define S_RSSKEYSRAM 7
+#define V_RSSKEYSRAM(x) ((x) << S_RSSKEYSRAM)
+#define F_RSSKEYSRAM V_RSSKEYSRAM(1U)
+
+#define S_RSSLKPSRAM 6
+#define V_RSSLKPSRAM(x) ((x) << S_RSSLKPSRAM)
+#define F_RSSLKPSRAM V_RSSLKPSRAM(1U)
+
+#define S_SRQSRAM 5
+#define V_SRQSRAM(x) ((x) << S_SRQSRAM)
+#define F_SRQSRAM V_SRQSRAM(1U)
+
+#define S_ARPDASRAM 4
+#define V_ARPDASRAM(x) ((x) << S_ARPDASRAM)
+#define F_ARPDASRAM V_ARPDASRAM(1U)
+
+#define S_ARPSASRAM 3
+#define V_ARPSASRAM(x) ((x) << S_ARPSASRAM)
+#define F_ARPSASRAM V_ARPSASRAM(1U)
+
+#define S_ARPGRESRAM 2
+#define V_ARPGRESRAM(x) ((x) << S_ARPGRESRAM)
+#define F_ARPGRESRAM V_ARPGRESRAM(1U)
+
+#define S_ARPIPSECSRAM1 1
+#define V_ARPIPSECSRAM1(x) ((x) << S_ARPIPSECSRAM1)
+#define F_ARPIPSECSRAM1 V_ARPIPSECSRAM1(1U)
+
+#define S_ARPIPSECSRAM0 0
+#define V_ARPIPSECSRAM0(x) ((x) << S_ARPIPSECSRAM0)
+#define F_ARPIPSECSRAM0 V_ARPIPSECSRAM0(1U)
+
+#define A_TP_O_PERR_CAUSE 0x7f54
+#define A_TP_CERR_ENABLE 0x7f58
+
+#define S_TPCEGDATAFIFO 8
+#define V_TPCEGDATAFIFO(x) ((x) << S_TPCEGDATAFIFO)
+#define F_TPCEGDATAFIFO V_TPCEGDATAFIFO(1U)
+
+#define S_TPCLBKDATAFIFO 7
+#define V_TPCLBKDATAFIFO(x) ((x) << S_TPCLBKDATAFIFO)
+#define F_TPCLBKDATAFIFO V_TPCLBKDATAFIFO(1U)
+
+#define A_TP_CERR_CAUSE 0x7f5c
#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
#define S_TXTIMERSEPQ7 16
@@ -24520,6 +30781,137 @@
#define A_TP_TX_MOD_C3_C2_RATE_LIMIT 0xa
#define A_TP_TX_MOD_C1_C0_RATE_LIMIT 0xb
+#define A_TP_RX_MOD_Q3_Q2_TIMER_SEPARATOR 0xc
+
+#define S_RXTIMERSEPQ3 16
+#define M_RXTIMERSEPQ3 0xffffU
+#define V_RXTIMERSEPQ3(x) ((x) << S_RXTIMERSEPQ3)
+#define G_RXTIMERSEPQ3(x) (((x) >> S_RXTIMERSEPQ3) & M_RXTIMERSEPQ3)
+
+#define S_RXTIMERSEPQ2 0
+#define M_RXTIMERSEPQ2 0xffffU
+#define V_RXTIMERSEPQ2(x) ((x) << S_RXTIMERSEPQ2)
+#define G_RXTIMERSEPQ2(x) (((x) >> S_RXTIMERSEPQ2) & M_RXTIMERSEPQ2)
+
+#define A_TP_RX_MOD_Q3_Q2_RATE_LIMIT 0xd
+
+#define S_RXRATEINCQ3 24
+#define M_RXRATEINCQ3 0xffU
+#define V_RXRATEINCQ3(x) ((x) << S_RXRATEINCQ3)
+#define G_RXRATEINCQ3(x) (((x) >> S_RXRATEINCQ3) & M_RXRATEINCQ3)
+
+#define S_RXRATETCKQ3 16
+#define M_RXRATETCKQ3 0xffU
+#define V_RXRATETCKQ3(x) ((x) << S_RXRATETCKQ3)
+#define G_RXRATETCKQ3(x) (((x) >> S_RXRATETCKQ3) & M_RXRATETCKQ3)
+
+#define S_RXRATEINCQ2 8
+#define M_RXRATEINCQ2 0xffU
+#define V_RXRATEINCQ2(x) ((x) << S_RXRATEINCQ2)
+#define G_RXRATEINCQ2(x) (((x) >> S_RXRATEINCQ2) & M_RXRATEINCQ2)
+
+#define S_RXRATETCKQ2 0
+#define M_RXRATETCKQ2 0xffU
+#define V_RXRATETCKQ2(x) ((x) << S_RXRATETCKQ2)
+#define G_RXRATETCKQ2(x) (((x) >> S_RXRATETCKQ2) & M_RXRATETCKQ2)
+
+#define A_TP_RX_LPBK_CONG 0x1c
+#define A_TP_RX_SCHED_MOD 0x1d
+
+#define S_T7_ENABLELPBKFULL1 28
+#define M_T7_ENABLELPBKFULL1 0xfU
+#define V_T7_ENABLELPBKFULL1(x) ((x) << S_T7_ENABLELPBKFULL1)
+#define G_T7_ENABLELPBKFULL1(x) (((x) >> S_T7_ENABLELPBKFULL1) & M_T7_ENABLELPBKFULL1)
+
+#define S_T7_ENABLEFIFOFULL1 24
+#define M_T7_ENABLEFIFOFULL1 0xfU
+#define V_T7_ENABLEFIFOFULL1(x) ((x) << S_T7_ENABLEFIFOFULL1)
+#define G_T7_ENABLEFIFOFULL1(x) (((x) >> S_T7_ENABLEFIFOFULL1) & M_T7_ENABLEFIFOFULL1)
+
+#define S_T7_ENABLEPCMDFULL1 20
+#define M_T7_ENABLEPCMDFULL1 0xfU
+#define V_T7_ENABLEPCMDFULL1(x) ((x) << S_T7_ENABLEPCMDFULL1)
+#define G_T7_ENABLEPCMDFULL1(x) (((x) >> S_T7_ENABLEPCMDFULL1) & M_T7_ENABLEPCMDFULL1)
+
+#define S_T7_ENABLEHDRFULL1 16
+#define M_T7_ENABLEHDRFULL1 0xfU
+#define V_T7_ENABLEHDRFULL1(x) ((x) << S_T7_ENABLEHDRFULL1)
+#define G_T7_ENABLEHDRFULL1(x) (((x) >> S_T7_ENABLEHDRFULL1) & M_T7_ENABLEHDRFULL1)
+
+#define S_T7_ENABLELPBKFULL0 12
+#define M_T7_ENABLELPBKFULL0 0xfU
+#define V_T7_ENABLELPBKFULL0(x) ((x) << S_T7_ENABLELPBKFULL0)
+#define G_T7_ENABLELPBKFULL0(x) (((x) >> S_T7_ENABLELPBKFULL0) & M_T7_ENABLELPBKFULL0)
+
+#define S_T7_ENABLEFIFOFULL0 8
+#define M_T7_ENABLEFIFOFULL0 0xfU
+#define V_T7_ENABLEFIFOFULL0(x) ((x) << S_T7_ENABLEFIFOFULL0)
+#define G_T7_ENABLEFIFOFULL0(x) (((x) >> S_T7_ENABLEFIFOFULL0) & M_T7_ENABLEFIFOFULL0)
+
+#define S_T7_ENABLEPCMDFULL0 4
+#define M_T7_ENABLEPCMDFULL0 0xfU
+#define V_T7_ENABLEPCMDFULL0(x) ((x) << S_T7_ENABLEPCMDFULL0)
+#define G_T7_ENABLEPCMDFULL0(x) (((x) >> S_T7_ENABLEPCMDFULL0) & M_T7_ENABLEPCMDFULL0)
+
+#define S_T7_ENABLEHDRFULL0 0
+#define M_T7_ENABLEHDRFULL0 0xfU
+#define V_T7_ENABLEHDRFULL0(x) ((x) << S_T7_ENABLEHDRFULL0)
+#define G_T7_ENABLEHDRFULL0(x) (((x) >> S_T7_ENABLEHDRFULL0) & M_T7_ENABLEHDRFULL0)
+
+#define A_TP_RX_SCHED_MOD_CH3_CH2 0x1e
+
+#define S_ENABLELPBKFULL3 28
+#define M_ENABLELPBKFULL3 0xfU
+#define V_ENABLELPBKFULL3(x) ((x) << S_ENABLELPBKFULL3)
+#define G_ENABLELPBKFULL3(x) (((x) >> S_ENABLELPBKFULL3) & M_ENABLELPBKFULL3)
+
+#define S_ENABLEFIFOFULL3 24
+#define M_ENABLEFIFOFULL3 0xfU
+#define V_ENABLEFIFOFULL3(x) ((x) << S_ENABLEFIFOFULL3)
+#define G_ENABLEFIFOFULL3(x) (((x) >> S_ENABLEFIFOFULL3) & M_ENABLEFIFOFULL3)
+
+#define S_ENABLEPCMDFULL3 20
+#define M_ENABLEPCMDFULL3 0xfU
+#define V_ENABLEPCMDFULL3(x) ((x) << S_ENABLEPCMDFULL3)
+#define G_ENABLEPCMDFULL3(x) (((x) >> S_ENABLEPCMDFULL3) & M_ENABLEPCMDFULL3)
+
+#define S_ENABLEHDRFULL3 16
+#define M_ENABLEHDRFULL3 0xfU
+#define V_ENABLEHDRFULL3(x) ((x) << S_ENABLEHDRFULL3)
+#define G_ENABLEHDRFULL3(x) (((x) >> S_ENABLEHDRFULL3) & M_ENABLEHDRFULL3)
+
+#define S_ENABLELPBKFULL2 12
+#define M_ENABLELPBKFULL2 0xfU
+#define V_ENABLELPBKFULL2(x) ((x) << S_ENABLELPBKFULL2)
+#define G_ENABLELPBKFULL2(x) (((x) >> S_ENABLELPBKFULL2) & M_ENABLELPBKFULL2)
+
+#define S_ENABLEFIFOFULL2 8
+#define M_ENABLEFIFOFULL2 0xfU
+#define V_ENABLEFIFOFULL2(x) ((x) << S_ENABLEFIFOFULL2)
+#define G_ENABLEFIFOFULL2(x) (((x) >> S_ENABLEFIFOFULL2) & M_ENABLEFIFOFULL2)
+
+#define S_ENABLEPCMDFULL2 4
+#define M_ENABLEPCMDFULL2 0xfU
+#define V_ENABLEPCMDFULL2(x) ((x) << S_ENABLEPCMDFULL2)
+#define G_ENABLEPCMDFULL2(x) (((x) >> S_ENABLEPCMDFULL2) & M_ENABLEPCMDFULL2)
+
+#define S_ENABLEHDRFULL2 0
+#define M_ENABLEHDRFULL2 0xfU
+#define V_ENABLEHDRFULL2(x) ((x) << S_ENABLEHDRFULL2)
+#define G_ENABLEHDRFULL2(x) (((x) >> S_ENABLEHDRFULL2) & M_ENABLEHDRFULL2)
+
+#define A_TP_RX_SCHED_MAP_CH3_CH2 0x1f
+
+#define S_T7_RXMAPCHANNEL3 16
+#define M_T7_RXMAPCHANNEL3 0xffffU
+#define V_T7_RXMAPCHANNEL3(x) ((x) << S_T7_RXMAPCHANNEL3)
+#define G_T7_RXMAPCHANNEL3(x) (((x) >> S_T7_RXMAPCHANNEL3) & M_T7_RXMAPCHANNEL3)
+
+#define S_T7_RXMAPCHANNEL2 0
+#define M_T7_RXMAPCHANNEL2 0xffffU
+#define V_T7_RXMAPCHANNEL2(x) ((x) << S_T7_RXMAPCHANNEL2)
+#define G_T7_RXMAPCHANNEL2(x) (((x) >> S_T7_RXMAPCHANNEL2) & M_T7_RXMAPCHANNEL2)
+
#define A_TP_RX_SCHED_MAP 0x20
#define S_RXMAPCHANNEL3 24
@@ -24542,6 +30934,16 @@
#define V_RXMAPCHANNEL0(x) ((x) << S_RXMAPCHANNEL0)
#define G_RXMAPCHANNEL0(x) (((x) >> S_RXMAPCHANNEL0) & M_RXMAPCHANNEL0)
+#define S_T7_RXMAPCHANNEL1 16
+#define M_T7_RXMAPCHANNEL1 0xffffU
+#define V_T7_RXMAPCHANNEL1(x) ((x) << S_T7_RXMAPCHANNEL1)
+#define G_T7_RXMAPCHANNEL1(x) (((x) >> S_T7_RXMAPCHANNEL1) & M_T7_RXMAPCHANNEL1)
+
+#define S_T7_RXMAPCHANNEL0 0
+#define M_T7_RXMAPCHANNEL0 0xffffU
+#define V_T7_RXMAPCHANNEL0(x) ((x) << S_T7_RXMAPCHANNEL0)
+#define G_T7_RXMAPCHANNEL0(x) (((x) >> S_T7_RXMAPCHANNEL0) & M_T7_RXMAPCHANNEL0)
+
#define A_TP_RX_SCHED_SGE 0x21
#define S_RXSGEMOD1 12
@@ -24570,6 +30972,16 @@
#define V_RXSGECHANNEL0(x) ((x) << S_RXSGECHANNEL0)
#define F_RXSGECHANNEL0 V_RXSGECHANNEL0(1U)
+#define S_RXSGEMOD3 20
+#define M_RXSGEMOD3 0xfU
+#define V_RXSGEMOD3(x) ((x) << S_RXSGEMOD3)
+#define G_RXSGEMOD3(x) (((x) >> S_RXSGEMOD3) & M_RXSGEMOD3)
+
+#define S_RXSGEMOD2 16
+#define M_RXSGEMOD2 0xfU
+#define V_RXSGEMOD2(x) ((x) << S_RXSGEMOD2)
+#define G_RXSGEMOD2(x) (((x) >> S_RXSGEMOD2) & M_RXSGEMOD2)
+
#define A_TP_TX_SCHED_MAP 0x22
#define S_TXMAPCHANNEL3 12
@@ -24600,6 +31012,14 @@
#define V_TXLPKCHANNEL0(x) ((x) << S_TXLPKCHANNEL0)
#define F_TXLPKCHANNEL0 V_TXLPKCHANNEL0(1U)
+#define S_TXLPKCHANNEL3 19
+#define V_TXLPKCHANNEL3(x) ((x) << S_TXLPKCHANNEL3)
+#define F_TXLPKCHANNEL3 V_TXLPKCHANNEL3(1U)
+
+#define S_TXLPKCHANNEL2 18
+#define V_TXLPKCHANNEL2(x) ((x) << S_TXLPKCHANNEL2)
+#define F_TXLPKCHANNEL2 V_TXLPKCHANNEL2(1U)
+
#define A_TP_TX_SCHED_HDR 0x23
#define S_TXMAPHDRCHANNEL7 28
@@ -24827,6 +31247,69 @@
#define V_RXMAPE2CCHANNEL0(x) ((x) << S_RXMAPE2CCHANNEL0)
#define F_RXMAPE2CCHANNEL0 V_RXMAPE2CCHANNEL0(1U)
+#define S_T7_LB_MODE 30
+#define M_T7_LB_MODE 0x3U
+#define V_T7_LB_MODE(x) ((x) << S_T7_LB_MODE)
+#define G_T7_LB_MODE(x) (((x) >> S_T7_LB_MODE) & M_T7_LB_MODE)
+
+#define S_ING_LB_MODE 28
+#define M_ING_LB_MODE 0x3U
+#define V_ING_LB_MODE(x) ((x) << S_ING_LB_MODE)
+#define G_ING_LB_MODE(x) (((x) >> S_ING_LB_MODE) & M_ING_LB_MODE)
+
+#define S_RXC_LB_MODE 26
+#define M_RXC_LB_MODE 0x3U
+#define V_RXC_LB_MODE(x) ((x) << S_RXC_LB_MODE)
+#define G_RXC_LB_MODE(x) (((x) >> S_RXC_LB_MODE) & M_RXC_LB_MODE)
+
+#define S_SINGLERXCHANNEL 25
+#define V_SINGLERXCHANNEL(x) ((x) << S_SINGLERXCHANNEL)
+#define F_SINGLERXCHANNEL V_SINGLERXCHANNEL(1U)
+
+#define S_RXCHANNELCHECK 24
+#define V_RXCHANNELCHECK(x) ((x) << S_RXCHANNELCHECK)
+#define F_RXCHANNELCHECK V_RXCHANNELCHECK(1U)
+
+#define S_T7_RXMAPC2CCHANNEL3 21
+#define M_T7_RXMAPC2CCHANNEL3 0x7U
+#define V_T7_RXMAPC2CCHANNEL3(x) ((x) << S_T7_RXMAPC2CCHANNEL3)
+#define G_T7_RXMAPC2CCHANNEL3(x) (((x) >> S_T7_RXMAPC2CCHANNEL3) & M_T7_RXMAPC2CCHANNEL3)
+
+#define S_T7_RXMAPC2CCHANNEL2 18
+#define M_T7_RXMAPC2CCHANNEL2 0x7U
+#define V_T7_RXMAPC2CCHANNEL2(x) ((x) << S_T7_RXMAPC2CCHANNEL2)
+#define G_T7_RXMAPC2CCHANNEL2(x) (((x) >> S_T7_RXMAPC2CCHANNEL2) & M_T7_RXMAPC2CCHANNEL2)
+
+#define S_T7_RXMAPC2CCHANNEL1 15
+#define M_T7_RXMAPC2CCHANNEL1 0x7U
+#define V_T7_RXMAPC2CCHANNEL1(x) ((x) << S_T7_RXMAPC2CCHANNEL1)
+#define G_T7_RXMAPC2CCHANNEL1(x) (((x) >> S_T7_RXMAPC2CCHANNEL1) & M_T7_RXMAPC2CCHANNEL1)
+
+#define S_T7_RXMAPC2CCHANNEL0 12
+#define M_T7_RXMAPC2CCHANNEL0 0x7U
+#define V_T7_RXMAPC2CCHANNEL0(x) ((x) << S_T7_RXMAPC2CCHANNEL0)
+#define G_T7_RXMAPC2CCHANNEL0(x) (((x) >> S_T7_RXMAPC2CCHANNEL0) & M_T7_RXMAPC2CCHANNEL0)
+
+#define S_T7_RXMAPE2CCHANNEL3 9
+#define M_T7_RXMAPE2CCHANNEL3 0x7U
+#define V_T7_RXMAPE2CCHANNEL3(x) ((x) << S_T7_RXMAPE2CCHANNEL3)
+#define G_T7_RXMAPE2CCHANNEL3(x) (((x) >> S_T7_RXMAPE2CCHANNEL3) & M_T7_RXMAPE2CCHANNEL3)
+
+#define S_T7_RXMAPE2CCHANNEL2 6
+#define M_T7_RXMAPE2CCHANNEL2 0x7U
+#define V_T7_RXMAPE2CCHANNEL2(x) ((x) << S_T7_RXMAPE2CCHANNEL2)
+#define G_T7_RXMAPE2CCHANNEL2(x) (((x) >> S_T7_RXMAPE2CCHANNEL2) & M_T7_RXMAPE2CCHANNEL2)
+
+#define S_T7_RXMAPE2CCHANNEL1 3
+#define M_T7_RXMAPE2CCHANNEL1 0x7U
+#define V_T7_RXMAPE2CCHANNEL1(x) ((x) << S_T7_RXMAPE2CCHANNEL1)
+#define G_T7_RXMAPE2CCHANNEL1(x) (((x) >> S_T7_RXMAPE2CCHANNEL1) & M_T7_RXMAPE2CCHANNEL1)
+
+#define S_T7_RXMAPE2CCHANNEL0 0
+#define M_T7_RXMAPE2CCHANNEL0 0x7U
+#define V_T7_RXMAPE2CCHANNEL0(x) ((x) << S_T7_RXMAPE2CCHANNEL0)
+#define G_T7_RXMAPE2CCHANNEL0(x) (((x) >> S_T7_RXMAPE2CCHANNEL0) & M_T7_RXMAPE2CCHANNEL0)
+
#define A_TP_RX_LPBK 0x28
#define A_TP_TX_LPBK 0x29
#define A_TP_TX_SCHED_PPP 0x2a
@@ -24873,6 +31356,55 @@
#define V_COMMITLIMIT0L(x) ((x) << S_COMMITLIMIT0L)
#define G_COMMITLIMIT0L(x) (((x) >> S_COMMITLIMIT0L) & M_COMMITLIMIT0L)
+#define A_TP_RX_SCHED_FIFO_CH3_CH2 0x2c
+
+#define S_COMMITLIMIT3H 24
+#define M_COMMITLIMIT3H 0xffU
+#define V_COMMITLIMIT3H(x) ((x) << S_COMMITLIMIT3H)
+#define G_COMMITLIMIT3H(x) (((x) >> S_COMMITLIMIT3H) & M_COMMITLIMIT3H)
+
+#define S_COMMITLIMIT3L 16
+#define M_COMMITLIMIT3L 0xffU
+#define V_COMMITLIMIT3L(x) ((x) << S_COMMITLIMIT3L)
+#define G_COMMITLIMIT3L(x) (((x) >> S_COMMITLIMIT3L) & M_COMMITLIMIT3L)
+
+#define S_COMMITLIMIT2H 8
+#define M_COMMITLIMIT2H 0xffU
+#define V_COMMITLIMIT2H(x) ((x) << S_COMMITLIMIT2H)
+#define G_COMMITLIMIT2H(x) (((x) >> S_COMMITLIMIT2H) & M_COMMITLIMIT2H)
+
+#define S_COMMITLIMIT2L 0
+#define M_COMMITLIMIT2L 0xffU
+#define V_COMMITLIMIT2L(x) ((x) << S_COMMITLIMIT2L)
+#define G_COMMITLIMIT2L(x) (((x) >> S_COMMITLIMIT2L) & M_COMMITLIMIT2L)
+
+#define A_TP_CHANNEL_MAP_LPBK 0x2d
+
+#define S_T7_RXMAPCHANNELELN 12
+#define M_T7_RXMAPCHANNELELN 0xfU
+#define V_T7_RXMAPCHANNELELN(x) ((x) << S_T7_RXMAPCHANNELELN)
+#define G_T7_RXMAPCHANNELELN(x) (((x) >> S_T7_RXMAPCHANNELELN) & M_T7_RXMAPCHANNELELN)
+
+#define S_T7_RXMAPE2LCHANNEL3 9
+#define M_T7_RXMAPE2LCHANNEL3 0x7U
+#define V_T7_RXMAPE2LCHANNEL3(x) ((x) << S_T7_RXMAPE2LCHANNEL3)
+#define G_T7_RXMAPE2LCHANNEL3(x) (((x) >> S_T7_RXMAPE2LCHANNEL3) & M_T7_RXMAPE2LCHANNEL3)
+
+#define S_T7_RXMAPE2LCHANNEL2 6
+#define M_T7_RXMAPE2LCHANNEL2 0x7U
+#define V_T7_RXMAPE2LCHANNEL2(x) ((x) << S_T7_RXMAPE2LCHANNEL2)
+#define G_T7_RXMAPE2LCHANNEL2(x) (((x) >> S_T7_RXMAPE2LCHANNEL2) & M_T7_RXMAPE2LCHANNEL2)
+
+#define S_T7_RXMAPE2LCHANNEL1 3
+#define M_T7_RXMAPE2LCHANNEL1 0x7U
+#define V_T7_RXMAPE2LCHANNEL1(x) ((x) << S_T7_RXMAPE2LCHANNEL1)
+#define G_T7_RXMAPE2LCHANNEL1(x) (((x) >> S_T7_RXMAPE2LCHANNEL1) & M_T7_RXMAPE2LCHANNEL1)
+
+#define S_T7_RXMAPE2LCHANNEL0 0
+#define M_T7_RXMAPE2LCHANNEL0 0x7U
+#define V_T7_RXMAPE2LCHANNEL0(x) ((x) << S_T7_RXMAPE2LCHANNEL0)
+#define G_T7_RXMAPE2LCHANNEL0(x) (((x) >> S_T7_RXMAPE2LCHANNEL0) & M_T7_RXMAPE2LCHANNEL0)
+
#define A_TP_IPMI_CFG1 0x2e
#define S_VLANENABLE 31
@@ -24966,47 +31498,12 @@
#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
#define A_TP_RSS_PF1_CONFIG 0x31
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF2_CONFIG 0x32
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF3_CONFIG 0x33
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF4_CONFIG 0x34
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF5_CONFIG 0x35
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF6_CONFIG 0x36
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF7_CONFIG 0x37
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF_MAP 0x38
#define S_LKPIDXSIZE 24
@@ -25097,6 +31594,22 @@
#define G_PF0MSKSIZE(x) (((x) >> S_PF0MSKSIZE) & M_PF0MSKSIZE)
#define A_TP_RSS_VFL_CONFIG 0x3a
+
+#define S_BASEQID 16
+#define M_BASEQID 0xfffU
+#define V_BASEQID(x) ((x) << S_BASEQID)
+#define G_BASEQID(x) (((x) >> S_BASEQID) & M_BASEQID)
+
+#define S_MAXRRQID 8
+#define M_MAXRRQID 0xffU
+#define V_MAXRRQID(x) ((x) << S_MAXRRQID)
+#define G_MAXRRQID(x) (((x) >> S_MAXRRQID) & M_MAXRRQID)
+
+#define S_RRCOUNTER 0
+#define M_RRCOUNTER 0xffU
+#define V_RRCOUNTER(x) ((x) << S_RRCOUNTER)
+#define G_RRCOUNTER(x) (((x) >> S_RRCOUNTER) & M_RRCOUNTER)
+
#define A_TP_RSS_VFH_CONFIG 0x3b
#define S_ENABLEUDPHASH 31
@@ -25150,6 +31663,10 @@
#define V_KEYINDEX(x) ((x) << S_KEYINDEX)
#define G_KEYINDEX(x) (((x) >> S_KEYINDEX) & M_KEYINDEX)
+#define S_ROUNDROBINEN 3
+#define V_ROUNDROBINEN(x) ((x) << S_ROUNDROBINEN)
+#define F_ROUNDROBINEN V_ROUNDROBINEN(1U)
+
#define A_TP_RSS_SECRET_KEY0 0x40
#define A_TP_RSS_SECRET_KEY1 0x41
#define A_TP_RSS_SECRET_KEY2 0x42
@@ -25283,6 +31800,36 @@
#define V_SHAREDXRC(x) ((x) << S_SHAREDXRC)
#define F_SHAREDXRC V_SHAREDXRC(1U)
+#define S_VERIFYRSPOP 25
+#define M_VERIFYRSPOP 0x1fU
+#define V_VERIFYRSPOP(x) ((x) << S_VERIFYRSPOP)
+#define G_VERIFYRSPOP(x) (((x) >> S_VERIFYRSPOP) & M_VERIFYRSPOP)
+
+#define S_VERIFYREQOP 20
+#define M_VERIFYREQOP 0x1fU
+#define V_VERIFYREQOP(x) ((x) << S_VERIFYREQOP)
+#define G_VERIFYREQOP(x) (((x) >> S_VERIFYREQOP) & M_VERIFYREQOP)
+
+#define S_AWRITERSPOP 15
+#define M_AWRITERSPOP 0x1fU
+#define V_AWRITERSPOP(x) ((x) << S_AWRITERSPOP)
+#define G_AWRITERSPOP(x) (((x) >> S_AWRITERSPOP) & M_AWRITERSPOP)
+
+#define S_AWRITEREQOP 10
+#define M_AWRITEREQOP 0x1fU
+#define V_AWRITEREQOP(x) ((x) << S_AWRITEREQOP)
+#define G_AWRITEREQOP(x) (((x) >> S_AWRITEREQOP) & M_AWRITEREQOP)
+
+#define S_FLUSHRSPOP 5
+#define M_FLUSHRSPOP 0x1fU
+#define V_FLUSHRSPOP(x) ((x) << S_FLUSHRSPOP)
+#define G_FLUSHRSPOP(x) (((x) >> S_FLUSHRSPOP) & M_FLUSHRSPOP)
+
+#define S_FLUSHREQOP 0
+#define M_FLUSHREQOP 0x1fU
+#define V_FLUSHREQOP(x) ((x) << S_FLUSHREQOP)
+#define G_FLUSHREQOP(x) (((x) >> S_FLUSHREQOP) & M_FLUSHREQOP)
+
#define A_TP_FRAG_CONFIG 0x56
#define S_TLSMODE 16
@@ -25330,6 +31877,21 @@
#define V_PASSMODE(x) ((x) << S_PASSMODE)
#define G_PASSMODE(x) (((x) >> S_PASSMODE) & M_PASSMODE)
+#define S_NVMTMODE 22
+#define M_NVMTMODE 0x3U
+#define V_NVMTMODE(x) ((x) << S_NVMTMODE)
+#define G_NVMTMODE(x) (((x) >> S_NVMTMODE) & M_NVMTMODE)
+
+#define S_ROCEMODE 20
+#define M_ROCEMODE 0x3U
+#define V_ROCEMODE(x) ((x) << S_ROCEMODE)
+#define G_ROCEMODE(x) (((x) >> S_ROCEMODE) & M_ROCEMODE)
+
+#define S_DTLSMODE 18
+#define M_DTLSMODE 0x3U
+#define V_DTLSMODE(x) ((x) << S_DTLSMODE)
+#define G_DTLSMODE(x) (((x) >> S_DTLSMODE) & M_DTLSMODE)
+
#define A_TP_CMM_CONFIG 0x57
#define S_WRCNTIDLE 16
@@ -25383,6 +31945,7 @@
#define V_GRETYPE(x) ((x) << S_GRETYPE)
#define G_GRETYPE(x) (((x) >> S_GRETYPE) & M_GRETYPE)
+#define A_TP_MMGR_CMM_CONFIG 0x5a
#define A_TP_DBG_CLEAR 0x60
#define A_TP_DBG_CORE_HDR0 0x61
@@ -25843,14 +32406,6 @@
#define V_T5_EPCMDBUSY(x) ((x) << S_T5_EPCMDBUSY)
#define F_T5_EPCMDBUSY V_T5_EPCMDBUSY(1U)
-#define S_T6_ETXBUSY 1
-#define V_T6_ETXBUSY(x) ((x) << S_T6_ETXBUSY)
-#define F_T6_ETXBUSY V_T6_ETXBUSY(1U)
-
-#define S_T6_EPCMDBUSY 0
-#define V_T6_EPCMDBUSY(x) ((x) << S_T6_EPCMDBUSY)
-#define F_T6_EPCMDBUSY V_T6_EPCMDBUSY(1U)
-
#define A_TP_DBG_ENG_RES1 0x67
#define S_RXCPLSRDY 31
@@ -26114,16 +32669,6 @@
#define V_T5_RXPCMDCNG(x) ((x) << S_T5_RXPCMDCNG)
#define G_T5_RXPCMDCNG(x) (((x) >> S_T5_RXPCMDCNG) & M_T5_RXPCMDCNG)
-#define S_T6_RXFIFOCNG 20
-#define M_T6_RXFIFOCNG 0xfU
-#define V_T6_RXFIFOCNG(x) ((x) << S_T6_RXFIFOCNG)
-#define G_T6_RXFIFOCNG(x) (((x) >> S_T6_RXFIFOCNG) & M_T6_RXFIFOCNG)
-
-#define S_T6_RXPCMDCNG 14
-#define M_T6_RXPCMDCNG 0x3U
-#define V_T6_RXPCMDCNG(x) ((x) << S_T6_RXPCMDCNG)
-#define G_T6_RXPCMDCNG(x) (((x) >> S_T6_RXPCMDCNG) & M_T6_RXPCMDCNG)
-
#define A_TP_DBG_ERROR_CNT 0x6c
#define A_TP_DBG_CORE_CPL 0x6d
@@ -26191,6 +32736,244 @@
#define A_TP_DBG_CACHE_RD_HIT 0x73
#define A_TP_DBG_CACHE_MC_REQ 0x74
#define A_TP_DBG_CACHE_MC_RSP 0x75
+#define A_TP_RSS_PF0_CONFIG_CH3_CH2 0x80
+
+#define S_PFMAPALWAYS 22
+#define V_PFMAPALWAYS(x) ((x) << S_PFMAPALWAYS)
+#define F_PFMAPALWAYS V_PFMAPALWAYS(1U)
+
+#define S_PFROUNDROBINEN 21
+#define V_PFROUNDROBINEN(x) ((x) << S_PFROUNDROBINEN)
+#define F_PFROUNDROBINEN V_PFROUNDROBINEN(1U)
+
+#define S_FOURCHNEN 20
+#define V_FOURCHNEN(x) ((x) << S_FOURCHNEN)
+#define F_FOURCHNEN V_FOURCHNEN(1U)
+
+#define S_CH3DEFAULTQUEUE 10
+#define M_CH3DEFAULTQUEUE 0x3ffU
+#define V_CH3DEFAULTQUEUE(x) ((x) << S_CH3DEFAULTQUEUE)
+#define G_CH3DEFAULTQUEUE(x) (((x) >> S_CH3DEFAULTQUEUE) & M_CH3DEFAULTQUEUE)
+
+#define S_CH2DEFAULTQUEUE 0
+#define M_CH2DEFAULTQUEUE 0x3ffU
+#define V_CH2DEFAULTQUEUE(x) ((x) << S_CH2DEFAULTQUEUE)
+#define G_CH2DEFAULTQUEUE(x) (((x) >> S_CH2DEFAULTQUEUE) & M_CH2DEFAULTQUEUE)
+
+#define A_TP_RSS_PF1_CONFIG_CH3_CH2 0x81
+#define A_TP_RSS_PF2_CONFIG_CH3_CH2 0x82
+#define A_TP_RSS_PF3_CONFIG_CH3_CH2 0x83
+#define A_TP_RSS_PF4_CONFIG_CH3_CH2 0x84
+#define A_TP_RSS_PF5_CONFIG_CH3_CH2 0x85
+#define A_TP_RSS_PF6_CONFIG_CH3_CH2 0x86
+#define A_TP_RSS_PF7_CONFIG_CH3_CH2 0x87
+#define A_TP_RSS_PF0_EXT_CONFIG 0x88
+#define A_TP_RSS_PF1_EXT_CONFIG 0x89
+#define A_TP_RSS_PF2_EXT_CONFIG 0x8a
+#define A_TP_RSS_PF3_EXT_CONFIG 0x8b
+#define A_TP_RSS_PF4_EXT_CONFIG 0x8c
+#define A_TP_RSS_PF5_EXT_CONFIG 0x8d
+#define A_TP_RSS_PF6_EXT_CONFIG 0x8e
+#define A_TP_RSS_PF7_EXT_CONFIG 0x8f
+#define A_TP_ROCE_CONFIG 0x90
+
+#define S_IGNAETHMSB 24
+#define V_IGNAETHMSB(x) ((x) << S_IGNAETHMSB)
+#define F_IGNAETHMSB V_IGNAETHMSB(1U)
+
+#define S_XDIDMMCTL 23
+#define V_XDIDMMCTL(x) ((x) << S_XDIDMMCTL)
+#define F_XDIDMMCTL V_XDIDMMCTL(1U)
+
+#define S_WRRETHDBGFWDEN 22
+#define V_WRRETHDBGFWDEN(x) ((x) << S_WRRETHDBGFWDEN)
+#define F_WRRETHDBGFWDEN V_WRRETHDBGFWDEN(1U)
+
+#define S_ACKINTGENCTRL 20
+#define M_ACKINTGENCTRL 0x3U
+#define V_ACKINTGENCTRL(x) ((x) << S_ACKINTGENCTRL)
+#define G_ACKINTGENCTRL(x) (((x) >> S_ACKINTGENCTRL) & M_ACKINTGENCTRL)
+
+#define S_ATOMICALIGNCHKEN 19
+#define V_ATOMICALIGNCHKEN(x) ((x) << S_ATOMICALIGNCHKEN)
+#define F_ATOMICALIGNCHKEN V_ATOMICALIGNCHKEN(1U)
+
+#define S_RDRETHLENCHKEN 18
+#define V_RDRETHLENCHKEN(x) ((x) << S_RDRETHLENCHKEN)
+#define F_RDRETHLENCHKEN V_RDRETHLENCHKEN(1U)
+
+#define S_WRTOTALLENCHKEN 17
+#define V_WRTOTALLENCHKEN(x) ((x) << S_WRTOTALLENCHKEN)
+#define F_WRTOTALLENCHKEN V_WRTOTALLENCHKEN(1U)
+
+#define S_WRRETHLENCHKEN 16
+#define V_WRRETHLENCHKEN(x) ((x) << S_WRRETHLENCHKEN)
+#define F_WRRETHLENCHKEN V_WRRETHLENCHKEN(1U)
+
+#define S_TNLERRORUDPLEN 11
+#define V_TNLERRORUDPLEN(x) ((x) << S_TNLERRORUDPLEN)
+#define F_TNLERRORUDPLEN V_TNLERRORUDPLEN(1U)
+
+#define S_TNLERRORPKEY 10
+#define V_TNLERRORPKEY(x) ((x) << S_TNLERRORPKEY)
+#define F_TNLERRORPKEY V_TNLERRORPKEY(1U)
+
+#define S_TNLERROROPCODE 9
+#define V_TNLERROROPCODE(x) ((x) << S_TNLERROROPCODE)
+#define F_TNLERROROPCODE V_TNLERROROPCODE(1U)
+
+#define S_TNLERRORTVER 8
+#define V_TNLERRORTVER(x) ((x) << S_TNLERRORTVER)
+#define F_TNLERRORTVER V_TNLERRORTVER(1U)
+
+#define S_DROPERRORUDPLEN 3
+#define V_DROPERRORUDPLEN(x) ((x) << S_DROPERRORUDPLEN)
+#define F_DROPERRORUDPLEN V_DROPERRORUDPLEN(1U)
+
+#define S_DROPERRORPKEY 2
+#define V_DROPERRORPKEY(x) ((x) << S_DROPERRORPKEY)
+#define F_DROPERRORPKEY V_DROPERRORPKEY(1U)
+
+#define S_DROPERROROPCODE 1
+#define V_DROPERROROPCODE(x) ((x) << S_DROPERROROPCODE)
+#define F_DROPERROROPCODE V_DROPERROROPCODE(1U)
+
+#define S_DROPERRORTVER 0
+#define V_DROPERRORTVER(x) ((x) << S_DROPERRORTVER)
+#define F_DROPERRORTVER V_DROPERRORTVER(1U)
+
+#define A_TP_NVMT_CONFIG 0x91
+
+#define S_PDACHKEN 2
+#define V_PDACHKEN(x) ((x) << S_PDACHKEN)
+#define F_PDACHKEN V_PDACHKEN(1U)
+
+#define S_FORCERQNONDDP 1
+#define V_FORCERQNONDDP(x) ((x) << S_FORCERQNONDDP)
+#define F_FORCERQNONDDP V_FORCERQNONDDP(1U)
+
+#define S_STRIPHCRC 0
+#define V_STRIPHCRC(x) ((x) << S_STRIPHCRC)
+#define F_STRIPHCRC V_STRIPHCRC(1U)
+
+#define A_TP_NVMT_MAXHDR 0x92
+
+#define S_MAXHDR3 24
+#define M_MAXHDR3 0xffU
+#define V_MAXHDR3(x) ((x) << S_MAXHDR3)
+#define G_MAXHDR3(x) (((x) >> S_MAXHDR3) & M_MAXHDR3)
+
+#define S_MAXHDR2 16
+#define M_MAXHDR2 0xffU
+#define V_MAXHDR2(x) ((x) << S_MAXHDR2)
+#define G_MAXHDR2(x) (((x) >> S_MAXHDR2) & M_MAXHDR2)
+
+#define S_MAXHDR1 8
+#define M_MAXHDR1 0xffU
+#define V_MAXHDR1(x) ((x) << S_MAXHDR1)
+#define G_MAXHDR1(x) (((x) >> S_MAXHDR1) & M_MAXHDR1)
+
+#define S_MAXHDR0 0
+#define M_MAXHDR0 0xffU
+#define V_MAXHDR0(x) ((x) << S_MAXHDR0)
+#define G_MAXHDR0(x) (((x) >> S_MAXHDR0) & M_MAXHDR0)
+
+#define A_TP_NVMT_PDORSVD 0x93
+
+#define S_PDORSVD3 24
+#define M_PDORSVD3 0xffU
+#define V_PDORSVD3(x) ((x) << S_PDORSVD3)
+#define G_PDORSVD3(x) (((x) >> S_PDORSVD3) & M_PDORSVD3)
+
+#define S_PDORSVD2 16
+#define M_PDORSVD2 0xffU
+#define V_PDORSVD2(x) ((x) << S_PDORSVD2)
+#define G_PDORSVD2(x) (((x) >> S_PDORSVD2) & M_PDORSVD2)
+
+#define S_PDORSVD1 8
+#define M_PDORSVD1 0xffU
+#define V_PDORSVD1(x) ((x) << S_PDORSVD1)
+#define G_PDORSVD1(x) (((x) >> S_PDORSVD1) & M_PDORSVD1)
+
+#define S_PDORSVD0 0
+#define M_PDORSVD0 0xffU
+#define V_PDORSVD0(x) ((x) << S_PDORSVD0)
+#define G_PDORSVD0(x) (((x) >> S_PDORSVD0) & M_PDORSVD0)
+
+#define A_TP_RDMA_CONFIG 0x94
+
+#define S_SRQLIMITEN 20
+#define V_SRQLIMITEN(x) ((x) << S_SRQLIMITEN)
+#define F_SRQLIMITEN V_SRQLIMITEN(1U)
+
+#define S_SNDIMMSEOP 15
+#define M_SNDIMMSEOP 0x1fU
+#define V_SNDIMMSEOP(x) ((x) << S_SNDIMMSEOP)
+#define G_SNDIMMSEOP(x) (((x) >> S_SNDIMMSEOP) & M_SNDIMMSEOP)
+
+#define S_SNDIMMOP 10
+#define M_SNDIMMOP 0x1fU
+#define V_SNDIMMOP(x) ((x) << S_SNDIMMOP)
+#define G_SNDIMMOP(x) (((x) >> S_SNDIMMOP) & M_SNDIMMOP)
+
+#define S_IWARPXRCIDCHKEN 4
+#define V_IWARPXRCIDCHKEN(x) ((x) << S_IWARPXRCIDCHKEN)
+#define F_IWARPXRCIDCHKEN V_IWARPXRCIDCHKEN(1U)
+
+#define S_IWARPEXTOPEN 3
+#define V_IWARPEXTOPEN(x) ((x) << S_IWARPEXTOPEN)
+#define F_IWARPEXTOPEN V_IWARPEXTOPEN(1U)
+
+#define S_XRCIMPLTYPE 1
+#define V_XRCIMPLTYPE(x) ((x) << S_XRCIMPLTYPE)
+#define F_XRCIMPLTYPE V_XRCIMPLTYPE(1U)
+
+#define S_XRCEN 0
+#define V_XRCEN(x) ((x) << S_XRCEN)
+#define F_XRCEN V_XRCEN(1U)
+
+#define A_TP_ROCE_RRQ_BASE 0x95
+#define A_TP_FILTER_RATE_CFG 0x96
+
+#define S_GRP_CFG_RD 30
+#define V_GRP_CFG_RD(x) ((x) << S_GRP_CFG_RD)
+#define F_GRP_CFG_RD V_GRP_CFG_RD(1U)
+
+#define S_GRP_CFG_INIT 29
+#define V_GRP_CFG_INIT(x) ((x) << S_GRP_CFG_INIT)
+#define F_GRP_CFG_INIT V_GRP_CFG_INIT(1U)
+
+#define S_GRP_CFG_RST 28
+#define V_GRP_CFG_RST(x) ((x) << S_GRP_CFG_RST)
+#define F_GRP_CFG_RST V_GRP_CFG_RST(1U)
+
+#define S_GRP_CFG_SEL 16
+#define M_GRP_CFG_SEL 0xfffU
+#define V_GRP_CFG_SEL(x) ((x) << S_GRP_CFG_SEL)
+#define G_GRP_CFG_SEL(x) (((x) >> S_GRP_CFG_SEL) & M_GRP_CFG_SEL)
+
+#define S_US_TIMER_TICK 0
+#define M_US_TIMER_TICK 0xffffU
+#define V_US_TIMER_TICK(x) ((x) << S_US_TIMER_TICK)
+#define G_US_TIMER_TICK(x) (((x) >> S_US_TIMER_TICK) & M_US_TIMER_TICK)
+
+#define A_TP_TLS_CONFIG 0x99
+
+#define S_QUIESCETYPE1 24
+#define M_QUIESCETYPE1 0xffU
+#define V_QUIESCETYPE1(x) ((x) << S_QUIESCETYPE1)
+#define G_QUIESCETYPE1(x) (((x) >> S_QUIESCETYPE1) & M_QUIESCETYPE1)
+
+#define S_QUIESCETYPE2 16
+#define M_QUIESCETYPE2 0xffU
+#define V_QUIESCETYPE2(x) ((x) << S_QUIESCETYPE2)
+#define G_QUIESCETYPE2(x) (((x) >> S_QUIESCETYPE2) & M_QUIESCETYPE2)
+
+#define S_QUIESCETYPE3 8
+#define M_QUIESCETYPE3 0xffU
+#define V_QUIESCETYPE3(x) ((x) << S_QUIESCETYPE3)
+#define G_QUIESCETYPE3(x) (((x) >> S_QUIESCETYPE3) & M_QUIESCETYPE3)
+
#define A_TP_T5_TX_DROP_CNT_CH0 0x120
#define A_TP_T5_TX_DROP_CNT_CH1 0x121
#define A_TP_TX_DROP_CNT_CH2 0x122
@@ -26682,10 +33465,6 @@
#define A_TP_DBG_ESIDE_DISP1 0x137
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE1 0
#define V_TXFULL_ESIDE1(x) ((x) << S_TXFULL_ESIDE1)
#define F_TXFULL_ESIDE1 V_TXFULL_ESIDE1(1U)
@@ -26719,20 +33498,12 @@
#define A_TP_DBG_ESIDE_DISP2 0x13a
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE2 0
#define V_TXFULL_ESIDE2(x) ((x) << S_TXFULL_ESIDE2)
#define F_TXFULL_ESIDE2 V_TXFULL_ESIDE2(1U)
#define A_TP_DBG_ESIDE_DISP3 0x13b
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE3 0
#define V_TXFULL_ESIDE3(x) ((x) << S_TXFULL_ESIDE3)
#define F_TXFULL_ESIDE3 V_TXFULL_ESIDE3(1U)
@@ -26836,6 +33607,94 @@
#define V_SRVRSRAM(x) ((x) << S_SRVRSRAM)
#define F_SRVRSRAM V_SRVRSRAM(1U)
+#define S_T7_FILTERMODE 31
+#define V_T7_FILTERMODE(x) ((x) << S_T7_FILTERMODE)
+#define F_T7_FILTERMODE V_T7_FILTERMODE(1U)
+
+#define S_T7_FCOEMASK 30
+#define V_T7_FCOEMASK(x) ((x) << S_T7_FCOEMASK)
+#define F_T7_FCOEMASK V_T7_FCOEMASK(1U)
+
+#define S_T7_SRVRSRAM 29
+#define V_T7_SRVRSRAM(x) ((x) << S_T7_SRVRSRAM)
+#define F_T7_SRVRSRAM V_T7_SRVRSRAM(1U)
+
+#define S_ROCEUDFORCEIPV6 28
+#define V_ROCEUDFORCEIPV6(x) ((x) << S_ROCEUDFORCEIPV6)
+#define F_ROCEUDFORCEIPV6 V_ROCEUDFORCEIPV6(1U)
+
+#define S_TCPFLAGS8 27
+#define V_TCPFLAGS8(x) ((x) << S_TCPFLAGS8)
+#define F_TCPFLAGS8 V_TCPFLAGS8(1U)
+
+#define S_MACMATCH11 26
+#define V_MACMATCH11(x) ((x) << S_MACMATCH11)
+#define F_MACMATCH11 V_MACMATCH11(1U)
+
+#define S_SMACMATCH10 25
+#define V_SMACMATCH10(x) ((x) << S_SMACMATCH10)
+#define F_SMACMATCH10 V_SMACMATCH10(1U)
+
+#define S_SMACMATCH 14
+#define V_SMACMATCH(x) ((x) << S_SMACMATCH)
+#define F_SMACMATCH V_SMACMATCH(1U)
+
+#define S_TCPFLAGS 13
+#define V_TCPFLAGS(x) ((x) << S_TCPFLAGS)
+#define F_TCPFLAGS V_TCPFLAGS(1U)
+
+#define S_SYNONLY 12
+#define V_SYNONLY(x) ((x) << S_SYNONLY)
+#define F_SYNONLY V_SYNONLY(1U)
+
+#define S_ROCE 11
+#define V_ROCE(x) ((x) << S_ROCE)
+#define F_ROCE V_ROCE(1U)
+
+#define S_T7_FRAGMENTATION 10
+#define V_T7_FRAGMENTATION(x) ((x) << S_T7_FRAGMENTATION)
+#define F_T7_FRAGMENTATION V_T7_FRAGMENTATION(1U)
+
+#define S_T7_MPSHITTYPE 9
+#define V_T7_MPSHITTYPE(x) ((x) << S_T7_MPSHITTYPE)
+#define F_T7_MPSHITTYPE V_T7_MPSHITTYPE(1U)
+
+#define S_T7_MACMATCH 8
+#define V_T7_MACMATCH(x) ((x) << S_T7_MACMATCH)
+#define F_T7_MACMATCH V_T7_MACMATCH(1U)
+
+#define S_T7_ETHERTYPE 7
+#define V_T7_ETHERTYPE(x) ((x) << S_T7_ETHERTYPE)
+#define F_T7_ETHERTYPE V_T7_ETHERTYPE(1U)
+
+#define S_T7_PROTOCOL 6
+#define V_T7_PROTOCOL(x) ((x) << S_T7_PROTOCOL)
+#define F_T7_PROTOCOL V_T7_PROTOCOL(1U)
+
+#define S_T7_TOS 5
+#define V_T7_TOS(x) ((x) << S_T7_TOS)
+#define F_T7_TOS V_T7_TOS(1U)
+
+#define S_T7_VLAN 4
+#define V_T7_VLAN(x) ((x) << S_T7_VLAN)
+#define F_T7_VLAN V_T7_VLAN(1U)
+
+#define S_T7_VNIC_ID 3
+#define V_T7_VNIC_ID(x) ((x) << S_T7_VNIC_ID)
+#define F_T7_VNIC_ID V_T7_VNIC_ID(1U)
+
+#define S_T7_PORT 2
+#define V_T7_PORT(x) ((x) << S_T7_PORT)
+#define F_T7_PORT V_T7_PORT(1U)
+
+#define S_T7_FCOE 1
+#define V_T7_FCOE(x) ((x) << S_T7_FCOE)
+#define F_T7_FCOE V_T7_FCOE(1U)
+
+#define S_IPSECIDX 0
+#define V_IPSECIDX(x) ((x) << S_IPSECIDX)
+#define F_IPSECIDX V_IPSECIDX(1U)
+
#define A_TP_INGRESS_CONFIG 0x141
#define S_OPAQUE_TYPE 16
@@ -26888,6 +33747,14 @@
#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX)
#define F_USE_ENC_IDX V_USE_ENC_IDX(1U)
+#define S_USE_MPS_ECN 15
+#define V_USE_MPS_ECN(x) ((x) << S_USE_MPS_ECN)
+#define F_USE_MPS_ECN V_USE_MPS_ECN(1U)
+
+#define S_USE_MPS_CONG 14
+#define V_USE_MPS_CONG(x) ((x) << S_USE_MPS_CONG)
+#define F_USE_MPS_CONG V_USE_MPS_CONG(1U)
+
#define A_TP_TX_DROP_CFG_CH2 0x142
#define A_TP_TX_DROP_CFG_CH3 0x143
#define A_TP_EGRESS_CONFIG 0x145
@@ -27490,6 +34357,51 @@
#define V_ROCEV2UDPPORT(x) ((x) << S_ROCEV2UDPPORT)
#define G_ROCEV2UDPPORT(x) (((x) >> S_ROCEV2UDPPORT) & M_ROCEV2UDPPORT)
+#define S_IPSECTUNETHTRANSEN 29
+#define V_IPSECTUNETHTRANSEN(x) ((x) << S_IPSECTUNETHTRANSEN)
+#define F_IPSECTUNETHTRANSEN V_IPSECTUNETHTRANSEN(1U)
+
+#define S_ROCEV2ZEROUDP6CSUM 28
+#define V_ROCEV2ZEROUDP6CSUM(x) ((x) << S_ROCEV2ZEROUDP6CSUM)
+#define F_ROCEV2ZEROUDP6CSUM V_ROCEV2ZEROUDP6CSUM(1U)
+
+#define S_ROCEV2PROCEN 27
+#define V_ROCEV2PROCEN(x) ((x) << S_ROCEV2PROCEN)
+#define F_ROCEV2PROCEN V_ROCEV2PROCEN(1U)
+
+#define A_TP_ESIDE_ROCE_PORT12 0x161
+
+#define S_ROCEV2UDPPORT2 16
+#define M_ROCEV2UDPPORT2 0xffffU
+#define V_ROCEV2UDPPORT2(x) ((x) << S_ROCEV2UDPPORT2)
+#define G_ROCEV2UDPPORT2(x) (((x) >> S_ROCEV2UDPPORT2) & M_ROCEV2UDPPORT2)
+
+#define S_ROCEV2UDPPORT1 0
+#define M_ROCEV2UDPPORT1 0xffffU
+#define V_ROCEV2UDPPORT1(x) ((x) << S_ROCEV2UDPPORT1)
+#define G_ROCEV2UDPPORT1(x) (((x) >> S_ROCEV2UDPPORT1) & M_ROCEV2UDPPORT1)
+
+#define A_TP_ESIDE_ROCE_PORT34 0x162
+
+#define S_ROCEV2UDPPORT4 16
+#define M_ROCEV2UDPPORT4 0xffffU
+#define V_ROCEV2UDPPORT4(x) ((x) << S_ROCEV2UDPPORT4)
+#define G_ROCEV2UDPPORT4(x) (((x) >> S_ROCEV2UDPPORT4) & M_ROCEV2UDPPORT4)
+
+#define S_ROCEV2UDPPORT3 0
+#define M_ROCEV2UDPPORT3 0xffffU
+#define V_ROCEV2UDPPORT3(x) ((x) << S_ROCEV2UDPPORT3)
+#define G_ROCEV2UDPPORT3(x) (((x) >> S_ROCEV2UDPPORT3) & M_ROCEV2UDPPORT3)
+
+#define A_TP_ESIDE_CONFIG1 0x163
+
+#define S_ROCEV2CRCIGN 0
+#define M_ROCEV2CRCIGN 0xfU
+#define V_ROCEV2CRCIGN(x) ((x) << S_ROCEV2CRCIGN)
+#define G_ROCEV2CRCIGN(x) (((x) >> S_ROCEV2CRCIGN) & M_ROCEV2CRCIGN)
+
+#define A_TP_ESIDE_DEBUG_CFG 0x16c
+#define A_TP_ESIDE_DEBUG_DATA 0x16d
#define A_TP_DBG_CSIDE_RX0 0x230
#define S_CRXSOPCNT 28
@@ -27962,56 +34874,7 @@
#define V_TXFULL2X(x) ((x) << S_TXFULL2X)
#define F_TXFULL2X V_TXFULL2X(1U)
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DISP1 0x23b
-
-#define S_T5_TXFULL 31
-#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL)
-#define F_T5_TXFULL V_T5_TXFULL(1U)
-
-#define S_T5_PLD_RXZEROP_SRDY 25
-#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY)
-#define F_T5_PLD_RXZEROP_SRDY V_T5_PLD_RXZEROP_SRDY(1U)
-
-#define S_T5_DDP_SRDY 22
-#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY)
-#define F_T5_DDP_SRDY V_T5_DDP_SRDY(1U)
-
-#define S_T5_DDP_DRDY 21
-#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY)
-#define F_T5_DDP_DRDY V_T5_DDP_DRDY(1U)
-
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DDP0 0x23c
#define S_DDPMSGLATEST7 28
@@ -28222,6 +35085,59 @@
#define V_ISCSICMDMODE(x) ((x) << S_ISCSICMDMODE)
#define F_ISCSICMDMODE V_ISCSICMDMODE(1U)
+#define S_NVMTOPUPDEN 30
+#define V_NVMTOPUPDEN(x) ((x) << S_NVMTOPUPDEN)
+#define F_NVMTOPUPDEN V_NVMTOPUPDEN(1U)
+
+#define S_NOPDIS 29
+#define V_NOPDIS(x) ((x) << S_NOPDIS)
+#define F_NOPDIS V_NOPDIS(1U)
+
+#define S_IWARPINVREQEN 27
+#define V_IWARPINVREQEN(x) ((x) << S_IWARPINVREQEN)
+#define F_IWARPINVREQEN V_IWARPINVREQEN(1U)
+
+#define S_ROCEINVREQEN 26
+#define V_ROCEINVREQEN(x) ((x) << S_ROCEINVREQEN)
+#define F_ROCEINVREQEN V_ROCEINVREQEN(1U)
+
+#define S_ROCESRQFWEN 25
+#define V_ROCESRQFWEN(x) ((x) << S_ROCESRQFWEN)
+#define F_ROCESRQFWEN V_ROCESRQFWEN(1U)
+
+#define S_T7_WRITEZEROOP 20
+#define M_T7_WRITEZEROOP 0x1fU
+#define V_T7_WRITEZEROOP(x) ((x) << S_T7_WRITEZEROOP)
+#define G_T7_WRITEZEROOP(x) (((x) >> S_T7_WRITEZEROOP) & M_T7_WRITEZEROOP)
+
+#define S_IWARPEXTMODE 9
+#define V_IWARPEXTMODE(x) ((x) << S_IWARPEXTMODE)
+#define F_IWARPEXTMODE V_IWARPEXTMODE(1U)
+
+#define S_IWARPINVFWEN 8
+#define V_IWARPINVFWEN(x) ((x) << S_IWARPINVFWEN)
+#define F_IWARPINVFWEN V_IWARPINVFWEN(1U)
+
+#define S_IWARPSRQFWEN 7
+#define V_IWARPSRQFWEN(x) ((x) << S_IWARPSRQFWEN)
+#define F_IWARPSRQFWEN V_IWARPSRQFWEN(1U)
+
+#define S_T7_STARTSKIPPLD 3
+#define V_T7_STARTSKIPPLD(x) ((x) << S_T7_STARTSKIPPLD)
+#define F_T7_STARTSKIPPLD V_T7_STARTSKIPPLD(1U)
+
+#define S_NVMTFLIMMEN 2
+#define V_NVMTFLIMMEN(x) ((x) << S_NVMTFLIMMEN)
+#define F_NVMTFLIMMEN V_NVMTFLIMMEN(1U)
+
+#define S_NVMTOPCTRLEN 1
+#define V_NVMTOPCTRLEN(x) ((x) << S_NVMTOPCTRLEN)
+#define F_NVMTOPCTRLEN V_NVMTOPCTRLEN(1U)
+
+#define S_T7_WRITEZEROEN 0
+#define V_T7_WRITEZEROEN(x) ((x) << S_T7_WRITEZEROEN)
+#define F_T7_WRITEZEROEN V_T7_WRITEZEROEN(1U)
+
#define A_TP_CSPI_POWER 0x243
#define S_GATECHNTX3 11
@@ -28256,6 +35172,26 @@
#define V_SLEEPREQUTRN(x) ((x) << S_SLEEPREQUTRN)
#define F_SLEEPREQUTRN V_SLEEPREQUTRN(1U)
+#define S_GATECHNRX3 7
+#define V_GATECHNRX3(x) ((x) << S_GATECHNRX3)
+#define F_GATECHNRX3 V_GATECHNRX3(1U)
+
+#define S_GATECHNRX2 6
+#define V_GATECHNRX2(x) ((x) << S_GATECHNRX2)
+#define F_GATECHNRX2 V_GATECHNRX2(1U)
+
+#define S_T7_GATECHNRX1 5
+#define V_T7_GATECHNRX1(x) ((x) << S_T7_GATECHNRX1)
+#define F_T7_GATECHNRX1 V_T7_GATECHNRX1(1U)
+
+#define S_T7_GATECHNRX0 4
+#define V_T7_GATECHNRX0(x) ((x) << S_T7_GATECHNRX0)
+#define F_T7_GATECHNRX0 V_T7_GATECHNRX0(1U)
+
+#define S_T7_SLEEPRDYUTRN 3
+#define V_T7_SLEEPRDYUTRN(x) ((x) << S_T7_SLEEPRDYUTRN)
+#define F_T7_SLEEPRDYUTRN V_T7_SLEEPRDYUTRN(1U)
+
#define A_TP_TRC_CONFIG 0x244
#define S_TRCRR 1
@@ -28266,6 +35202,19 @@
#define V_TRCCH(x) ((x) << S_TRCCH)
#define F_TRCCH V_TRCCH(1U)
+#define S_DEBUGPG 3
+#define V_DEBUGPG(x) ((x) << S_DEBUGPG)
+#define F_DEBUGPG V_DEBUGPG(1U)
+
+#define S_T7_TRCRR 2
+#define V_T7_TRCRR(x) ((x) << S_T7_TRCRR)
+#define F_T7_TRCRR V_T7_TRCRR(1U)
+
+#define S_T7_TRCCH 0
+#define M_T7_TRCCH 0x3U
+#define V_T7_TRCCH(x) ((x) << S_T7_TRCCH)
+#define G_T7_TRCCH(x) (((x) >> S_T7_TRCCH) & M_T7_TRCCH)
+
#define A_TP_TAG_CONFIG 0x245
#define S_ETAGTYPE 16
@@ -28379,26 +35328,6 @@
#define V_T5_CPRSSTATE0(x) ((x) << S_T5_CPRSSTATE0)
#define G_T5_CPRSSTATE0(x) (((x) >> S_T5_CPRSSTATE0) & M_T5_CPRSSTATE0)
-#define S_T6_CPRSSTATE3 24
-#define M_T6_CPRSSTATE3 0xfU
-#define V_T6_CPRSSTATE3(x) ((x) << S_T6_CPRSSTATE3)
-#define G_T6_CPRSSTATE3(x) (((x) >> S_T6_CPRSSTATE3) & M_T6_CPRSSTATE3)
-
-#define S_T6_CPRSSTATE2 16
-#define M_T6_CPRSSTATE2 0xfU
-#define V_T6_CPRSSTATE2(x) ((x) << S_T6_CPRSSTATE2)
-#define G_T6_CPRSSTATE2(x) (((x) >> S_T6_CPRSSTATE2) & M_T6_CPRSSTATE2)
-
-#define S_T6_CPRSSTATE1 8
-#define M_T6_CPRSSTATE1 0xfU
-#define V_T6_CPRSSTATE1(x) ((x) << S_T6_CPRSSTATE1)
-#define G_T6_CPRSSTATE1(x) (((x) >> S_T6_CPRSSTATE1) & M_T6_CPRSSTATE1)
-
-#define S_T6_CPRSSTATE0 0
-#define M_T6_CPRSSTATE0 0xfU
-#define V_T6_CPRSSTATE0(x) ((x) << S_T6_CPRSSTATE0)
-#define G_T6_CPRSSTATE0(x) (((x) >> S_T6_CPRSSTATE0) & M_T6_CPRSSTATE0)
-
#define A_TP_DBG_CSIDE_DEMUX 0x247
#define S_CALLDONE 28
@@ -28630,6 +35559,62 @@
#define A_TP_DBG_CSIDE_ARBIT_WAIT1 0x24e
#define A_TP_DBG_CSIDE_ARBIT_CNT0 0x24f
#define A_TP_DBG_CSIDE_ARBIT_CNT1 0x250
+#define A_TP_CHDR_CONFIG1 0x259
+
+#define S_CH3HIGH 24
+#define M_CH3HIGH 0xffU
+#define V_CH3HIGH(x) ((x) << S_CH3HIGH)
+#define G_CH3HIGH(x) (((x) >> S_CH3HIGH) & M_CH3HIGH)
+
+#define S_CH3LOW 16
+#define M_CH3LOW 0xffU
+#define V_CH3LOW(x) ((x) << S_CH3LOW)
+#define G_CH3LOW(x) (((x) >> S_CH3LOW) & M_CH3LOW)
+
+#define S_CH2HIGH 8
+#define M_CH2HIGH 0xffU
+#define V_CH2HIGH(x) ((x) << S_CH2HIGH)
+#define G_CH2HIGH(x) (((x) >> S_CH2HIGH) & M_CH2HIGH)
+
+#define S_CH2LOW 0
+#define M_CH2LOW 0xffU
+#define V_CH2LOW(x) ((x) << S_CH2LOW)
+#define G_CH2LOW(x) (((x) >> S_CH2LOW) & M_CH2LOW)
+
+#define A_TP_CDSP_RDMA_CONFIG 0x260
+#define A_TP_NVMT_OP_CTRL 0x268
+
+#define S_DEFOPCTRL 30
+#define M_DEFOPCTRL 0x3U
+#define V_DEFOPCTRL(x) ((x) << S_DEFOPCTRL)
+#define G_DEFOPCTRL(x) (((x) >> S_DEFOPCTRL) & M_DEFOPCTRL)
+
+#define S_NVMTOPCTRL 0
+#define M_NVMTOPCTRL 0x3fffffffU
+#define V_NVMTOPCTRL(x) ((x) << S_NVMTOPCTRL)
+#define G_NVMTOPCTRL(x) (((x) >> S_NVMTOPCTRL) & M_NVMTOPCTRL)
+
+#define A_TP_CSIDE_DEBUG_CFG 0x26c
+
+#define S_T7_OR_EN 13
+#define V_T7_OR_EN(x) ((x) << S_T7_OR_EN)
+#define F_T7_OR_EN V_T7_OR_EN(1U)
+
+#define S_T7_HI 12
+#define V_T7_HI(x) ((x) << S_T7_HI)
+#define F_T7_HI V_T7_HI(1U)
+
+#define S_T7_SELH 6
+#define M_T7_SELH 0x3fU
+#define V_T7_SELH(x) ((x) << S_T7_SELH)
+#define G_T7_SELH(x) (((x) >> S_T7_SELH) & M_T7_SELH)
+
+#define S_T7_SELL 0
+#define M_T7_SELL 0x3fU
+#define V_T7_SELL(x) ((x) << S_T7_SELL)
+#define G_T7_SELL(x) (((x) >> S_T7_SELL) & M_T7_SELL)
+
+#define A_TP_CSIDE_DEBUG_DATA 0x26d
#define A_TP_FIFO_CONFIG 0x8c0
#define S_CH1_OUTPUT 27
@@ -28771,6 +35756,174 @@
#define A_TP_MIB_TNL_ERR_1 0x71
#define A_TP_MIB_TNL_ERR_2 0x72
#define A_TP_MIB_TNL_ERR_3 0x73
+#define A_TP_MIB_RDMA_IN_PKT_0 0x80
+#define A_TP_MIB_RDMA_IN_PKT_1 0x81
+#define A_TP_MIB_RDMA_IN_PKT_2 0x82
+#define A_TP_MIB_RDMA_IN_PKT_3 0x83
+#define A_TP_MIB_RDMA_IN_BYTE_HI_0 0x84
+#define A_TP_MIB_RDMA_IN_BYTE_LO_0 0x85
+#define A_TP_MIB_RDMA_IN_BYTE_HI_1 0x86
+#define A_TP_MIB_RDMA_IN_BYTE_LO_1 0x87
+#define A_TP_MIB_RDMA_IN_BYTE_HI_2 0x88
+#define A_TP_MIB_RDMA_IN_BYTE_LO_2 0x89
+#define A_TP_MIB_RDMA_IN_BYTE_HI_3 0x8a
+#define A_TP_MIB_RDMA_IN_BYTE_LO_3 0x8b
+#define A_TP_MIB_RDMA_OUT_PKT_0 0x90
+#define A_TP_MIB_RDMA_OUT_PKT_1 0x91
+#define A_TP_MIB_RDMA_OUT_PKT_2 0x92
+#define A_TP_MIB_RDMA_OUT_PKT_3 0x93
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_0 0x94
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_0 0x95
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_1 0x96
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_1 0x97
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_2 0x98
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_2 0x99
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_3 0x9a
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_3 0x9b
+#define A_TP_MIB_ISCSI_IN_PKT_0 0xa0
+#define A_TP_MIB_ISCSI_IN_PKT_1 0xa1
+#define A_TP_MIB_ISCSI_IN_PKT_2 0xa2
+#define A_TP_MIB_ISCSI_IN_PKT_3 0xa3
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_0 0xa4
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_0 0xa5
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_1 0xa6
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_1 0xa7
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_2 0xa8
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_2 0xa9
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_3 0xaa
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_3 0xab
+#define A_TP_MIB_ISCSI_OUT_PKT_0 0xb0
+#define A_TP_MIB_ISCSI_OUT_PKT_1 0xb1
+#define A_TP_MIB_ISCSI_OUT_PKT_2 0xb2
+#define A_TP_MIB_ISCSI_OUT_PKT_3 0xb3
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_0 0xb4
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_0 0xb5
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_1 0xb6
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_1 0xb7
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_2 0xb8
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_2 0xb9
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_3 0xba
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_3 0xbb
+#define A_TP_MIB_NVMT_IN_PKT_0 0xc0
+#define A_TP_MIB_NVMT_IN_PKT_1 0xc1
+#define A_TP_MIB_NVMT_IN_PKT_2 0xc2
+#define A_TP_MIB_NVMT_IN_PKT_3 0xc3
+#define A_TP_MIB_NVMT_IN_BYTE_HI_0 0xc4
+#define A_TP_MIB_NVMT_IN_BYTE_LO_0 0xc5
+#define A_TP_MIB_NVMT_IN_BYTE_HI_1 0xc6
+#define A_TP_MIB_NVMT_IN_BYTE_LO_1 0xc7
+#define A_TP_MIB_NVMT_IN_BYTE_HI_2 0xc8
+#define A_TP_MIB_NVMT_IN_BYTE_LO_2 0xc9
+#define A_TP_MIB_NVMT_IN_BYTE_HI_3 0xca
+#define A_TP_MIB_NVMT_IN_BYTE_LO_3 0xcb
+#define A_TP_MIB_NVMT_OUT_PKT_0 0xd0
+#define A_TP_MIB_NVMT_OUT_PKT_1 0xd1
+#define A_TP_MIB_NVMT_OUT_PKT_2 0xd2
+#define A_TP_MIB_NVMT_OUT_PKT_3 0xd3
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_0 0xd4
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_0 0xd5
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_1 0xd6
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_1 0xd7
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_2 0xd8
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_2 0xd9
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_3 0xda
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_3 0xdb
+#define A_TP_MIB_TLS_IN_PKT_0 0xe0
+#define A_TP_MIB_TLS_IN_PKT_1 0xe1
+#define A_TP_MIB_TLS_IN_PKT_2 0xe2
+#define A_TP_MIB_TLS_IN_PKT_3 0xe3
+#define A_TP_MIB_TLS_IN_BYTE_HI_0 0xe4
+#define A_TP_MIB_TLS_IN_BYTE_LO_0 0xe5
+#define A_TP_MIB_TLS_IN_BYTE_HI_1 0xe6
+#define A_TP_MIB_TLS_IN_BYTE_LO_1 0xe7
+#define A_TP_MIB_TLS_IN_BYTE_HI_2 0xe8
+#define A_TP_MIB_TLS_IN_BYTE_LO_2 0xe9
+#define A_TP_MIB_TLS_IN_BYTE_HI_3 0xea
+#define A_TP_MIB_TLS_IN_BYTE_LO_3 0xeb
+#define A_TP_MIB_TLS_OUT_PKT_0 0xf0
+#define A_TP_MIB_TLS_OUT_PKT_1 0xf1
+#define A_TP_MIB_TLS_OUT_PKT_2 0xf2
+#define A_TP_MIB_TLS_OUT_PKT_3 0xf3
+#define A_TP_MIB_TLS_OUT_BYTE_HI_0 0xf4
+#define A_TP_MIB_TLS_OUT_BYTE_LO_0 0xf5
+#define A_TP_MIB_TLS_OUT_BYTE_HI_1 0xf6
+#define A_TP_MIB_TLS_OUT_BYTE_LO_1 0xf7
+#define A_TP_MIB_TLS_OUT_BYTE_HI_2 0xf8
+#define A_TP_MIB_TLS_OUT_BYTE_LO_2 0xf9
+#define A_TP_MIB_TLS_OUT_BYTE_HI_3 0xfa
+#define A_TP_MIB_TLS_OUT_BYTE_LO_3 0xfb
+#define A_TP_MIB_ROCE_IN_PKT_0 0x100
+#define A_TP_MIB_ROCE_IN_PKT_1 0x101
+#define A_TP_MIB_ROCE_IN_PKT_2 0x102
+#define A_TP_MIB_ROCE_IN_PKT_3 0x103
+#define A_TP_MIB_ROCE_IN_BYTE_HI_0 0x104
+#define A_TP_MIB_ROCE_IN_BYTE_LO_0 0x105
+#define A_TP_MIB_ROCE_IN_BYTE_HI_1 0x106
+#define A_TP_MIB_ROCE_IN_BYTE_LO_1 0x107
+#define A_TP_MIB_ROCE_IN_BYTE_HI_2 0x108
+#define A_TP_MIB_ROCE_IN_BYTE_LO_2 0x109
+#define A_TP_MIB_ROCE_IN_BYTE_HI_3 0x10a
+#define A_TP_MIB_ROCE_IN_BYTE_LO_3 0x10b
+#define A_TP_MIB_ROCE_OUT_PKT_0 0x110
+#define A_TP_MIB_ROCE_OUT_PKT_1 0x111
+#define A_TP_MIB_ROCE_OUT_PKT_2 0x112
+#define A_TP_MIB_ROCE_OUT_PKT_3 0x113
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_0 0x114
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_0 0x115
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_1 0x116
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_1 0x117
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_2 0x118
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_2 0x119
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_3 0x11a
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_3 0x11b
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_0 0x120
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_1 0x121
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_2 0x122
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_3 0x123
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_0 0x124
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_0 0x125
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_1 0x126
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_1 0x127
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_2 0x128
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_2 0x129
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_3 0x12a
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_3 0x12b
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_0 0x130
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_1 0x131
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_2 0x132
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_3 0x133
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_0 0x134
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_0 0x135
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_1 0x136
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_1 0x137
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_2 0x138
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_2 0x139
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_3 0x13a
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_3 0x13b
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_0 0x140
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_1 0x141
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_2 0x142
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_3 0x143
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_0 0x144
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_0 0x145
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_1 0x146
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_1 0x147
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_2 0x148
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_2 0x149
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_3 0x14a
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_3 0x14b
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_0 0x150
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_1 0x151
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_2 0x152
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_3 0x153
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_0 0x154
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_0 0x155
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_1 0x156
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_1 0x157
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_2 0x158
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_2 0x159
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_3 0x15a
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_3 0x15b
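/*
 * Usage sketch: each BYTE_HI_n and BYTE_LO_n pair above is the upper and
 * lower 32-bit half of one 64-bit per-channel byte counter, and the HI/LO
 * indices advance by two per channel.  A minimal reader, assuming
 * <stdint.h> and a hypothetical tp_mib_read(adap, index) accessor that
 * returns the 32-bit MIB word at a given A_TP_MIB_* index:
 */
struct adapter;
extern uint32_t tp_mib_read(struct adapter *adap, uint32_t index);

static inline uint64_t
tp_mib_rdma_in_bytes(struct adapter *adap, int chan)
{
	uint32_t hi, lo;

	hi = tp_mib_read(adap, A_TP_MIB_RDMA_IN_BYTE_HI_0 + 2 * chan);
	lo = tp_mib_read(adap, A_TP_MIB_RDMA_IN_BYTE_LO_0 + 2 * chan);
	return (((uint64_t)hi << 32) | lo);
}
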
/* registers for module ULP_TX */
#define ULP_TX_BASE_ADDR 0x8dc0
@@ -28853,7 +36006,58 @@
#define V_ATOMIC_FIX_DIS(x) ((x) << S_ATOMIC_FIX_DIS)
#define F_ATOMIC_FIX_DIS V_ATOMIC_FIX_DIS(1U)
+#define S_LB_LEN_SEL 28
+#define V_LB_LEN_SEL(x) ((x) << S_LB_LEN_SEL)
+#define F_LB_LEN_SEL V_LB_LEN_SEL(1U)
+
+#define S_DISABLE_TPT_CREDIT_CHK 27
+#define V_DISABLE_TPT_CREDIT_CHK(x) ((x) << S_DISABLE_TPT_CREDIT_CHK)
+#define F_DISABLE_TPT_CREDIT_CHK V_DISABLE_TPT_CREDIT_CHK(1U)
+
+#define S_REQSRC 26
+#define V_REQSRC(x) ((x) << S_REQSRC)
+#define F_REQSRC V_REQSRC(1U)
+
+#define S_ERR2UP 25
+#define V_ERR2UP(x) ((x) << S_ERR2UP)
+#define F_ERR2UP V_ERR2UP(1U)
+
+#define S_SGE_INVALIDATE_DIS 24
+#define V_SGE_INVALIDATE_DIS(x) ((x) << S_SGE_INVALIDATE_DIS)
+#define F_SGE_INVALIDATE_DIS V_SGE_INVALIDATE_DIS(1U)
+
+#define S_ROCE_ACKREQ_CTRL 23
+#define V_ROCE_ACKREQ_CTRL(x) ((x) << S_ROCE_ACKREQ_CTRL)
+#define F_ROCE_ACKREQ_CTRL V_ROCE_ACKREQ_CTRL(1U)
+
+#define S_MEM_ADDR_CTRL 21
+#define M_MEM_ADDR_CTRL 0x3U
+#define V_MEM_ADDR_CTRL(x) ((x) << S_MEM_ADDR_CTRL)
+#define G_MEM_ADDR_CTRL(x) (((x) >> S_MEM_ADDR_CTRL) & M_MEM_ADDR_CTRL)
+
+#define S_TPT_EXTENSION_MODE 20
+#define V_TPT_EXTENSION_MODE(x) ((x) << S_TPT_EXTENSION_MODE)
+#define F_TPT_EXTENSION_MODE V_TPT_EXTENSION_MODE(1U)
+
+#define S_XRC_INDICATION 19
+#define V_XRC_INDICATION(x) ((x) << S_XRC_INDICATION)
+#define F_XRC_INDICATION V_XRC_INDICATION(1U)
+
+#define S_LSO_1SEG_LEN_UPD_EN 18
+#define V_LSO_1SEG_LEN_UPD_EN(x) ((x) << S_LSO_1SEG_LEN_UPD_EN)
+#define F_LSO_1SEG_LEN_UPD_EN V_LSO_1SEG_LEN_UPD_EN(1U)
+
+#define S_PKT_ISGL_ERR_ST_EN 17
+#define V_PKT_ISGL_ERR_ST_EN(x) ((x) << S_PKT_ISGL_ERR_ST_EN)
+#define F_PKT_ISGL_ERR_ST_EN V_PKT_ISGL_ERR_ST_EN(1U)
+
#define A_ULP_TX_PERR_INJECT 0x8dc4
+
+#define S_T7_1_MEMSEL 1
+#define M_T7_1_MEMSEL 0x7fU
+#define V_T7_1_MEMSEL(x) ((x) << S_T7_1_MEMSEL)
+#define G_T7_1_MEMSEL(x) (((x) >> S_T7_1_MEMSEL) & M_T7_1_MEMSEL)
+
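/*
 * Usage sketch: the field macros above follow the file's standing
 * convention -- S_FOO is the bit offset, M_FOO the right-justified mask,
 * V_FOO(x) places a value into the field, and G_FOO(x) extracts it.
 * Updating the 7-bit T7_1_MEMSEL field in place, assuming <stdint.h>:
 */
static inline uint32_t
set_t7_1_memsel(uint32_t reg, uint32_t memsel)
{
	reg &= ~V_T7_1_MEMSEL(M_T7_1_MEMSEL);		/* clear old field */
	reg |= V_T7_1_MEMSEL(memsel & M_T7_1_MEMSEL);	/* insert new one */
	return (reg);
}
/* G_T7_1_MEMSEL(reg) recovers the value from a register read. */
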
#define A_ULP_TX_INT_ENABLE 0x8dc8
#define S_PBL_BOUND_ERR_CH3 31
@@ -28984,8 +36188,28 @@
#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0)
#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U)
+#define A_ULP_TX_INT_ENABLE_1 0x8dc8
+
+#define S_TLS_DSGL_PARERR3 3
+#define V_TLS_DSGL_PARERR3(x) ((x) << S_TLS_DSGL_PARERR3)
+#define F_TLS_DSGL_PARERR3 V_TLS_DSGL_PARERR3(1U)
+
+#define S_TLS_DSGL_PARERR2 2
+#define V_TLS_DSGL_PARERR2(x) ((x) << S_TLS_DSGL_PARERR2)
+#define F_TLS_DSGL_PARERR2 V_TLS_DSGL_PARERR2(1U)
+
+#define S_TLS_DSGL_PARERR1 1
+#define V_TLS_DSGL_PARERR1(x) ((x) << S_TLS_DSGL_PARERR1)
+#define F_TLS_DSGL_PARERR1 V_TLS_DSGL_PARERR1(1U)
+
+#define S_TLS_DSGL_PARERR0 0
+#define V_TLS_DSGL_PARERR0(x) ((x) << S_TLS_DSGL_PARERR0)
+#define F_TLS_DSGL_PARERR0 V_TLS_DSGL_PARERR0(1U)
+
#define A_ULP_TX_INT_CAUSE 0x8dcc
+#define A_ULP_TX_INT_CAUSE_1 0x8dcc
#define A_ULP_TX_PERR_ENABLE 0x8dd0
+#define A_ULP_TX_PERR_ENABLE_1 0x8dd0
#define A_ULP_TX_TPT_LLIMIT 0x8dd4
#define A_ULP_TX_TPT_ULIMIT 0x8dd8
#define A_ULP_TX_PBL_LLIMIT 0x8ddc
@@ -29014,6 +36238,13 @@
#define F_TLSDISABLE V_TLSDISABLE(1U)
#define A_ULP_TX_CPL_ERR_MASK_L 0x8de8
+#define A_ULP_TX_FID_1 0x8de8
+
+#define S_FID_1 0
+#define M_FID_1 0x7ffU
+#define V_FID_1(x) ((x) << S_FID_1)
+#define G_FID_1(x) (((x) >> S_FID_1) & M_FID_1)
+
#define A_ULP_TX_CPL_ERR_MASK_H 0x8dec
#define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0
#define A_ULP_TX_CPL_ERR_VALUE_H 0x8df4
@@ -29166,6 +36397,15 @@
#define V_WRREQ_SZ(x) ((x) << S_WRREQ_SZ)
#define G_WRREQ_SZ(x) (((x) >> S_WRREQ_SZ) & M_WRREQ_SZ)
+#define S_T7_GLOBALENABLE 31
+#define V_T7_GLOBALENABLE(x) ((x) << S_T7_GLOBALENABLE)
+#define F_T7_GLOBALENABLE V_T7_GLOBALENABLE(1U)
+
+#define S_RDREQ_SZ 3
+#define M_RDREQ_SZ 0x7U
+#define V_RDREQ_SZ(x) ((x) << S_RDREQ_SZ)
+#define G_RDREQ_SZ(x) (((x) >> S_RDREQ_SZ) & M_RDREQ_SZ)
+
#define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34
#define A_ULP_TX_PERR_INJECT_2 0x8e34
@@ -29385,6 +36625,200 @@
#define A_ULP_TX_INT_CAUSE_2 0x8e80
#define A_ULP_TX_PERR_ENABLE_2 0x8e84
+#define A_ULP_TX_INT_ENABLE_3 0x8e88
+
+#define S_GF_SGE_FIFO_PARERR3 31
+#define V_GF_SGE_FIFO_PARERR3(x) ((x) << S_GF_SGE_FIFO_PARERR3)
+#define F_GF_SGE_FIFO_PARERR3 V_GF_SGE_FIFO_PARERR3(1U)
+
+#define S_GF_SGE_FIFO_PARERR2 30
+#define V_GF_SGE_FIFO_PARERR2(x) ((x) << S_GF_SGE_FIFO_PARERR2)
+#define F_GF_SGE_FIFO_PARERR2 V_GF_SGE_FIFO_PARERR2(1U)
+
+#define S_GF_SGE_FIFO_PARERR1 29
+#define V_GF_SGE_FIFO_PARERR1(x) ((x) << S_GF_SGE_FIFO_PARERR1)
+#define F_GF_SGE_FIFO_PARERR1 V_GF_SGE_FIFO_PARERR1(1U)
+
+#define S_GF_SGE_FIFO_PARERR0 28
+#define V_GF_SGE_FIFO_PARERR0(x) ((x) << S_GF_SGE_FIFO_PARERR0)
+#define F_GF_SGE_FIFO_PARERR0 V_GF_SGE_FIFO_PARERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR3 27
+#define V_DEDUPE_SGE_FIFO_PARERR3(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR3)
+#define F_DEDUPE_SGE_FIFO_PARERR3 V_DEDUPE_SGE_FIFO_PARERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR2 26
+#define V_DEDUPE_SGE_FIFO_PARERR2(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR2)
+#define F_DEDUPE_SGE_FIFO_PARERR2 V_DEDUPE_SGE_FIFO_PARERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR1 25
+#define V_DEDUPE_SGE_FIFO_PARERR1(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR1)
+#define F_DEDUPE_SGE_FIFO_PARERR1 V_DEDUPE_SGE_FIFO_PARERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR0 24
+#define V_DEDUPE_SGE_FIFO_PARERR0(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR0)
+#define F_DEDUPE_SGE_FIFO_PARERR0 V_DEDUPE_SGE_FIFO_PARERR0(1U)
+
+#define S_GF3_DSGL_FIFO_PARERR 23
+#define V_GF3_DSGL_FIFO_PARERR(x) ((x) << S_GF3_DSGL_FIFO_PARERR)
+#define F_GF3_DSGL_FIFO_PARERR V_GF3_DSGL_FIFO_PARERR(1U)
+
+#define S_GF2_DSGL_FIFO_PARERR 22
+#define V_GF2_DSGL_FIFO_PARERR(x) ((x) << S_GF2_DSGL_FIFO_PARERR)
+#define F_GF2_DSGL_FIFO_PARERR V_GF2_DSGL_FIFO_PARERR(1U)
+
+#define S_GF1_DSGL_FIFO_PARERR 21
+#define V_GF1_DSGL_FIFO_PARERR(x) ((x) << S_GF1_DSGL_FIFO_PARERR)
+#define F_GF1_DSGL_FIFO_PARERR V_GF1_DSGL_FIFO_PARERR(1U)
+
+#define S_GF0_DSGL_FIFO_PARERR 20
+#define V_GF0_DSGL_FIFO_PARERR(x) ((x) << S_GF0_DSGL_FIFO_PARERR)
+#define F_GF0_DSGL_FIFO_PARERR V_GF0_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE3_DSGL_FIFO_PARERR 19
+#define V_DEDUPE3_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE3_DSGL_FIFO_PARERR)
+#define F_DEDUPE3_DSGL_FIFO_PARERR V_DEDUPE3_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE2_DSGL_FIFO_PARERR 18
+#define V_DEDUPE2_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE2_DSGL_FIFO_PARERR)
+#define F_DEDUPE2_DSGL_FIFO_PARERR V_DEDUPE2_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE1_DSGL_FIFO_PARERR 17
+#define V_DEDUPE1_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE1_DSGL_FIFO_PARERR)
+#define F_DEDUPE1_DSGL_FIFO_PARERR V_DEDUPE1_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE0_DSGL_FIFO_PARERR 16
+#define V_DEDUPE0_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE0_DSGL_FIFO_PARERR)
+#define F_DEDUPE0_DSGL_FIFO_PARERR V_DEDUPE0_DSGL_FIFO_PARERR(1U)
+
+#define S_XP10_SGE_FIFO_PARERR 15
+#define V_XP10_SGE_FIFO_PARERR(x) ((x) << S_XP10_SGE_FIFO_PARERR)
+#define F_XP10_SGE_FIFO_PARERR V_XP10_SGE_FIFO_PARERR(1U)
+
+#define S_DSGL_PAR_ERR 14
+#define V_DSGL_PAR_ERR(x) ((x) << S_DSGL_PAR_ERR)
+#define F_DSGL_PAR_ERR V_DSGL_PAR_ERR(1U)
+
+#define S_CDDIP_INT 13
+#define V_CDDIP_INT(x) ((x) << S_CDDIP_INT)
+#define F_CDDIP_INT V_CDDIP_INT(1U)
+
+#define S_CCEIP_INT 12
+#define V_CCEIP_INT(x) ((x) << S_CCEIP_INT)
+#define F_CCEIP_INT V_CCEIP_INT(1U)
+
+#define S_TLS_SGE_FIFO_PARERR3 11
+#define V_TLS_SGE_FIFO_PARERR3(x) ((x) << S_TLS_SGE_FIFO_PARERR3)
+#define F_TLS_SGE_FIFO_PARERR3 V_TLS_SGE_FIFO_PARERR3(1U)
+
+#define S_TLS_SGE_FIFO_PARERR2 10
+#define V_TLS_SGE_FIFO_PARERR2(x) ((x) << S_TLS_SGE_FIFO_PARERR2)
+#define F_TLS_SGE_FIFO_PARERR2 V_TLS_SGE_FIFO_PARERR2(1U)
+
+#define S_TLS_SGE_FIFO_PARERR1 9
+#define V_TLS_SGE_FIFO_PARERR1(x) ((x) << S_TLS_SGE_FIFO_PARERR1)
+#define F_TLS_SGE_FIFO_PARERR1 V_TLS_SGE_FIFO_PARERR1(1U)
+
+#define S_TLS_SGE_FIFO_PARERR0 8
+#define V_TLS_SGE_FIFO_PARERR0(x) ((x) << S_TLS_SGE_FIFO_PARERR0)
+#define F_TLS_SGE_FIFO_PARERR0 V_TLS_SGE_FIFO_PARERR0(1U)
+
+#define S_ULP2SMARBT_RSP_PERR 6
+#define V_ULP2SMARBT_RSP_PERR(x) ((x) << S_ULP2SMARBT_RSP_PERR)
+#define F_ULP2SMARBT_RSP_PERR V_ULP2SMARBT_RSP_PERR(1U)
+
+#define S_ULPTX2MA_RSP_PERR 5
+#define V_ULPTX2MA_RSP_PERR(x) ((x) << S_ULPTX2MA_RSP_PERR)
+#define F_ULPTX2MA_RSP_PERR V_ULPTX2MA_RSP_PERR(1U)
+
+#define S_PCIE2ULP_PERR3 4
+#define V_PCIE2ULP_PERR3(x) ((x) << S_PCIE2ULP_PERR3)
+#define F_PCIE2ULP_PERR3 V_PCIE2ULP_PERR3(1U)
+
+#define S_PCIE2ULP_PERR2 3
+#define V_PCIE2ULP_PERR2(x) ((x) << S_PCIE2ULP_PERR2)
+#define F_PCIE2ULP_PERR2 V_PCIE2ULP_PERR2(1U)
+
+#define S_PCIE2ULP_PERR1 2
+#define V_PCIE2ULP_PERR1(x) ((x) << S_PCIE2ULP_PERR1)
+#define F_PCIE2ULP_PERR1 V_PCIE2ULP_PERR1(1U)
+
+#define S_PCIE2ULP_PERR0 1
+#define V_PCIE2ULP_PERR0(x) ((x) << S_PCIE2ULP_PERR0)
+#define F_PCIE2ULP_PERR0 V_PCIE2ULP_PERR0(1U)
+
+#define S_CIM2ULP_PERR 0
+#define V_CIM2ULP_PERR(x) ((x) << S_CIM2ULP_PERR)
+#define F_CIM2ULP_PERR V_CIM2ULP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_3 0x8e8c
+#define A_ULP_TX_PERR_ENABLE_3 0x8e90
+#define A_ULP_TX_INT_ENABLE_4 0x8e94
+
+#define S_DMA_PAR_ERR3 28
+#define M_DMA_PAR_ERR3 0xfU
+#define V_DMA_PAR_ERR3(x) ((x) << S_DMA_PAR_ERR3)
+#define G_DMA_PAR_ERR3(x) (((x) >> S_DMA_PAR_ERR3) & M_DMA_PAR_ERR3)
+
+#define S_DMA_PAR_ERR2 24
+#define M_DMA_PAR_ERR2 0xfU
+#define V_DMA_PAR_ERR2(x) ((x) << S_DMA_PAR_ERR2)
+#define G_DMA_PAR_ERR2(x) (((x) >> S_DMA_PAR_ERR2) & M_DMA_PAR_ERR2)
+
+#define S_DMA_PAR_ERR1 20
+#define M_DMA_PAR_ERR1 0xfU
+#define V_DMA_PAR_ERR1(x) ((x) << S_DMA_PAR_ERR1)
+#define G_DMA_PAR_ERR1(x) (((x) >> S_DMA_PAR_ERR1) & M_DMA_PAR_ERR1)
+
+#define S_DMA_PAR_ERR0 16
+#define M_DMA_PAR_ERR0 0xfU
+#define V_DMA_PAR_ERR0(x) ((x) << S_DMA_PAR_ERR0)
+#define G_DMA_PAR_ERR0(x) (((x) >> S_DMA_PAR_ERR0) & M_DMA_PAR_ERR0)
+
+#define S_CORE_CMD_FIFO_LB1 12
+#define M_CORE_CMD_FIFO_LB1 0xfU
+#define V_CORE_CMD_FIFO_LB1(x) ((x) << S_CORE_CMD_FIFO_LB1)
+#define G_CORE_CMD_FIFO_LB1(x) (((x) >> S_CORE_CMD_FIFO_LB1) & M_CORE_CMD_FIFO_LB1)
+
+#define S_CORE_CMD_FIFO_LB0 8
+#define M_CORE_CMD_FIFO_LB0 0xfU
+#define V_CORE_CMD_FIFO_LB0(x) ((x) << S_CORE_CMD_FIFO_LB0)
+#define G_CORE_CMD_FIFO_LB0(x) (((x) >> S_CORE_CMD_FIFO_LB0) & M_CORE_CMD_FIFO_LB0)
+
+#define S_XP10_2_ULP_PERR 7
+#define V_XP10_2_ULP_PERR(x) ((x) << S_XP10_2_ULP_PERR)
+#define F_XP10_2_ULP_PERR V_XP10_2_ULP_PERR(1U)
+
+#define S_ULP_2_XP10_PERR 6
+#define V_ULP_2_XP10_PERR(x) ((x) << S_ULP_2_XP10_PERR)
+#define F_ULP_2_XP10_PERR V_ULP_2_XP10_PERR(1U)
+
+#define S_CMD_FIFO_LB1 5
+#define V_CMD_FIFO_LB1(x) ((x) << S_CMD_FIFO_LB1)
+#define F_CMD_FIFO_LB1 V_CMD_FIFO_LB1(1U)
+
+#define S_CMD_FIFO_LB0 4
+#define V_CMD_FIFO_LB0(x) ((x) << S_CMD_FIFO_LB0)
+#define F_CMD_FIFO_LB0 V_CMD_FIFO_LB0(1U)
+
+#define S_TF_TP_PERR 3
+#define V_TF_TP_PERR(x) ((x) << S_TF_TP_PERR)
+#define F_TF_TP_PERR V_TF_TP_PERR(1U)
+
+#define S_TF_SGE_PERR 2
+#define V_TF_SGE_PERR(x) ((x) << S_TF_SGE_PERR)
+#define F_TF_SGE_PERR V_TF_SGE_PERR(1U)
+
+#define S_TF_MEM_PERR 1
+#define V_TF_MEM_PERR(x) ((x) << S_TF_MEM_PERR)
+#define F_TF_MEM_PERR V_TF_MEM_PERR(1U)
+
+#define S_TF_MP_PERR 0
+#define V_TF_MP_PERR(x) ((x) << S_TF_MP_PERR)
+#define F_TF_MP_PERR V_TF_MP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_4 0x8e98
+#define A_ULP_TX_PERR_ENABLE_4 0x8e9c
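/*
 * Usage sketch: the ENABLE register presumably gates which CAUSE bits can
 * raise an interrupt.  A minimal handler, assuming <stdint.h>, hypothetical
 * read_reg()/write_reg() accessors, and the usual convention that writing
 * the set bits back to the CAUSE register clears them:
 */
extern uint32_t read_reg(struct adapter *adap, uint32_t addr);
extern void write_reg(struct adapter *adap, uint32_t addr, uint32_t val);

static void
ulp_tx_intr_4(struct adapter *adap)
{
	uint32_t cause = read_reg(adap, A_ULP_TX_INT_CAUSE_4);

	if (cause & F_TF_MEM_PERR) {
		/* handle the TF memory parity error */
	}
	if (G_DMA_PAR_ERR0(cause) != 0) {
		/* per-lane DMA parity status for channel 0 */
	}
	write_reg(adap, A_ULP_TX_INT_CAUSE_4, cause);	/* ack what we saw */
}
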
#define A_ULP_TX_SE_CNT_ERR 0x8ea0
#define S_ERR_CH3 12
@@ -29531,16 +36965,381 @@
#define A_ULP_TX_CSU_REVISION 0x8ebc
#define A_ULP_TX_LA_RDPTR_0 0x8ec0
+#define A_ULP_TX_PL2APB_INFO 0x8ec0
+
+#define S_PL2APB_BRIDGE_HUNG 27
+#define V_PL2APB_BRIDGE_HUNG(x) ((x) << S_PL2APB_BRIDGE_HUNG)
+#define F_PL2APB_BRIDGE_HUNG V_PL2APB_BRIDGE_HUNG(1U)
+
+#define S_PL2APB_BRIDGE_STATE 26
+#define V_PL2APB_BRIDGE_STATE(x) ((x) << S_PL2APB_BRIDGE_STATE)
+#define F_PL2APB_BRIDGE_STATE V_PL2APB_BRIDGE_STATE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_TYPE 25
+#define V_PL2APB_BRIDGE_HUNG_TYPE(x) ((x) << S_PL2APB_BRIDGE_HUNG_TYPE)
+#define F_PL2APB_BRIDGE_HUNG_TYPE V_PL2APB_BRIDGE_HUNG_TYPE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ID 24
+#define V_PL2APB_BRIDGE_HUNG_ID(x) ((x) << S_PL2APB_BRIDGE_HUNG_ID)
+#define F_PL2APB_BRIDGE_HUNG_ID V_PL2APB_BRIDGE_HUNG_ID(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ADDR 0
+#define M_PL2APB_BRIDGE_HUNG_ADDR 0xfffffU
+#define V_PL2APB_BRIDGE_HUNG_ADDR(x) ((x) << S_PL2APB_BRIDGE_HUNG_ADDR)
+#define G_PL2APB_BRIDGE_HUNG_ADDR(x) (((x) >> S_PL2APB_BRIDGE_HUNG_ADDR) & M_PL2APB_BRIDGE_HUNG_ADDR)
+
#define A_ULP_TX_LA_RDDATA_0 0x8ec4
+#define A_ULP_TX_INT_ENABLE_5 0x8ec4
+
+#define S_DEDUPE_PERR3 23
+#define V_DEDUPE_PERR3(x) ((x) << S_DEDUPE_PERR3)
+#define F_DEDUPE_PERR3 V_DEDUPE_PERR3(1U)
+
+#define S_DEDUPE_PERR2 22
+#define V_DEDUPE_PERR2(x) ((x) << S_DEDUPE_PERR2)
+#define F_DEDUPE_PERR2 V_DEDUPE_PERR2(1U)
+
+#define S_DEDUPE_PERR1 21
+#define V_DEDUPE_PERR1(x) ((x) << S_DEDUPE_PERR1)
+#define F_DEDUPE_PERR1 V_DEDUPE_PERR1(1U)
+
+#define S_DEDUPE_PERR0 20
+#define V_DEDUPE_PERR0(x) ((x) << S_DEDUPE_PERR0)
+#define F_DEDUPE_PERR0 V_DEDUPE_PERR0(1U)
+
+#define S_GF_PERR3 19
+#define V_GF_PERR3(x) ((x) << S_GF_PERR3)
+#define F_GF_PERR3 V_GF_PERR3(1U)
+
+#define S_GF_PERR2 18
+#define V_GF_PERR2(x) ((x) << S_GF_PERR2)
+#define F_GF_PERR2 V_GF_PERR2(1U)
+
+#define S_GF_PERR1 17
+#define V_GF_PERR1(x) ((x) << S_GF_PERR1)
+#define F_GF_PERR1 V_GF_PERR1(1U)
+
+#define S_GF_PERR0 16
+#define V_GF_PERR0(x) ((x) << S_GF_PERR0)
+#define F_GF_PERR0 V_GF_PERR0(1U)
+
+#define S_SGE2ULP_INV_PERR 13
+#define V_SGE2ULP_INV_PERR(x) ((x) << S_SGE2ULP_INV_PERR)
+#define F_SGE2ULP_INV_PERR V_SGE2ULP_INV_PERR(1U)
+
+#define S_T7_PL_BUSPERR 12
+#define V_T7_PL_BUSPERR(x) ((x) << S_T7_PL_BUSPERR)
+#define F_T7_PL_BUSPERR V_T7_PL_BUSPERR(1U)
+
+#define S_TLSTX2ULPTX_PERR3 11
+#define V_TLSTX2ULPTX_PERR3(x) ((x) << S_TLSTX2ULPTX_PERR3)
+#define F_TLSTX2ULPTX_PERR3 V_TLSTX2ULPTX_PERR3(1U)
+
+#define S_TLSTX2ULPTX_PERR2 10
+#define V_TLSTX2ULPTX_PERR2(x) ((x) << S_TLSTX2ULPTX_PERR2)
+#define F_TLSTX2ULPTX_PERR2 V_TLSTX2ULPTX_PERR2(1U)
+
+#define S_TLSTX2ULPTX_PERR1 9
+#define V_TLSTX2ULPTX_PERR1(x) ((x) << S_TLSTX2ULPTX_PERR1)
+#define F_TLSTX2ULPTX_PERR1 V_TLSTX2ULPTX_PERR1(1U)
+
+#define S_TLSTX2ULPTX_PERR0 8
+#define V_TLSTX2ULPTX_PERR0(x) ((x) << S_TLSTX2ULPTX_PERR0)
+#define F_TLSTX2ULPTX_PERR0 V_TLSTX2ULPTX_PERR0(1U)
+
+#define S_XP10_2_ULP_PL_PERR 1
+#define V_XP10_2_ULP_PL_PERR(x) ((x) << S_XP10_2_ULP_PL_PERR)
+#define F_XP10_2_ULP_PL_PERR V_XP10_2_ULP_PL_PERR(1U)
+
+#define S_ULP_2_XP10_PL_PERR 0
+#define V_ULP_2_XP10_PL_PERR(x) ((x) << S_ULP_2_XP10_PL_PERR)
+#define F_ULP_2_XP10_PL_PERR V_ULP_2_XP10_PL_PERR(1U)
+
#define A_ULP_TX_LA_WRPTR_0 0x8ec8
+#define A_ULP_TX_INT_CAUSE_5 0x8ec8
#define A_ULP_TX_LA_RESERVED_0 0x8ecc
+#define A_ULP_TX_PERR_ENABLE_5 0x8ecc
#define A_ULP_TX_LA_RDPTR_1 0x8ed0
+#define A_ULP_TX_INT_CAUSE_6 0x8ed0
+
+#define S_DDR_HDR_FIFO_PERR_SET3 12
+#define V_DDR_HDR_FIFO_PERR_SET3(x) ((x) << S_DDR_HDR_FIFO_PERR_SET3)
+#define F_DDR_HDR_FIFO_PERR_SET3 V_DDR_HDR_FIFO_PERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET2 11
+#define V_DDR_HDR_FIFO_PERR_SET2(x) ((x) << S_DDR_HDR_FIFO_PERR_SET2)
+#define F_DDR_HDR_FIFO_PERR_SET2 V_DDR_HDR_FIFO_PERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET1 10
+#define V_DDR_HDR_FIFO_PERR_SET1(x) ((x) << S_DDR_HDR_FIFO_PERR_SET1)
+#define F_DDR_HDR_FIFO_PERR_SET1 V_DDR_HDR_FIFO_PERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET0 9
+#define V_DDR_HDR_FIFO_PERR_SET0(x) ((x) << S_DDR_HDR_FIFO_PERR_SET0)
+#define F_DDR_HDR_FIFO_PERR_SET0 V_DDR_HDR_FIFO_PERR_SET0(1U)
+
+#define S_PRE_MP_RSP_PERR_SET3 8
+#define V_PRE_MP_RSP_PERR_SET3(x) ((x) << S_PRE_MP_RSP_PERR_SET3)
+#define F_PRE_MP_RSP_PERR_SET3 V_PRE_MP_RSP_PERR_SET3(1U)
+
+#define S_PRE_MP_RSP_PERR_SET2 7
+#define V_PRE_MP_RSP_PERR_SET2(x) ((x) << S_PRE_MP_RSP_PERR_SET2)
+#define F_PRE_MP_RSP_PERR_SET2 V_PRE_MP_RSP_PERR_SET2(1U)
+
+#define S_PRE_MP_RSP_PERR_SET1 6
+#define V_PRE_MP_RSP_PERR_SET1(x) ((x) << S_PRE_MP_RSP_PERR_SET1)
+#define F_PRE_MP_RSP_PERR_SET1 V_PRE_MP_RSP_PERR_SET1(1U)
+
+#define S_PRE_MP_RSP_PERR_SET0 5
+#define V_PRE_MP_RSP_PERR_SET0(x) ((x) << S_PRE_MP_RSP_PERR_SET0)
+#define F_PRE_MP_RSP_PERR_SET0 V_PRE_MP_RSP_PERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET3 4
+#define V_PRE_CQE_FIFO_PERR_SET3(x) ((x) << S_PRE_CQE_FIFO_PERR_SET3)
+#define F_PRE_CQE_FIFO_PERR_SET3 V_PRE_CQE_FIFO_PERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET2 3
+#define V_PRE_CQE_FIFO_PERR_SET2(x) ((x) << S_PRE_CQE_FIFO_PERR_SET2)
+#define F_PRE_CQE_FIFO_PERR_SET2 V_PRE_CQE_FIFO_PERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET1 2
+#define V_PRE_CQE_FIFO_PERR_SET1(x) ((x) << S_PRE_CQE_FIFO_PERR_SET1)
+#define F_PRE_CQE_FIFO_PERR_SET1 V_PRE_CQE_FIFO_PERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET0 1
+#define V_PRE_CQE_FIFO_PERR_SET0(x) ((x) << S_PRE_CQE_FIFO_PERR_SET0)
+#define F_PRE_CQE_FIFO_PERR_SET0 V_PRE_CQE_FIFO_PERR_SET0(1U)
+
+#define S_RSP_FIFO_PERR_SET 0
+#define V_RSP_FIFO_PERR_SET(x) ((x) << S_RSP_FIFO_PERR_SET)
+#define F_RSP_FIFO_PERR_SET V_RSP_FIFO_PERR_SET(1U)
+
#define A_ULP_TX_LA_RDDATA_1 0x8ed4
+#define A_ULP_TX_INT_ENABLE_6 0x8ed4
#define A_ULP_TX_LA_WRPTR_1 0x8ed8
+#define A_ULP_TX_PERR_ENABLE_6 0x8ed8
#define A_ULP_TX_LA_RESERVED_1 0x8edc
+#define A_ULP_TX_INT_CAUSE_7 0x8edc
+
+#define S_TLS_SGE_FIFO_CORERR3 23
+#define V_TLS_SGE_FIFO_CORERR3(x) ((x) << S_TLS_SGE_FIFO_CORERR3)
+#define F_TLS_SGE_FIFO_CORERR3 V_TLS_SGE_FIFO_CORERR3(1U)
+
+#define S_TLS_SGE_FIFO_CORERR2 22
+#define V_TLS_SGE_FIFO_CORERR2(x) ((x) << S_TLS_SGE_FIFO_CORERR2)
+#define F_TLS_SGE_FIFO_CORERR2 V_TLS_SGE_FIFO_CORERR2(1U)
+
+#define S_TLS_SGE_FIFO_CORERR1 21
+#define V_TLS_SGE_FIFO_CORERR1(x) ((x) << S_TLS_SGE_FIFO_CORERR1)
+#define F_TLS_SGE_FIFO_CORERR1 V_TLS_SGE_FIFO_CORERR1(1U)
+
+#define S_TLS_SGE_FIFO_CORERR0 20
+#define V_TLS_SGE_FIFO_CORERR0(x) ((x) << S_TLS_SGE_FIFO_CORERR0)
+#define F_TLS_SGE_FIFO_CORERR0 V_TLS_SGE_FIFO_CORERR0(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET3 19
+#define V_LSO_HDR_SRAM_CERR_SET3(x) ((x) << S_LSO_HDR_SRAM_CERR_SET3)
+#define F_LSO_HDR_SRAM_CERR_SET3 V_LSO_HDR_SRAM_CERR_SET3(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET2 18
+#define V_LSO_HDR_SRAM_CERR_SET2(x) ((x) << S_LSO_HDR_SRAM_CERR_SET2)
+#define F_LSO_HDR_SRAM_CERR_SET2 V_LSO_HDR_SRAM_CERR_SET2(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET1 17
+#define V_LSO_HDR_SRAM_CERR_SET1(x) ((x) << S_LSO_HDR_SRAM_CERR_SET1)
+#define F_LSO_HDR_SRAM_CERR_SET1 V_LSO_HDR_SRAM_CERR_SET1(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET0 16
+#define V_LSO_HDR_SRAM_CERR_SET0(x) ((x) << S_LSO_HDR_SRAM_CERR_SET0)
+#define F_LSO_HDR_SRAM_CERR_SET0 V_LSO_HDR_SRAM_CERR_SET0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB1 15
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB1 V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB1 14
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB1 V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB1 13
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB1 V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB1 12
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB1 V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB0 11
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB0 V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB0 10
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB0 V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB0 9
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB0 V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB0 8
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB0 V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(1U)
+
+#define S_CQE_FIFO_CERR_SET3 7
+#define V_CQE_FIFO_CERR_SET3(x) ((x) << S_CQE_FIFO_CERR_SET3)
+#define F_CQE_FIFO_CERR_SET3 V_CQE_FIFO_CERR_SET3(1U)
+
+#define S_CQE_FIFO_CERR_SET2 6
+#define V_CQE_FIFO_CERR_SET2(x) ((x) << S_CQE_FIFO_CERR_SET2)
+#define F_CQE_FIFO_CERR_SET2 V_CQE_FIFO_CERR_SET2(1U)
+
+#define S_CQE_FIFO_CERR_SET1 5
+#define V_CQE_FIFO_CERR_SET1(x) ((x) << S_CQE_FIFO_CERR_SET1)
+#define F_CQE_FIFO_CERR_SET1 V_CQE_FIFO_CERR_SET1(1U)
+
+#define S_CQE_FIFO_CERR_SET0 4
+#define V_CQE_FIFO_CERR_SET0(x) ((x) << S_CQE_FIFO_CERR_SET0)
+#define F_CQE_FIFO_CERR_SET0 V_CQE_FIFO_CERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET3 3
+#define V_PRE_CQE_FIFO_CERR_SET3(x) ((x) << S_PRE_CQE_FIFO_CERR_SET3)
+#define F_PRE_CQE_FIFO_CERR_SET3 V_PRE_CQE_FIFO_CERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET2 2
+#define V_PRE_CQE_FIFO_CERR_SET2(x) ((x) << S_PRE_CQE_FIFO_CERR_SET2)
+#define F_PRE_CQE_FIFO_CERR_SET2 V_PRE_CQE_FIFO_CERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET1 1
+#define V_PRE_CQE_FIFO_CERR_SET1(x) ((x) << S_PRE_CQE_FIFO_CERR_SET1)
+#define F_PRE_CQE_FIFO_CERR_SET1 V_PRE_CQE_FIFO_CERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET0 0
+#define V_PRE_CQE_FIFO_CERR_SET0(x) ((x) << S_PRE_CQE_FIFO_CERR_SET0)
+#define F_PRE_CQE_FIFO_CERR_SET0 V_PRE_CQE_FIFO_CERR_SET0(1U)
+
#define A_ULP_TX_LA_RDPTR_2 0x8ee0
+#define A_ULP_TX_INT_ENABLE_7 0x8ee0
#define A_ULP_TX_LA_RDDATA_2 0x8ee4
+#define A_ULP_TX_INT_CAUSE_8 0x8ee4
+
+#define S_MEM_RSP_FIFO_CERR_SET3 28
+#define V_MEM_RSP_FIFO_CERR_SET3(x) ((x) << S_MEM_RSP_FIFO_CERR_SET3)
+#define F_MEM_RSP_FIFO_CERR_SET3 V_MEM_RSP_FIFO_CERR_SET3(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET2 27
+#define V_MEM_RSP_FIFO_CERR_SET2(x) ((x) << S_MEM_RSP_FIFO_CERR_SET2)
+#define F_MEM_RSP_FIFO_CERR_SET2 V_MEM_RSP_FIFO_CERR_SET2(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET1 26
+#define V_MEM_RSP_FIFO_CERR_SET1(x) ((x) << S_MEM_RSP_FIFO_CERR_SET1)
+#define F_MEM_RSP_FIFO_CERR_SET1 V_MEM_RSP_FIFO_CERR_SET1(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET0 25
+#define V_MEM_RSP_FIFO_CERR_SET0(x) ((x) << S_MEM_RSP_FIFO_CERR_SET0)
+#define F_MEM_RSP_FIFO_CERR_SET0 V_MEM_RSP_FIFO_CERR_SET0(1U)
+
+#define S_PI_SRAM_CERR_SET3 24
+#define V_PI_SRAM_CERR_SET3(x) ((x) << S_PI_SRAM_CERR_SET3)
+#define F_PI_SRAM_CERR_SET3 V_PI_SRAM_CERR_SET3(1U)
+
+#define S_PI_SRAM_CERR_SET2 23
+#define V_PI_SRAM_CERR_SET2(x) ((x) << S_PI_SRAM_CERR_SET2)
+#define F_PI_SRAM_CERR_SET2 V_PI_SRAM_CERR_SET2(1U)
+
+#define S_PI_SRAM_CERR_SET1 22
+#define V_PI_SRAM_CERR_SET1(x) ((x) << S_PI_SRAM_CERR_SET1)
+#define F_PI_SRAM_CERR_SET1 V_PI_SRAM_CERR_SET1(1U)
+
+#define S_PI_SRAM_CERR_SET0 21
+#define V_PI_SRAM_CERR_SET0(x) ((x) << S_PI_SRAM_CERR_SET0)
+#define F_PI_SRAM_CERR_SET0 V_PI_SRAM_CERR_SET0(1U)
+
+#define S_PRE_MP_RSP_CERR_SET3 20
+#define V_PRE_MP_RSP_CERR_SET3(x) ((x) << S_PRE_MP_RSP_CERR_SET3)
+#define F_PRE_MP_RSP_CERR_SET3 V_PRE_MP_RSP_CERR_SET3(1U)
+
+#define S_PRE_MP_RSP_CERR_SET2 19
+#define V_PRE_MP_RSP_CERR_SET2(x) ((x) << S_PRE_MP_RSP_CERR_SET2)
+#define F_PRE_MP_RSP_CERR_SET2 V_PRE_MP_RSP_CERR_SET2(1U)
+
+#define S_PRE_MP_RSP_CERR_SET1 18
+#define V_PRE_MP_RSP_CERR_SET1(x) ((x) << S_PRE_MP_RSP_CERR_SET1)
+#define F_PRE_MP_RSP_CERR_SET1 V_PRE_MP_RSP_CERR_SET1(1U)
+
+#define S_PRE_MP_RSP_CERR_SET0 17
+#define V_PRE_MP_RSP_CERR_SET0(x) ((x) << S_PRE_MP_RSP_CERR_SET0)
+#define F_PRE_MP_RSP_CERR_SET0 V_PRE_MP_RSP_CERR_SET0(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET3 16
+#define V_DDR_HDR_FIFO_CERR_SET3(x) ((x) << S_DDR_HDR_FIFO_CERR_SET3)
+#define F_DDR_HDR_FIFO_CERR_SET3 V_DDR_HDR_FIFO_CERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET2 15
+#define V_DDR_HDR_FIFO_CERR_SET2(x) ((x) << S_DDR_HDR_FIFO_CERR_SET2)
+#define F_DDR_HDR_FIFO_CERR_SET2 V_DDR_HDR_FIFO_CERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET1 14
+#define V_DDR_HDR_FIFO_CERR_SET1(x) ((x) << S_DDR_HDR_FIFO_CERR_SET1)
+#define F_DDR_HDR_FIFO_CERR_SET1 V_DDR_HDR_FIFO_CERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET0 13
+#define V_DDR_HDR_FIFO_CERR_SET0(x) ((x) << S_DDR_HDR_FIFO_CERR_SET0)
+#define F_DDR_HDR_FIFO_CERR_SET0 V_DDR_HDR_FIFO_CERR_SET0(1U)
+
+#define S_CMD_FIFO_CERR_SET3 12
+#define V_CMD_FIFO_CERR_SET3(x) ((x) << S_CMD_FIFO_CERR_SET3)
+#define F_CMD_FIFO_CERR_SET3 V_CMD_FIFO_CERR_SET3(1U)
+
+#define S_CMD_FIFO_CERR_SET2 11
+#define V_CMD_FIFO_CERR_SET2(x) ((x) << S_CMD_FIFO_CERR_SET2)
+#define F_CMD_FIFO_CERR_SET2 V_CMD_FIFO_CERR_SET2(1U)
+
+#define S_CMD_FIFO_CERR_SET1 10
+#define V_CMD_FIFO_CERR_SET1(x) ((x) << S_CMD_FIFO_CERR_SET1)
+#define F_CMD_FIFO_CERR_SET1 V_CMD_FIFO_CERR_SET1(1U)
+
+#define S_CMD_FIFO_CERR_SET0 9
+#define V_CMD_FIFO_CERR_SET0(x) ((x) << S_CMD_FIFO_CERR_SET0)
+#define F_CMD_FIFO_CERR_SET0 V_CMD_FIFO_CERR_SET0(1U)
+
+#define S_GF_SGE_FIFO_CORERR3 8
+#define V_GF_SGE_FIFO_CORERR3(x) ((x) << S_GF_SGE_FIFO_CORERR3)
+#define F_GF_SGE_FIFO_CORERR3 V_GF_SGE_FIFO_CORERR3(1U)
+
+#define S_GF_SGE_FIFO_CORERR2 7
+#define V_GF_SGE_FIFO_CORERR2(x) ((x) << S_GF_SGE_FIFO_CORERR2)
+#define F_GF_SGE_FIFO_CORERR2 V_GF_SGE_FIFO_CORERR2(1U)
+
+#define S_GF_SGE_FIFO_CORERR1 6
+#define V_GF_SGE_FIFO_CORERR1(x) ((x) << S_GF_SGE_FIFO_CORERR1)
+#define F_GF_SGE_FIFO_CORERR1 V_GF_SGE_FIFO_CORERR1(1U)
+
+#define S_GF_SGE_FIFO_CORERR0 5
+#define V_GF_SGE_FIFO_CORERR0(x) ((x) << S_GF_SGE_FIFO_CORERR0)
+#define F_GF_SGE_FIFO_CORERR0 V_GF_SGE_FIFO_CORERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR3 4
+#define V_DEDUPE_SGE_FIFO_CORERR3(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR3)
+#define F_DEDUPE_SGE_FIFO_CORERR3 V_DEDUPE_SGE_FIFO_CORERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR2 3
+#define V_DEDUPE_SGE_FIFO_CORERR2(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR2)
+#define F_DEDUPE_SGE_FIFO_CORERR2 V_DEDUPE_SGE_FIFO_CORERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR1 2
+#define V_DEDUPE_SGE_FIFO_CORERR1(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR1)
+#define F_DEDUPE_SGE_FIFO_CORERR1 V_DEDUPE_SGE_FIFO_CORERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR0 1
+#define V_DEDUPE_SGE_FIFO_CORERR0(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR0)
+#define F_DEDUPE_SGE_FIFO_CORERR0 V_DEDUPE_SGE_FIFO_CORERR0(1U)
+
+#define S_RSP_FIFO_CERR_SET 0
+#define V_RSP_FIFO_CERR_SET(x) ((x) << S_RSP_FIFO_CERR_SET)
+#define F_RSP_FIFO_CERR_SET V_RSP_FIFO_CERR_SET(1U)
+
#define A_ULP_TX_LA_WRPTR_2 0x8ee8
+#define A_ULP_TX_INT_ENABLE_8 0x8ee8
#define A_ULP_TX_LA_RESERVED_2 0x8eec
#define A_ULP_TX_LA_RDPTR_3 0x8ef0
#define A_ULP_TX_LA_RDDATA_3 0x8ef4
@@ -29671,6 +37470,97 @@
#define V_SHOVE_LAST(x) ((x) << S_SHOVE_LAST)
#define F_SHOVE_LAST V_SHOVE_LAST(1U)
+#define A_ULP_TX_ACCELERATOR_CTL 0x8f90
+
+#define S_FIFO_THRESHOLD 8
+#define M_FIFO_THRESHOLD 0x1fU
+#define V_FIFO_THRESHOLD(x) ((x) << S_FIFO_THRESHOLD)
+#define G_FIFO_THRESHOLD(x) (((x) >> S_FIFO_THRESHOLD) & M_FIFO_THRESHOLD)
+
+#define S_COMPRESSION_XP10DISABLECFUSE 5
+#define V_COMPRESSION_XP10DISABLECFUSE(x) ((x) << S_COMPRESSION_XP10DISABLECFUSE)
+#define F_COMPRESSION_XP10DISABLECFUSE V_COMPRESSION_XP10DISABLECFUSE(1U)
+
+#define S_COMPRESSION_XP10DISABLE 4
+#define V_COMPRESSION_XP10DISABLE(x) ((x) << S_COMPRESSION_XP10DISABLE)
+#define F_COMPRESSION_XP10DISABLE V_COMPRESSION_XP10DISABLE(1U)
+
+#define S_DEDUPEDISABLECFUSE 3
+#define V_DEDUPEDISABLECFUSE(x) ((x) << S_DEDUPEDISABLECFUSE)
+#define F_DEDUPEDISABLECFUSE V_DEDUPEDISABLECFUSE(1U)
+
+#define S_DEDUPEDISABLE 2
+#define V_DEDUPEDISABLE(x) ((x) << S_DEDUPEDISABLE)
+#define F_DEDUPEDISABLE V_DEDUPEDISABLE(1U)
+
+#define S_GFDISABLECFUSE 1
+#define V_GFDISABLECFUSE(x) ((x) << S_GFDISABLECFUSE)
+#define F_GFDISABLECFUSE V_GFDISABLECFUSE(1U)
+
+#define S_GFDISABLE 0
+#define V_GFDISABLE(x) ((x) << S_GFDISABLE)
+#define F_GFDISABLE V_GFDISABLE(1U)
+
+#define A_ULP_TX_XP10_IND_ADDR 0x8f94
+
+#define S_XP10_CONTROL 31
+#define V_XP10_CONTROL(x) ((x) << S_XP10_CONTROL)
+#define F_XP10_CONTROL V_XP10_CONTROL(1U)
+
+#define S_XP10_ADDR 0
+#define M_XP10_ADDR 0xfffffU
+#define V_XP10_ADDR(x) ((x) << S_XP10_ADDR)
+#define G_XP10_ADDR(x) (((x) >> S_XP10_ADDR) & M_XP10_ADDR)
+
+#define A_ULP_TX_XP10_IND_DATA 0x8f98
+#define A_ULP_TX_IWARP_PMOF_OPCODES_1 0x8f9c
+
+#define S_RDMA_VERIFY_RESPONSE 24
+#define M_RDMA_VERIFY_RESPONSE 0x1fU
+#define V_RDMA_VERIFY_RESPONSE(x) ((x) << S_RDMA_VERIFY_RESPONSE)
+#define G_RDMA_VERIFY_RESPONSE(x) (((x) >> S_RDMA_VERIFY_RESPONSE) & M_RDMA_VERIFY_RESPONSE)
+
+#define S_RDMA_VERIFY_REQUEST 16
+#define M_RDMA_VERIFY_REQUEST 0x1fU
+#define V_RDMA_VERIFY_REQUEST(x) ((x) << S_RDMA_VERIFY_REQUEST)
+#define G_RDMA_VERIFY_REQUEST(x) (((x) >> S_RDMA_VERIFY_REQUEST) & M_RDMA_VERIFY_REQUEST)
+
+#define S_RDMA_FLUSH_RESPONSE 8
+#define M_RDMA_FLUSH_RESPONSE 0x1fU
+#define V_RDMA_FLUSH_RESPONSE(x) ((x) << S_RDMA_FLUSH_RESPONSE)
+#define G_RDMA_FLUSH_RESPONSE(x) (((x) >> S_RDMA_FLUSH_RESPONSE) & M_RDMA_FLUSH_RESPONSE)
+
+#define S_RDMA_FLUSH_REQUEST 0
+#define M_RDMA_FLUSH_REQUEST 0x1fU
+#define V_RDMA_FLUSH_REQUEST(x) ((x) << S_RDMA_FLUSH_REQUEST)
+#define G_RDMA_FLUSH_REQUEST(x) (((x) >> S_RDMA_FLUSH_REQUEST) & M_RDMA_FLUSH_REQUEST)
+
+#define A_ULP_TX_IWARP_PMOF_OPCODES_2 0x8fa0
+
+#define S_RDMA_SEND_WITH_SE_IMMEDIATE 24
+#define M_RDMA_SEND_WITH_SE_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_SE_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_SE_IMMEDIATE)
+#define G_RDMA_SEND_WITH_SE_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_SE_IMMEDIATE) & M_RDMA_SEND_WITH_SE_IMMEDIATE)
+
+#define S_RDMA_SEND_WITH_IMMEDIATE 16
+#define M_RDMA_SEND_WITH_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_IMMEDIATE)
+#define G_RDMA_SEND_WITH_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_IMMEDIATE) & M_RDMA_SEND_WITH_IMMEDIATE)
+
+#define S_RDMA_ATOMIC_WRITE_RESPONSE 8
+#define M_RDMA_ATOMIC_WRITE_RESPONSE 0x1fU
+#define V_RDMA_ATOMIC_WRITE_RESPONSE(x) ((x) << S_RDMA_ATOMIC_WRITE_RESPONSE)
+#define G_RDMA_ATOMIC_WRITE_RESPONSE(x) (((x) >> S_RDMA_ATOMIC_WRITE_RESPONSE) & M_RDMA_ATOMIC_WRITE_RESPONSE)
+
+#define S_RDMA_ATOMIC_WRITE_REQUEST 0
+#define M_RDMA_ATOMIC_WRITE_REQUEST 0x1fU
+#define V_RDMA_ATOMIC_WRITE_REQUEST(x) ((x) << S_RDMA_ATOMIC_WRITE_REQUEST)
+#define G_RDMA_ATOMIC_WRITE_REQUEST(x) (((x) >> S_RDMA_ATOMIC_WRITE_REQUEST) & M_RDMA_ATOMIC_WRITE_REQUEST)
+
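/*
 * Usage sketch: each PMOF opcode register packs four 5-bit RDMA opcode
 * numbers.  Composing the OPCODES_1 value with the V_ macros (the opcode
 * arguments here are placeholders, not real opcode assignments):
 */
static inline uint32_t
pmof_opcodes_1(uint32_t vrsp, uint32_t vreq, uint32_t frsp, uint32_t freq)
{
	return (V_RDMA_VERIFY_RESPONSE(vrsp) | V_RDMA_VERIFY_REQUEST(vreq) |
	    V_RDMA_FLUSH_RESPONSE(frsp) | V_RDMA_FLUSH_REQUEST(freq));
}
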
+#define A_ULP_TX_NVME_TCP_TPT_LLIMIT 0x8fa4
+#define A_ULP_TX_NVME_TCP_TPT_ULIMIT 0x8fa8
+#define A_ULP_TX_NVME_TCP_PBL_LLIMIT 0x8fac
+#define A_ULP_TX_NVME_TCP_PBL_ULIMIT 0x8fb0
#define A_ULP_TX_TLS_IND_CMD 0x8fb8
#define S_TLS_TX_REG_OFF_ADDR 0
@@ -29678,7 +37568,48 @@
#define V_TLS_TX_REG_OFF_ADDR(x) ((x) << S_TLS_TX_REG_OFF_ADDR)
#define G_TLS_TX_REG_OFF_ADDR(x) (((x) >> S_TLS_TX_REG_OFF_ADDR) & M_TLS_TX_REG_OFF_ADDR)
+#define A_ULP_TX_DBG_CTL 0x8fb8
#define A_ULP_TX_TLS_IND_DATA 0x8fbc
+#define A_ULP_TX_DBG_DATA 0x8fbc
+#define A_ULP_TX_TLS_CH0_PERR_CAUSE 0xc
+
+#define S_GLUE_PERR 3
+#define V_GLUE_PERR(x) ((x) << S_GLUE_PERR)
+#define F_GLUE_PERR V_GLUE_PERR(1U)
+
+#define S_DSGL_PERR 2
+#define V_DSGL_PERR(x) ((x) << S_DSGL_PERR)
+#define F_DSGL_PERR V_DSGL_PERR(1U)
+
+#define S_SGE_PERR 1
+#define V_SGE_PERR(x) ((x) << S_SGE_PERR)
+#define F_SGE_PERR V_SGE_PERR(1U)
+
+#define S_KEX_PERR 0
+#define V_KEX_PERR(x) ((x) << S_KEX_PERR)
+#define F_KEX_PERR V_KEX_PERR(1U)
+
+#define A_ULP_TX_TLS_CH0_PERR_ENABLE 0x10
+#define A_ULP_TX_TLS_CH0_HMACCTRL_CFG 0x20
+
+#define S_HMAC_CFG6 12
+#define M_HMAC_CFG6 0x3fU
+#define V_HMAC_CFG6(x) ((x) << S_HMAC_CFG6)
+#define G_HMAC_CFG6(x) (((x) >> S_HMAC_CFG6) & M_HMAC_CFG6)
+
+#define S_HMAC_CFG5 6
+#define M_HMAC_CFG5 0x3fU
+#define V_HMAC_CFG5(x) ((x) << S_HMAC_CFG5)
+#define G_HMAC_CFG5(x) (((x) >> S_HMAC_CFG5) & M_HMAC_CFG5)
+
+#define S_HMAC_CFG4 0
+#define M_HMAC_CFG4 0x3fU
+#define V_HMAC_CFG4(x) ((x) << S_HMAC_CFG4)
+#define G_HMAC_CFG4(x) (((x) >> S_HMAC_CFG4) & M_HMAC_CFG4)
+
+#define A_ULP_TX_TLS_CH1_PERR_CAUSE 0x4c
+#define A_ULP_TX_TLS_CH1_PERR_ENABLE 0x50
+#define A_ULP_TX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module PM_RX */
#define PM_RX_BASE_ADDR 0x8fc0
@@ -29703,6 +37634,31 @@
#define V_PREFETCH_ENABLE(x) ((x) << S_PREFETCH_ENABLE)
#define F_PREFETCH_ENABLE V_PREFETCH_ENABLE(1U)
+#define S_CACHE_HOLD 13
+#define V_CACHE_HOLD(x) ((x) << S_CACHE_HOLD)
+#define F_CACHE_HOLD V_CACHE_HOLD(1U)
+
+#define S_CACHE_INIT_DONE 12
+#define V_CACHE_INIT_DONE(x) ((x) << S_CACHE_INIT_DONE)
+#define F_CACHE_INIT_DONE V_CACHE_INIT_DONE(1U)
+
+#define S_CACHE_DEPTH 8
+#define M_CACHE_DEPTH 0xfU
+#define V_CACHE_DEPTH(x) ((x) << S_CACHE_DEPTH)
+#define G_CACHE_DEPTH(x) (((x) >> S_CACHE_DEPTH) & M_CACHE_DEPTH)
+
+#define S_CACHE_INIT 7
+#define V_CACHE_INIT(x) ((x) << S_CACHE_INIT)
+#define F_CACHE_INIT V_CACHE_INIT(1U)
+
+#define S_CACHE_SLEEP 6
+#define V_CACHE_SLEEP(x) ((x) << S_CACHE_SLEEP)
+#define F_CACHE_SLEEP V_CACHE_SLEEP(1U)
+
+#define S_CACHE_BYPASS 5
+#define V_CACHE_BYPASS(x) ((x) << S_CACHE_BYPASS)
+#define F_CACHE_BYPASS V_CACHE_BYPASS(1U)
+
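/*
 * Usage sketch: CACHE_INIT looks like a kick-off bit and CACHE_INIT_DONE
 * its completion flag, suggesting a read-modify-write followed by a poll.
 * Assuming the same hypothetical read_reg()/write_reg() accessors as above
 * and that these bits live in a PM_RX config register at cfg_addr:
 */
static int
pm_rx_cache_init(struct adapter *adap, uint32_t cfg_addr)
{
	int i;

	write_reg(adap, cfg_addr, read_reg(adap, cfg_addr) | F_CACHE_INIT);
	for (i = 0; i < 1000; i++) {
		if (read_reg(adap, cfg_addr) & F_CACHE_INIT_DONE)
			return (0);
	}
	return (-1);		/* timed out */
}
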
#define A_PM_RX_STAT_CONFIG 0x8fc8
#define A_PM_RX_STAT_COUNT 0x8fcc
#define A_PM_RX_STAT_LSB 0x8fd0
@@ -29723,6 +37679,11 @@
#define V_PMDBGADDR(x) ((x) << S_PMDBGADDR)
#define G_PMDBGADDR(x) (((x) >> S_PMDBGADDR) & M_PMDBGADDR)
+#define S_T7_OSPIWRBUSY_T5 21
+#define M_T7_OSPIWRBUSY_T5 0xfU
+#define V_T7_OSPIWRBUSY_T5(x) ((x) << S_T7_OSPIWRBUSY_T5)
+#define G_T7_OSPIWRBUSY_T5(x) (((x) >> S_T7_OSPIWRBUSY_T5) & M_T7_OSPIWRBUSY_T5)
+
#define A_PM_RX_STAT_MSB 0x8fd4
#define A_PM_RX_DBG_DATA 0x8fd4
#define A_PM_RX_INT_ENABLE 0x8fd8
@@ -29843,7 +37804,36 @@
#define V_SDC_ERR(x) ((x) << S_SDC_ERR)
#define F_SDC_ERR V_SDC_ERR(1U)
+#define S_MASTER_PERR 31
+#define V_MASTER_PERR(x) ((x) << S_MASTER_PERR)
+#define F_MASTER_PERR V_MASTER_PERR(1U)
+
+#define S_T7_OSPI_OVERFLOW3 30
+#define V_T7_OSPI_OVERFLOW3(x) ((x) << S_T7_OSPI_OVERFLOW3)
+#define F_T7_OSPI_OVERFLOW3 V_T7_OSPI_OVERFLOW3(1U)
+
+#define S_T7_OSPI_OVERFLOW2 29
+#define V_T7_OSPI_OVERFLOW2(x) ((x) << S_T7_OSPI_OVERFLOW2)
+#define F_T7_OSPI_OVERFLOW2 V_T7_OSPI_OVERFLOW2(1U)
+
#define A_PM_RX_INT_CAUSE 0x8fdc
+
+#define S_CACHE_SRAM_ERROR 3
+#define V_CACHE_SRAM_ERROR(x) ((x) << S_CACHE_SRAM_ERROR)
+#define F_CACHE_SRAM_ERROR V_CACHE_SRAM_ERROR(1U)
+
+#define S_CACHE_LRU_ERROR 2
+#define V_CACHE_LRU_ERROR(x) ((x) << S_CACHE_LRU_ERROR)
+#define F_CACHE_LRU_ERROR V_CACHE_LRU_ERROR(1U)
+
+#define S_CACHE_ISLAND_ERROR 1
+#define V_CACHE_ISLAND_ERROR(x) ((x) << S_CACHE_ISLAND_ERROR)
+#define F_CACHE_ISLAND_ERROR V_CACHE_ISLAND_ERROR(1U)
+
+#define S_CACHE_CTRL_ERROR 0
+#define V_CACHE_CTRL_ERROR(x) ((x) << S_CACHE_CTRL_ERROR)
+#define F_CACHE_CTRL_ERROR V_CACHE_CTRL_ERROR(1U)
+
#define A_PM_RX_ISPI_DBG_4B_DATA0 0x10000
#define A_PM_RX_ISPI_DBG_4B_DATA1 0x10001
#define A_PM_RX_ISPI_DBG_4B_DATA2 0x10002
@@ -29959,12 +37949,25 @@
#define V_CHNL0_MAX_DEFICIT_CNT(x) ((x) << S_CHNL0_MAX_DEFICIT_CNT)
#define G_CHNL0_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL0_MAX_DEFICIT_CNT) & M_CHNL0_MAX_DEFICIT_CNT)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT0 0x1001c
#define A_PM_RX_FEATURE_EN 0x1001d
#define S_PIO_CH_DEFICIT_CTL_EN_RX 0
#define V_PIO_CH_DEFICIT_CTL_EN_RX(x) ((x) << S_PIO_CH_DEFICIT_CTL_EN_RX)
#define F_PIO_CH_DEFICIT_CTL_EN_RX V_PIO_CH_DEFICIT_CTL_EN_RX(1U)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT1 0x1001d
+
+#define S_CHNL3_MAX_DEFICIT_CNT 16
+#define M_CHNL3_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL3_MAX_DEFICIT_CNT(x) ((x) << S_CHNL3_MAX_DEFICIT_CNT)
+#define G_CHNL3_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL3_MAX_DEFICIT_CNT) & M_CHNL3_MAX_DEFICIT_CNT)
+
+#define S_CHNL2_MAX_DEFICIT_CNT 0
+#define M_CHNL2_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL2_MAX_DEFICIT_CNT(x) ((x) << S_CHNL2_MAX_DEFICIT_CNT)
+#define G_CHNL2_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL2_MAX_DEFICIT_CNT) & M_CHNL2_MAX_DEFICIT_CNT)
+
#define A_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x1001e
#define S_CH0_OSPI_DEFICIT_THRSHLD 0
@@ -30245,16 +38248,6 @@
#define V_RX_C_TXAFULL(x) ((x) << S_RX_C_TXAFULL)
#define G_RX_C_TXAFULL(x) (((x) >> S_RX_C_TXAFULL) & M_RX_C_TXAFULL)
-#define S_T6_RX_PCMD_DRDY 26
-#define M_T6_RX_PCMD_DRDY 0x3U
-#define V_T6_RX_PCMD_DRDY(x) ((x) << S_T6_RX_PCMD_DRDY)
-#define G_T6_RX_PCMD_DRDY(x) (((x) >> S_T6_RX_PCMD_DRDY) & M_T6_RX_PCMD_DRDY)
-
-#define S_T6_RX_PCMD_SRDY 24
-#define M_T6_RX_PCMD_SRDY 0x3U
-#define V_T6_RX_PCMD_SRDY(x) ((x) << S_T6_RX_PCMD_SRDY)
-#define G_T6_RX_PCMD_SRDY(x) (((x) >> S_T6_RX_PCMD_SRDY) & M_T6_RX_PCMD_SRDY)
-
#define A_PM_RX_DBG_STAT6 0x10027
#define S_RX_M_INTRNL_FIFO_CNT 4
@@ -30434,6 +38427,179 @@
#define V_RX_BUNDLE_LEN0(x) ((x) << S_RX_BUNDLE_LEN0)
#define G_RX_BUNDLE_LEN0(x) (((x) >> S_RX_BUNDLE_LEN0) & M_RX_BUNDLE_LEN0)
+#define A_PM_RX_INT_CAUSE_MASK_HALT_2 0x10049
+#define A_PM_RX_INT_ENABLE_2 0x10060
+
+#define S_CACHE_SRAM_ODD_CERR 12
+#define V_CACHE_SRAM_ODD_CERR(x) ((x) << S_CACHE_SRAM_ODD_CERR)
+#define F_CACHE_SRAM_ODD_CERR V_CACHE_SRAM_ODD_CERR(1U)
+
+#define S_CACHE_SRAM_EVEN_CERR 11
+#define V_CACHE_SRAM_EVEN_CERR(x) ((x) << S_CACHE_SRAM_EVEN_CERR)
+#define F_CACHE_SRAM_EVEN_CERR V_CACHE_SRAM_EVEN_CERR(1U)
+
+#define S_CACHE_LRU_LEFT_CERR 10
+#define V_CACHE_LRU_LEFT_CERR(x) ((x) << S_CACHE_LRU_LEFT_CERR)
+#define F_CACHE_LRU_LEFT_CERR V_CACHE_LRU_LEFT_CERR(1U)
+
+#define S_CACHE_LRU_RIGHT_CERR 9
+#define V_CACHE_LRU_RIGHT_CERR(x) ((x) << S_CACHE_LRU_RIGHT_CERR)
+#define F_CACHE_LRU_RIGHT_CERR V_CACHE_LRU_RIGHT_CERR(1U)
+
+#define S_CACHE_ISLAND_CERR 8
+#define V_CACHE_ISLAND_CERR(x) ((x) << S_CACHE_ISLAND_CERR)
+#define F_CACHE_ISLAND_CERR V_CACHE_ISLAND_CERR(1U)
+
+#define S_OCSPI_CERR 7
+#define V_OCSPI_CERR(x) ((x) << S_OCSPI_CERR)
+#define F_OCSPI_CERR V_OCSPI_CERR(1U)
+
+#define S_IESPI_CERR 6
+#define V_IESPI_CERR(x) ((x) << S_IESPI_CERR)
+#define F_IESPI_CERR V_IESPI_CERR(1U)
+
+#define S_OCSPI2_RX_FRAMING_ERROR 5
+#define V_OCSPI2_RX_FRAMING_ERROR(x) ((x) << S_OCSPI2_RX_FRAMING_ERROR)
+#define F_OCSPI2_RX_FRAMING_ERROR V_OCSPI2_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_RX_FRAMING_ERROR 4
+#define V_OCSPI3_RX_FRAMING_ERROR(x) ((x) << S_OCSPI3_RX_FRAMING_ERROR)
+#define F_OCSPI3_RX_FRAMING_ERROR V_OCSPI3_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_TX_FRAMING_ERROR 3
+#define V_OCSPI2_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_TX_FRAMING_ERROR)
+#define F_OCSPI2_TX_FRAMING_ERROR V_OCSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_TX_FRAMING_ERROR 2
+#define V_OCSPI3_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_TX_FRAMING_ERROR)
+#define F_OCSPI3_TX_FRAMING_ERROR V_OCSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI2_OFIFO2X_TX_FRAMING_ERROR V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI3_OFIFO2X_TX_FRAMING_ERROR V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define A_PM_RX_INT_CAUSE_2 0x10061
+#define A_PM_RX_PERR_ENABLE 0x10062
+
+#define S_T7_SDC_ERR 31
+#define V_T7_SDC_ERR(x) ((x) << S_T7_SDC_ERR)
+#define F_T7_SDC_ERR V_T7_SDC_ERR(1U)
+
+#define S_T7_MA_INTF_SDC_ERR 30
+#define V_T7_MA_INTF_SDC_ERR(x) ((x) << S_T7_MA_INTF_SDC_ERR)
+#define F_T7_MA_INTF_SDC_ERR V_T7_MA_INTF_SDC_ERR(1U)
+
+#define S_E_PCMD_PERR 21
+#define V_E_PCMD_PERR(x) ((x) << S_E_PCMD_PERR)
+#define F_E_PCMD_PERR V_E_PCMD_PERR(1U)
+
+#define S_CACHE_RSP_DFIFO_PERR 20
+#define V_CACHE_RSP_DFIFO_PERR(x) ((x) << S_CACHE_RSP_DFIFO_PERR)
+#define F_CACHE_RSP_DFIFO_PERR V_CACHE_RSP_DFIFO_PERR(1U)
+
+#define S_CACHE_SRAM_ODD_PERR 19
+#define V_CACHE_SRAM_ODD_PERR(x) ((x) << S_CACHE_SRAM_ODD_PERR)
+#define F_CACHE_SRAM_ODD_PERR V_CACHE_SRAM_ODD_PERR(1U)
+
+#define S_CACHE_SRAM_EVEN_PERR 18
+#define V_CACHE_SRAM_EVEN_PERR(x) ((x) << S_CACHE_SRAM_EVEN_PERR)
+#define F_CACHE_SRAM_EVEN_PERR V_CACHE_SRAM_EVEN_PERR(1U)
+
+#define S_CACHE_RSVD_PERR 17
+#define V_CACHE_RSVD_PERR(x) ((x) << S_CACHE_RSVD_PERR)
+#define F_CACHE_RSVD_PERR V_CACHE_RSVD_PERR(1U)
+
+#define S_CACHE_LRU_LEFT_PERR 16
+#define V_CACHE_LRU_LEFT_PERR(x) ((x) << S_CACHE_LRU_LEFT_PERR)
+#define F_CACHE_LRU_LEFT_PERR V_CACHE_LRU_LEFT_PERR(1U)
+
+#define S_CACHE_LRU_RIGHT_PERR 15
+#define V_CACHE_LRU_RIGHT_PERR(x) ((x) << S_CACHE_LRU_RIGHT_PERR)
+#define F_CACHE_LRU_RIGHT_PERR V_CACHE_LRU_RIGHT_PERR(1U)
+
+#define S_CACHE_RSP_CMD_PERR 14
+#define V_CACHE_RSP_CMD_PERR(x) ((x) << S_CACHE_RSP_CMD_PERR)
+#define F_CACHE_RSP_CMD_PERR V_CACHE_RSP_CMD_PERR(1U)
+
+#define S_CACHE_SRAM_CMD_PERR 13
+#define V_CACHE_SRAM_CMD_PERR(x) ((x) << S_CACHE_SRAM_CMD_PERR)
+#define F_CACHE_SRAM_CMD_PERR V_CACHE_SRAM_CMD_PERR(1U)
+
+#define S_CACHE_MA_CMD_PERR 12
+#define V_CACHE_MA_CMD_PERR(x) ((x) << S_CACHE_MA_CMD_PERR)
+#define F_CACHE_MA_CMD_PERR V_CACHE_MA_CMD_PERR(1U)
+
+#define S_CACHE_TCAM_PERR 11
+#define V_CACHE_TCAM_PERR(x) ((x) << S_CACHE_TCAM_PERR)
+#define F_CACHE_TCAM_PERR V_CACHE_TCAM_PERR(1U)
+
+#define S_CACHE_ISLAND_PERR 10
+#define V_CACHE_ISLAND_PERR(x) ((x) << S_CACHE_ISLAND_PERR)
+#define F_CACHE_ISLAND_PERR V_CACHE_ISLAND_PERR(1U)
+
+#define S_MC_WCNT_FIFO_PERR 9
+#define V_MC_WCNT_FIFO_PERR(x) ((x) << S_MC_WCNT_FIFO_PERR)
+#define F_MC_WCNT_FIFO_PERR V_MC_WCNT_FIFO_PERR(1U)
+
+#define S_MC_WDATA_FIFO_PERR 8
+#define V_MC_WDATA_FIFO_PERR(x) ((x) << S_MC_WDATA_FIFO_PERR)
+#define F_MC_WDATA_FIFO_PERR V_MC_WDATA_FIFO_PERR(1U)
+
+#define S_MC_RCNT_FIFO_PERR 7
+#define V_MC_RCNT_FIFO_PERR(x) ((x) << S_MC_RCNT_FIFO_PERR)
+#define F_MC_RCNT_FIFO_PERR V_MC_RCNT_FIFO_PERR(1U)
+
+#define S_MC_RDATA_FIFO_PERR 6
+#define V_MC_RDATA_FIFO_PERR(x) ((x) << S_MC_RDATA_FIFO_PERR)
+#define F_MC_RDATA_FIFO_PERR V_MC_RDATA_FIFO_PERR(1U)
+
+#define S_TOKEN_FIFO_PERR 5
+#define V_TOKEN_FIFO_PERR(x) ((x) << S_TOKEN_FIFO_PERR)
+#define F_TOKEN_FIFO_PERR V_TOKEN_FIFO_PERR(1U)
+
+#define S_T7_BUNDLE_LEN_PARERR 4
+#define V_T7_BUNDLE_LEN_PARERR(x) ((x) << S_T7_BUNDLE_LEN_PARERR)
+#define F_T7_BUNDLE_LEN_PARERR V_T7_BUNDLE_LEN_PARERR(1U)
+
+#define A_PM_RX_PERR_CAUSE 0x10063
+#define A_PM_RX_EXT_CFIFO_CONFIG0 0x10070
+
+#define S_CH1_PTR_MAX 17
+#define M_CH1_PTR_MAX 0x7fffU
+#define V_CH1_PTR_MAX(x) ((x) << S_CH1_PTR_MAX)
+#define G_CH1_PTR_MAX(x) (((x) >> S_CH1_PTR_MAX) & M_CH1_PTR_MAX)
+
+#define S_CH0_PTR_MAX 1
+#define M_CH0_PTR_MAX 0x7fffU
+#define V_CH0_PTR_MAX(x) ((x) << S_CH0_PTR_MAX)
+#define G_CH0_PTR_MAX(x) (((x) >> S_CH0_PTR_MAX) & M_CH0_PTR_MAX)
+
+#define S_STROBE 0
+#define V_STROBE(x) ((x) << S_STROBE)
+#define F_STROBE V_STROBE(1U)
+
+#define A_PM_RX_EXT_CFIFO_CONFIG1 0x10071
+
+#define S_CH2_PTR_MAX 1
+#define M_CH2_PTR_MAX 0x7fffU
+#define V_CH2_PTR_MAX(x) ((x) << S_CH2_PTR_MAX)
+#define G_CH2_PTR_MAX(x) (((x) >> S_CH2_PTR_MAX) & M_CH2_PTR_MAX)
+
+#define A_PM_RX_EXT_EFIFO_CONFIG0 0x10072
+#define A_PM_RX_EXT_EFIFO_CONFIG1 0x10073
+#define A_T7_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x10074
+#define A_T7_PM_RX_CH1_OSPI_DEFICIT_THRSHLD 0x10075
+#define A_PM_RX_CH2_OSPI_DEFICIT_THRSHLD 0x10076
+#define A_PM_RX_CH3_OSPI_DEFICIT_THRSHLD 0x10077
+#define A_T7_PM_RX_FEATURE_EN 0x10078
+#define A_PM_RX_TCAM_BIST_CTRL 0x10080
+#define A_PM_RX_TCAM_BIST_CB_PASS 0x10081
+#define A_PM_RX_TCAM_BIST_CB_BUSY 0x10082
+
/* registers for module PM_TX */
#define PM_TX_BASE_ADDR 0x8fe0
@@ -30613,6 +38779,118 @@
#define V_C_PCMD_PAR_ERROR(x) ((x) << S_C_PCMD_PAR_ERROR)
#define F_C_PCMD_PAR_ERROR V_C_PCMD_PAR_ERROR(1U)
+#define S_T7_ZERO_C_CMD_ERROR 30
+#define V_T7_ZERO_C_CMD_ERROR(x) ((x) << S_T7_ZERO_C_CMD_ERROR)
+#define F_T7_ZERO_C_CMD_ERROR V_T7_ZERO_C_CMD_ERROR(1U)
+
+#define S_OESPI_COR_ERR 29
+#define V_OESPI_COR_ERR(x) ((x) << S_OESPI_COR_ERR)
+#define F_OESPI_COR_ERR V_OESPI_COR_ERR(1U)
+
+#define S_ICSPI_COR_ERR 28
+#define V_ICSPI_COR_ERR(x) ((x) << S_ICSPI_COR_ERR)
+#define F_ICSPI_COR_ERR V_ICSPI_COR_ERR(1U)
+
+#define S_ICSPI_OVFL 24
+#define V_ICSPI_OVFL(x) ((x) << S_ICSPI_OVFL)
+#define F_ICSPI_OVFL V_ICSPI_OVFL(1U)
+
+#define S_PCMD_LEN_OVFL3 23
+#define V_PCMD_LEN_OVFL3(x) ((x) << S_PCMD_LEN_OVFL3)
+#define F_PCMD_LEN_OVFL3 V_PCMD_LEN_OVFL3(1U)
+
+#define S_T7_PCMD_LEN_OVFL2 22
+#define V_T7_PCMD_LEN_OVFL2(x) ((x) << S_T7_PCMD_LEN_OVFL2)
+#define F_T7_PCMD_LEN_OVFL2 V_T7_PCMD_LEN_OVFL2(1U)
+
+#define S_T7_PCMD_LEN_OVFL1 21
+#define V_T7_PCMD_LEN_OVFL1(x) ((x) << S_T7_PCMD_LEN_OVFL1)
+#define F_T7_PCMD_LEN_OVFL1 V_T7_PCMD_LEN_OVFL1(1U)
+
+#define S_T7_PCMD_LEN_OVFL0 20
+#define V_T7_PCMD_LEN_OVFL0(x) ((x) << S_T7_PCMD_LEN_OVFL0)
+#define F_T7_PCMD_LEN_OVFL0 V_T7_PCMD_LEN_OVFL0(1U)
+
+#define S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR 19
+#define V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR 18
+#define V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR 17
+#define V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR 16
+#define V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI0_TX_FRAMING_ERROR 15
+#define V_T7_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_TX_FRAMING_ERROR)
+#define F_T7_ICSPI0_TX_FRAMING_ERROR V_T7_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_TX_FRAMING_ERROR 14
+#define V_T7_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_TX_FRAMING_ERROR)
+#define F_T7_ICSPI1_TX_FRAMING_ERROR V_T7_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_TX_FRAMING_ERROR 13
+#define V_T7_ICSPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_TX_FRAMING_ERROR)
+#define F_T7_ICSPI2_TX_FRAMING_ERROR V_T7_ICSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_TX_FRAMING_ERROR 12
+#define V_T7_ICSPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_TX_FRAMING_ERROR)
+#define F_T7_ICSPI3_TX_FRAMING_ERROR V_T7_ICSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_RX_FRAMING_ERROR 11
+#define V_T7_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_RX_FRAMING_ERROR)
+#define F_T7_OESPI0_RX_FRAMING_ERROR V_T7_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_RX_FRAMING_ERROR 10
+#define V_T7_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_RX_FRAMING_ERROR)
+#define F_T7_OESPI1_RX_FRAMING_ERROR V_T7_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_RX_FRAMING_ERROR 9
+#define V_T7_OESPI2_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_RX_FRAMING_ERROR)
+#define F_T7_OESPI2_RX_FRAMING_ERROR V_T7_OESPI2_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_RX_FRAMING_ERROR 8
+#define V_T7_OESPI3_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_RX_FRAMING_ERROR)
+#define F_T7_OESPI3_RX_FRAMING_ERROR V_T7_OESPI3_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_TX_FRAMING_ERROR 7
+#define V_T7_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_TX_FRAMING_ERROR V_T7_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_TX_FRAMING_ERROR 6
+#define V_T7_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_TX_FRAMING_ERROR V_T7_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_TX_FRAMING_ERROR 5
+#define V_T7_OESPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_TX_FRAMING_ERROR V_T7_OESPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_TX_FRAMING_ERROR 4
+#define V_T7_OESPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_TX_FRAMING_ERROR V_T7_OESPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR 3
+#define V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR 2
+#define V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
#define A_PM_TX_INT_CAUSE 0x8ffc
#define S_ZERO_C_CMD_ERROR 28
@@ -30624,23 +38902,51 @@
#define F_OSPI_OR_BUNDLE_LEN_PAR_ERR V_OSPI_OR_BUNDLE_LEN_PAR_ERR(1U)
#define A_PM_TX_ISPI_DBG_4B_DATA0 0x10000
+#define A_T7_PM_TX_DBG_STAT_MSB 0x10000
#define A_PM_TX_ISPI_DBG_4B_DATA1 0x10001
+#define A_T7_PM_TX_DBG_STAT_LSB 0x10001
#define A_PM_TX_ISPI_DBG_4B_DATA2 0x10002
+#define A_T7_PM_TX_DBG_RSVD_FLIT_CNT 0x10002
#define A_PM_TX_ISPI_DBG_4B_DATA3 0x10003
+#define A_T7_PM_TX_SDC_EN 0x10003
#define A_PM_TX_ISPI_DBG_4B_DATA4 0x10004
+#define A_T7_PM_TX_INOUT_FIFO_DBG_CHNL_SEL 0x10004
#define A_PM_TX_ISPI_DBG_4B_DATA5 0x10005
+#define A_T7_PM_TX_INOUT_FIFO_DBG_WR 0x10005
#define A_PM_TX_ISPI_DBG_4B_DATA6 0x10006
+#define A_T7_PM_TX_INPUT_FIFO_STR_FWD_EN 0x10006
#define A_PM_TX_ISPI_DBG_4B_DATA7 0x10007
+#define A_T7_PM_TX_FEATURE_EN 0x10007
+
+#define S_IN_AFULL_TH 5
+#define M_IN_AFULL_TH 0x3U
+#define V_IN_AFULL_TH(x) ((x) << S_IN_AFULL_TH)
+#define G_IN_AFULL_TH(x) (((x) >> S_IN_AFULL_TH) & M_IN_AFULL_TH)
+
+#define S_PIO_FROM_CH_EN 4
+#define V_PIO_FROM_CH_EN(x) ((x) << S_PIO_FROM_CH_EN)
+#define F_PIO_FROM_CH_EN V_PIO_FROM_CH_EN(1U)
+
#define A_PM_TX_ISPI_DBG_4B_DATA8 0x10008
+#define A_T7_PM_TX_T5_PM_TX_INT_ENABLE 0x10008
#define A_PM_TX_OSPI_DBG_4B_DATA0 0x10009
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD0 0x10009
#define A_PM_TX_OSPI_DBG_4B_DATA1 0x1000a
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD1 0x1000a
#define A_PM_TX_OSPI_DBG_4B_DATA2 0x1000b
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD2 0x1000b
#define A_PM_TX_OSPI_DBG_4B_DATA3 0x1000c
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x1000c
#define A_PM_TX_OSPI_DBG_4B_DATA4 0x1000d
+#define A_T7_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x1000d
#define A_PM_TX_OSPI_DBG_4B_DATA5 0x1000e
+#define A_T7_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x1000e
#define A_PM_TX_OSPI_DBG_4B_DATA6 0x1000f
+#define A_T7_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x1000f
#define A_PM_TX_OSPI_DBG_4B_DATA7 0x10010
+#define A_T7_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x10010
#define A_PM_TX_OSPI_DBG_4B_DATA8 0x10011
+#define A_T7_PM_TX_INT_CAUSE_MASK_HALT 0x10011
#define A_PM_TX_OSPI_DBG_4B_DATA9 0x10012
#define A_PM_TX_OSPI_DBG_4B_DATA10 0x10013
#define A_PM_TX_OSPI_DBG_4B_DATA11 0x10014
@@ -30722,6 +39028,48 @@
#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x10026
#define A_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x10027
#define A_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x10028
+#define A_PM_TX_PERR_ENABLE 0x10028
+
+#define S_T7_1_OSPI_OVERFLOW3 23
+#define V_T7_1_OSPI_OVERFLOW3(x) ((x) << S_T7_1_OSPI_OVERFLOW3)
+#define F_T7_1_OSPI_OVERFLOW3 V_T7_1_OSPI_OVERFLOW3(1U)
+
+#define S_T7_1_OSPI_OVERFLOW2 22
+#define V_T7_1_OSPI_OVERFLOW2(x) ((x) << S_T7_1_OSPI_OVERFLOW2)
+#define F_T7_1_OSPI_OVERFLOW2 V_T7_1_OSPI_OVERFLOW2(1U)
+
+#define S_T7_1_OSPI_OVERFLOW1 21
+#define V_T7_1_OSPI_OVERFLOW1(x) ((x) << S_T7_1_OSPI_OVERFLOW1)
+#define F_T7_1_OSPI_OVERFLOW1 V_T7_1_OSPI_OVERFLOW1(1U)
+
+#define S_T7_1_OSPI_OVERFLOW0 20
+#define V_T7_1_OSPI_OVERFLOW0(x) ((x) << S_T7_1_OSPI_OVERFLOW0)
+#define F_T7_1_OSPI_OVERFLOW0 V_T7_1_OSPI_OVERFLOW0(1U)
+
+#define S_T7_BUNDLE_LEN_OVFL_EN 18
+#define V_T7_BUNDLE_LEN_OVFL_EN(x) ((x) << S_T7_BUNDLE_LEN_OVFL_EN)
+#define F_T7_BUNDLE_LEN_OVFL_EN V_T7_BUNDLE_LEN_OVFL_EN(1U)
+
+#define S_T7_M_INTFPERREN 17
+#define V_T7_M_INTFPERREN(x) ((x) << S_T7_M_INTFPERREN)
+#define F_T7_M_INTFPERREN V_T7_M_INTFPERREN(1U)
+
+#define S_T7_1_SDC_ERR 16
+#define V_T7_1_SDC_ERR(x) ((x) << S_T7_1_SDC_ERR)
+#define F_T7_1_SDC_ERR V_T7_1_SDC_ERR(1U)
+
+#define S_TOKEN_PAR_ERROR 5
+#define V_TOKEN_PAR_ERROR(x) ((x) << S_TOKEN_PAR_ERROR)
+#define F_TOKEN_PAR_ERROR V_TOKEN_PAR_ERROR(1U)
+
+#define S_BUNDLE_LEN_PAR_ERROR 4
+#define V_BUNDLE_LEN_PAR_ERROR(x) ((x) << S_BUNDLE_LEN_PAR_ERROR)
+#define F_BUNDLE_LEN_PAR_ERROR V_BUNDLE_LEN_PAR_ERROR(1U)
+
+#define S_C_PCMD_TOKEN_PAR_ERROR 0
+#define V_C_PCMD_TOKEN_PAR_ERROR(x) ((x) << S_C_PCMD_TOKEN_PAR_ERROR)
+#define F_C_PCMD_TOKEN_PAR_ERROR V_C_PCMD_TOKEN_PAR_ERROR(1U)
+
#define A_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x10029
#define S_CH2_OSPI_DEFICIT_THRSHLD 0
@@ -30729,6 +39077,7 @@
#define V_CH2_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH2_OSPI_DEFICIT_THRSHLD)
#define G_CH2_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH2_OSPI_DEFICIT_THRSHLD) & M_CH2_OSPI_DEFICIT_THRSHLD)
+#define A_PM_TX_PERR_CAUSE 0x10029
#define A_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x1002a
#define S_CH3_OSPI_DEFICIT_THRSHLD 0
@@ -31462,6 +39811,7 @@
#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
#define A_MPS_PORT_TX_PAUSE_SOURCE_L 0x24
+#define A_MPS_VF_TX_MAC_DROP_PP 0x24
#define A_MPS_PORT_TX_PAUSE_SOURCE_H 0x28
#define A_MPS_PORT_PRTY_BUFFER_GROUP_MAP 0x2c
@@ -31547,6 +39897,24 @@
#define V_TXPRTY0(x) ((x) << S_TXPRTY0)
#define G_TXPRTY0(x) (((x) >> S_TXPRTY0) & M_TXPRTY0)
+#define A_MPS_PORT_PRTY_GROUP_MAP 0x34
+#define A_MPS_PORT_TRACE_MAX_CAPTURE_SIZE 0x38
+
+#define S_TX2RX 6
+#define M_TX2RX 0x7U
+#define V_TX2RX(x) ((x) << S_TX2RX)
+#define G_TX2RX(x) (((x) >> S_TX2RX) & M_TX2RX)
+
+#define S_MAC2MPS 3
+#define M_MAC2MPS 0x7U
+#define V_MAC2MPS(x) ((x) << S_MAC2MPS)
+#define G_MAC2MPS(x) (((x) >> S_MAC2MPS) & M_MAC2MPS)
+
+#define S_MPS2MAC 0
+#define M_MPS2MAC 0x7U
+#define V_MPS2MAC(x) ((x) << S_MPS2MAC)
+#define G_MPS2MAC(x) (((x) >> S_MPS2MAC) & M_MPS2MAC)
+
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_H 0x84
#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
@@ -31578,7 +39946,9 @@
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_H 0xf4
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_L 0xf8
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_H 0xfc
#define A_MPS_PORT_RX_CTL 0x100
#define S_NO_RPLCT_M 20
@@ -31682,6 +40052,26 @@
#define V_HASH_EN_MAC(x) ((x) << S_HASH_EN_MAC)
#define F_HASH_EN_MAC V_HASH_EN_MAC(1U)
+#define S_TRANS_ENCAP_EN 30
+#define V_TRANS_ENCAP_EN(x) ((x) << S_TRANS_ENCAP_EN)
+#define F_TRANS_ENCAP_EN V_TRANS_ENCAP_EN(1U)
+
+#define S_CRYPTO_DUMMY_PKT_CHK_EN 29
+#define V_CRYPTO_DUMMY_PKT_CHK_EN(x) ((x) << S_CRYPTO_DUMMY_PKT_CHK_EN)
+#define F_CRYPTO_DUMMY_PKT_CHK_EN V_CRYPTO_DUMMY_PKT_CHK_EN(1U)
+
+#define S_PASS_HPROM 28
+#define V_PASS_HPROM(x) ((x) << S_PASS_HPROM)
+#define F_PASS_HPROM V_PASS_HPROM(1U)
+
+#define S_PASS_PROM 27
+#define V_PASS_PROM(x) ((x) << S_PASS_PROM)
+#define F_PASS_PROM V_PASS_PROM(1U)
+
+#define S_ENCAP_ONLY_IF_OUTER_HIT 26
+#define V_ENCAP_ONLY_IF_OUTER_HIT(x) ((x) << S_ENCAP_ONLY_IF_OUTER_HIT)
+#define F_ENCAP_ONLY_IF_OUTER_HIT V_ENCAP_ONLY_IF_OUTER_HIT(1U)
+
#define A_MPS_PORT_RX_MTU 0x104
#define A_MPS_PORT_RX_PF_MAP 0x108
#define A_MPS_PORT_RX_VF_MAP0 0x10c
@@ -31924,6 +40314,23 @@
#define V_REPL_VECT_SEL(x) ((x) << S_REPL_VECT_SEL)
#define G_REPL_VECT_SEL(x) (((x) >> S_REPL_VECT_SEL) & M_REPL_VECT_SEL)
+#define A_MPS_PORT_MAC_RX_DROP_EN_PP 0x16c
+
+#define S_PRIO 0
+#define M_PRIO 0xffU
+#define V_PRIO(x) ((x) << S_PRIO)
+#define G_PRIO(x) (((x) >> S_PRIO) & M_PRIO)
+
+#define A_MPS_PORT_RX_INT_RSS_HASH 0x170
+#define A_MPS_PORT_RX_INT_RSS_CONTROL 0x174
+#define A_MPS_PORT_RX_CNT_DBG_CTL 0x178
+
+#define S_DBG_TYPE 0
+#define M_DBG_TYPE 0x1fU
+#define V_DBG_TYPE(x) ((x) << S_DBG_TYPE)
+#define G_DBG_TYPE(x) (((x) >> S_DBG_TYPE) & M_DBG_TYPE)
+
+#define A_MPS_PORT_RX_CNT_DBG 0x17c
#define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190
#define S_CREDIT 0
@@ -31984,6 +40391,10 @@
#define V_ON_PENDING(x) ((x) << S_ON_PENDING)
#define G_ON_PENDING(x) (((x) >> S_ON_PENDING) & M_ON_PENDING)
+#define A_MPS_PORT_TX_MAC_DROP_PP 0x1d4
+#define A_MPS_PORT_TX_LPBK_DROP_PP 0x1d8
+#define A_MPS_PORT_TX_MAC_DROP_CNT 0x1dc
+#define A_MPS_PORT_TX_LPBK_DROP_CNT 0x1e0
#define A_MPS_PORT_CLS_HASH_SRAM 0x200
#define S_VALID 20
@@ -32097,6 +40508,13 @@
#define V_TAG(x) ((x) << S_TAG)
#define G_TAG(x) (((x) >> S_TAG) & M_TAG)
+#define A_MPS_PF_TX_MAC_DROP_PP 0x2e4
+
+#define S_T7_DROPEN 0
+#define M_T7_DROPEN 0xffU
+#define V_T7_DROPEN(x) ((x) << S_T7_DROPEN)
+#define G_T7_DROPEN(x) (((x) >> S_T7_DROPEN) & M_T7_DROPEN)
+
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_L 0x300
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_H 0x304
#define A_MPS_PORT_CLS_HASH_CTL 0x304
@@ -32112,35 +40530,9 @@
#define V_PROMISCEN(x) ((x) << S_PROMISCEN)
#define F_PROMISCEN V_PROMISCEN(1U)
-#define S_T6_MULTILISTEN 16
-#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN)
-#define F_T6_MULTILISTEN V_T6_MULTILISTEN(1U)
-
-#define S_T6_PRIORITY 13
-#define M_T6_PRIORITY 0x7U
-#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY)
-#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY)
-
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_L 0x30c
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_H 0x310
@@ -32156,6 +40548,7 @@
#define V_MATCHALL(x) ((x) << S_MATCHALL)
#define F_MATCHALL V_MATCHALL(1U)
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_H 0x310
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314
#define A_MPS_PORT_CLS_BMC_VLAN 0x314
@@ -32167,6 +40560,7 @@
#define V_VLAN_VLD(x) ((x) << S_VLAN_VLD)
#define F_VLAN_VLD V_VLAN_VLD(1U)
+#define A_MPS_PORT_CLS_BMC_VLAN0 0x314
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_L 0x318
#define A_MPS_PORT_CLS_CTL 0x318
@@ -32218,6 +40612,18 @@
#define V_DMAC_TCAM_SEL(x) ((x) << S_DMAC_TCAM_SEL)
#define G_DMAC_TCAM_SEL(x) (((x) >> S_DMAC_TCAM_SEL) & M_DMAC_TCAM_SEL)
+#define S_SMAC_INDEX_EN 17
+#define V_SMAC_INDEX_EN(x) ((x) << S_SMAC_INDEX_EN)
+#define F_SMAC_INDEX_EN V_SMAC_INDEX_EN(1U)
+
+#define S_LPBK_TCAM2_HIT_PRIORITY 16
+#define V_LPBK_TCAM2_HIT_PRIORITY(x) ((x) << S_LPBK_TCAM2_HIT_PRIORITY)
+#define F_LPBK_TCAM2_HIT_PRIORITY V_LPBK_TCAM2_HIT_PRIORITY(1U)
+
+#define S_TCAM2_HIT_PRIORITY 15
+#define V_TCAM2_HIT_PRIORITY(x) ((x) << S_TCAM2_HIT_PRIORITY)
+#define F_TCAM2_HIT_PRIORITY V_TCAM2_HIT_PRIORITY(1U)
+
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c
#define A_MPS_PORT_CLS_NCSI_ETH_TYPE 0x31c
@@ -32238,14 +40644,23 @@
#define F_EN2 V_EN2(1U)
#define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_L 0x324
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_H 0x328
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_L 0x32c
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_L 0x330
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_H 0x330
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_H 0x334
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_L 0x334
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_L 0x338
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_H 0x338
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_H 0x33c
+#define A_MPS_PORT_CLS_BMC_VLAN1 0x33c
#define A_MPS_PF_STAT_RX_PF_BYTES_L 0x340
+#define A_MPS_PORT_CLS_BMC_VLAN2 0x340
#define A_MPS_PF_STAT_RX_PF_BYTES_H 0x344
+#define A_MPS_PORT_CLS_BMC_VLAN3 0x344
#define A_MPS_PF_STAT_RX_PF_FRAMES_L 0x348
#define A_MPS_PF_STAT_RX_PF_FRAMES_H 0x34c
#define A_MPS_PF_STAT_RX_PF_BCAST_BYTES_L 0x350
@@ -32261,7 +40676,9 @@
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_L 0x378
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_H 0x37c
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_L 0x380
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_L 0x380
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_H 0x384
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_H 0x384
#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
@@ -32393,6 +40810,22 @@
#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_L 0x618
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_H 0x61c
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_L 0x620
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_H 0x624
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_L 0x628
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_H 0x62c
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_L 0x630
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_H 0x634
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_L 0x638
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_H 0x63c
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_L 0x640
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_H 0x644
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_L 0x648
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_H 0x64c
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_L 0x650
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_H 0x654
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_L 0x658
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_H 0x65c
#define A_MPS_CMN_CTL 0x9000
#define S_DETECT8023 3
@@ -32425,6 +40858,46 @@
#define V_SPEEDMODE(x) ((x) << S_SPEEDMODE)
#define G_SPEEDMODE(x) (((x) >> S_SPEEDMODE) & M_SPEEDMODE)
+#define S_PT1_SEL_CFG 21
+#define V_PT1_SEL_CFG(x) ((x) << S_PT1_SEL_CFG)
+#define F_PT1_SEL_CFG V_PT1_SEL_CFG(1U)
+
+#define S_BUG_42938_EN 20
+#define V_BUG_42938_EN(x) ((x) << S_BUG_42938_EN)
+#define F_BUG_42938_EN V_BUG_42938_EN(1U)
+
+#define S_NO_BYPASS_PAUSE 19
+#define V_NO_BYPASS_PAUSE(x) ((x) << S_NO_BYPASS_PAUSE)
+#define F_NO_BYPASS_PAUSE V_NO_BYPASS_PAUSE(1U)
+
+#define S_BYPASS_PAUSE 18
+#define V_BYPASS_PAUSE(x) ((x) << S_BYPASS_PAUSE)
+#define F_BYPASS_PAUSE V_BYPASS_PAUSE(1U)
+
+#define S_PBUS_EN 16
+#define M_PBUS_EN 0x3U
+#define V_PBUS_EN(x) ((x) << S_PBUS_EN)
+#define G_PBUS_EN(x) (((x) >> S_PBUS_EN) & M_PBUS_EN)
+
+#define S_INIC_EN 14
+#define M_INIC_EN 0x3U
+#define V_INIC_EN(x) ((x) << S_INIC_EN)
+#define G_INIC_EN(x) (((x) >> S_INIC_EN) & M_INIC_EN)
+
+#define S_SBA_EN 12
+#define M_SBA_EN 0x3U
+#define V_SBA_EN(x) ((x) << S_SBA_EN)
+#define G_SBA_EN(x) (((x) >> S_SBA_EN) & M_SBA_EN)
+
+#define S_BG2TP_MAP_MODE 11
+#define V_BG2TP_MAP_MODE(x) ((x) << S_BG2TP_MAP_MODE)
+#define F_BG2TP_MAP_MODE V_BG2TP_MAP_MODE(1U)
+
+#define S_MPS_LB_MODE 9
+#define M_MPS_LB_MODE 0x3U
+#define V_MPS_LB_MODE(x) ((x) << S_MPS_LB_MODE)
+#define G_MPS_LB_MODE(x) (((x) >> S_MPS_LB_MODE) & M_MPS_LB_MODE)
+
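As an aside on the S_/M_/V_/G_ quadruplets above: by Chelsio convention S_ is a field's bit offset, M_ its unshifted mask, V_ shifts a value into position, and G_ extracts it (single-bit fields get an F_ constant instead of M_/G_, e.g. F_BYPASS_PAUSE). A minimal sketch of the pattern against the new A_MPS_CMN_CTL fields, assuming the driver's usual t4_read_reg()/t4_write_reg() accessors and a hypothetical adapter pointer sc:

    uint32_t v = t4_read_reg(sc, A_MPS_CMN_CTL);
    unsigned int mode = G_MPS_LB_MODE(v);   /* extract the 2-bit field at bit 9 */

    v &= ~V_MPS_LB_MODE(M_MPS_LB_MODE);     /* clear the field */
    v |= V_MPS_LB_MODE(1);                  /* install a new value */
    t4_write_reg(sc, A_MPS_CMN_CTL, v);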
#define A_MPS_INT_ENABLE 0x9004
#define S_STATINTENB 5
@@ -32618,6 +41091,17 @@
#define A_MPS_T5_BUILD_REVISION 0x9078
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH0 0x907c
+
+#define S_VALUE_1 16
+#define M_VALUE_1 0xffffU
+#define V_VALUE_1(x) ((x) << S_VALUE_1)
+#define G_VALUE_1(x) (((x) >> S_VALUE_1) & M_VALUE_1)
+
+#define S_VALUE_0 0
+#define M_VALUE_0 0xffffU
+#define V_VALUE_0(x) ((x) << S_VALUE_0)
+#define G_VALUE_0(x) (((x) >> S_VALUE_0) & M_VALUE_0)
+
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH1 0x9080
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH2 0x9084
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH3 0x9088
@@ -32671,11 +41155,130 @@
#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
#define A_MPS_FPGA_BIST_CFG_P1 0x9124
-
-#define S_T6_BASEADDR 0
-#define M_T6_BASEADDR 0xffffU
-#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR)
-#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
+#define A_MPS_FPGA_BIST_CFG_P2 0x9128
+#define A_MPS_FPGA_BIST_CFG_P3 0x912c
+#define A_MPS_INIC_CTL 0x9130
+
+#define S_T7_RD_WRN 16
+#define V_T7_RD_WRN(x) ((x) << S_T7_RD_WRN)
+#define F_T7_RD_WRN V_T7_RD_WRN(1U)
+
+#define A_MPS_INIC_DATA 0x9134
+#define A_MPS_TP_CSIDE_MUX_CTL_P2 0x9138
+#define A_MPS_TP_CSIDE_MUX_CTL_P3 0x913c
+#define A_MPS_RED_CTL 0x9140
+
+#define S_LPBK_SHIFT_0 28
+#define M_LPBK_SHIFT_0 0xfU
+#define V_LPBK_SHIFT_0(x) ((x) << S_LPBK_SHIFT_0)
+#define G_LPBK_SHIFT_0(x) (((x) >> S_LPBK_SHIFT_0) & M_LPBK_SHIFT_0)
+
+#define S_LPBK_SHIFT_1 24
+#define M_LPBK_SHIFT_1 0xfU
+#define V_LPBK_SHIFT_1(x) ((x) << S_LPBK_SHIFT_1)
+#define G_LPBK_SHIFT_1(x) (((x) >> S_LPBK_SHIFT_1) & M_LPBK_SHIFT_1)
+
+#define S_LPBK_SHIFT_2 20
+#define M_LPBK_SHIFT_2 0xfU
+#define V_LPBK_SHIFT_2(x) ((x) << S_LPBK_SHIFT_2)
+#define G_LPBK_SHIFT_2(x) (((x) >> S_LPBK_SHIFT_2) & M_LPBK_SHIFT_2)
+
+#define S_LPBK_SHIFT_3 16
+#define M_LPBK_SHIFT_3 0xfU
+#define V_LPBK_SHIFT_3(x) ((x) << S_LPBK_SHIFT_3)
+#define G_LPBK_SHIFT_3(x) (((x) >> S_LPBK_SHIFT_3) & M_LPBK_SHIFT_3)
+
+#define S_MAC_SHIFT_0 12
+#define M_MAC_SHIFT_0 0xfU
+#define V_MAC_SHIFT_0(x) ((x) << S_MAC_SHIFT_0)
+#define G_MAC_SHIFT_0(x) (((x) >> S_MAC_SHIFT_0) & M_MAC_SHIFT_0)
+
+#define S_MAC_SHIFT_1 8
+#define M_MAC_SHIFT_1 0xfU
+#define V_MAC_SHIFT_1(x) ((x) << S_MAC_SHIFT_1)
+#define G_MAC_SHIFT_1(x) (((x) >> S_MAC_SHIFT_1) & M_MAC_SHIFT_1)
+
+#define S_MAC_SHIFT_2 4
+#define M_MAC_SHIFT_2 0xfU
+#define V_MAC_SHIFT_2(x) ((x) << S_MAC_SHIFT_2)
+#define G_MAC_SHIFT_2(x) (((x) >> S_MAC_SHIFT_2) & M_MAC_SHIFT_2)
+
+#define S_MAC_SHIFT_3 0
+#define M_MAC_SHIFT_3 0xfU
+#define V_MAC_SHIFT_3(x) ((x) << S_MAC_SHIFT_3)
+#define G_MAC_SHIFT_3(x) (((x) >> S_MAC_SHIFT_3) & M_MAC_SHIFT_3)
+
+#define A_MPS_RED_EN 0x9144
+
+#define S_LPBK_EN3 7
+#define V_LPBK_EN3(x) ((x) << S_LPBK_EN3)
+#define F_LPBK_EN3 V_LPBK_EN3(1U)
+
+#define S_LPBK_EN2 6
+#define V_LPBK_EN2(x) ((x) << S_LPBK_EN2)
+#define F_LPBK_EN2 V_LPBK_EN2(1U)
+
+#define S_LPBK_EN1 5
+#define V_LPBK_EN1(x) ((x) << S_LPBK_EN1)
+#define F_LPBK_EN1 V_LPBK_EN1(1U)
+
+#define S_LPBK_EN0 4
+#define V_LPBK_EN0(x) ((x) << S_LPBK_EN0)
+#define F_LPBK_EN0 V_LPBK_EN0(1U)
+
+#define S_MAC_EN3 3
+#define V_MAC_EN3(x) ((x) << S_MAC_EN3)
+#define F_MAC_EN3 V_MAC_EN3(1U)
+
+#define S_MAC_EN2 2
+#define V_MAC_EN2(x) ((x) << S_MAC_EN2)
+#define F_MAC_EN2 V_MAC_EN2(1U)
+
+#define S_MAC_EN1 1
+#define V_MAC_EN1(x) ((x) << S_MAC_EN1)
+#define F_MAC_EN1 V_MAC_EN1(1U)
+
+#define S_MAC_EN0 0
+#define V_MAC_EN0(x) ((x) << S_MAC_EN0)
+#define F_MAC_EN0 V_MAC_EN0(1U)
+
+#define A_MPS_MAC0_RED_DROP_CNT_H 0x9148
+#define A_MPS_MAC0_RED_DROP_CNT_L 0x914c
+#define A_MPS_MAC1_RED_DROP_CNT_H 0x9150
+#define A_MPS_MAC1_RED_DROP_CNT_L 0x9154
+#define A_MPS_MAC2_RED_DROP_CNT_H 0x9158
+#define A_MPS_MAC2_RED_DROP_CNT_L 0x915c
+#define A_MPS_MAC3_RED_DROP_CNT_H 0x9160
+#define A_MPS_MAC3_RED_DROP_CNT_L 0x9164
+#define A_MPS_LPBK0_RED_DROP_CNT_H 0x9168
+#define A_MPS_LPBK0_RED_DROP_CNT_L 0x916c
+#define A_MPS_LPBK1_RED_DROP_CNT_H 0x9170
+#define A_MPS_LPBK1_RED_DROP_CNT_L 0x9174
+#define A_MPS_LPBK2_RED_DROP_CNT_H 0x9178
+#define A_MPS_LPBK2_RED_DROP_CNT_L 0x917c
+#define A_MPS_LPBK3_RED_DROP_CNT_H 0x9180
+#define A_MPS_LPBK3_RED_DROP_CNT_L 0x9184
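The RED drop counters above are 64-bit values split across _H/_L register pairs. A minimal sketch of assembling the MAC0 count from the two halves, again assuming the t4_read_reg() accessor; a careful reader would also re-read to guard against the low word wrapping between the two accesses:

    uint64_t drops;

    drops = (uint64_t)t4_read_reg(sc, A_MPS_MAC0_RED_DROP_CNT_H) << 32;
    drops |= t4_read_reg(sc, A_MPS_MAC0_RED_DROP_CNT_L);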
+#define A_MPS_MAC_RED_PP_DROP_EN 0x9188
+
+#define S_T7_MAC3 24
+#define M_T7_MAC3 0xffU
+#define V_T7_MAC3(x) ((x) << S_T7_MAC3)
+#define G_T7_MAC3(x) (((x) >> S_T7_MAC3) & M_T7_MAC3)
+
+#define S_T7_MAC2 16
+#define M_T7_MAC2 0xffU
+#define V_T7_MAC2(x) ((x) << S_T7_MAC2)
+#define G_T7_MAC2(x) (((x) >> S_T7_MAC2) & M_T7_MAC2)
+
+#define S_T7_MAC1 8
+#define M_T7_MAC1 0xffU
+#define V_T7_MAC1(x) ((x) << S_T7_MAC1)
+#define G_T7_MAC1(x) (((x) >> S_T7_MAC1) & M_T7_MAC1)
+
+#define S_T7_MAC0 0
+#define M_T7_MAC0 0xffU
+#define V_T7_MAC0(x) ((x) << S_T7_MAC0)
+#define G_T7_MAC0(x) (((x) >> S_T7_MAC0) & M_T7_MAC0)
#define A_MPS_TX_PRTY_SEL 0x9400
@@ -32714,6 +41317,26 @@
#define V_NCSI_SOURCE(x) ((x) << S_NCSI_SOURCE)
#define G_NCSI_SOURCE(x) (((x) >> S_NCSI_SOURCE) & M_NCSI_SOURCE)
+#define S_T7_CH4_PRTY 16
+#define M_T7_CH4_PRTY 0x7U
+#define V_T7_CH4_PRTY(x) ((x) << S_T7_CH4_PRTY)
+#define G_T7_CH4_PRTY(x) (((x) >> S_T7_CH4_PRTY) & M_T7_CH4_PRTY)
+
+#define S_T7_CH3_PRTY 13
+#define M_T7_CH3_PRTY 0x7U
+#define V_T7_CH3_PRTY(x) ((x) << S_T7_CH3_PRTY)
+#define G_T7_CH3_PRTY(x) (((x) >> S_T7_CH3_PRTY) & M_T7_CH3_PRTY)
+
+#define S_T7_CH2_PRTY 10
+#define M_T7_CH2_PRTY 0x7U
+#define V_T7_CH2_PRTY(x) ((x) << S_T7_CH2_PRTY)
+#define G_T7_CH2_PRTY(x) (((x) >> S_T7_CH2_PRTY) & M_T7_CH2_PRTY)
+
+#define S_T7_CH1_PRTY 7
+#define M_T7_CH1_PRTY 0x7U
+#define V_T7_CH1_PRTY(x) ((x) << S_T7_CH1_PRTY)
+#define G_T7_CH1_PRTY(x) (((x) >> S_T7_CH1_PRTY) & M_T7_CH1_PRTY)
+
#define A_MPS_TX_INT_ENABLE 0x9404
#define S_PORTERR 16
@@ -32751,9 +41374,52 @@
#define V_TPFIFO(x) ((x) << S_TPFIFO)
#define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO)
+#define S_T7_PORTERR 28
+#define V_T7_PORTERR(x) ((x) << S_T7_PORTERR)
+#define F_T7_PORTERR V_T7_PORTERR(1U)
+
+#define S_T7_FRMERR 27
+#define V_T7_FRMERR(x) ((x) << S_T7_FRMERR)
+#define F_T7_FRMERR V_T7_FRMERR(1U)
+
+#define S_T7_SECNTERR 26
+#define V_T7_SECNTERR(x) ((x) << S_T7_SECNTERR)
+#define F_T7_SECNTERR V_T7_SECNTERR(1U)
+
+#define S_T7_BUBBLE 25
+#define V_T7_BUBBLE(x) ((x) << S_T7_BUBBLE)
+#define F_T7_BUBBLE V_T7_BUBBLE(1U)
+
+#define S_TXTOKENFIFO 15
+#define M_TXTOKENFIFO 0x3ffU
+#define V_TXTOKENFIFO(x) ((x) << S_TXTOKENFIFO)
+#define G_TXTOKENFIFO(x) (((x) >> S_TXTOKENFIFO) & M_TXTOKENFIFO)
+
+#define S_PERR_TP2MPS_TFIFO 13
+#define M_PERR_TP2MPS_TFIFO 0x3U
+#define V_PERR_TP2MPS_TFIFO(x) ((x) << S_PERR_TP2MPS_TFIFO)
+#define G_PERR_TP2MPS_TFIFO(x) (((x) >> S_PERR_TP2MPS_TFIFO) & M_PERR_TP2MPS_TFIFO)
+
#define A_MPS_TX_INT_CAUSE 0x9408
#define A_MPS_TX_NCSI2MPS_CNT 0x940c
#define A_MPS_TX_PERR_ENABLE 0x9410
+
+#define S_PORTERRINT 28
+#define V_PORTERRINT(x) ((x) << S_PORTERRINT)
+#define F_PORTERRINT V_PORTERRINT(1U)
+
+#define S_FRAMINGERRINT 27
+#define V_FRAMINGERRINT(x) ((x) << S_FRAMINGERRINT)
+#define F_FRAMINGERRINT V_FRAMINGERRINT(1U)
+
+#define S_SECNTERRINT 26
+#define V_SECNTERRINT(x) ((x) << S_SECNTERRINT)
+#define F_SECNTERRINT V_SECNTERRINT(1U)
+
+#define S_BUBBLEERRINT 25
+#define V_BUBBLEERRINT(x) ((x) << S_BUBBLEERRINT)
+#define F_BUBBLEERRINT V_BUBBLEERRINT(1U)
+
#define A_MPS_TX_PERR_INJECT 0x9414
#define S_MPSTXMEMSEL 1
@@ -33481,6 +42147,41 @@
#define F_TXINCH0_CGEN V_TXINCH0_CGEN(1U)
#define A_MPS_TX_CGEN_DYNAMIC 0x9470
+#define A_MPS_TX2RX_CH_MAP 0x9474
+
+#define S_ENABLELBK_CH3 3
+#define V_ENABLELBK_CH3(x) ((x) << S_ENABLELBK_CH3)
+#define F_ENABLELBK_CH3 V_ENABLELBK_CH3(1U)
+
+#define S_ENABLELBK_CH2 2
+#define V_ENABLELBK_CH2(x) ((x) << S_ENABLELBK_CH2)
+#define F_ENABLELBK_CH2 V_ENABLELBK_CH2(1U)
+
+#define S_ENABLELBK_CH1 1
+#define V_ENABLELBK_CH1(x) ((x) << S_ENABLELBK_CH1)
+#define F_ENABLELBK_CH1 V_ENABLELBK_CH1(1U)
+
+#define S_ENABLELBK_CH0 0
+#define V_ENABLELBK_CH0(x) ((x) << S_ENABLELBK_CH0)
+#define F_ENABLELBK_CH0 V_ENABLELBK_CH0(1U)
+
+#define A_MPS_TX_DBG_CNT_CTL 0x9478
+
+#define S_DBG_CNT_CTL 0
+#define M_DBG_CNT_CTL 0xffU
+#define V_DBG_CNT_CTL(x) ((x) << S_DBG_CNT_CTL)
+#define G_DBG_CNT_CTL(x) (((x) >> S_DBG_CNT_CTL) & M_DBG_CNT_CTL)
+
+#define A_MPS_TX_DBG_CNT 0x947c
+#define A_MPS_TX_INT2_ENABLE 0x9498
+#define A_MPS_TX_INT2_CAUSE 0x949c
+#define A_MPS_TX_PERR2_ENABLE 0x94a0
+#define A_MPS_TX_INT3_ENABLE 0x94a4
+#define A_MPS_TX_INT3_CAUSE 0x94a8
+#define A_MPS_TX_PERR3_ENABLE 0x94ac
+#define A_MPS_TX_INT4_ENABLE 0x94b0
+#define A_MPS_TX_INT4_CAUSE 0x94b4
+#define A_MPS_TX_PERR4_ENABLE 0x94b8
#define A_MPS_STAT_CTL 0x9600
#define S_COUNTVFINPF 1
@@ -33810,6 +42511,7 @@
#define A_MPS_TRC_RSS_HASH 0x9804
#define A_MPS_TRC_FILTER0_RSS_HASH 0x9804
+#define A_T7_MPS_TRC_PERR_INJECT 0x9804
#define A_MPS_TRC_RSS_CONTROL 0x9808
#define S_RSSCONTROL 16
@@ -33939,6 +42641,20 @@
#define V_FILTMEM(x) ((x) << S_FILTMEM)
#define G_FILTMEM(x) (((x) >> S_FILTMEM) & M_FILTMEM)
+#define S_T7_MISCPERR 16
+#define V_T7_MISCPERR(x) ((x) << S_T7_MISCPERR)
+#define F_T7_MISCPERR V_T7_MISCPERR(1U)
+
+#define S_T7_PKTFIFO 8
+#define M_T7_PKTFIFO 0xffU
+#define V_T7_PKTFIFO(x) ((x) << S_T7_PKTFIFO)
+#define G_T7_PKTFIFO(x) (((x) >> S_T7_PKTFIFO) & M_T7_PKTFIFO)
+
+#define S_T7_FILTMEM 0
+#define M_T7_FILTMEM 0xffU
+#define V_T7_FILTMEM(x) ((x) << S_T7_FILTMEM)
+#define G_T7_FILTMEM(x) (((x) >> S_T7_FILTMEM) & M_T7_FILTMEM)
+
#define A_MPS_TRC_INT_ENABLE 0x9858
#define S_TRCPLERRENB 9
@@ -33961,6 +42677,7 @@
#define A_MPS_TRC_FILTER2_RSS_HASH 0x9ff8
#define A_MPS_TRC_FILTER2_RSS_CONTROL 0x9ffc
#define A_MPS_TRC_FILTER3_RSS_HASH 0xa000
+#define A_MPS_TRC_FILTER4_MATCH 0xa000
#define A_MPS_TRC_FILTER3_RSS_CONTROL 0xa004
#define A_MPS_T5_TRC_RSS_HASH 0xa008
#define A_MPS_T5_TRC_RSS_CONTROL 0xa00c
@@ -34043,125 +42760,8 @@
#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
#define A_MPS_TRC_VF_OFF_FILTER_1 0xa014
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_2 0xa018
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_3 0xa01c
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_CGEN 0xa020
#define S_MPSTRCCGEN 0
@@ -34169,6 +42769,129 @@
#define V_MPSTRCCGEN(x) ((x) << S_MPSTRCCGEN)
#define G_MPSTRCCGEN(x) (((x) >> S_MPSTRCCGEN) & M_MPSTRCCGEN)
+#define A_MPS_TRC_FILTER4_DONT_CARE 0xa080
+#define A_MPS_TRC_FILTER5_MATCH 0xa100
+#define A_MPS_TRC_FILTER5_DONT_CARE 0xa180
+#define A_MPS_TRC_FILTER6_MATCH 0xa200
+#define A_MPS_TRC_FILTER6_DONT_CARE 0xa280
+#define A_MPS_TRC_FILTER7_MATCH 0xa300
+#define A_MPS_TRC_FILTER7_DONT_CARE 0xa380
+#define A_T7_MPS_TRC_FILTER0_RSS_HASH 0xa3f0
+#define A_T7_MPS_TRC_FILTER0_RSS_CONTROL 0xa3f4
+#define A_T7_MPS_TRC_FILTER1_RSS_HASH 0xa3f8
+#define A_T7_MPS_TRC_FILTER1_RSS_CONTROL 0xa3fc
+#define A_T7_MPS_TRC_FILTER2_RSS_HASH 0xa400
+#define A_T7_MPS_TRC_FILTER2_RSS_CONTROL 0xa404
+#define A_T7_MPS_TRC_FILTER3_RSS_HASH 0xa408
+#define A_T7_MPS_TRC_FILTER3_RSS_CONTROL 0xa40c
+#define A_MPS_TRC_FILTER4_RSS_HASH 0xa410
+#define A_MPS_TRC_FILTER4_RSS_CONTROL 0xa414
+#define A_MPS_TRC_FILTER5_RSS_HASH 0xa418
+#define A_MPS_TRC_FILTER5_RSS_CONTROL 0xa41c
+#define A_MPS_TRC_FILTER6_RSS_HASH 0xa420
+#define A_MPS_TRC_FILTER6_RSS_CONTROL 0xa424
+#define A_MPS_TRC_FILTER7_RSS_HASH 0xa428
+#define A_MPS_TRC_FILTER7_RSS_CONTROL 0xa42c
+#define A_T7_MPS_T5_TRC_RSS_HASH 0xa430
+#define A_T7_MPS_T5_TRC_RSS_CONTROL 0xa434
+#define A_T7_MPS_TRC_VF_OFF_FILTER_0 0xa438
+#define A_T7_MPS_TRC_VF_OFF_FILTER_1 0xa43c
+#define A_T7_MPS_TRC_VF_OFF_FILTER_2 0xa440
+#define A_T7_MPS_TRC_VF_OFF_FILTER_3 0xa444
+#define A_MPS_TRC_VF_OFF_FILTER_4 0xa448
+#define A_MPS_TRC_VF_OFF_FILTER_5 0xa44c
+#define A_MPS_TRC_VF_OFF_FILTER_6 0xa450
+#define A_MPS_TRC_VF_OFF_FILTER_7 0xa454
+#define A_T7_MPS_TRC_CGEN 0xa458
+
+#define S_T7_MPSTRCCGEN 0
+#define M_T7_MPSTRCCGEN 0xffU
+#define V_T7_MPSTRCCGEN(x) ((x) << S_T7_MPSTRCCGEN)
+#define G_T7_MPSTRCCGEN(x) (((x) >> S_T7_MPSTRCCGEN) & M_T7_MPSTRCCGEN)
+
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_A 0xa460
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_B 0xa480
+#define A_T7_MPS_TRC_FILTER_RUNT_CTL 0xa4a0
+#define A_T7_MPS_TRC_FILTER_DROP 0xa4c0
+#define A_T7_MPS_TRC_INT_ENABLE 0xa4e0
+
+#define S_T7_TRCPLERRENB 17
+#define V_T7_TRCPLERRENB(x) ((x) << S_T7_TRCPLERRENB)
+#define F_T7_TRCPLERRENB V_T7_TRCPLERRENB(1U)
+
+#define A_T7_MPS_TRC_INT_CAUSE 0xa4e4
+#define A_T7_MPS_TRC_TIMESTAMP_L 0xa4e8
+#define A_T7_MPS_TRC_TIMESTAMP_H 0xa4ec
+#define A_MPS_TRC_PERR_ENABLE2 0xa4f0
+
+#define S_TRC_TF_ECC 24
+#define M_TRC_TF_ECC 0xffU
+#define V_TRC_TF_ECC(x) ((x) << S_TRC_TF_ECC)
+#define G_TRC_TF_ECC(x) (((x) >> S_TRC_TF_ECC) & M_TRC_TF_ECC)
+
+#define S_MPS2MAC_CONV_TRC_CERR 22
+#define M_MPS2MAC_CONV_TRC_CERR 0x3U
+#define V_MPS2MAC_CONV_TRC_CERR(x) ((x) << S_MPS2MAC_CONV_TRC_CERR)
+#define G_MPS2MAC_CONV_TRC_CERR(x) (((x) >> S_MPS2MAC_CONV_TRC_CERR) & M_MPS2MAC_CONV_TRC_CERR)
+
+#define S_MPS2MAC_CONV_TRC 18
+#define M_MPS2MAC_CONV_TRC 0xfU
+#define V_MPS2MAC_CONV_TRC(x) ((x) << S_MPS2MAC_CONV_TRC)
+#define G_MPS2MAC_CONV_TRC(x) (((x) >> S_MPS2MAC_CONV_TRC) & M_MPS2MAC_CONV_TRC)
+
+#define S_TF0_PERR_1 17
+#define V_TF0_PERR_1(x) ((x) << S_TF0_PERR_1)
+#define F_TF0_PERR_1 V_TF0_PERR_1(1U)
+
+#define S_TF1_PERR_1 16
+#define V_TF1_PERR_1(x) ((x) << S_TF1_PERR_1)
+#define F_TF1_PERR_1 V_TF1_PERR_1(1U)
+
+#define S_TF2_PERR_1 15
+#define V_TF2_PERR_1(x) ((x) << S_TF2_PERR_1)
+#define F_TF2_PERR_1 V_TF2_PERR_1(1U)
+
+#define S_TF3_PERR_1 14
+#define V_TF3_PERR_1(x) ((x) << S_TF3_PERR_1)
+#define F_TF3_PERR_1 V_TF3_PERR_1(1U)
+
+#define S_TF4_PERR_1 13
+#define V_TF4_PERR_1(x) ((x) << S_TF4_PERR_1)
+#define F_TF4_PERR_1 V_TF4_PERR_1(1U)
+
+#define S_TF0_PERR_0 12
+#define V_TF0_PERR_0(x) ((x) << S_TF0_PERR_0)
+#define F_TF0_PERR_0 V_TF0_PERR_0(1U)
+
+#define S_TF1_PERR_0 11
+#define V_TF1_PERR_0(x) ((x) << S_TF1_PERR_0)
+#define F_TF1_PERR_0 V_TF1_PERR_0(1U)
+
+#define S_TF2_PERR_0 10
+#define V_TF2_PERR_0(x) ((x) << S_TF2_PERR_0)
+#define F_TF2_PERR_0 V_TF2_PERR_0(1U)
+
+#define S_TF3_PERR_0 9
+#define V_TF3_PERR_0(x) ((x) << S_TF3_PERR_0)
+#define F_TF3_PERR_0 V_TF3_PERR_0(1U)
+
+#define S_TF4_PERR_0 8
+#define V_TF4_PERR_0(x) ((x) << S_TF4_PERR_0)
+#define F_TF4_PERR_0 V_TF4_PERR_0(1U)
+
+#define S_PERR_TF_IN_CTL 0
+#define M_PERR_TF_IN_CTL 0xffU
+#define V_PERR_TF_IN_CTL(x) ((x) << S_PERR_TF_IN_CTL)
+#define G_PERR_TF_IN_CTL(x) (((x) >> S_PERR_TF_IN_CTL) & M_PERR_TF_IN_CTL)
+
+#define A_MPS_TRC_INT_ENABLE2 0xa4f4
+#define A_MPS_TRC_INT_CAUSE2 0xa4f8
+
+#define S_T7_TRC_TF_ECC 22
+#define M_T7_TRC_TF_ECC 0xffU
+#define V_T7_TRC_TF_ECC(x) ((x) << S_T7_TRC_TF_ECC)
+#define G_T7_TRC_TF_ECC(x) (((x) >> S_T7_TRC_TF_ECC) & M_T7_TRC_TF_ECC)
+
#define A_MPS_CLS_CTL 0xd000
#define S_MEMWRITEFAULT 4
@@ -34246,12 +42969,24 @@
#define V_MATCHSRAM(x) ((x) << S_MATCHSRAM)
#define F_MATCHSRAM V_MATCHSRAM(1U)
+#define S_CIM2MPS_INTF_PAR 4
+#define V_CIM2MPS_INTF_PAR(x) ((x) << S_CIM2MPS_INTF_PAR)
+#define F_CIM2MPS_INTF_PAR V_CIM2MPS_INTF_PAR(1U)
+
+#define S_TCAM_CRC_SRAM 3
+#define V_TCAM_CRC_SRAM(x) ((x) << S_TCAM_CRC_SRAM)
+#define F_TCAM_CRC_SRAM V_TCAM_CRC_SRAM(1U)
+
#define A_MPS_CLS_INT_ENABLE 0xd024
#define S_PLERRENB 3
#define V_PLERRENB(x) ((x) << S_PLERRENB)
#define F_PLERRENB V_PLERRENB(1U)
+#define S_T7_PLERRENB 5
+#define V_T7_PLERRENB(x) ((x) << S_T7_PLERRENB)
+#define F_T7_PLERRENB V_T7_PLERRENB(1U)
+
#define A_MPS_CLS_INT_CAUSE 0xd028
#define A_MPS_CLS_PL_TEST_DATA_L 0xd02c
#define A_MPS_CLS_PL_TEST_DATA_H 0xd030
@@ -34314,6 +43049,25 @@
#define V_T6_CLS_VF(x) ((x) << S_T6_CLS_VF)
#define G_T6_CLS_VF(x) (((x) >> S_T6_CLS_VF) & M_T6_CLS_VF)
+#define S_T7_CLS_SPARE 30
+#define M_T7_CLS_SPARE 0x3U
+#define V_T7_CLS_SPARE(x) ((x) << S_T7_CLS_SPARE)
+#define G_T7_CLS_SPARE(x) (((x) >> S_T7_CLS_SPARE) & M_T7_CLS_SPARE)
+
+#define S_T7_1_CLS_PRIORITY 27
+#define M_T7_1_CLS_PRIORITY 0x7U
+#define V_T7_1_CLS_PRIORITY(x) ((x) << S_T7_1_CLS_PRIORITY)
+#define G_T7_1_CLS_PRIORITY(x) (((x) >> S_T7_1_CLS_PRIORITY) & M_T7_1_CLS_PRIORITY)
+
+#define S_T7_1_CLS_REPLICATE 26
+#define V_T7_1_CLS_REPLICATE(x) ((x) << S_T7_1_CLS_REPLICATE)
+#define F_T7_1_CLS_REPLICATE V_T7_1_CLS_REPLICATE(1U)
+
+#define S_T7_1_CLS_INDEX 15
+#define M_T7_1_CLS_INDEX 0x7ffU
+#define V_T7_1_CLS_INDEX(x) ((x) << S_T7_1_CLS_INDEX)
+#define G_T7_1_CLS_INDEX(x) (((x) >> S_T7_1_CLS_INDEX) & M_T7_1_CLS_INDEX)
+
#define A_MPS_CLS_PL_TEST_CTL 0xd038
#define S_PLTESTCTL 0
@@ -34327,12 +43081,26 @@
#define F_PRTBMCCTL V_PRTBMCCTL(1U)
#define A_MPS_CLS_MATCH_CNT_TCAM 0xd100
+#define A_MPS_CLS0_MATCH_CNT_TCAM 0xd100
#define A_MPS_CLS_MATCH_CNT_HASH 0xd104
+#define A_MPS_CLS0_MATCH_CNT_HASH 0xd104
#define A_MPS_CLS_MATCH_CNT_BCAST 0xd108
+#define A_MPS_CLS0_MATCH_CNT_BCAST 0xd108
#define A_MPS_CLS_MATCH_CNT_BMC 0xd10c
+#define A_MPS_CLS0_MATCH_CNT_BMC 0xd10c
#define A_MPS_CLS_MATCH_CNT_PROM 0xd110
+#define A_MPS_CLS0_MATCH_CNT_PROM 0xd110
#define A_MPS_CLS_MATCH_CNT_HPROM 0xd114
+#define A_MPS_CLS0_MATCH_CNT_HPROM 0xd114
#define A_MPS_CLS_MISS_CNT 0xd118
+#define A_MPS_CLS0_MISS_CNT 0xd118
+#define A_MPS_CLS1_MATCH_CNT_TCAM 0xd11c
+#define A_MPS_CLS1_MATCH_CNT_HASH 0xd120
+#define A_MPS_CLS1_MATCH_CNT_BCAST 0xd124
+#define A_MPS_CLS1_MATCH_CNT_BMC 0xd128
+#define A_MPS_CLS1_MATCH_CNT_PROM 0xd12c
+#define A_MPS_CLS1_MATCH_CNT_HPROM 0xd130
+#define A_MPS_CLS1_MISS_CNT 0xd134
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_L 0xd200
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_H 0xd204
@@ -34428,6 +43196,15 @@
#define V_CLSTRCVF(x) ((x) << S_CLSTRCVF)
#define G_CLSTRCVF(x) (((x) >> S_CLSTRCVF) & M_CLSTRCVF)
+#define S_T7_CLSTRCMATCH 23
+#define V_T7_CLSTRCMATCH(x) ((x) << S_T7_CLSTRCMATCH)
+#define F_T7_CLSTRCMATCH V_T7_CLSTRCMATCH(1U)
+
+#define S_T7_CLSTRCINDEX 12
+#define M_T7_CLSTRCINDEX 0x7ffU
+#define V_T7_CLSTRCINDEX(x) ((x) << S_T7_CLSTRCINDEX)
+#define G_T7_CLSTRCINDEX(x) (((x) >> S_T7_CLSTRCINDEX) & M_T7_CLSTRCINDEX)
+
#define A_MPS_CLS_VLAN_TABLE 0xdfc0
#define S_VLAN_MASK 16
@@ -34536,24 +43313,6 @@
#define V_T6_SRAM_VLD(x) ((x) << S_T6_SRAM_VLD)
#define F_T6_SRAM_VLD V_T6_SRAM_VLD(1U)
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_CLS_SRAM_H 0xe004
#define S_MACPARITY1 9
@@ -34580,6 +43339,41 @@
#define V_MACPARITY2(x) ((x) << S_MACPARITY2)
#define F_MACPARITY2 V_MACPARITY2(1U)
+#define S_SRAMWRN 31
+#define V_SRAMWRN(x) ((x) << S_SRAMWRN)
+#define F_SRAMWRN V_SRAMWRN(1U)
+
+#define S_SRAMSPARE 27
+#define M_SRAMSPARE 0xfU
+#define V_SRAMSPARE(x) ((x) << S_SRAMSPARE)
+#define G_SRAMSPARE(x) (((x) >> S_SRAMSPARE) & M_SRAMSPARE)
+
+#define S_SRAMINDEX 16
+#define M_SRAMINDEX 0x7ffU
+#define V_SRAMINDEX(x) ((x) << S_SRAMINDEX)
+#define G_SRAMINDEX(x) (((x) >> S_SRAMINDEX) & M_SRAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_CTL 0xe008
+
+#define S_T7_CTLCMDTYPE 15
+#define V_T7_CTLCMDTYPE(x) ((x) << S_T7_CTLCMDTYPE)
+#define F_T7_CTLCMDTYPE V_T7_CTLCMDTYPE(1U)
+
+#define S_T7_CTLXYBITSEL 12
+#define V_T7_CTLXYBITSEL(x) ((x) << S_T7_CTLXYBITSEL)
+#define F_T7_CTLXYBITSEL V_T7_CTLXYBITSEL(1U)
+
+#define S_T7_CTLTCAMINDEX 0
+#define M_T7_CTLTCAMINDEX 0x1ffU
+#define V_T7_CTLTCAMINDEX(x) ((x) << S_T7_CTLTCAMINDEX)
+#define G_T7_CTLTCAMINDEX(x) (((x) >> S_T7_CTLTCAMINDEX) & M_T7_CTLTCAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_DATA 0xe00c
+
+#define S_LKPTYPE 24
+#define V_LKPTYPE(x) ((x) << S_LKPTYPE)
+#define F_LKPTYPE V_LKPTYPE(1U)
+
#define A_MPS_CLS_TCAM_Y_L 0xf000
#define A_MPS_CLS_TCAM_DATA0 0xf000
#define A_MPS_CLS_TCAM_Y_H 0xf004
@@ -34648,6 +43442,16 @@
#define V_DATAVIDH1(x) ((x) << S_DATAVIDH1)
#define G_DATAVIDH1(x) (((x) >> S_DATAVIDH1) & M_DATAVIDH1)
+#define S_T7_CTLTCAMSEL 26
+#define M_T7_CTLTCAMSEL 0x3U
+#define V_T7_CTLTCAMSEL(x) ((x) << S_T7_CTLTCAMSEL)
+#define G_T7_CTLTCAMSEL(x) (((x) >> S_T7_CTLTCAMSEL) & M_T7_CTLTCAMSEL)
+
+#define S_T7_1_CTLTCAMINDEX 17
+#define M_T7_1_CTLTCAMINDEX 0x1ffU
+#define V_T7_1_CTLTCAMINDEX(x) ((x) << S_T7_1_CTLTCAMINDEX)
+#define G_T7_1_CTLTCAMINDEX(x) (((x) >> S_T7_1_CTLTCAMINDEX) & M_T7_1_CTLTCAMINDEX)
+
#define A_MPS_CLS_TCAM_X_H 0xf00c
#define S_TCAMXH 0
@@ -34656,11 +43460,47 @@
#define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH)
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID0 0xf010
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID0 0xf010
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID0 0xf014
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID0 0xf014
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID1 0xf01c
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1 0xf020
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID1 0xf020
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1 0xf024
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID1 0xf024
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1 0xf028
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID0 0xf028
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID0 0xf02c
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID0 0xf030
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID1 0xf034
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID1 0xf038
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID1 0xf03c
+#define A_MPS_CLS_TCAM0_MASK_REG0 0xf040
+#define A_MPS_CLS_TCAM0_MASK_REG1 0xf044
+#define A_MPS_CLS_TCAM0_MASK_REG2 0xf048
+
+#define S_MASK_0_2 0
+#define M_MASK_0_2 0xffffU
+#define V_MASK_0_2(x) ((x) << S_MASK_0_2)
+#define G_MASK_0_2(x) (((x) >> S_MASK_0_2) & M_MASK_0_2)
+
+#define A_MPS_CLS_TCAM1_MASK_REG0 0xf04c
+#define A_MPS_CLS_TCAM1_MASK_REG1 0xf050
+#define A_MPS_CLS_TCAM1_MASK_REG2 0xf054
+
+#define S_MASK_1_2 0
+#define M_MASK_1_2 0xffffU
+#define V_MASK_1_2(x) ((x) << S_MASK_1_2)
+#define G_MASK_1_2(x) (((x) >> S_MASK_1_2) & M_MASK_1_2)
+
+#define A_MPS_CLS_TCAM_BIST_CTRL 0xf058
+#define A_MPS_CLS_TCAM_BIST_CB_PASS 0xf05c
+#define A_MPS_CLS_TCAM_BIST_CB_BUSY 0xf060
+#define A_MPS_CLS_TCAM2_MASK_REG0 0xf064
+#define A_MPS_CLS_TCAM2_MASK_REG1 0xf068
+#define A_MPS_CLS_TCAM2_MASK_REG2 0xf06c
#define A_MPS_RX_CTL 0x11000
#define S_FILT_VLAN_SEL 17
@@ -34686,6 +43526,14 @@
#define V_SNF(x) ((x) << S_SNF)
#define G_SNF(x) (((x) >> S_SNF) & M_SNF)
+#define S_HASH_TCAM_EN 19
+#define V_HASH_TCAM_EN(x) ((x) << S_HASH_TCAM_EN)
+#define F_HASH_TCAM_EN V_HASH_TCAM_EN(1U)
+
+#define S_SND_ORG_PFVF 18
+#define V_SND_ORG_PFVF(x) ((x) << S_SND_ORG_PFVF)
+#define F_SND_ORG_PFVF V_SND_ORG_PFVF(1U)
+
#define A_MPS_RX_PORT_MUX_CTL 0x11004
#define S_CTL_P3 12
@@ -34877,6 +43725,11 @@
#define V_THRESH(x) ((x) << S_THRESH)
#define G_THRESH(x) (((x) >> S_THRESH) & M_THRESH)
+#define S_T7_THRESH 0
+#define M_T7_THRESH 0xfffU
+#define V_T7_THRESH(x) ((x) << S_T7_THRESH)
+#define G_T7_THRESH(x) (((x) >> S_T7_THRESH) & M_T7_THRESH)
+
#define A_MPS_RX_LPBK_BP1 0x11060
#define A_MPS_RX_LPBK_BP2 0x11064
#define A_MPS_RX_LPBK_BP3 0x11068
@@ -34888,6 +43741,12 @@
#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
#define A_MPS_RX_CHMN_CNT 0x11070
+#define A_MPS_CTL_STAT 0x11070
+
+#define S_T7_CTL 0
+#define V_T7_CTL(x) ((x) << S_T7_CTL)
+#define F_T7_CTL V_T7_CTL(1U)
+
#define A_MPS_RX_PERR_INT_CAUSE 0x11074
#define S_FF 23
@@ -34990,18 +43849,54 @@
#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+#define S_MAC_IN_FIFO_768B 30
+#define V_MAC_IN_FIFO_768B(x) ((x) << S_MAC_IN_FIFO_768B)
+#define F_MAC_IN_FIFO_768B V_MAC_IN_FIFO_768B(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_T7_1_INT_ERR_INT 29
+#define V_T7_1_INT_ERR_INT(x) ((x) << S_T7_1_INT_ERR_INT)
+#define F_T7_1_INT_ERR_INT V_T7_1_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_ENABLE 0x1107c
+#define S_FLOP_PERR 28
+#define V_FLOP_PERR(x) ((x) << S_FLOP_PERR)
+#define F_FLOP_PERR V_FLOP_PERR(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_RPLC_MAP 13
+#define M_RPLC_MAP 0x1fU
+#define V_RPLC_MAP(x) ((x) << S_RPLC_MAP)
+#define G_RPLC_MAP(x) (((x) >> S_RPLC_MAP) & M_RPLC_MAP)
+
+#define S_TKN_RUNT_DROP_FIFO 12
+#define V_TKN_RUNT_DROP_FIFO(x) ((x) << S_TKN_RUNT_DROP_FIFO)
+#define F_TKN_RUNT_DROP_FIFO V_TKN_RUNT_DROP_FIFO(1U)
+
+#define S_T7_PPM3 9
+#define M_T7_PPM3 0x7U
+#define V_T7_PPM3(x) ((x) << S_T7_PPM3)
+#define G_T7_PPM3(x) (((x) >> S_T7_PPM3) & M_T7_PPM3)
+
+#define S_T7_PPM2 6
+#define M_T7_PPM2 0x7U
+#define V_T7_PPM2(x) ((x) << S_T7_PPM2)
+#define G_T7_PPM2(x) (((x) >> S_T7_PPM2) & M_T7_PPM2)
+
+#define S_T7_PPM1 3
+#define M_T7_PPM1 0x7U
+#define V_T7_PPM1(x) ((x) << S_T7_PPM1)
+#define G_T7_PPM1(x) (((x) >> S_T7_PPM1) & M_T7_PPM1)
+
+#define S_T7_PPM0 0
+#define M_T7_PPM0 0x7U
+#define V_T7_PPM0(x) ((x) << S_T7_PPM0)
+#define G_T7_PPM0(x) (((x) >> S_T7_PPM0) & M_T7_PPM0)
+
+#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+
+#define S_T7_2_INT_ERR_INT 30
+#define V_T7_2_INT_ERR_INT(x) ((x) << S_T7_2_INT_ERR_INT)
+#define F_T7_2_INT_ERR_INT V_T7_2_INT_ERR_INT(1U)
+
+#define A_MPS_RX_PERR_ENABLE 0x1107c
#define A_MPS_RX_PERR_INJECT 0x11080
#define A_MPS_RX_FUNC_INT_CAUSE 0x11084
@@ -35083,8 +43978,43 @@
#define V_TH_LOW(x) ((x) << S_TH_LOW)
#define G_TH_LOW(x) (((x) >> S_TH_LOW) & M_TH_LOW)
+#define A_MPS_RX_PERR_INT_CAUSE2 0x1108c
+
+#define S_CRYPT2MPS_RX_INTF_FIFO 28
+#define M_CRYPT2MPS_RX_INTF_FIFO 0xfU
+#define V_CRYPT2MPS_RX_INTF_FIFO(x) ((x) << S_CRYPT2MPS_RX_INTF_FIFO)
+#define G_CRYPT2MPS_RX_INTF_FIFO(x) (((x) >> S_CRYPT2MPS_RX_INTF_FIFO) & M_CRYPT2MPS_RX_INTF_FIFO)
+
+#define S_INIC2MPS_TX0_PERR 27
+#define V_INIC2MPS_TX0_PERR(x) ((x) << S_INIC2MPS_TX0_PERR)
+#define F_INIC2MPS_TX0_PERR V_INIC2MPS_TX0_PERR(1U)
+
+#define S_INIC2MPS_TX1_PERR 26
+#define V_INIC2MPS_TX1_PERR(x) ((x) << S_INIC2MPS_TX1_PERR)
+#define F_INIC2MPS_TX1_PERR V_INIC2MPS_TX1_PERR(1U)
+
+#define S_XGMAC2MPS_RX0_PERR 25
+#define V_XGMAC2MPS_RX0_PERR(x) ((x) << S_XGMAC2MPS_RX0_PERR)
+#define F_XGMAC2MPS_RX0_PERR V_XGMAC2MPS_RX0_PERR(1U)
+
+#define S_XGMAC2MPS_RX1_PERR 24
+#define V_XGMAC2MPS_RX1_PERR(x) ((x) << S_XGMAC2MPS_RX1_PERR)
+#define F_XGMAC2MPS_RX1_PERR V_XGMAC2MPS_RX1_PERR(1U)
+
+#define S_MPS2CRYPTO_RX_INTF_FIFO 20
+#define M_MPS2CRYPTO_RX_INTF_FIFO 0xfU
+#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
+#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
+
+#define S_RX_PRE_PROC_PERR 9
+#define M_RX_PRE_PROC_PERR 0x7ffU
+#define V_RX_PRE_PROC_PERR(x) ((x) << S_RX_PRE_PROC_PERR)
+#define G_RX_PRE_PROC_PERR(x) (((x) >> S_RX_PRE_PROC_PERR) & M_RX_PRE_PROC_PERR)
+
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
+#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
#define A_MPS_RX_PAUSE_GEN_TH_2 0x11094
+#define A_MPS_RX_PERR_ENABLE2 0x11094
#define A_MPS_RX_PAUSE_GEN_TH_3 0x11098
#define A_MPS_RX_REPL_CTL 0x11098
@@ -35126,10 +44056,13 @@
#define A_MPS_RX_PT_ARB1 0x110ac
#define A_MPS_RX_PT_ARB2 0x110b0
+#define A_T7_MPS_RX_PT_ARB4 0x110b0
#define A_MPS_RX_PT_ARB3 0x110b4
#define A_T6_MPS_PF_OUT_EN 0x110b4
+#define A_T7_MPS_PF_OUT_EN 0x110b4
#define A_MPS_RX_PT_ARB4 0x110b8
#define A_T6_MPS_BMC_MTU 0x110b8
+#define A_T7_MPS_BMC_MTU 0x110b8
#define A_MPS_PF_OUT_EN 0x110bc
#define S_OUTEN 0
@@ -35138,6 +44071,7 @@
#define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN)
#define A_T6_MPS_BMC_PKT_CNT 0x110bc
+#define A_T7_MPS_BMC_PKT_CNT 0x110bc
#define A_MPS_BMC_MTU 0x110c0
#define S_MTU 0
@@ -35146,6 +44080,7 @@
#define G_MTU(x) (((x) >> S_MTU) & M_MTU)
#define A_T6_MPS_BMC_BYTE_CNT 0x110c0
+#define A_T7_MPS_BMC_BYTE_CNT 0x110c0
#define A_MPS_BMC_PKT_CNT 0x110c4
#define A_T6_MPS_PFVF_ATRB_CTL 0x110c4
@@ -35154,6 +44089,7 @@
#define V_T6_PFVF(x) ((x) << S_T6_PFVF)
#define G_T6_PFVF(x) (((x) >> S_T6_PFVF) & M_T6_PFVF)
+#define A_T7_MPS_PFVF_ATRB_CTL 0x110c4
#define A_MPS_BMC_BYTE_CNT 0x110c8
#define A_T6_MPS_PFVF_ATRB 0x110c8
@@ -35161,6 +44097,12 @@
#define V_FULL_FRAME_MODE(x) ((x) << S_FULL_FRAME_MODE)
#define F_FULL_FRAME_MODE V_FULL_FRAME_MODE(1U)
+#define A_T7_MPS_PFVF_ATRB 0x110c8
+
+#define S_EXTRACT_DEL_VLAN 31
+#define V_EXTRACT_DEL_VLAN(x) ((x) << S_EXTRACT_DEL_VLAN)
+#define F_EXTRACT_DEL_VLAN V_EXTRACT_DEL_VLAN(1U)
+
#define A_MPS_PFVF_ATRB_CTL 0x110cc
#define S_RD_WRN 31
@@ -35173,6 +44115,7 @@
#define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF)
#define A_T6_MPS_PFVF_ATRB_FLTR0 0x110cc
+#define A_T7_MPS_PFVF_ATRB_FLTR0 0x110cc
#define A_MPS_PFVF_ATRB 0x110d0
#define S_ATTR_PF 28
@@ -35193,6 +44136,7 @@
#define F_ATTR_MODE V_ATTR_MODE(1U)
#define A_T6_MPS_PFVF_ATRB_FLTR1 0x110d0
+#define A_T7_MPS_PFVF_ATRB_FLTR1 0x110d0
#define A_MPS_PFVF_ATRB_FLTR0 0x110d4
#define S_VLAN_EN 16
@@ -35205,36 +44149,58 @@
#define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID)
#define A_T6_MPS_PFVF_ATRB_FLTR2 0x110d4
+#define A_T7_MPS_PFVF_ATRB_FLTR2 0x110d4
#define A_MPS_PFVF_ATRB_FLTR1 0x110d8
#define A_T6_MPS_PFVF_ATRB_FLTR3 0x110d8
+#define A_T7_MPS_PFVF_ATRB_FLTR3 0x110d8
#define A_MPS_PFVF_ATRB_FLTR2 0x110dc
#define A_T6_MPS_PFVF_ATRB_FLTR4 0x110dc
+#define A_T7_MPS_PFVF_ATRB_FLTR4 0x110dc
#define A_MPS_PFVF_ATRB_FLTR3 0x110e0
#define A_T6_MPS_PFVF_ATRB_FLTR5 0x110e0
+#define A_T7_MPS_PFVF_ATRB_FLTR5 0x110e0
#define A_MPS_PFVF_ATRB_FLTR4 0x110e4
#define A_T6_MPS_PFVF_ATRB_FLTR6 0x110e4
+#define A_T7_MPS_PFVF_ATRB_FLTR6 0x110e4
#define A_MPS_PFVF_ATRB_FLTR5 0x110e8
#define A_T6_MPS_PFVF_ATRB_FLTR7 0x110e8
+#define A_T7_MPS_PFVF_ATRB_FLTR7 0x110e8
#define A_MPS_PFVF_ATRB_FLTR6 0x110ec
#define A_T6_MPS_PFVF_ATRB_FLTR8 0x110ec
+#define A_T7_MPS_PFVF_ATRB_FLTR8 0x110ec
#define A_MPS_PFVF_ATRB_FLTR7 0x110f0
#define A_T6_MPS_PFVF_ATRB_FLTR9 0x110f0
+#define A_T7_MPS_PFVF_ATRB_FLTR9 0x110f0
#define A_MPS_PFVF_ATRB_FLTR8 0x110f4
#define A_T6_MPS_PFVF_ATRB_FLTR10 0x110f4
+#define A_T7_MPS_PFVF_ATRB_FLTR10 0x110f4
#define A_MPS_PFVF_ATRB_FLTR9 0x110f8
#define A_T6_MPS_PFVF_ATRB_FLTR11 0x110f8
+#define A_T7_MPS_PFVF_ATRB_FLTR11 0x110f8
#define A_MPS_PFVF_ATRB_FLTR10 0x110fc
#define A_T6_MPS_PFVF_ATRB_FLTR12 0x110fc
+#define A_T7_MPS_PFVF_ATRB_FLTR12 0x110fc
#define A_MPS_PFVF_ATRB_FLTR11 0x11100
#define A_T6_MPS_PFVF_ATRB_FLTR13 0x11100
+#define A_T7_MPS_PFVF_ATRB_FLTR13 0x11100
#define A_MPS_PFVF_ATRB_FLTR12 0x11104
#define A_T6_MPS_PFVF_ATRB_FLTR14 0x11104
+#define A_T7_MPS_PFVF_ATRB_FLTR14 0x11104
#define A_MPS_PFVF_ATRB_FLTR13 0x11108
#define A_T6_MPS_PFVF_ATRB_FLTR15 0x11108
+#define A_T7_MPS_PFVF_ATRB_FLTR15 0x11108
#define A_MPS_PFVF_ATRB_FLTR14 0x1110c
#define A_T6_MPS_RPLC_MAP_CTL 0x1110c
+#define A_T7_MPS_RPLC_MAP_CTL 0x1110c
+
+#define S_T7_RPLC_MAP_ADDR 0
+#define M_T7_RPLC_MAP_ADDR 0xfffU
+#define V_T7_RPLC_MAP_ADDR(x) ((x) << S_T7_RPLC_MAP_ADDR)
+#define G_T7_RPLC_MAP_ADDR(x) (((x) >> S_T7_RPLC_MAP_ADDR) & M_T7_RPLC_MAP_ADDR)
+
#define A_MPS_PFVF_ATRB_FLTR15 0x11110
#define A_T6_MPS_PF_RPLCT_MAP 0x11110
+#define A_T7_MPS_PF_RPLCT_MAP 0x11110
#define A_MPS_RPLC_MAP_CTL 0x11114
#define S_RPLC_MAP_ADDR 0
@@ -35243,6 +44209,7 @@
#define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR)
#define A_T6_MPS_VF_RPLCT_MAP0 0x11114
+#define A_T7_MPS_VF_RPLCT_MAP0 0x11114
#define A_MPS_PF_RPLCT_MAP 0x11118
#define S_PF_EN 0
@@ -35251,10 +44218,13 @@
#define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN)
#define A_T6_MPS_VF_RPLCT_MAP1 0x11118
+#define A_T7_MPS_VF_RPLCT_MAP1 0x11118
#define A_MPS_VF_RPLCT_MAP0 0x1111c
#define A_T6_MPS_VF_RPLCT_MAP2 0x1111c
+#define A_T7_MPS_VF_RPLCT_MAP2 0x1111c
#define A_MPS_VF_RPLCT_MAP1 0x11120
#define A_T6_MPS_VF_RPLCT_MAP3 0x11120
+#define A_T7_MPS_VF_RPLCT_MAP3 0x11120
#define A_MPS_VF_RPLCT_MAP2 0x11124
#define A_MPS_VF_RPLCT_MAP3 0x11128
#define A_MPS_MEM_DBG_CTL 0x1112c
@@ -35629,9 +44599,13 @@
#define V_CONG_TH(x) ((x) << S_CONG_TH)
#define G_CONG_TH(x) (((x) >> S_CONG_TH) & M_CONG_TH)
+#define A_MPS_RX_LPBK_BG_PG_CNT2 0x11220
#define A_MPS_RX_CONGESTION_THRESHOLD_BG1 0x11224
+#define A_MPS_RX_LPBK_BG_PG_CNT3 0x11224
#define A_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11228
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG0 0x11228
#define A_MPS_RX_CONGESTION_THRESHOLD_BG3 0x1122c
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG1 0x1122c
#define A_MPS_RX_GRE_PROT_TYPE 0x11230
#define S_NVGRE_EN 9
@@ -35647,6 +44621,7 @@
#define V_GRE(x) ((x) << S_GRE)
#define G_GRE(x) (((x) >> S_GRE) & M_GRE)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11230
#define A_MPS_RX_VXLAN_TYPE 0x11234
#define S_VXLAN_EN 16
@@ -35658,6 +44633,7 @@
#define V_VXLAN(x) ((x) << S_VXLAN)
#define G_VXLAN(x) (((x) >> S_VXLAN) & M_VXLAN)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG3 0x11234
#define A_MPS_RX_GENEVE_TYPE 0x11238
#define S_GENEVE_EN 16
@@ -35669,12 +44645,14 @@
#define V_GENEVE(x) ((x) << S_GENEVE)
#define G_GENEVE(x) (((x) >> S_GENEVE) & M_GENEVE)
+#define A_T7_MPS_RX_GRE_PROT_TYPE 0x11238
#define A_MPS_RX_INNER_HDR_IVLAN 0x1123c
#define S_T6_IVLAN_EN 16
#define V_T6_IVLAN_EN(x) ((x) << S_T6_IVLAN_EN)
#define F_T6_IVLAN_EN V_T6_IVLAN_EN(1U)
+#define A_T7_MPS_RX_VXLAN_TYPE 0x1123c
#define A_MPS_RX_ENCAP_NVGRE 0x11240
#define S_ETYPE_EN 16
@@ -35686,13 +44664,9 @@
#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
+#define A_T7_MPS_RX_GENEVE_TYPE 0x11240
#define A_MPS_RX_ENCAP_GENEVE 0x11244
-
-#define S_T6_ETYPE 0
-#define M_T6_ETYPE 0xffffU
-#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
-#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
-
+#define A_T7_MPS_RX_INNER_HDR_IVLAN 0x11244
#define A_MPS_RX_TCP 0x11248
#define S_PROT_TYPE_EN 8
@@ -35704,8 +44678,11 @@
#define V_PROT_TYPE(x) ((x) << S_PROT_TYPE)
#define G_PROT_TYPE(x) (((x) >> S_PROT_TYPE) & M_PROT_TYPE)
+#define A_T7_MPS_RX_ENCAP_NVGRE 0x11248
#define A_MPS_RX_UDP 0x1124c
+#define A_T7_MPS_RX_ENCAP_GENEVE 0x1124c
#define A_MPS_RX_PAUSE 0x11250
+#define A_T7_MPS_RX_TCP 0x11250
#define A_MPS_RX_LENGTH 0x11254
#define S_SAP_VALUE 16
@@ -35718,6 +44695,7 @@
#define V_LENGTH_ETYPE(x) ((x) << S_LENGTH_ETYPE)
#define G_LENGTH_ETYPE(x) (((x) >> S_LENGTH_ETYPE) & M_LENGTH_ETYPE)
+#define A_T7_MPS_RX_UDP 0x11254
#define A_MPS_RX_CTL_ORG 0x11258
#define S_CTL_VALUE 24
@@ -35730,6 +44708,7 @@
#define V_ORG_VALUE(x) ((x) << S_ORG_VALUE)
#define G_ORG_VALUE(x) (((x) >> S_ORG_VALUE) & M_ORG_VALUE)
+#define A_T7_MPS_RX_PAUSE 0x11258
#define A_MPS_RX_IPV4 0x1125c
#define S_ETYPE_IPV4 0
@@ -35737,6 +44716,7 @@
#define V_ETYPE_IPV4(x) ((x) << S_ETYPE_IPV4)
#define G_ETYPE_IPV4(x) (((x) >> S_ETYPE_IPV4) & M_ETYPE_IPV4)
+#define A_T7_MPS_RX_LENGTH 0x1125c
#define A_MPS_RX_IPV6 0x11260
#define S_ETYPE_IPV6 0
@@ -35744,6 +44724,7 @@
#define V_ETYPE_IPV6(x) ((x) << S_ETYPE_IPV6)
#define G_ETYPE_IPV6(x) (((x) >> S_ETYPE_IPV6) & M_ETYPE_IPV6)
+#define A_T7_MPS_RX_CTL_ORG 0x11260
#define A_MPS_RX_TTL 0x11264
#define S_TTL_IPV4 10
@@ -35764,6 +44745,7 @@
#define V_TTL_CHK_EN_IPV6(x) ((x) << S_TTL_CHK_EN_IPV6)
#define F_TTL_CHK_EN_IPV6 V_TTL_CHK_EN_IPV6(1U)
+#define A_T7_MPS_RX_IPV4 0x11264
#define A_MPS_RX_DEFAULT_VNI 0x11268
#define S_VNI 0
@@ -35771,6 +44753,7 @@
#define V_VNI(x) ((x) << S_VNI)
#define G_VNI(x) (((x) >> S_VNI) & M_VNI)
+#define A_T7_MPS_RX_IPV6 0x11268
#define A_MPS_RX_PRS_CTL 0x1126c
#define S_CTL_CHK_EN 28
@@ -35821,6 +44804,7 @@
#define V_DIP_EN(x) ((x) << S_DIP_EN)
#define F_DIP_EN V_DIP_EN(1U)
+#define A_T7_MPS_RX_TTL 0x1126c
#define A_MPS_RX_PRS_CTL_2 0x11270
#define S_EN_UDP_CSUM_CHK 4
@@ -35843,7 +44827,9 @@
#define V_T6_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_T6_IPV6_UDP_CSUM_COMPAT)
#define F_T6_IPV6_UDP_CSUM_COMPAT V_T6_IPV6_UDP_CSUM_COMPAT(1U)
+#define A_T7_MPS_RX_DEFAULT_VNI 0x11270
#define A_MPS_RX_MPS2NCSI_CNT 0x11274
+#define A_T7_MPS_RX_PRS_CTL 0x11274
#define A_MPS_RX_MAX_TNL_HDR_LEN 0x11278
#define S_T6_LEN 0
@@ -35851,38 +44837,222 @@
#define V_T6_LEN(x) ((x) << S_T6_LEN)
#define G_T6_LEN(x) (((x) >> S_T6_LEN) & M_T6_LEN)
+#define A_T7_MPS_RX_PRS_CTL_2 0x11278
+
+#define S_IP_EXT_HDR_EN 5
+#define V_IP_EXT_HDR_EN(x) ((x) << S_IP_EXT_HDR_EN)
+#define F_IP_EXT_HDR_EN V_IP_EXT_HDR_EN(1U)
+
#define A_MPS_RX_PAUSE_DA_H 0x1127c
+#define A_T7_MPS_RX_MPS2NCSI_CNT 0x1127c
#define A_MPS_RX_PAUSE_DA_L 0x11280
+#define A_T7_MPS_RX_MAX_TNL_HDR_LEN 0x11280
+
+#define S_MPS_TNL_HDR_LEN_MODE 9
+#define V_MPS_TNL_HDR_LEN_MODE(x) ((x) << S_MPS_TNL_HDR_LEN_MODE)
+#define F_MPS_TNL_HDR_LEN_MODE V_MPS_TNL_HDR_LEN_MODE(1U)
+
+#define S_MPS_MAX_TNL_HDR_LEN 0
+#define M_MPS_MAX_TNL_HDR_LEN 0x1ffU
+#define V_MPS_MAX_TNL_HDR_LEN(x) ((x) << S_MPS_MAX_TNL_HDR_LEN)
+#define G_MPS_MAX_TNL_HDR_LEN(x) (((x) >> S_MPS_MAX_TNL_HDR_LEN) & M_MPS_MAX_TNL_HDR_LEN)
+
#define A_MPS_RX_CNT_NVGRE_PKT_MAC0 0x11284
+#define A_T7_MPS_RX_PAUSE_DA_H 0x11284
#define A_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11288
+#define A_T7_MPS_RX_PAUSE_DA_L 0x11288
#define A_MPS_RX_CNT_GENEVE_PKT_MAC0 0x1128c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC0 0x1128c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11290
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11290
#define A_MPS_RX_CNT_NVGRE_PKT_MAC1 0x11294
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC0 0x11294
#define A_MPS_RX_CNT_VXLAN_PKT_MAC1 0x11298
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11298
#define A_MPS_RX_CNT_GENEVE_PKT_MAC1 0x1129c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC1 0x1129c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC1 0x112a0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112a4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC1 0x112a4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112a8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112ac
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112ac
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112b0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112b4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112b4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112b8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112bc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112bc
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112c0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112c4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112c4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112c8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112cc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112cc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112d0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112d4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112d4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112d8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112dc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112dc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112e4
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e8
+#define A_MPS_RX_ESP 0x112ec
+#define A_MPS_EN_LPBK_BLK_SNDR 0x112f0
+
+#define S_EN_CH3 3
+#define V_EN_CH3(x) ((x) << S_EN_CH3)
+#define F_EN_CH3 V_EN_CH3(1U)
+
+#define S_EN_CH2 2
+#define V_EN_CH2(x) ((x) << S_EN_CH2)
+#define F_EN_CH2 V_EN_CH2(1U)
+
+#define S_EN_CH1 1
+#define V_EN_CH1(x) ((x) << S_EN_CH1)
+#define F_EN_CH1 V_EN_CH1(1U)
+
+#define S_EN_CH0 0
+#define V_EN_CH0(x) ((x) << S_EN_CH0)
+#define F_EN_CH0 V_EN_CH0(1U)
+
#define A_MPS_VF_RPLCT_MAP4 0x11300
#define A_MPS_VF_RPLCT_MAP5 0x11304
#define A_MPS_VF_RPLCT_MAP6 0x11308
#define A_MPS_VF_RPLCT_MAP7 0x1130c
+#define A_MPS_RX_PERR_INT_CAUSE3 0x11310
+#define A_MPS_RX_PERR_INT_ENABLE3 0x11314
+#define A_MPS_RX_PERR_ENABLE3 0x11318
+#define A_MPS_RX_PERR_INT_CAUSE4 0x1131c
+
+#define S_CLS 20
+#define M_CLS 0x3fU
+#define V_CLS(x) ((x) << S_CLS)
+#define G_CLS(x) (((x) >> S_CLS) & M_CLS)
+
+#define S_RX_PRE_PROC 16
+#define M_RX_PRE_PROC 0xfU
+#define V_RX_PRE_PROC(x) ((x) << S_RX_PRE_PROC)
+#define G_RX_PRE_PROC(x) (((x) >> S_RX_PRE_PROC) & M_RX_PRE_PROC)
+
+#define S_PPROC3 12
+#define M_PPROC3 0xfU
+#define V_PPROC3(x) ((x) << S_PPROC3)
+#define G_PPROC3(x) (((x) >> S_PPROC3) & M_PPROC3)
+
+#define S_PPROC2 8
+#define M_PPROC2 0xfU
+#define V_PPROC2(x) ((x) << S_PPROC2)
+#define G_PPROC2(x) (((x) >> S_PPROC2) & M_PPROC2)
+
+#define S_PPROC1 4
+#define M_PPROC1 0xfU
+#define V_PPROC1(x) ((x) << S_PPROC1)
+#define G_PPROC1(x) (((x) >> S_PPROC1) & M_PPROC1)
+
+#define S_PPROC0 0
+#define M_PPROC0 0xfU
+#define V_PPROC0(x) ((x) << S_PPROC0)
+#define G_PPROC0(x) (((x) >> S_PPROC0) & M_PPROC0)
+
+#define A_MPS_RX_PERR_INT_ENABLE4 0x11320
+#define A_MPS_RX_PERR_ENABLE4 0x11324
+#define A_MPS_RX_PERR_INT_CAUSE5 0x11328
+
+#define S_MPS2CRYP_RX_FIFO 26
+#define M_MPS2CRYP_RX_FIFO 0xfU
+#define V_MPS2CRYP_RX_FIFO(x) ((x) << S_MPS2CRYP_RX_FIFO)
+#define G_MPS2CRYP_RX_FIFO(x) (((x) >> S_MPS2CRYP_RX_FIFO) & M_MPS2CRYP_RX_FIFO)
+
+#define S_RX_OUT 20
+#define M_RX_OUT 0x3fU
+#define V_RX_OUT(x) ((x) << S_RX_OUT)
+#define G_RX_OUT(x) (((x) >> S_RX_OUT) & M_RX_OUT)
+
+#define S_MEM_WRAP 0
+#define M_MEM_WRAP 0xfffffU
+#define V_MEM_WRAP(x) ((x) << S_MEM_WRAP)
+#define G_MEM_WRAP(x) (((x) >> S_MEM_WRAP) & M_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE5 0x1132c
+#define A_MPS_RX_PERR_ENABLE5 0x11330
+#define A_MPS_RX_PERR_INT_CAUSE6 0x11334
+
+#define S_MPS_RX_MEM_WRAP 0
+#define M_MPS_RX_MEM_WRAP 0x1ffffffU
+#define V_MPS_RX_MEM_WRAP(x) ((x) << S_MPS_RX_MEM_WRAP)
+#define G_MPS_RX_MEM_WRAP(x) (((x) >> S_MPS_RX_MEM_WRAP) & M_MPS_RX_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE6 0x11338
+#define A_MPS_RX_PERR_ENABLE6 0x1133c
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC2 0x11408
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC2 0x1140c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC2 0x11410
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC2 0x11414
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC3 0x11418
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC3 0x1141c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC3 0x11420
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC3 0x11424
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK2 0x11428
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK2 0x1142c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK2 0x11430
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK2 0x11434
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK3 0x11438
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK3 0x1143c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK3 0x11440
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK3 0x11444
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP2 0x11448
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP2 0x1144c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP2 0x11450
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP2 0x11454
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP3 0x11458
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP3 0x1145c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP3 0x11460
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP3 0x11464
+#define A_T7_MPS_RX_PT_ARB2 0x11468
+#define A_T7_MPS_RX_PT_ARB3 0x1146c
#define A_MPS_CLS_DIPIPV4_ID_TABLE 0x12000
+#define A_MPS_CLS_DIP_ID_TABLE_CTL 0x12000
+
+#define S_DIP_VLD 12
+#define V_DIP_VLD(x) ((x) << S_DIP_VLD)
+#define F_DIP_VLD V_DIP_VLD(1U)
+
+#define S_DIP_TYPE 11
+#define V_DIP_TYPE(x) ((x) << S_DIP_TYPE)
+#define F_DIP_TYPE V_DIP_TYPE(1U)
+
+#define S_DIP_WRN 10
+#define V_DIP_WRN(x) ((x) << S_DIP_WRN)
+#define F_DIP_WRN V_DIP_WRN(1U)
+
+#define S_DIP_SEG 8
+#define M_DIP_SEG 0x3U
+#define V_DIP_SEG(x) ((x) << S_DIP_SEG)
+#define G_DIP_SEG(x) (((x) >> S_DIP_SEG) & M_DIP_SEG)
+
+#define S_DIP_TBL_RSVD1 5
+#define M_DIP_TBL_RSVD1 0x7U
+#define V_DIP_TBL_RSVD1(x) ((x) << S_DIP_TBL_RSVD1)
+#define G_DIP_TBL_RSVD1(x) (((x) >> S_DIP_TBL_RSVD1) & M_DIP_TBL_RSVD1)
+
+#define S_DIP_TBL_ADDR 0
+#define M_DIP_TBL_ADDR 0x1fU
+#define V_DIP_TBL_ADDR(x) ((x) << S_DIP_TBL_ADDR)
+#define G_DIP_TBL_ADDR(x) (((x) >> S_DIP_TBL_ADDR) & M_DIP_TBL_ADDR)
+
#define A_MPS_CLS_DIPIPV4_MASK_TABLE 0x12004
+#define A_MPS_CLS_DIP_ID_TABLE_DATA 0x12004
#define A_MPS_CLS_DIPIPV6ID_0_TABLE 0x12020
#define A_MPS_CLS_DIPIPV6ID_1_TABLE 0x12024
#define A_MPS_CLS_DIPIPV6ID_2_TABLE 0x12028
@@ -35892,6 +45062,226 @@
#define A_MPS_CLS_DIPIPV6MASK_2_TABLE 0x12038
#define A_MPS_CLS_DIPIPV6MASK_3_TABLE 0x1203c
#define A_MPS_RX_HASH_LKP_TABLE 0x12060
+#define A_MPS_CLS_DROP_DMAC0_L 0x12070
+#define A_MPS_CLS_DROP_DMAC0_H 0x12074
+
+#define S_DMAC 0
+#define M_DMAC 0xffffU
+#define V_DMAC(x) ((x) << S_DMAC)
+#define G_DMAC(x) (((x) >> S_DMAC) & M_DMAC)
+
+#define A_MPS_CLS_DROP_DMAC1_L 0x12078
+#define A_MPS_CLS_DROP_DMAC1_H 0x1207c
+#define A_MPS_CLS_DROP_DMAC2_L 0x12080
+#define A_MPS_CLS_DROP_DMAC2_H 0x12084
+#define A_MPS_CLS_DROP_DMAC3_L 0x12088
+#define A_MPS_CLS_DROP_DMAC3_H 0x1208c
+#define A_MPS_CLS_DROP_DMAC4_L 0x12090
+#define A_MPS_CLS_DROP_DMAC4_H 0x12094
+#define A_MPS_CLS_DROP_DMAC5_L 0x12098
+#define A_MPS_CLS_DROP_DMAC5_H 0x1209c
+#define A_MPS_CLS_DROP_DMAC6_L 0x120a0
+#define A_MPS_CLS_DROP_DMAC6_H 0x120a4
+#define A_MPS_CLS_DROP_DMAC7_L 0x120a8
+#define A_MPS_CLS_DROP_DMAC7_H 0x120ac
+#define A_MPS_CLS_DROP_DMAC8_L 0x120b0
+#define A_MPS_CLS_DROP_DMAC8_H 0x120b4
+#define A_MPS_CLS_DROP_DMAC9_L 0x120b8
+#define A_MPS_CLS_DROP_DMAC9_H 0x120bc
+#define A_MPS_CLS_DROP_DMAC10_L 0x120c0
+#define A_MPS_CLS_DROP_DMAC10_H 0x120c4
+#define A_MPS_CLS_DROP_DMAC11_L 0x120c8
+#define A_MPS_CLS_DROP_DMAC11_H 0x120cc
+#define A_MPS_CLS_DROP_DMAC12_L 0x120d0
+#define A_MPS_CLS_DROP_DMAC12_H 0x120d4
+#define A_MPS_CLS_DROP_DMAC13_L 0x120d8
+#define A_MPS_CLS_DROP_DMAC13_H 0x120dc
+#define A_MPS_CLS_DROP_DMAC14_L 0x120e0
+#define A_MPS_CLS_DROP_DMAC14_H 0x120e4
+#define A_MPS_CLS_DROP_DMAC15_L 0x120e8
+#define A_MPS_CLS_DROP_DMAC15_H 0x120ec
+#define A_MPS_RX_ENCAP_VXLAN 0x120f0
+#define A_MPS_RX_INT_VXLAN 0x120f4
+
+#define S_INT_TYPE_EN 16
+#define V_INT_TYPE_EN(x) ((x) << S_INT_TYPE_EN)
+#define F_INT_TYPE_EN V_INT_TYPE_EN(1U)
+
+#define S_INT_TYPE 0
+#define M_INT_TYPE 0xffffU
+#define V_INT_TYPE(x) ((x) << S_INT_TYPE)
+#define G_INT_TYPE(x) (((x) >> S_INT_TYPE) & M_INT_TYPE)
+
+#define A_MPS_RX_INT_GENEVE 0x120f8
+#define A_MPS_PFVF_ATRB2 0x120fc
+
+#define S_EXTRACT_DEL_ENCAP 31
+#define V_EXTRACT_DEL_ENCAP(x) ((x) << S_EXTRACT_DEL_ENCAP)
+#define F_EXTRACT_DEL_ENCAP V_EXTRACT_DEL_ENCAP(1U)
+
+#define A_MPS_RX_TRANS_ENCAP_FLTR_CTL 0x12100
+
+#define S_TIMEOUT_FLT_CLR_EN 8
+#define V_TIMEOUT_FLT_CLR_EN(x) ((x) << S_TIMEOUT_FLT_CLR_EN)
+#define F_TIMEOUT_FLT_CLR_EN V_TIMEOUT_FLT_CLR_EN(1U)
+
+#define S_FLTR_TIMEOUT_VAL 0
+#define M_FLTR_TIMEOUT_VAL 0xffU
+#define V_FLTR_TIMEOUT_VAL(x) ((x) << S_FLTR_TIMEOUT_VAL)
+#define G_FLTR_TIMEOUT_VAL(x) (((x) >> S_FLTR_TIMEOUT_VAL) & M_FLTR_TIMEOUT_VAL)
+
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_0 0x12104
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_1 0x12108
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_2 0x1210c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_3 0x12110
+#define A_MPS_RX_PAUSE_GEN_TH_0_4 0x12114
+#define A_MPS_RX_PAUSE_GEN_TH_0_5 0x12118
+#define A_MPS_RX_PAUSE_GEN_TH_0_6 0x1211c
+#define A_MPS_RX_PAUSE_GEN_TH_0_7 0x12120
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_0 0x12124
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_1 0x12128
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_2 0x1212c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_3 0x12130
+#define A_MPS_RX_PAUSE_GEN_TH_1_4 0x12134
+#define A_MPS_RX_PAUSE_GEN_TH_1_5 0x12138
+#define A_MPS_RX_PAUSE_GEN_TH_1_6 0x1213c
+#define A_MPS_RX_PAUSE_GEN_TH_1_7 0x12140
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_0 0x12144
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_1 0x12148
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_2 0x1214c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_3 0x12150
+#define A_MPS_RX_PAUSE_GEN_TH_2_4 0x12154
+#define A_MPS_RX_PAUSE_GEN_TH_2_5 0x12158
+#define A_MPS_RX_PAUSE_GEN_TH_2_6 0x1215c
+#define A_MPS_RX_PAUSE_GEN_TH_2_7 0x12160
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_0 0x12164
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_1 0x12168
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_2 0x1216c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_3 0x12170
+#define A_MPS_RX_PAUSE_GEN_TH_3_4 0x12174
+#define A_MPS_RX_PAUSE_GEN_TH_3_5 0x12178
+#define A_MPS_RX_PAUSE_GEN_TH_3_6 0x1217c
+#define A_MPS_RX_PAUSE_GEN_TH_3_7 0x12180
+#define A_MPS_RX_DROP_0_0 0x12184
+
+#define S_DROP_TH 0
+#define M_DROP_TH 0xffffU
+#define V_DROP_TH(x) ((x) << S_DROP_TH)
+#define G_DROP_TH(x) (((x) >> S_DROP_TH) & M_DROP_TH)
+
+#define A_MPS_RX_DROP_0_1 0x12188
+#define A_MPS_RX_DROP_0_2 0x1218c
+#define A_MPS_RX_DROP_0_3 0x12190
+#define A_MPS_RX_DROP_0_4 0x12194
+#define A_MPS_RX_DROP_0_5 0x12198
+#define A_MPS_RX_DROP_0_6 0x1219c
+#define A_MPS_RX_DROP_0_7 0x121a0
+#define A_MPS_RX_DROP_1_0 0x121a4
+#define A_MPS_RX_DROP_1_1 0x121a8
+#define A_MPS_RX_DROP_1_2 0x121ac
+#define A_MPS_RX_DROP_1_3 0x121b0
+#define A_MPS_RX_DROP_1_4 0x121b4
+#define A_MPS_RX_DROP_1_5 0x121b8
+#define A_MPS_RX_DROP_1_6 0x121bc
+#define A_MPS_RX_DROP_1_7 0x121c0
+#define A_MPS_RX_DROP_2_0 0x121c4
+#define A_MPS_RX_DROP_2_1 0x121c8
+#define A_MPS_RX_DROP_2_2 0x121cc
+#define A_MPS_RX_DROP_2_3 0x121d0
+#define A_MPS_RX_DROP_2_4 0x121d4
+#define A_MPS_RX_DROP_2_5 0x121d8
+#define A_MPS_RX_DROP_2_6 0x121dc
+#define A_MPS_RX_DROP_2_7 0x121e0
+#define A_MPS_RX_DROP_3_0 0x121e4
+#define A_MPS_RX_DROP_3_1 0x121e8
+#define A_MPS_RX_DROP_3_2 0x121ec
+#define A_MPS_RX_DROP_3_3 0x121f0
+#define A_MPS_RX_DROP_3_4 0x121f4
+#define A_MPS_RX_DROP_3_5 0x121f8
+#define A_MPS_RX_DROP_3_6 0x121fc
+#define A_MPS_RX_DROP_3_7 0x12200
+#define A_MPS_RX_MAC_BG_PG_CNT0_0 0x12204
+#define A_MPS_RX_MAC_BG_PG_CNT0_1 0x12208
+#define A_MPS_RX_MAC_BG_PG_CNT0_2 0x1220c
+#define A_MPS_RX_MAC_BG_PG_CNT0_3 0x12210
+#define A_MPS_RX_MAC_BG_PG_CNT0_4 0x12214
+#define A_MPS_RX_MAC_BG_PG_CNT0_5 0x12218
+#define A_MPS_RX_MAC_BG_PG_CNT0_6 0x1221c
+#define A_MPS_RX_MAC_BG_PG_CNT0_7 0x12220
+#define A_MPS_RX_MAC_BG_PG_CNT1_0 0x12224
+#define A_MPS_RX_MAC_BG_PG_CNT1_1 0x12228
+#define A_MPS_RX_MAC_BG_PG_CNT1_2 0x1222c
+#define A_MPS_RX_MAC_BG_PG_CNT1_3 0x12230
+#define A_MPS_RX_MAC_BG_PG_CNT1_4 0x12234
+#define A_MPS_RX_MAC_BG_PG_CNT1_5 0x12238
+#define A_MPS_RX_MAC_BG_PG_CNT1_6 0x1223c
+#define A_MPS_RX_MAC_BG_PG_CNT1_7 0x12240
+#define A_MPS_RX_MAC_BG_PG_CNT2_0 0x12244
+#define A_MPS_RX_MAC_BG_PG_CNT2_1 0x12248
+#define A_MPS_RX_MAC_BG_PG_CNT2_2 0x1224c
+#define A_MPS_RX_MAC_BG_PG_CNT2_3 0x12250
+#define A_MPS_RX_MAC_BG_PG_CNT2_4 0x12254
+#define A_MPS_RX_MAC_BG_PG_CNT2_5 0x12258
+#define A_MPS_RX_MAC_BG_PG_CNT2_6 0x1225c
+#define A_MPS_RX_MAC_BG_PG_CNT2_7 0x12260
+#define A_MPS_RX_MAC_BG_PG_CNT3_0 0x12264
+#define A_MPS_RX_MAC_BG_PG_CNT3_1 0x12268
+#define A_MPS_RX_MAC_BG_PG_CNT3_2 0x1226c
+#define A_MPS_RX_MAC_BG_PG_CNT3_3 0x12270
+#define A_MPS_RX_MAC_BG_PG_CNT3_4 0x12274
+#define A_MPS_RX_MAC_BG_PG_CNT3_5 0x12278
+#define A_MPS_RX_MAC_BG_PG_CNT3_6 0x1227c
+#define A_MPS_RX_MAC_BG_PG_CNT3_7 0x12280
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0 0x12284
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1 0x12288
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2 0x1228c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3 0x12290
+#define A_MPS_RX_BG0_IPSEC_CNT 0x12294
+#define A_MPS_RX_BG1_IPSEC_CNT 0x12298
+#define A_MPS_RX_BG2_IPSEC_CNT 0x1229c
+#define A_MPS_RX_BG3_IPSEC_CNT 0x122a0
+#define A_MPS_RX_MEM_FIFO_CONFIG0 0x122a4
+
+#define S_FIFO_CONFIG2 16
+#define M_FIFO_CONFIG2 0xffffU
+#define V_FIFO_CONFIG2(x) ((x) << S_FIFO_CONFIG2)
+#define G_FIFO_CONFIG2(x) (((x) >> S_FIFO_CONFIG2) & M_FIFO_CONFIG2)
+
+#define S_FIFO_CONFIG1 0
+#define M_FIFO_CONFIG1 0xffffU
+#define V_FIFO_CONFIG1(x) ((x) << S_FIFO_CONFIG1)
+#define G_FIFO_CONFIG1(x) (((x) >> S_FIFO_CONFIG1) & M_FIFO_CONFIG1)
+
+#define A_MPS_RX_MEM_FIFO_CONFIG1 0x122a8
+
+#define S_FIFO_CONFIG3 0
+#define M_FIFO_CONFIG3 0xffffU
+#define V_FIFO_CONFIG3(x) ((x) << S_FIFO_CONFIG3)
+#define G_FIFO_CONFIG3(x) (((x) >> S_FIFO_CONFIG3) & M_FIFO_CONFIG3)
+
+#define A_MPS_LPBK_MEM_FIFO_CONFIG0 0x122ac
+#define A_MPS_LPBK_MEM_FIFO_CONFIG1 0x122b0
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG0 0x122b4
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG1 0x122b8
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG2 0x122bc
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG3 0x122c0
+#define A_MPS_BG_PAUSE_CTL 0x122c4
+
+#define S_BG0_PAUSE_EN 3
+#define V_BG0_PAUSE_EN(x) ((x) << S_BG0_PAUSE_EN)
+#define F_BG0_PAUSE_EN V_BG0_PAUSE_EN(1U)
+
+#define S_BG1_PAUSE_EN 2
+#define V_BG1_PAUSE_EN(x) ((x) << S_BG1_PAUSE_EN)
+#define F_BG1_PAUSE_EN V_BG1_PAUSE_EN(1U)
+
+#define S_BG2_PAUSE_EN 1
+#define V_BG2_PAUSE_EN(x) ((x) << S_BG2_PAUSE_EN)
+#define F_BG2_PAUSE_EN V_BG2_PAUSE_EN(1U)
+
+#define S_BG3_PAUSE_EN 0
+#define V_BG3_PAUSE_EN(x) ((x) << S_BG3_PAUSE_EN)
+#define F_BG3_PAUSE_EN V_BG3_PAUSE_EN(1U)
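+
+/*
+ * Editorial aside, not part of the generated header: single-bit fields
+ * use F_FOO (== V_FOO(1U)) in place of an M_/G_ pair, so flags can be
+ * OR'd together directly.  A minimal sketch, assuming a
+ * t4_write_reg()-style accessor and "sc" as the adapter softc, that
+ * writes A_MPS_BG_PAUSE_CTL with pause enabled on buffer groups 0 and 3
+ * only (the BG1/BG2 bits are written as zero):
+ *
+ *	t4_write_reg(sc, A_MPS_BG_PAUSE_CTL,
+ *	    F_BG0_PAUSE_EN | F_BG3_PAUSE_EN);
+ */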
/* registers for module CPL_SWITCH */
#define CPL_SWITCH_BASE_ADDR 0x19040
@@ -35931,6 +45321,7 @@
#define V_CIM_SPLIT_ENABLE(x) ((x) << S_CIM_SPLIT_ENABLE)
#define F_CIM_SPLIT_ENABLE V_CIM_SPLIT_ENABLE(1U)
+#define A_CNTRL 0x19040
#define A_CPL_SWITCH_TBL_IDX 0x19044
#define S_SWITCH_TBL_IDX 0
@@ -35938,7 +45329,9 @@
#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+#define A_TBL_IDX 0x19044
#define A_CPL_SWITCH_TBL_DATA 0x19048
+#define A_TBL_DATA 0x19048
#define A_CPL_SWITCH_ZERO_ERROR 0x1904c
#define S_ZERO_CMD_CH1 8
@@ -35951,6 +45344,18 @@
#define V_ZERO_CMD_CH0(x) ((x) << S_ZERO_CMD_CH0)
#define G_ZERO_CMD_CH0(x) (((x) >> S_ZERO_CMD_CH0) & M_ZERO_CMD_CH0)
+#define A_ZERO_ERROR 0x1904c
+
+#define S_ZERO_CMD_CH3 24
+#define M_ZERO_CMD_CH3 0xffU
+#define V_ZERO_CMD_CH3(x) ((x) << S_ZERO_CMD_CH3)
+#define G_ZERO_CMD_CH3(x) (((x) >> S_ZERO_CMD_CH3) & M_ZERO_CMD_CH3)
+
+#define S_ZERO_CMD_CH2 16
+#define M_ZERO_CMD_CH2 0xffU
+#define V_ZERO_CMD_CH2(x) ((x) << S_ZERO_CMD_CH2)
+#define G_ZERO_CMD_CH2(x) (((x) >> S_ZERO_CMD_CH2) & M_ZERO_CMD_CH2)
+
#define A_CPL_INTR_ENABLE 0x19050
#define S_CIM_OP_MAP_PERR 5
@@ -35985,7 +45390,18 @@
#define V_PERR_CPL_128TO128_0(x) ((x) << S_PERR_CPL_128TO128_0)
#define F_PERR_CPL_128TO128_0 V_PERR_CPL_128TO128_0(1U)
+#define A_INTR_ENABLE 0x19050
+
+#define S_PERR_CPL_128TO128_3 9
+#define V_PERR_CPL_128TO128_3(x) ((x) << S_PERR_CPL_128TO128_3)
+#define F_PERR_CPL_128TO128_3 V_PERR_CPL_128TO128_3(1U)
+
+#define S_PERR_CPL_128TO128_2 8
+#define V_PERR_CPL_128TO128_2(x) ((x) << S_PERR_CPL_128TO128_2)
+#define F_PERR_CPL_128TO128_2 V_PERR_CPL_128TO128_2(1U)
+
#define A_CPL_INTR_CAUSE 0x19054
+#define A_INTR_CAUSE 0x19054
#define A_CPL_MAP_TBL_IDX 0x19058
#define S_MAP_TBL_IDX 0
@@ -35997,6 +45413,13 @@
#define V_CIM_SPLIT_OPCODE_PROGRAM(x) ((x) << S_CIM_SPLIT_OPCODE_PROGRAM)
#define F_CIM_SPLIT_OPCODE_PROGRAM V_CIM_SPLIT_OPCODE_PROGRAM(1U)
+#define A_MAP_TBL_IDX 0x19058
+
+#define S_CPL_MAP_TBL_SEL 9
+#define M_CPL_MAP_TBL_SEL 0x3U
+#define V_CPL_MAP_TBL_SEL(x) ((x) << S_CPL_MAP_TBL_SEL)
+#define G_CPL_MAP_TBL_SEL(x) (((x) >> S_CPL_MAP_TBL_SEL) & M_CPL_MAP_TBL_SEL)
+
#define A_CPL_MAP_TBL_DATA 0x1905c
#define S_MAP_TBL_DATA 0
@@ -36004,6 +45427,8 @@
#define V_MAP_TBL_DATA(x) ((x) << S_MAP_TBL_DATA)
#define G_MAP_TBL_DATA(x) (((x) >> S_MAP_TBL_DATA) & M_MAP_TBL_DATA)
+#define A_MAP_TBL_DATA 0x1905c
+
/* registers for module SMB */
#define SMB_BASE_ADDR 0x19060
@@ -36019,6 +45444,16 @@
#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+#define S_T7_MACROCNTCFG 12
+#define M_T7_MACROCNTCFG 0x1fU
+#define V_T7_MACROCNTCFG(x) ((x) << S_T7_MACROCNTCFG)
+#define G_T7_MACROCNTCFG(x) (((x) >> S_T7_MACROCNTCFG) & M_T7_MACROCNTCFG)
+
+#define S_T7_MICROCNTCFG 0
+#define M_T7_MICROCNTCFG 0xfffU
+#define V_T7_MICROCNTCFG(x) ((x) << S_T7_MICROCNTCFG)
+#define G_T7_MICROCNTCFG(x) (((x) >> S_T7_MICROCNTCFG) & M_T7_MICROCNTCFG)
+
#define A_SMB_MST_TIMEOUT_CFG 0x19064
#define S_MSTTIMEOUTCFG 0
@@ -36685,6 +46120,26 @@
#define V_UART_CLKDIV(x) ((x) << S_UART_CLKDIV)
#define G_UART_CLKDIV(x) (((x) >> S_UART_CLKDIV) & M_UART_CLKDIV)
+#define S_T7_STOPBITS 25
+#define M_T7_STOPBITS 0x3U
+#define V_T7_STOPBITS(x) ((x) << S_T7_STOPBITS)
+#define G_T7_STOPBITS(x) (((x) >> S_T7_STOPBITS) & M_T7_STOPBITS)
+
+#define S_T7_PARITY 23
+#define M_T7_PARITY 0x3U
+#define V_T7_PARITY(x) ((x) << S_T7_PARITY)
+#define G_T7_PARITY(x) (((x) >> S_T7_PARITY) & M_T7_PARITY)
+
+#define S_T7_DATABITS 19
+#define M_T7_DATABITS 0xfU
+#define V_T7_DATABITS(x) ((x) << S_T7_DATABITS)
+#define G_T7_DATABITS(x) (((x) >> S_T7_DATABITS) & M_T7_DATABITS)
+
+#define S_T7_UART_CLKDIV 0
+#define M_T7_UART_CLKDIV 0x3ffffU
+#define V_T7_UART_CLKDIV(x) ((x) << S_T7_UART_CLKDIV)
+#define G_T7_UART_CLKDIV(x) (((x) >> S_T7_UART_CLKDIV) & M_T7_UART_CLKDIV)
+
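+/*
+ * Editorial aside, not part of the generated header: where a T7_ variant
+ * of a field moves or widens it (as with the UART fields above), updates
+ * stay read-modify-write so neighbouring bits survive.  A minimal
+ * sketch, assuming t4_read_reg()/t4_write_reg()-style accessors and
+ * "uart_cfg" standing in for the UART config register address (its
+ * #define falls outside this hunk):
+ *
+ *	uint32_t v = t4_read_reg(sc, uart_cfg);
+ *	v &= ~V_T7_UART_CLKDIV(M_T7_UART_CLKDIV);
+ *	v |= V_T7_UART_CLKDIV(div & M_T7_UART_CLKDIV);
+ *	t4_write_reg(sc, uart_cfg, v);
+ */
+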
/* registers for module PMU */
#define PMU_BASE_ADDR 0x19120
@@ -36767,6 +46222,26 @@
#define V_PL_DIS_PRTY_CHK(x) ((x) << S_PL_DIS_PRTY_CHK)
#define F_PL_DIS_PRTY_CHK V_PL_DIS_PRTY_CHK(1U)
+#define S_ARM_PART_CGEN 19
+#define V_ARM_PART_CGEN(x) ((x) << S_ARM_PART_CGEN)
+#define F_ARM_PART_CGEN V_ARM_PART_CGEN(1U)
+
+#define S_CRYPTO_PART_CGEN 14
+#define V_CRYPTO_PART_CGEN(x) ((x) << S_CRYPTO_PART_CGEN)
+#define F_CRYPTO_PART_CGEN V_CRYPTO_PART_CGEN(1U)
+
+#define S_NVME_PART_CGEN 9
+#define V_NVME_PART_CGEN(x) ((x) << S_NVME_PART_CGEN)
+#define F_NVME_PART_CGEN V_NVME_PART_CGEN(1U)
+
+#define S_XP10_PART_CGEN 8
+#define V_XP10_PART_CGEN(x) ((x) << S_XP10_PART_CGEN)
+#define F_XP10_PART_CGEN V_XP10_PART_CGEN(1U)
+
+#define S_GPEX_PART_CGEN 7
+#define V_GPEX_PART_CGEN(x) ((x) << S_GPEX_PART_CGEN)
+#define F_GPEX_PART_CGEN V_GPEX_PART_CGEN(1U)
+
#define A_PMU_SLEEPMODE_WAKEUP 0x19124
#define S_HWWAKEUPEN 5
@@ -36861,6 +46336,72 @@
#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+#define S_ISCSI_PAGE_SIZE_CHK_ENB 31
+#define V_ISCSI_PAGE_SIZE_CHK_ENB(x) ((x) << S_ISCSI_PAGE_SIZE_CHK_ENB)
+#define F_ISCSI_PAGE_SIZE_CHK_ENB V_ISCSI_PAGE_SIZE_CHK_ENB(1U)
+
+#define S_RDMA_0B_WR_OPCODE_HI 29
+#define V_RDMA_0B_WR_OPCODE_HI(x) ((x) << S_RDMA_0B_WR_OPCODE_HI)
+#define F_RDMA_0B_WR_OPCODE_HI V_RDMA_0B_WR_OPCODE_HI(1U)
+
+#define S_RDMA_IMMEDIATE_CQE 28
+#define V_RDMA_IMMEDIATE_CQE(x) ((x) << S_RDMA_IMMEDIATE_CQE)
+#define F_RDMA_IMMEDIATE_CQE V_RDMA_IMMEDIATE_CQE(1U)
+
+#define S_RDMA_ATOMIC_WR_RSP_CQE 27
+#define V_RDMA_ATOMIC_WR_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_WR_RSP_CQE)
+#define F_RDMA_ATOMIC_WR_RSP_CQE V_RDMA_ATOMIC_WR_RSP_CQE(1U)
+
+#define S_RDMA_VERIFY_RSP_FLUSH 26
+#define V_RDMA_VERIFY_RSP_FLUSH(x) ((x) << S_RDMA_VERIFY_RSP_FLUSH)
+#define F_RDMA_VERIFY_RSP_FLUSH V_RDMA_VERIFY_RSP_FLUSH(1U)
+
+#define S_RDMA_VERIFY_RSP_CQE 25
+#define V_RDMA_VERIFY_RSP_CQE(x) ((x) << S_RDMA_VERIFY_RSP_CQE)
+#define F_RDMA_VERIFY_RSP_CQE V_RDMA_VERIFY_RSP_CQE(1U)
+
+#define S_RDMA_FLUSH_RSP_CQE 24
+#define V_RDMA_FLUSH_RSP_CQE(x) ((x) << S_RDMA_FLUSH_RSP_CQE)
+#define F_RDMA_FLUSH_RSP_CQE V_RDMA_FLUSH_RSP_CQE(1U)
+
+#define S_RDMA_ATOMIC_RSP_CQE 23
+#define V_RDMA_ATOMIC_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_RSP_CQE)
+#define F_RDMA_ATOMIC_RSP_CQE V_RDMA_ATOMIC_RSP_CQE(1U)
+
+#define S_T7_TPT_EXTENSION_MODE 22
+#define V_T7_TPT_EXTENSION_MODE(x) ((x) << S_T7_TPT_EXTENSION_MODE)
+#define F_T7_TPT_EXTENSION_MODE V_T7_TPT_EXTENSION_MODE(1U)
+
+#define S_NVME_TCP_DDP_VAL_EN 21
+#define V_NVME_TCP_DDP_VAL_EN(x) ((x) << S_NVME_TCP_DDP_VAL_EN)
+#define F_NVME_TCP_DDP_VAL_EN V_NVME_TCP_DDP_VAL_EN(1U)
+
+#define S_NVME_TCP_REMOVE_HDR_CRC 20
+#define V_NVME_TCP_REMOVE_HDR_CRC(x) ((x) << S_NVME_TCP_REMOVE_HDR_CRC)
+#define F_NVME_TCP_REMOVE_HDR_CRC V_NVME_TCP_REMOVE_HDR_CRC(1U)
+
+#define S_NVME_TCP_LAST_PDU_CHECK_ENB 19
+#define V_NVME_TCP_LAST_PDU_CHECK_ENB(x) ((x) << S_NVME_TCP_LAST_PDU_CHECK_ENB)
+#define F_NVME_TCP_LAST_PDU_CHECK_ENB V_NVME_TCP_LAST_PDU_CHECK_ENB(1U)
+
+#define S_NVME_TCP_OFFSET_SUBMODE 17
+#define M_NVME_TCP_OFFSET_SUBMODE 0x3U
+#define V_NVME_TCP_OFFSET_SUBMODE(x) ((x) << S_NVME_TCP_OFFSET_SUBMODE)
+#define G_NVME_TCP_OFFSET_SUBMODE(x) (((x) >> S_NVME_TCP_OFFSET_SUBMODE) & M_NVME_TCP_OFFSET_SUBMODE)
+
+#define S_NVME_TCP_OFFSET_MODE 16
+#define V_NVME_TCP_OFFSET_MODE(x) ((x) << S_NVME_TCP_OFFSET_MODE)
+#define F_NVME_TCP_OFFSET_MODE V_NVME_TCP_OFFSET_MODE(1U)
+
+#define S_QPID_CHECK_DISABLE_FOR_SEND 15
+#define V_QPID_CHECK_DISABLE_FOR_SEND(x) ((x) << S_QPID_CHECK_DISABLE_FOR_SEND)
+#define F_QPID_CHECK_DISABLE_FOR_SEND V_QPID_CHECK_DISABLE_FOR_SEND(1U)
+
+#define S_RDMA_0B_WR_OPCODE_LO 10
+#define M_RDMA_0B_WR_OPCODE_LO 0xfU
+#define V_RDMA_0B_WR_OPCODE_LO(x) ((x) << S_RDMA_0B_WR_OPCODE_LO)
+#define G_RDMA_0B_WR_OPCODE_LO(x) (((x) >> S_RDMA_0B_WR_OPCODE_LO) & M_RDMA_0B_WR_OPCODE_LO)
+
#define A_ULP_RX_INT_ENABLE 0x19154
#define S_ENABLE_CTX_1 24
@@ -36971,6 +46512,86 @@
#define V_SE_CNT_MISMATCH_0(x) ((x) << S_SE_CNT_MISMATCH_0)
#define F_SE_CNT_MISMATCH_0 V_SE_CNT_MISMATCH_0(1U)
+#define S_CERR_PCMD_FIFO_3 19
+#define V_CERR_PCMD_FIFO_3(x) ((x) << S_CERR_PCMD_FIFO_3)
+#define F_CERR_PCMD_FIFO_3 V_CERR_PCMD_FIFO_3(1U)
+
+#define S_CERR_PCMD_FIFO_2 18
+#define V_CERR_PCMD_FIFO_2(x) ((x) << S_CERR_PCMD_FIFO_2)
+#define F_CERR_PCMD_FIFO_2 V_CERR_PCMD_FIFO_2(1U)
+
+#define S_CERR_PCMD_FIFO_1 17
+#define V_CERR_PCMD_FIFO_1(x) ((x) << S_CERR_PCMD_FIFO_1)
+#define F_CERR_PCMD_FIFO_1 V_CERR_PCMD_FIFO_1(1U)
+
+#define S_CERR_PCMD_FIFO_0 16
+#define V_CERR_PCMD_FIFO_0(x) ((x) << S_CERR_PCMD_FIFO_0)
+#define F_CERR_PCMD_FIFO_0 V_CERR_PCMD_FIFO_0(1U)
+
+#define S_CERR_DATA_FIFO_3 15
+#define V_CERR_DATA_FIFO_3(x) ((x) << S_CERR_DATA_FIFO_3)
+#define F_CERR_DATA_FIFO_3 V_CERR_DATA_FIFO_3(1U)
+
+#define S_CERR_DATA_FIFO_2 14
+#define V_CERR_DATA_FIFO_2(x) ((x) << S_CERR_DATA_FIFO_2)
+#define F_CERR_DATA_FIFO_2 V_CERR_DATA_FIFO_2(1U)
+
+#define S_CERR_DATA_FIFO_1 13
+#define V_CERR_DATA_FIFO_1(x) ((x) << S_CERR_DATA_FIFO_1)
+#define F_CERR_DATA_FIFO_1 V_CERR_DATA_FIFO_1(1U)
+
+#define S_CERR_DATA_FIFO_0 12
+#define V_CERR_DATA_FIFO_0(x) ((x) << S_CERR_DATA_FIFO_0)
+#define F_CERR_DATA_FIFO_0 V_CERR_DATA_FIFO_0(1U)
+
+#define S_SE_CNT_MISMATCH_3 11
+#define V_SE_CNT_MISMATCH_3(x) ((x) << S_SE_CNT_MISMATCH_3)
+#define F_SE_CNT_MISMATCH_3 V_SE_CNT_MISMATCH_3(1U)
+
+#define S_SE_CNT_MISMATCH_2 10
+#define V_SE_CNT_MISMATCH_2(x) ((x) << S_SE_CNT_MISMATCH_2)
+#define F_SE_CNT_MISMATCH_2 V_SE_CNT_MISMATCH_2(1U)
+
+#define S_T7_SE_CNT_MISMATCH_1 9
+#define V_T7_SE_CNT_MISMATCH_1(x) ((x) << S_T7_SE_CNT_MISMATCH_1)
+#define F_T7_SE_CNT_MISMATCH_1 V_T7_SE_CNT_MISMATCH_1(1U)
+
+#define S_T7_SE_CNT_MISMATCH_0 8
+#define V_T7_SE_CNT_MISMATCH_0(x) ((x) << S_T7_SE_CNT_MISMATCH_0)
+#define F_T7_SE_CNT_MISMATCH_0 V_T7_SE_CNT_MISMATCH_0(1U)
+
+#define S_ENABLE_CTX_3 7
+#define V_ENABLE_CTX_3(x) ((x) << S_ENABLE_CTX_3)
+#define F_ENABLE_CTX_3 V_ENABLE_CTX_3(1U)
+
+#define S_ENABLE_CTX_2 6
+#define V_ENABLE_CTX_2(x) ((x) << S_ENABLE_CTX_2)
+#define F_ENABLE_CTX_2 V_ENABLE_CTX_2(1U)
+
+#define S_T7_ENABLE_CTX_1 5
+#define V_T7_ENABLE_CTX_1(x) ((x) << S_T7_ENABLE_CTX_1)
+#define F_T7_ENABLE_CTX_1 V_T7_ENABLE_CTX_1(1U)
+
+#define S_T7_ENABLE_CTX_0 4
+#define V_T7_ENABLE_CTX_0(x) ((x) << S_T7_ENABLE_CTX_0)
+#define F_T7_ENABLE_CTX_0 V_T7_ENABLE_CTX_0(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_3 3
+#define V_ENABLE_ALN_SDC_ERR_3(x) ((x) << S_ENABLE_ALN_SDC_ERR_3)
+#define F_ENABLE_ALN_SDC_ERR_3 V_ENABLE_ALN_SDC_ERR_3(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_2 2
+#define V_ENABLE_ALN_SDC_ERR_2(x) ((x) << S_ENABLE_ALN_SDC_ERR_2)
+#define F_ENABLE_ALN_SDC_ERR_2 V_ENABLE_ALN_SDC_ERR_2(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_1 1
+#define V_T7_ENABLE_ALN_SDC_ERR_1(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_1)
+#define F_T7_ENABLE_ALN_SDC_ERR_1 V_T7_ENABLE_ALN_SDC_ERR_1(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_0 0
+#define V_T7_ENABLE_ALN_SDC_ERR_0(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_0)
+#define F_T7_ENABLE_ALN_SDC_ERR_0 V_T7_ENABLE_ALN_SDC_ERR_0(1U)
+
#define A_ULP_RX_INT_CAUSE 0x19158
#define S_CAUSE_CTX_1 24
@@ -37282,6 +46903,312 @@
#define G_ULPRX_TID(x) (((x) >> S_ULPRX_TID) & M_ULPRX_TID)
#define A_ULP_RX_CTX_ACC_CH1 0x191b0
+#define A_ULP_RX_CTX_ACC_CH2 0x191b4
+#define A_ULP_RX_CTX_ACC_CH3 0x191b8
+#define A_ULP_RX_CTL2 0x191bc
+
+#define S_PCMD3THRESHOLD 24
+#define M_PCMD3THRESHOLD 0xffU
+#define V_PCMD3THRESHOLD(x) ((x) << S_PCMD3THRESHOLD)
+#define G_PCMD3THRESHOLD(x) (((x) >> S_PCMD3THRESHOLD) & M_PCMD3THRESHOLD)
+
+#define S_PCMD2THRESHOLD 16
+#define M_PCMD2THRESHOLD 0xffU
+#define V_PCMD2THRESHOLD(x) ((x) << S_PCMD2THRESHOLD)
+#define G_PCMD2THRESHOLD(x) (((x) >> S_PCMD2THRESHOLD) & M_PCMD2THRESHOLD)
+
+#define S_T7_PCMD1THRESHOLD 8
+#define M_T7_PCMD1THRESHOLD 0xffU
+#define V_T7_PCMD1THRESHOLD(x) ((x) << S_T7_PCMD1THRESHOLD)
+#define G_T7_PCMD1THRESHOLD(x) (((x) >> S_T7_PCMD1THRESHOLD) & M_T7_PCMD1THRESHOLD)
+
+#define S_T7_PCMD0THRESHOLD 0
+#define M_T7_PCMD0THRESHOLD 0xffU
+#define V_T7_PCMD0THRESHOLD(x) ((x) << S_T7_PCMD0THRESHOLD)
+#define G_T7_PCMD0THRESHOLD(x) (((x) >> S_T7_PCMD0THRESHOLD) & M_T7_PCMD0THRESHOLD)
+
+#define A_ULP_RX_INT_ENABLE_INTERFACE 0x191c0
+
+#define S_ENABLE_ULPRX2SBT_RSPPERR 31
+#define V_ENABLE_ULPRX2SBT_RSPPERR(x) ((x) << S_ENABLE_ULPRX2SBT_RSPPERR)
+#define F_ENABLE_ULPRX2SBT_RSPPERR V_ENABLE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_ENABLE_ULPRX2MA_RSPPERR 30
+#define V_ENABLE_ULPRX2MA_RSPPERR(x) ((x) << S_ENABLE_ULPRX2MA_RSPPERR)
+#define F_ENABLE_ULPRX2MA_RSPPERR V_ENABLE_ULPRX2MA_RSPPERR(1U)
+
+#define S_ENABLE_PIO_BUS_PERR 29
+#define V_ENABLE_PIO_BUS_PERR(x) ((x) << S_ENABLE_PIO_BUS_PERR)
+#define F_ENABLE_PIO_BUS_PERR V_ENABLE_PIO_BUS_PERR(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_3 19
+#define V_ENABLE_PM2ULP_SNOOPDATA_3(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_3)
+#define F_ENABLE_PM2ULP_SNOOPDATA_3 V_ENABLE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_2 18
+#define V_ENABLE_PM2ULP_SNOOPDATA_2(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_2)
+#define F_ENABLE_PM2ULP_SNOOPDATA_2 V_ENABLE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_1 17
+#define V_ENABLE_PM2ULP_SNOOPDATA_1(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_1)
+#define F_ENABLE_PM2ULP_SNOOPDATA_1 V_ENABLE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_0 16
+#define V_ENABLE_PM2ULP_SNOOPDATA_0(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_0)
+#define F_ENABLE_PM2ULP_SNOOPDATA_0 V_ENABLE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_3 15
+#define V_ENABLE_TLS2ULP_DATA_3(x) ((x) << S_ENABLE_TLS2ULP_DATA_3)
+#define F_ENABLE_TLS2ULP_DATA_3 V_ENABLE_TLS2ULP_DATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_2 14
+#define V_ENABLE_TLS2ULP_DATA_2(x) ((x) << S_ENABLE_TLS2ULP_DATA_2)
+#define F_ENABLE_TLS2ULP_DATA_2 V_ENABLE_TLS2ULP_DATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_1 13
+#define V_ENABLE_TLS2ULP_DATA_1(x) ((x) << S_ENABLE_TLS2ULP_DATA_1)
+#define F_ENABLE_TLS2ULP_DATA_1 V_ENABLE_TLS2ULP_DATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_0 12
+#define V_ENABLE_TLS2ULP_DATA_0(x) ((x) << S_ENABLE_TLS2ULP_DATA_0)
+#define F_ENABLE_TLS2ULP_DATA_0 V_ENABLE_TLS2ULP_DATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_3 11
+#define V_ENABLE_TLS2ULP_PLENDATA_3(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_3)
+#define F_ENABLE_TLS2ULP_PLENDATA_3 V_ENABLE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_2 10
+#define V_ENABLE_TLS2ULP_PLENDATA_2(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_2)
+#define F_ENABLE_TLS2ULP_PLENDATA_2 V_ENABLE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_1 9
+#define V_ENABLE_TLS2ULP_PLENDATA_1(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_1)
+#define F_ENABLE_TLS2ULP_PLENDATA_1 V_ENABLE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_0 8
+#define V_ENABLE_TLS2ULP_PLENDATA_0(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_0)
+#define F_ENABLE_TLS2ULP_PLENDATA_0 V_ENABLE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_ENABLE_PM2ULP_DATA_3 7
+#define V_ENABLE_PM2ULP_DATA_3(x) ((x) << S_ENABLE_PM2ULP_DATA_3)
+#define F_ENABLE_PM2ULP_DATA_3 V_ENABLE_PM2ULP_DATA_3(1U)
+
+#define S_ENABLE_PM2ULP_DATA_2 6
+#define V_ENABLE_PM2ULP_DATA_2(x) ((x) << S_ENABLE_PM2ULP_DATA_2)
+#define F_ENABLE_PM2ULP_DATA_2 V_ENABLE_PM2ULP_DATA_2(1U)
+
+#define S_ENABLE_PM2ULP_DATA_1 5
+#define V_ENABLE_PM2ULP_DATA_1(x) ((x) << S_ENABLE_PM2ULP_DATA_1)
+#define F_ENABLE_PM2ULP_DATA_1 V_ENABLE_PM2ULP_DATA_1(1U)
+
+#define S_ENABLE_PM2ULP_DATA_0 4
+#define V_ENABLE_PM2ULP_DATA_0(x) ((x) << S_ENABLE_PM2ULP_DATA_0)
+#define F_ENABLE_PM2ULP_DATA_0 V_ENABLE_PM2ULP_DATA_0(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_3 3
+#define V_ENABLE_TP2ULP_PCMD_3(x) ((x) << S_ENABLE_TP2ULP_PCMD_3)
+#define F_ENABLE_TP2ULP_PCMD_3 V_ENABLE_TP2ULP_PCMD_3(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_2 2
+#define V_ENABLE_TP2ULP_PCMD_2(x) ((x) << S_ENABLE_TP2ULP_PCMD_2)
+#define F_ENABLE_TP2ULP_PCMD_2 V_ENABLE_TP2ULP_PCMD_2(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_1 1
+#define V_ENABLE_TP2ULP_PCMD_1(x) ((x) << S_ENABLE_TP2ULP_PCMD_1)
+#define F_ENABLE_TP2ULP_PCMD_1 V_ENABLE_TP2ULP_PCMD_1(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_0 0
+#define V_ENABLE_TP2ULP_PCMD_0(x) ((x) << S_ENABLE_TP2ULP_PCMD_0)
+#define F_ENABLE_TP2ULP_PCMD_0 V_ENABLE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_INTERFACE 0x191c4
+
+#define S_CAUSE_ULPRX2SBT_RSPPERR 31
+#define V_CAUSE_ULPRX2SBT_RSPPERR(x) ((x) << S_CAUSE_ULPRX2SBT_RSPPERR)
+#define F_CAUSE_ULPRX2SBT_RSPPERR V_CAUSE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_CAUSE_ULPRX2MA_RSPPERR 30
+#define V_CAUSE_ULPRX2MA_RSPPERR(x) ((x) << S_CAUSE_ULPRX2MA_RSPPERR)
+#define F_CAUSE_ULPRX2MA_RSPPERR V_CAUSE_ULPRX2MA_RSPPERR(1U)
+
+#define S_CAUSE_PIO_BUS_PERR 29
+#define V_CAUSE_PIO_BUS_PERR(x) ((x) << S_CAUSE_PIO_BUS_PERR)
+#define F_CAUSE_PIO_BUS_PERR V_CAUSE_PIO_BUS_PERR(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_3 19
+#define V_CAUSE_PM2ULP_SNOOPDATA_3(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_3)
+#define F_CAUSE_PM2ULP_SNOOPDATA_3 V_CAUSE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_2 18
+#define V_CAUSE_PM2ULP_SNOOPDATA_2(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_2)
+#define F_CAUSE_PM2ULP_SNOOPDATA_2 V_CAUSE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_1 17
+#define V_CAUSE_PM2ULP_SNOOPDATA_1(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_1)
+#define F_CAUSE_PM2ULP_SNOOPDATA_1 V_CAUSE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_0 16
+#define V_CAUSE_PM2ULP_SNOOPDATA_0(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_0)
+#define F_CAUSE_PM2ULP_SNOOPDATA_0 V_CAUSE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_3 15
+#define V_CAUSE_TLS2ULP_DATA_3(x) ((x) << S_CAUSE_TLS2ULP_DATA_3)
+#define F_CAUSE_TLS2ULP_DATA_3 V_CAUSE_TLS2ULP_DATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_2 14
+#define V_CAUSE_TLS2ULP_DATA_2(x) ((x) << S_CAUSE_TLS2ULP_DATA_2)
+#define F_CAUSE_TLS2ULP_DATA_2 V_CAUSE_TLS2ULP_DATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_1 13
+#define V_CAUSE_TLS2ULP_DATA_1(x) ((x) << S_CAUSE_TLS2ULP_DATA_1)
+#define F_CAUSE_TLS2ULP_DATA_1 V_CAUSE_TLS2ULP_DATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_0 12
+#define V_CAUSE_TLS2ULP_DATA_0(x) ((x) << S_CAUSE_TLS2ULP_DATA_0)
+#define F_CAUSE_TLS2ULP_DATA_0 V_CAUSE_TLS2ULP_DATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_3 11
+#define V_CAUSE_TLS2ULP_PLENDATA_3(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_3)
+#define F_CAUSE_TLS2ULP_PLENDATA_3 V_CAUSE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_2 10
+#define V_CAUSE_TLS2ULP_PLENDATA_2(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_2)
+#define F_CAUSE_TLS2ULP_PLENDATA_2 V_CAUSE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_1 9
+#define V_CAUSE_TLS2ULP_PLENDATA_1(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_1)
+#define F_CAUSE_TLS2ULP_PLENDATA_1 V_CAUSE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_0 8
+#define V_CAUSE_TLS2ULP_PLENDATA_0(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_0)
+#define F_CAUSE_TLS2ULP_PLENDATA_0 V_CAUSE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_CAUSE_PM2ULP_DATA_3 7
+#define V_CAUSE_PM2ULP_DATA_3(x) ((x) << S_CAUSE_PM2ULP_DATA_3)
+#define F_CAUSE_PM2ULP_DATA_3 V_CAUSE_PM2ULP_DATA_3(1U)
+
+#define S_CAUSE_PM2ULP_DATA_2 6
+#define V_CAUSE_PM2ULP_DATA_2(x) ((x) << S_CAUSE_PM2ULP_DATA_2)
+#define F_CAUSE_PM2ULP_DATA_2 V_CAUSE_PM2ULP_DATA_2(1U)
+
+#define S_CAUSE_PM2ULP_DATA_1 5
+#define V_CAUSE_PM2ULP_DATA_1(x) ((x) << S_CAUSE_PM2ULP_DATA_1)
+#define F_CAUSE_PM2ULP_DATA_1 V_CAUSE_PM2ULP_DATA_1(1U)
+
+#define S_CAUSE_PM2ULP_DATA_0 4
+#define V_CAUSE_PM2ULP_DATA_0(x) ((x) << S_CAUSE_PM2ULP_DATA_0)
+#define F_CAUSE_PM2ULP_DATA_0 V_CAUSE_PM2ULP_DATA_0(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_3 3
+#define V_CAUSE_TP2ULP_PCMD_3(x) ((x) << S_CAUSE_TP2ULP_PCMD_3)
+#define F_CAUSE_TP2ULP_PCMD_3 V_CAUSE_TP2ULP_PCMD_3(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_2 2
+#define V_CAUSE_TP2ULP_PCMD_2(x) ((x) << S_CAUSE_TP2ULP_PCMD_2)
+#define F_CAUSE_TP2ULP_PCMD_2 V_CAUSE_TP2ULP_PCMD_2(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_1 1
+#define V_CAUSE_TP2ULP_PCMD_1(x) ((x) << S_CAUSE_TP2ULP_PCMD_1)
+#define F_CAUSE_TP2ULP_PCMD_1 V_CAUSE_TP2ULP_PCMD_1(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_0 0
+#define V_CAUSE_TP2ULP_PCMD_0(x) ((x) << S_CAUSE_TP2ULP_PCMD_0)
+#define F_CAUSE_TP2ULP_PCMD_0 V_CAUSE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_INTERFACE 0x191c8
+
+#define S_PERR_ULPRX2SBT_RSPPERR 31
+#define V_PERR_ULPRX2SBT_RSPPERR(x) ((x) << S_PERR_ULPRX2SBT_RSPPERR)
+#define F_PERR_ULPRX2SBT_RSPPERR V_PERR_ULPRX2SBT_RSPPERR(1U)
+
+#define S_PERR_ULPRX2MA_RSPPERR 30
+#define V_PERR_ULPRX2MA_RSPPERR(x) ((x) << S_PERR_ULPRX2MA_RSPPERR)
+#define F_PERR_ULPRX2MA_RSPPERR V_PERR_ULPRX2MA_RSPPERR(1U)
+
+#define S_PERR_PIO_BUS_PERR 29
+#define V_PERR_PIO_BUS_PERR(x) ((x) << S_PERR_PIO_BUS_PERR)
+#define F_PERR_PIO_BUS_PERR V_PERR_PIO_BUS_PERR(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_3 19
+#define V_PERR_PM2ULP_SNOOPDATA_3(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_3)
+#define F_PERR_PM2ULP_SNOOPDATA_3 V_PERR_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_2 18
+#define V_PERR_PM2ULP_SNOOPDATA_2(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_2)
+#define F_PERR_PM2ULP_SNOOPDATA_2 V_PERR_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_1 17
+#define V_PERR_PM2ULP_SNOOPDATA_1(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_1)
+#define F_PERR_PM2ULP_SNOOPDATA_1 V_PERR_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_0 16
+#define V_PERR_PM2ULP_SNOOPDATA_0(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_0)
+#define F_PERR_PM2ULP_SNOOPDATA_0 V_PERR_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_PERR_TLS2ULP_DATA_3 15
+#define V_PERR_TLS2ULP_DATA_3(x) ((x) << S_PERR_TLS2ULP_DATA_3)
+#define F_PERR_TLS2ULP_DATA_3 V_PERR_TLS2ULP_DATA_3(1U)
+
+#define S_PERR_TLS2ULP_DATA_2 14
+#define V_PERR_TLS2ULP_DATA_2(x) ((x) << S_PERR_TLS2ULP_DATA_2)
+#define F_PERR_TLS2ULP_DATA_2 V_PERR_TLS2ULP_DATA_2(1U)
+
+#define S_PERR_TLS2ULP_DATA_1 13
+#define V_PERR_TLS2ULP_DATA_1(x) ((x) << S_PERR_TLS2ULP_DATA_1)
+#define F_PERR_TLS2ULP_DATA_1 V_PERR_TLS2ULP_DATA_1(1U)
+
+#define S_PERR_TLS2ULP_DATA_0 12
+#define V_PERR_TLS2ULP_DATA_0(x) ((x) << S_PERR_TLS2ULP_DATA_0)
+#define F_PERR_TLS2ULP_DATA_0 V_PERR_TLS2ULP_DATA_0(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_3 11
+#define V_PERR_TLS2ULP_PLENDATA_3(x) ((x) << S_PERR_TLS2ULP_PLENDATA_3)
+#define F_PERR_TLS2ULP_PLENDATA_3 V_PERR_TLS2ULP_PLENDATA_3(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_2 10
+#define V_PERR_TLS2ULP_PLENDATA_2(x) ((x) << S_PERR_TLS2ULP_PLENDATA_2)
+#define F_PERR_TLS2ULP_PLENDATA_2 V_PERR_TLS2ULP_PLENDATA_2(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_1 9
+#define V_PERR_TLS2ULP_PLENDATA_1(x) ((x) << S_PERR_TLS2ULP_PLENDATA_1)
+#define F_PERR_TLS2ULP_PLENDATA_1 V_PERR_TLS2ULP_PLENDATA_1(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_0 8
+#define V_PERR_TLS2ULP_PLENDATA_0(x) ((x) << S_PERR_TLS2ULP_PLENDATA_0)
+#define F_PERR_TLS2ULP_PLENDATA_0 V_PERR_TLS2ULP_PLENDATA_0(1U)
+
+#define S_PERR_PM2ULP_DATA_3 7
+#define V_PERR_PM2ULP_DATA_3(x) ((x) << S_PERR_PM2ULP_DATA_3)
+#define F_PERR_PM2ULP_DATA_3 V_PERR_PM2ULP_DATA_3(1U)
+
+#define S_PERR_PM2ULP_DATA_2 6
+#define V_PERR_PM2ULP_DATA_2(x) ((x) << S_PERR_PM2ULP_DATA_2)
+#define F_PERR_PM2ULP_DATA_2 V_PERR_PM2ULP_DATA_2(1U)
+
+#define S_PERR_PM2ULP_DATA_1 5
+#define V_PERR_PM2ULP_DATA_1(x) ((x) << S_PERR_PM2ULP_DATA_1)
+#define F_PERR_PM2ULP_DATA_1 V_PERR_PM2ULP_DATA_1(1U)
+
+#define S_PERR_PM2ULP_DATA_0 4
+#define V_PERR_PM2ULP_DATA_0(x) ((x) << S_PERR_PM2ULP_DATA_0)
+#define F_PERR_PM2ULP_DATA_0 V_PERR_PM2ULP_DATA_0(1U)
+
+#define S_PERR_TP2ULP_PCMD_3 3
+#define V_PERR_TP2ULP_PCMD_3(x) ((x) << S_PERR_TP2ULP_PCMD_3)
+#define F_PERR_TP2ULP_PCMD_3 V_PERR_TP2ULP_PCMD_3(1U)
+
+#define S_PERR_TP2ULP_PCMD_2 2
+#define V_PERR_TP2ULP_PCMD_2(x) ((x) << S_PERR_TP2ULP_PCMD_2)
+#define F_PERR_TP2ULP_PCMD_2 V_PERR_TP2ULP_PCMD_2(1U)
+
+#define S_PERR_TP2ULP_PCMD_1 1
+#define V_PERR_TP2ULP_PCMD_1(x) ((x) << S_PERR_TP2ULP_PCMD_1)
+#define F_PERR_TP2ULP_PCMD_1 V_PERR_TP2ULP_PCMD_1(1U)
+
+#define S_PERR_TP2ULP_PCMD_0 0
+#define V_PERR_TP2ULP_PCMD_0(x) ((x) << S_PERR_TP2ULP_PCMD_0)
+#define F_PERR_TP2ULP_PCMD_0 V_PERR_TP2ULP_PCMD_0(1U)
+
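+/*
+ * Editorial aside, not part of the generated header:
+ * A_ULP_RX_INT_ENABLE_INTERFACE, A_ULP_RX_INT_CAUSE_INTERFACE and
+ * A_ULP_RX_PERR_ENABLE_INTERFACE above share a single bit layout; only
+ * the ENABLE_/CAUSE_/PERR_ name prefix changes.  A minimal handler
+ * sketch, assuming the usual write-one-to-clear semantics for the cause
+ * register and a hypothetical helper for the parity-error case:
+ *
+ *	uint32_t cause = t4_read_reg(sc, A_ULP_RX_INT_CAUSE_INTERFACE);
+ *	if (cause & F_CAUSE_PIO_BUS_PERR)
+ *		handle_pio_parity_error(sc);	(hypothetical helper)
+ *	t4_write_reg(sc, A_ULP_RX_INT_CAUSE_INTERFACE, cause);
+ */
+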
#define A_ULP_RX_SE_CNT_ERR 0x191d0
#define A_ULP_RX_SE_CNT_CLR 0x191d4
@@ -37295,6 +47222,26 @@
#define V_CLRCHAN1(x) ((x) << S_CLRCHAN1)
#define G_CLRCHAN1(x) (((x) >> S_CLRCHAN1) & M_CLRCHAN1)
+#define S_CLRCHAN3 12
+#define M_CLRCHAN3 0xfU
+#define V_CLRCHAN3(x) ((x) << S_CLRCHAN3)
+#define G_CLRCHAN3(x) (((x) >> S_CLRCHAN3) & M_CLRCHAN3)
+
+#define S_CLRCHAN2 8
+#define M_CLRCHAN2 0xfU
+#define V_CLRCHAN2(x) ((x) << S_CLRCHAN2)
+#define G_CLRCHAN2(x) (((x) >> S_CLRCHAN2) & M_CLRCHAN2)
+
+#define S_T7_CLRCHAN1 4
+#define M_T7_CLRCHAN1 0xfU
+#define V_T7_CLRCHAN1(x) ((x) << S_T7_CLRCHAN1)
+#define G_T7_CLRCHAN1(x) (((x) >> S_T7_CLRCHAN1) & M_T7_CLRCHAN1)
+
+#define S_T7_CLRCHAN0 0
+#define M_T7_CLRCHAN0 0xfU
+#define V_T7_CLRCHAN0(x) ((x) << S_T7_CLRCHAN0)
+#define G_T7_CLRCHAN0(x) (((x) >> S_T7_CLRCHAN0) & M_T7_CLRCHAN0)
+
#define A_ULP_RX_SE_CNT_CH0 0x191d8
#define S_SOP_CNT_OUT0 28
@@ -37400,6 +47347,7 @@
#define G_SEL_L(x) (((x) >> S_SEL_L) & M_SEL_L)
#define A_ULP_RX_DBG_DATAH 0x191e4
+#define A_ULP_RX_DBG_DATA 0x191e4
#define A_ULP_RX_DBG_DATAL 0x191e8
#define A_ULP_RX_LA_CHNL 0x19238
@@ -37581,6 +47529,11 @@
#define V_PIO_RDMA_SEND_RQE(x) ((x) << S_PIO_RDMA_SEND_RQE)
#define F_PIO_RDMA_SEND_RQE V_PIO_RDMA_SEND_RQE(1U)
+#define S_TLS_KEYSIZECONF 26
+#define M_TLS_KEYSIZECONF 0x3U
+#define V_TLS_KEYSIZECONF(x) ((x) << S_TLS_KEYSIZECONF)
+#define G_TLS_KEYSIZECONF(x) (((x) >> S_TLS_KEYSIZECONF) & M_TLS_KEYSIZECONF)
+
#define A_ULP_RX_CH0_CGEN 0x19260
#define S_BYPASS_CGEN 7
@@ -37615,7 +47568,61 @@
#define V_RDMA_DATAPATH_CGEN(x) ((x) << S_RDMA_DATAPATH_CGEN)
#define F_RDMA_DATAPATH_CGEN V_RDMA_DATAPATH_CGEN(1U)
+#define A_ULP_RX_CH_CGEN 0x19260
+
+#define S_T7_BYPASS_CGEN 28
+#define M_T7_BYPASS_CGEN 0xfU
+#define V_T7_BYPASS_CGEN(x) ((x) << S_T7_BYPASS_CGEN)
+#define G_T7_BYPASS_CGEN(x) (((x) >> S_T7_BYPASS_CGEN) & M_T7_BYPASS_CGEN)
+
+#define S_T7_TDDP_CGEN 24
+#define M_T7_TDDP_CGEN 0xfU
+#define V_T7_TDDP_CGEN(x) ((x) << S_T7_TDDP_CGEN)
+#define G_T7_TDDP_CGEN(x) (((x) >> S_T7_TDDP_CGEN) & M_T7_TDDP_CGEN)
+
+#define S_T7_ISCSI_CGEN 20
+#define M_T7_ISCSI_CGEN 0xfU
+#define V_T7_ISCSI_CGEN(x) ((x) << S_T7_ISCSI_CGEN)
+#define G_T7_ISCSI_CGEN(x) (((x) >> S_T7_ISCSI_CGEN) & M_T7_ISCSI_CGEN)
+
+#define S_T7_RDMA_CGEN 16
+#define M_T7_RDMA_CGEN 0xfU
+#define V_T7_RDMA_CGEN(x) ((x) << S_T7_RDMA_CGEN)
+#define G_T7_RDMA_CGEN(x) (((x) >> S_T7_RDMA_CGEN) & M_T7_RDMA_CGEN)
+
+#define S_T7_CHANNEL_CGEN 12
+#define M_T7_CHANNEL_CGEN 0xfU
+#define V_T7_CHANNEL_CGEN(x) ((x) << S_T7_CHANNEL_CGEN)
+#define G_T7_CHANNEL_CGEN(x) (((x) >> S_T7_CHANNEL_CGEN) & M_T7_CHANNEL_CGEN)
+
+#define S_T7_ALL_DATAPATH_CGEN 8
+#define M_T7_ALL_DATAPATH_CGEN 0xfU
+#define V_T7_ALL_DATAPATH_CGEN(x) ((x) << S_T7_ALL_DATAPATH_CGEN)
+#define G_T7_ALL_DATAPATH_CGEN(x) (((x) >> S_T7_ALL_DATAPATH_CGEN) & M_T7_ALL_DATAPATH_CGEN)
+
+#define S_T7_T10DIFF_DATAPATH_CGEN 4
+#define M_T7_T10DIFF_DATAPATH_CGEN 0xfU
+#define V_T7_T10DIFF_DATAPATH_CGEN(x) ((x) << S_T7_T10DIFF_DATAPATH_CGEN)
+#define G_T7_T10DIFF_DATAPATH_CGEN(x) (((x) >> S_T7_T10DIFF_DATAPATH_CGEN) & M_T7_T10DIFF_DATAPATH_CGEN)
+
+#define S_T7_RDMA_DATAPATH_CGEN 0
+#define M_T7_RDMA_DATAPATH_CGEN 0xfU
+#define V_T7_RDMA_DATAPATH_CGEN(x) ((x) << S_T7_RDMA_DATAPATH_CGEN)
+#define G_T7_RDMA_DATAPATH_CGEN(x) (((x) >> S_T7_RDMA_DATAPATH_CGEN) & M_T7_RDMA_DATAPATH_CGEN)
+
#define A_ULP_RX_CH1_CGEN 0x19264
+#define A_ULP_RX_CH_CGEN_1 0x19264
+
+#define S_NVME_TCP_CGEN 4
+#define M_NVME_TCP_CGEN 0xfU
+#define V_NVME_TCP_CGEN(x) ((x) << S_NVME_TCP_CGEN)
+#define G_NVME_TCP_CGEN(x) (((x) >> S_NVME_TCP_CGEN) & M_NVME_TCP_CGEN)
+
+#define S_ROCE_CGEN 0
+#define M_ROCE_CGEN 0xfU
+#define V_ROCE_CGEN(x) ((x) << S_ROCE_CGEN)
+#define G_ROCE_CGEN(x) (((x) >> S_ROCE_CGEN) & M_ROCE_CGEN)
+
#define A_ULP_RX_RFE_DISABLE 0x19268
#define S_RQE_LIM_CHECK_RFE_DISABLE 0
@@ -37742,6 +47749,30 @@
#define V_SKIP_MA_REQ_EN0(x) ((x) << S_SKIP_MA_REQ_EN0)
#define F_SKIP_MA_REQ_EN0 V_SKIP_MA_REQ_EN0(1U)
+#define S_CLEAR_CTX_ERR_CNT3 7
+#define V_CLEAR_CTX_ERR_CNT3(x) ((x) << S_CLEAR_CTX_ERR_CNT3)
+#define F_CLEAR_CTX_ERR_CNT3 V_CLEAR_CTX_ERR_CNT3(1U)
+
+#define S_CLEAR_CTX_ERR_CNT2 6
+#define V_CLEAR_CTX_ERR_CNT2(x) ((x) << S_CLEAR_CTX_ERR_CNT2)
+#define F_CLEAR_CTX_ERR_CNT2 V_CLEAR_CTX_ERR_CNT2(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT1 5
+#define V_T7_CLEAR_CTX_ERR_CNT1(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT1)
+#define F_T7_CLEAR_CTX_ERR_CNT1 V_T7_CLEAR_CTX_ERR_CNT1(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT0 4
+#define V_T7_CLEAR_CTX_ERR_CNT0(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT0)
+#define F_T7_CLEAR_CTX_ERR_CNT0 V_T7_CLEAR_CTX_ERR_CNT0(1U)
+
+#define S_SKIP_MA_REQ_EN3 3
+#define V_SKIP_MA_REQ_EN3(x) ((x) << S_SKIP_MA_REQ_EN3)
+#define F_SKIP_MA_REQ_EN3 V_SKIP_MA_REQ_EN3(1U)
+
+#define S_SKIP_MA_REQ_EN2 2
+#define V_SKIP_MA_REQ_EN2(x) ((x) << S_SKIP_MA_REQ_EN2)
+#define F_SKIP_MA_REQ_EN2 V_SKIP_MA_REQ_EN2(1U)
+
#define A_ULP_RX_CHNL0_CTX_ERROR_COUNT_PER_TID 0x19288
#define A_ULP_RX_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1928c
#define A_ULP_RX_MSN_CHECK_ENABLE 0x19290
@@ -37758,6 +47789,92 @@
#define V_SEND_MSN_CHECK_ENABLE(x) ((x) << S_SEND_MSN_CHECK_ENABLE)
#define F_SEND_MSN_CHECK_ENABLE V_SEND_MSN_CHECK_ENABLE(1U)
+#define A_ULP_RX_SE_CNT_CH2 0x19294
+
+#define S_SOP_CNT_OUT2 28
+#define M_SOP_CNT_OUT2 0xfU
+#define V_SOP_CNT_OUT2(x) ((x) << S_SOP_CNT_OUT2)
+#define G_SOP_CNT_OUT2(x) (((x) >> S_SOP_CNT_OUT2) & M_SOP_CNT_OUT2)
+
+#define S_EOP_CNT_OUT2 24
+#define M_EOP_CNT_OUT2 0xfU
+#define V_EOP_CNT_OUT2(x) ((x) << S_EOP_CNT_OUT2)
+#define G_EOP_CNT_OUT2(x) (((x) >> S_EOP_CNT_OUT2) & M_EOP_CNT_OUT2)
+
+#define S_SOP_CNT_AL2 20
+#define M_SOP_CNT_AL2 0xfU
+#define V_SOP_CNT_AL2(x) ((x) << S_SOP_CNT_AL2)
+#define G_SOP_CNT_AL2(x) (((x) >> S_SOP_CNT_AL2) & M_SOP_CNT_AL2)
+
+#define S_EOP_CNT_AL2 16
+#define M_EOP_CNT_AL2 0xfU
+#define V_EOP_CNT_AL2(x) ((x) << S_EOP_CNT_AL2)
+#define G_EOP_CNT_AL2(x) (((x) >> S_EOP_CNT_AL2) & M_EOP_CNT_AL2)
+
+#define S_SOP_CNT_MR2 12
+#define M_SOP_CNT_MR2 0xfU
+#define V_SOP_CNT_MR2(x) ((x) << S_SOP_CNT_MR2)
+#define G_SOP_CNT_MR2(x) (((x) >> S_SOP_CNT_MR2) & M_SOP_CNT_MR2)
+
+#define S_EOP_CNT_MR2 8
+#define M_EOP_CNT_MR2 0xfU
+#define V_EOP_CNT_MR2(x) ((x) << S_EOP_CNT_MR2)
+#define G_EOP_CNT_MR2(x) (((x) >> S_EOP_CNT_MR2) & M_EOP_CNT_MR2)
+
+#define S_SOP_CNT_IN2 4
+#define M_SOP_CNT_IN2 0xfU
+#define V_SOP_CNT_IN2(x) ((x) << S_SOP_CNT_IN2)
+#define G_SOP_CNT_IN2(x) (((x) >> S_SOP_CNT_IN2) & M_SOP_CNT_IN2)
+
+#define S_EOP_CNT_IN2 0
+#define M_EOP_CNT_IN2 0xfU
+#define V_EOP_CNT_IN2(x) ((x) << S_EOP_CNT_IN2)
+#define G_EOP_CNT_IN2(x) (((x) >> S_EOP_CNT_IN2) & M_EOP_CNT_IN2)
+
+#define A_ULP_RX_SE_CNT_CH3 0x19298
+
+#define S_SOP_CNT_OUT3 28
+#define M_SOP_CNT_OUT3 0xfU
+#define V_SOP_CNT_OUT3(x) ((x) << S_SOP_CNT_OUT3)
+#define G_SOP_CNT_OUT3(x) (((x) >> S_SOP_CNT_OUT3) & M_SOP_CNT_OUT3)
+
+#define S_EOP_CNT_OUT3 24
+#define M_EOP_CNT_OUT3 0xfU
+#define V_EOP_CNT_OUT3(x) ((x) << S_EOP_CNT_OUT3)
+#define G_EOP_CNT_OUT3(x) (((x) >> S_EOP_CNT_OUT3) & M_EOP_CNT_OUT3)
+
+#define S_SOP_CNT_AL3 20
+#define M_SOP_CNT_AL3 0xfU
+#define V_SOP_CNT_AL3(x) ((x) << S_SOP_CNT_AL3)
+#define G_SOP_CNT_AL3(x) (((x) >> S_SOP_CNT_AL3) & M_SOP_CNT_AL3)
+
+#define S_EOP_CNT_AL3 16
+#define M_EOP_CNT_AL3 0xfU
+#define V_EOP_CNT_AL3(x) ((x) << S_EOP_CNT_AL3)
+#define G_EOP_CNT_AL3(x) (((x) >> S_EOP_CNT_AL3) & M_EOP_CNT_AL3)
+
+#define S_SOP_CNT_MR3 12
+#define M_SOP_CNT_MR3 0xfU
+#define V_SOP_CNT_MR3(x) ((x) << S_SOP_CNT_MR3)
+#define G_SOP_CNT_MR3(x) (((x) >> S_SOP_CNT_MR3) & M_SOP_CNT_MR3)
+
+#define S_EOP_CNT_MR3 8
+#define M_EOP_CNT_MR3 0xfU
+#define V_EOP_CNT_MR3(x) ((x) << S_EOP_CNT_MR3)
+#define G_EOP_CNT_MR3(x) (((x) >> S_EOP_CNT_MR3) & M_EOP_CNT_MR3)
+
+#define S_SOP_CNT_IN3 4
+#define M_SOP_CNT_IN3 0xfU
+#define V_SOP_CNT_IN3(x) ((x) << S_SOP_CNT_IN3)
+#define G_SOP_CNT_IN3(x) (((x) >> S_SOP_CNT_IN3) & M_SOP_CNT_IN3)
+
+#define S_EOP_CNT_IN3 0
+#define M_EOP_CNT_IN3 0xfU
+#define V_EOP_CNT_IN3(x) ((x) << S_EOP_CNT_IN3)
+#define G_EOP_CNT_IN3(x) (((x) >> S_EOP_CNT_IN3) & M_EOP_CNT_IN3)
+
+#define A_ULP_RX_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1929c
+#define A_ULP_RX_CHNL3_CTX_ERROR_COUNT_PER_TID 0x192a0
#define A_ULP_RX_TLS_PP_LLIMIT 0x192a4
#define S_TLSPPLLIMIT 6
@@ -37787,6 +47904,933 @@
#define G_TLSKEYULIMIT(x) (((x) >> S_TLSKEYULIMIT) & M_TLSKEYULIMIT)
#define A_ULP_RX_TLS_CTL 0x192bc
+#define A_ULP_RX_RRQ_LLIMIT 0x192c0
+#define A_ULP_RX_RRQ_ULIMIT 0x192c4
+#define A_ULP_RX_NVME_TCP_STAG_LLIMIT 0x192c8
+#define A_ULP_RX_NVME_TCP_STAG_ULIMIT 0x192cc
+#define A_ULP_RX_NVME_TCP_RQ_LLIMIT 0x192d0
+#define A_ULP_RX_NVME_TCP_RQ_ULIMIT 0x192d4
+#define A_ULP_RX_NVME_TCP_PBL_LLIMIT 0x192d8
+#define A_ULP_RX_NVME_TCP_PBL_ULIMIT 0x192dc
+#define A_ULP_RX_NVME_TCP_MAX_LENGTH 0x192e0
+
+#define S_NVME_TCP_MAX_PLEN01 24
+#define M_NVME_TCP_MAX_PLEN01 0xffU
+#define V_NVME_TCP_MAX_PLEN01(x) ((x) << S_NVME_TCP_MAX_PLEN01)
+#define G_NVME_TCP_MAX_PLEN01(x) (((x) >> S_NVME_TCP_MAX_PLEN01) & M_NVME_TCP_MAX_PLEN01)
+
+#define S_NVME_TCP_MAX_PLEN23 16
+#define M_NVME_TCP_MAX_PLEN23 0xffU
+#define V_NVME_TCP_MAX_PLEN23(x) ((x) << S_NVME_TCP_MAX_PLEN23)
+#define G_NVME_TCP_MAX_PLEN23(x) (((x) >> S_NVME_TCP_MAX_PLEN23) & M_NVME_TCP_MAX_PLEN23)
+
+#define S_NVME_TCP_MAX_CMD_PDU_LENGTH 0
+#define M_NVME_TCP_MAX_CMD_PDU_LENGTH 0xffffU
+#define V_NVME_TCP_MAX_CMD_PDU_LENGTH(x) ((x) << S_NVME_TCP_MAX_CMD_PDU_LENGTH)
+#define G_NVME_TCP_MAX_CMD_PDU_LENGTH(x) (((x) >> S_NVME_TCP_MAX_CMD_PDU_LENGTH) & M_NVME_TCP_MAX_CMD_PDU_LENGTH)
+
+#define A_ULP_RX_NVME_TCP_IQE_SIZE 0x192e4
+#define A_ULP_RX_NVME_TCP_NEW_PDU_TYPES 0x192e8
+#define A_ULP_RX_IWARP_PMOF_OPCODES_1 0x192ec
+#define A_ULP_RX_IWARP_PMOF_OPCODES_2 0x192f0
+#define A_ULP_RX_INT_ENABLE_PCMD 0x19300
+
+#define S_ENABLE_PCMD_SFIFO_3 30
+#define V_ENABLE_PCMD_SFIFO_3(x) ((x) << S_ENABLE_PCMD_SFIFO_3)
+#define F_ENABLE_PCMD_SFIFO_3 V_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_ENABLE_PCMD_FIFO_3 29
+#define V_ENABLE_PCMD_FIFO_3(x) ((x) << S_ENABLE_PCMD_FIFO_3)
+#define F_ENABLE_PCMD_FIFO_3 V_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_3 28
+#define V_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_ENABLE_PCMD_DDP_HINT_3)
+#define F_ENABLE_PCMD_DDP_HINT_3 V_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_ENABLE_PCMD_TPT_3 27
+#define V_ENABLE_PCMD_TPT_3(x) ((x) << S_ENABLE_PCMD_TPT_3)
+#define F_ENABLE_PCMD_TPT_3 V_ENABLE_PCMD_TPT_3(1U)
+
+#define S_ENABLE_PCMD_DDP_3 26
+#define V_ENABLE_PCMD_DDP_3(x) ((x) << S_ENABLE_PCMD_DDP_3)
+#define F_ENABLE_PCMD_DDP_3 V_ENABLE_PCMD_DDP_3(1U)
+
+#define S_ENABLE_PCMD_MPAR_3 25
+#define V_ENABLE_PCMD_MPAR_3(x) ((x) << S_ENABLE_PCMD_MPAR_3)
+#define F_ENABLE_PCMD_MPAR_3 V_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_ENABLE_PCMD_MPAC_3 24
+#define V_ENABLE_PCMD_MPAC_3(x) ((x) << S_ENABLE_PCMD_MPAC_3)
+#define F_ENABLE_PCMD_MPAC_3 V_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_ENABLE_PCMD_SFIFO_2 22
+#define V_ENABLE_PCMD_SFIFO_2(x) ((x) << S_ENABLE_PCMD_SFIFO_2)
+#define F_ENABLE_PCMD_SFIFO_2 V_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_ENABLE_PCMD_FIFO_2 21
+#define V_ENABLE_PCMD_FIFO_2(x) ((x) << S_ENABLE_PCMD_FIFO_2)
+#define F_ENABLE_PCMD_FIFO_2 V_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_2 20
+#define V_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_ENABLE_PCMD_DDP_HINT_2)
+#define F_ENABLE_PCMD_DDP_HINT_2 V_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_ENABLE_PCMD_TPT_2 19
+#define V_ENABLE_PCMD_TPT_2(x) ((x) << S_ENABLE_PCMD_TPT_2)
+#define F_ENABLE_PCMD_TPT_2 V_ENABLE_PCMD_TPT_2(1U)
+
+#define S_ENABLE_PCMD_DDP_2 18
+#define V_ENABLE_PCMD_DDP_2(x) ((x) << S_ENABLE_PCMD_DDP_2)
+#define F_ENABLE_PCMD_DDP_2 V_ENABLE_PCMD_DDP_2(1U)
+
+#define S_ENABLE_PCMD_MPAR_2 17
+#define V_ENABLE_PCMD_MPAR_2(x) ((x) << S_ENABLE_PCMD_MPAR_2)
+#define F_ENABLE_PCMD_MPAR_2 V_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_ENABLE_PCMD_MPAC_2 16
+#define V_ENABLE_PCMD_MPAC_2(x) ((x) << S_ENABLE_PCMD_MPAC_2)
+#define F_ENABLE_PCMD_MPAC_2 V_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_ENABLE_PCMD_SFIFO_1 14
+#define V_ENABLE_PCMD_SFIFO_1(x) ((x) << S_ENABLE_PCMD_SFIFO_1)
+#define F_ENABLE_PCMD_SFIFO_1 V_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_ENABLE_PCMD_FIFO_1 13
+#define V_ENABLE_PCMD_FIFO_1(x) ((x) << S_ENABLE_PCMD_FIFO_1)
+#define F_ENABLE_PCMD_FIFO_1 V_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_1 12
+#define V_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_ENABLE_PCMD_DDP_HINT_1)
+#define F_ENABLE_PCMD_DDP_HINT_1 V_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_ENABLE_PCMD_TPT_1 11
+#define V_ENABLE_PCMD_TPT_1(x) ((x) << S_ENABLE_PCMD_TPT_1)
+#define F_ENABLE_PCMD_TPT_1 V_ENABLE_PCMD_TPT_1(1U)
+
+#define S_ENABLE_PCMD_DDP_1 10
+#define V_ENABLE_PCMD_DDP_1(x) ((x) << S_ENABLE_PCMD_DDP_1)
+#define F_ENABLE_PCMD_DDP_1 V_ENABLE_PCMD_DDP_1(1U)
+
+#define S_ENABLE_PCMD_MPAR_1 9
+#define V_ENABLE_PCMD_MPAR_1(x) ((x) << S_ENABLE_PCMD_MPAR_1)
+#define F_ENABLE_PCMD_MPAR_1 V_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_ENABLE_PCMD_MPAC_1 8
+#define V_ENABLE_PCMD_MPAC_1(x) ((x) << S_ENABLE_PCMD_MPAC_1)
+#define F_ENABLE_PCMD_MPAC_1 V_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_ENABLE_PCMD_SFIFO_0 6
+#define V_ENABLE_PCMD_SFIFO_0(x) ((x) << S_ENABLE_PCMD_SFIFO_0)
+#define F_ENABLE_PCMD_SFIFO_0 V_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_ENABLE_PCMD_FIFO_0 5
+#define V_ENABLE_PCMD_FIFO_0(x) ((x) << S_ENABLE_PCMD_FIFO_0)
+#define F_ENABLE_PCMD_FIFO_0 V_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_0 4
+#define V_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_ENABLE_PCMD_DDP_HINT_0)
+#define F_ENABLE_PCMD_DDP_HINT_0 V_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_ENABLE_PCMD_TPT_0 3
+#define V_ENABLE_PCMD_TPT_0(x) ((x) << S_ENABLE_PCMD_TPT_0)
+#define F_ENABLE_PCMD_TPT_0 V_ENABLE_PCMD_TPT_0(1U)
+
+#define S_ENABLE_PCMD_DDP_0 2
+#define V_ENABLE_PCMD_DDP_0(x) ((x) << S_ENABLE_PCMD_DDP_0)
+#define F_ENABLE_PCMD_DDP_0 V_ENABLE_PCMD_DDP_0(1U)
+
+#define S_ENABLE_PCMD_MPAR_0 1
+#define V_ENABLE_PCMD_MPAR_0(x) ((x) << S_ENABLE_PCMD_MPAR_0)
+#define F_ENABLE_PCMD_MPAR_0 V_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_ENABLE_PCMD_MPAC_0 0
+#define V_ENABLE_PCMD_MPAC_0(x) ((x) << S_ENABLE_PCMD_MPAC_0)
+#define F_ENABLE_PCMD_MPAC_0 V_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_PCMD 0x19304
+
+#define S_CAUSE_PCMD_SFIFO_3 30
+#define V_CAUSE_PCMD_SFIFO_3(x) ((x) << S_CAUSE_PCMD_SFIFO_3)
+#define F_CAUSE_PCMD_SFIFO_3 V_CAUSE_PCMD_SFIFO_3(1U)
+
+#define S_CAUSE_PCMD_FIFO_3 29
+#define V_CAUSE_PCMD_FIFO_3(x) ((x) << S_CAUSE_PCMD_FIFO_3)
+#define F_CAUSE_PCMD_FIFO_3 V_CAUSE_PCMD_FIFO_3(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_3 28
+#define V_CAUSE_PCMD_DDP_HINT_3(x) ((x) << S_CAUSE_PCMD_DDP_HINT_3)
+#define F_CAUSE_PCMD_DDP_HINT_3 V_CAUSE_PCMD_DDP_HINT_3(1U)
+
+#define S_CAUSE_PCMD_TPT_3 27
+#define V_CAUSE_PCMD_TPT_3(x) ((x) << S_CAUSE_PCMD_TPT_3)
+#define F_CAUSE_PCMD_TPT_3 V_CAUSE_PCMD_TPT_3(1U)
+
+#define S_CAUSE_PCMD_DDP_3 26
+#define V_CAUSE_PCMD_DDP_3(x) ((x) << S_CAUSE_PCMD_DDP_3)
+#define F_CAUSE_PCMD_DDP_3 V_CAUSE_PCMD_DDP_3(1U)
+
+#define S_CAUSE_PCMD_MPAR_3 25
+#define V_CAUSE_PCMD_MPAR_3(x) ((x) << S_CAUSE_PCMD_MPAR_3)
+#define F_CAUSE_PCMD_MPAR_3 V_CAUSE_PCMD_MPAR_3(1U)
+
+#define S_CAUSE_PCMD_MPAC_3 24
+#define V_CAUSE_PCMD_MPAC_3(x) ((x) << S_CAUSE_PCMD_MPAC_3)
+#define F_CAUSE_PCMD_MPAC_3 V_CAUSE_PCMD_MPAC_3(1U)
+
+#define S_CAUSE_PCMD_SFIFO_2 22
+#define V_CAUSE_PCMD_SFIFO_2(x) ((x) << S_CAUSE_PCMD_SFIFO_2)
+#define F_CAUSE_PCMD_SFIFO_2 V_CAUSE_PCMD_SFIFO_2(1U)
+
+#define S_CAUSE_PCMD_FIFO_2 21
+#define V_CAUSE_PCMD_FIFO_2(x) ((x) << S_CAUSE_PCMD_FIFO_2)
+#define F_CAUSE_PCMD_FIFO_2 V_CAUSE_PCMD_FIFO_2(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_2 20
+#define V_CAUSE_PCMD_DDP_HINT_2(x) ((x) << S_CAUSE_PCMD_DDP_HINT_2)
+#define F_CAUSE_PCMD_DDP_HINT_2 V_CAUSE_PCMD_DDP_HINT_2(1U)
+
+#define S_CAUSE_PCMD_TPT_2 19
+#define V_CAUSE_PCMD_TPT_2(x) ((x) << S_CAUSE_PCMD_TPT_2)
+#define F_CAUSE_PCMD_TPT_2 V_CAUSE_PCMD_TPT_2(1U)
+
+#define S_CAUSE_PCMD_DDP_2 18
+#define V_CAUSE_PCMD_DDP_2(x) ((x) << S_CAUSE_PCMD_DDP_2)
+#define F_CAUSE_PCMD_DDP_2 V_CAUSE_PCMD_DDP_2(1U)
+
+#define S_CAUSE_PCMD_MPAR_2 17
+#define V_CAUSE_PCMD_MPAR_2(x) ((x) << S_CAUSE_PCMD_MPAR_2)
+#define F_CAUSE_PCMD_MPAR_2 V_CAUSE_PCMD_MPAR_2(1U)
+
+#define S_CAUSE_PCMD_MPAC_2 16
+#define V_CAUSE_PCMD_MPAC_2(x) ((x) << S_CAUSE_PCMD_MPAC_2)
+#define F_CAUSE_PCMD_MPAC_2 V_CAUSE_PCMD_MPAC_2(1U)
+
+#define S_CAUSE_PCMD_SFIFO_1 14
+#define V_CAUSE_PCMD_SFIFO_1(x) ((x) << S_CAUSE_PCMD_SFIFO_1)
+#define F_CAUSE_PCMD_SFIFO_1 V_CAUSE_PCMD_SFIFO_1(1U)
+
+#define S_CAUSE_PCMD_FIFO_1 13
+#define V_CAUSE_PCMD_FIFO_1(x) ((x) << S_CAUSE_PCMD_FIFO_1)
+#define F_CAUSE_PCMD_FIFO_1 V_CAUSE_PCMD_FIFO_1(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_1 12
+#define V_CAUSE_PCMD_DDP_HINT_1(x) ((x) << S_CAUSE_PCMD_DDP_HINT_1)
+#define F_CAUSE_PCMD_DDP_HINT_1 V_CAUSE_PCMD_DDP_HINT_1(1U)
+
+#define S_CAUSE_PCMD_TPT_1 11
+#define V_CAUSE_PCMD_TPT_1(x) ((x) << S_CAUSE_PCMD_TPT_1)
+#define F_CAUSE_PCMD_TPT_1 V_CAUSE_PCMD_TPT_1(1U)
+
+#define S_CAUSE_PCMD_DDP_1 10
+#define V_CAUSE_PCMD_DDP_1(x) ((x) << S_CAUSE_PCMD_DDP_1)
+#define F_CAUSE_PCMD_DDP_1 V_CAUSE_PCMD_DDP_1(1U)
+
+#define S_CAUSE_PCMD_MPAR_1 9
+#define V_CAUSE_PCMD_MPAR_1(x) ((x) << S_CAUSE_PCMD_MPAR_1)
+#define F_CAUSE_PCMD_MPAR_1 V_CAUSE_PCMD_MPAR_1(1U)
+
+#define S_CAUSE_PCMD_MPAC_1 8
+#define V_CAUSE_PCMD_MPAC_1(x) ((x) << S_CAUSE_PCMD_MPAC_1)
+#define F_CAUSE_PCMD_MPAC_1 V_CAUSE_PCMD_MPAC_1(1U)
+
+#define S_CAUSE_PCMD_SFIFO_0 6
+#define V_CAUSE_PCMD_SFIFO_0(x) ((x) << S_CAUSE_PCMD_SFIFO_0)
+#define F_CAUSE_PCMD_SFIFO_0 V_CAUSE_PCMD_SFIFO_0(1U)
+
+#define S_CAUSE_PCMD_FIFO_0 5
+#define V_CAUSE_PCMD_FIFO_0(x) ((x) << S_CAUSE_PCMD_FIFO_0)
+#define F_CAUSE_PCMD_FIFO_0 V_CAUSE_PCMD_FIFO_0(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_0 4
+#define V_CAUSE_PCMD_DDP_HINT_0(x) ((x) << S_CAUSE_PCMD_DDP_HINT_0)
+#define F_CAUSE_PCMD_DDP_HINT_0 V_CAUSE_PCMD_DDP_HINT_0(1U)
+
+#define S_CAUSE_PCMD_TPT_0 3
+#define V_CAUSE_PCMD_TPT_0(x) ((x) << S_CAUSE_PCMD_TPT_0)
+#define F_CAUSE_PCMD_TPT_0 V_CAUSE_PCMD_TPT_0(1U)
+
+#define S_CAUSE_PCMD_DDP_0 2
+#define V_CAUSE_PCMD_DDP_0(x) ((x) << S_CAUSE_PCMD_DDP_0)
+#define F_CAUSE_PCMD_DDP_0 V_CAUSE_PCMD_DDP_0(1U)
+
+#define S_CAUSE_PCMD_MPAR_0 1
+#define V_CAUSE_PCMD_MPAR_0(x) ((x) << S_CAUSE_PCMD_MPAR_0)
+#define F_CAUSE_PCMD_MPAR_0 V_CAUSE_PCMD_MPAR_0(1U)
+
+#define S_CAUSE_PCMD_MPAC_0 0
+#define V_CAUSE_PCMD_MPAC_0(x) ((x) << S_CAUSE_PCMD_MPAC_0)
+#define F_CAUSE_PCMD_MPAC_0 V_CAUSE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_PCMD 0x19308
+
+#define S_PERR_ENABLE_PCMD_SFIFO_3 30
+#define V_PERR_ENABLE_PCMD_SFIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_3)
+#define F_PERR_ENABLE_PCMD_SFIFO_3 V_PERR_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_3 29
+#define V_PERR_ENABLE_PCMD_FIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_3)
+#define F_PERR_ENABLE_PCMD_FIFO_3 V_PERR_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_3 28
+#define V_PERR_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_3)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_3 V_PERR_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_3 27
+#define V_PERR_ENABLE_PCMD_TPT_3(x) ((x) << S_PERR_ENABLE_PCMD_TPT_3)
+#define F_PERR_ENABLE_PCMD_TPT_3 V_PERR_ENABLE_PCMD_TPT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_3 26
+#define V_PERR_ENABLE_PCMD_DDP_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_3)
+#define F_PERR_ENABLE_PCMD_DDP_3 V_PERR_ENABLE_PCMD_DDP_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_3 25
+#define V_PERR_ENABLE_PCMD_MPAR_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_3)
+#define F_PERR_ENABLE_PCMD_MPAR_3 V_PERR_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_3 24
+#define V_PERR_ENABLE_PCMD_MPAC_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_3)
+#define F_PERR_ENABLE_PCMD_MPAC_3 V_PERR_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_2 22
+#define V_PERR_ENABLE_PCMD_SFIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_2)
+#define F_PERR_ENABLE_PCMD_SFIFO_2 V_PERR_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_2 21
+#define V_PERR_ENABLE_PCMD_FIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_2)
+#define F_PERR_ENABLE_PCMD_FIFO_2 V_PERR_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_2 20
+#define V_PERR_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_2)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_2 V_PERR_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_2 19
+#define V_PERR_ENABLE_PCMD_TPT_2(x) ((x) << S_PERR_ENABLE_PCMD_TPT_2)
+#define F_PERR_ENABLE_PCMD_TPT_2 V_PERR_ENABLE_PCMD_TPT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_2 18
+#define V_PERR_ENABLE_PCMD_DDP_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_2)
+#define F_PERR_ENABLE_PCMD_DDP_2 V_PERR_ENABLE_PCMD_DDP_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_2 17
+#define V_PERR_ENABLE_PCMD_MPAR_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_2)
+#define F_PERR_ENABLE_PCMD_MPAR_2 V_PERR_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_2 16
+#define V_PERR_ENABLE_PCMD_MPAC_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_2)
+#define F_PERR_ENABLE_PCMD_MPAC_2 V_PERR_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_1 14
+#define V_PERR_ENABLE_PCMD_SFIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_1)
+#define F_PERR_ENABLE_PCMD_SFIFO_1 V_PERR_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_1 13
+#define V_PERR_ENABLE_PCMD_FIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_1)
+#define F_PERR_ENABLE_PCMD_FIFO_1 V_PERR_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_1 12
+#define V_PERR_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_1)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_1 V_PERR_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_1 11
+#define V_PERR_ENABLE_PCMD_TPT_1(x) ((x) << S_PERR_ENABLE_PCMD_TPT_1)
+#define F_PERR_ENABLE_PCMD_TPT_1 V_PERR_ENABLE_PCMD_TPT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_1 10
+#define V_PERR_ENABLE_PCMD_DDP_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_1)
+#define F_PERR_ENABLE_PCMD_DDP_1 V_PERR_ENABLE_PCMD_DDP_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_1 9
+#define V_PERR_ENABLE_PCMD_MPAR_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_1)
+#define F_PERR_ENABLE_PCMD_MPAR_1 V_PERR_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_1 8
+#define V_PERR_ENABLE_PCMD_MPAC_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_1)
+#define F_PERR_ENABLE_PCMD_MPAC_1 V_PERR_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_0 6
+#define V_PERR_ENABLE_PCMD_SFIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_0)
+#define F_PERR_ENABLE_PCMD_SFIFO_0 V_PERR_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_0 5
+#define V_PERR_ENABLE_PCMD_FIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_0)
+#define F_PERR_ENABLE_PCMD_FIFO_0 V_PERR_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_0 4
+#define V_PERR_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_0)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_0 V_PERR_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_0 3
+#define V_PERR_ENABLE_PCMD_TPT_0(x) ((x) << S_PERR_ENABLE_PCMD_TPT_0)
+#define F_PERR_ENABLE_PCMD_TPT_0 V_PERR_ENABLE_PCMD_TPT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_0 2
+#define V_PERR_ENABLE_PCMD_DDP_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_0)
+#define F_PERR_ENABLE_PCMD_DDP_0 V_PERR_ENABLE_PCMD_DDP_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_0 1
+#define V_PERR_ENABLE_PCMD_MPAR_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_0)
+#define F_PERR_ENABLE_PCMD_MPAR_0 V_PERR_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_0 0
+#define V_PERR_ENABLE_PCMD_MPAC_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_0)
+#define F_PERR_ENABLE_PCMD_MPAC_0 V_PERR_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_DATA 0x19310
+
+#define S_ENABLE_DATA_SNOOP_3 29
+#define V_ENABLE_DATA_SNOOP_3(x) ((x) << S_ENABLE_DATA_SNOOP_3)
+#define F_ENABLE_DATA_SNOOP_3 V_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_ENABLE_DATA_SFIFO_3 28
+#define V_ENABLE_DATA_SFIFO_3(x) ((x) << S_ENABLE_DATA_SFIFO_3)
+#define F_ENABLE_DATA_SFIFO_3 V_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_ENABLE_DATA_FIFO_3 27
+#define V_ENABLE_DATA_FIFO_3(x) ((x) << S_ENABLE_DATA_FIFO_3)
+#define F_ENABLE_DATA_FIFO_3 V_ENABLE_DATA_FIFO_3(1U)
+
+#define S_ENABLE_DATA_DDP_3 26
+#define V_ENABLE_DATA_DDP_3(x) ((x) << S_ENABLE_DATA_DDP_3)
+#define F_ENABLE_DATA_DDP_3 V_ENABLE_DATA_DDP_3(1U)
+
+#define S_ENABLE_DATA_CTX_3 25
+#define V_ENABLE_DATA_CTX_3(x) ((x) << S_ENABLE_DATA_CTX_3)
+#define F_ENABLE_DATA_CTX_3 V_ENABLE_DATA_CTX_3(1U)
+
+#define S_ENABLE_DATA_PARSER_3 24
+#define V_ENABLE_DATA_PARSER_3(x) ((x) << S_ENABLE_DATA_PARSER_3)
+#define F_ENABLE_DATA_PARSER_3 V_ENABLE_DATA_PARSER_3(1U)
+
+#define S_ENABLE_DATA_SNOOP_2 21
+#define V_ENABLE_DATA_SNOOP_2(x) ((x) << S_ENABLE_DATA_SNOOP_2)
+#define F_ENABLE_DATA_SNOOP_2 V_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_ENABLE_DATA_SFIFO_2 20
+#define V_ENABLE_DATA_SFIFO_2(x) ((x) << S_ENABLE_DATA_SFIFO_2)
+#define F_ENABLE_DATA_SFIFO_2 V_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_ENABLE_DATA_FIFO_2 19
+#define V_ENABLE_DATA_FIFO_2(x) ((x) << S_ENABLE_DATA_FIFO_2)
+#define F_ENABLE_DATA_FIFO_2 V_ENABLE_DATA_FIFO_2(1U)
+
+#define S_ENABLE_DATA_DDP_2 18
+#define V_ENABLE_DATA_DDP_2(x) ((x) << S_ENABLE_DATA_DDP_2)
+#define F_ENABLE_DATA_DDP_2 V_ENABLE_DATA_DDP_2(1U)
+
+#define S_ENABLE_DATA_CTX_2 17
+#define V_ENABLE_DATA_CTX_2(x) ((x) << S_ENABLE_DATA_CTX_2)
+#define F_ENABLE_DATA_CTX_2 V_ENABLE_DATA_CTX_2(1U)
+
+#define S_ENABLE_DATA_PARSER_2 16
+#define V_ENABLE_DATA_PARSER_2(x) ((x) << S_ENABLE_DATA_PARSER_2)
+#define F_ENABLE_DATA_PARSER_2 V_ENABLE_DATA_PARSER_2(1U)
+
+#define S_ENABLE_DATA_SNOOP_1 13
+#define V_ENABLE_DATA_SNOOP_1(x) ((x) << S_ENABLE_DATA_SNOOP_1)
+#define F_ENABLE_DATA_SNOOP_1 V_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_ENABLE_DATA_SFIFO_1 12
+#define V_ENABLE_DATA_SFIFO_1(x) ((x) << S_ENABLE_DATA_SFIFO_1)
+#define F_ENABLE_DATA_SFIFO_1 V_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_ENABLE_DATA_FIFO_1 11
+#define V_ENABLE_DATA_FIFO_1(x) ((x) << S_ENABLE_DATA_FIFO_1)
+#define F_ENABLE_DATA_FIFO_1 V_ENABLE_DATA_FIFO_1(1U)
+
+#define S_ENABLE_DATA_DDP_1 10
+#define V_ENABLE_DATA_DDP_1(x) ((x) << S_ENABLE_DATA_DDP_1)
+#define F_ENABLE_DATA_DDP_1 V_ENABLE_DATA_DDP_1(1U)
+
+#define S_ENABLE_DATA_CTX_1 9
+#define V_ENABLE_DATA_CTX_1(x) ((x) << S_ENABLE_DATA_CTX_1)
+#define F_ENABLE_DATA_CTX_1 V_ENABLE_DATA_CTX_1(1U)
+
+#define S_ENABLE_DATA_PARSER_1 8
+#define V_ENABLE_DATA_PARSER_1(x) ((x) << S_ENABLE_DATA_PARSER_1)
+#define F_ENABLE_DATA_PARSER_1 V_ENABLE_DATA_PARSER_1(1U)
+
+#define S_ENABLE_DATA_SNOOP_0 5
+#define V_ENABLE_DATA_SNOOP_0(x) ((x) << S_ENABLE_DATA_SNOOP_0)
+#define F_ENABLE_DATA_SNOOP_0 V_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_ENABLE_DATA_SFIFO_0 4
+#define V_ENABLE_DATA_SFIFO_0(x) ((x) << S_ENABLE_DATA_SFIFO_0)
+#define F_ENABLE_DATA_SFIFO_0 V_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_ENABLE_DATA_FIFO_0 3
+#define V_ENABLE_DATA_FIFO_0(x) ((x) << S_ENABLE_DATA_FIFO_0)
+#define F_ENABLE_DATA_FIFO_0 V_ENABLE_DATA_FIFO_0(1U)
+
+#define S_ENABLE_DATA_DDP_0 2
+#define V_ENABLE_DATA_DDP_0(x) ((x) << S_ENABLE_DATA_DDP_0)
+#define F_ENABLE_DATA_DDP_0 V_ENABLE_DATA_DDP_0(1U)
+
+#define S_ENABLE_DATA_CTX_0 1
+#define V_ENABLE_DATA_CTX_0(x) ((x) << S_ENABLE_DATA_CTX_0)
+#define F_ENABLE_DATA_CTX_0 V_ENABLE_DATA_CTX_0(1U)
+
+#define S_ENABLE_DATA_PARSER_0 0
+#define V_ENABLE_DATA_PARSER_0(x) ((x) << S_ENABLE_DATA_PARSER_0)
+#define F_ENABLE_DATA_PARSER_0 V_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_DATA 0x19314
+
+#define S_CAUSE_DATA_SNOOP_3 29
+#define V_CAUSE_DATA_SNOOP_3(x) ((x) << S_CAUSE_DATA_SNOOP_3)
+#define F_CAUSE_DATA_SNOOP_3 V_CAUSE_DATA_SNOOP_3(1U)
+
+#define S_CAUSE_DATA_SFIFO_3 28
+#define V_CAUSE_DATA_SFIFO_3(x) ((x) << S_CAUSE_DATA_SFIFO_3)
+#define F_CAUSE_DATA_SFIFO_3 V_CAUSE_DATA_SFIFO_3(1U)
+
+#define S_CAUSE_DATA_FIFO_3 27
+#define V_CAUSE_DATA_FIFO_3(x) ((x) << S_CAUSE_DATA_FIFO_3)
+#define F_CAUSE_DATA_FIFO_3 V_CAUSE_DATA_FIFO_3(1U)
+
+#define S_CAUSE_DATA_DDP_3 26
+#define V_CAUSE_DATA_DDP_3(x) ((x) << S_CAUSE_DATA_DDP_3)
+#define F_CAUSE_DATA_DDP_3 V_CAUSE_DATA_DDP_3(1U)
+
+#define S_CAUSE_DATA_CTX_3 25
+#define V_CAUSE_DATA_CTX_3(x) ((x) << S_CAUSE_DATA_CTX_3)
+#define F_CAUSE_DATA_CTX_3 V_CAUSE_DATA_CTX_3(1U)
+
+#define S_CAUSE_DATA_PARSER_3 24
+#define V_CAUSE_DATA_PARSER_3(x) ((x) << S_CAUSE_DATA_PARSER_3)
+#define F_CAUSE_DATA_PARSER_3 V_CAUSE_DATA_PARSER_3(1U)
+
+#define S_CAUSE_DATA_SNOOP_2 21
+#define V_CAUSE_DATA_SNOOP_2(x) ((x) << S_CAUSE_DATA_SNOOP_2)
+#define F_CAUSE_DATA_SNOOP_2 V_CAUSE_DATA_SNOOP_2(1U)
+
+#define S_CAUSE_DATA_SFIFO_2 20
+#define V_CAUSE_DATA_SFIFO_2(x) ((x) << S_CAUSE_DATA_SFIFO_2)
+#define F_CAUSE_DATA_SFIFO_2 V_CAUSE_DATA_SFIFO_2(1U)
+
+#define S_CAUSE_DATA_FIFO_2 19
+#define V_CAUSE_DATA_FIFO_2(x) ((x) << S_CAUSE_DATA_FIFO_2)
+#define F_CAUSE_DATA_FIFO_2 V_CAUSE_DATA_FIFO_2(1U)
+
+#define S_CAUSE_DATA_DDP_2 18
+#define V_CAUSE_DATA_DDP_2(x) ((x) << S_CAUSE_DATA_DDP_2)
+#define F_CAUSE_DATA_DDP_2 V_CAUSE_DATA_DDP_2(1U)
+
+#define S_CAUSE_DATA_CTX_2 17
+#define V_CAUSE_DATA_CTX_2(x) ((x) << S_CAUSE_DATA_CTX_2)
+#define F_CAUSE_DATA_CTX_2 V_CAUSE_DATA_CTX_2(1U)
+
+#define S_CAUSE_DATA_PARSER_2 16
+#define V_CAUSE_DATA_PARSER_2(x) ((x) << S_CAUSE_DATA_PARSER_2)
+#define F_CAUSE_DATA_PARSER_2 V_CAUSE_DATA_PARSER_2(1U)
+
+#define S_CAUSE_DATA_SNOOP_1 13
+#define V_CAUSE_DATA_SNOOP_1(x) ((x) << S_CAUSE_DATA_SNOOP_1)
+#define F_CAUSE_DATA_SNOOP_1 V_CAUSE_DATA_SNOOP_1(1U)
+
+#define S_CAUSE_DATA_SFIFO_1 12
+#define V_CAUSE_DATA_SFIFO_1(x) ((x) << S_CAUSE_DATA_SFIFO_1)
+#define F_CAUSE_DATA_SFIFO_1 V_CAUSE_DATA_SFIFO_1(1U)
+
+#define S_CAUSE_DATA_FIFO_1 11
+#define V_CAUSE_DATA_FIFO_1(x) ((x) << S_CAUSE_DATA_FIFO_1)
+#define F_CAUSE_DATA_FIFO_1 V_CAUSE_DATA_FIFO_1(1U)
+
+#define S_CAUSE_DATA_DDP_1 10
+#define V_CAUSE_DATA_DDP_1(x) ((x) << S_CAUSE_DATA_DDP_1)
+#define F_CAUSE_DATA_DDP_1 V_CAUSE_DATA_DDP_1(1U)
+
+#define S_CAUSE_DATA_CTX_1 9
+#define V_CAUSE_DATA_CTX_1(x) ((x) << S_CAUSE_DATA_CTX_1)
+#define F_CAUSE_DATA_CTX_1 V_CAUSE_DATA_CTX_1(1U)
+
+#define S_CAUSE_DATA_PARSER_1 8
+#define V_CAUSE_DATA_PARSER_1(x) ((x) << S_CAUSE_DATA_PARSER_1)
+#define F_CAUSE_DATA_PARSER_1 V_CAUSE_DATA_PARSER_1(1U)
+
+#define S_CAUSE_DATA_SNOOP_0 5
+#define V_CAUSE_DATA_SNOOP_0(x) ((x) << S_CAUSE_DATA_SNOOP_0)
+#define F_CAUSE_DATA_SNOOP_0 V_CAUSE_DATA_SNOOP_0(1U)
+
+#define S_CAUSE_DATA_SFIFO_0 4
+#define V_CAUSE_DATA_SFIFO_0(x) ((x) << S_CAUSE_DATA_SFIFO_0)
+#define F_CAUSE_DATA_SFIFO_0 V_CAUSE_DATA_SFIFO_0(1U)
+
+#define S_CAUSE_DATA_FIFO_0 3
+#define V_CAUSE_DATA_FIFO_0(x) ((x) << S_CAUSE_DATA_FIFO_0)
+#define F_CAUSE_DATA_FIFO_0 V_CAUSE_DATA_FIFO_0(1U)
+
+#define S_CAUSE_DATA_DDP_0 2
+#define V_CAUSE_DATA_DDP_0(x) ((x) << S_CAUSE_DATA_DDP_0)
+#define F_CAUSE_DATA_DDP_0 V_CAUSE_DATA_DDP_0(1U)
+
+#define S_CAUSE_DATA_CTX_0 1
+#define V_CAUSE_DATA_CTX_0(x) ((x) << S_CAUSE_DATA_CTX_0)
+#define F_CAUSE_DATA_CTX_0 V_CAUSE_DATA_CTX_0(1U)
+
+#define S_CAUSE_DATA_PARSER_0 0
+#define V_CAUSE_DATA_PARSER_0(x) ((x) << S_CAUSE_DATA_PARSER_0)
+#define F_CAUSE_DATA_PARSER_0 V_CAUSE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_DATA 0x19318
+
+#define S_PERR_ENABLE_DATA_SNOOP_3 29
+#define V_PERR_ENABLE_DATA_SNOOP_3(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_3)
+#define F_PERR_ENABLE_DATA_SNOOP_3 V_PERR_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_3 28
+#define V_PERR_ENABLE_DATA_SFIFO_3(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_3)
+#define F_PERR_ENABLE_DATA_SFIFO_3 V_PERR_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_3 27
+#define V_PERR_ENABLE_DATA_FIFO_3(x) ((x) << S_PERR_ENABLE_DATA_FIFO_3)
+#define F_PERR_ENABLE_DATA_FIFO_3 V_PERR_ENABLE_DATA_FIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_3 26
+#define V_PERR_ENABLE_DATA_DDP_3(x) ((x) << S_PERR_ENABLE_DATA_DDP_3)
+#define F_PERR_ENABLE_DATA_DDP_3 V_PERR_ENABLE_DATA_DDP_3(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_3 25
+#define V_PERR_ENABLE_DATA_CTX_3(x) ((x) << S_PERR_ENABLE_DATA_CTX_3)
+#define F_PERR_ENABLE_DATA_CTX_3 V_PERR_ENABLE_DATA_CTX_3(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_3 24
+#define V_PERR_ENABLE_DATA_PARSER_3(x) ((x) << S_PERR_ENABLE_DATA_PARSER_3)
+#define F_PERR_ENABLE_DATA_PARSER_3 V_PERR_ENABLE_DATA_PARSER_3(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_2 21
+#define V_PERR_ENABLE_DATA_SNOOP_2(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_2)
+#define F_PERR_ENABLE_DATA_SNOOP_2 V_PERR_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_2 20
+#define V_PERR_ENABLE_DATA_SFIFO_2(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_2)
+#define F_PERR_ENABLE_DATA_SFIFO_2 V_PERR_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_2 19
+#define V_PERR_ENABLE_DATA_FIFO_2(x) ((x) << S_PERR_ENABLE_DATA_FIFO_2)
+#define F_PERR_ENABLE_DATA_FIFO_2 V_PERR_ENABLE_DATA_FIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_2 18
+#define V_PERR_ENABLE_DATA_DDP_2(x) ((x) << S_PERR_ENABLE_DATA_DDP_2)
+#define F_PERR_ENABLE_DATA_DDP_2 V_PERR_ENABLE_DATA_DDP_2(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_2 17
+#define V_PERR_ENABLE_DATA_CTX_2(x) ((x) << S_PERR_ENABLE_DATA_CTX_2)
+#define F_PERR_ENABLE_DATA_CTX_2 V_PERR_ENABLE_DATA_CTX_2(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_2 16
+#define V_PERR_ENABLE_DATA_PARSER_2(x) ((x) << S_PERR_ENABLE_DATA_PARSER_2)
+#define F_PERR_ENABLE_DATA_PARSER_2 V_PERR_ENABLE_DATA_PARSER_2(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_1 13
+#define V_PERR_ENABLE_DATA_SNOOP_1(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_1)
+#define F_PERR_ENABLE_DATA_SNOOP_1 V_PERR_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_1 12
+#define V_PERR_ENABLE_DATA_SFIFO_1(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_1)
+#define F_PERR_ENABLE_DATA_SFIFO_1 V_PERR_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_1 11
+#define V_PERR_ENABLE_DATA_FIFO_1(x) ((x) << S_PERR_ENABLE_DATA_FIFO_1)
+#define F_PERR_ENABLE_DATA_FIFO_1 V_PERR_ENABLE_DATA_FIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_1 10
+#define V_PERR_ENABLE_DATA_DDP_1(x) ((x) << S_PERR_ENABLE_DATA_DDP_1)
+#define F_PERR_ENABLE_DATA_DDP_1 V_PERR_ENABLE_DATA_DDP_1(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_1 9
+#define V_PERR_ENABLE_DATA_CTX_1(x) ((x) << S_PERR_ENABLE_DATA_CTX_1)
+#define F_PERR_ENABLE_DATA_CTX_1 V_PERR_ENABLE_DATA_CTX_1(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_1 8
+#define V_PERR_ENABLE_DATA_PARSER_1(x) ((x) << S_PERR_ENABLE_DATA_PARSER_1)
+#define F_PERR_ENABLE_DATA_PARSER_1 V_PERR_ENABLE_DATA_PARSER_1(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_0 5
+#define V_PERR_ENABLE_DATA_SNOOP_0(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_0)
+#define F_PERR_ENABLE_DATA_SNOOP_0 V_PERR_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_0 4
+#define V_PERR_ENABLE_DATA_SFIFO_0(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_0)
+#define F_PERR_ENABLE_DATA_SFIFO_0 V_PERR_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_0 3
+#define V_PERR_ENABLE_DATA_FIFO_0(x) ((x) << S_PERR_ENABLE_DATA_FIFO_0)
+#define F_PERR_ENABLE_DATA_FIFO_0 V_PERR_ENABLE_DATA_FIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_0 2
+#define V_PERR_ENABLE_DATA_DDP_0(x) ((x) << S_PERR_ENABLE_DATA_DDP_0)
+#define F_PERR_ENABLE_DATA_DDP_0 V_PERR_ENABLE_DATA_DDP_0(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_0 1
+#define V_PERR_ENABLE_DATA_CTX_0(x) ((x) << S_PERR_ENABLE_DATA_CTX_0)
+#define F_PERR_ENABLE_DATA_CTX_0 V_PERR_ENABLE_DATA_CTX_0(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_0 0
+#define V_PERR_ENABLE_DATA_PARSER_0(x) ((x) << S_PERR_ENABLE_DATA_PARSER_0)
+#define F_PERR_ENABLE_DATA_PARSER_0 V_PERR_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_ARB 0x19320
+
+#define S_ENABLE_ARB_PBL_PF_3 27
+#define V_ENABLE_ARB_PBL_PF_3(x) ((x) << S_ENABLE_ARB_PBL_PF_3)
+#define F_ENABLE_ARB_PBL_PF_3 V_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_ENABLE_ARB_PF_3 26
+#define V_ENABLE_ARB_PF_3(x) ((x) << S_ENABLE_ARB_PF_3)
+#define F_ENABLE_ARB_PF_3 V_ENABLE_ARB_PF_3(1U)
+
+#define S_ENABLE_ARB_TPT_PF_3 25
+#define V_ENABLE_ARB_TPT_PF_3(x) ((x) << S_ENABLE_ARB_TPT_PF_3)
+#define F_ENABLE_ARB_TPT_PF_3 V_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_ENABLE_ARB_F_3 24
+#define V_ENABLE_ARB_F_3(x) ((x) << S_ENABLE_ARB_F_3)
+#define F_ENABLE_ARB_F_3 V_ENABLE_ARB_F_3(1U)
+
+#define S_ENABLE_ARB_PBL_PF_2 19
+#define V_ENABLE_ARB_PBL_PF_2(x) ((x) << S_ENABLE_ARB_PBL_PF_2)
+#define F_ENABLE_ARB_PBL_PF_2 V_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_ENABLE_ARB_PF_2 18
+#define V_ENABLE_ARB_PF_2(x) ((x) << S_ENABLE_ARB_PF_2)
+#define F_ENABLE_ARB_PF_2 V_ENABLE_ARB_PF_2(1U)
+
+#define S_ENABLE_ARB_TPT_PF_2 17
+#define V_ENABLE_ARB_TPT_PF_2(x) ((x) << S_ENABLE_ARB_TPT_PF_2)
+#define F_ENABLE_ARB_TPT_PF_2 V_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_ENABLE_ARB_F_2 16
+#define V_ENABLE_ARB_F_2(x) ((x) << S_ENABLE_ARB_F_2)
+#define F_ENABLE_ARB_F_2 V_ENABLE_ARB_F_2(1U)
+
+#define S_ENABLE_ARB_PBL_PF_1 11
+#define V_ENABLE_ARB_PBL_PF_1(x) ((x) << S_ENABLE_ARB_PBL_PF_1)
+#define F_ENABLE_ARB_PBL_PF_1 V_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_ENABLE_ARB_PF_1 10
+#define V_ENABLE_ARB_PF_1(x) ((x) << S_ENABLE_ARB_PF_1)
+#define F_ENABLE_ARB_PF_1 V_ENABLE_ARB_PF_1(1U)
+
+#define S_ENABLE_ARB_TPT_PF_1 9
+#define V_ENABLE_ARB_TPT_PF_1(x) ((x) << S_ENABLE_ARB_TPT_PF_1)
+#define F_ENABLE_ARB_TPT_PF_1 V_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_ENABLE_ARB_F_1 8
+#define V_ENABLE_ARB_F_1(x) ((x) << S_ENABLE_ARB_F_1)
+#define F_ENABLE_ARB_F_1 V_ENABLE_ARB_F_1(1U)
+
+#define S_ENABLE_ARB_PBL_PF_0 3
+#define V_ENABLE_ARB_PBL_PF_0(x) ((x) << S_ENABLE_ARB_PBL_PF_0)
+#define F_ENABLE_ARB_PBL_PF_0 V_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_ENABLE_ARB_PF_0 2
+#define V_ENABLE_ARB_PF_0(x) ((x) << S_ENABLE_ARB_PF_0)
+#define F_ENABLE_ARB_PF_0 V_ENABLE_ARB_PF_0(1U)
+
+#define S_ENABLE_ARB_TPT_PF_0 1
+#define V_ENABLE_ARB_TPT_PF_0(x) ((x) << S_ENABLE_ARB_TPT_PF_0)
+#define F_ENABLE_ARB_TPT_PF_0 V_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_ENABLE_ARB_F_0 0
+#define V_ENABLE_ARB_F_0(x) ((x) << S_ENABLE_ARB_F_0)
+#define F_ENABLE_ARB_F_0 V_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_ARB 0x19324
+
+#define S_CAUSE_ARB_PBL_PF_3 27
+#define V_CAUSE_ARB_PBL_PF_3(x) ((x) << S_CAUSE_ARB_PBL_PF_3)
+#define F_CAUSE_ARB_PBL_PF_3 V_CAUSE_ARB_PBL_PF_3(1U)
+
+#define S_CAUSE_ARB_PF_3 26
+#define V_CAUSE_ARB_PF_3(x) ((x) << S_CAUSE_ARB_PF_3)
+#define F_CAUSE_ARB_PF_3 V_CAUSE_ARB_PF_3(1U)
+
+#define S_CAUSE_ARB_TPT_PF_3 25
+#define V_CAUSE_ARB_TPT_PF_3(x) ((x) << S_CAUSE_ARB_TPT_PF_3)
+#define F_CAUSE_ARB_TPT_PF_3 V_CAUSE_ARB_TPT_PF_3(1U)
+
+#define S_CAUSE_ARB_F_3 24
+#define V_CAUSE_ARB_F_3(x) ((x) << S_CAUSE_ARB_F_3)
+#define F_CAUSE_ARB_F_3 V_CAUSE_ARB_F_3(1U)
+
+#define S_CAUSE_ARB_PBL_PF_2 19
+#define V_CAUSE_ARB_PBL_PF_2(x) ((x) << S_CAUSE_ARB_PBL_PF_2)
+#define F_CAUSE_ARB_PBL_PF_2 V_CAUSE_ARB_PBL_PF_2(1U)
+
+#define S_CAUSE_ARB_PF_2 18
+#define V_CAUSE_ARB_PF_2(x) ((x) << S_CAUSE_ARB_PF_2)
+#define F_CAUSE_ARB_PF_2 V_CAUSE_ARB_PF_2(1U)
+
+#define S_CAUSE_ARB_TPT_PF_2 17
+#define V_CAUSE_ARB_TPT_PF_2(x) ((x) << S_CAUSE_ARB_TPT_PF_2)
+#define F_CAUSE_ARB_TPT_PF_2 V_CAUSE_ARB_TPT_PF_2(1U)
+
+#define S_CAUSE_ARB_F_2 16
+#define V_CAUSE_ARB_F_2(x) ((x) << S_CAUSE_ARB_F_2)
+#define F_CAUSE_ARB_F_2 V_CAUSE_ARB_F_2(1U)
+
+#define S_CAUSE_ARB_PBL_PF_1 11
+#define V_CAUSE_ARB_PBL_PF_1(x) ((x) << S_CAUSE_ARB_PBL_PF_1)
+#define F_CAUSE_ARB_PBL_PF_1 V_CAUSE_ARB_PBL_PF_1(1U)
+
+#define S_CAUSE_ARB_PF_1 10
+#define V_CAUSE_ARB_PF_1(x) ((x) << S_CAUSE_ARB_PF_1)
+#define F_CAUSE_ARB_PF_1 V_CAUSE_ARB_PF_1(1U)
+
+#define S_CAUSE_ARB_TPT_PF_1 9
+#define V_CAUSE_ARB_TPT_PF_1(x) ((x) << S_CAUSE_ARB_TPT_PF_1)
+#define F_CAUSE_ARB_TPT_PF_1 V_CAUSE_ARB_TPT_PF_1(1U)
+
+#define S_CAUSE_ARB_F_1 8
+#define V_CAUSE_ARB_F_1(x) ((x) << S_CAUSE_ARB_F_1)
+#define F_CAUSE_ARB_F_1 V_CAUSE_ARB_F_1(1U)
+
+#define S_CAUSE_ARB_PBL_PF_0 3
+#define V_CAUSE_ARB_PBL_PF_0(x) ((x) << S_CAUSE_ARB_PBL_PF_0)
+#define F_CAUSE_ARB_PBL_PF_0 V_CAUSE_ARB_PBL_PF_0(1U)
+
+#define S_CAUSE_ARB_PF_0 2
+#define V_CAUSE_ARB_PF_0(x) ((x) << S_CAUSE_ARB_PF_0)
+#define F_CAUSE_ARB_PF_0 V_CAUSE_ARB_PF_0(1U)
+
+#define S_CAUSE_ARB_TPT_PF_0 1
+#define V_CAUSE_ARB_TPT_PF_0(x) ((x) << S_CAUSE_ARB_TPT_PF_0)
+#define F_CAUSE_ARB_TPT_PF_0 V_CAUSE_ARB_TPT_PF_0(1U)
+
+#define S_CAUSE_ARB_F_0 0
+#define V_CAUSE_ARB_F_0(x) ((x) << S_CAUSE_ARB_F_0)
+#define F_CAUSE_ARB_F_0 V_CAUSE_ARB_F_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_ARB 0x19328
+
+#define S_PERR_ENABLE_ARB_PBL_PF_3 27
+#define V_PERR_ENABLE_ARB_PBL_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_3)
+#define F_PERR_ENABLE_ARB_PBL_PF_3 V_PERR_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_PF_3 26
+#define V_PERR_ENABLE_ARB_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PF_3)
+#define F_PERR_ENABLE_ARB_PF_3 V_PERR_ENABLE_ARB_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_3 25
+#define V_PERR_ENABLE_ARB_TPT_PF_3(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_3)
+#define F_PERR_ENABLE_ARB_TPT_PF_3 V_PERR_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_F_3 24
+#define V_PERR_ENABLE_ARB_F_3(x) ((x) << S_PERR_ENABLE_ARB_F_3)
+#define F_PERR_ENABLE_ARB_F_3 V_PERR_ENABLE_ARB_F_3(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_2 19
+#define V_PERR_ENABLE_ARB_PBL_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_2)
+#define F_PERR_ENABLE_ARB_PBL_PF_2 V_PERR_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_PF_2 18
+#define V_PERR_ENABLE_ARB_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PF_2)
+#define F_PERR_ENABLE_ARB_PF_2 V_PERR_ENABLE_ARB_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_2 17
+#define V_PERR_ENABLE_ARB_TPT_PF_2(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_2)
+#define F_PERR_ENABLE_ARB_TPT_PF_2 V_PERR_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_F_2 16
+#define V_PERR_ENABLE_ARB_F_2(x) ((x) << S_PERR_ENABLE_ARB_F_2)
+#define F_PERR_ENABLE_ARB_F_2 V_PERR_ENABLE_ARB_F_2(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_1 11
+#define V_PERR_ENABLE_ARB_PBL_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_1)
+#define F_PERR_ENABLE_ARB_PBL_PF_1 V_PERR_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_PF_1 10
+#define V_PERR_ENABLE_ARB_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PF_1)
+#define F_PERR_ENABLE_ARB_PF_1 V_PERR_ENABLE_ARB_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_1 9
+#define V_PERR_ENABLE_ARB_TPT_PF_1(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_1)
+#define F_PERR_ENABLE_ARB_TPT_PF_1 V_PERR_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_F_1 8
+#define V_PERR_ENABLE_ARB_F_1(x) ((x) << S_PERR_ENABLE_ARB_F_1)
+#define F_PERR_ENABLE_ARB_F_1 V_PERR_ENABLE_ARB_F_1(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_0 3
+#define V_PERR_ENABLE_ARB_PBL_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_0)
+#define F_PERR_ENABLE_ARB_PBL_PF_0 V_PERR_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_PF_0 2
+#define V_PERR_ENABLE_ARB_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PF_0)
+#define F_PERR_ENABLE_ARB_PF_0 V_PERR_ENABLE_ARB_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_0 1
+#define V_PERR_ENABLE_ARB_TPT_PF_0(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_0)
+#define F_PERR_ENABLE_ARB_TPT_PF_0 V_PERR_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_F_0 0
+#define V_PERR_ENABLE_ARB_F_0(x) ((x) << S_PERR_ENABLE_ARB_F_0)
+#define F_PERR_ENABLE_ARB_F_0 V_PERR_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_CTL1 0x19330
+
+#define S_ISCSI_CTL2 27
+#define V_ISCSI_CTL2(x) ((x) << S_ISCSI_CTL2)
+#define F_ISCSI_CTL2 V_ISCSI_CTL2(1U)
+
+#define S_ISCSI_CTL1 26
+#define V_ISCSI_CTL1(x) ((x) << S_ISCSI_CTL1)
+#define F_ISCSI_CTL1 V_ISCSI_CTL1(1U)
+
+#define S_ISCSI_CTL0 25
+#define V_ISCSI_CTL0(x) ((x) << S_ISCSI_CTL0)
+#define F_ISCSI_CTL0 V_ISCSI_CTL0(1U)
+
+#define S_NVME_TCP_DATA_ALIGNMENT 16
+#define M_NVME_TCP_DATA_ALIGNMENT 0x1ffU
+#define V_NVME_TCP_DATA_ALIGNMENT(x) ((x) << S_NVME_TCP_DATA_ALIGNMENT)
+#define G_NVME_TCP_DATA_ALIGNMENT(x) (((x) >> S_NVME_TCP_DATA_ALIGNMENT) & M_NVME_TCP_DATA_ALIGNMENT)
+
+#define S_NVME_TCP_INVLD_MSG_DIS 14
+#define M_NVME_TCP_INVLD_MSG_DIS 0x3U
+#define V_NVME_TCP_INVLD_MSG_DIS(x) ((x) << S_NVME_TCP_INVLD_MSG_DIS)
+#define G_NVME_TCP_INVLD_MSG_DIS(x) (((x) >> S_NVME_TCP_INVLD_MSG_DIS) & M_NVME_TCP_INVLD_MSG_DIS)
+
+#define S_NVME_TCP_DDP_PDU_CHK_TYPE 13
+#define V_NVME_TCP_DDP_PDU_CHK_TYPE(x) ((x) << S_NVME_TCP_DDP_PDU_CHK_TYPE)
+#define F_NVME_TCP_DDP_PDU_CHK_TYPE V_NVME_TCP_DDP_PDU_CHK_TYPE(1U)
+
+#define S_T10_CONFIG_ENB 12
+#define V_T10_CONFIG_ENB(x) ((x) << S_T10_CONFIG_ENB)
+#define F_T10_CONFIG_ENB V_T10_CONFIG_ENB(1U)
+
+#define S_NVME_TCP_COLOUR_ENB 10
+#define M_NVME_TCP_COLOUR_ENB 0x3U
+#define V_NVME_TCP_COLOUR_ENB(x) ((x) << S_NVME_TCP_COLOUR_ENB)
+#define G_NVME_TCP_COLOUR_ENB(x) (((x) >> S_NVME_TCP_COLOUR_ENB) & M_NVME_TCP_COLOUR_ENB)
+
+#define S_ROCE_SEND_RQE 8
+#define V_ROCE_SEND_RQE(x) ((x) << S_ROCE_SEND_RQE)
+#define F_ROCE_SEND_RQE V_ROCE_SEND_RQE(1U)
+
+#define S_RDMA_INVLD_MSG_DIS 6
+#define M_RDMA_INVLD_MSG_DIS 0x3U
+#define V_RDMA_INVLD_MSG_DIS(x) ((x) << S_RDMA_INVLD_MSG_DIS)
+#define G_RDMA_INVLD_MSG_DIS(x) (((x) >> S_RDMA_INVLD_MSG_DIS) & M_RDMA_INVLD_MSG_DIS)
+
+#define S_ROCE_INVLD_MSG_DIS 4
+#define M_ROCE_INVLD_MSG_DIS 0x3U
+#define V_ROCE_INVLD_MSG_DIS(x) ((x) << S_ROCE_INVLD_MSG_DIS)
+#define G_ROCE_INVLD_MSG_DIS(x) (((x) >> S_ROCE_INVLD_MSG_DIS) & M_ROCE_INVLD_MSG_DIS)
+
+#define S_T7_MEM_ADDR_CTRL 2
+#define M_T7_MEM_ADDR_CTRL 0x3U
+#define V_T7_MEM_ADDR_CTRL(x) ((x) << S_T7_MEM_ADDR_CTRL)
+#define G_T7_MEM_ADDR_CTRL(x) (((x) >> S_T7_MEM_ADDR_CTRL) & M_T7_MEM_ADDR_CTRL)
+
+#define S_ENB_32K_PDU 1
+#define V_ENB_32K_PDU(x) ((x) << S_ENB_32K_PDU)
+#define F_ENB_32K_PDU V_ENB_32K_PDU(1U)
+
+#define S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS 0
+#define V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(x) ((x) << S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS)
+#define F_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(1U)
+
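The added ULP_RX fields follow this header's standard accessor pattern: S_* is the bit offset, M_* the field mask, V_* shifts a value into place, G_* extracts it, and F_* is the one-bit form. A minimal read-modify-write sketch for A_ULP_RX_CTL1, assuming the driver's usual t4_read_reg()/t4_write_reg() accessors, an attached struct adapter *sc, and a caller-supplied alignment value:

	uint32_t v;

	v = t4_read_reg(sc, A_ULP_RX_CTL1);
	v &= ~V_NVME_TCP_DATA_ALIGNMENT(M_NVME_TCP_DATA_ALIGNMENT);	/* clear the 9-bit field */
	v |= V_NVME_TCP_DATA_ALIGNMENT(alignment & M_NVME_TCP_DATA_ALIGNMENT);
	v |= F_ENB_32K_PDU;						/* set a single-bit flag */
	t4_write_reg(sc, A_ULP_RX_CTL1, v);

	/* Read a field back out of a raw register value. */
	alignment = G_NVME_TCP_DATA_ALIGNMENT(v);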
#define A_ULP_RX_TLS_IND_CMD 0x19348
#define S_TLS_RX_REG_OFF_ADDR 0
@@ -37795,6 +48839,8 @@
#define G_TLS_RX_REG_OFF_ADDR(x) (((x) >> S_TLS_RX_REG_OFF_ADDR) & M_TLS_RX_REG_OFF_ADDR)
#define A_ULP_RX_TLS_IND_DATA 0x1934c
+#define A_ULP_RX_TLS_CH0_HMACCTRL_CFG 0x20
+#define A_ULP_RX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module SF */
#define SF_BASE_ADDR 0x193f8
@@ -37815,6 +48861,39 @@
#define V_BYTECNT(x) ((x) << S_BYTECNT)
#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+#define S_EN32BADDR 30
+#define V_EN32BADDR(x) ((x) << S_EN32BADDR)
+#define F_EN32BADDR V_EN32BADDR(1U)
+
+#define S_NUM_OF_BYTES 1
+#define M_NUM_OF_BYTES 0x3U
+#define V_NUM_OF_BYTES(x) ((x) << S_NUM_OF_BYTES)
+#define G_NUM_OF_BYTES(x) (((x) >> S_NUM_OF_BYTES) & M_NUM_OF_BYTES)
+
+#define S_QUADREADDISABLE 5
+#define V_QUADREADDISABLE(x) ((x) << S_QUADREADDISABLE)
+#define F_QUADREADDISABLE V_QUADREADDISABLE(1U)
+
+#define S_EXIT4B 6
+#define V_EXIT4B(x) ((x) << S_EXIT4B)
+#define F_EXIT4B V_EXIT4B(1U)
+
+#define S_ENTER4B 7
+#define V_ENTER4B(x) ((x) << S_ENTER4B)
+#define F_ENTER4B V_ENTER4B(1U)
+
+#define S_QUADWRENABLE 8
+#define V_QUADWRENABLE(x) ((x) << S_QUADWRENABLE)
+#define F_QUADWRENABLE V_QUADWRENABLE(1U)
+
+#define S_REGDBG_SEL 9
+#define V_REGDBG_SEL(x) ((x) << S_REGDBG_SEL)
+#define F_REGDBG_SEL V_REGDBG_SEL(1U)
+
+#define S_REGDBG_MODE 10
+#define V_REGDBG_MODE(x) ((x) << S_REGDBG_MODE)
+#define F_REGDBG_MODE V_REGDBG_MODE(1U)
+
/* registers for module PL */
#define PL_BASE_ADDR 0x19400
@@ -37892,21 +48971,6 @@
#define F_SWINT V_SWINT(1U)
#define A_PL_WHOAMI 0x19400
-
-#define S_T6_SOURCEPF 9
-#define M_T6_SOURCEPF 0x7U
-#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
-#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
-
-#define S_T6_ISVF 8
-#define V_T6_ISVF(x) ((x) << S_T6_ISVF)
-#define F_T6_ISVF V_T6_ISVF(1U)
-
-#define S_T6_VFID 0
-#define M_T6_VFID 0xffU
-#define V_T6_VFID(x) ((x) << S_T6_VFID)
-#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)
-
#define A_PL_PERR_CAUSE 0x19404
#define S_UART 28
@@ -38037,6 +49101,134 @@
#define V_ANYMAC(x) ((x) << S_ANYMAC)
#define F_ANYMAC V_ANYMAC(1U)
+#define S_T7_PL_PERR_CRYPTO_KEY 31
+#define V_T7_PL_PERR_CRYPTO_KEY(x) ((x) << S_T7_PL_PERR_CRYPTO_KEY)
+#define F_T7_PL_PERR_CRYPTO_KEY V_T7_PL_PERR_CRYPTO_KEY(1U)
+
+#define S_T7_PL_PERR_CRYPTO1 30
+#define V_T7_PL_PERR_CRYPTO1(x) ((x) << S_T7_PL_PERR_CRYPTO1)
+#define F_T7_PL_PERR_CRYPTO1 V_T7_PL_PERR_CRYPTO1(1U)
+
+#define S_T7_PL_PERR_CRYPTO0 29
+#define V_T7_PL_PERR_CRYPTO0(x) ((x) << S_T7_PL_PERR_CRYPTO0)
+#define F_T7_PL_PERR_CRYPTO0 V_T7_PL_PERR_CRYPTO0(1U)
+
+#define S_T7_PL_PERR_GCACHE 28
+#define V_T7_PL_PERR_GCACHE(x) ((x) << S_T7_PL_PERR_GCACHE)
+#define F_T7_PL_PERR_GCACHE V_T7_PL_PERR_GCACHE(1U)
+
+#define S_T7_PL_PERR_ARM 27
+#define V_T7_PL_PERR_ARM(x) ((x) << S_T7_PL_PERR_ARM)
+#define F_T7_PL_PERR_ARM V_T7_PL_PERR_ARM(1U)
+
+#define S_T7_PL_PERR_ULP_TX 26
+#define V_T7_PL_PERR_ULP_TX(x) ((x) << S_T7_PL_PERR_ULP_TX)
+#define F_T7_PL_PERR_ULP_TX V_T7_PL_PERR_ULP_TX(1U)
+
+#define S_T7_PL_PERR_SGE 25
+#define V_T7_PL_PERR_SGE(x) ((x) << S_T7_PL_PERR_SGE)
+#define F_T7_PL_PERR_SGE V_T7_PL_PERR_SGE(1U)
+
+#define S_T7_PL_PERR_HMA 24
+#define V_T7_PL_PERR_HMA(x) ((x) << S_T7_PL_PERR_HMA)
+#define F_T7_PL_PERR_HMA V_T7_PL_PERR_HMA(1U)
+
+#define S_T7_PL_PERR_CPL_SWITCH 23
+#define V_T7_PL_PERR_CPL_SWITCH(x) ((x) << S_T7_PL_PERR_CPL_SWITCH)
+#define F_T7_PL_PERR_CPL_SWITCH V_T7_PL_PERR_CPL_SWITCH(1U)
+
+#define S_T7_PL_PERR_ULP_RX 22
+#define V_T7_PL_PERR_ULP_RX(x) ((x) << S_T7_PL_PERR_ULP_RX)
+#define F_T7_PL_PERR_ULP_RX V_T7_PL_PERR_ULP_RX(1U)
+
+#define S_T7_PL_PERR_PM_RX 21
+#define V_T7_PL_PERR_PM_RX(x) ((x) << S_T7_PL_PERR_PM_RX)
+#define F_T7_PL_PERR_PM_RX V_T7_PL_PERR_PM_RX(1U)
+
+#define S_T7_PL_PERR_PM_TX 20
+#define V_T7_PL_PERR_PM_TX(x) ((x) << S_T7_PL_PERR_PM_TX)
+#define F_T7_PL_PERR_PM_TX V_T7_PL_PERR_PM_TX(1U)
+
+#define S_T7_PL_PERR_MA 19
+#define V_T7_PL_PERR_MA(x) ((x) << S_T7_PL_PERR_MA)
+#define F_T7_PL_PERR_MA V_T7_PL_PERR_MA(1U)
+
+#define S_T7_PL_PERR_TP 18
+#define V_T7_PL_PERR_TP(x) ((x) << S_T7_PL_PERR_TP)
+#define F_T7_PL_PERR_TP V_T7_PL_PERR_TP(1U)
+
+#define S_T7_PL_PERR_LE 17
+#define V_T7_PL_PERR_LE(x) ((x) << S_T7_PL_PERR_LE)
+#define F_T7_PL_PERR_LE V_T7_PL_PERR_LE(1U)
+
+#define S_T7_PL_PERR_EDC1 16
+#define V_T7_PL_PERR_EDC1(x) ((x) << S_T7_PL_PERR_EDC1)
+#define F_T7_PL_PERR_EDC1 V_T7_PL_PERR_EDC1(1U)
+
+#define S_T7_PL_PERR_EDC0 15
+#define V_T7_PL_PERR_EDC0(x) ((x) << S_T7_PL_PERR_EDC0)
+#define F_T7_PL_PERR_EDC0 V_T7_PL_PERR_EDC0(1U)
+
+#define S_T7_PL_PERR_MC1 14
+#define V_T7_PL_PERR_MC1(x) ((x) << S_T7_PL_PERR_MC1)
+#define F_T7_PL_PERR_MC1 V_T7_PL_PERR_MC1(1U)
+
+#define S_T7_PL_PERR_MC0 13
+#define V_T7_PL_PERR_MC0(x) ((x) << S_T7_PL_PERR_MC0)
+#define F_T7_PL_PERR_MC0 V_T7_PL_PERR_MC0(1U)
+
+#define S_T7_PL_PERR_PCIE 12
+#define V_T7_PL_PERR_PCIE(x) ((x) << S_T7_PL_PERR_PCIE)
+#define F_T7_PL_PERR_PCIE V_T7_PL_PERR_PCIE(1U)
+
+#define S_T7_PL_PERR_UART 11
+#define V_T7_PL_PERR_UART(x) ((x) << S_T7_PL_PERR_UART)
+#define F_T7_PL_PERR_UART V_T7_PL_PERR_UART(1U)
+
+#define S_T7_PL_PERR_PMU 10
+#define V_T7_PL_PERR_PMU(x) ((x) << S_T7_PL_PERR_PMU)
+#define F_T7_PL_PERR_PMU V_T7_PL_PERR_PMU(1U)
+
+#define S_T7_PL_PERR_MAC 9
+#define V_T7_PL_PERR_MAC(x) ((x) << S_T7_PL_PERR_MAC)
+#define F_T7_PL_PERR_MAC V_T7_PL_PERR_MAC(1U)
+
+#define S_T7_PL_PERR_SMB 8
+#define V_T7_PL_PERR_SMB(x) ((x) << S_T7_PL_PERR_SMB)
+#define F_T7_PL_PERR_SMB V_T7_PL_PERR_SMB(1U)
+
+#define S_T7_PL_PERR_SF 7
+#define V_T7_PL_PERR_SF(x) ((x) << S_T7_PL_PERR_SF)
+#define F_T7_PL_PERR_SF V_T7_PL_PERR_SF(1U)
+
+#define S_T7_PL_PERR_PL 6
+#define V_T7_PL_PERR_PL(x) ((x) << S_T7_PL_PERR_PL)
+#define F_T7_PL_PERR_PL V_T7_PL_PERR_PL(1U)
+
+#define S_T7_PL_PERR_NCSI 5
+#define V_T7_PL_PERR_NCSI(x) ((x) << S_T7_PL_PERR_NCSI)
+#define F_T7_PL_PERR_NCSI V_T7_PL_PERR_NCSI(1U)
+
+#define S_T7_PL_PERR_MPS 4
+#define V_T7_PL_PERR_MPS(x) ((x) << S_T7_PL_PERR_MPS)
+#define F_T7_PL_PERR_MPS V_T7_PL_PERR_MPS(1U)
+
+#define S_T7_PL_PERR_MI 3
+#define V_T7_PL_PERR_MI(x) ((x) << S_T7_PL_PERR_MI)
+#define F_T7_PL_PERR_MI V_T7_PL_PERR_MI(1U)
+
+#define S_T7_PL_PERR_DBG 2
+#define V_T7_PL_PERR_DBG(x) ((x) << S_T7_PL_PERR_DBG)
+#define F_T7_PL_PERR_DBG V_T7_PL_PERR_DBG(1U)
+
+#define S_T7_PL_PERR_I2CM 1
+#define V_T7_PL_PERR_I2CM(x) ((x) << S_T7_PL_PERR_I2CM)
+#define F_T7_PL_PERR_I2CM V_T7_PL_PERR_I2CM(1U)
+
+#define S_T7_PL_PERR_CIM 0
+#define V_T7_PL_PERR_CIM(x) ((x) << S_T7_PL_PERR_CIM)
+#define F_T7_PL_PERR_CIM V_T7_PL_PERR_CIM(1U)
+
#define A_PL_PERR_ENABLE 0x19408
#define A_PL_INT_CAUSE 0x1940c
@@ -38064,6 +49256,78 @@
#define V_MAC0(x) ((x) << S_MAC0)
#define F_MAC0 V_MAC0(1U)
+#define S_T7_FLR 31
+#define V_T7_FLR(x) ((x) << S_T7_FLR)
+#define F_T7_FLR V_T7_FLR(1U)
+
+#define S_T7_SW_CIM 30
+#define V_T7_SW_CIM(x) ((x) << S_T7_SW_CIM)
+#define F_T7_SW_CIM V_T7_SW_CIM(1U)
+
+#define S_T7_ULP_TX 29
+#define V_T7_ULP_TX(x) ((x) << S_T7_ULP_TX)
+#define F_T7_ULP_TX V_T7_ULP_TX(1U)
+
+#define S_T7_SGE 28
+#define V_T7_SGE(x) ((x) << S_T7_SGE)
+#define F_T7_SGE V_T7_SGE(1U)
+
+#define S_T7_HMA 27
+#define V_T7_HMA(x) ((x) << S_T7_HMA)
+#define F_T7_HMA V_T7_HMA(1U)
+
+#define S_T7_CPL_SWITCH 26
+#define V_T7_CPL_SWITCH(x) ((x) << S_T7_CPL_SWITCH)
+#define F_T7_CPL_SWITCH V_T7_CPL_SWITCH(1U)
+
+#define S_T7_ULP_RX 25
+#define V_T7_ULP_RX(x) ((x) << S_T7_ULP_RX)
+#define F_T7_ULP_RX V_T7_ULP_RX(1U)
+
+#define S_T7_PM_RX 24
+#define V_T7_PM_RX(x) ((x) << S_T7_PM_RX)
+#define F_T7_PM_RX V_T7_PM_RX(1U)
+
+#define S_T7_PM_TX 23
+#define V_T7_PM_TX(x) ((x) << S_T7_PM_TX)
+#define F_T7_PM_TX V_T7_PM_TX(1U)
+
+#define S_T7_MA 22
+#define V_T7_MA(x) ((x) << S_T7_MA)
+#define F_T7_MA V_T7_MA(1U)
+
+#define S_T7_TP 21
+#define V_T7_TP(x) ((x) << S_T7_TP)
+#define F_T7_TP V_T7_TP(1U)
+
+#define S_T7_LE 20
+#define V_T7_LE(x) ((x) << S_T7_LE)
+#define F_T7_LE V_T7_LE(1U)
+
+#define S_T7_EDC1 19
+#define V_T7_EDC1(x) ((x) << S_T7_EDC1)
+#define F_T7_EDC1 V_T7_EDC1(1U)
+
+#define S_T7_EDC0 18
+#define V_T7_EDC0(x) ((x) << S_T7_EDC0)
+#define F_T7_EDC0 V_T7_EDC0(1U)
+
+#define S_T7_MC1 17
+#define V_T7_MC1(x) ((x) << S_T7_MC1)
+#define F_T7_MC1 V_T7_MC1(1U)
+
+#define S_T7_MC0 16
+#define V_T7_MC0(x) ((x) << S_T7_MC0)
+#define F_T7_MC0 V_T7_MC0(1U)
+
+#define S_T7_PCIE 15
+#define V_T7_PCIE(x) ((x) << S_T7_PCIE)
+#define F_T7_PCIE V_T7_PCIE(1U)
+
+#define S_T7_UART 14
+#define V_T7_UART(x) ((x) << S_T7_UART)
+#define F_T7_UART V_T7_UART(1U)
+
#define A_PL_INT_ENABLE 0x19410
#define A_PL_INT_MAP0 0x19414
@@ -38262,15 +49526,10 @@
#define V_T6_LN0_AECMD(x) ((x) << S_T6_LN0_AECMD)
#define G_T6_LN0_AECMD(x) (((x) >> S_T6_LN0_AECMD) & M_T6_LN0_AECMD)
-#define S_T6_STATECFGINITF 16
-#define M_T6_STATECFGINITF 0xffU
-#define V_T6_STATECFGINITF(x) ((x) << S_T6_STATECFGINITF)
-#define G_T6_STATECFGINITF(x) (((x) >> S_T6_STATECFGINITF) & M_T6_STATECFGINITF)
-
-#define S_T6_STATECFGINIT 12
-#define M_T6_STATECFGINIT 0xfU
-#define V_T6_STATECFGINIT(x) ((x) << S_T6_STATECFGINIT)
-#define G_T6_STATECFGINIT(x) (((x) >> S_T6_STATECFGINIT) & M_T6_STATECFGINIT)
+#define S_T6_1_STATECFGINITF 16
+#define M_T6_1_STATECFGINITF 0xffU
+#define V_T6_1_STATECFGINITF(x) ((x) << S_T6_1_STATECFGINITF)
+#define G_T6_1_STATECFGINITF(x) (((x) >> S_T6_1_STATECFGINITF) & M_T6_1_STATECFGINITF)
#define S_PHY_STATUS 10
#define V_PHY_STATUS(x) ((x) << S_PHY_STATUS)
@@ -38285,9 +49544,9 @@
#define V_PERSTTIMEOUT_PL(x) ((x) << S_PERSTTIMEOUT_PL)
#define F_PERSTTIMEOUT_PL V_PERSTTIMEOUT_PL(1U)
-#define S_T6_LTSSMENABLE 6
-#define V_T6_LTSSMENABLE(x) ((x) << S_T6_LTSSMENABLE)
-#define F_T6_LTSSMENABLE V_T6_LTSSMENABLE(1U)
+#define S_SPEEDMS 30
+#define V_SPEEDMS(x) ((x) << S_SPEEDMS)
+#define F_SPEEDMS V_SPEEDMS(1U)
#define A_PL_PCIE_CTL_STAT 0x19444
@@ -38382,6 +49641,37 @@
#define V_MAP0(x) ((x) << S_MAP0)
#define G_MAP0(x) (((x) >> S_MAP0) & M_MAP0)
+#define A_PL_INT_CAUSE2 0x19478
+
+#define S_CRYPTO_KEY 4
+#define V_CRYPTO_KEY(x) ((x) << S_CRYPTO_KEY)
+#define F_CRYPTO_KEY V_CRYPTO_KEY(1U)
+
+#define S_CRYPTO1 3
+#define V_CRYPTO1(x) ((x) << S_CRYPTO1)
+#define F_CRYPTO1 V_CRYPTO1(1U)
+
+#define S_CRYPTO0 2
+#define V_CRYPTO0(x) ((x) << S_CRYPTO0)
+#define F_CRYPTO0 V_CRYPTO0(1U)
+
+#define S_GCACHE 1
+#define V_GCACHE(x) ((x) << S_GCACHE)
+#define F_GCACHE V_GCACHE(1U)
+
+#define S_ARM 0
+#define V_ARM(x) ((x) << S_ARM)
+#define F_ARM V_ARM(1U)
+
+#define A_PL_INT_ENABLE2 0x1947c
+#define A_PL_ER_CMD 0x19488
+
+#define S_ER_ADDR 2
+#define M_ER_ADDR 0x3fffffffU
+#define V_ER_ADDR(x) ((x) << S_ER_ADDR)
+#define G_ER_ADDR(x) (((x) >> S_ER_ADDR) & M_ER_ADDR)
+
+#define A_PL_ER_DATA 0x1948c
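A_PL_INT_CAUSE2 is a cause register, so the new one-bit F_* macros can be tested against a raw read; A_PL_ER_CMD carries a 30-bit address field above two low bits whose meaning is not given here. The sketch below leaves those low bits zero and assumes the usual write-one-to-clear convention for the cause bits:

	uint32_t cause;

	cause = t4_read_reg(sc, A_PL_INT_CAUSE2);
	if (cause & F_ARM)
		device_printf(sc->dev, "ARM core interrupt\n");
	if (cause & (F_CRYPTO0 | F_CRYPTO1 | F_CRYPTO_KEY))
		device_printf(sc->dev, "crypto block interrupt\n");
	t4_write_reg(sc, A_PL_INT_CAUSE2, cause);	/* assumed W1C */

	/* Compose an ER command word from a word address 'addr'. */
	t4_write_reg(sc, A_PL_ER_CMD, V_ER_ADDR(addr & M_ER_ADDR));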
#define A_PL_VF_SLICE_L 0x19490
#define S_LIMITADDR 16
@@ -38638,6 +49928,10 @@
#define V_REGION_EN(x) ((x) << S_REGION_EN)
#define G_REGION_EN(x) (((x) >> S_REGION_EN) & M_REGION_EN)
+#define S_CACHEBYPASS 28
+#define V_CACHEBYPASS(x) ((x) << S_CACHEBYPASS)
+#define F_CACHEBYPASS V_CACHEBYPASS(1U)
+
#define A_LE_MISC 0x19c08
#define S_CMPUNVAIL 0
@@ -38830,6 +50124,10 @@
#define V_TCAM_SIZE(x) ((x) << S_TCAM_SIZE)
#define G_TCAM_SIZE(x) (((x) >> S_TCAM_SIZE) & M_TCAM_SIZE)
+#define S_MLL_MASK 2
+#define V_MLL_MASK(x) ((x) << S_MLL_MASK)
+#define F_MLL_MASK V_MLL_MASK(1U)
+
#define A_LE_DB_INT_ENABLE 0x19c38
#define S_MSGSEL 27
@@ -39045,40 +50343,15 @@
#define V_PIPELINEERR(x) ((x) << S_PIPELINEERR)
#define F_PIPELINEERR V_PIPELINEERR(1U)
-#define A_LE_DB_INT_CAUSE 0x19c3c
-
-#define S_T6_ACTRGNFULL 21
-#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL)
-#define F_T6_ACTRGNFULL V_T6_ACTRGNFULL(1U)
+#define S_CACHEINTPERR 31
+#define V_CACHEINTPERR(x) ((x) << S_CACHEINTPERR)
+#define F_CACHEINTPERR V_CACHEINTPERR(1U)
-#define S_T6_ACTCNTIPV6TZERO 20
-#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO)
-#define F_T6_ACTCNTIPV6TZERO V_T6_ACTCNTIPV6TZERO(1U)
-
-#define S_T6_ACTCNTIPV4TZERO 19
-#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO)
-#define F_T6_ACTCNTIPV4TZERO V_T6_ACTCNTIPV4TZERO(1U)
-
-#define S_T6_ACTCNTIPV6ZERO 18
-#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO)
-#define F_T6_ACTCNTIPV6ZERO V_T6_ACTCNTIPV6ZERO(1U)
-
-#define S_T6_ACTCNTIPV4ZERO 17
-#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO)
-#define F_T6_ACTCNTIPV4ZERO V_T6_ACTCNTIPV4ZERO(1U)
-
-#define S_T6_UNKNOWNCMD 3
-#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD)
-#define F_T6_UNKNOWNCMD V_T6_UNKNOWNCMD(1U)
-
-#define S_T6_LIP0 2
-#define V_T6_LIP0(x) ((x) << S_T6_LIP0)
-#define F_T6_LIP0 V_T6_LIP0(1U)
-
-#define S_T6_LIPMISS 1
-#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS)
-#define F_T6_LIPMISS V_T6_LIPMISS(1U)
+#define S_CACHESRAMPERR 30
+#define V_CACHESRAMPERR(x) ((x) << S_CACHESRAMPERR)
+#define F_CACHESRAMPERR V_CACHESRAMPERR(1U)
+#define A_LE_DB_INT_CAUSE 0x19c3c
#define A_LE_DB_INT_TID 0x19c40
#define S_INTTID 0
@@ -39287,6 +50560,14 @@
#define A_LE_DB_MASK_IPV6 0x19ca0
#define A_LE_DB_DBG_MATCH_DATA 0x19ca0
+#define A_LE_CMM_CONFIG 0x19cc0
+#define A_LE_CACHE_DBG 0x19cc4
+#define A_LE_CACHE_WR_ALL_CNT 0x19cc8
+#define A_LE_CACHE_WR_HIT_CNT 0x19ccc
+#define A_LE_CACHE_RD_ALL_CNT 0x19cd0
+#define A_LE_CACHE_RD_HIT_CNT 0x19cd4
+#define A_LE_CACHE_MC_WR_CNT 0x19cd8
+#define A_LE_CACHE_MC_RD_CNT 0x19cdc
#define A_LE_DB_REQ_RSP_CNT 0x19ce4
#define S_T4_RSPCNT 16
@@ -39309,6 +50590,14 @@
#define V_REQCNTLE(x) ((x) << S_REQCNTLE)
#define G_REQCNTLE(x) (((x) >> S_REQCNTLE) & M_REQCNTLE)
+#define A_LE_IND_ADDR 0x19ce8
+
+#define S_T7_1_ADDR 0
+#define M_T7_1_ADDR 0xffU
+#define V_T7_1_ADDR(x) ((x) << S_T7_1_ADDR)
+#define G_T7_1_ADDR(x) (((x) >> S_T7_1_ADDR) & M_T7_1_ADDR)
+
+#define A_LE_IND_DATA 0x19cec
#define A_LE_DB_DBGI_CONFIG 0x19cf0
#define S_DBGICMDPERR 31
@@ -39436,6 +50725,11 @@
#define V_T6_HASHTBLMEMCRCERR(x) ((x) << S_T6_HASHTBLMEMCRCERR)
#define F_T6_HASHTBLMEMCRCERR V_T6_HASHTBLMEMCRCERR(1U)
+#define S_T7_BKCHKPERIOD 22
+#define M_T7_BKCHKPERIOD 0xffU
+#define V_T7_BKCHKPERIOD(x) ((x) << S_T7_BKCHKPERIOD)
+#define G_T7_BKCHKPERIOD(x) (((x) >> S_T7_BKCHKPERIOD) & M_T7_BKCHKPERIOD)
+
#define A_LE_SPARE 0x19cfc
#define A_LE_DB_DBGI_REQ_DATA 0x19d00
#define A_LE_DB_DBGI_REQ_MASK 0x19d50
@@ -39551,6 +50845,7 @@
#define V_HASH_TID_BASE(x) ((x) << S_HASH_TID_BASE)
#define G_HASH_TID_BASE(x) (((x) >> S_HASH_TID_BASE) & M_HASH_TID_BASE)
+#define A_T7_LE_DB_HASH_TID_BASE 0x19df8
#define A_LE_PERR_INJECT 0x19dfc
#define S_LEMEMSEL 1
@@ -39573,6 +50868,7 @@
#define A_LE_HASH_MASK_GEN_IPV6 0x19eb0
#define A_LE_HASH_MASK_GEN_IPV6T5 0x19eb4
#define A_T6_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
+#define A_T7_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
#define A_LE_HASH_MASK_CMP_IPV4 0x19ee0
#define A_LE_HASH_MASK_CMP_IPV4T5 0x19ee4
#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 0x19ee4
@@ -39677,6 +50973,9 @@
#define A_LE_TCAM_DEBUG_LA_DATA 0x19f4c
#define A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 0x19f90
#define A_LE_DB_SECOND_CMP_HASH_MASK_IPV4 0x19fa4
+#define A_LE_TCAM_BIST_CTRL 0x19fb0
+#define A_LE_TCAM_BIST_CB_PASS 0x19fb4
+#define A_LE_TCAM_BIST_CB_BUSY 0x19fbc
#define A_LE_HASH_COLLISION 0x19fc4
#define A_LE_GLOBAL_COLLISION 0x19fc8
#define A_LE_FULL_CNT_COLLISION 0x19fcc
@@ -39686,6 +50985,38 @@
#define A_LE_RSP_DEBUG_LA_DATAT5 0x19fdc
#define A_LE_RSP_DEBUG_LA_WRPTRT5 0x19fe0
#define A_LE_DEBUG_LA_SEL_DATA 0x19fe4
+#define A_LE_TCAM_NEG_CTRL0 0x0
+#define A_LE_TCAM_NEG_CTRL1 0x1
+#define A_LE_TCAM_NEG_CTRL2 0x2
+#define A_LE_TCAM_NEG_CTRL3 0x3
+#define A_LE_TCAM_NEG_CTRL4 0x4
+#define A_LE_TCAM_NEG_CTRL5 0x5
+#define A_LE_TCAM_NEG_CTRL6 0x6
+#define A_LE_TCAM_NEG_CTRL7 0x7
+#define A_LE_TCAM_NEG_CTRL8 0x8
+#define A_LE_TCAM_NEG_CTRL9 0x9
+#define A_LE_TCAM_NEG_CTRL10 0xa
+#define A_LE_TCAM_NEG_CTRL11 0xb
+#define A_LE_TCAM_NEG_CTRL12 0xc
+#define A_LE_TCAM_NEG_CTRL13 0xd
+#define A_LE_TCAM_NEG_CTRL14 0xe
+#define A_LE_TCAM_NEG_CTRL15 0xf
+#define A_LE_TCAM_NEG_CTRL16 0x10
+#define A_LE_TCAM_NEG_CTRL17 0x11
+#define A_LE_TCAM_NEG_CTRL18 0x12
+#define A_LE_TCAM_NEG_CTRL19 0x13
+#define A_LE_TCAM_NEG_CTRL20 0x14
+#define A_LE_TCAM_NEG_CTRL21 0x15
+#define A_LE_TCAM_NEG_CTRL22 0x16
+#define A_LE_TCAM_NEG_CTRL23 0x17
+#define A_LE_TCAM_NEG_CTRL24 0x18
+#define A_LE_TCAM_NEG_CTRL25 0x19
+#define A_LE_TCAM_NEG_CTRL26 0x1a
+#define A_LE_TCAM_NEG_CTRL27 0x1b
+#define A_LE_TCAM_NEG_CTRL28 0x1c
+#define A_LE_TCAM_NEG_CTRL29 0x1d
+#define A_LE_TCAM_NEG_CTRL30 0x1e
+#define A_LE_TCAM_NEG_CTRL31 0x1f
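The A_LE_TCAM_NEG_CTRL0 through A_LE_TCAM_NEG_CTRL31 entries sit at offsets 0x0-0x1f rather than in the 0x19xxx LE window, which suggests, though the diff does not say so, that they are reached through the A_LE_IND_ADDR/A_LE_IND_DATA pair added earlier. A sketch of that assumed indirect access:

	/* Assumed pattern: post the 8-bit offset, then read the data register. */
	static uint32_t
	le_ind_read(struct adapter *sc, u_int offset)
	{
		t4_write_reg(sc, A_LE_IND_ADDR, V_T7_1_ADDR(offset));
		return (t4_read_reg(sc, A_LE_IND_DATA));
	}

	uint32_t ctrl0 = le_ind_read(sc, A_LE_TCAM_NEG_CTRL0);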
/* registers for module NCSI */
#define NCSI_BASE_ADDR 0x1a000
@@ -39735,6 +51066,10 @@
#define V_TX_BYTE_SWAP(x) ((x) << S_TX_BYTE_SWAP)
#define F_TX_BYTE_SWAP V_TX_BYTE_SWAP(1U)
+#define S_XGMAC0_EN 0
+#define V_XGMAC0_EN(x) ((x) << S_XGMAC0_EN)
+#define F_XGMAC0_EN V_XGMAC0_EN(1U)
+
#define A_NCSI_RST_CTRL 0x1a004
#define S_MAC_REF_RST 2
@@ -39991,6 +51326,10 @@
#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
#define F_RXFIFO_PRTY_ERR V_RXFIFO_PRTY_ERR(1U)
+#define S_CIM2NC_PERR 9
+#define V_CIM2NC_PERR(x) ((x) << S_CIM2NC_PERR)
+#define F_CIM2NC_PERR V_CIM2NC_PERR(1U)
+
#define A_NCSI_INT_CAUSE 0x1a0d8
#define A_NCSI_STATUS 0x1a0dc
@@ -40048,6 +51387,12 @@
#define F_MCSIMELSEL V_MCSIMELSEL(1U)
#define A_NCSI_PERR_ENABLE 0x1a0f8
+#define A_NCSI_MODE_SEL 0x1a0fc
+
+#define S_XGMAC_MODE 0
+#define V_XGMAC_MODE(x) ((x) << S_XGMAC_MODE)
+#define F_XGMAC_MODE V_XGMAC_MODE(1U)
+
#define A_NCSI_MACB_NETWORK_CTRL 0x1a100
#define S_TXSNDZEROPAUSE 12
@@ -40550,6 +51895,832 @@
#define V_DESREV(x) ((x) << S_DESREV)
#define G_DESREV(x) (((x) >> S_DESREV) & M_DESREV)
+#define A_NCSI_TX_CTRL 0x1a200
+
+#define S_T7_TXEN 0
+#define V_T7_TXEN(x) ((x) << S_T7_TXEN)
+#define F_T7_TXEN V_T7_TXEN(1U)
+
+#define A_NCSI_TX_CFG 0x1a204
+#define A_NCSI_TX_PAUSE_QUANTA 0x1a208
+#define A_NCSI_RX_CTRL 0x1a20c
+#define A_NCSI_RX_CFG 0x1a210
+#define A_NCSI_RX_HASH_LOW 0x1a214
+#define A_NCSI_RX_HASH_HIGH 0x1a218
+#define A_NCSI_RX_EXACT_MATCH_LOW_1 0x1a21c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_1 0x1a220
+#define A_NCSI_RX_EXACT_MATCH_LOW_2 0x1a224
+#define A_NCSI_RX_EXACT_MATCH_HIGH_2 0x1a228
+#define A_NCSI_RX_EXACT_MATCH_LOW_3 0x1a22c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_3 0x1a230
+#define A_NCSI_RX_EXACT_MATCH_LOW_4 0x1a234
+#define A_NCSI_RX_EXACT_MATCH_HIGH_4 0x1a238
+#define A_NCSI_RX_EXACT_MATCH_LOW_5 0x1a23c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_5 0x1a240
+#define A_NCSI_RX_EXACT_MATCH_LOW_6 0x1a244
+#define A_NCSI_RX_EXACT_MATCH_HIGH_6 0x1a248
+#define A_NCSI_RX_EXACT_MATCH_LOW_7 0x1a24c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_7 0x1a250
+#define A_NCSI_RX_EXACT_MATCH_LOW_8 0x1a254
+#define A_NCSI_RX_EXACT_MATCH_HIGH_8 0x1a258
+#define A_NCSI_RX_TYPE_MATCH_1 0x1a25c
+#define A_NCSI_RX_TYPE_MATCH_2 0x1a260
+#define A_NCSI_RX_TYPE_MATCH_3 0x1a264
+#define A_NCSI_RX_TYPE_MATCH_4 0x1a268
+#define A_NCSI_INT_STATUS 0x1a26c
+#define A_NCSI_XGM_INT_MASK 0x1a270
+#define A_NCSI_XGM_INT_ENABLE 0x1a274
+#define A_NCSI_XGM_INT_DISABLE 0x1a278
+#define A_NCSI_TX_PAUSE_TIMER 0x1a27c
+#define A_NCSI_STAT_CTRL 0x1a280
+#define A_NCSI_RXFIFO_CFG 0x1a284
+
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFO_FULL 30
+#define V_RXFIFO_FULL(x) ((x) << S_RXFIFO_FULL)
+#define F_RXFIFO_FULL V_RXFIFO_FULL(1U)
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfffU
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfffU
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_FORCEDPAUSE 4
+#define V_FORCEDPAUSE(x) ((x) << S_FORCEDPAUSE)
+#define F_FORCEDPAUSE V_FORCEDPAUSE(1U)
+
+#define S_EXTERNLOOPBACK 3
+#define V_EXTERNLOOPBACK(x) ((x) << S_EXTERNLOOPBACK)
+#define F_EXTERNLOOPBACK V_EXTERNLOOPBACK(1U)
+
+#define S_RXBYTESWAP 2
+#define V_RXBYTESWAP(x) ((x) << S_RXBYTESWAP)
+#define F_RXBYTESWAP V_RXBYTESWAP(1U)
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
+#define A_NCSI_TXFIFO_CFG 0x1a288
+
+#define S_T7_TXFIFO_EMPTY 31
+#define V_T7_TXFIFO_EMPTY(x) ((x) << S_T7_TXFIFO_EMPTY)
+#define F_T7_TXFIFO_EMPTY V_T7_TXFIFO_EMPTY(1U)
+
+#define S_T7_TXFIFO_FULL 30
+#define V_T7_TXFIFO_FULL(x) ((x) << S_T7_TXFIFO_FULL)
+#define F_T7_TXFIFO_FULL V_T7_TXFIFO_FULL(1U)
+
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define S_TXIPG 13
+#define M_TXIPG 0xffU
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ffU
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define G_TXFIFOTHRESH(x) (((x) >> S_TXFIFOTHRESH) & M_TXFIFOTHRESH)
+
+#define S_INTERNLOOPBACK 3
+#define V_INTERNLOOPBACK(x) ((x) << S_INTERNLOOPBACK)
+#define F_INTERNLOOPBACK V_INTERNLOOPBACK(1U)
+
+#define S_TXBYTESWAP 2
+#define V_TXBYTESWAP(x) ((x) << S_TXBYTESWAP)
+#define F_TXBYTESWAP V_TXBYTESWAP(1U)
+
+#define S_DISCRC 1
+#define V_DISCRC(x) ((x) << S_DISCRC)
+#define F_DISCRC V_DISCRC(1U)
+
+#define S_DISPREAMBLE 0
+#define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE)
+#define F_DISPREAMBLE V_DISPREAMBLE(1U)
+
+#define A_NCSI_SLOW_TIMER 0x1a28c
+
+#define S_PAUSESLOWTIMEREN 31
+#define V_PAUSESLOWTIMEREN(x) ((x) << S_PAUSESLOWTIMEREN)
+#define F_PAUSESLOWTIMEREN V_PAUSESLOWTIMEREN(1U)
+
+#define S_PAUSESLOWTIMER 0
+#define M_PAUSESLOWTIMER 0xfffffU
+#define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER)
+#define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER)
+
+#define A_NCSI_PAUSE_TIMER 0x1a290
+
+#define S_PAUSETIMER 0
+#define M_PAUSETIMER 0xfffffU
+#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER)
+#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER)
+
+#define A_NCSI_XAUI_PCS_TEST 0x1a294
+
+#define S_TESTPATTERN 1
+#define M_TESTPATTERN 0x3U
+#define V_TESTPATTERN(x) ((x) << S_TESTPATTERN)
+#define G_TESTPATTERN(x) (((x) >> S_TESTPATTERN) & M_TESTPATTERN)
+
+#define S_ENTEST 0
+#define V_ENTEST(x) ((x) << S_ENTEST)
+#define F_ENTEST V_ENTEST(1U)
+
+#define A_NCSI_RGMII_CTRL 0x1a298
+
+#define S_PHALIGNFIFOTHRESH 1
+#define M_PHALIGNFIFOTHRESH 0x3U
+#define V_PHALIGNFIFOTHRESH(x) ((x) << S_PHALIGNFIFOTHRESH)
+#define G_PHALIGNFIFOTHRESH(x) (((x) >> S_PHALIGNFIFOTHRESH) & M_PHALIGNFIFOTHRESH)
+
+#define S_TXCLK90SHIFT 0
+#define V_TXCLK90SHIFT(x) ((x) << S_TXCLK90SHIFT)
+#define F_TXCLK90SHIFT V_TXCLK90SHIFT(1U)
+
+#define A_NCSI_RGMII_IMP 0x1a29c
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define S_IMPSETUPDATE 6
+#define V_IMPSETUPDATE(x) ((x) << S_IMPSETUPDATE)
+#define F_IMPSETUPDATE V_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7U
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+#define G_RGMIIIMPPD(x) (((x) >> S_RGMIIIMPPD) & M_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7U
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+#define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU)
+
+#define A_NCSI_RX_MAX_PKT_SIZE 0x1a2a8
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fffU
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENERRORGATHER 16
+#define V_RXENERRORGATHER(x) ((x) << S_RXENERRORGATHER)
+#define F_RXENERRORGATHER V_RXENERRORGATHER(1U)
+
+#define S_RXENSINGLEFLIT 15
+#define V_RXENSINGLEFLIT(x) ((x) << S_RXENSINGLEFLIT)
+#define F_RXENSINGLEFLIT V_RXENSINGLEFLIT(1U)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fffU
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_NCSI_RESET_CTRL 0x1a2ac
+
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
+#define A_NCSI_XAUI1G_CTRL 0x1a2b0
+
+#define S_XAUI1GLINKID 0
+#define M_XAUI1GLINKID 0x3U
+#define V_XAUI1GLINKID(x) ((x) << S_XAUI1GLINKID)
+#define G_XAUI1GLINKID(x) (((x) >> S_XAUI1GLINKID) & M_XAUI1GLINKID)
+
+#define A_NCSI_SERDES_LANE_CTRL 0x1a2b4
+
+#define S_LANEREVERSAL 8
+#define V_LANEREVERSAL(x) ((x) << S_LANEREVERSAL)
+#define F_LANEREVERSAL V_LANEREVERSAL(1U)
+
+#define S_TXPOLARITY 4
+#define M_TXPOLARITY 0xfU
+#define V_TXPOLARITY(x) ((x) << S_TXPOLARITY)
+#define G_TXPOLARITY(x) (((x) >> S_TXPOLARITY) & M_TXPOLARITY)
+
+#define S_RXPOLARITY 0
+#define M_RXPOLARITY 0xfU
+#define V_RXPOLARITY(x) ((x) << S_RXPOLARITY)
+#define G_RXPOLARITY(x) (((x) >> S_RXPOLARITY) & M_RXPOLARITY)
+
+#define A_NCSI_PORT_CFG 0x1a2b8
+
+#define S_NCSI_SAFESPEEDCHANGE 4
+#define V_NCSI_SAFESPEEDCHANGE(x) ((x) << S_NCSI_SAFESPEEDCHANGE)
+#define F_NCSI_SAFESPEEDCHANGE V_NCSI_SAFESPEEDCHANGE(1U)
+
+#define S_NCSI_CLKDIVRESET_ 3
+#define V_NCSI_CLKDIVRESET_(x) ((x) << S_NCSI_CLKDIVRESET_)
+#define F_NCSI_CLKDIVRESET_ V_NCSI_CLKDIVRESET_(1U)
+
+#define S_NCSI_PORTSPEED 1
+#define M_NCSI_PORTSPEED 0x3U
+#define V_NCSI_PORTSPEED(x) ((x) << S_NCSI_PORTSPEED)
+#define G_NCSI_PORTSPEED(x) (((x) >> S_NCSI_PORTSPEED) & M_NCSI_PORTSPEED)
+
+#define S_NCSI_ENRGMII 0
+#define V_NCSI_ENRGMII(x) ((x) << S_NCSI_ENRGMII)
+#define F_NCSI_ENRGMII V_NCSI_ENRGMII(1U)
+
+#define A_NCSI_EPIO_DATA0 0x1a2c0
+#define A_NCSI_EPIO_DATA1 0x1a2c4
+#define A_NCSI_EPIO_DATA2 0x1a2c8
+#define A_NCSI_EPIO_DATA3 0x1a2cc
+#define A_NCSI_EPIO_OP 0x1a2d0
+
+#define S_PIO_READY 31
+#define V_PIO_READY(x) ((x) << S_PIO_READY)
+#define F_PIO_READY V_PIO_READY(1U)
+
+#define S_PIO_WRRD 24
+#define V_PIO_WRRD(x) ((x) << S_PIO_WRRD)
+#define F_PIO_WRRD V_PIO_WRRD(1U)
+
+#define S_PIO_ADDRESS 0
+#define M_PIO_ADDRESS 0xffU
+#define V_PIO_ADDRESS(x) ((x) << S_PIO_ADDRESS)
+#define G_PIO_ADDRESS(x) (((x) >> S_PIO_ADDRESS) & M_PIO_ADDRESS)
+
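A_NCSI_EPIO_OP combines a ready flag (F_PIO_READY, bit 31), a direction bit (F_PIO_WRRD, bit 24), and an 8-bit address. A sketch of a polled EPIO read, assuming hardware raises F_PIO_READY on completion and returns data through A_NCSI_EPIO_DATA0; the helper name and timeout are illustrative:

	static int
	ncsi_epio_read(struct adapter *sc, u_int addr, uint32_t *data)
	{
		int i;

		/* PIO_WRRD left clear: read operation (assumed encoding). */
		t4_write_reg(sc, A_NCSI_EPIO_OP, V_PIO_ADDRESS(addr));
		for (i = 0; i < 1000; i++) {
			if (t4_read_reg(sc, A_NCSI_EPIO_OP) & F_PIO_READY) {
				*data = t4_read_reg(sc, A_NCSI_EPIO_DATA0);
				return (0);
			}
			DELAY(1);
		}
		return (ETIMEDOUT);
	}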
+#define A_NCSI_XGMAC0_INT_ENABLE 0x1a2d4
+
+#define S_XAUIPCSDECERR 24
+#define V_XAUIPCSDECERR(x) ((x) << S_XAUIPCSDECERR)
+#define F_XAUIPCSDECERR V_XAUIPCSDECERR(1U)
+
+#define S_RGMIIRXFIFOOVERFLOW 23
+#define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW)
+#define F_RGMIIRXFIFOOVERFLOW V_RGMIIRXFIFOOVERFLOW(1U)
+
+#define S_RGMIIRXFIFOUNDERFLOW 22
+#define V_RGMIIRXFIFOUNDERFLOW(x) ((x) << S_RGMIIRXFIFOUNDERFLOW)
+#define F_RGMIIRXFIFOUNDERFLOW V_RGMIIRXFIFOUNDERFLOW(1U)
+
+#define S_RXPKTSIZEERROR 21
+#define V_RXPKTSIZEERROR(x) ((x) << S_RXPKTSIZEERROR)
+#define F_RXPKTSIZEERROR V_RXPKTSIZEERROR(1U)
+
+#define S_WOLPATDETECTED 20
+#define V_WOLPATDETECTED(x) ((x) << S_WOLPATDETECTED)
+#define F_WOLPATDETECTED V_WOLPATDETECTED(1U)
+
+#define S_T7_TXFIFO_PRTY_ERR 17
+#define M_T7_TXFIFO_PRTY_ERR 0x7U
+#define V_T7_TXFIFO_PRTY_ERR(x) ((x) << S_T7_TXFIFO_PRTY_ERR)
+#define G_T7_TXFIFO_PRTY_ERR(x) (((x) >> S_T7_TXFIFO_PRTY_ERR) & M_T7_TXFIFO_PRTY_ERR)
+
+#define S_T7_RXFIFO_PRTY_ERR 14
+#define M_T7_RXFIFO_PRTY_ERR 0x7U
+#define V_T7_RXFIFO_PRTY_ERR(x) ((x) << S_T7_RXFIFO_PRTY_ERR)
+#define G_T7_RXFIFO_PRTY_ERR(x) (((x) >> S_T7_RXFIFO_PRTY_ERR) & M_T7_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDESBISTERR 8
+#define M_SERDESBISTERR 0xfU
+#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR)
+#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR)
+
+#define S_SERDESLOWSIGCHANGE 4
+#define M_SERDESLOWSIGCHANGE 0xfU
+#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE)
+#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_RGMIILINKSTSCHANGE 1
+#define V_RGMIILINKSTSCHANGE(x) ((x) << S_RGMIILINKSTSCHANGE)
+#define F_RGMIILINKSTSCHANGE V_RGMIILINKSTSCHANGE(1U)
+
+#define S_T7_XGM_INT 0
+#define V_T7_XGM_INT(x) ((x) << S_T7_XGM_INT)
+#define F_T7_XGM_INT V_T7_XGM_INT(1U)
+
+#define A_NCSI_XGMAC0_INT_CAUSE 0x1a2d8
+#define A_NCSI_XAUI_ACT_CTRL 0x1a2dc
+#define A_NCSI_SERDES_CTRL0 0x1a2e0
+
+#define S_INTSERLPBK3 27
+#define V_INTSERLPBK3(x) ((x) << S_INTSERLPBK3)
+#define F_INTSERLPBK3 V_INTSERLPBK3(1U)
+
+#define S_INTSERLPBK2 26
+#define V_INTSERLPBK2(x) ((x) << S_INTSERLPBK2)
+#define F_INTSERLPBK2 V_INTSERLPBK2(1U)
+
+#define S_INTSERLPBK1 25
+#define V_INTSERLPBK1(x) ((x) << S_INTSERLPBK1)
+#define F_INTSERLPBK1 V_INTSERLPBK1(1U)
+
+#define S_INTSERLPBK0 24
+#define V_INTSERLPBK0(x) ((x) << S_INTSERLPBK0)
+#define F_INTSERLPBK0 V_INTSERLPBK0(1U)
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define S_PW23 12
+#define M_PW23 0x3U
+#define V_PW23(x) ((x) << S_PW23)
+#define G_PW23(x) (((x) >> S_PW23) & M_PW23)
+
+#define S_PW01 10
+#define M_PW01 0x3U
+#define V_PW01(x) ((x) << S_PW01)
+#define G_PW01(x) (((x) >> S_PW01) & M_PW01)
+
+#define S_DEQ 6
+#define M_DEQ 0xfU
+#define V_DEQ(x) ((x) << S_DEQ)
+#define G_DEQ(x) (((x) >> S_DEQ) & M_DEQ)
+
+#define S_DTX 2
+#define M_DTX 0xfU
+#define V_DTX(x) ((x) << S_DTX)
+#define G_DTX(x) (((x) >> S_DTX) & M_DTX)
+
+#define S_LODRV 1
+#define V_LODRV(x) ((x) << S_LODRV)
+#define F_LODRV V_LODRV(1U)
+
+#define S_HIDRV 0
+#define V_HIDRV(x) ((x) << S_HIDRV)
+#define F_HIDRV V_HIDRV(1U)
+
+#define A_NCSI_SERDES_CTRL1 0x1a2e4
+
+#define S_FMOFFSET3 19
+#define M_FMOFFSET3 0x1fU
+#define V_FMOFFSET3(x) ((x) << S_FMOFFSET3)
+#define G_FMOFFSET3(x) (((x) >> S_FMOFFSET3) & M_FMOFFSET3)
+
+#define S_FMOFFSETEN3 18
+#define V_FMOFFSETEN3(x) ((x) << S_FMOFFSETEN3)
+#define F_FMOFFSETEN3 V_FMOFFSETEN3(1U)
+
+#define S_FMOFFSET2 13
+#define M_FMOFFSET2 0x1fU
+#define V_FMOFFSET2(x) ((x) << S_FMOFFSET2)
+#define G_FMOFFSET2(x) (((x) >> S_FMOFFSET2) & M_FMOFFSET2)
+
+#define S_FMOFFSETEN2 12
+#define V_FMOFFSETEN2(x) ((x) << S_FMOFFSETEN2)
+#define F_FMOFFSETEN2 V_FMOFFSETEN2(1U)
+
+#define S_FMOFFSET1 7
+#define M_FMOFFSET1 0x1fU
+#define V_FMOFFSET1(x) ((x) << S_FMOFFSET1)
+#define G_FMOFFSET1(x) (((x) >> S_FMOFFSET1) & M_FMOFFSET1)
+
+#define S_FMOFFSETEN1 6
+#define V_FMOFFSETEN1(x) ((x) << S_FMOFFSETEN1)
+#define F_FMOFFSETEN1 V_FMOFFSETEN1(1U)
+
+#define S_FMOFFSET0 1
+#define M_FMOFFSET0 0x1fU
+#define V_FMOFFSET0(x) ((x) << S_FMOFFSET0)
+#define G_FMOFFSET0(x) (((x) >> S_FMOFFSET0) & M_FMOFFSET0)
+
+#define S_FMOFFSETEN0 0
+#define V_FMOFFSETEN0(x) ((x) << S_FMOFFSETEN0)
+#define F_FMOFFSETEN0 V_FMOFFSETEN0(1U)
+
+#define A_NCSI_SERDES_CTRL2 0x1a2e8
+
+#define S_DNIN3 11
+#define V_DNIN3(x) ((x) << S_DNIN3)
+#define F_DNIN3 V_DNIN3(1U)
+
+#define S_UPIN3 10
+#define V_UPIN3(x) ((x) << S_UPIN3)
+#define F_UPIN3 V_UPIN3(1U)
+
+#define S_RXSLAVE3 9
+#define V_RXSLAVE3(x) ((x) << S_RXSLAVE3)
+#define F_RXSLAVE3 V_RXSLAVE3(1U)
+
+#define S_DNIN2 8
+#define V_DNIN2(x) ((x) << S_DNIN2)
+#define F_DNIN2 V_DNIN2(1U)
+
+#define S_UPIN2 7
+#define V_UPIN2(x) ((x) << S_UPIN2)
+#define F_UPIN2 V_UPIN2(1U)
+
+#define S_RXSLAVE2 6
+#define V_RXSLAVE2(x) ((x) << S_RXSLAVE2)
+#define F_RXSLAVE2 V_RXSLAVE2(1U)
+
+#define S_DNIN1 5
+#define V_DNIN1(x) ((x) << S_DNIN1)
+#define F_DNIN1 V_DNIN1(1U)
+
+#define S_UPIN1 4
+#define V_UPIN1(x) ((x) << S_UPIN1)
+#define F_UPIN1 V_UPIN1(1U)
+
+#define S_RXSLAVE1 3
+#define V_RXSLAVE1(x) ((x) << S_RXSLAVE1)
+#define F_RXSLAVE1 V_RXSLAVE1(1U)
+
+#define S_DNIN0 2
+#define V_DNIN0(x) ((x) << S_DNIN0)
+#define F_DNIN0 V_DNIN0(1U)
+
+#define S_UPIN0 1
+#define V_UPIN0(x) ((x) << S_UPIN0)
+#define F_UPIN0 V_UPIN0(1U)
+
+#define S_RXSLAVE0 0
+#define V_RXSLAVE0(x) ((x) << S_RXSLAVE0)
+#define F_RXSLAVE0 V_RXSLAVE0(1U)
+
+#define A_NCSI_SERDES_CTRL3 0x1a2ec
+
+#define S_EXTBISTCHKERRCLR3 31
+#define V_EXTBISTCHKERRCLR3(x) ((x) << S_EXTBISTCHKERRCLR3)
+#define F_EXTBISTCHKERRCLR3 V_EXTBISTCHKERRCLR3(1U)
+
+#define S_EXTBISTCHKEN3 30
+#define V_EXTBISTCHKEN3(x) ((x) << S_EXTBISTCHKEN3)
+#define F_EXTBISTCHKEN3 V_EXTBISTCHKEN3(1U)
+
+#define S_EXTBISTGENEN3 29
+#define V_EXTBISTGENEN3(x) ((x) << S_EXTBISTGENEN3)
+#define F_EXTBISTGENEN3 V_EXTBISTGENEN3(1U)
+
+#define S_EXTBISTPAT3 26
+#define M_EXTBISTPAT3 0x7U
+#define V_EXTBISTPAT3(x) ((x) << S_EXTBISTPAT3)
+#define G_EXTBISTPAT3(x) (((x) >> S_EXTBISTPAT3) & M_EXTBISTPAT3)
+
+#define S_EXTPARRESET3 25
+#define V_EXTPARRESET3(x) ((x) << S_EXTPARRESET3)
+#define F_EXTPARRESET3 V_EXTPARRESET3(1U)
+
+#define S_EXTPARLPBK3 24
+#define V_EXTPARLPBK3(x) ((x) << S_EXTPARLPBK3)
+#define F_EXTPARLPBK3 V_EXTPARLPBK3(1U)
+
+#define S_EXTBISTCHKERRCLR2 23
+#define V_EXTBISTCHKERRCLR2(x) ((x) << S_EXTBISTCHKERRCLR2)
+#define F_EXTBISTCHKERRCLR2 V_EXTBISTCHKERRCLR2(1U)
+
+#define S_EXTBISTCHKEN2 22
+#define V_EXTBISTCHKEN2(x) ((x) << S_EXTBISTCHKEN2)
+#define F_EXTBISTCHKEN2 V_EXTBISTCHKEN2(1U)
+
+#define S_EXTBISTGENEN2 21
+#define V_EXTBISTGENEN2(x) ((x) << S_EXTBISTGENEN2)
+#define F_EXTBISTGENEN2 V_EXTBISTGENEN2(1U)
+
+#define S_EXTBISTPAT2 18
+#define M_EXTBISTPAT2 0x7U
+#define V_EXTBISTPAT2(x) ((x) << S_EXTBISTPAT2)
+#define G_EXTBISTPAT2(x) (((x) >> S_EXTBISTPAT2) & M_EXTBISTPAT2)
+
+#define S_EXTPARRESET2 17
+#define V_EXTPARRESET2(x) ((x) << S_EXTPARRESET2)
+#define F_EXTPARRESET2 V_EXTPARRESET2(1U)
+
+#define S_EXTPARLPBK2 16
+#define V_EXTPARLPBK2(x) ((x) << S_EXTPARLPBK2)
+#define F_EXTPARLPBK2 V_EXTPARLPBK2(1U)
+
+#define S_EXTBISTCHKERRCLR1 15
+#define V_EXTBISTCHKERRCLR1(x) ((x) << S_EXTBISTCHKERRCLR1)
+#define F_EXTBISTCHKERRCLR1 V_EXTBISTCHKERRCLR1(1U)
+
+#define S_EXTBISTCHKEN1 14
+#define V_EXTBISTCHKEN1(x) ((x) << S_EXTBISTCHKEN1)
+#define F_EXTBISTCHKEN1 V_EXTBISTCHKEN1(1U)
+
+#define S_EXTBISTGENEN1 13
+#define V_EXTBISTGENEN1(x) ((x) << S_EXTBISTGENEN1)
+#define F_EXTBISTGENEN1 V_EXTBISTGENEN1(1U)
+
+#define S_EXTBISTPAT1 10
+#define M_EXTBISTPAT1 0x7U
+#define V_EXTBISTPAT1(x) ((x) << S_EXTBISTPAT1)
+#define G_EXTBISTPAT1(x) (((x) >> S_EXTBISTPAT1) & M_EXTBISTPAT1)
+
+#define S_EXTPARRESET1 9
+#define V_EXTPARRESET1(x) ((x) << S_EXTPARRESET1)
+#define F_EXTPARRESET1 V_EXTPARRESET1(1U)
+
+#define S_EXTPARLPBK1 8
+#define V_EXTPARLPBK1(x) ((x) << S_EXTPARLPBK1)
+#define F_EXTPARLPBK1 V_EXTPARLPBK1(1U)
+
+#define S_EXTBISTCHKERRCLR0 7
+#define V_EXTBISTCHKERRCLR0(x) ((x) << S_EXTBISTCHKERRCLR0)
+#define F_EXTBISTCHKERRCLR0 V_EXTBISTCHKERRCLR0(1U)
+
+#define S_EXTBISTCHKEN0 6
+#define V_EXTBISTCHKEN0(x) ((x) << S_EXTBISTCHKEN0)
+#define F_EXTBISTCHKEN0 V_EXTBISTCHKEN0(1U)
+
+#define S_EXTBISTGENEN0 5
+#define V_EXTBISTGENEN0(x) ((x) << S_EXTBISTGENEN0)
+#define F_EXTBISTGENEN0 V_EXTBISTGENEN0(1U)
+
+#define S_EXTBISTPAT0 2
+#define M_EXTBISTPAT0 0x7U
+#define V_EXTBISTPAT0(x) ((x) << S_EXTBISTPAT0)
+#define G_EXTBISTPAT0(x) (((x) >> S_EXTBISTPAT0) & M_EXTBISTPAT0)
+
+#define S_EXTPARRESET0 1
+#define V_EXTPARRESET0(x) ((x) << S_EXTPARRESET0)
+#define F_EXTPARRESET0 V_EXTPARRESET0(1U)
+
+#define S_EXTPARLPBK0 0
+#define V_EXTPARLPBK0(x) ((x) << S_EXTPARLPBK0)
+#define F_EXTPARLPBK0 V_EXTPARLPBK0(1U)
+
+#define A_NCSI_SERDES_STAT0 0x1a2f0
+
+#define S_EXTBISTCHKERRCNT0 4
+#define M_EXTBISTCHKERRCNT0 0xffffffU
+#define V_EXTBISTCHKERRCNT0(x) ((x) << S_EXTBISTCHKERRCNT0)
+#define G_EXTBISTCHKERRCNT0(x) (((x) >> S_EXTBISTCHKERRCNT0) & M_EXTBISTCHKERRCNT0)
+
+#define S_EXTBISTCHKFMD0 3
+#define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0)
+#define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U)
+
+#define S_LOWSIGFORCEEN0 2
+#define V_LOWSIGFORCEEN0(x) ((x) << S_LOWSIGFORCEEN0)
+#define F_LOWSIGFORCEEN0 V_LOWSIGFORCEEN0(1U)
+
+#define S_LOWSIGFORCEVALUE0 1
+#define V_LOWSIGFORCEVALUE0(x) ((x) << S_LOWSIGFORCEVALUE0)
+#define F_LOWSIGFORCEVALUE0 V_LOWSIGFORCEVALUE0(1U)
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_NCSI_SERDES_STAT1 0x1a2f4
+
+#define S_EXTBISTCHKERRCNT1 4
+#define M_EXTBISTCHKERRCNT1 0xffffffU
+#define V_EXTBISTCHKERRCNT1(x) ((x) << S_EXTBISTCHKERRCNT1)
+#define G_EXTBISTCHKERRCNT1(x) (((x) >> S_EXTBISTCHKERRCNT1) & M_EXTBISTCHKERRCNT1)
+
+#define S_EXTBISTCHKFMD1 3
+#define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1)
+#define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U)
+
+#define S_LOWSIGFORCEEN1 2
+#define V_LOWSIGFORCEEN1(x) ((x) << S_LOWSIGFORCEEN1)
+#define F_LOWSIGFORCEEN1 V_LOWSIGFORCEEN1(1U)
+
+#define S_LOWSIGFORCEVALUE1 1
+#define V_LOWSIGFORCEVALUE1(x) ((x) << S_LOWSIGFORCEVALUE1)
+#define F_LOWSIGFORCEVALUE1 V_LOWSIGFORCEVALUE1(1U)
+
+#define S_LOWSIG1 0
+#define V_LOWSIG1(x) ((x) << S_LOWSIG1)
+#define F_LOWSIG1 V_LOWSIG1(1U)
+
+#define A_NCSI_SERDES_STAT2 0x1a2f8
+
+#define S_EXTBISTCHKERRCNT2 4
+#define M_EXTBISTCHKERRCNT2 0xffffffU
+#define V_EXTBISTCHKERRCNT2(x) ((x) << S_EXTBISTCHKERRCNT2)
+#define G_EXTBISTCHKERRCNT2(x) (((x) >> S_EXTBISTCHKERRCNT2) & M_EXTBISTCHKERRCNT2)
+
+#define S_EXTBISTCHKFMD2 3
+#define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2)
+#define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U)
+
+#define S_LOWSIGFORCEEN2 2
+#define V_LOWSIGFORCEEN2(x) ((x) << S_LOWSIGFORCEEN2)
+#define F_LOWSIGFORCEEN2 V_LOWSIGFORCEEN2(1U)
+
+#define S_LOWSIGFORCEVALUE2 1
+#define V_LOWSIGFORCEVALUE2(x) ((x) << S_LOWSIGFORCEVALUE2)
+#define F_LOWSIGFORCEVALUE2 V_LOWSIGFORCEVALUE2(1U)
+
+#define S_LOWSIG2 0
+#define V_LOWSIG2(x) ((x) << S_LOWSIG2)
+#define F_LOWSIG2 V_LOWSIG2(1U)
+
+#define A_NCSI_SERDES_STAT3 0x1a2fc
+
+#define S_EXTBISTCHKERRCNT3 4
+#define M_EXTBISTCHKERRCNT3 0xffffffU
+#define V_EXTBISTCHKERRCNT3(x) ((x) << S_EXTBISTCHKERRCNT3)
+#define G_EXTBISTCHKERRCNT3(x) (((x) >> S_EXTBISTCHKERRCNT3) & M_EXTBISTCHKERRCNT3)
+
+#define S_EXTBISTCHKFMD3 3
+#define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3)
+#define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U)
+
+#define S_LOWSIGFORCEEN3 2
+#define V_LOWSIGFORCEEN3(x) ((x) << S_LOWSIGFORCEEN3)
+#define F_LOWSIGFORCEEN3 V_LOWSIGFORCEEN3(1U)
+
+#define S_LOWSIGFORCEVALUE3 1
+#define V_LOWSIGFORCEVALUE3(x) ((x) << S_LOWSIGFORCEVALUE3)
+#define F_LOWSIGFORCEVALUE3 V_LOWSIGFORCEVALUE3(1U)
+
+#define S_LOWSIG3 0
+#define V_LOWSIG3(x) ((x) << S_LOWSIG3)
+#define F_LOWSIG3 V_LOWSIG3(1U)
+
+#define A_NCSI_STAT_TX_BYTE_LOW 0x1a300
+#define A_NCSI_STAT_TX_BYTE_HIGH 0x1a304
+#define A_NCSI_STAT_TX_FRAME_LOW 0x1a308
+#define A_NCSI_STAT_TX_FRAME_HIGH 0x1a30c
+#define A_NCSI_STAT_TX_BCAST 0x1a310
+#define A_NCSI_STAT_TX_MCAST 0x1a314
+#define A_NCSI_STAT_TX_PAUSE 0x1a318
+#define A_NCSI_STAT_TX_64B_FRAMES 0x1a31c
+#define A_NCSI_STAT_TX_65_127B_FRAMES 0x1a320
+#define A_NCSI_STAT_TX_128_255B_FRAMES 0x1a324
+#define A_NCSI_STAT_TX_256_511B_FRAMES 0x1a328
+#define A_NCSI_STAT_TX_512_1023B_FRAMES 0x1a32c
+#define A_NCSI_STAT_TX_1024_1518B_FRAMES 0x1a330
+#define A_NCSI_STAT_TX_1519_MAXB_FRAMES 0x1a334
+#define A_NCSI_STAT_TX_ERR_FRAMES 0x1a338
+#define A_NCSI_STAT_RX_BYTES_LOW 0x1a33c
+#define A_NCSI_STAT_RX_BYTES_HIGH 0x1a340
+#define A_NCSI_STAT_RX_FRAMES_LOW 0x1a344
+#define A_NCSI_STAT_RX_FRAMES_HIGH 0x1a348
+#define A_NCSI_STAT_RX_BCAST_FRAMES 0x1a34c
+#define A_NCSI_STAT_RX_MCAST_FRAMES 0x1a350
+#define A_NCSI_STAT_RX_PAUSE_FRAMES 0x1a354
+#define A_NCSI_STAT_RX_64B_FRAMES 0x1a358
+#define A_NCSI_STAT_RX_65_127B_FRAMES 0x1a35c
+#define A_NCSI_STAT_RX_128_255B_FRAMES 0x1a360
+#define A_NCSI_STAT_RX_256_511B_FRAMES 0x1a364
+#define A_NCSI_STAT_RX_512_1023B_FRAMES 0x1a368
+#define A_NCSI_STAT_RX_1024_1518B_FRAMES 0x1a36c
+#define A_NCSI_STAT_RX_1519_MAXB_FRAMES 0x1a370
+#define A_NCSI_STAT_RX_SHORT_FRAMES 0x1a374
+#define A_NCSI_STAT_RX_OVERSIZE_FRAMES 0x1a378
+#define A_NCSI_STAT_RX_JABBER_FRAMES 0x1a37c
+#define A_NCSI_STAT_RX_CRC_ERR_FRAMES 0x1a380
+#define A_NCSI_STAT_RX_LENGTH_ERR_FRAMES 0x1a384
+#define A_NCSI_STAT_RX_SYM_CODE_ERR_FRAMES 0x1a388
+#define A_NCSI_XAUI_PCS_ERR 0x1a398
+
+#define S_PCS_SYNCSTATUS 5
+#define M_PCS_SYNCSTATUS 0xfU
+#define V_PCS_SYNCSTATUS(x) ((x) << S_PCS_SYNCSTATUS)
+#define G_PCS_SYNCSTATUS(x) (((x) >> S_PCS_SYNCSTATUS) & M_PCS_SYNCSTATUS)
+
+#define S_PCS_CTCFIFOERR 1
+#define M_PCS_CTCFIFOERR 0xfU
+#define V_PCS_CTCFIFOERR(x) ((x) << S_PCS_CTCFIFOERR)
+#define G_PCS_CTCFIFOERR(x) (((x) >> S_PCS_CTCFIFOERR) & M_PCS_CTCFIFOERR)
+
+#define S_PCS_NOTALIGNED 0
+#define V_PCS_NOTALIGNED(x) ((x) << S_PCS_NOTALIGNED)
+#define F_PCS_NOTALIGNED V_PCS_NOTALIGNED(1U)
+
+#define A_NCSI_RGMII_STATUS 0x1a39c
+
+#define S_GMIIDUPLEX 3
+#define V_GMIIDUPLEX(x) ((x) << S_GMIIDUPLEX)
+#define F_GMIIDUPLEX V_GMIIDUPLEX(1U)
+
+#define S_GMIISPEED 1
+#define M_GMIISPEED 0x3U
+#define V_GMIISPEED(x) ((x) << S_GMIISPEED)
+#define G_GMIISPEED(x) (((x) >> S_GMIISPEED) & M_GMIISPEED)
+
+#define S_GMIILINKSTATUS 0
+#define V_GMIILINKSTATUS(x) ((x) << S_GMIILINKSTATUS)
+#define F_GMIILINKSTATUS V_GMIILINKSTATUS(1U)
+
+#define A_NCSI_WOL_STATUS 0x1a3a0
+
+#define S_T7_PATDETECTED 31
+#define V_T7_PATDETECTED(x) ((x) << S_T7_PATDETECTED)
+#define F_T7_PATDETECTED V_T7_PATDETECTED(1U)
+
+#define A_NCSI_RX_MAX_PKT_SIZE_ERR_CNT 0x1a3a4
+#define A_NCSI_TX_SPI4_SOP_EOP_CNT 0x1a3a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffffU
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define S_TXSPI4EOPCNT 0
+#define M_TXSPI4EOPCNT 0xffffU
+#define V_TXSPI4EOPCNT(x) ((x) << S_TXSPI4EOPCNT)
+#define G_TXSPI4EOPCNT(x) (((x) >> S_TXSPI4EOPCNT) & M_TXSPI4EOPCNT)
+
+#define A_NCSI_RX_SPI4_SOP_EOP_CNT 0x1a3ac
+
+#define S_RXSPI4SOPCNT 16
+#define M_RXSPI4SOPCNT 0xffffU
+#define V_RXSPI4SOPCNT(x) ((x) << S_RXSPI4SOPCNT)
+#define G_RXSPI4SOPCNT(x) (((x) >> S_RXSPI4SOPCNT) & M_RXSPI4SOPCNT)
+
+#define S_RXSPI4EOPCNT 0
+#define M_RXSPI4EOPCNT 0xffffU
+#define V_RXSPI4EOPCNT(x) ((x) << S_RXSPI4EOPCNT)
+#define G_RXSPI4EOPCNT(x) (((x) >> S_RXSPI4EOPCNT) & M_RXSPI4EOPCNT)
+
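+/*
+ * Editorial note, not part of the original commit: the blocks above (and
+ * throughout this file) follow the driver's generated naming scheme --
+ * A_FOO is a register address, S_FOO the bit offset of field FOO, M_FOO
+ * its right-justified mask, V_FOO(x) shifts a value into field position,
+ * F_FOO is the pre-shifted single-bit value, and G_FOO(x) extracts the
+ * field from a register word.  A minimal sketch, assuming t4_read_reg()
+ * is the driver's usual 32-bit register accessor and sc the adapter
+ * softc:
+ *
+ *	uint32_t v = t4_read_reg(sc, A_NCSI_XAUI_PCS_ERR);
+ *	int lane;
+ *
+ *	for (lane = 0; lane < 4; lane++)
+ *		if ((G_PCS_SYNCSTATUS(v) & (1 << lane)) == 0)
+ *			printf("XAUI lane %d not in sync\n", lane);
+ */
+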
/* registers for module XGMAC */
#define XGMAC_BASE_ADDR 0x0
@@ -44054,6 +56225,16 @@
#define V_IBQEMPTY(x) ((x) << S_IBQEMPTY)
#define G_IBQEMPTY(x) (((x) >> S_IBQEMPTY) & M_IBQEMPTY)
+#define S_T7_IBQGEN1 10
+#define M_T7_IBQGEN1 0x3fU
+#define V_T7_IBQGEN1(x) ((x) << S_T7_IBQGEN1)
+#define G_T7_IBQGEN1(x) (((x) >> S_T7_IBQGEN1) & M_T7_IBQGEN1)
+
+#define S_T7_IBQEMPTY 0
+#define M_T7_IBQEMPTY 0x3ffU
+#define V_T7_IBQEMPTY(x) ((x) << S_T7_IBQEMPTY)
+#define G_T7_IBQEMPTY(x) (((x) >> S_T7_IBQEMPTY) & M_T7_IBQEMPTY)
+
#define A_UP_OBQ_GEN 0xc
#define S_OBQGEN 6
@@ -44076,6 +56257,16 @@
#define V_T5_OBQFULL(x) ((x) << S_T5_OBQFULL)
#define G_T5_OBQFULL(x) (((x) >> S_T5_OBQFULL) & M_T5_OBQFULL)
+#define S_T7_T5_OBQGEN 16
+#define M_T7_T5_OBQGEN 0xffffU
+#define V_T7_T5_OBQGEN(x) ((x) << S_T7_T5_OBQGEN)
+#define G_T7_T5_OBQGEN(x) (((x) >> S_T7_T5_OBQGEN) & M_T7_T5_OBQGEN)
+
+#define S_T7_T5_OBQFULL 0
+#define M_T7_T5_OBQFULL 0xffffU
+#define V_T7_T5_OBQFULL(x) ((x) << S_T7_T5_OBQFULL)
+#define G_T7_T5_OBQFULL(x) (((x) >> S_T7_T5_OBQFULL) & M_T7_T5_OBQFULL)
+
#define A_UP_IBQ_0_RDADDR 0x10
#define S_QUEID 13
@@ -44088,6 +56279,13 @@
#define V_IBQRDADDR(x) ((x) << S_IBQRDADDR)
#define G_IBQRDADDR(x) (((x) >> S_IBQRDADDR) & M_IBQRDADDR)
+#define A_UP_IBQ_GEN_IPC 0x10
+
+#define S_IPCEMPTY 0
+#define M_IPCEMPTY 0x7fU
+#define V_IPCEMPTY(x) ((x) << S_IPCEMPTY)
+#define G_IPCEMPTY(x) (((x) >> S_IPCEMPTY) & M_IPCEMPTY)
+
#define A_UP_IBQ_0_WRADDR 0x14
#define S_IBQWRADDR 0
@@ -44160,10 +56358,15 @@
#define A_UP_OBQ_0_STATUS 0x78
#define A_UP_OBQ_0_PKTCNT 0x7c
#define A_UP_OBQ_1_RDADDR 0x80
+#define A_UP_NXT_FLOWADDR0 0x80
#define A_UP_OBQ_1_WRADDR 0x84
+#define A_UP_NXT_FLOWADDR1 0x84
#define A_UP_OBQ_1_STATUS 0x88
+#define A_UP_NXT_FLOWADDR2 0x88
#define A_UP_OBQ_1_PKTCNT 0x8c
+#define A_UP_NXT_FLOWADDR3 0x8c
#define A_UP_OBQ_2_RDADDR 0x90
+#define A_UP_DFT_FLOWADDR 0x90
#define A_UP_OBQ_2_WRADDR 0x94
#define A_UP_OBQ_2_STATUS 0x98
#define A_UP_OBQ_2_PKTCNT 0x9c
@@ -44176,9 +56379,33 @@
#define A_UP_OBQ_4_STATUS 0xb8
#define A_UP_OBQ_4_PKTCNT 0xbc
#define A_UP_OBQ_5_RDADDR 0xc0
+#define A_UP_MAX_SEQ_NUM 0xc0
#define A_UP_OBQ_5_WRADDR 0xc4
+#define A_UP_UNACK_SEQ_NUM 0xc4
#define A_UP_OBQ_5_STATUS 0xc8
+#define A_UP_SEARCH_SEQ_NUM 0xc8
#define A_UP_OBQ_5_PKTCNT 0xcc
+#define A_UP_SEQ_SEARCH_CTRL 0xcc
+
+#define S_FIFO_SIZE 29
+#define M_FIFO_SIZE 0x7U
+#define V_FIFO_SIZE(x) ((x) << S_FIFO_SIZE)
+#define G_FIFO_SIZE(x) (((x) >> S_FIFO_SIZE) & M_FIFO_SIZE)
+
+#define S_ROCE_MODE 28
+#define V_ROCE_MODE(x) ((x) << S_ROCE_MODE)
+#define F_ROCE_MODE V_ROCE_MODE(1U)
+
+#define S_SEQ_WR_PTR 16
+#define M_SEQ_WR_PTR 0xfffU
+#define V_SEQ_WR_PTR(x) ((x) << S_SEQ_WR_PTR)
+#define G_SEQ_WR_PTR(x) (((x) >> S_SEQ_WR_PTR) & M_SEQ_WR_PTR)
+
+#define S_SEQ_RD_PTR 0
+#define M_SEQ_RD_PTR 0xfffU
+#define V_SEQ_RD_PTR(x) ((x) << S_SEQ_RD_PTR)
+#define G_SEQ_RD_PTR(x) (((x) >> S_SEQ_RD_PTR) & M_SEQ_RD_PTR)
+
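+/*
+ * Editorial sketch, not part of the original commit: a read-modify-write
+ * of the sequence-search control register using the field macros above.
+ * The V_FOO(M_FOO) form clears the 12-bit field before the new write
+ * pointer is OR'd in.  t4_read_reg()/t4_write_reg() are assumed to be
+ * the driver's standard accessors; new_wr_ptr is a caller-supplied
+ * value.
+ *
+ *	uint32_t v = t4_read_reg(sc, A_UP_SEQ_SEARCH_CTRL);
+ *
+ *	v &= ~V_SEQ_WR_PTR(M_SEQ_WR_PTR);
+ *	v |= V_SEQ_WR_PTR(new_wr_ptr) | F_ROCE_MODE;
+ *	t4_write_reg(sc, A_UP_SEQ_SEARCH_CTRL, v);
+ */
+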
#define A_UP_IBQ_0_CONFIG 0xd0
#define S_QUESIZE 26
@@ -44203,6 +56430,25 @@
#define V_QUE1KEN(x) ((x) << S_QUE1KEN)
#define F_QUE1KEN V_QUE1KEN(1U)
+#define A_UP_SEQ_SEARCH_RES0 0xd0
+
+#define S_INV_SEQ 18
+#define V_INV_SEQ(x) ((x) << S_INV_SEQ)
+#define F_INV_SEQ V_INV_SEQ(1U)
+
+#define S_DUP_SEQ 17
+#define V_DUP_SEQ(x) ((x) << S_DUP_SEQ)
+#define F_DUP_SEQ V_DUP_SEQ(1U)
+
+#define S_MATCH_VLD 16
+#define V_MATCH_VLD(x) ((x) << S_MATCH_VLD)
+#define F_MATCH_VLD V_MATCH_VLD(1U)
+
+#define S_MATCH_INDEX 0
+#define M_MATCH_INDEX 0xffffU
+#define V_MATCH_INDEX(x) ((x) << S_MATCH_INDEX)
+#define G_MATCH_INDEX(x) (((x) >> S_MATCH_INDEX) & M_MATCH_INDEX)
+
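+/*
+ * Editorial sketch, not part of the original commit: one plausible way
+ * to decode a search result from the register above.  The meaning given
+ * to the INV/DUP/VLD bits here is inferred from their names only; the
+ * surrounding logic is illustrative.
+ *
+ *	uint32_t res = t4_read_reg(sc, A_UP_SEQ_SEARCH_RES0);
+ *
+ *	if (res & F_INV_SEQ)
+ *		return (EINVAL);
+ *	if ((res & F_MATCH_VLD) && !(res & F_DUP_SEQ))
+ *		index = G_MATCH_INDEX(res);
+ */
+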
#define A_UP_IBQ_0_REALADDR 0xd4
#define S_QUERDADDRWRAP 31
@@ -44218,6 +56464,7 @@
#define V_QUEMEMADDR(x) ((x) << S_QUEMEMADDR)
#define G_QUEMEMADDR(x) (((x) >> S_QUEMEMADDR) & M_QUEMEMADDR)
+#define A_UP_SEQ_SEARCH_RES1 0xd4
#define A_UP_IBQ_1_CONFIG 0xd8
#define A_UP_IBQ_1_REALADDR 0xdc
#define A_UP_IBQ_2_CONFIG 0xe0
@@ -44229,14 +56476,34 @@
#define A_UP_IBQ_5_CONFIG 0xf8
#define A_UP_IBQ_5_REALADDR 0xfc
#define A_UP_OBQ_0_CONFIG 0x100
+#define A_UP_PEER_HALT_STAT0 0x100
+
+#define S_HALTINFO 1
+#define M_HALTINFO 0x7fffffffU
+#define V_HALTINFO(x) ((x) << S_HALTINFO)
+#define G_HALTINFO(x) (((x) >> S_HALTINFO) & M_HALTINFO)
+
#define A_UP_OBQ_0_REALADDR 0x104
+#define A_UP_PEER_HALT_STAT1 0x104
#define A_UP_OBQ_1_CONFIG 0x108
+#define A_UP_PEER_HALT_STAT2 0x108
#define A_UP_OBQ_1_REALADDR 0x10c
+#define A_UP_PEER_HALT_STAT3 0x10c
#define A_UP_OBQ_2_CONFIG 0x110
+#define A_UP_PEER_HALT_STAT4 0x110
#define A_UP_OBQ_2_REALADDR 0x114
+#define A_UP_PEER_HALT_STAT5 0x114
#define A_UP_OBQ_3_CONFIG 0x118
+#define A_UP_PEER_HALT_STAT6 0x118
#define A_UP_OBQ_3_REALADDR 0x11c
+#define A_UP_PEER_HALT_STAT7 0x11c
#define A_UP_OBQ_4_CONFIG 0x120
+#define A_UP_PEER_HALT_CTL 0x120
+
+#define S_HALTREQ 0
+#define V_HALTREQ(x) ((x) << S_HALTREQ)
+#define F_HALTREQ V_HALTREQ(1U)
+
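+/*
+ * Editorial sketch, not part of the original commit: F_HALTREQ above
+ * reads like a simple request strobe, with per-peer status reported in
+ * the A_UP_PEER_HALT_STAT* registers, e.g.:
+ *
+ *	t4_write_reg(sc, A_UP_PEER_HALT_CTL, F_HALTREQ);
+ *	info = G_HALTINFO(t4_read_reg(sc, A_UP_PEER_HALT_STAT0));
+ */
+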
#define A_UP_OBQ_4_REALADDR 0x124
#define A_UP_OBQ_5_CONFIG 0x128
#define A_UP_OBQ_5_REALADDR 0x12c
@@ -44516,6 +56783,204 @@
#define A_UP_OBQ_6_SHADOW_REALADDR 0x3c4
#define A_UP_OBQ_7_SHADOW_CONFIG 0x3c8
#define A_UP_OBQ_7_SHADOW_REALADDR 0x3cc
+#define A_T7_UP_IBQ_0_SHADOW_RDADDR 0x400
+#define A_T7_UP_IBQ_0_SHADOW_WRADDR 0x404
+#define A_T7_UP_IBQ_0_SHADOW_STATUS 0x408
+
+#define S_T7_QUEREMFLITS 0
+#define M_T7_QUEREMFLITS 0xfffU
+#define V_T7_QUEREMFLITS(x) ((x) << S_T7_QUEREMFLITS)
+#define G_T7_QUEREMFLITS(x) (((x) >> S_T7_QUEREMFLITS) & M_T7_QUEREMFLITS)
+
+#define A_T7_UP_IBQ_0_SHADOW_PKTCNT 0x40c
+#define A_T7_UP_IBQ_1_SHADOW_RDADDR 0x410
+#define A_T7_UP_IBQ_1_SHADOW_WRADDR 0x414
+#define A_T7_UP_IBQ_1_SHADOW_STATUS 0x418
+#define A_T7_UP_IBQ_1_SHADOW_PKTCNT 0x41c
+#define A_T7_UP_IBQ_2_SHADOW_RDADDR 0x420
+#define A_T7_UP_IBQ_2_SHADOW_WRADDR 0x424
+#define A_T7_UP_IBQ_2_SHADOW_STATUS 0x428
+#define A_T7_UP_IBQ_2_SHADOW_PKTCNT 0x42c
+#define A_T7_UP_IBQ_3_SHADOW_RDADDR 0x430
+#define A_T7_UP_IBQ_3_SHADOW_WRADDR 0x434
+#define A_T7_UP_IBQ_3_SHADOW_STATUS 0x438
+#define A_T7_UP_IBQ_3_SHADOW_PKTCNT 0x43c
+#define A_T7_UP_IBQ_4_SHADOW_RDADDR 0x440
+#define A_T7_UP_IBQ_4_SHADOW_WRADDR 0x444
+#define A_T7_UP_IBQ_4_SHADOW_STATUS 0x448
+#define A_T7_UP_IBQ_4_SHADOW_PKTCNT 0x44c
+#define A_T7_UP_IBQ_5_SHADOW_RDADDR 0x450
+#define A_T7_UP_IBQ_5_SHADOW_WRADDR 0x454
+#define A_T7_UP_IBQ_5_SHADOW_STATUS 0x458
+#define A_T7_UP_IBQ_5_SHADOW_PKTCNT 0x45c
+#define A_UP_IBQ_6_SHADOW_RDADDR 0x460
+#define A_UP_IBQ_6_SHADOW_WRADDR 0x464
+#define A_UP_IBQ_6_SHADOW_STATUS 0x468
+#define A_UP_IBQ_6_SHADOW_PKTCNT 0x46c
+#define A_UP_IBQ_7_SHADOW_RDADDR 0x470
+#define A_UP_IBQ_7_SHADOW_WRADDR 0x474
+#define A_UP_IBQ_7_SHADOW_STATUS 0x478
+#define A_UP_IBQ_7_SHADOW_PKTCNT 0x47c
+#define A_UP_IBQ_8_SHADOW_RDADDR 0x480
+#define A_UP_IBQ_8_SHADOW_WRADDR 0x484
+#define A_UP_IBQ_8_SHADOW_STATUS 0x488
+#define A_UP_IBQ_8_SHADOW_PKTCNT 0x48c
+#define A_UP_IBQ_9_SHADOW_RDADDR 0x490
+#define A_UP_IBQ_9_SHADOW_WRADDR 0x494
+#define A_UP_IBQ_9_SHADOW_STATUS 0x498
+#define A_UP_IBQ_9_SHADOW_PKTCNT 0x49c
+#define A_UP_IBQ_10_SHADOW_RDADDR 0x4a0
+#define A_UP_IBQ_10_SHADOW_WRADDR 0x4a4
+#define A_UP_IBQ_10_SHADOW_STATUS 0x4a8
+#define A_UP_IBQ_10_SHADOW_PKTCNT 0x4ac
+#define A_UP_IBQ_11_SHADOW_RDADDR 0x4b0
+#define A_UP_IBQ_11_SHADOW_WRADDR 0x4b4
+#define A_UP_IBQ_11_SHADOW_STATUS 0x4b8
+#define A_UP_IBQ_11_SHADOW_PKTCNT 0x4bc
+#define A_UP_IBQ_12_SHADOW_RDADDR 0x4c0
+#define A_UP_IBQ_12_SHADOW_WRADDR 0x4c4
+#define A_UP_IBQ_12_SHADOW_STATUS 0x4c8
+#define A_UP_IBQ_12_SHADOW_PKTCNT 0x4cc
+#define A_UP_IBQ_13_SHADOW_RDADDR 0x4d0
+#define A_UP_IBQ_13_SHADOW_WRADDR 0x4d4
+#define A_UP_IBQ_13_SHADOW_STATUS 0x4d8
+#define A_UP_IBQ_13_SHADOW_PKTCNT 0x4dc
+#define A_UP_IBQ_14_SHADOW_RDADDR 0x4e0
+#define A_UP_IBQ_14_SHADOW_WRADDR 0x4e4
+#define A_UP_IBQ_14_SHADOW_STATUS 0x4e8
+#define A_UP_IBQ_14_SHADOW_PKTCNT 0x4ec
+#define A_UP_IBQ_15_SHADOW_RDADDR 0x4f0
+#define A_UP_IBQ_15_SHADOW_WRADDR 0x4f4
+#define A_UP_IBQ_15_SHADOW_STATUS 0x4f8
+#define A_UP_IBQ_15_SHADOW_PKTCNT 0x4fc
+#define A_T7_UP_IBQ_0_SHADOW_CONFIG 0x500
+#define A_T7_UP_IBQ_0_SHADOW_REALADDR 0x504
+#define A_T7_UP_IBQ_1_SHADOW_CONFIG 0x510
+#define A_T7_UP_IBQ_1_SHADOW_REALADDR 0x514
+#define A_T7_UP_IBQ_2_SHADOW_CONFIG 0x520
+#define A_T7_UP_IBQ_2_SHADOW_REALADDR 0x524
+#define A_T7_UP_IBQ_3_SHADOW_CONFIG 0x530
+#define A_T7_UP_IBQ_3_SHADOW_REALADDR 0x534
+#define A_T7_UP_IBQ_4_SHADOW_CONFIG 0x540
+#define A_T7_UP_IBQ_4_SHADOW_REALADDR 0x544
+#define A_T7_UP_IBQ_5_SHADOW_CONFIG 0x550
+#define A_T7_UP_IBQ_5_SHADOW_REALADDR 0x554
+#define A_UP_IBQ_6_SHADOW_CONFIG 0x560
+#define A_UP_IBQ_6_SHADOW_REALADDR 0x564
+#define A_UP_IBQ_7_SHADOW_CONFIG 0x570
+#define A_UP_IBQ_7_SHADOW_REALADDR 0x574
+#define A_UP_IBQ_8_SHADOW_CONFIG 0x580
+#define A_UP_IBQ_8_SHADOW_REALADDR 0x584
+#define A_UP_IBQ_9_SHADOW_CONFIG 0x590
+#define A_UP_IBQ_9_SHADOW_REALADDR 0x594
+#define A_UP_IBQ_10_SHADOW_CONFIG 0x5a0
+#define A_UP_IBQ_10_SHADOW_REALADDR 0x5a4
+#define A_UP_IBQ_11_SHADOW_CONFIG 0x5b0
+#define A_UP_IBQ_11_SHADOW_REALADDR 0x5b4
+#define A_UP_IBQ_12_SHADOW_CONFIG 0x5c0
+#define A_UP_IBQ_12_SHADOW_REALADDR 0x5c4
+#define A_UP_IBQ_13_SHADOW_CONFIG 0x5d0
+#define A_UP_IBQ_13_SHADOW_REALADDR 0x5d4
+#define A_UP_IBQ_14_SHADOW_CONFIG 0x5e0
+#define A_UP_IBQ_14_SHADOW_REALADDR 0x5e4
+#define A_UP_IBQ_15_SHADOW_CONFIG 0x5f0
+#define A_UP_IBQ_15_SHADOW_REALADDR 0x5f4
+#define A_T7_UP_OBQ_0_SHADOW_RDADDR 0x600
+#define A_T7_UP_OBQ_0_SHADOW_WRADDR 0x604
+#define A_T7_UP_OBQ_0_SHADOW_STATUS 0x608
+#define A_T7_UP_OBQ_0_SHADOW_PKTCNT 0x60c
+#define A_T7_UP_OBQ_1_SHADOW_RDADDR 0x610
+#define A_T7_UP_OBQ_1_SHADOW_WRADDR 0x614
+#define A_T7_UP_OBQ_1_SHADOW_STATUS 0x618
+#define A_T7_UP_OBQ_1_SHADOW_PKTCNT 0x61c
+#define A_T7_UP_OBQ_2_SHADOW_RDADDR 0x620
+#define A_T7_UP_OBQ_2_SHADOW_WRADDR 0x624
+#define A_T7_UP_OBQ_2_SHADOW_STATUS 0x628
+#define A_T7_UP_OBQ_2_SHADOW_PKTCNT 0x62c
+#define A_T7_UP_OBQ_3_SHADOW_RDADDR 0x630
+#define A_T7_UP_OBQ_3_SHADOW_WRADDR 0x634
+#define A_T7_UP_OBQ_3_SHADOW_STATUS 0x638
+#define A_T7_UP_OBQ_3_SHADOW_PKTCNT 0x63c
+#define A_T7_UP_OBQ_4_SHADOW_RDADDR 0x640
+#define A_T7_UP_OBQ_4_SHADOW_WRADDR 0x644
+#define A_T7_UP_OBQ_4_SHADOW_STATUS 0x648
+#define A_T7_UP_OBQ_4_SHADOW_PKTCNT 0x64c
+#define A_T7_UP_OBQ_5_SHADOW_RDADDR 0x650
+#define A_T7_UP_OBQ_5_SHADOW_WRADDR 0x654
+#define A_T7_UP_OBQ_5_SHADOW_STATUS 0x658
+#define A_T7_UP_OBQ_5_SHADOW_PKTCNT 0x65c
+#define A_T7_UP_OBQ_6_SHADOW_RDADDR 0x660
+#define A_T7_UP_OBQ_6_SHADOW_WRADDR 0x664
+#define A_T7_UP_OBQ_6_SHADOW_STATUS 0x668
+#define A_T7_UP_OBQ_6_SHADOW_PKTCNT 0x66c
+#define A_T7_UP_OBQ_7_SHADOW_RDADDR 0x670
+#define A_T7_UP_OBQ_7_SHADOW_WRADDR 0x674
+#define A_T7_UP_OBQ_7_SHADOW_STATUS 0x678
+#define A_T7_UP_OBQ_7_SHADOW_PKTCNT 0x67c
+#define A_UP_OBQ_8_SHADOW_RDADDR 0x680
+#define A_UP_OBQ_8_SHADOW_WRADDR 0x684
+#define A_UP_OBQ_8_SHADOW_STATUS 0x688
+#define A_UP_OBQ_8_SHADOW_PKTCNT 0x68c
+#define A_UP_OBQ_9_SHADOW_RDADDR 0x690
+#define A_UP_OBQ_9_SHADOW_WRADDR 0x694
+#define A_UP_OBQ_9_SHADOW_STATUS 0x698
+#define A_UP_OBQ_9_SHADOW_PKTCNT 0x69c
+#define A_UP_OBQ_10_SHADOW_RDADDR 0x6a0
+#define A_UP_OBQ_10_SHADOW_WRADDR 0x6a4
+#define A_UP_OBQ_10_SHADOW_STATUS 0x6a8
+#define A_UP_OBQ_10_SHADOW_PKTCNT 0x6ac
+#define A_UP_OBQ_11_SHADOW_RDADDR 0x6b0
+#define A_UP_OBQ_11_SHADOW_WRADDR 0x6b4
+#define A_UP_OBQ_11_SHADOW_STATUS 0x6b8
+#define A_UP_OBQ_11_SHADOW_PKTCNT 0x6bc
+#define A_UP_OBQ_12_SHADOW_RDADDR 0x6c0
+#define A_UP_OBQ_12_SHADOW_WRADDR 0x6c4
+#define A_UP_OBQ_12_SHADOW_STATUS 0x6c8
+#define A_UP_OBQ_12_SHADOW_PKTCNT 0x6cc
+#define A_UP_OBQ_13_SHADOW_RDADDR 0x6d0
+#define A_UP_OBQ_13_SHADOW_WRADDR 0x6d4
+#define A_UP_OBQ_13_SHADOW_STATUS 0x6d8
+#define A_UP_OBQ_13_SHADOW_PKTCNT 0x6dc
+#define A_UP_OBQ_14_SHADOW_RDADDR 0x6e0
+#define A_UP_OBQ_14_SHADOW_WRADDR 0x6e4
+#define A_UP_OBQ_14_SHADOW_STATUS 0x6e8
+#define A_UP_OBQ_14_SHADOW_PKTCNT 0x6ec
+#define A_UP_OBQ_15_SHADOW_RDADDR 0x6f0
+#define A_UP_OBQ_15_SHADOW_WRADDR 0x6f4
+#define A_UP_OBQ_15_SHADOW_STATUS 0x6f8
+#define A_UP_OBQ_15_SHADOW_PKTCNT 0x6fc
+#define A_T7_UP_OBQ_0_SHADOW_CONFIG 0x700
+#define A_T7_UP_OBQ_0_SHADOW_REALADDR 0x704
+#define A_T7_UP_OBQ_1_SHADOW_CONFIG 0x710
+#define A_T7_UP_OBQ_1_SHADOW_REALADDR 0x714
+#define A_T7_UP_OBQ_2_SHADOW_CONFIG 0x720
+#define A_T7_UP_OBQ_2_SHADOW_REALADDR 0x724
+#define A_T7_UP_OBQ_3_SHADOW_CONFIG 0x730
+#define A_T7_UP_OBQ_3_SHADOW_REALADDR 0x734
+#define A_T7_UP_OBQ_4_SHADOW_CONFIG 0x740
+#define A_T7_UP_OBQ_4_SHADOW_REALADDR 0x744
+#define A_T7_UP_OBQ_5_SHADOW_CONFIG 0x750
+#define A_T7_UP_OBQ_5_SHADOW_REALADDR 0x754
+#define A_T7_UP_OBQ_6_SHADOW_CONFIG 0x760
+#define A_T7_UP_OBQ_6_SHADOW_REALADDR 0x764
+#define A_T7_UP_OBQ_7_SHADOW_CONFIG 0x770
+#define A_T7_UP_OBQ_7_SHADOW_REALADDR 0x774
+#define A_UP_OBQ_8_SHADOW_CONFIG 0x780
+#define A_UP_OBQ_8_SHADOW_REALADDR 0x784
+#define A_UP_OBQ_9_SHADOW_CONFIG 0x790
+#define A_UP_OBQ_9_SHADOW_REALADDR 0x794
+#define A_UP_OBQ_10_SHADOW_CONFIG 0x7a0
+#define A_UP_OBQ_10_SHADOW_REALADDR 0x7a4
+#define A_UP_OBQ_11_SHADOW_CONFIG 0x7b0
+#define A_UP_OBQ_11_SHADOW_REALADDR 0x7b4
+#define A_UP_OBQ_12_SHADOW_CONFIG 0x7c0
+#define A_UP_OBQ_12_SHADOW_REALADDR 0x7c4
+#define A_UP_OBQ_13_SHADOW_CONFIG 0x7d0
+#define A_UP_OBQ_13_SHADOW_REALADDR 0x7d4
+#define A_UP_OBQ_14_SHADOW_CONFIG 0x7e0
+#define A_UP_OBQ_14_SHADOW_REALADDR 0x7e4
+#define A_UP_OBQ_15_SHADOW_CONFIG 0x7f0
+#define A_UP_OBQ_15_SHADOW_REALADDR 0x7f4
/* registers for module CIM_CTL */
#define CIM_CTL_BASE_ADDR 0x0
@@ -44579,17 +57044,63 @@
#define A_CIM_CTL_STATIC_PREFADDR10 0x38
#define A_CIM_CTL_STATIC_PREFADDR11 0x3c
#define A_CIM_CTL_STATIC_PREFADDR12 0x40
+#define A_CIM_CTL_SEM_CFG 0x40
+
+#define S_SEMINIT 31
+#define V_SEMINIT(x) ((x) << S_SEMINIT)
+#define F_SEMINIT V_SEMINIT(1U)
+
+#define S_NUMSEM 0
+#define M_NUMSEM 0x3ffffU
+#define V_NUMSEM(x) ((x) << S_NUMSEM)
+#define G_NUMSEM(x) (((x) >> S_NUMSEM) & M_NUMSEM)
+
#define A_CIM_CTL_STATIC_PREFADDR13 0x44
+#define A_CIM_CTL_SEM_MA_CFG 0x44
+
+#define S_SEMMABASE 4
+#define M_SEMMABASE 0xfffffffU
+#define V_SEMMABASE(x) ((x) << S_SEMMABASE)
+#define G_SEMMABASE(x) (((x) >> S_SEMMABASE) & M_SEMMABASE)
+
+#define S_SEMMATHREADID 0
+#define M_SEMMATHREADID 0x7U
+#define V_SEMMATHREADID(x) ((x) << S_SEMMATHREADID)
+#define G_SEMMATHREADID(x) (((x) >> S_SEMMATHREADID) & M_SEMMATHREADID)
+
#define A_CIM_CTL_STATIC_PREFADDR14 0x48
#define A_CIM_CTL_STATIC_PREFADDR15 0x4c
#define A_CIM_CTL_STATIC_ALLOCADDR0 0x50
+#define A_CIM_CTL_LOCK_CFG 0x50
+
+#define S_NUMLOCK 0
+#define M_NUMLOCK 0x3ffffU
+#define V_NUMLOCK(x) ((x) << S_NUMLOCK)
+#define G_NUMLOCK(x) (((x) >> S_NUMLOCK) & M_NUMLOCK)
+
#define A_CIM_CTL_STATIC_ALLOCADDR1 0x54
+#define A_CIM_CTL_LOCK_MA_CFG 0x54
+
+#define S_LOCKMABASE 4
+#define M_LOCKMABASE 0xfffffffU
+#define V_LOCKMABASE(x) ((x) << S_LOCKMABASE)
+#define G_LOCKMABASE(x) (((x) >> S_LOCKMABASE) & M_LOCKMABASE)
+
+#define S_LOCKMATHREADID 0
+#define M_LOCKMATHREADID 0x7U
+#define V_LOCKMATHREADID(x) ((x) << S_LOCKMATHREADID)
+#define G_LOCKMATHREADID(x) (((x) >> S_LOCKMATHREADID) & M_LOCKMATHREADID)
+
#define A_CIM_CTL_STATIC_ALLOCADDR2 0x58
#define A_CIM_CTL_STATIC_ALLOCADDR3 0x5c
#define A_CIM_CTL_STATIC_ALLOCADDR4 0x60
+#define A_CIM_CTL_RSA_INT 0x60
#define A_CIM_CTL_STATIC_ALLOCADDR5 0x64
+#define A_CIM_CTL_RSA_BUSY 0x64
#define A_CIM_CTL_STATIC_ALLOCADDR6 0x68
+#define A_CIM_CTL_RSA_CPERR 0x68
#define A_CIM_CTL_STATIC_ALLOCADDR7 0x6c
+#define A_CIM_CTL_RSA_DPERR 0x6c
#define A_CIM_CTL_STATIC_ALLOCADDR8 0x70
#define A_CIM_CTL_STATIC_ALLOCADDR9 0x74
#define A_CIM_CTL_STATIC_ALLOCADDR10 0x78
@@ -44650,6 +57161,66 @@
#define A_CIM_CTL_GEN_TIMER3 0xd0
#define A_CIM_CTL_MAILBOX_VF_STATUS 0xe0
#define A_CIM_CTL_MAILBOX_VFN_CTL 0x100
+#define A_CIM_CTL_TID_MAP_EN 0x500
+#define A_CIM_CTL_TID_MAP_CORE 0x520
+#define A_CIM_CTL_TID_MAP_CONFIG 0x540
+
+#define S_TIDDEFCORE 4
+#define M_TIDDEFCORE 0xfU
+#define V_TIDDEFCORE(x) ((x) << S_TIDDEFCORE)
+#define G_TIDDEFCORE(x) (((x) >> S_TIDDEFCORE) & M_TIDDEFCORE)
+
+#define S_TIDVECBASE 0
+#define M_TIDVECBASE 0x7U
+#define V_TIDVECBASE(x) ((x) << S_TIDVECBASE)
+#define G_TIDVECBASE(x) (((x) >> S_TIDVECBASE) & M_TIDVECBASE)
+
+#define A_CIM_CTL_CRYPTO_KEY_DATA 0x600
+#define A_CIM_CTL_SECURE_CONFIG 0x6f8
+#define A_CIM_CTL_CRYPTO_KEY_CTRL 0x6fc
+
+#define S_CRYPTOKEYDATAREGNUM 8
+#define M_CRYPTOKEYDATAREGNUM 0xffU
+#define V_CRYPTOKEYDATAREGNUM(x) ((x) << S_CRYPTOKEYDATAREGNUM)
+#define G_CRYPTOKEYDATAREGNUM(x) (((x) >> S_CRYPTOKEYDATAREGNUM) & M_CRYPTOKEYDATAREGNUM)
+
+#define S_CRYPTOKEYSTARTBUSY 0
+#define V_CRYPTOKEYSTARTBUSY(x) ((x) << S_CRYPTOKEYSTARTBUSY)
+#define F_CRYPTOKEYSTARTBUSY V_CRYPTOKEYSTARTBUSY(1U)
+
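+/*
+ * Editorial sketch, not part of the original commit: the START/BUSY bit
+ * above appears to follow the usual kick-and-poll idiom, selecting one
+ * of the key data registers by number and spinning until the hardware
+ * clears BUSY (regnum is a caller-supplied index):
+ *
+ *	t4_write_reg(sc, A_CIM_CTL_CRYPTO_KEY_CTRL,
+ *	    V_CRYPTOKEYDATAREGNUM(regnum) | F_CRYPTOKEYSTARTBUSY);
+ *	while (t4_read_reg(sc, A_CIM_CTL_CRYPTO_KEY_CTRL) &
+ *	    F_CRYPTOKEYSTARTBUSY)
+ *		continue;
+ */
+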
+#define A_CIM_CTL_FLOWID_OP_VALID 0x700
+#define A_CIM_CTL_FLOWID_CTL 0x720
+
+#define S_FLOWBASEADDR 8
+#define M_FLOWBASEADDR 0xffffffU
+#define V_FLOWBASEADDR(x) ((x) << S_FLOWBASEADDR)
+#define G_FLOWBASEADDR(x) (((x) >> S_FLOWBASEADDR) & M_FLOWBASEADDR)
+
+#define S_SEQSRCHALIGNCFG 4
+#define M_SEQSRCHALIGNCFG 0x3U
+#define V_SEQSRCHALIGNCFG(x) ((x) << S_SEQSRCHALIGNCFG)
+#define G_SEQSRCHALIGNCFG(x) (((x) >> S_SEQSRCHALIGNCFG) & M_SEQSRCHALIGNCFG)
+
+#define S_FLOWADDRSIZE 1
+#define M_FLOWADDRSIZE 0x3U
+#define V_FLOWADDRSIZE(x) ((x) << S_FLOWADDRSIZE)
+#define G_FLOWADDRSIZE(x) (((x) >> S_FLOWADDRSIZE) & M_FLOWADDRSIZE)
+
+#define S_FLOWIDEN 0
+#define V_FLOWIDEN(x) ((x) << S_FLOWIDEN)
+#define F_FLOWIDEN V_FLOWIDEN(1U)
+
+#define A_CIM_CTL_FLOWID_MAX 0x724
+
+#define S_MAXFLOWID 0
+#define M_MAXFLOWID 0xffffffU
+#define V_MAXFLOWID(x) ((x) << S_MAXFLOWID)
+#define G_MAXFLOWID(x) (((x) >> S_MAXFLOWID) & M_MAXFLOWID)
+
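+/*
+ * Editorial sketch, not part of the original commit: enabling the
+ * flow-ID block with the fields defined above.  base_addr, addr_size,
+ * and max_flows are illustrative caller-supplied values.
+ *
+ *	t4_write_reg(sc, A_CIM_CTL_FLOWID_CTL,
+ *	    V_FLOWBASEADDR(base_addr) | V_FLOWADDRSIZE(addr_size) |
+ *	    F_FLOWIDEN);
+ *	t4_write_reg(sc, A_CIM_CTL_FLOWID_MAX, V_MAXFLOWID(max_flows));
+ */
+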
+#define A_CIM_CTL_FLOWID_HINT0 0x728
+#define A_CIM_CTL_EFUSE_CTRL 0x780
+#define A_CIM_CTL_EFUSE_QOUT 0x784
+#define A_CIM_CTL_EFUSE_RFOUT 0x788
#define A_CIM_CTL_TSCH_CHNLN_CTL 0x900
#define S_TSCHNLEN 31
@@ -45001,14 +57572,19 @@
#define A_CIM_CTL_TSCH_TICK3 0xd8c
#define A_CIM_CTL_MAILBOX_PF3_CTL 0xd90
#define A_T6_CIM_CTL_MAILBOX_PF0_CTL 0xd90
+#define A_T7_CIM_CTL_MAILBOX_PF0_CTL 0xd90
#define A_CIM_CTL_MAILBOX_PF4_CTL 0xd94
#define A_T6_CIM_CTL_MAILBOX_PF1_CTL 0xd94
+#define A_T7_CIM_CTL_MAILBOX_PF1_CTL 0xd94
#define A_CIM_CTL_MAILBOX_PF5_CTL 0xd98
#define A_T6_CIM_CTL_MAILBOX_PF2_CTL 0xd98
+#define A_T7_CIM_CTL_MAILBOX_PF2_CTL 0xd98
#define A_CIM_CTL_MAILBOX_PF6_CTL 0xd9c
#define A_T6_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
+#define A_T7_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
#define A_CIM_CTL_MAILBOX_PF7_CTL 0xda0
#define A_T6_CIM_CTL_MAILBOX_PF4_CTL 0xda0
+#define A_T7_CIM_CTL_MAILBOX_PF4_CTL 0xda0
#define A_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xda4
#define S_PF7_OWNER_PL 15
@@ -45076,6 +57652,7 @@
#define F_PF0_OWNER_UP V_PF0_OWNER_UP(1U)
#define A_T6_CIM_CTL_MAILBOX_PF5_CTL 0xda4
+#define A_T7_CIM_CTL_MAILBOX_PF5_CTL 0xda4
#define A_CIM_CTL_PIO_MST_CONFIG 0xda8
#define S_T5_CTLRID 0
@@ -45084,15 +57661,13 @@
#define G_T5_CTLRID(x) (((x) >> S_T5_CTLRID) & M_T5_CTLRID)
#define A_T6_CIM_CTL_MAILBOX_PF6_CTL 0xda8
+#define A_T7_CIM_CTL_MAILBOX_PF6_CTL 0xda8
#define A_T6_CIM_CTL_MAILBOX_PF7_CTL 0xdac
+#define A_T7_CIM_CTL_MAILBOX_PF7_CTL 0xdac
#define A_T6_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
+#define A_T7_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
#define A_T6_CIM_CTL_PIO_MST_CONFIG 0xdb4
-
-#define S_T6_UPRID 0
-#define M_T6_UPRID 0x1ffU
-#define V_T6_UPRID(x) ((x) << S_T6_UPRID)
-#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID)
-
+#define A_T7_CIM_CTL_PIO_MST_CONFIG 0xdb4
#define A_CIM_CTL_ULP_OBQ0_PAUSE_MASK 0xe00
#define A_CIM_CTL_ULP_OBQ1_PAUSE_MASK 0xe04
#define A_CIM_CTL_ULP_OBQ2_PAUSE_MASK 0xe08
@@ -45119,6 +57694,64 @@
#define V_MA_TIMEOUT(x) ((x) << S_MA_TIMEOUT)
#define G_MA_TIMEOUT(x) (((x) >> S_MA_TIMEOUT) & M_MA_TIMEOUT)
+#define A_CIM_CTL_BREAK 0xf00
+
+#define S_XOCDMODE 8
+#define M_XOCDMODE 0xffU
+#define V_XOCDMODE(x) ((x) << S_XOCDMODE)
+#define G_XOCDMODE(x) (((x) >> S_XOCDMODE) & M_XOCDMODE)
+
+#define S_BREAKIN_CONTROL 0
+#define M_BREAKIN_CONTROL 0xffU
+#define V_BREAKIN_CONTROL(x) ((x) << S_BREAKIN_CONTROL)
+#define G_BREAKIN_CONTROL(x) (((x) >> S_BREAKIN_CONTROL) & M_BREAKIN_CONTROL)
+
+#define A_CIM_CTL_SLV_BOOT_CFG 0x4000
+
+#define S_T7_UPGEN 3
+#define M_T7_UPGEN 0x1fU
+#define V_T7_UPGEN(x) ((x) << S_T7_UPGEN)
+#define G_T7_UPGEN(x) (((x) >> S_T7_UPGEN) & M_T7_UPGEN)
+
+#define S_UPCLKEN 2
+#define V_UPCLKEN(x) ((x) << S_UPCLKEN)
+#define F_UPCLKEN V_UPCLKEN(1U)
+
+#define A_CIM_CTL_SLV_BOOT_LEN 0x4004
+#define A_CIM_CTL_SLV_ACC_INT_ENABLE 0x4008
+#define A_CIM_CTL_SLV_ACC_INT_CAUSE 0x400c
+#define A_CIM_CTL_SLV_INT_ENABLE 0x4010
+#define A_CIM_CTL_SLV_INT_CAUSE 0x4014
+#define A_CIM_CTL_SLV_PERR_ENABLE 0x4018
+#define A_CIM_CTL_SLV_PERR_CAUSE 0x401c
+#define A_CIM_CTL_SLV_ADDR_TIMEOUT 0x4028
+#define A_CIM_CTL_SLV_ADDR_ILLEGAL 0x402c
+#define A_CIM_CTL_SLV_PIO_MST_CONFIG 0x4030
+#define A_CIM_CTL_SLV_MEM_ZONE0_VA 0x4040
+#define A_CIM_CTL_SLV_MEM_ZONE0_BA 0x4044
+#define A_CIM_CTL_SLV_MEM_ZONE0_LEN 0x4048
+#define A_CIM_CTL_SLV_MEM_ZONE1_VA 0x404c
+#define A_CIM_CTL_SLV_MEM_ZONE1_BA 0x4050
+#define A_CIM_CTL_SLV_MEM_ZONE1_LEN 0x4054
+#define A_CIM_CTL_SLV_MEM_ZONE2_VA 0x4058
+#define A_CIM_CTL_SLV_MEM_ZONE2_BA 0x405c
+#define A_CIM_CTL_SLV_MEM_ZONE2_LEN 0x4060
+#define A_CIM_CTL_SLV_MEM_ZONE3_VA 0x4064
+#define A_CIM_CTL_SLV_MEM_ZONE3_BA 0x4068
+#define A_CIM_CTL_SLV_MEM_ZONE3_LEN 0x406c
+#define A_CIM_CTL_SLV_MEM_ZONE4_VA 0x4070
+#define A_CIM_CTL_SLV_MEM_ZONE4_BA 0x4074
+#define A_CIM_CTL_SLV_MEM_ZONE4_LEN 0x4078
+#define A_CIM_CTL_SLV_MEM_ZONE5_VA 0x407c
+#define A_CIM_CTL_SLV_MEM_ZONE5_BA 0x4080
+#define A_CIM_CTL_SLV_MEM_ZONE5_LEN 0x4084
+#define A_CIM_CTL_SLV_MEM_ZONE6_VA 0x4088
+#define A_CIM_CTL_SLV_MEM_ZONE6_BA 0x408c
+#define A_CIM_CTL_SLV_MEM_ZONE6_LEN 0x4090
+#define A_CIM_CTL_SLV_MEM_ZONE7_VA 0x4094
+#define A_CIM_CTL_SLV_MEM_ZONE7_BA 0x4098
+#define A_CIM_CTL_SLV_MEM_ZONE7_LEN 0x409c
+
/* registers for module MAC */
#define MAC_BASE_ADDR 0x0
@@ -46613,33 +59246,7 @@
#define F_PERR_TX_PCS1G V_PERR_TX_PCS1G(1U)
#define A_MAC_PORT_PERR_INT_CAUSE 0x8e4
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_ENABLE 0x8e8
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_INJECT 0x8ec
#define S_MEMSEL_PERR 1
@@ -47304,10 +59911,12 @@
#define A_MAC_PORT_PTP_DRIFT_ADJUST_COUNT 0x9a0
#define A_MAC_PORT_PTP_OFFSET_ADJUST_FINE 0x9a4
+#if 0
#define S_B 16
-#define CXGBE_M_B 0xffffU
+#define M_B 0xffffU
#define V_B(x) ((x) << S_B)
-#define G_B(x) (((x) >> S_B) & CXGBE_M_B)
+#define G_B(x) (((x) >> S_B) & M_B)
+#endif
#define S_A 0
#define M_A 0xffffU
@@ -48454,10 +61063,6 @@
#define V_LOW_POWER(x) ((x) << S_LOW_POWER)
#define F_LOW_POWER V_LOW_POWER(1U)
-#define S_T6_SPEED_SEL1 6
-#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1)
-#define F_T6_SPEED_SEL1 V_T6_SPEED_SEL1(1U)
-
#define S_SPEED_SEL2 2
#define M_SPEED_SEL2 0xfU
#define V_SPEED_SEL2(x) ((x) << S_SPEED_SEL2)
@@ -49016,7 +61621,7 @@
#define S_VLANTAG 0
#define CXGBE_M_VLANTAG 0xffffU
#define V_VLANTAG(x) ((x) << S_VLANTAG)
#define G_VLANTAG(x) (((x) >> S_VLANTAG) & CXGBE_M_VLANTAG)
#define A_MAC_PORT_MTIP_VLAN_TPID_1 0x1a04
#define A_MAC_PORT_MTIP_VLAN_TPID_2 0x1a08
@@ -51279,75 +63884,24 @@
#define G_DPC_TIME_LIM(x) (((x) >> S_DPC_TIME_LIM) & M_DPC_TIME_LIM)
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_1 0x2b20
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_1 0x2b24
#define A_MAC_PORT_AET_ZFE_LIMITS_1 0x2b28
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_1 0x2b2c
#define A_MAC_PORT_AET_STATUS_1 0x2b30
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_21 0x2b34
#define A_MAC_PORT_AET_LIMITS1 0x2b38
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_2 0x2b40
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_2 0x2b44
#define A_MAC_PORT_AET_ZFE_LIMITS_2 0x2b48
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_2 0x2b4c
#define A_MAC_PORT_AET_STATUS_2 0x2b50
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_22 0x2b54
#define A_MAC_PORT_AET_LIMITS2 0x2b58
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_3 0x2b60
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_3 0x2b64
#define A_MAC_PORT_AET_ZFE_LIMITS_3 0x2b68
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_3 0x2b6c
#define A_MAC_PORT_AET_STATUS_3 0x2b70
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_23 0x2b74
#define A_MAC_PORT_AET_LIMITS3 0x2b78
#define A_T6_MAC_PORT_BEAN_CTL 0x2c00
@@ -52384,103 +64938,21 @@
#define F_BSOUTP V_BSOUTP(1U)
#define A_MAC_PORT_TX_LINKB_TRANSMIT_CONFIGURATION_MODE 0x3100
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TEST_CONTROL 0x3104
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_COEFFICIENT_CONTROL 0x3108
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_MODE_CONTROL 0x310c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3110
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3114
#define A_MAC_PORT_TX_LINKB_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3118
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x311c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT 0x3120
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT 0x3124
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT 0x3128
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_3_COEFFICIENT 0x312c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AMPLITUDE 0x3130
#define A_MAC_PORT_TX_LINKB_TRANSMIT_POLARITY 0x3134
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3138
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x313c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3144
@@ -52503,12 +64975,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3174
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3178
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x317c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3180
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3184
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3188
@@ -52521,21 +64987,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AZ_CONTROL 0x319c
#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x31a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKB_DCCSTEP_CTL 6
#define M_TX_LINKB_DCCSTEP_CTL 0x3U
#define V_TX_LINKB_DCCSTEP_CTL(x) ((x) << S_TX_LINKB_DCCSTEP_CTL)
@@ -52553,20 +65004,9 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x31e0
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_5 0x31ec
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_4 0x31f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_3 0x31f4
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_2 0x31f8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_1 0x31fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKA_RECEIVER_CONFIGURATION_MODE 0x3200
#define S_T5_RX_LINKEN 15
@@ -54442,56 +66882,15 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_TEST_CONTROL 0x3304
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_CONTROL 0x3308
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_OFFSET_CONTROL 0x330c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_1 0x3310
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_2 0x3314
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3318
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x331c
#define A_MAC_PORT_RX_LINKB_DFE_CONTROL 0x3320
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_1 0x3324
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_2 0x3328
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_1 0x332c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_2 0x3330
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_3 0x3334
#define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_1 0x3338
#define A_MAC_PORT_RX_LINKB_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3338
@@ -54515,12 +66914,6 @@
#define A_MAC_PORT_RX_LINKB_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x336c
#define A_MAC_PORT_RX_LINKB_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3370
#define A_MAC_PORT_RX_LINKB_DYNAMIC_DATA_CENTERING_DDC 0x3374
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS 0x3378
#define S_RX_LINKB_ACCCMP_RIS 11
@@ -54550,20 +66943,6 @@
#define A_MAC_PORT_RX_LINKB_INTEGRATOR_DAC_OFFSET 0x33a4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_CONTROL 0x33a8
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS 0x33ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_ERROR_COUNT 0x33b0
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x33b4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x33b8
@@ -54611,103 +66990,21 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x33f8
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_1 0x33fc
#define A_MAC_PORT_TX_LINKC_TRANSMIT_CONFIGURATION_MODE 0x3400
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TEST_CONTROL 0x3404
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_COEFFICIENT_CONTROL 0x3408
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_MODE_CONTROL 0x340c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3410
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3414
#define A_MAC_PORT_TX_LINKC_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3418
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x341c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT 0x3420
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT 0x3424
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT 0x3428
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_3_COEFFICIENT 0x342c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AMPLITUDE 0x3430
#define A_MAC_PORT_TX_LINKC_TRANSMIT_POLARITY 0x3434
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3438
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x343c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3444
@@ -54730,12 +67027,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3474
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3478
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x347c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3480
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3484
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3488
@@ -54748,21 +67039,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AZ_CONTROL 0x349c
#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x34a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKC_DCCSTEP_CTL 6
#define M_TX_LINKC_DCCSTEP_CTL 0x3U
#define V_TX_LINKC_DCCSTEP_CTL(x) ((x) << S_TX_LINKC_DCCSTEP_CTL)
@@ -54780,118 +67056,25 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x34e0
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_5 0x34ec
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_4 0x34f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_3 0x34f4
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_2 0x34f8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_1 0x34fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_CONFIGURATION_MODE 0x3500
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TEST_CONTROL 0x3504
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_COEFFICIENT_CONTROL 0x3508
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_MODE_CONTROL 0x350c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3510
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3514
#define A_MAC_PORT_TX_LINKD_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3518
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x351c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT 0x3520
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT 0x3524
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT 0x3528
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_3_COEFFICIENT 0x352c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AMPLITUDE 0x3530
#define A_MAC_PORT_TX_LINKD_TRANSMIT_POLARITY 0x3534
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3538
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x353c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3544
@@ -54914,12 +67097,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3574
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3578
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x357c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3580
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3584
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3588
@@ -54932,21 +67109,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AZ_CONTROL 0x359c
#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x35a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKD_DCCSTEP_CTL 6
#define M_TX_LINKD_DCCSTEP_CTL 0x3U
#define V_TX_LINKD_DCCSTEP_CTL(x) ((x) << S_TX_LINKD_DCCSTEP_CTL)
@@ -54964,74 +67126,22 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x35e0
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_5 0x35ec
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_4 0x35f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_3 0x35f4
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_2 0x35f8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_1 0x35fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_CONFIGURATION_MODE 0x3600
#define A_MAC_PORT_RX_LINKC_RECEIVER_TEST_CONTROL 0x3604
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_CONTROL 0x3608
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_OFFSET_CONTROL 0x360c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_1 0x3610
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_2 0x3614
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3618
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x361c
#define A_MAC_PORT_RX_LINKC_DFE_CONTROL 0x3620
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_1 0x3624
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_2 0x3628
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_1 0x362c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_2 0x3630
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_3 0x3634
#define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_1 0x3638
#define A_MAC_PORT_RX_LINKC_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3638
@@ -55055,12 +67165,6 @@
#define A_MAC_PORT_RX_LINKC_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x366c
#define A_MAC_PORT_RX_LINKC_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3670
#define A_MAC_PORT_RX_LINKC_DYNAMIC_DATA_CENTERING_DDC 0x3674
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS 0x3678
#define S_RX_LINKC_ACCCMP_RIS 11
@@ -55090,20 +67194,6 @@
#define A_MAC_PORT_RX_LINKC_INTEGRATOR_DAC_OFFSET 0x36a4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_CONTROL 0x36a8
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS 0x36ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_ERROR_COUNT 0x36b0
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x36b4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x36b8
@@ -55154,56 +67244,15 @@
#define A_MAC_PORT_RX_LINKD_RECEIVER_TEST_CONTROL 0x3704
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_CONTROL 0x3708
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_OFFSET_CONTROL 0x370c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_1 0x3710
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_2 0x3714
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3718
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x371c
#define A_MAC_PORT_RX_LINKD_DFE_CONTROL 0x3720
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_1 0x3724
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_2 0x3728
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_1 0x372c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_2 0x3730
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_3 0x3734
#define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_1 0x3738
#define A_MAC_PORT_RX_LINKD_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3738
@@ -55227,12 +67276,6 @@
#define A_MAC_PORT_RX_LINKD_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x376c
#define A_MAC_PORT_RX_LINKD_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3770
#define A_MAC_PORT_RX_LINKD_DYNAMIC_DATA_CENTERING_DDC 0x3774
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS 0x3778
#define S_RX_LINKD_ACCCMP_RIS 11
@@ -55262,20 +67305,6 @@
#define A_MAC_PORT_RX_LINKD_INTEGRATOR_DAC_OFFSET 0x37a4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_CONTROL 0x37a8
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS 0x37ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_ERROR_COUNT 0x37b0
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x37b4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x37b8
@@ -55597,103 +67626,21 @@
#define F_MACROTEST V_MACROTEST(1U)
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_CONFIGURATION_MODE 0x3900
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TEST_CONTROL 0x3904
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_COEFFICIENT_CONTROL 0x3908
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_MODE_CONTROL 0x390c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3910
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3914
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3918
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x391c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT 0x3920
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT 0x3924
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT 0x3928
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_3_COEFFICIENT 0x392c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AMPLITUDE 0x3930
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_POLARITY 0x3934
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3938
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x393c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3944
@@ -55716,12 +67663,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3974
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3978
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x397c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3980
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3984
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3988
@@ -55734,21 +67675,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AZ_CONTROL 0x399c
#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x39a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINK_BCST_DCCSTEP_CTL 6
#define M_TX_LINK_BCST_DCCSTEP_CTL 0x3U
#define V_TX_LINK_BCST_DCCSTEP_CTL(x) ((x) << S_TX_LINK_BCST_DCCSTEP_CTL)
@@ -55766,74 +67692,22 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x39e0
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_5 0x39ec
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_4 0x39f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_3 0x39f4
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_2 0x39f8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_1 0x39fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_CONFIGURATION_MODE 0x3a00
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_TEST_CONTROL 0x3a04
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_CONTROL 0x3a08
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_OFFSET_CONTROL 0x3a0c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_1 0x3a10
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_2 0x3a14
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3a18
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x3a1c
#define A_MAC_PORT_RX_LINK_BCST_DFE_CONTROL 0x3a20
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_1 0x3a24
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_2 0x3a28
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_1 0x3a2c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_2 0x3a30
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_3 0x3a34
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_1 0x3a38
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3a38
@@ -55857,12 +67731,6 @@
#define A_MAC_PORT_RX_LINK_BCST_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x3a6c
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3a70
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_DATA_CENTERING_DDC 0x3a74
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS 0x3a78
#define S_RX_LINK_BCST_ACCCMP_RIS 11
@@ -55892,20 +67760,6 @@
#define A_MAC_PORT_RX_LINK_BCST_INTEGRATOR_DAC_OFFSET 0x3aa4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_CONTROL 0x3aa8
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS 0x3aac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_ERROR_COUNT 0x3ab0
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x3ab4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x3ab8
@@ -56304,17 +68158,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56323,17 +68166,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56349,17 +68181,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56368,17 +68189,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56394,17 +68204,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56413,17 +68212,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56439,17 +68227,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56458,17 +68235,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56639,17 +68405,6 @@
#define G_RX_LINKB_INDEX_DFE_EN(x) (((x) >> S_RX_LINKB_INDEX_DFE_EN) & M_RX_LINKB_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKB_DFE_H1 0x2b04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKB_DFE_H2 0x2b08
#define A_T6_MAC_PORT_RX_LINKB_DFE_H3 0x2b0c
#define A_T6_MAC_PORT_RX_LINKB_DFE_H4 0x2b10
@@ -56668,17 +68423,6 @@
#define G_RX_LINKC_INDEX_DFE_EN(x) (((x) >> S_RX_LINKC_INDEX_DFE_EN) & M_RX_LINKC_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKC_DFE_H1 0x2e04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKC_DFE_H2 0x2e08
#define A_T6_MAC_PORT_RX_LINKC_DFE_H3 0x2e0c
#define A_T6_MAC_PORT_RX_LINKC_DFE_H4 0x2e10
@@ -56697,17 +68441,6 @@
#define G_RX_LINKD_INDEX_DFE_EN(x) (((x) >> S_RX_LINKD_INDEX_DFE_EN) & M_RX_LINKD_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKD_DFE_H1 0x2f04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKD_DFE_H2 0x2f08
#define A_T6_MAC_PORT_RX_LINKD_DFE_H3 0x2f0c
#define A_T6_MAC_PORT_RX_LINKD_DFE_H4 0x2f10
@@ -56726,17 +68459,6 @@
#define G_RX_LINK_BCST_INDEX_DFE_EN(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_EN) & M_RX_LINK_BCST_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3204
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3208
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H3 0x320c
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3210
@@ -57294,69 +69016,21 @@
#define G_BANK(x) (((x) >> S_BANK) & M_BANK)
#define A_MC_LMC_INITSEQ1 0x40148
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD1 0x4014c
#define A_MC_LMC_INITSEQ2 0x40150
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD2 0x40154
#define A_MC_LMC_INITSEQ3 0x40158
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD3 0x4015c
#define A_MC_LMC_INITSEQ4 0x40160
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD4 0x40164
#define A_MC_LMC_INITSEQ5 0x40168
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD5 0x4016c
#define A_MC_LMC_INITSEQ6 0x40170
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD6 0x40174
#define A_MC_LMC_INITSEQ7 0x40178
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD7 0x4017c
#define A_MC_UPCTL_ECCCFG 0x40180
#define A_MC_LMC_INITSEQ8 0x40180
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCTST 0x40184
#define S_ECC_TEST_MASK0 0
@@ -57367,61 +69041,19 @@
#define A_MC_LMC_CMD8 0x40184
#define A_MC_UPCTL_ECCCLR 0x40188
#define A_MC_LMC_INITSEQ9 0x40188
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCLOG 0x4018c
#define A_MC_LMC_CMD9 0x4018c
#define A_MC_LMC_INITSEQ10 0x40190
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD10 0x40194
#define A_MC_LMC_INITSEQ11 0x40198
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD11 0x4019c
#define A_MC_LMC_INITSEQ12 0x401a0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD12 0x401a4
#define A_MC_LMC_INITSEQ13 0x401a8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD13 0x401ac
#define A_MC_LMC_INITSEQ14 0x401b0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD14 0x401b4
#define A_MC_LMC_INITSEQ15 0x401b8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD15 0x401bc
#define A_MC_UPCTL_DTUWACTL 0x40200
@@ -61990,6 +73622,11 @@
#define V_NUMPIPESTAGES(x) ((x) << S_NUMPIPESTAGES)
#define G_NUMPIPESTAGES(x) (((x) >> S_NUMPIPESTAGES) & M_NUMPIPESTAGES)
+#define S_DRAMREFENABLE 27
+#define M_DRAMREFENABLE 0x3U
+#define V_DRAMREFENABLE(x) ((x) << S_DRAMREFENABLE)
+#define G_DRAMREFENABLE(x) (((x) >> S_DRAMREFENABLE) & M_DRAMREFENABLE)
+
#define A_EDC_H_DBG_MA_CMD_INTF 0x50300
#define S_MCMDADDR 12
@@ -62372,12 +74009,51 @@
#define V_REFCNT(x) ((x) << S_REFCNT)
#define G_REFCNT(x) (((x) >> S_REFCNT) & M_REFCNT)
+#define A_EDC_H_PAR_CAUSE 0x50404
+
+#define S_STG_CMDQ_PARERR_CAUSE 7
+#define V_STG_CMDQ_PARERR_CAUSE(x) ((x) << S_STG_CMDQ_PARERR_CAUSE)
+#define F_STG_CMDQ_PARERR_CAUSE V_STG_CMDQ_PARERR_CAUSE(1U)
+
+#define S_STG_WRDQ_PARERR_CAUSE 6
+#define V_STG_WRDQ_PARERR_CAUSE(x) ((x) << S_STG_WRDQ_PARERR_CAUSE)
+#define F_STG_WRDQ_PARERR_CAUSE V_STG_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_CMDQ_PARERR_CAUSE 5
+#define V_INP_CMDQ_PARERR_CAUSE(x) ((x) << S_INP_CMDQ_PARERR_CAUSE)
+#define F_INP_CMDQ_PARERR_CAUSE V_INP_CMDQ_PARERR_CAUSE(1U)
+
+#define S_INP_WRDQ_PARERR_CAUSE 4
+#define V_INP_WRDQ_PARERR_CAUSE(x) ((x) << S_INP_WRDQ_PARERR_CAUSE)
+#define F_INP_WRDQ_PARERR_CAUSE V_INP_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_BEQ_PARERR_CAUSE 3
+#define V_INP_BEQ_PARERR_CAUSE(x) ((x) << S_INP_BEQ_PARERR_CAUSE)
+#define F_INP_BEQ_PARERR_CAUSE V_INP_BEQ_PARERR_CAUSE(1U)
+
+#define S_ECC_CE_PAR_ENABLE_CAUSE 2
+#define V_ECC_CE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_CE_PAR_ENABLE_CAUSE)
+#define F_ECC_CE_PAR_ENABLE_CAUSE V_ECC_CE_PAR_ENABLE_CAUSE(1U)
+
+#define S_ECC_UE_PAR_ENABLE_CAUSE 1
+#define V_ECC_UE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_UE_PAR_ENABLE_CAUSE)
+#define F_ECC_UE_PAR_ENABLE_CAUSE V_ECC_UE_PAR_ENABLE_CAUSE(1U)
+
+#define S_RDDQ_PARERR_CAUSE 0
+#define V_RDDQ_PARERR_CAUSE(x) ((x) << S_RDDQ_PARERR_CAUSE)
+#define F_RDDQ_PARERR_CAUSE V_RDDQ_PARERR_CAUSE(1U)
+
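/*
 * A minimal sketch of decoding A_EDC_H_PAR_CAUSE with the single-bit F_
 * masks above.  It takes the raw cause value as a parameter so it stays
 * independent of any particular register-read helper; the function name
 * is illustrative and the messages simply echo the macro names.
 */
#include <stdint.h>
#include <stdio.h>

static void
edc_h_show_par_cause(uint32_t cause)
{
	if (cause & F_STG_CMDQ_PARERR_CAUSE)
		printf("EDC_H: STG_CMDQ parity error\n");
	if (cause & F_INP_CMDQ_PARERR_CAUSE)
		printf("EDC_H: INP_CMDQ parity error\n");
	if (cause & F_ECC_UE_PAR_ENABLE_CAUSE)
		printf("EDC_H: uncorrectable ECC error\n");
	if (cause & F_RDDQ_PARERR_CAUSE)
		printf("EDC_H: RDDQ parity error\n");
}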
/* registers for module EDC_T61 */
#define EDC_T61_BASE_ADDR 0x50800
/* registers for module HMA_T6 */
#define HMA_T6_BASE_ADDR 0x51000
+#define S_T7_CLIENT_EN 0
+#define M_T7_CLIENT_EN 0x7fffU
+#define V_T7_CLIENT_EN(x) ((x) << S_T7_CLIENT_EN)
+#define G_T7_CLIENT_EN(x) (((x) >> S_T7_CLIENT_EN) & M_T7_CLIENT_EN)
+
#define S_TPH 12
#define M_TPH 0x3U
#define V_TPH(x) ((x) << S_TPH)
@@ -62398,6 +74074,14 @@
#define V_OP_MODE(x) ((x) << S_OP_MODE)
#define F_OP_MODE V_OP_MODE(1U)
+#define S_GK_ENABLE 30
+#define V_GK_ENABLE(x) ((x) << S_GK_ENABLE)
+#define F_GK_ENABLE V_GK_ENABLE(1U)
+
+#define S_DBGCNTRST 29
+#define V_DBGCNTRST(x) ((x) << S_DBGCNTRST)
+#define F_DBGCNTRST V_DBGCNTRST(1U)
+
#define A_HMA_TLB_ACCESS 0x51028
#define S_INV_ALL 29
@@ -62437,6 +74121,11 @@
#define V_REGION(x) ((x) << S_REGION)
#define G_REGION(x) (((x) >> S_REGION) & M_REGION)
+#define S_T7_VA 8
+#define M_T7_VA 0xffffffU
+#define V_T7_VA(x) ((x) << S_T7_VA)
+#define G_T7_VA(x) (((x) >> S_T7_VA) & M_T7_VA)
+
#define A_HMA_TLB_DESC_0_H 0x51030
#define A_HMA_TLB_DESC_0_L 0x51034
#define A_HMA_TLB_DESC_1_H 0x51038
@@ -62460,6 +74149,11 @@
#define V_ADDR0_MIN(x) ((x) << S_ADDR0_MIN)
#define G_ADDR0_MIN(x) (((x) >> S_ADDR0_MIN) & M_ADDR0_MIN)
+#define S_REG0MINADDR0MIN 8
+#define M_REG0MINADDR0MIN 0xffffffU
+#define V_REG0MINADDR0MIN(x) ((x) << S_REG0MINADDR0MIN)
+#define G_REG0MINADDR0MIN(x) (((x) >> S_REG0MINADDR0MIN) & M_REG0MINADDR0MIN)
+
#define A_HMA_REG0_MAX 0x51074
#define S_ADDR0_MAX 12
@@ -62467,6 +74161,11 @@
#define V_ADDR0_MAX(x) ((x) << S_ADDR0_MAX)
#define G_ADDR0_MAX(x) (((x) >> S_ADDR0_MAX) & M_ADDR0_MAX)
+#define S_REG0MAXADDR0MAX 8
+#define M_REG0MAXADDR0MAX 0xffffffU
+#define V_REG0MAXADDR0MAX(x) ((x) << S_REG0MAXADDR0MAX)
+#define G_REG0MAXADDR0MAX(x) (((x) >> S_REG0MAXADDR0MAX) & M_REG0MAXADDR0MAX)
+
#define A_HMA_REG0_MASK 0x51078
#define S_PAGE_SIZE0 12
@@ -62475,6 +74174,7 @@
#define G_PAGE_SIZE0(x) (((x) >> S_PAGE_SIZE0) & M_PAGE_SIZE0)
#define A_HMA_REG0_BASE 0x5107c
+#define A_HMA_REG0_BASE_LSB 0x5107c
#define A_HMA_REG1_MIN 0x51080
#define S_ADDR1_MIN 12
@@ -62482,6 +74182,11 @@
#define V_ADDR1_MIN(x) ((x) << S_ADDR1_MIN)
#define G_ADDR1_MIN(x) (((x) >> S_ADDR1_MIN) & M_ADDR1_MIN)
+#define S_REG1MINADDR1MIN 8
+#define M_REG1MINADDR1MIN 0xffffffU
+#define V_REG1MINADDR1MIN(x) ((x) << S_REG1MINADDR1MIN)
+#define G_REG1MINADDR1MIN(x) (((x) >> S_REG1MINADDR1MIN) & M_REG1MINADDR1MIN)
+
#define A_HMA_REG1_MAX 0x51084
#define S_ADDR1_MAX 12
@@ -62489,6 +74194,11 @@
#define V_ADDR1_MAX(x) ((x) << S_ADDR1_MAX)
#define G_ADDR1_MAX(x) (((x) >> S_ADDR1_MAX) & M_ADDR1_MAX)
+#define S_REG1MAXADDR1MAX 8
+#define M_REG1MAXADDR1MAX 0xffffffU
+#define V_REG1MAXADDR1MAX(x) ((x) << S_REG1MAXADDR1MAX)
+#define G_REG1MAXADDR1MAX(x) (((x) >> S_REG1MAXADDR1MAX) & M_REG1MAXADDR1MAX)
+
#define A_HMA_REG1_MASK 0x51088
#define S_PAGE_SIZE1 12
@@ -62497,6 +74207,7 @@
#define G_PAGE_SIZE1(x) (((x) >> S_PAGE_SIZE1) & M_PAGE_SIZE1)
#define A_HMA_REG1_BASE 0x5108c
+#define A_HMA_REG1_BASE_LSB 0x5108c
#define A_HMA_REG2_MIN 0x51090
#define S_ADDR2_MIN 12
@@ -62504,6 +74215,11 @@
#define V_ADDR2_MIN(x) ((x) << S_ADDR2_MIN)
#define G_ADDR2_MIN(x) (((x) >> S_ADDR2_MIN) & M_ADDR2_MIN)
+#define S_REG2MINADDR2MIN 8
+#define M_REG2MINADDR2MIN 0xffffffU
+#define V_REG2MINADDR2MIN(x) ((x) << S_REG2MINADDR2MIN)
+#define G_REG2MINADDR2MIN(x) (((x) >> S_REG2MINADDR2MIN) & M_REG2MINADDR2MIN)
+
#define A_HMA_REG2_MAX 0x51094
#define S_ADDR2_MAX 12
@@ -62511,6 +74227,11 @@
#define V_ADDR2_MAX(x) ((x) << S_ADDR2_MAX)
#define G_ADDR2_MAX(x) (((x) >> S_ADDR2_MAX) & M_ADDR2_MAX)
+#define S_REG2MAXADDR2MAX 8
+#define M_REG2MAXADDR2MAX 0xffffffU
+#define V_REG2MAXADDR2MAX(x) ((x) << S_REG2MAXADDR2MAX)
+#define G_REG2MAXADDR2MAX(x) (((x) >> S_REG2MAXADDR2MAX) & M_REG2MAXADDR2MAX)
+
#define A_HMA_REG2_MASK 0x51098
#define S_PAGE_SIZE2 12
@@ -62519,6 +74240,7 @@
#define G_PAGE_SIZE2(x) (((x) >> S_PAGE_SIZE2) & M_PAGE_SIZE2)
#define A_HMA_REG2_BASE 0x5109c
+#define A_HMA_REG2_BASE_LSB 0x5109c
#define A_HMA_REG3_MIN 0x510a0
#define S_ADDR3_MIN 12
@@ -62526,6 +74248,11 @@
#define V_ADDR3_MIN(x) ((x) << S_ADDR3_MIN)
#define G_ADDR3_MIN(x) (((x) >> S_ADDR3_MIN) & M_ADDR3_MIN)
+#define S_REG3MINADDR3MIN 8
+#define M_REG3MINADDR3MIN 0xffffffU
+#define V_REG3MINADDR3MIN(x) ((x) << S_REG3MINADDR3MIN)
+#define G_REG3MINADDR3MIN(x) (((x) >> S_REG3MINADDR3MIN) & M_REG3MINADDR3MIN)
+
#define A_HMA_REG3_MAX 0x510a4
#define S_ADDR3_MAX 12
@@ -62533,6 +74260,11 @@
#define V_ADDR3_MAX(x) ((x) << S_ADDR3_MAX)
#define G_ADDR3_MAX(x) (((x) >> S_ADDR3_MAX) & M_ADDR3_MAX)
+#define S_REG3MAXADDR3MAX 8
+#define M_REG3MAXADDR3MAX 0xffffffU
+#define V_REG3MAXADDR3MAX(x) ((x) << S_REG3MAXADDR3MAX)
+#define G_REG3MAXADDR3MAX(x) (((x) >> S_REG3MAXADDR3MAX) & M_REG3MAXADDR3MAX)
+
#define A_HMA_REG3_MASK 0x510a8
#define S_PAGE_SIZE3 12
@@ -62541,6 +74273,7 @@
#define G_PAGE_SIZE3(x) (((x) >> S_PAGE_SIZE3) & M_PAGE_SIZE3)
#define A_HMA_REG3_BASE 0x510ac
+#define A_HMA_REG3_BASE_LSB 0x510ac
#define A_HMA_SW_SYNC 0x510b0
#define S_ENTER_SYNC 31
@@ -62551,6 +74284,84 @@
#define V_EXIT_SYNC(x) ((x) << S_EXIT_SYNC)
#define F_EXIT_SYNC V_EXIT_SYNC(1U)
+#define A_HMA_GC_MODE_SEL 0x510b4
+
+#define S_MODE_SEL 8
+#define M_MODE_SEL 0x3U
+#define V_MODE_SEL(x) ((x) << S_MODE_SEL)
+#define G_MODE_SEL(x) (((x) >> S_MODE_SEL) & M_MODE_SEL)
+
+#define S_FLUSH_REQ 4
+#define V_FLUSH_REQ(x) ((x) << S_FLUSH_REQ)
+#define F_FLUSH_REQ V_FLUSH_REQ(1U)
+
+#define S_CLEAR_REQ 0
+#define V_CLEAR_REQ(x) ((x) << S_CLEAR_REQ)
+#define F_CLEAR_REQ V_CLEAR_REQ(1U)
+
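/*
 * A sketch of a read-modify-write on the 2-bit MODE_SEL field of
 * A_HMA_GC_MODE_SEL: clear the field with its V_/M_ pair, merge the new
 * value, and leave FLUSH_REQ/CLEAR_REQ untouched.  Helper name is
 * illustrative.
 */
#include <stdint.h>

static inline uint32_t
hma_gc_set_mode(uint32_t regval, uint32_t mode)
{
	regval &= ~V_MODE_SEL(M_MODE_SEL);	/* drop the old mode */
	return (regval | V_MODE_SEL(mode & M_MODE_SEL));
}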
+#define A_HMA_REG0_BASE_MSB 0x510b8
+
+#define S_BASE0_MSB 0
+#define M_BASE0_MSB 0xfU
+#define V_BASE0_MSB(x) ((x) << S_BASE0_MSB)
+#define G_BASE0_MSB(x) (((x) >> S_BASE0_MSB) & M_BASE0_MSB)
+
+#define A_HMA_REG1_BASE_MSB 0x510bc
+
+#define S_BASE1_MSB 0
+#define M_BASE1_MSB 0xfU
+#define V_BASE1_MSB(x) ((x) << S_BASE1_MSB)
+#define G_BASE1_MSB(x) (((x) >> S_BASE1_MSB) & M_BASE1_MSB)
+
+#define A_HMA_REG2_BASE_MSB 0x510c0
+
+#define S_BASE2_MSB 0
+#define M_BASE2_MSB 0xfU
+#define V_BASE2_MSB(x) ((x) << S_BASE2_MSB)
+#define G_BASE2_MSB(x) (((x) >> S_BASE2_MSB) & M_BASE2_MSB)
+
+#define A_HMA_REG3_BASE_MSB 0x510c4
+
+#define S_BASE3_MSB 0
+#define M_BASE3_MSB 0xfU
+#define V_BASE3_MSB(x) ((x) << S_BASE3_MSB)
+#define G_BASE3_MSB(x) (((x) >> S_BASE3_MSB) & M_BASE3_MSB)
+
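/*
 * The HMA region bases are split across two registers, e.g.
 * A_HMA_REG0_BASE_LSB plus the 4-bit BASE0_MSB field above.  A hedged
 * sketch of recombining them; that the MSB nibble sits directly above
 * bit 31 of the LSB word is an assumption drawn from the names, not
 * stated by this header.
 */
#include <stdint.h>

static inline uint64_t
hma_reg0_base(uint32_t lsb, uint32_t msb)
{
	return (((uint64_t)G_BASE0_MSB(msb) << 32) | lsb);
}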
+#define A_HMA_DBG_CTL 0x51104
+#define A_HMA_DBG_DATA 0x51108
+#define A_HMA_H_BIST_CMD 0x51200
+#define A_HMA_H_BIST_CMD_ADDR 0x51204
+#define A_HMA_H_BIST_CMD_LEN 0x51208
+#define A_HMA_H_BIST_DATA_PATTERN 0x5120c
+#define A_HMA_H_BIST_USER_WDATA0 0x51210
+#define A_HMA_H_BIST_USER_WDATA1 0x51214
+#define A_HMA_H_BIST_USER_WDATA2 0x51218
+#define A_HMA_H_BIST_NUM_ERR 0x5121c
+#define A_HMA_H_BIST_ERR_FIRST_ADDR 0x51220
+#define A_HMA_H_BIST_STATUS_RDATA 0x51224
+#define A_HMA_H_BIST_CRC_SEED 0x5126c
+#define A_HMA_TABLE_LINE1_MSB 0x51270
+
+#define S_STARTA 0
+#define M_STARTA 0xfU
+#define V_STARTA(x) ((x) << S_STARTA)
+#define G_STARTA(x) (((x) >> S_STARTA) & M_STARTA)
+
+#define A_HMA_TABLE_LINE2_MSB 0x51274
+
+#define S_ENDA 0
+#define M_ENDA 0xfU
+#define V_ENDA(x) ((x) << S_ENDA)
+#define G_ENDA(x) (((x) >> S_ENDA) & M_ENDA)
+
+#define S_GK_UF_PAR_ENABLE 6
+#define V_GK_UF_PAR_ENABLE(x) ((x) << S_GK_UF_PAR_ENABLE)
+#define F_GK_UF_PAR_ENABLE V_GK_UF_PAR_ENABLE(1U)
+
+#define S_PCIEMST_PAR_ENABLE 2
+#define V_PCIEMST_PAR_ENABLE(x) ((x) << S_PCIEMST_PAR_ENABLE)
+#define F_PCIEMST_PAR_ENABLE V_PCIEMST_PAR_ENABLE(1U)
+
#define S_IDTF_INT_ENABLE 5
#define V_IDTF_INT_ENABLE(x) ((x) << S_IDTF_INT_ENABLE)
#define F_IDTF_INT_ENABLE V_IDTF_INT_ENABLE(1U)
@@ -62571,6 +74382,10 @@
#define V_MAMST_INT_ENABLE(x) ((x) << S_MAMST_INT_ENABLE)
#define F_MAMST_INT_ENABLE V_MAMST_INT_ENABLE(1U)
+#define S_GK_UF_INT_ENABLE 6
+#define V_GK_UF_INT_ENABLE(x) ((x) << S_GK_UF_INT_ENABLE)
+#define F_GK_UF_INT_ENABLE V_GK_UF_INT_ENABLE(1U)
+
#define S_IDTF_INT_CAUSE 5
#define V_IDTF_INT_CAUSE(x) ((x) << S_IDTF_INT_CAUSE)
#define F_IDTF_INT_CAUSE V_IDTF_INT_CAUSE(1U)
@@ -62591,6 +74406,10 @@
#define V_MAMST_INT_CAUSE(x) ((x) << S_MAMST_INT_CAUSE)
#define F_MAMST_INT_CAUSE V_MAMST_INT_CAUSE(1U)
+#define S_GK_UF_INT_CAUSE 6
+#define V_GK_UF_INT_CAUSE(x) ((x) << S_GK_UF_INT_CAUSE)
+#define F_GK_UF_INT_CAUSE V_GK_UF_INT_CAUSE(1U)
+
#define A_HMA_MA_MST_ERR 0x5130c
#define A_HMA_RTF_ERR 0x51310
#define A_HMA_OTF_ERR 0x51314
@@ -62904,3 +74723,12365 @@
#define M_RD_EOP_CNT 0xffU
#define V_RD_EOP_CNT(x) ((x) << S_RD_EOP_CNT)
#define G_RD_EOP_CNT(x) (((x) >> S_RD_EOP_CNT) & M_RD_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 16
+#define M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 8
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+
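/*
 * The PCIe SOP/EOP debug counter register packs four 8-bit counters;
 * the three fields added above cover WR_EOP, RD_SOP and RD_EOP.  A
 * sketch unpacking them from one raw register value; the function name
 * is illustrative.
 */
#include <stdint.h>

static inline void
pcie_sop_eop_counts(uint32_t v, uint8_t *wr_eop, uint8_t *rd_sop,
    uint8_t *rd_eop)
{
	*wr_eop = G_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(v);
	*rd_sop = G_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(v);
	*rd_eop = G_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(v);
}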
+/* registers for module MAC_T7 */
+#define MAC_T7_BASE_ADDR 0x38000
+
+#define S_T7_PORT_MAP 21
+#define M_T7_PORT_MAP 0x7U
+#define V_T7_PORT_MAP(x) ((x) << S_T7_PORT_MAP)
+#define G_T7_PORT_MAP(x) (((x) >> S_T7_PORT_MAP) & M_T7_PORT_MAP)
+
+#define S_T7_SMUX_RX_LOOP 17
+#define M_T7_SMUX_RX_LOOP 0xfU
+#define V_T7_SMUX_RX_LOOP(x) ((x) << S_T7_SMUX_RX_LOOP)
+#define G_T7_SMUX_RX_LOOP(x) (((x) >> S_T7_SMUX_RX_LOOP) & M_T7_SMUX_RX_LOOP)
+
+#define S_T7_SIGNAL_DET 15
+#define V_T7_SIGNAL_DET(x) ((x) << S_T7_SIGNAL_DET)
+#define F_T7_SIGNAL_DET V_T7_SIGNAL_DET(1U)
+
+#define S_CFG_MAC_2_MPS_FULL 13
+#define V_CFG_MAC_2_MPS_FULL(x) ((x) << S_CFG_MAC_2_MPS_FULL)
+#define F_CFG_MAC_2_MPS_FULL V_CFG_MAC_2_MPS_FULL(1U)
+
+#define S_MPS_FULL_SEL 12
+#define V_MPS_FULL_SEL(x) ((x) << S_MPS_FULL_SEL)
+#define F_MPS_FULL_SEL V_MPS_FULL_SEL(1U)
+
+#define S_T7_SMUXTXSEL 8
+#define M_T7_SMUXTXSEL 0xfU
+#define V_T7_SMUXTXSEL(x) ((x) << S_T7_SMUXTXSEL)
+#define G_T7_SMUXTXSEL(x) (((x) >> S_T7_SMUXTXSEL) & M_T7_SMUXTXSEL)
+
+#define S_T7_PORTSPEED 4
+#define M_T7_PORTSPEED 0xfU
+#define V_T7_PORTSPEED(x) ((x) << S_T7_PORTSPEED)
+#define G_T7_PORTSPEED(x) (((x) >> S_T7_PORTSPEED) & M_T7_PORTSPEED)
+
+#define S_MTIP_REG_RESET 25
+#define V_MTIP_REG_RESET(x) ((x) << S_MTIP_REG_RESET)
+#define F_MTIP_REG_RESET V_MTIP_REG_RESET(1U)
+
+#define S_RESET_REG_CLK_I 24
+#define V_RESET_REG_CLK_I(x) ((x) << S_RESET_REG_CLK_I)
+#define F_RESET_REG_CLK_I V_RESET_REG_CLK_I(1U)
+
+#define S_T7_LED1_CFG1 15
+#define M_T7_LED1_CFG1 0x7U
+#define V_T7_LED1_CFG1(x) ((x) << S_T7_LED1_CFG1)
+#define G_T7_LED1_CFG1(x) (((x) >> S_T7_LED1_CFG1) & M_T7_LED1_CFG1)
+
+#define S_T7_LED0_CFG1 12
+#define M_T7_LED0_CFG1 0x7U
+#define V_T7_LED0_CFG1(x) ((x) << S_T7_LED0_CFG1)
+#define G_T7_LED0_CFG1(x) (((x) >> S_T7_LED0_CFG1) & M_T7_LED0_CFG1)
+
+#define A_T7_MAC_PORT_MAGIC_MACID_LO 0x820
+#define A_T7_MAC_PORT_MAGIC_MACID_HI 0x824
+#define A_T7_MAC_PORT_LINK_STATUS 0x828
+
+#define S_EGR_SE_CNT_ERR 9
+#define V_EGR_SE_CNT_ERR(x) ((x) << S_EGR_SE_CNT_ERR)
+#define F_EGR_SE_CNT_ERR V_EGR_SE_CNT_ERR(1U)
+
+#define S_INGR_SE_CNT_ERR 8
+#define V_INGR_SE_CNT_ERR(x) ((x) << S_INGR_SE_CNT_ERR)
+#define F_INGR_SE_CNT_ERR V_INGR_SE_CNT_ERR(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN_100G 0x82c
+
+#define S_PERR_PCSR_FDM_3 21
+#define V_PERR_PCSR_FDM_3(x) ((x) << S_PERR_PCSR_FDM_3)
+#define F_PERR_PCSR_FDM_3 V_PERR_PCSR_FDM_3(1U)
+
+#define S_PERR_PCSR_FDM_2 20
+#define V_PERR_PCSR_FDM_2(x) ((x) << S_PERR_PCSR_FDM_2)
+#define F_PERR_PCSR_FDM_2 V_PERR_PCSR_FDM_2(1U)
+
+#define S_PERR_PCSR_FDM_1 19
+#define V_PERR_PCSR_FDM_1(x) ((x) << S_PERR_PCSR_FDM_1)
+#define F_PERR_PCSR_FDM_1 V_PERR_PCSR_FDM_1(1U)
+
+#define S_PERR_PCSR_FDM_0 18
+#define V_PERR_PCSR_FDM_0(x) ((x) << S_PERR_PCSR_FDM_0)
+#define F_PERR_PCSR_FDM_0 V_PERR_PCSR_FDM_0(1U)
+
+#define S_PERR_PCSR_FM_3 17
+#define V_PERR_PCSR_FM_3(x) ((x) << S_PERR_PCSR_FM_3)
+#define F_PERR_PCSR_FM_3 V_PERR_PCSR_FM_3(1U)
+
+#define S_PERR_PCSR_FM_2 16
+#define V_PERR_PCSR_FM_2(x) ((x) << S_PERR_PCSR_FM_2)
+#define F_PERR_PCSR_FM_2 V_PERR_PCSR_FM_2(1U)
+
+#define S_PERR_PCSR_FM_1 15
+#define V_PERR_PCSR_FM_1(x) ((x) << S_PERR_PCSR_FM_1)
+#define F_PERR_PCSR_FM_1 V_PERR_PCSR_FM_1(1U)
+
+#define S_PERR_PCSR_FM_0 14
+#define V_PERR_PCSR_FM_0(x) ((x) << S_PERR_PCSR_FM_0)
+#define F_PERR_PCSR_FM_0 V_PERR_PCSR_FM_0(1U)
+
+#define S_PERR_PCSR_DM_1 13
+#define V_PERR_PCSR_DM_1(x) ((x) << S_PERR_PCSR_DM_1)
+#define F_PERR_PCSR_DM_1 V_PERR_PCSR_DM_1(1U)
+
+#define S_PERR_PCSR_DM_0 12
+#define V_PERR_PCSR_DM_0(x) ((x) << S_PERR_PCSR_DM_0)
+#define F_PERR_PCSR_DM_0 V_PERR_PCSR_DM_0(1U)
+
+#define S_PERR_PCSR_DK_3 11
+#define V_PERR_PCSR_DK_3(x) ((x) << S_PERR_PCSR_DK_3)
+#define F_PERR_PCSR_DK_3 V_PERR_PCSR_DK_3(1U)
+
+#define S_PERR_PCSR_DK_2 10
+#define V_PERR_PCSR_DK_2(x) ((x) << S_PERR_PCSR_DK_2)
+#define F_PERR_PCSR_DK_2 V_PERR_PCSR_DK_2(1U)
+
+#define S_PERR_PCSR_DK_1 9
+#define V_PERR_PCSR_DK_1(x) ((x) << S_PERR_PCSR_DK_1)
+#define F_PERR_PCSR_DK_1 V_PERR_PCSR_DK_1(1U)
+
+#define S_PERR_PCSR_DK_0 8
+#define V_PERR_PCSR_DK_0(x) ((x) << S_PERR_PCSR_DK_0)
+#define F_PERR_PCSR_DK_0 V_PERR_PCSR_DK_0(1U)
+
+#define S_PERR_F91RO_1 7
+#define V_PERR_F91RO_1(x) ((x) << S_PERR_F91RO_1)
+#define F_PERR_F91RO_1 V_PERR_F91RO_1(1U)
+
+#define S_PERR_F91RO_0 6
+#define V_PERR_F91RO_0(x) ((x) << S_PERR_F91RO_0)
+#define F_PERR_F91RO_0 V_PERR_F91RO_0(1U)
+
+#define S_PERR_PCSR_F91DM 5
+#define V_PERR_PCSR_F91DM(x) ((x) << S_PERR_PCSR_F91DM)
+#define F_PERR_PCSR_F91DM V_PERR_PCSR_F91DM(1U)
+
+#define S_PERR_PCSR_F91TI 4
+#define V_PERR_PCSR_F91TI(x) ((x) << S_PERR_PCSR_F91TI)
+#define F_PERR_PCSR_F91TI V_PERR_PCSR_F91TI(1U)
+
+#define S_PERR_PCSR_F91TO 3
+#define V_PERR_PCSR_F91TO(x) ((x) << S_PERR_PCSR_F91TO)
+#define F_PERR_PCSR_F91TO V_PERR_PCSR_F91TO(1U)
+
+#define S_PERR_PCSR_F91M 2
+#define V_PERR_PCSR_F91M(x) ((x) << S_PERR_PCSR_F91M)
+#define F_PERR_PCSR_F91M V_PERR_PCSR_F91M(1U)
+
+#define S_PERR_PCSR_80_16_1 1
+#define V_PERR_PCSR_80_16_1(x) ((x) << S_PERR_PCSR_80_16_1)
+#define F_PERR_PCSR_80_16_1 V_PERR_PCSR_80_16_1(1U)
+
+#define S_PERR_PCSR_80_16_0 0
+#define V_PERR_PCSR_80_16_0(x) ((x) << S_PERR_PCSR_80_16_0)
+#define F_PERR_PCSR_80_16_0 V_PERR_PCSR_80_16_0(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_CAUSE_100G 0x830
+#define A_T7_MAC_PORT_PERR_ENABLE_100G 0x834
+#define A_MAC_PORT_MAC10G100G_CONFIG_0 0x838
+
+#define S_PEER_DELAY_VAL 31
+#define V_PEER_DELAY_VAL(x) ((x) << S_PEER_DELAY_VAL)
+#define F_PEER_DELAY_VAL V_PEER_DELAY_VAL(1U)
+
+#define S_PEER_DELAY 1
+#define M_PEER_DELAY 0x3fffffffU
+#define V_PEER_DELAY(x) ((x) << S_PEER_DELAY)
+#define G_PEER_DELAY(x) (((x) >> S_PEER_DELAY) & M_PEER_DELAY)
+
+#define S_MODE1S_ENA 0
+#define V_MODE1S_ENA(x) ((x) << S_MODE1S_ENA)
+#define F_MODE1S_ENA V_MODE1S_ENA(1U)
+
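/*
 * Composing A_MAC_PORT_MAC10G100G_CONFIG_0: the 30-bit PEER_DELAY field
 * is qualified by the PEER_DELAY_VAL bit.  A sketch only -- the unit of
 * the delay value is not given by this header, and the function name is
 * illustrative.
 */
#include <stdint.h>

static inline uint32_t
mac_cfg0_peer_delay(uint32_t delay)
{
	return (F_PEER_DELAY_VAL | V_PEER_DELAY(delay & M_PEER_DELAY));
}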
+#define A_MAC_PORT_MAC10G100G_CONFIG_1 0x83c
+
+#define S_TX_STOP 25
+#define V_TX_STOP(x) ((x) << S_TX_STOP)
+#define F_TX_STOP V_TX_STOP(1U)
+
+#define S_T7_MODE1S_ENA 24
+#define V_T7_MODE1S_ENA(x) ((x) << S_T7_MODE1S_ENA)
+#define F_T7_MODE1S_ENA V_T7_MODE1S_ENA(1U)
+
+#define S_TX_TS_ID 12
+#define M_TX_TS_ID 0xfffU
+#define V_TX_TS_ID(x) ((x) << S_TX_TS_ID)
+#define G_TX_TS_ID(x) (((x) >> S_TX_TS_ID) & M_TX_TS_ID)
+
+#define S_T7_TX_LI_FAULT 11
+#define V_T7_TX_LI_FAULT(x) ((x) << S_T7_TX_LI_FAULT)
+#define F_T7_TX_LI_FAULT V_T7_TX_LI_FAULT(1U)
+
+#define S_XOFF_GEN 3
+#define M_XOFF_GEN 0xffU
+#define V_XOFF_GEN(x) ((x) << S_XOFF_GEN)
+#define G_XOFF_GEN(x) (((x) >> S_XOFF_GEN) & M_XOFF_GEN)
+
+#define S_TX_REM_FAULT 1
+#define V_TX_REM_FAULT(x) ((x) << S_TX_REM_FAULT)
+#define F_TX_REM_FAULT V_TX_REM_FAULT(1U)
+
+#define S_TX_LOC_FAULT 0
+#define V_TX_LOC_FAULT(x) ((x) << S_TX_LOC_FAULT)
+#define F_TX_LOC_FAULT V_TX_LOC_FAULT(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_2 0x840
+
+#define S_FF_TX_RX_TS_NS 0
+#define M_FF_TX_RX_TS_NS 0x3fffffffU
+#define V_FF_TX_RX_TS_NS(x) ((x) << S_FF_TX_RX_TS_NS)
+#define G_FF_TX_RX_TS_NS(x) (((x) >> S_FF_TX_RX_TS_NS) & M_FF_TX_RX_TS_NS)
+
+#define A_MAC_PORT_MAC10G100G_STATUS 0x844
+
+#define S_REG_LOWP 21
+#define V_REG_LOWP(x) ((x) << S_REG_LOWP)
+#define F_REG_LOWP V_REG_LOWP(1U)
+
+#define S_LI_FAULT 20
+#define V_LI_FAULT(x) ((x) << S_LI_FAULT)
+#define F_LI_FAULT V_LI_FAULT(1U)
+
+#define S_TX_ISIDLE 19
+#define V_TX_ISIDLE(x) ((x) << S_TX_ISIDLE)
+#define F_TX_ISIDLE V_TX_ISIDLE(1U)
+
+#define S_TX_UNDERFLOW 18
+#define V_TX_UNDERFLOW(x) ((x) << S_TX_UNDERFLOW)
+#define F_TX_UNDERFLOW V_TX_UNDERFLOW(1U)
+
+#define S_T7_TX_EMPTY 17
+#define V_T7_TX_EMPTY(x) ((x) << S_T7_TX_EMPTY)
+#define F_T7_TX_EMPTY V_T7_TX_EMPTY(1U)
+
+#define S_T7_1_REM_FAULT 16
+#define V_T7_1_REM_FAULT(x) ((x) << S_T7_1_REM_FAULT)
+#define F_T7_1_REM_FAULT V_T7_1_REM_FAULT(1U)
+
+#define S_REG_TS_AVAIL 15
+#define V_REG_TS_AVAIL(x) ((x) << S_REG_TS_AVAIL)
+#define F_REG_TS_AVAIL V_REG_TS_AVAIL(1U)
+
+#define S_T7_PHY_TXENA 14
+#define V_T7_PHY_TXENA(x) ((x) << S_T7_PHY_TXENA)
+#define F_T7_PHY_TXENA V_T7_PHY_TXENA(1U)
+
+#define S_T7_PFC_MODE 13
+#define V_T7_PFC_MODE(x) ((x) << S_T7_PFC_MODE)
+#define F_T7_PFC_MODE V_T7_PFC_MODE(1U)
+
+#define S_PAUSE_ON 5
+#define M_PAUSE_ON 0xffU
+#define V_PAUSE_ON(x) ((x) << S_PAUSE_ON)
+#define G_PAUSE_ON(x) (((x) >> S_PAUSE_ON) & M_PAUSE_ON)
+
+#define S_MAC_PAUSE_EN 4
+#define V_MAC_PAUSE_EN(x) ((x) << S_MAC_PAUSE_EN)
+#define F_MAC_PAUSE_EN V_MAC_PAUSE_EN(1U)
+
+#define S_MAC_ENABLE 3
+#define V_MAC_ENABLE(x) ((x) << S_MAC_ENABLE)
+#define F_MAC_ENABLE V_MAC_ENABLE(1U)
+
+#define S_LOOP_ENA 2
+#define V_LOOP_ENA(x) ((x) << S_LOOP_ENA)
+#define F_LOOP_ENA V_LOOP_ENA(1U)
+
+#define S_LOC_FAULT 1
+#define V_LOC_FAULT(x) ((x) << S_LOC_FAULT)
+#define F_LOC_FAULT V_LOC_FAULT(1U)
+
+#define S_FF_RX_EMPTY 0
+#define V_FF_RX_EMPTY(x) ((x) << S_FF_RX_EMPTY)
+#define F_FF_RX_EMPTY V_FF_RX_EMPTY(1U)
+
+#define A_MAC_PORT_MAC_AN_STATE_STATUS0 0x848
+
+#define S_AN_VAL_AN 15
+#define V_AN_VAL_AN(x) ((x) << S_AN_VAL_AN)
+#define F_AN_VAL_AN V_AN_VAL_AN(1U)
+
+#define S_AN_TR_DIS_STATUS_AN 14
+#define V_AN_TR_DIS_STATUS_AN(x) ((x) << S_AN_TR_DIS_STATUS_AN)
+#define F_AN_TR_DIS_STATUS_AN V_AN_TR_DIS_STATUS_AN(1U)
+
+#define S_AN_STATUS_AN 13
+#define V_AN_STATUS_AN(x) ((x) << S_AN_STATUS_AN)
+#define F_AN_STATUS_AN V_AN_STATUS_AN(1U)
+
+#define S_AN_SELECT_AN 8
+#define M_AN_SELECT_AN 0x1fU
+#define V_AN_SELECT_AN(x) ((x) << S_AN_SELECT_AN)
+#define G_AN_SELECT_AN(x) (((x) >> S_AN_SELECT_AN) & M_AN_SELECT_AN)
+
+#define S_AN_RS_FEC_ENA_AN 7
+#define V_AN_RS_FEC_ENA_AN(x) ((x) << S_AN_RS_FEC_ENA_AN)
+#define F_AN_RS_FEC_ENA_AN V_AN_RS_FEC_ENA_AN(1U)
+
+#define S_AN_INT_AN 6
+#define V_AN_INT_AN(x) ((x) << S_AN_INT_AN)
+#define F_AN_INT_AN V_AN_INT_AN(1U)
+
+#define S_AN_FEC_ENA_AN 5
+#define V_AN_FEC_ENA_AN(x) ((x) << S_AN_FEC_ENA_AN)
+#define F_AN_FEC_ENA_AN V_AN_FEC_ENA_AN(1U)
+
+#define S_AN_DONE_AN 4
+#define V_AN_DONE_AN(x) ((x) << S_AN_DONE_AN)
+#define F_AN_DONE_AN V_AN_DONE_AN(1U)
+
+#define S_AN_STATE 0
+#define M_AN_STATE 0xfU
+#define V_AN_STATE(x) ((x) << S_AN_STATE)
+#define G_AN_STATE(x) (((x) >> S_AN_STATE) & M_AN_STATE)
+
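/*
 * Unpacking A_MAC_PORT_MAC_AN_STATE_STATUS0: single-bit results come
 * from the F_ masks, the 4-bit state machine value from G_AN_STATE().
 * The encodings of AN_STATE are not documented here, so this sketch
 * only extracts them; helper names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
mac_an_done(uint32_t status)
{
	return ((status & F_AN_DONE_AN) != 0);
}

static inline unsigned int
mac_an_state(uint32_t status)
{
	return (G_AN_STATE(status));
}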
+#define A_MAC_PORT_MAC_AN_STATE_STATUS1 0x84c
+#define A_T7_MAC_PORT_EPIO_DATA0 0x850
+#define A_T7_MAC_PORT_EPIO_DATA1 0x854
+#define A_T7_MAC_PORT_EPIO_DATA2 0x858
+#define A_T7_MAC_PORT_EPIO_DATA3 0x85c
+#define A_T7_MAC_PORT_EPIO_OP 0x860
+#define A_T7_MAC_PORT_WOL_STATUS 0x864
+#define A_T7_MAC_PORT_INT_EN 0x868
+
+#define S_MAC2MPS_PERR 31
+#define V_MAC2MPS_PERR(x) ((x) << S_MAC2MPS_PERR)
+#define F_MAC2MPS_PERR V_MAC2MPS_PERR(1U)
+
+#define S_MAC_PPS_INT_EN 30
+#define V_MAC_PPS_INT_EN(x) ((x) << S_MAC_PPS_INT_EN)
+#define F_MAC_PPS_INT_EN V_MAC_PPS_INT_EN(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_EN 29
+#define V_MAC_TX_TS_AVAIL_INT_EN(x) ((x) << S_MAC_TX_TS_AVAIL_INT_EN)
+#define F_MAC_TX_TS_AVAIL_INT_EN V_MAC_TX_TS_AVAIL_INT_EN(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_EN 28
+#define V_MAC_SINGLE_ALARM_INT_EN(x) ((x) << S_MAC_SINGLE_ALARM_INT_EN)
+#define F_MAC_SINGLE_ALARM_INT_EN V_MAC_SINGLE_ALARM_INT_EN(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_EN 27
+#define V_MAC_PERIODIC_ALARM_INT_EN(x) ((x) << S_MAC_PERIODIC_ALARM_INT_EN)
+#define F_MAC_PERIODIC_ALARM_INT_EN V_MAC_PERIODIC_ALARM_INT_EN(1U)
+
+#define S_MAC_PATDETWAKE_INT_EN 26
+#define V_MAC_PATDETWAKE_INT_EN(x) ((x) << S_MAC_PATDETWAKE_INT_EN)
+#define F_MAC_PATDETWAKE_INT_EN V_MAC_PATDETWAKE_INT_EN(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_EN 25
+#define V_MAC_MAGIC_WAKE_INT_EN(x) ((x) << S_MAC_MAGIC_WAKE_INT_EN)
+#define F_MAC_MAGIC_WAKE_INT_EN V_MAC_MAGIC_WAKE_INT_EN(1U)
+
+#define S_MAC_SIGDETCHG_INT_EN 24
+#define V_MAC_SIGDETCHG_INT_EN(x) ((x) << S_MAC_SIGDETCHG_INT_EN)
+#define F_MAC_SIGDETCHG_INT_EN V_MAC_SIGDETCHG_INT_EN(1U)
+
+#define S_MAC_PCS_LINK_GOOD_EN 12
+#define V_MAC_PCS_LINK_GOOD_EN(x) ((x) << S_MAC_PCS_LINK_GOOD_EN)
+#define F_MAC_PCS_LINK_GOOD_EN V_MAC_PCS_LINK_GOOD_EN(1U)
+
+#define S_MAC_PCS_LINK_FAIL_EN 11
+#define V_MAC_PCS_LINK_FAIL_EN(x) ((x) << S_MAC_PCS_LINK_FAIL_EN)
+#define F_MAC_PCS_LINK_FAIL_EN V_MAC_PCS_LINK_FAIL_EN(1U)
+
+#define S_MAC_OVRFLOW_INT_EN 10
+#define V_MAC_OVRFLOW_INT_EN(x) ((x) << S_MAC_OVRFLOW_INT_EN)
+#define F_MAC_OVRFLOW_INT_EN V_MAC_OVRFLOW_INT_EN(1U)
+
+#define S_MAC_REM_FAULT_INT_EN 7
+#define V_MAC_REM_FAULT_INT_EN(x) ((x) << S_MAC_REM_FAULT_INT_EN)
+#define F_MAC_REM_FAULT_INT_EN V_MAC_REM_FAULT_INT_EN(1U)
+
+#define S_MAC_LOC_FAULT_INT_EN 6
+#define V_MAC_LOC_FAULT_INT_EN(x) ((x) << S_MAC_LOC_FAULT_INT_EN)
+#define F_MAC_LOC_FAULT_INT_EN V_MAC_LOC_FAULT_INT_EN(1U)
+
+#define S_MAC_LINK_DOWN_INT_EN 5
+#define V_MAC_LINK_DOWN_INT_EN(x) ((x) << S_MAC_LINK_DOWN_INT_EN)
+#define F_MAC_LINK_DOWN_INT_EN V_MAC_LINK_DOWN_INT_EN(1U)
+
+#define S_MAC_LINK_UP_INT_EN 4
+#define V_MAC_LINK_UP_INT_EN(x) ((x) << S_MAC_LINK_UP_INT_EN)
+#define F_MAC_LINK_UP_INT_EN V_MAC_LINK_UP_INT_EN(1U)
+
+#define S_MAC_AN_DONE_INT_EN 3
+#define V_MAC_AN_DONE_INT_EN(x) ((x) << S_MAC_AN_DONE_INT_EN)
+#define F_MAC_AN_DONE_INT_EN V_MAC_AN_DONE_INT_EN(1U)
+
+#define S_MAC_AN_PGRD_INT_EN 2
+#define V_MAC_AN_PGRD_INT_EN(x) ((x) << S_MAC_AN_PGRD_INT_EN)
+#define F_MAC_AN_PGRD_INT_EN V_MAC_AN_PGRD_INT_EN(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_EN 1
+#define V_MAC_TXFIFO_ERR_INT_EN(x) ((x) << S_MAC_TXFIFO_ERR_INT_EN)
+#define F_MAC_TXFIFO_ERR_INT_EN V_MAC_TXFIFO_ERR_INT_EN(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_EN 0
+#define V_MAC_RXFIFO_ERR_INT_EN(x) ((x) << S_MAC_RXFIFO_ERR_INT_EN)
+#define F_MAC_RXFIFO_ERR_INT_EN V_MAC_RXFIFO_ERR_INT_EN(1U)
+
+#define A_T7_MAC_PORT_INT_CAUSE 0x86c
+
+#define S_MAC2MPS_PERR_CAUSE 31
+#define V_MAC2MPS_PERR_CAUSE(x) ((x) << S_MAC2MPS_PERR_CAUSE)
+#define F_MAC2MPS_PERR_CAUSE V_MAC2MPS_PERR_CAUSE(1U)
+
+#define S_MAC_PPS_INT_CAUSE 30
+#define V_MAC_PPS_INT_CAUSE(x) ((x) << S_MAC_PPS_INT_CAUSE)
+#define F_MAC_PPS_INT_CAUSE V_MAC_PPS_INT_CAUSE(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_CAUSE 29
+#define V_MAC_TX_TS_AVAIL_INT_CAUSE(x) ((x) << S_MAC_TX_TS_AVAIL_INT_CAUSE)
+#define F_MAC_TX_TS_AVAIL_INT_CAUSE V_MAC_TX_TS_AVAIL_INT_CAUSE(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_CAUSE 28
+#define V_MAC_SINGLE_ALARM_INT_CAUSE(x) ((x) << S_MAC_SINGLE_ALARM_INT_CAUSE)
+#define F_MAC_SINGLE_ALARM_INT_CAUSE V_MAC_SINGLE_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_CAUSE 27
+#define V_MAC_PERIODIC_ALARM_INT_CAUSE(x) ((x) << S_MAC_PERIODIC_ALARM_INT_CAUSE)
+#define F_MAC_PERIODIC_ALARM_INT_CAUSE V_MAC_PERIODIC_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PATDETWAKE_INT_CAUSE 26
+#define V_MAC_PATDETWAKE_INT_CAUSE(x) ((x) << S_MAC_PATDETWAKE_INT_CAUSE)
+#define F_MAC_PATDETWAKE_INT_CAUSE V_MAC_PATDETWAKE_INT_CAUSE(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_CAUSE 25
+#define V_MAC_MAGIC_WAKE_INT_CAUSE(x) ((x) << S_MAC_MAGIC_WAKE_INT_CAUSE)
+#define F_MAC_MAGIC_WAKE_INT_CAUSE V_MAC_MAGIC_WAKE_INT_CAUSE(1U)
+
+#define S_MAC_SIGDETCHG_INT_CAUSE 24
+#define V_MAC_SIGDETCHG_INT_CAUSE(x) ((x) << S_MAC_SIGDETCHG_INT_CAUSE)
+#define F_MAC_SIGDETCHG_INT_CAUSE V_MAC_SIGDETCHG_INT_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_GOOD_CAUSE 12
+#define V_MAC_PCS_LINK_GOOD_CAUSE(x) ((x) << S_MAC_PCS_LINK_GOOD_CAUSE)
+#define F_MAC_PCS_LINK_GOOD_CAUSE V_MAC_PCS_LINK_GOOD_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_FAIL_CAUSE 11
+#define V_MAC_PCS_LINK_FAIL_CAUSE(x) ((x) << S_MAC_PCS_LINK_FAIL_CAUSE)
+#define F_MAC_PCS_LINK_FAIL_CAUSE V_MAC_PCS_LINK_FAIL_CAUSE(1U)
+
+#define S_MAC_OVRFLOW_INT_CAUSE 10
+#define V_MAC_OVRFLOW_INT_CAUSE(x) ((x) << S_MAC_OVRFLOW_INT_CAUSE)
+#define F_MAC_OVRFLOW_INT_CAUSE V_MAC_OVRFLOW_INT_CAUSE(1U)
+
+#define S_MAC_REM_FAULT_INT_CAUSE 7
+#define V_MAC_REM_FAULT_INT_CAUSE(x) ((x) << S_MAC_REM_FAULT_INT_CAUSE)
+#define F_MAC_REM_FAULT_INT_CAUSE V_MAC_REM_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LOC_FAULT_INT_CAUSE 6
+#define V_MAC_LOC_FAULT_INT_CAUSE(x) ((x) << S_MAC_LOC_FAULT_INT_CAUSE)
+#define F_MAC_LOC_FAULT_INT_CAUSE V_MAC_LOC_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LINK_DOWN_INT_CAUSE 5
+#define V_MAC_LINK_DOWN_INT_CAUSE(x) ((x) << S_MAC_LINK_DOWN_INT_CAUSE)
+#define F_MAC_LINK_DOWN_INT_CAUSE V_MAC_LINK_DOWN_INT_CAUSE(1U)
+
+#define S_MAC_LINK_UP_INT_CAUSE 4
+#define V_MAC_LINK_UP_INT_CAUSE(x) ((x) << S_MAC_LINK_UP_INT_CAUSE)
+#define F_MAC_LINK_UP_INT_CAUSE V_MAC_LINK_UP_INT_CAUSE(1U)
+
+#define S_MAC_AN_DONE_INT_CAUSE 3
+#define V_MAC_AN_DONE_INT_CAUSE(x) ((x) << S_MAC_AN_DONE_INT_CAUSE)
+#define F_MAC_AN_DONE_INT_CAUSE V_MAC_AN_DONE_INT_CAUSE(1U)
+
+#define S_MAC_AN_PGRD_INT_CAUSE 2
+#define V_MAC_AN_PGRD_INT_CAUSE(x) ((x) << S_MAC_AN_PGRD_INT_CAUSE)
+#define F_MAC_AN_PGRD_INT_CAUSE V_MAC_AN_PGRD_INT_CAUSE(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_CAUSE 1
+#define V_MAC_TXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_TXFIFO_ERR_INT_CAUSE)
+#define F_MAC_TXFIFO_ERR_INT_CAUSE V_MAC_TXFIFO_ERR_INT_CAUSE(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_CAUSE 0
+#define V_MAC_RXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_RXFIFO_ERR_INT_CAUSE)
+#define F_MAC_RXFIFO_ERR_INT_CAUSE V_MAC_RXFIFO_ERR_INT_CAUSE(1U)
+
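/*
 * A sketch of servicing A_T7_MAC_PORT_INT_CAUSE.  It assumes
 * cxgbe-style t4_read_reg()/t4_write_reg() helpers (declared here, not
 * taken from the driver) and the usual write-back-to-clear convention
 * for *_INT_CAUSE registers; both are assumptions.
 */
#include <stdint.h>

struct adapter;				/* opaque driver handle */
extern uint32_t t4_read_reg(struct adapter *, uint32_t);
extern void t4_write_reg(struct adapter *, uint32_t, uint32_t);

static void
mac_port_intr(struct adapter *sc)
{
	uint32_t cause = t4_read_reg(sc, A_T7_MAC_PORT_INT_CAUSE);

	if (cause & F_MAC_LINK_UP_INT_CAUSE)
		;	/* link came up */
	if (cause & F_MAC_LINK_DOWN_INT_CAUSE)
		;	/* link went down */
	t4_write_reg(sc, A_T7_MAC_PORT_INT_CAUSE, cause);	/* ack */
}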
+#define A_T7_MAC_PORT_PERR_INT_EN 0x870
+#define A_T7_MAC_PORT_PERR_INT_CAUSE 0x874
+#define A_T7_MAC_PORT_PERR_ENABLE 0x878
+#define A_T7_MAC_PORT_PERR_INJECT 0x87c
+
+#define S_T7_MEMSEL_PERR 1
+#define M_T7_MEMSEL_PERR 0xffU
+#define V_T7_MEMSEL_PERR(x) ((x) << S_T7_MEMSEL_PERR)
+#define G_T7_MEMSEL_PERR(x) (((x) >> S_T7_MEMSEL_PERR) & M_T7_MEMSEL_PERR)
+
+#define A_T7_MAC_PORT_RUNT_FRAME 0x880
+#define A_T7_MAC_PORT_EEE_STATUS 0x884
+#define A_T7_MAC_PORT_TX_TS_ID 0x888
+
+#define S_TS_ID_MSB 3
+#define V_TS_ID_MSB(x) ((x) << S_TS_ID_MSB)
+#define F_TS_ID_MSB V_TS_ID_MSB(1U)
+
+#define A_T7_MAC_PORT_TX_TS_VAL_LO 0x88c
+#define A_T7_MAC_PORT_TX_TS_VAL_HI 0x890
+#define A_T7_MAC_PORT_EEE_CTL 0x894
+#define A_T7_MAC_PORT_EEE_TX_CTL 0x898
+#define A_T7_MAC_PORT_EEE_RX_CTL 0x89c
+#define A_T7_MAC_PORT_EEE_TX_10G_SLEEP_TIMER 0x8a0
+#define A_T7_MAC_PORT_EEE_TX_10G_QUIET_TIMER 0x8a4
+#define A_T7_MAC_PORT_EEE_TX_10G_WAKE_TIMER 0x8a8
+#define A_T7_MAC_PORT_EEE_RX_10G_QUIET_TIMER 0x8b8
+#define A_T7_MAC_PORT_EEE_RX_10G_WAKE_TIMER 0x8bc
+#define A_T7_MAC_PORT_EEE_RX_10G_WF_TIMER 0x8c0
+#define A_T7_MAC_PORT_EEE_WF_COUNT 0x8cc
+#define A_MAC_PORT_WOL_EN 0x8d0
+
+#define S_WOL_ENABLE 1
+#define V_WOL_ENABLE(x) ((x) << S_WOL_ENABLE)
+#define F_WOL_ENABLE V_WOL_ENABLE(1U)
+
+#define S_WOL_INDICATOR 0
+#define V_WOL_INDICATOR(x) ((x) << S_WOL_INDICATOR)
+#define F_WOL_INDICATOR V_WOL_INDICATOR(1U)
+
+#define A_MAC_PORT_INT_TRACE 0x8d4
+
+#define S_INTERRUPT 0
+#define M_INTERRUPT 0x7fffffffU
+#define V_INTERRUPT(x) ((x) << S_INTERRUPT)
+#define G_INTERRUPT(x) (((x) >> S_INTERRUPT) & M_INTERRUPT)
+
+#define A_MAC_PORT_TRACE_TS_LO 0x8d8
+#define A_MAC_PORT_TRACE_TS_HI 0x8dc
+#define A_MAC_PORT_MTIP_10G100G_REVISION 0x900
+
+#define S_VER_10G100G 8
+#define M_VER_10G100G 0xffU
+#define V_VER_10G100G(x) ((x) << S_VER_10G100G)
+#define G_VER_10G100G(x) (((x) >> S_VER_10G100G) & M_VER_10G100G)
+
+#define S_REV_10G100G 0
+#define M_REV_10G100G 0xffU
+#define V_REV_10G100G(x) ((x) << S_REV_10G100G)
+#define G_REV_10G100G(x) (((x) >> S_REV_10G100G) & M_REV_10G100G)
+
+#define A_MAC_PORT_MTIP_10G100G_SCRATCH 0x904
+#define A_MAC_PORT_MTIP_10G100G_COMMAND_CONFIG 0x908
+
+#define S_NO_PREAM 31
+#define V_NO_PREAM(x) ((x) << S_NO_PREAM)
+#define F_NO_PREAM V_NO_PREAM(1U)
+
+#define S_SHORT_PREAM 30
+#define V_SHORT_PREAM(x) ((x) << S_SHORT_PREAM)
+#define F_SHORT_PREAM V_SHORT_PREAM(1U)
+
+#define S_FLT_HDL_DIS 27
+#define V_FLT_HDL_DIS(x) ((x) << S_FLT_HDL_DIS)
+#define F_FLT_HDL_DIS V_FLT_HDL_DIS(1U)
+
+#define S_TX_FIFO_RESET 26
+#define V_TX_FIFO_RESET(x) ((x) << S_TX_FIFO_RESET)
+#define F_TX_FIFO_RESET V_TX_FIFO_RESET(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_0 0x90c
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_1 0x910
+#define A_MAC_PORT_MTIP_10G100G_FRM_LENGTH_TX_MTU 0x914
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_SECTIONS 0x91c
+
+#define S_RX10G100G_EMPTY 16
+#define M_RX10G100G_EMPTY 0xffffU
+#define V_RX10G100G_EMPTY(x) ((x) << S_RX10G100G_EMPTY)
+#define G_RX10G100G_EMPTY(x) (((x) >> S_RX10G100G_EMPTY) & M_RX10G100G_EMPTY)
+
+#define S_RX10G100G_AVAIL 0
+#define M_RX10G100G_AVAIL 0xffffU
+#define V_RX10G100G_AVAIL(x) ((x) << S_RX10G100G_AVAIL)
+#define G_RX10G100G_AVAIL(x) (((x) >> S_RX10G100G_AVAIL) & M_RX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_SECTIONS 0x920
+
+#define S_TX10G100G_EMPTY 16
+#define M_TX10G100G_EMPTY 0xffffU
+#define V_TX10G100G_EMPTY(x) ((x) << S_TX10G100G_EMPTY)
+#define G_TX10G100G_EMPTY(x) (((x) >> S_TX10G100G_EMPTY) & M_TX10G100G_EMPTY)
+
+#define S_TX10G100G_AVAIL 0
+#define M_TX10G100G_AVAIL 0xffffU
+#define V_TX10G100G_AVAIL(x) ((x) << S_TX10G100G_AVAIL)
+#define G_TX10G100G_AVAIL(x) (((x) >> S_TX10G100G_AVAIL) & M_TX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_ALMOST_F_E 0x924
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_ALMOST_F_E 0x928
+#define A_MAC_PORT_MTIP_10G100G_MDIO_CFG_STATUS 0x930
+#define A_MAC_PORT_MTIP_10G100G_MDIO_COMMAND 0x934
+#define A_MAC_PORT_MTIP_10G100G_MDIO_DATA 0x938
+#define A_MAC_PORT_MTIP_10G100G_MDIO_REGADDR 0x93c
+#define A_MAC_PORT_MTIP_10G100G_STATUS 0x940
+
+#define S_T7_TX_ISIDLE 8
+#define V_T7_TX_ISIDLE(x) ((x) << S_T7_TX_ISIDLE)
+#define F_T7_TX_ISIDLE V_T7_TX_ISIDLE(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_IPG_LENGTH 0x944
+
+#define S_IPG_COMP_CNT 16
+#define M_IPG_COMP_CNT 0xffffU
+#define V_IPG_COMP_CNT(x) ((x) << S_IPG_COMP_CNT)
+#define G_IPG_COMP_CNT(x) (((x) >> S_IPG_COMP_CNT) & M_IPG_COMP_CNT)
+
+#define S_AVG_IPG_LEN 2
+#define M_AVG_IPG_LEN 0xfU
+#define V_AVG_IPG_LEN(x) ((x) << S_AVG_IPG_LEN)
+#define G_AVG_IPG_LEN(x) (((x) >> S_AVG_IPG_LEN) & M_AVG_IPG_LEN)
+
+#define S_DSBL_DIC 0
+#define V_DSBL_DIC(x) ((x) << S_DSBL_DIC)
+#define F_DSBL_DIC V_DSBL_DIC(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_CRC_MODE 0x948
+#define A_MAC_PORT_MTIP_10G100G_CL01_PAUSE_QUANTA 0x954
+#define A_MAC_PORT_MTIP_10G100G_CL23_PAUSE_QUANTA 0x958
+#define A_MAC_PORT_MTIP_10G100G_CL45_PAUSE_QUANTA 0x95c
+#define A_MAC_PORT_MTIP_10G100G_CL67_PAUSE_QUANTA 0x960
+#define A_MAC_PORT_MTIP_10G100G_CL01_QUANTA_THRESH 0x964
+#define A_MAC_PORT_MTIP_10G100G_CL23_QUANTA_THRESH 0x968
+#define A_MAC_PORT_MTIP_10G100G_CL45_QUANTA_THRESH 0x96c
+#define A_MAC_PORT_MTIP_10G100G_CL67_QUANTA_THRESH 0x970
+#define A_MAC_PORT_MTIP_10G100G_RX_PAUSE_STATUS 0x974
+#define A_MAC_PORT_MTIP_10G100G_TS_TIMESTAMP 0x97c
+#define A_MAC_PORT_MTIP_10G100G_XIF_MODE 0x980
+
+#define S_RX_CNT_MODE 16
+#define V_RX_CNT_MODE(x) ((x) << S_RX_CNT_MODE)
+#define F_RX_CNT_MODE V_RX_CNT_MODE(1U)
+
+#define S_TS_UPD64_MODE 12
+#define V_TS_UPD64_MODE(x) ((x) << S_TS_UPD64_MODE)
+#define F_TS_UPD64_MODE V_TS_UPD64_MODE(1U)
+
+#define S_TS_BINARY_MODE 11
+#define V_TS_BINARY_MODE(x) ((x) << S_TS_BINARY_MODE)
+#define F_TS_BINARY_MODE V_TS_BINARY_MODE(1U)
+
+#define S_TS_DELAY_MODE 10
+#define V_TS_DELAY_MODE(x) ((x) << S_TS_DELAY_MODE)
+#define F_TS_DELAY_MODE V_TS_DELAY_MODE(1U)
+
+#define S_TS_DELTA_MODE 9
+#define V_TS_DELTA_MODE(x) ((x) << S_TS_DELTA_MODE)
+#define F_TS_DELTA_MODE V_TS_DELTA_MODE(1U)
+
+#define S_TX_MAC_RS_ERR 8
+#define V_TX_MAC_RS_ERR(x) ((x) << S_TX_MAC_RS_ERR)
+#define F_TX_MAC_RS_ERR V_TX_MAC_RS_ERR(1U)
+
+#define S_RX_PAUSE_BYPASS 6
+#define V_RX_PAUSE_BYPASS(x) ((x) << S_RX_PAUSE_BYPASS)
+#define F_RX_PAUSE_BYPASS V_RX_PAUSE_BYPASS(1U)
+
+#define S_ONE_STEP_ENA 5
+#define V_ONE_STEP_ENA(x) ((x) << S_ONE_STEP_ENA)
+#define F_ONE_STEP_ENA V_ONE_STEP_ENA(1U)
+
+#define S_PAUSETIMERX8 4
+#define V_PAUSETIMERX8(x) ((x) << S_PAUSETIMERX8)
+#define F_PAUSETIMERX8 V_PAUSETIMERX8(1U)
+
+#define S_XGMII_ENA 0
+#define V_XGMII_ENA(x) ((x) << S_XGMII_ENA)
+#define F_XGMII_ENA V_XGMII_ENA(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_1 0xa00
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_1 0xa04
+
+#define S_CR4_0_RX_LINK_STATUS 2
+#define V_CR4_0_RX_LINK_STATUS(x) ((x) << S_CR4_0_RX_LINK_STATUS)
+#define F_CR4_0_RX_LINK_STATUS V_CR4_0_RX_LINK_STATUS(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID0 0xa08
+
+#define S_CR4_0_DEVICE_ID0 0
+#define M_CR4_0_DEVICE_ID0 0xffffU
+#define V_CR4_0_DEVICE_ID0(x) ((x) << S_CR4_0_DEVICE_ID0)
+#define G_CR4_0_DEVICE_ID0(x) (((x) >> S_CR4_0_DEVICE_ID0) & M_CR4_0_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID1 0xa0c
+
+#define S_CR4_0_DEVICE_ID1 0
+#define M_CR4_0_DEVICE_ID1 0xffffU
+#define V_CR4_0_DEVICE_ID1(x) ((x) << S_CR4_0_DEVICE_ID1)
+#define G_CR4_0_DEVICE_ID1(x) (((x) >> S_CR4_0_DEVICE_ID1) & M_CR4_0_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SPEED_ABILITY 0xa10
+
+#define S_50G_CAPABLE 5
+#define V_50G_CAPABLE(x) ((x) << S_50G_CAPABLE)
+#define F_50G_CAPABLE V_50G_CAPABLE(1U)
+
+#define S_25G_CAPABLE 4
+#define V_25G_CAPABLE(x) ((x) << S_25G_CAPABLE)
+#define F_25G_CAPABLE V_25G_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG1 0xa14
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG2 0xa18
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_2 0xa1c
+
+#define S_T7_PCS_TYPE_SELECTION 0
+#define M_T7_PCS_TYPE_SELECTION 0xfU
+#define V_T7_PCS_TYPE_SELECTION(x) ((x) << S_T7_PCS_TYPE_SELECTION)
+#define G_T7_PCS_TYPE_SELECTION(x) (((x) >> S_T7_PCS_TYPE_SELECTION) & M_T7_PCS_TYPE_SELECTION)
+
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_2 0xa20
+
+#define S_50GBASE_R_CAPABLE 8
+#define V_50GBASE_R_CAPABLE(x) ((x) << S_50GBASE_R_CAPABLE)
+#define F_50GBASE_R_CAPABLE V_50GBASE_R_CAPABLE(1U)
+
+#define S_25GBASE_R_CAPABLE 7
+#define V_25GBASE_R_CAPABLE(x) ((x) << S_25GBASE_R_CAPABLE)
+#define F_25GBASE_R_CAPABLE V_25GBASE_R_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID0 0xa38
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID1 0xa3c
+#define A_MAC_PORT_MTIP_CR4_0_EEE_CTRL 0xa50
+
+#define S_50GBASE_R_FW 14
+#define V_50GBASE_R_FW(x) ((x) << S_50GBASE_R_FW)
+#define F_50GBASE_R_FW V_50GBASE_R_FW(1U)
+
+#define S_100GBASE_R_DS 13
+#define V_100GBASE_R_DS(x) ((x) << S_100GBASE_R_DS)
+#define F_100GBASE_R_DS V_100GBASE_R_DS(1U)
+
+#define S_100GBASE_R_FW 12
+#define V_100GBASE_R_FW(x) ((x) << S_100GBASE_R_FW)
+#define F_100GBASE_R_FW V_100GBASE_R_FW(1U)
+
+#define S_25GBASE_R_DS 11
+#define V_25GBASE_R_DS(x) ((x) << S_25GBASE_R_DS)
+#define F_25GBASE_R_DS V_25GBASE_R_DS(1U)
+
+#define S_25GBASE_R_FW 10
+#define V_25GBASE_R_FW(x) ((x) << S_25GBASE_R_FW)
+#define F_25GBASE_R_FW V_25GBASE_R_FW(1U)
+
+#define S_40GBASE_R_DS 9
+#define V_40GBASE_R_DS(x) ((x) << S_40GBASE_R_DS)
+#define F_40GBASE_R_DS V_40GBASE_R_DS(1U)
+
+#define S_40GBASE_R_FW 8
+#define V_40GBASE_R_FW(x) ((x) << S_40GBASE_R_FW)
+#define F_40GBASE_R_FW V_40GBASE_R_FW(1U)
+
+#define S_10GBASE_KE_EEE 6
+#define V_10GBASE_KE_EEE(x) ((x) << S_10GBASE_KE_EEE)
+#define F_10GBASE_KE_EEE V_10GBASE_KE_EEE(1U)
+
+#define S_FAST_WAKE 1
+#define M_FAST_WAKE 0x1fU
+#define V_FAST_WAKE(x) ((x) << S_FAST_WAKE)
+#define G_FAST_WAKE(x) (((x) >> S_FAST_WAKE) & M_FAST_WAKE)
+
+#define S_DEEP_SLEEP 0
+#define V_DEEP_SLEEP(x) ((x) << S_DEEP_SLEEP)
+#define F_DEEP_SLEEP V_DEEP_SLEEP(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_WAKE_ERROR_COUNTER 0xa58
+
+#define S_WAKE_ERROR_COUNTER 0
+#define M_WAKE_ERROR_COUNTER 0x1ffffU
+#define V_WAKE_ERROR_COUNTER(x) ((x) << S_WAKE_ERROR_COUNTER)
+#define G_WAKE_ERROR_COUNTER(x) (((x) >> S_WAKE_ERROR_COUNTER) & M_WAKE_ERROR_COUNTER)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_1 0xa80
+
+#define S_CR4_0_BR_BLOCK_LOCK 0
+#define V_CR4_0_BR_BLOCK_LOCK(x) ((x) << S_CR4_0_BR_BLOCK_LOCK)
+#define F_CR4_0_BR_BLOCK_LOCK V_CR4_0_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_2 0xa84
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_0 0xa88
+
+#define S_SEED_A_0 0
+#define M_SEED_A_0 0xffffU
+#define V_SEED_A_0(x) ((x) << S_SEED_A_0)
+#define G_SEED_A_0(x) (((x) >> S_SEED_A_0) & M_SEED_A_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_1 0xa8c
+
+#define S_SEED_A_1 0
+#define M_SEED_A_1 0xffffU
+#define V_SEED_A_1(x) ((x) << S_SEED_A_1)
+#define G_SEED_A_1(x) (((x) >> S_SEED_A_1) & M_SEED_A_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_2 0xa90
+
+#define S_SEED_A_2 0
+#define M_SEED_A_2 0xffffU
+#define V_SEED_A_2(x) ((x) << S_SEED_A_2)
+#define G_SEED_A_2(x) (((x) >> S_SEED_A_2) & M_SEED_A_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_3 0xa94
+
+#define S_SEED_A_3 0
+#define M_SEED_A_3 0xffffU
+#define V_SEED_A_3(x) ((x) << S_SEED_A_3)
+#define G_SEED_A_3(x) (((x) >> S_SEED_A_3) & M_SEED_A_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_0 0xa98
+
+#define S_SEED_B_0 0
+#define M_SEED_B_0 0xffffU
+#define V_SEED_B_0(x) ((x) << S_SEED_B_0)
+#define G_SEED_B_0(x) (((x) >> S_SEED_B_0) & M_SEED_B_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_1 0xa9c
+
+#define S_SEED_B_1 0
+#define M_SEED_B_1 0xffffU
+#define V_SEED_B_1(x) ((x) << S_SEED_B_1)
+#define G_SEED_B_1(x) (((x) >> S_SEED_B_1) & M_SEED_B_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_2 0xaa0
+
+#define S_SEED_B_2 0
+#define M_SEED_B_2 0xffffU
+#define V_SEED_B_2(x) ((x) << S_SEED_B_2)
+#define G_SEED_B_2(x) (((x) >> S_SEED_B_2) & M_SEED_B_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_3 0xaa4
+
+#define S_SEED_B_3 0
+#define M_SEED_B_3 0xffffU
+#define V_SEED_B_3(x) ((x) << S_SEED_B_3)
+#define G_SEED_B_3(x) (((x) >> S_SEED_B_3) & M_SEED_B_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_PATTERN_CONTROL 0xaa8
+
+#define S_TEST_PATTERN_40G 7
+#define V_TEST_PATTERN_40G(x) ((x) << S_TEST_PATTERN_40G)
+#define F_TEST_PATTERN_40G V_TEST_PATTERN_40G(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_ERR_CNT 0xaac
+#define A_MAC_PORT_MTIP_CR4_0_BER_HIGH_ORDER_CNT 0xab0
+
+#define S_BASE_R_BER_HIGH_ORDER_CNT 0
+#define M_BASE_R_BER_HIGH_ORDER_CNT 0xffffU
+#define V_BASE_R_BER_HIGH_ORDER_CNT(x) ((x) << S_BASE_R_BER_HIGH_ORDER_CNT)
+#define G_BASE_R_BER_HIGH_ORDER_CNT(x) (((x) >> S_BASE_R_BER_HIGH_ORDER_CNT) & M_BASE_R_BER_HIGH_ORDER_CNT)
+
+#define A_MAC_PORT_MTIP_CR4_0_ERR_BLK_HIGH_ORDER_CNT 0xab4
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_1 0xac8
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_2 0xacc
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_3 0xad0
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_4 0xad4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_0 0xad8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_1 0xadc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_2 0xae0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_3 0xae4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_4 0xae8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_5 0xaec
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_6 0xaf0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_7 0xaf4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_8 0xaf8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_9 0xafc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_10 0xb00
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_11 0xb04
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_12 0xb08
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_13 0xb0c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_14 0xb10
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_15 0xb14
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_16 0xb18
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_17 0xb1c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_18 0xb20
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_19 0xb24
+#define A_MAC_PORT_MTIP_CR4_0_LANE_0_MAPPING 0xb28
+#define A_MAC_PORT_MTIP_CR4_0_LANE_1_MAPPING 0xb2c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_2_MAPPING 0xb30
+#define A_MAC_PORT_MTIP_CR4_0_LANE_3_MAPPING 0xb34
+#define A_MAC_PORT_MTIP_CR4_0_LANE_4_MAPPING 0xb38
+#define A_MAC_PORT_MTIP_CR4_0_LANE_5_MAPPING 0xb3c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_6_MAPPING 0xb40
+#define A_MAC_PORT_MTIP_CR4_0_LANE_7_MAPPING 0xb44
+#define A_MAC_PORT_MTIP_CR4_0_LANE_8_MAPPING 0xb48
+#define A_MAC_PORT_MTIP_CR4_0_LANE_9_MAPPING 0xb4c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_10_MAPPING 0xb50
+#define A_MAC_PORT_MTIP_CR4_0_LANE_11_MAPPING 0xb54
+#define A_MAC_PORT_MTIP_CR4_0_LANE_12_MAPPING 0xb58
+#define A_MAC_PORT_MTIP_CR4_0_LANE_13_MAPPING 0xb5c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_14_MAPPING 0xb60
+#define A_MAC_PORT_MTIP_CR4_0_LANE_15_MAPPING 0xb64
+#define A_MAC_PORT_MTIP_CR4_0_LANE_16_MAPPING 0xb68
+#define A_MAC_PORT_MTIP_CR4_0_LANE_17_MAPPING 0xb6c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_18_MAPPING 0xb70
+#define A_MAC_PORT_MTIP_CR4_0_LANE_19_MAPPING 0xb74
+#define A_MAC_PORT_MTIP_CR4_0_SCRATCH 0xb78
+#define A_MAC_PORT_MTIP_CR4_0_CORE_REVISION 0xb7c
+#define A_MAC_PORT_MTIP_CR4_0_VL_INTVL 0xb80
+
+#define S_VL_INTCL 0
+#define M_VL_INTCL 0xffffU
+#define V_VL_INTCL(x) ((x) << S_VL_INTCL)
+#define G_VL_INTCL(x) (((x) >> S_VL_INTCL) & M_VL_INTCL)
+
+#define A_MAC_PORT_MTIP_CR4_0_TX_LANE_THRESH 0xb84
+
+#define S_LANE6_LANE7 12
+#define M_LANE6_LANE7 0xfU
+#define V_LANE6_LANE7(x) ((x) << S_LANE6_LANE7)
+#define G_LANE6_LANE7(x) (((x) >> S_LANE6_LANE7) & M_LANE6_LANE7)
+
+#define S_LANE4_LANE5 8
+#define M_LANE4_LANE5 0xfU
+#define V_LANE4_LANE5(x) ((x) << S_LANE4_LANE5)
+#define G_LANE4_LANE5(x) (((x) >> S_LANE4_LANE5) & M_LANE4_LANE5)
+
+#define S_LANE2_LANE3 4
+#define M_LANE2_LANE3 0xfU
+#define V_LANE2_LANE3(x) ((x) << S_LANE2_LANE3)
+#define G_LANE2_LANE3(x) (((x) >> S_LANE2_LANE3) & M_LANE2_LANE3)
+
+#define S_LANE0_LANE1 0
+#define M_LANE0_LANE1 0xfU
+#define V_LANE0_LANE1(x) ((x) << S_LANE0_LANE1)
+#define G_LANE0_LANE1(x) (((x) >> S_LANE0_LANE1) & M_LANE0_LANE1)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_0 0xb98
+
+#define S_M1 8
+#define M_M1 0xffU
+#define V_M1(x) ((x) << S_M1)
+#define G_M1(x) (((x) >> S_M1) & M_M1)
+
+#define S_M0 0
+#define M_M0 0xffU
+#define V_M0(x) ((x) << S_M0)
+#define G_M0(x) (((x) >> S_M0) & M_M0)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_1 0xb9c
+
+#define S_M2 0
+#define M_M2 0xffU
+#define V_M2(x) ((x) << S_M2)
+#define G_M2(x) (((x) >> S_M2) & M_M2)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL1_0 0xba0
+#define A_MAC_PORT_MTIP_CR4_0_VL1_1 0xba4
+#define A_MAC_PORT_MTIP_CR4_0_VL2_0 0xba8
+#define A_MAC_PORT_MTIP_CR4_0_VL2_1 0xbac
+#define A_MAC_PORT_MTIP_CR4_0_VL3_0 0xbb0
+#define A_MAC_PORT_MTIP_CR4_0_VL3_1 0xbb4
+#define A_MAC_PORT_MTIP_CR4_0_PCS_MODE 0xbb8
+
+#define S_ST_DISABLE_MLD 9
+#define V_ST_DISABLE_MLD(x) ((x) << S_ST_DISABLE_MLD)
+#define F_ST_DISABLE_MLD V_ST_DISABLE_MLD(1U)
+
+#define S_ST_EN_CLAUSE49 8
+#define V_ST_EN_CLAUSE49(x) ((x) << S_ST_EN_CLAUSE49)
+#define F_ST_EN_CLAUSE49 V_ST_EN_CLAUSE49(1U)
+
+#define S_HI_BER25 2
+#define V_HI_BER25(x) ((x) << S_HI_BER25)
+#define F_HI_BER25 V_HI_BER25(1U)
+
+#define S_DISABLE_MLD 1
+#define V_DISABLE_MLD(x) ((x) << S_DISABLE_MLD)
+#define F_DISABLE_MLD V_DISABLE_MLD(1U)
+
+#define S_ENA_CLAUSE49 0
+#define V_ENA_CLAUSE49(x) ((x) << S_ENA_CLAUSE49)
+#define F_ENA_CLAUSE49 V_ENA_CLAUSE49(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL4_0 0xc98
+#define A_MAC_PORT_MTIP_CR4_0_VL4_1 0xc9c
+#define A_MAC_PORT_MTIP_CR4_0_VL5_0 0xca0
+#define A_MAC_PORT_MTIP_CR4_0_VL5_1 0xca4
+#define A_MAC_PORT_MTIP_CR4_0_VL6_0 0xca8
+#define A_MAC_PORT_MTIP_CR4_0_VL6_1 0xcac
+#define A_MAC_PORT_MTIP_CR4_0_VL7_0 0xcb0
+#define A_MAC_PORT_MTIP_CR4_0_VL7_1 0xcb4
+#define A_MAC_PORT_MTIP_CR4_0_VL8_0 0xcb8
+#define A_MAC_PORT_MTIP_CR4_0_VL8_1 0xcbc
+#define A_MAC_PORT_MTIP_CR4_0_VL9_0 0xcc0
+#define A_MAC_PORT_MTIP_CR4_0_VL9_1 0xcc4
+#define A_MAC_PORT_MTIP_CR4_0_VL10_0 0xcc8
+#define A_MAC_PORT_MTIP_CR4_0_VL10_1 0xccc
+#define A_MAC_PORT_MTIP_CR4_0_VL11_0 0xcd0
+#define A_MAC_PORT_MTIP_CR4_0_VL11_1 0xcd4
+#define A_MAC_PORT_MTIP_CR4_0_VL12_0 0xcd8
+#define A_MAC_PORT_MTIP_CR4_0_VL12_1 0xcdc
+#define A_MAC_PORT_MTIP_CR4_0_VL13_0 0xce0
+#define A_MAC_PORT_MTIP_CR4_0_VL13_1 0xce4
+#define A_MAC_PORT_MTIP_CR4_0_VL14_0 0xce8
+#define A_MAC_PORT_MTIP_CR4_0_VL14_1 0xcec
+#define A_MAC_PORT_MTIP_CR4_0_VL15_0 0xcf0
+#define A_MAC_PORT_MTIP_CR4_0_VL15_1 0xcf4
+#define A_MAC_PORT_MTIP_CR4_0_VL16_0 0xcf8
+#define A_MAC_PORT_MTIP_CR4_0_VL16_1 0xcfc
+#define A_MAC_PORT_MTIP_CR4_0_VL17_0 0xd00
+#define A_MAC_PORT_MTIP_CR4_0_VL17_1 0xd04
+#define A_MAC_PORT_MTIP_CR4_0_VL18_0 0xd08
+#define A_MAC_PORT_MTIP_CR4_0_VL18_1 0xd0c
+#define A_MAC_PORT_MTIP_CR4_0_VL19_0 0xd10
+#define A_MAC_PORT_MTIP_CR4_0_VL19_1 0xd14
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_1 0x1000
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_1 0x1004
+
+#define S_CR4_RX_LINK_STATUS_1 2
+#define V_CR4_RX_LINK_STATUS_1(x) ((x) << S_CR4_RX_LINK_STATUS_1)
+#define F_CR4_RX_LINK_STATUS_1 V_CR4_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID0 0x1008
+
+#define S_CR4_1_DEVICE_ID0 0
+#define M_CR4_1_DEVICE_ID0 0xffffU
+#define V_CR4_1_DEVICE_ID0(x) ((x) << S_CR4_1_DEVICE_ID0)
+#define G_CR4_1_DEVICE_ID0(x) (((x) >> S_CR4_1_DEVICE_ID0) & M_CR4_1_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID1 0x100c
+
+#define S_CR4_1_DEVICE_ID1 0
+#define M_CR4_1_DEVICE_ID1 0xffffU
+#define V_CR4_1_DEVICE_ID1(x) ((x) << S_CR4_1_DEVICE_ID1)
+#define G_CR4_1_DEVICE_ID1(x) (((x) >> S_CR4_1_DEVICE_ID1) & M_CR4_1_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_1_SPEED_ABILITY 0x1010
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG1 0x1014
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG2 0x1018
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_2 0x101c
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_2 0x1020
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID0 0x1038
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID1 0x103c
+#define A_MAC_PORT_MTIP_CR4_1_EEE_CTRL 0x1050
+#define A_MAC_PORT_MTIP_CR4_1_WAKE_ERROR_COUNTER 0x1058
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_1 0x1080
+
+#define S_CR4_1_BR_BLOCK_LOCK 0
+#define V_CR4_1_BR_BLOCK_LOCK(x) ((x) << S_CR4_1_BR_BLOCK_LOCK)
+#define F_CR4_1_BR_BLOCK_LOCK V_CR4_1_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_2 0x1084
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_0 0x1088
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_1 0x108c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_2 0x1090
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_3 0x1094
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_0 0x1098
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_1 0x109c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_2 0x10a0
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_3 0x10a4
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_PATTERN_CONTROL 0x10a8
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_ERR_CNT 0x10ac
+#define A_MAC_PORT_MTIP_CR4_1_BER_HIGH_ORDER_CNT 0x10b0
+#define A_MAC_PORT_MTIP_CR4_1_ERR_BLK_HIGH_ORDER_CNT 0x10b4
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_1 0x10c8
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_2 0x10cc
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_3 0x10d0
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_4 0x10d4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_0 0x10d8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_1 0x10dc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_2 0x10e0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_3 0x10e4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_4 0x10e8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_5 0x10ec
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_6 0x10f0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_7 0x10f4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_8 0x10f8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_9 0x10fc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_10 0x1100
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_11 0x1104
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_12 0x1108
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_13 0x110c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_14 0x1110
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_15 0x1114
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_16 0x1118
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_17 0x111c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_18 0x1120
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_19 0x1124
+#define A_MAC_PORT_MTIP_CR4_1_LANE_0_MAPPING 0x1128
+#define A_MAC_PORT_MTIP_CR4_1_LANE_1_MAPPING 0x112c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_2_MAPPING 0x1130
+#define A_MAC_PORT_MTIP_CR4_1_LANE_3_MAPPING 0x1134
+#define A_MAC_PORT_MTIP_CR4_1_LANE_4_MAPPING 0x1138
+#define A_MAC_PORT_MTIP_CR4_1_LANE_5_MAPPING 0x113c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_6_MAPPING 0x1140
+#define A_MAC_PORT_MTIP_CR4_1_LANE_7_MAPPING 0x1144
+#define A_MAC_PORT_MTIP_CR4_1_LANE_8_MAPPING 0x1148
+#define A_MAC_PORT_MTIP_CR4_1_LANE_9_MAPPING 0x114c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_10_MAPPING 0x1150
+#define A_MAC_PORT_MTIP_CR4_1_LANE_11_MAPPING 0x1154
+#define A_MAC_PORT_MTIP_CR4_1_LANE_12_MAPPING 0x1158
+#define A_MAC_PORT_MTIP_CR4_1_LANE_13_MAPPING 0x115c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_14_MAPPING 0x1160
+#define A_MAC_PORT_MTIP_CR4_1_LANE_15_MAPPING 0x1164
+#define A_MAC_PORT_MTIP_CR4_1_LANE_16_MAPPING 0x1168
+#define A_MAC_PORT_MTIP_CR4_1_LANE_17_MAPPING 0x116c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_18_MAPPING 0x1170
+#define A_MAC_PORT_MTIP_CR4_1_LANE_19_MAPPING 0x1174
+#define A_MAC_PORT_MTIP_CR4_1_SCRATCH 0x1178
+#define A_MAC_PORT_MTIP_CR4_1_CORE_REVISION 0x117c
+#define A_MAC_PORT_MTIP_CR4_1_VL_INTVL 0x1180
+#define A_MAC_PORT_MTIP_CR4_1_TX_LANE_THRESH 0x1184
+#define A_MAC_PORT_MTIP_CR4_1_VL0_0 0x1198
+#define A_MAC_PORT_MTIP_CR4_1_VL0_1 0x119c
+#define A_MAC_PORT_MTIP_CR4_1_VL1_0 0x11a0
+#define A_MAC_PORT_MTIP_CR4_1_VL1_1 0x11a4
+#define A_MAC_PORT_MTIP_CR4_1_VL2_0 0x11a8
+#define A_MAC_PORT_MTIP_CR4_1_VL2_1 0x11ac
+#define A_MAC_PORT_MTIP_CR4_1_VL3_0 0x11b0
+#define A_MAC_PORT_MTIP_CR4_1_VL3_1 0x11b4
+#define A_MAC_PORT_MTIP_CR4_1_PCS_MODE 0x11b8
+#define A_MAC_COMMON_CFG_0 0x38000
+
+#define S_T7_RX_POLARITY_INV 24
+#define M_T7_RX_POLARITY_INV 0xffU
+#define V_T7_RX_POLARITY_INV(x) ((x) << S_T7_RX_POLARITY_INV)
+#define G_T7_RX_POLARITY_INV(x) (((x) >> S_T7_RX_POLARITY_INV) & M_T7_RX_POLARITY_INV)
+
+#define S_T7_TX_POLARITY_INV 16
+#define M_T7_TX_POLARITY_INV 0xffU
+#define V_T7_TX_POLARITY_INV(x) ((x) << S_T7_TX_POLARITY_INV)
+#define G_T7_TX_POLARITY_INV(x) (((x) >> S_T7_TX_POLARITY_INV) & M_T7_TX_POLARITY_INV)
+
+#define S_T7_DEBUG_PORT_SEL 14
+#define M_T7_DEBUG_PORT_SEL 0x3U
+#define V_T7_DEBUG_PORT_SEL(x) ((x) << S_T7_DEBUG_PORT_SEL)
+#define G_T7_DEBUG_PORT_SEL(x) (((x) >> S_T7_DEBUG_PORT_SEL) & M_T7_DEBUG_PORT_SEL)
+
+#define S_MAC_SEPTY_CTL 8
+#define M_MAC_SEPTY_CTL 0x3fU
+#define V_MAC_SEPTY_CTL(x) ((x) << S_MAC_SEPTY_CTL)
+#define G_MAC_SEPTY_CTL(x) (((x) >> S_MAC_SEPTY_CTL) & M_MAC_SEPTY_CTL)
+
+#define S_T7_DEBUG_TX_RX_SEL 7
+#define V_T7_DEBUG_TX_RX_SEL(x) ((x) << S_T7_DEBUG_TX_RX_SEL)
+#define F_T7_DEBUG_TX_RX_SEL V_T7_DEBUG_TX_RX_SEL(1U)
+
+#define S_MAC_RDY_CTL 0
+#define M_MAC_RDY_CTL 0x3fU
+#define V_MAC_RDY_CTL(x) ((x) << S_MAC_RDY_CTL)
+#define G_MAC_RDY_CTL(x) (((x) >> S_MAC_RDY_CTL) & M_MAC_RDY_CTL)
+
+#define A_MAC_MTIP_RESET_CTRL_0 0x38004
+
+#define S_RESET_F91_REF_CLK_I 31
+#define V_RESET_F91_REF_CLK_I(x) ((x) << S_RESET_F91_REF_CLK_I)
+#define F_RESET_F91_REF_CLK_I V_RESET_F91_REF_CLK_I(1U)
+
+#define S_RESET_PCS000_REF_CLK_I 30
+#define V_RESET_PCS000_REF_CLK_I(x) ((x) << S_RESET_PCS000_REF_CLK_I)
+#define F_RESET_PCS000_REF_CLK_I V_RESET_PCS000_REF_CLK_I(1U)
+
+#define S_RESET_REF_CLK_I 29
+#define V_RESET_REF_CLK_I(x) ((x) << S_RESET_REF_CLK_I)
+#define F_RESET_REF_CLK_I V_RESET_REF_CLK_I(1U)
+
+#define S_RESET_SD_RX_CLK_I_0 28
+#define V_RESET_SD_RX_CLK_I_0(x) ((x) << S_RESET_SD_RX_CLK_I_0)
+#define F_RESET_SD_RX_CLK_I_0 V_RESET_SD_RX_CLK_I_0(1U)
+
+#define S_RESET_SD_RX_CLK_I_1 27
+#define V_RESET_SD_RX_CLK_I_1(x) ((x) << S_RESET_SD_RX_CLK_I_1)
+#define F_RESET_SD_RX_CLK_I_1 V_RESET_SD_RX_CLK_I_1(1U)
+
+#define S_RESET_SD_RX_CLK_I_2 26
+#define V_RESET_SD_RX_CLK_I_2(x) ((x) << S_RESET_SD_RX_CLK_I_2)
+#define F_RESET_SD_RX_CLK_I_2 V_RESET_SD_RX_CLK_I_2(1U)
+
+#define S_RESET_SD_RX_CLK_I_3 25
+#define V_RESET_SD_RX_CLK_I_3(x) ((x) << S_RESET_SD_RX_CLK_I_3)
+#define F_RESET_SD_RX_CLK_I_3 V_RESET_SD_RX_CLK_I_3(1U)
+
+#define S_RESET_SD_RX_CLK_I_4 24
+#define V_RESET_SD_RX_CLK_I_4(x) ((x) << S_RESET_SD_RX_CLK_I_4)
+#define F_RESET_SD_RX_CLK_I_4 V_RESET_SD_RX_CLK_I_4(1U)
+
+#define S_RESET_SD_RX_CLK_I_5 23
+#define V_RESET_SD_RX_CLK_I_5(x) ((x) << S_RESET_SD_RX_CLK_I_5)
+#define F_RESET_SD_RX_CLK_I_5 V_RESET_SD_RX_CLK_I_5(1U)
+
+#define S_RESET_SD_RX_CLK_I_6 22
+#define V_RESET_SD_RX_CLK_I_6(x) ((x) << S_RESET_SD_RX_CLK_I_6)
+#define F_RESET_SD_RX_CLK_I_6 V_RESET_SD_RX_CLK_I_6(1U)
+
+#define S_RESET_SD_RX_CLK_I_7 21
+#define V_RESET_SD_RX_CLK_I_7(x) ((x) << S_RESET_SD_RX_CLK_I_7)
+#define F_RESET_SD_RX_CLK_I_7 V_RESET_SD_RX_CLK_I_7(1U)
+
+#define S_RESET_SD_TX_CLK_I_0 20
+#define V_RESET_SD_TX_CLK_I_0(x) ((x) << S_RESET_SD_TX_CLK_I_0)
+#define F_RESET_SD_TX_CLK_I_0 V_RESET_SD_TX_CLK_I_0(1U)
+
+#define S_RESET_SD_TX_CLK_I_1 19
+#define V_RESET_SD_TX_CLK_I_1(x) ((x) << S_RESET_SD_TX_CLK_I_1)
+#define F_RESET_SD_TX_CLK_I_1 V_RESET_SD_TX_CLK_I_1(1U)
+
+#define S_RESET_SD_TX_CLK_I_2 18
+#define V_RESET_SD_TX_CLK_I_2(x) ((x) << S_RESET_SD_TX_CLK_I_2)
+#define F_RESET_SD_TX_CLK_I_2 V_RESET_SD_TX_CLK_I_2(1U)
+
+#define S_RESET_SD_TX_CLK_I_3 17
+#define V_RESET_SD_TX_CLK_I_3(x) ((x) << S_RESET_SD_TX_CLK_I_3)
+#define F_RESET_SD_TX_CLK_I_3 V_RESET_SD_TX_CLK_I_3(1U)
+
+#define S_RESET_SD_TX_CLK_I_4 16
+#define V_RESET_SD_TX_CLK_I_4(x) ((x) << S_RESET_SD_TX_CLK_I_4)
+#define F_RESET_SD_TX_CLK_I_4 V_RESET_SD_TX_CLK_I_4(1U)
+
+#define S_RESET_SD_TX_CLK_I_5 15
+#define V_RESET_SD_TX_CLK_I_5(x) ((x) << S_RESET_SD_TX_CLK_I_5)
+#define F_RESET_SD_TX_CLK_I_5 V_RESET_SD_TX_CLK_I_5(1U)
+
+#define S_RESET_SD_TX_CLK_I_6 14
+#define V_RESET_SD_TX_CLK_I_6(x) ((x) << S_RESET_SD_TX_CLK_I_6)
+#define F_RESET_SD_TX_CLK_I_6 V_RESET_SD_TX_CLK_I_6(1U)
+
+#define S_RESET_SD_TX_CLK_I_7 13
+#define V_RESET_SD_TX_CLK_I_7(x) ((x) << S_RESET_SD_TX_CLK_I_7)
+#define F_RESET_SD_TX_CLK_I_7 V_RESET_SD_TX_CLK_I_7(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_0 12
+#define V_RESET_XPCS_REF_CLK_I_0(x) ((x) << S_RESET_XPCS_REF_CLK_I_0)
+#define F_RESET_XPCS_REF_CLK_I_0 V_RESET_XPCS_REF_CLK_I_0(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_1 11
+#define V_RESET_XPCS_REF_CLK_I_1(x) ((x) << S_RESET_XPCS_REF_CLK_I_1)
+#define F_RESET_XPCS_REF_CLK_I_1 V_RESET_XPCS_REF_CLK_I_1(1U)
+
+#define S_RESET_FF_RX_CLK_0_I 9
+#define V_RESET_FF_RX_CLK_0_I(x) ((x) << S_RESET_FF_RX_CLK_0_I)
+#define F_RESET_FF_RX_CLK_0_I V_RESET_FF_RX_CLK_0_I(1U)
+
+#define S_RESET_FF_TX_CLK_0_I 8
+#define V_RESET_FF_TX_CLK_0_I(x) ((x) << S_RESET_FF_TX_CLK_0_I)
+#define F_RESET_FF_TX_CLK_0_I V_RESET_FF_TX_CLK_0_I(1U)
+
+#define S_RESET_RXCLK_0_I 7
+#define V_RESET_RXCLK_0_I(x) ((x) << S_RESET_RXCLK_0_I)
+#define F_RESET_RXCLK_0_I V_RESET_RXCLK_0_I(1U)
+
+#define S_RESET_TXCLK_0_I 6
+#define V_RESET_TXCLK_0_I(x) ((x) << S_RESET_TXCLK_0_I)
+#define F_RESET_TXCLK_0_I V_RESET_TXCLK_0_I(1U)
+
+#define S_RESET_FF_RX_CLK_1_I 5
+#define V_RESET_FF_RX_CLK_1_I(x) ((x) << S_RESET_FF_RX_CLK_1_I)
+#define F_RESET_FF_RX_CLK_1_I V_RESET_FF_RX_CLK_1_I(1U)
+
+#define S_RESET_FF_TX_CLK_1_I 4
+#define V_RESET_FF_TX_CLK_1_I(x) ((x) << S_RESET_FF_TX_CLK_1_I)
+#define F_RESET_FF_TX_CLK_1_I V_RESET_FF_TX_CLK_1_I(1U)
+
+#define S_RESET_RXCLK_1_I 3
+#define V_RESET_RXCLK_1_I(x) ((x) << S_RESET_RXCLK_1_I)
+#define F_RESET_RXCLK_1_I V_RESET_RXCLK_1_I(1U)
+
+#define S_RESET_TXCLK_1_I 2
+#define V_RESET_TXCLK_1_I(x) ((x) << S_RESET_TXCLK_1_I)
+#define F_RESET_TXCLK_1_I V_RESET_TXCLK_1_I(1U)
+
+#define S_XGMII_CLK_RESET_0 0
+#define V_XGMII_CLK_RESET_0(x) ((x) << S_XGMII_CLK_RESET_0)
+#define F_XGMII_CLK_RESET_0 V_XGMII_CLK_RESET_0(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_1 0x38008
+
+#define S_RESET_FF_RX_CLK_2_I 31
+#define V_RESET_FF_RX_CLK_2_I(x) ((x) << S_RESET_FF_RX_CLK_2_I)
+#define F_RESET_FF_RX_CLK_2_I V_RESET_FF_RX_CLK_2_I(1U)
+
+#define S_RESET_FF_TX_CLK_2_I 30
+#define V_RESET_FF_TX_CLK_2_I(x) ((x) << S_RESET_FF_TX_CLK_2_I)
+#define F_RESET_FF_TX_CLK_2_I V_RESET_FF_TX_CLK_2_I(1U)
+
+#define S_RESET_RXCLK_2_I 29
+#define V_RESET_RXCLK_2_I(x) ((x) << S_RESET_RXCLK_2_I)
+#define F_RESET_RXCLK_2_I V_RESET_RXCLK_2_I(1U)
+
+#define S_RESET_TXCLK_2_I 28
+#define V_RESET_TXCLK_2_I(x) ((x) << S_RESET_TXCLK_2_I)
+#define F_RESET_TXCLK_2_I V_RESET_TXCLK_2_I(1U)
+
+#define S_RESET_FF_RX_CLK_3_I 27
+#define V_RESET_FF_RX_CLK_3_I(x) ((x) << S_RESET_FF_RX_CLK_3_I)
+#define F_RESET_FF_RX_CLK_3_I V_RESET_FF_RX_CLK_3_I(1U)
+
+#define S_RESET_FF_TX_CLK_3_I 26
+#define V_RESET_FF_TX_CLK_3_I(x) ((x) << S_RESET_FF_TX_CLK_3_I)
+#define F_RESET_FF_TX_CLK_3_I V_RESET_FF_TX_CLK_3_I(1U)
+
+#define S_RESET_RXCLK_3_I 25
+#define V_RESET_RXCLK_3_I(x) ((x) << S_RESET_RXCLK_3_I)
+#define F_RESET_RXCLK_3_I V_RESET_RXCLK_3_I(1U)
+
+#define S_RESET_TXCLK_3_I 24
+#define V_RESET_TXCLK_3_I(x) ((x) << S_RESET_TXCLK_3_I)
+#define F_RESET_TXCLK_3_I V_RESET_TXCLK_3_I(1U)
+
+#define S_RESET_FF_RX_CLK_4_I 23
+#define V_RESET_FF_RX_CLK_4_I(x) ((x) << S_RESET_FF_RX_CLK_4_I)
+#define F_RESET_FF_RX_CLK_4_I V_RESET_FF_RX_CLK_4_I(1U)
+
+#define S_RESET_FF_TX_CLK_4_I 22
+#define V_RESET_FF_TX_CLK_4_I(x) ((x) << S_RESET_FF_TX_CLK_4_I)
+#define F_RESET_FF_TX_CLK_4_I V_RESET_FF_TX_CLK_4_I(1U)
+
+#define S_RESET_RXCLK_4_I 21
+#define V_RESET_RXCLK_4_I(x) ((x) << S_RESET_RXCLK_4_I)
+#define F_RESET_RXCLK_4_I V_RESET_RXCLK_4_I(1U)
+
+#define S_RESET_TXCLK_4_I 20
+#define V_RESET_TXCLK_4_I(x) ((x) << S_RESET_TXCLK_4_I)
+#define F_RESET_TXCLK_4_I V_RESET_TXCLK_4_I(1U)
+
+#define S_RESET_FF_RX_CLK_5_I 19
+#define V_RESET_FF_RX_CLK_5_I(x) ((x) << S_RESET_FF_RX_CLK_5_I)
+#define F_RESET_FF_RX_CLK_5_I V_RESET_FF_RX_CLK_5_I(1U)
+
+#define S_RESET_FF_TX_CLK_5_I 18
+#define V_RESET_FF_TX_CLK_5_I(x) ((x) << S_RESET_FF_TX_CLK_5_I)
+#define F_RESET_FF_TX_CLK_5_I V_RESET_FF_TX_CLK_5_I(1U)
+
+#define S_RESET_RXCLK_5_I 17
+#define V_RESET_RXCLK_5_I(x) ((x) << S_RESET_RXCLK_5_I)
+#define F_RESET_RXCLK_5_I V_RESET_RXCLK_5_I(1U)
+
+#define S_RESET_TXCLK_5_I 16
+#define V_RESET_TXCLK_5_I(x) ((x) << S_RESET_TXCLK_5_I)
+#define F_RESET_TXCLK_5_I V_RESET_TXCLK_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_0_I 15
+#define V_RESET_SD_RX_CLK_AN_0_I(x) ((x) << S_RESET_SD_RX_CLK_AN_0_I)
+#define F_RESET_SD_RX_CLK_AN_0_I V_RESET_SD_RX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_0_I 14
+#define V_RESET_SD_TX_CLK_AN_0_I(x) ((x) << S_RESET_SD_TX_CLK_AN_0_I)
+#define F_RESET_SD_TX_CLK_AN_0_I V_RESET_SD_TX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_1_I 13
+#define V_RESET_SD_RX_CLK_AN_1_I(x) ((x) << S_RESET_SD_RX_CLK_AN_1_I)
+#define F_RESET_SD_RX_CLK_AN_1_I V_RESET_SD_RX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_1_I 12
+#define V_RESET_SD_TX_CLK_AN_1_I(x) ((x) << S_RESET_SD_TX_CLK_AN_1_I)
+#define F_RESET_SD_TX_CLK_AN_1_I V_RESET_SD_TX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_2_I 11
+#define V_RESET_SD_RX_CLK_AN_2_I(x) ((x) << S_RESET_SD_RX_CLK_AN_2_I)
+#define F_RESET_SD_RX_CLK_AN_2_I V_RESET_SD_RX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_2_I 10
+#define V_RESET_SD_TX_CLK_AN_2_I(x) ((x) << S_RESET_SD_TX_CLK_AN_2_I)
+#define F_RESET_SD_TX_CLK_AN_2_I V_RESET_SD_TX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_3_I 9
+#define V_RESET_SD_RX_CLK_AN_3_I(x) ((x) << S_RESET_SD_RX_CLK_AN_3_I)
+#define F_RESET_SD_RX_CLK_AN_3_I V_RESET_SD_RX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_3_I 8
+#define V_RESET_SD_TX_CLK_AN_3_I(x) ((x) << S_RESET_SD_TX_CLK_AN_3_I)
+#define F_RESET_SD_TX_CLK_AN_3_I V_RESET_SD_TX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_4_I 7
+#define V_RESET_SD_RX_CLK_AN_4_I(x) ((x) << S_RESET_SD_RX_CLK_AN_4_I)
+#define F_RESET_SD_RX_CLK_AN_4_I V_RESET_SD_RX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_4_I 6
+#define V_RESET_SD_TX_CLK_AN_4_I(x) ((x) << S_RESET_SD_TX_CLK_AN_4_I)
+#define F_RESET_SD_TX_CLK_AN_4_I V_RESET_SD_TX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_5_I 5
+#define V_RESET_SD_RX_CLK_AN_5_I(x) ((x) << S_RESET_SD_RX_CLK_AN_5_I)
+#define F_RESET_SD_RX_CLK_AN_5_I V_RESET_SD_RX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_5_I 4
+#define V_RESET_SD_TX_CLK_AN_5_I(x) ((x) << S_RESET_SD_TX_CLK_AN_5_I)
+#define F_RESET_SD_TX_CLK_AN_5_I V_RESET_SD_TX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_6_I 3
+#define V_RESET_SD_RX_CLK_AN_6_I(x) ((x) << S_RESET_SD_RX_CLK_AN_6_I)
+#define F_RESET_SD_RX_CLK_AN_6_I V_RESET_SD_RX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_6_I 2
+#define V_RESET_SD_TX_CLK_AN_6_I(x) ((x) << S_RESET_SD_TX_CLK_AN_6_I)
+#define F_RESET_SD_TX_CLK_AN_6_I V_RESET_SD_TX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_7_I 1
+#define V_RESET_SD_RX_CLK_AN_7_I(x) ((x) << S_RESET_SD_RX_CLK_AN_7_I)
+#define F_RESET_SD_RX_CLK_AN_7_I V_RESET_SD_RX_CLK_AN_7_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_7_I 0
+#define V_RESET_SD_TX_CLK_AN_7_I(x) ((x) << S_RESET_SD_TX_CLK_AN_7_I)
+#define F_RESET_SD_TX_CLK_AN_7_I V_RESET_SD_TX_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_2 0x3800c
+
+#define S_RESET_SGMII_TXCLK_I_3 31
+#define V_RESET_SGMII_TXCLK_I_3(x) ((x) << S_RESET_SGMII_TXCLK_I_3)
+#define F_RESET_SGMII_TXCLK_I_3 V_RESET_SGMII_TXCLK_I_3(1U)
+
+#define S_RESET_SGMII_RXCLK_I_3 30
+#define V_RESET_SGMII_RXCLK_I_3(x) ((x) << S_RESET_SGMII_RXCLK_I_3)
+#define F_RESET_SGMII_RXCLK_I_3 V_RESET_SGMII_RXCLK_I_3(1U)
+
+#define S_RESET_SGMII_TXCLK_I_2 29
+#define V_RESET_SGMII_TXCLK_I_2(x) ((x) << S_RESET_SGMII_TXCLK_I_2)
+#define F_RESET_SGMII_TXCLK_I_2 V_RESET_SGMII_TXCLK_I_2(1U)
+
+#define S_RESET_SGMII_RXCLK_I_2 28
+#define V_RESET_SGMII_RXCLK_I_2(x) ((x) << S_RESET_SGMII_RXCLK_I_2)
+#define F_RESET_SGMII_RXCLK_I_2 V_RESET_SGMII_RXCLK_I_2(1U)
+
+#define S_RESET_SGMII_TXCLK_I_1 27
+#define V_RESET_SGMII_TXCLK_I_1(x) ((x) << S_RESET_SGMII_TXCLK_I_1)
+#define F_RESET_SGMII_TXCLK_I_1 V_RESET_SGMII_TXCLK_I_1(1U)
+
+#define S_RESET_SGMII_RXCLK_I_1 26
+#define V_RESET_SGMII_RXCLK_I_1(x) ((x) << S_RESET_SGMII_RXCLK_I_1)
+#define F_RESET_SGMII_RXCLK_I_1 V_RESET_SGMII_RXCLK_I_1(1U)
+
+#define S_RESET_SGMII_TXCLK_I_0 25
+#define V_RESET_SGMII_TXCLK_I_0(x) ((x) << S_RESET_SGMII_TXCLK_I_0)
+#define F_RESET_SGMII_TXCLK_I_0 V_RESET_SGMII_TXCLK_I_0(1U)
+
+#define S_RESET_SGMII_RXCLK_I_0 24
+#define V_RESET_SGMII_RXCLK_I_0(x) ((x) << S_RESET_SGMII_RXCLK_I_0)
+#define F_RESET_SGMII_RXCLK_I_0 V_RESET_SGMII_RXCLK_I_0(1U)
+
+#define S_MTIPSD7TXRST 23
+#define V_MTIPSD7TXRST(x) ((x) << S_MTIPSD7TXRST)
+#define F_MTIPSD7TXRST V_MTIPSD7TXRST(1U)
+
+#define S_MTIPSD6TXRST 22
+#define V_MTIPSD6TXRST(x) ((x) << S_MTIPSD6TXRST)
+#define F_MTIPSD6TXRST V_MTIPSD6TXRST(1U)
+
+#define S_MTIPSD5TXRST 21
+#define V_MTIPSD5TXRST(x) ((x) << S_MTIPSD5TXRST)
+#define F_MTIPSD5TXRST V_MTIPSD5TXRST(1U)
+
+#define S_MTIPSD4TXRST 20
+#define V_MTIPSD4TXRST(x) ((x) << S_MTIPSD4TXRST)
+#define F_MTIPSD4TXRST V_MTIPSD4TXRST(1U)
+
+#define S_T7_MTIPSD3TXRST 19
+#define V_T7_MTIPSD3TXRST(x) ((x) << S_T7_MTIPSD3TXRST)
+#define F_T7_MTIPSD3TXRST V_T7_MTIPSD3TXRST(1U)
+
+#define S_T7_MTIPSD2TXRST 18
+#define V_T7_MTIPSD2TXRST(x) ((x) << S_T7_MTIPSD2TXRST)
+#define F_T7_MTIPSD2TXRST V_T7_MTIPSD2TXRST(1U)
+
+#define S_T7_MTIPSD1TXRST 17
+#define V_T7_MTIPSD1TXRST(x) ((x) << S_T7_MTIPSD1TXRST)
+#define F_T7_MTIPSD1TXRST V_T7_MTIPSD1TXRST(1U)
+
+#define S_T7_MTIPSD0TXRST 16
+#define V_T7_MTIPSD0TXRST(x) ((x) << S_T7_MTIPSD0TXRST)
+#define F_T7_MTIPSD0TXRST V_T7_MTIPSD0TXRST(1U)
+
+#define S_MTIPSD7RXRST 15
+#define V_MTIPSD7RXRST(x) ((x) << S_MTIPSD7RXRST)
+#define F_MTIPSD7RXRST V_MTIPSD7RXRST(1U)
+
+#define S_MTIPSD6RXRST 14
+#define V_MTIPSD6RXRST(x) ((x) << S_MTIPSD6RXRST)
+#define F_MTIPSD6RXRST V_MTIPSD6RXRST(1U)
+
+#define S_MTIPSD5RXRST 13
+#define V_MTIPSD5RXRST(x) ((x) << S_MTIPSD5RXRST)
+#define F_MTIPSD5RXRST V_MTIPSD5RXRST(1U)
+
+#define S_MTIPSD4RXRST 12
+#define V_MTIPSD4RXRST(x) ((x) << S_MTIPSD4RXRST)
+#define F_MTIPSD4RXRST V_MTIPSD4RXRST(1U)
+
+#define S_T7_MTIPSD3RXRST 11
+#define V_T7_MTIPSD3RXRST(x) ((x) << S_T7_MTIPSD3RXRST)
+#define F_T7_MTIPSD3RXRST V_T7_MTIPSD3RXRST(1U)
+
+#define S_T7_MTIPSD2RXRST 10
+#define V_T7_MTIPSD2RXRST(x) ((x) << S_T7_MTIPSD2RXRST)
+#define F_T7_MTIPSD2RXRST V_T7_MTIPSD2RXRST(1U)
+
+#define S_T7_MTIPSD1RXRST 9
+#define V_T7_MTIPSD1RXRST(x) ((x) << S_T7_MTIPSD1RXRST)
+#define F_T7_MTIPSD1RXRST V_T7_MTIPSD1RXRST(1U)
+
+#define S_T7_MTIPSD0RXRST 8
+#define V_T7_MTIPSD0RXRST(x) ((x) << S_T7_MTIPSD0RXRST)
+#define F_T7_MTIPSD0RXRST V_T7_MTIPSD0RXRST(1U)
+
+#define S_RESET_REG_CLK_AN_0_I 7
+#define V_RESET_REG_CLK_AN_0_I(x) ((x) << S_RESET_REG_CLK_AN_0_I)
+#define F_RESET_REG_CLK_AN_0_I V_RESET_REG_CLK_AN_0_I(1U)
+
+#define S_RESET_REG_CLK_AN_1_I 6
+#define V_RESET_REG_CLK_AN_1_I(x) ((x) << S_RESET_REG_CLK_AN_1_I)
+#define F_RESET_REG_CLK_AN_1_I V_RESET_REG_CLK_AN_1_I(1U)
+
+#define S_RESET_REG_CLK_AN_2_I 5
+#define V_RESET_REG_CLK_AN_2_I(x) ((x) << S_RESET_REG_CLK_AN_2_I)
+#define F_RESET_REG_CLK_AN_2_I V_RESET_REG_CLK_AN_2_I(1U)
+
+#define S_RESET_REG_CLK_AN_3_I 4
+#define V_RESET_REG_CLK_AN_3_I(x) ((x) << S_RESET_REG_CLK_AN_3_I)
+#define F_RESET_REG_CLK_AN_3_I V_RESET_REG_CLK_AN_3_I(1U)
+
+#define S_RESET_REG_CLK_AN_4_I 3
+#define V_RESET_REG_CLK_AN_4_I(x) ((x) << S_RESET_REG_CLK_AN_4_I)
+#define F_RESET_REG_CLK_AN_4_I V_RESET_REG_CLK_AN_4_I(1U)
+
+#define S_RESET_REG_CLK_AN_5_I 2
+#define V_RESET_REG_CLK_AN_5_I(x) ((x) << S_RESET_REG_CLK_AN_5_I)
+#define F_RESET_REG_CLK_AN_5_I V_RESET_REG_CLK_AN_5_I(1U)
+
+#define S_RESET_REG_CLK_AN_6_I 1
+#define V_RESET_REG_CLK_AN_6_I(x) ((x) << S_RESET_REG_CLK_AN_6_I)
+#define F_RESET_REG_CLK_AN_6_I V_RESET_REG_CLK_AN_6_I(1U)
+
+#define S_RESET_REG_CLK_AN_7_I 0
+#define V_RESET_REG_CLK_AN_7_I(x) ((x) << S_RESET_REG_CLK_AN_7_I)
+#define F_RESET_REG_CLK_AN_7_I V_RESET_REG_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_0 0x38010
+
+#define S_F91_REF_CLK_I_G 31
+#define V_F91_REF_CLK_I_G(x) ((x) << S_F91_REF_CLK_I_G)
+#define F_F91_REF_CLK_I_G V_F91_REF_CLK_I_G(1U)
+
+#define S_PCS000_REF_CLK_I_G 30
+#define V_PCS000_REF_CLK_I_G(x) ((x) << S_PCS000_REF_CLK_I_G)
+#define F_PCS000_REF_CLK_I_G V_PCS000_REF_CLK_I_G(1U)
+
+#define S_REF_CLK_I_G 29
+#define V_REF_CLK_I_G(x) ((x) << S_REF_CLK_I_G)
+#define F_REF_CLK_I_G V_REF_CLK_I_G(1U)
+
+#define S_SD_RX_CLK_I_0_G 28
+#define V_SD_RX_CLK_I_0_G(x) ((x) << S_SD_RX_CLK_I_0_G)
+#define F_SD_RX_CLK_I_0_G V_SD_RX_CLK_I_0_G(1U)
+
+#define S_SD_RX_CLK_I_1_G 27
+#define V_SD_RX_CLK_I_1_G(x) ((x) << S_SD_RX_CLK_I_1_G)
+#define F_SD_RX_CLK_I_1_G V_SD_RX_CLK_I_1_G(1U)
+
+#define S_SD_RX_CLK_I_2_G 26
+#define V_SD_RX_CLK_I_2_G(x) ((x) << S_SD_RX_CLK_I_2_G)
+#define F_SD_RX_CLK_I_2_G V_SD_RX_CLK_I_2_G(1U)
+
+#define S_SD_RX_CLK_I_3_G 25
+#define V_SD_RX_CLK_I_3_G(x) ((x) << S_SD_RX_CLK_I_3_G)
+#define F_SD_RX_CLK_I_3_G V_SD_RX_CLK_I_3_G(1U)
+
+#define S_SD_RX_CLK_I_4_G 24
+#define V_SD_RX_CLK_I_4_G(x) ((x) << S_SD_RX_CLK_I_4_G)
+#define F_SD_RX_CLK_I_4_G V_SD_RX_CLK_I_4_G(1U)
+
+#define S_SD_RX_CLK_I_5_G 23
+#define V_SD_RX_CLK_I_5_G(x) ((x) << S_SD_RX_CLK_I_5_G)
+#define F_SD_RX_CLK_I_5_G V_SD_RX_CLK_I_5_G(1U)
+
+#define S_SD_RX_CLK_I_6_G 22
+#define V_SD_RX_CLK_I_6_G(x) ((x) << S_SD_RX_CLK_I_6_G)
+#define F_SD_RX_CLK_I_6_G V_SD_RX_CLK_I_6_G(1U)
+
+#define S_SD_RX_CLK_I_7_G 21
+#define V_SD_RX_CLK_I_7_G(x) ((x) << S_SD_RX_CLK_I_7_G)
+#define F_SD_RX_CLK_I_7_G V_SD_RX_CLK_I_7_G(1U)
+
+#define S_SD_TX_CLK_I_0_G 20
+#define V_SD_TX_CLK_I_0_G(x) ((x) << S_SD_TX_CLK_I_0_G)
+#define F_SD_TX_CLK_I_0_G V_SD_TX_CLK_I_0_G(1U)
+
+#define S_SD_TX_CLK_I_1_G 19
+#define V_SD_TX_CLK_I_1_G(x) ((x) << S_SD_TX_CLK_I_1_G)
+#define F_SD_TX_CLK_I_1_G V_SD_TX_CLK_I_1_G(1U)
+
+#define S_SD_TX_CLK_I_2_G 18
+#define V_SD_TX_CLK_I_2_G(x) ((x) << S_SD_TX_CLK_I_2_G)
+#define F_SD_TX_CLK_I_2_G V_SD_TX_CLK_I_2_G(1U)
+
+#define S_SD_TX_CLK_I_3_G 17
+#define V_SD_TX_CLK_I_3_G(x) ((x) << S_SD_TX_CLK_I_3_G)
+#define F_SD_TX_CLK_I_3_G V_SD_TX_CLK_I_3_G(1U)
+
+#define S_SD_TX_CLK_I_4_G 16
+#define V_SD_TX_CLK_I_4_G(x) ((x) << S_SD_TX_CLK_I_4_G)
+#define F_SD_TX_CLK_I_4_G V_SD_TX_CLK_I_4_G(1U)
+
+#define S_SD_TX_CLK_I_5_G 15
+#define V_SD_TX_CLK_I_5_G(x) ((x) << S_SD_TX_CLK_I_5_G)
+#define F_SD_TX_CLK_I_5_G V_SD_TX_CLK_I_5_G(1U)
+
+#define S_SD_TX_CLK_I_6_G 14
+#define V_SD_TX_CLK_I_6_G(x) ((x) << S_SD_TX_CLK_I_6_G)
+#define F_SD_TX_CLK_I_6_G V_SD_TX_CLK_I_6_G(1U)
+
+#define S_SD_TX_CLK_I_7_G 13
+#define V_SD_TX_CLK_I_7_G(x) ((x) << S_SD_TX_CLK_I_7_G)
+#define F_SD_TX_CLK_I_7_G V_SD_TX_CLK_I_7_G(1U)
+
+#define S_XPCS_REF_CLK_I_0_G 12
+#define V_XPCS_REF_CLK_I_0_G(x) ((x) << S_XPCS_REF_CLK_I_0_G)
+#define F_XPCS_REF_CLK_I_0_G V_XPCS_REF_CLK_I_0_G(1U)
+
+#define S_XPCS_REF_CLK_I_1_G 11
+#define V_XPCS_REF_CLK_I_1_G(x) ((x) << S_XPCS_REF_CLK_I_1_G)
+#define F_XPCS_REF_CLK_I_1_G V_XPCS_REF_CLK_I_1_G(1U)
+
+#define S_REG_CLK_I_G 10
+#define V_REG_CLK_I_G(x) ((x) << S_REG_CLK_I_G)
+#define F_REG_CLK_I_G V_REG_CLK_I_G(1U)
+
+#define S_FF_RX_CLK_0_I_G 9
+#define V_FF_RX_CLK_0_I_G(x) ((x) << S_FF_RX_CLK_0_I_G)
+#define F_FF_RX_CLK_0_I_G V_FF_RX_CLK_0_I_G(1U)
+
+#define S_FF_TX_CLK_0_I_G 8
+#define V_FF_TX_CLK_0_I_G(x) ((x) << S_FF_TX_CLK_0_I_G)
+#define F_FF_TX_CLK_0_I_G V_FF_TX_CLK_0_I_G(1U)
+
+#define S_RXCLK_0_I_G 7
+#define V_RXCLK_0_I_G(x) ((x) << S_RXCLK_0_I_G)
+#define F_RXCLK_0_I_G V_RXCLK_0_I_G(1U)
+
+#define S_TXCLK_0_I_G 6
+#define V_TXCLK_0_I_G(x) ((x) << S_TXCLK_0_I_G)
+#define F_TXCLK_0_I_G V_TXCLK_0_I_G(1U)
+
+#define S_FF_RX_CLK_1_I_G 5
+#define V_FF_RX_CLK_1_I_G(x) ((x) << S_FF_RX_CLK_1_I_G)
+#define F_FF_RX_CLK_1_I_G V_FF_RX_CLK_1_I_G(1U)
+
+#define S_FF_TX_CLK_1_I_G 4
+#define V_FF_TX_CLK_1_I_G(x) ((x) << S_FF_TX_CLK_1_I_G)
+#define F_FF_TX_CLK_1_I_G V_FF_TX_CLK_1_I_G(1U)
+
+#define S_RXCLK_1_I_G 3
+#define V_RXCLK_1_I_G(x) ((x) << S_RXCLK_1_I_G)
+#define F_RXCLK_1_I_G V_RXCLK_1_I_G(1U)
+
+#define S_TXCLK_1_I_G 2
+#define V_TXCLK_1_I_G(x) ((x) << S_TXCLK_1_I_G)
+#define F_TXCLK_1_I_G V_TXCLK_1_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_1 0x38014
+
+#define S_FF_RX_CLK_2_I_G 31
+#define V_FF_RX_CLK_2_I_G(x) ((x) << S_FF_RX_CLK_2_I_G)
+#define F_FF_RX_CLK_2_I_G V_FF_RX_CLK_2_I_G(1U)
+
+#define S_FF_TX_CLK_2_I_G 30
+#define V_FF_TX_CLK_2_I_G(x) ((x) << S_FF_TX_CLK_2_I_G)
+#define F_FF_TX_CLK_2_I_G V_FF_TX_CLK_2_I_G(1U)
+
+#define S_RXCLK_2_I_G 29
+#define V_RXCLK_2_I_G(x) ((x) << S_RXCLK_2_I_G)
+#define F_RXCLK_2_I_G V_RXCLK_2_I_G(1U)
+
+#define S_TXCLK_2_I_G 28
+#define V_TXCLK_2_I_G(x) ((x) << S_TXCLK_2_I_G)
+#define F_TXCLK_2_I_G V_TXCLK_2_I_G(1U)
+
+#define S_FF_RX_CLK_3_I_G 27
+#define V_FF_RX_CLK_3_I_G(x) ((x) << S_FF_RX_CLK_3_I_G)
+#define F_FF_RX_CLK_3_I_G V_FF_RX_CLK_3_I_G(1U)
+
+#define S_FF_TX_CLK_3_I_G 26
+#define V_FF_TX_CLK_3_I_G(x) ((x) << S_FF_TX_CLK_3_I_G)
+#define F_FF_TX_CLK_3_I_G V_FF_TX_CLK_3_I_G(1U)
+
+#define S_RXCLK_3_I_G 25
+#define V_RXCLK_3_I_G(x) ((x) << S_RXCLK_3_I_G)
+#define F_RXCLK_3_I_G V_RXCLK_3_I_G(1U)
+
+#define S_TXCLK_3_I_G 24
+#define V_TXCLK_3_I_G(x) ((x) << S_TXCLK_3_I_G)
+#define F_TXCLK_3_I_G V_TXCLK_3_I_G(1U)
+
+#define S_FF_RX_CLK_4_I_G 23
+#define V_FF_RX_CLK_4_I_G(x) ((x) << S_FF_RX_CLK_4_I_G)
+#define F_FF_RX_CLK_4_I_G V_FF_RX_CLK_4_I_G(1U)
+
+#define S_FF_TX_CLK_4_I_G 22
+#define V_FF_TX_CLK_4_I_G(x) ((x) << S_FF_TX_CLK_4_I_G)
+#define F_FF_TX_CLK_4_I_G V_FF_TX_CLK_4_I_G(1U)
+
+#define S_RXCLK_4_I_G 21
+#define V_RXCLK_4_I_G(x) ((x) << S_RXCLK_4_I_G)
+#define F_RXCLK_4_I_G V_RXCLK_4_I_G(1U)
+
+#define S_TXCLK_4_I_G 20
+#define V_TXCLK_4_I_G(x) ((x) << S_TXCLK_4_I_G)
+#define F_TXCLK_4_I_G V_TXCLK_4_I_G(1U)
+
+#define S_FF_RX_CLK_5_I_G 19
+#define V_FF_RX_CLK_5_I_G(x) ((x) << S_FF_RX_CLK_5_I_G)
+#define F_FF_RX_CLK_5_I_G V_FF_RX_CLK_5_I_G(1U)
+
+#define S_FF_TX_CLK_5_I_G 18
+#define V_FF_TX_CLK_5_I_G(x) ((x) << S_FF_TX_CLK_5_I_G)
+#define F_FF_TX_CLK_5_I_G V_FF_TX_CLK_5_I_G(1U)
+
+#define S_RXCLK_5_I_G 17
+#define V_RXCLK_5_I_G(x) ((x) << S_RXCLK_5_I_G)
+#define F_RXCLK_5_I_G V_RXCLK_5_I_G(1U)
+
+#define S_TXCLK_5_I_G 16
+#define V_TXCLK_5_I_G(x) ((x) << S_TXCLK_5_I_G)
+#define F_TXCLK_5_I_G V_TXCLK_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_0_I_G 15
+#define V_SD_RX_CLK_AN_0_I_G(x) ((x) << S_SD_RX_CLK_AN_0_I_G)
+#define F_SD_RX_CLK_AN_0_I_G V_SD_RX_CLK_AN_0_I_G(1U)
+
+#define S_SD_TX_CLK_AN_0_I_G 14
+#define V_SD_TX_CLK_AN_0_I_G(x) ((x) << S_SD_TX_CLK_AN_0_I_G)
+#define F_SD_TX_CLK_AN_0_I_G V_SD_TX_CLK_AN_0_I_G(1U)
+
+#define S_SD_RX_CLK_AN_1_I_G 13
+#define V_SD_RX_CLK_AN_1_I_G(x) ((x) << S_SD_RX_CLK_AN_1_I_G)
+#define F_SD_RX_CLK_AN_1_I_G V_SD_RX_CLK_AN_1_I_G(1U)
+
+#define S_SD_TX_CLK_AN_1_I_G 12
+#define V_SD_TX_CLK_AN_1_I_G(x) ((x) << S_SD_TX_CLK_AN_1_I_G)
+#define F_SD_TX_CLK_AN_1_I_G V_SD_TX_CLK_AN_1_I_G(1U)
+
+#define S_SD_RX_CLK_AN_2_I_G 11
+#define V_SD_RX_CLK_AN_2_I_G(x) ((x) << S_SD_RX_CLK_AN_2_I_G)
+#define F_SD_RX_CLK_AN_2_I_G V_SD_RX_CLK_AN_2_I_G(1U)
+
+#define S_SD_TX_CLK_AN_2_I_G 10
+#define V_SD_TX_CLK_AN_2_I_G(x) ((x) << S_SD_TX_CLK_AN_2_I_G)
+#define F_SD_TX_CLK_AN_2_I_G V_SD_TX_CLK_AN_2_I_G(1U)
+
+#define S_SD_RX_CLK_AN_3_I_G 9
+#define V_SD_RX_CLK_AN_3_I_G(x) ((x) << S_SD_RX_CLK_AN_3_I_G)
+#define F_SD_RX_CLK_AN_3_I_G V_SD_RX_CLK_AN_3_I_G(1U)
+
+#define S_SD_TX_CLK_AN_3_I_G 8
+#define V_SD_TX_CLK_AN_3_I_G(x) ((x) << S_SD_TX_CLK_AN_3_I_G)
+#define F_SD_TX_CLK_AN_3_I_G V_SD_TX_CLK_AN_3_I_G(1U)
+
+#define S_SD_RX_CLK_AN_4_I_G 7
+#define V_SD_RX_CLK_AN_4_I_G(x) ((x) << S_SD_RX_CLK_AN_4_I_G)
+#define F_SD_RX_CLK_AN_4_I_G V_SD_RX_CLK_AN_4_I_G(1U)
+
+#define S_SD_TX_CLK_AN_4_I_G 6
+#define V_SD_TX_CLK_AN_4_I_G(x) ((x) << S_SD_TX_CLK_AN_4_I_G)
+#define F_SD_TX_CLK_AN_4_I_G V_SD_TX_CLK_AN_4_I_G(1U)
+
+#define S_SD_RX_CLK_AN_5_I_G 5
+#define V_SD_RX_CLK_AN_5_I_G(x) ((x) << S_SD_RX_CLK_AN_5_I_G)
+#define F_SD_RX_CLK_AN_5_I_G V_SD_RX_CLK_AN_5_I_G(1U)
+
+#define S_SD_TX_CLK_AN_5_I_G 4
+#define V_SD_TX_CLK_AN_5_I_G(x) ((x) << S_SD_TX_CLK_AN_5_I_G)
+#define F_SD_TX_CLK_AN_5_I_G V_SD_TX_CLK_AN_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_6_I_G 3
+#define V_SD_RX_CLK_AN_6_I_G(x) ((x) << S_SD_RX_CLK_AN_6_I_G)
+#define F_SD_RX_CLK_AN_6_I_G V_SD_RX_CLK_AN_6_I_G(1U)
+
+#define S_SD_TX_CLK_AN_6_I_G 2
+#define V_SD_TX_CLK_AN_6_I_G(x) ((x) << S_SD_TX_CLK_AN_6_I_G)
+#define F_SD_TX_CLK_AN_6_I_G V_SD_TX_CLK_AN_6_I_G(1U)
+
+#define S_SD_RX_CLK_AN_7_I_G 1
+#define V_SD_RX_CLK_AN_7_I_G(x) ((x) << S_SD_RX_CLK_AN_7_I_G)
+#define F_SD_RX_CLK_AN_7_I_G V_SD_RX_CLK_AN_7_I_G(1U)
+
+#define S_SD_TX_CLK_AN_7_I_G 0
+#define V_SD_TX_CLK_AN_7_I_G(x) ((x) << S_SD_TX_CLK_AN_7_I_G)
+#define F_SD_TX_CLK_AN_7_I_G V_SD_TX_CLK_AN_7_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_2 0x38018
+
+#define S_SD_RX_CLK_0_G 31
+#define V_SD_RX_CLK_0_G(x) ((x) << S_SD_RX_CLK_0_G)
+#define F_SD_RX_CLK_0_G V_SD_RX_CLK_0_G(1U)
+
+#define S_SD_RX_CLK_1_G 30
+#define V_SD_RX_CLK_1_G(x) ((x) << S_SD_RX_CLK_1_G)
+#define F_SD_RX_CLK_1_G V_SD_RX_CLK_1_G(1U)
+
+#define S_SD_RX_CLK_2_G 29
+#define V_SD_RX_CLK_2_G(x) ((x) << S_SD_RX_CLK_2_G)
+#define F_SD_RX_CLK_2_G V_SD_RX_CLK_2_G(1U)
+
+#define S_SD_RX_CLK_3_G 28
+#define V_SD_RX_CLK_3_G(x) ((x) << S_SD_RX_CLK_3_G)
+#define F_SD_RX_CLK_3_G V_SD_RX_CLK_3_G(1U)
+
+#define S_SD_RX_CLK_4_G 27
+#define V_SD_RX_CLK_4_G(x) ((x) << S_SD_RX_CLK_4_G)
+#define F_SD_RX_CLK_4_G V_SD_RX_CLK_4_G(1U)
+
+#define S_SD_RX_CLK_5_G 26
+#define V_SD_RX_CLK_5_G(x) ((x) << S_SD_RX_CLK_5_G)
+#define F_SD_RX_CLK_5_G V_SD_RX_CLK_5_G(1U)
+
+#define S_SD_RX_CLK_6_G 25
+#define V_SD_RX_CLK_6_G(x) ((x) << S_SD_RX_CLK_6_G)
+#define F_SD_RX_CLK_6_G V_SD_RX_CLK_6_G(1U)
+
+#define S_SD_RX_CLK_7_G 24
+#define V_SD_RX_CLK_7_G(x) ((x) << S_SD_RX_CLK_7_G)
+#define F_SD_RX_CLK_7_G V_SD_RX_CLK_7_G(1U)
+
+#define S_SD_TX_CLK_0_G 23
+#define V_SD_TX_CLK_0_G(x) ((x) << S_SD_TX_CLK_0_G)
+#define F_SD_TX_CLK_0_G V_SD_TX_CLK_0_G(1U)
+
+#define S_SD_TX_CLK_1_G 22
+#define V_SD_TX_CLK_1_G(x) ((x) << S_SD_TX_CLK_1_G)
+#define F_SD_TX_CLK_1_G V_SD_TX_CLK_1_G(1U)
+
+#define S_SD_TX_CLK_2_G 21
+#define V_SD_TX_CLK_2_G(x) ((x) << S_SD_TX_CLK_2_G)
+#define F_SD_TX_CLK_2_G V_SD_TX_CLK_2_G(1U)
+
+#define S_SD_TX_CLK_3_G 20
+#define V_SD_TX_CLK_3_G(x) ((x) << S_SD_TX_CLK_3_G)
+#define F_SD_TX_CLK_3_G V_SD_TX_CLK_3_G(1U)
+
+#define S_SD_TX_CLK_4_G 19
+#define V_SD_TX_CLK_4_G(x) ((x) << S_SD_TX_CLK_4_G)
+#define F_SD_TX_CLK_4_G V_SD_TX_CLK_4_G(1U)
+
+#define S_SD_TX_CLK_5_G 18
+#define V_SD_TX_CLK_5_G(x) ((x) << S_SD_TX_CLK_5_G)
+#define F_SD_TX_CLK_5_G V_SD_TX_CLK_5_G(1U)
+
+#define S_SD_TX_CLK_6_G 17
+#define V_SD_TX_CLK_6_G(x) ((x) << S_SD_TX_CLK_6_G)
+#define F_SD_TX_CLK_6_G V_SD_TX_CLK_6_G(1U)
+
+#define S_SD_TX_CLK_7_G 16
+#define V_SD_TX_CLK_7_G(x) ((x) << S_SD_TX_CLK_7_G)
+#define F_SD_TX_CLK_7_G V_SD_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_AEC_0_G 15
+#define V_SD_RX_CLK_AEC_0_G(x) ((x) << S_SD_RX_CLK_AEC_0_G)
+#define F_SD_RX_CLK_AEC_0_G V_SD_RX_CLK_AEC_0_G(1U)
+
+#define S_SD_RX_CLK_AEC_1_G 14
+#define V_SD_RX_CLK_AEC_1_G(x) ((x) << S_SD_RX_CLK_AEC_1_G)
+#define F_SD_RX_CLK_AEC_1_G V_SD_RX_CLK_AEC_1_G(1U)
+
+#define S_SD_RX_CLK_AEC_2_G 13
+#define V_SD_RX_CLK_AEC_2_G(x) ((x) << S_SD_RX_CLK_AEC_2_G)
+#define F_SD_RX_CLK_AEC_2_G V_SD_RX_CLK_AEC_2_G(1U)
+
+#define S_SD_RX_CLK_AEC_3_G 12
+#define V_SD_RX_CLK_AEC_3_G(x) ((x) << S_SD_RX_CLK_AEC_3_G)
+#define F_SD_RX_CLK_AEC_3_G V_SD_RX_CLK_AEC_3_G(1U)
+
+#define S_SD_RX_CLK_AEC_4_G 11
+#define V_SD_RX_CLK_AEC_4_G(x) ((x) << S_SD_RX_CLK_AEC_4_G)
+#define F_SD_RX_CLK_AEC_4_G V_SD_RX_CLK_AEC_4_G(1U)
+
+#define S_SD_RX_CLK_AEC_5_G 10
+#define V_SD_RX_CLK_AEC_5_G(x) ((x) << S_SD_RX_CLK_AEC_5_G)
+#define F_SD_RX_CLK_AEC_5_G V_SD_RX_CLK_AEC_5_G(1U)
+
+#define S_SD_RX_CLK_AEC_6_G 9
+#define V_SD_RX_CLK_AEC_6_G(x) ((x) << S_SD_RX_CLK_AEC_6_G)
+#define F_SD_RX_CLK_AEC_6_G V_SD_RX_CLK_AEC_6_G(1U)
+
+#define S_SD_RX_CLK_AEC_7_G 8
+#define V_SD_RX_CLK_AEC_7_G(x) ((x) << S_SD_RX_CLK_AEC_7_G)
+#define F_SD_RX_CLK_AEC_7_G V_SD_RX_CLK_AEC_7_G(1U)
+
+#define S_SD_TX_CLK_AEC_0_G 7
+#define V_SD_TX_CLK_AEC_0_G(x) ((x) << S_SD_TX_CLK_AEC_0_G)
+#define F_SD_TX_CLK_AEC_0_G V_SD_TX_CLK_AEC_0_G(1U)
+
+#define S_SD_TX_CLK_AEC_1_G 6
+#define V_SD_TX_CLK_AEC_1_G(x) ((x) << S_SD_TX_CLK_AEC_1_G)
+#define F_SD_TX_CLK_AEC_1_G V_SD_TX_CLK_AEC_1_G(1U)
+
+#define S_SD_TX_CLK_AEC_2_G 5
+#define V_SD_TX_CLK_AEC_2_G(x) ((x) << S_SD_TX_CLK_AEC_2_G)
+#define F_SD_TX_CLK_AEC_2_G V_SD_TX_CLK_AEC_2_G(1U)
+
+#define S_SD_TX_CLK_AEC_3_G 4
+#define V_SD_TX_CLK_AEC_3_G(x) ((x) << S_SD_TX_CLK_AEC_3_G)
+#define F_SD_TX_CLK_AEC_3_G V_SD_TX_CLK_AEC_3_G(1U)
+
+#define S_SD_TX_CLK_AEC_4_G 3
+#define V_SD_TX_CLK_AEC_4_G(x) ((x) << S_SD_TX_CLK_AEC_4_G)
+#define F_SD_TX_CLK_AEC_4_G V_SD_TX_CLK_AEC_4_G(1U)
+
+#define S_SD_TX_CLK_AEC_5_G 2
+#define V_SD_TX_CLK_AEC_5_G(x) ((x) << S_SD_TX_CLK_AEC_5_G)
+#define F_SD_TX_CLK_AEC_5_G V_SD_TX_CLK_AEC_5_G(1U)
+
+#define S_SD_TX_CLK_AEC_6_G 1
+#define V_SD_TX_CLK_AEC_6_G(x) ((x) << S_SD_TX_CLK_AEC_6_G)
+#define F_SD_TX_CLK_AEC_6_G V_SD_TX_CLK_AEC_6_G(1U)
+
+#define S_SD_TX_CLK_AEC_7_G 0
+#define V_SD_TX_CLK_AEC_7_G(x) ((x) << S_SD_TX_CLK_AEC_7_G)
+#define F_SD_TX_CLK_AEC_7_G V_SD_TX_CLK_AEC_7_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_3 0x3801c
+
+#define S_PCS_RX_CLK_0_G 31
+#define V_PCS_RX_CLK_0_G(x) ((x) << S_PCS_RX_CLK_0_G)
+#define F_PCS_RX_CLK_0_G V_PCS_RX_CLK_0_G(1U)
+
+#define S_PCS_RX_CLK_1_G 30
+#define V_PCS_RX_CLK_1_G(x) ((x) << S_PCS_RX_CLK_1_G)
+#define F_PCS_RX_CLK_1_G V_PCS_RX_CLK_1_G(1U)
+
+#define S_PCS_RX_CLK_2_G 29
+#define V_PCS_RX_CLK_2_G(x) ((x) << S_PCS_RX_CLK_2_G)
+#define F_PCS_RX_CLK_2_G V_PCS_RX_CLK_2_G(1U)
+
+#define S_PCS_RX_CLK_3_G 28
+#define V_PCS_RX_CLK_3_G(x) ((x) << S_PCS_RX_CLK_3_G)
+#define F_PCS_RX_CLK_3_G V_PCS_RX_CLK_3_G(1U)
+
+#define S_PCS_RX_CLK_4_G 27
+#define V_PCS_RX_CLK_4_G(x) ((x) << S_PCS_RX_CLK_4_G)
+#define F_PCS_RX_CLK_4_G V_PCS_RX_CLK_4_G(1U)
+
+#define S_PCS_RX_CLK_5_G 26
+#define V_PCS_RX_CLK_5_G(x) ((x) << S_PCS_RX_CLK_5_G)
+#define F_PCS_RX_CLK_5_G V_PCS_RX_CLK_5_G(1U)
+
+#define S_PCS_RX_CLK_6_G 25
+#define V_PCS_RX_CLK_6_G(x) ((x) << S_PCS_RX_CLK_6_G)
+#define F_PCS_RX_CLK_6_G V_PCS_RX_CLK_6_G(1U)
+
+#define S_PCS_RX_CLK_7_G 24
+#define V_PCS_RX_CLK_7_G(x) ((x) << S_PCS_RX_CLK_7_G)
+#define F_PCS_RX_CLK_7_G V_PCS_RX_CLK_7_G(1U)
+
+#define S_PCS_TX_CLK_0_G 23
+#define V_PCS_TX_CLK_0_G(x) ((x) << S_PCS_TX_CLK_0_G)
+#define F_PCS_TX_CLK_0_G V_PCS_TX_CLK_0_G(1U)
+
+#define S_PCS_TX_CLK_1_G 22
+#define V_PCS_TX_CLK_1_G(x) ((x) << S_PCS_TX_CLK_1_G)
+#define F_PCS_TX_CLK_1_G V_PCS_TX_CLK_1_G(1U)
+
+#define S_PCS_TX_CLK_2_G 21
+#define V_PCS_TX_CLK_2_G(x) ((x) << S_PCS_TX_CLK_2_G)
+#define F_PCS_TX_CLK_2_G V_PCS_TX_CLK_2_G(1U)
+
+#define S_PCS_TX_CLK_3_G 20
+#define V_PCS_TX_CLK_3_G(x) ((x) << S_PCS_TX_CLK_3_G)
+#define F_PCS_TX_CLK_3_G V_PCS_TX_CLK_3_G(1U)
+
+#define S_PCS_TX_CLK_4_G 19
+#define V_PCS_TX_CLK_4_G(x) ((x) << S_PCS_TX_CLK_4_G)
+#define F_PCS_TX_CLK_4_G V_PCS_TX_CLK_4_G(1U)
+
+#define S_PCS_TX_CLK_5_G 18
+#define V_PCS_TX_CLK_5_G(x) ((x) << S_PCS_TX_CLK_5_G)
+#define F_PCS_TX_CLK_5_G V_PCS_TX_CLK_5_G(1U)
+
+#define S_PCS_TX_CLK_6_G 17
+#define V_PCS_TX_CLK_6_G(x) ((x) << S_PCS_TX_CLK_6_G)
+#define F_PCS_TX_CLK_6_G V_PCS_TX_CLK_6_G(1U)
+
+#define S_PCS_TX_CLK_7_G 16
+#define V_PCS_TX_CLK_7_G(x) ((x) << S_PCS_TX_CLK_7_G)
+#define F_PCS_TX_CLK_7_G V_PCS_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_EN_0 15
+#define V_SD_RX_CLK_EN_0(x) ((x) << S_SD_RX_CLK_EN_0)
+#define F_SD_RX_CLK_EN_0 V_SD_RX_CLK_EN_0(1U)
+
+#define S_SD_RX_CLK_EN_1 14
+#define V_SD_RX_CLK_EN_1(x) ((x) << S_SD_RX_CLK_EN_1)
+#define F_SD_RX_CLK_EN_1 V_SD_RX_CLK_EN_1(1U)
+
+#define S_SD_RX_CLK_EN_2 13
+#define V_SD_RX_CLK_EN_2(x) ((x) << S_SD_RX_CLK_EN_2)
+#define F_SD_RX_CLK_EN_2 V_SD_RX_CLK_EN_2(1U)
+
+#define S_SD_RX_CLK_EN_3 12
+#define V_SD_RX_CLK_EN_3(x) ((x) << S_SD_RX_CLK_EN_3)
+#define F_SD_RX_CLK_EN_3 V_SD_RX_CLK_EN_3(1U)
+
+#define S_SD_RX_CLK_EN_4 11
+#define V_SD_RX_CLK_EN_4(x) ((x) << S_SD_RX_CLK_EN_4)
+#define F_SD_RX_CLK_EN_4 V_SD_RX_CLK_EN_4(1U)
+
+#define S_SD_RX_CLK_EN_5 10
+#define V_SD_RX_CLK_EN_5(x) ((x) << S_SD_RX_CLK_EN_5)
+#define F_SD_RX_CLK_EN_5 V_SD_RX_CLK_EN_5(1U)
+
+#define S_SD_RX_CLK_EN_6 9
+#define V_SD_RX_CLK_EN_6(x) ((x) << S_SD_RX_CLK_EN_6)
+#define F_SD_RX_CLK_EN_6 V_SD_RX_CLK_EN_6(1U)
+
+#define S_SD_RX_CLK_EN_7 8
+#define V_SD_RX_CLK_EN_7(x) ((x) << S_SD_RX_CLK_EN_7)
+#define F_SD_RX_CLK_EN_7 V_SD_RX_CLK_EN_7(1U)
+
+#define S_SD_TX_CLK_EN_0 7
+#define V_SD_TX_CLK_EN_0(x) ((x) << S_SD_TX_CLK_EN_0)
+#define F_SD_TX_CLK_EN_0 V_SD_TX_CLK_EN_0(1U)
+
+#define S_SD_TX_CLK_EN_1 6
+#define V_SD_TX_CLK_EN_1(x) ((x) << S_SD_TX_CLK_EN_1)
+#define F_SD_TX_CLK_EN_1 V_SD_TX_CLK_EN_1(1U)
+
+#define S_SD_TX_CLK_EN_2 5
+#define V_SD_TX_CLK_EN_2(x) ((x) << S_SD_TX_CLK_EN_2)
+#define F_SD_TX_CLK_EN_2 V_SD_TX_CLK_EN_2(1U)
+
+#define S_SD_TX_CLK_EN_3 4
+#define V_SD_TX_CLK_EN_3(x) ((x) << S_SD_TX_CLK_EN_3)
+#define F_SD_TX_CLK_EN_3 V_SD_TX_CLK_EN_3(1U)
+
+#define S_SD_TX_CLK_EN_4 3
+#define V_SD_TX_CLK_EN_4(x) ((x) << S_SD_TX_CLK_EN_4)
+#define F_SD_TX_CLK_EN_4 V_SD_TX_CLK_EN_4(1U)
+
+#define S_SD_TX_CLK_EN_5 2
+#define V_SD_TX_CLK_EN_5(x) ((x) << S_SD_TX_CLK_EN_5)
+#define F_SD_TX_CLK_EN_5 V_SD_TX_CLK_EN_5(1U)
+
+#define S_SD_TX_CLK_EN_6 1
+#define V_SD_TX_CLK_EN_6(x) ((x) << S_SD_TX_CLK_EN_6)
+#define F_SD_TX_CLK_EN_6 V_SD_TX_CLK_EN_6(1U)
+
+#define S_SD_TX_CLK_EN_7 0
+#define V_SD_TX_CLK_EN_7(x) ((x) << S_SD_TX_CLK_EN_7)
+#define F_SD_TX_CLK_EN_7 V_SD_TX_CLK_EN_7(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_4 0x38020
+
+#define S_SGMII_TX_CLK_0_G 7
+#define V_SGMII_TX_CLK_0_G(x) ((x) << S_SGMII_TX_CLK_0_G)
+#define F_SGMII_TX_CLK_0_G V_SGMII_TX_CLK_0_G(1U)
+
+#define S_SGMII_TX_CLK_1_G 6
+#define V_SGMII_TX_CLK_1_G(x) ((x) << S_SGMII_TX_CLK_1_G)
+#define F_SGMII_TX_CLK_1_G V_SGMII_TX_CLK_1_G(1U)
+
+#define S_SGMII_TX_CLK_2_G 5
+#define V_SGMII_TX_CLK_2_G(x) ((x) << S_SGMII_TX_CLK_2_G)
+#define F_SGMII_TX_CLK_2_G V_SGMII_TX_CLK_2_G(1U)
+
+#define S_SGMII_TX_CLK_3_G 4
+#define V_SGMII_TX_CLK_3_G(x) ((x) << S_SGMII_TX_CLK_3_G)
+#define F_SGMII_TX_CLK_3_G V_SGMII_TX_CLK_3_G(1U)
+
+#define S_SGMII_RX_CLK_0_G 3
+#define V_SGMII_RX_CLK_0_G(x) ((x) << S_SGMII_RX_CLK_0_G)
+#define F_SGMII_RX_CLK_0_G V_SGMII_RX_CLK_0_G(1U)
+
+#define S_SGMII_RX_CLK_1_G 2
+#define V_SGMII_RX_CLK_1_G(x) ((x) << S_SGMII_RX_CLK_1_G)
+#define F_SGMII_RX_CLK_1_G V_SGMII_RX_CLK_1_G(1U)
+
+#define S_SGMII_RX_CLK_2_G 1
+#define V_SGMII_RX_CLK_2_G(x) ((x) << S_SGMII_RX_CLK_2_G)
+#define F_SGMII_RX_CLK_2_G V_SGMII_RX_CLK_2_G(1U)
+
+#define S_SGMII_RX_CLK_3_G 0
+#define V_SGMII_RX_CLK_3_G(x) ((x) << S_SGMII_RX_CLK_3_G)
+#define F_SGMII_RX_CLK_3_G V_SGMII_RX_CLK_3_G(1U)
+
+#define A_MAC_PCS_CONFIG_0 0x38024
+
+#define S_KP_MODE_IN 24
+#define M_KP_MODE_IN 0xffU
+#define V_KP_MODE_IN(x) ((x) << S_KP_MODE_IN)
+#define G_KP_MODE_IN(x) (((x) >> S_KP_MODE_IN) & M_KP_MODE_IN)
+
+#define S_FEC91_ENA_IN 16
+#define M_FEC91_ENA_IN 0xffU
+#define V_FEC91_ENA_IN(x) ((x) << S_FEC91_ENA_IN)
+#define G_FEC91_ENA_IN(x) (((x) >> S_FEC91_ENA_IN) & M_FEC91_ENA_IN)
+
+#define S_SD_8X 8
+#define M_SD_8X 0xffU
+#define V_SD_8X(x) ((x) << S_SD_8X)
+#define G_SD_8X(x) (((x) >> S_SD_8X) & M_SD_8X)
+
+#define S_SD_N2 0
+#define M_SD_N2 0xffU
+#define V_SD_N2(x) ((x) << S_SD_N2)
+#define G_SD_N2(x) (((x) >> S_SD_N2) & M_SD_N2)
+
+#define A_MAC_PCS_CONFIG_1 0x38028
+
+#define S_FAST_1LANE_MODE 24
+#define M_FAST_1LANE_MODE 0xffU
+#define V_FAST_1LANE_MODE(x) ((x) << S_FAST_1LANE_MODE)
+#define G_FAST_1LANE_MODE(x) (((x) >> S_FAST_1LANE_MODE) & M_FAST_1LANE_MODE)
+
+#define S_PACER_10G 16
+#define M_PACER_10G 0xffU
+#define V_PACER_10G(x) ((x) << S_PACER_10G)
+#define G_PACER_10G(x) (((x) >> S_PACER_10G) & M_PACER_10G)
+
+#define S_PCS400_ENA_IN 14
+#define M_PCS400_ENA_IN 0x3U
+#define V_PCS400_ENA_IN(x) ((x) << S_PCS400_ENA_IN)
+#define G_PCS400_ENA_IN(x) (((x) >> S_PCS400_ENA_IN) & M_PCS400_ENA_IN)
+
+#define S_MODE40_ENA_IN4 13
+#define V_MODE40_ENA_IN4(x) ((x) << S_MODE40_ENA_IN4)
+#define F_MODE40_ENA_IN4 V_MODE40_ENA_IN4(1U)
+
+#define S_MODE40_ENA_IN0 12
+#define V_MODE40_ENA_IN0(x) ((x) << S_MODE40_ENA_IN0)
+#define F_MODE40_ENA_IN0 V_MODE40_ENA_IN0(1U)
+
+#define S_PCS100_ENA_IN6 11
+#define V_PCS100_ENA_IN6(x) ((x) << S_PCS100_ENA_IN6)
+#define F_PCS100_ENA_IN6 V_PCS100_ENA_IN6(1U)
+
+#define S_PCS100_ENA_IN4 10
+#define V_PCS100_ENA_IN4(x) ((x) << S_PCS100_ENA_IN4)
+#define F_PCS100_ENA_IN4 V_PCS100_ENA_IN4(1U)
+
+#define S_PCS100_ENA_IN2 9
+#define V_PCS100_ENA_IN2(x) ((x) << S_PCS100_ENA_IN2)
+#define F_PCS100_ENA_IN2 V_PCS100_ENA_IN2(1U)
+
+#define S_PCS100_ENA_IN0 8
+#define V_PCS100_ENA_IN0(x) ((x) << S_PCS100_ENA_IN0)
+#define F_PCS100_ENA_IN0 V_PCS100_ENA_IN0(1U)
+
+#define S_RXLAUI_ENA_IN6 7
+#define V_RXLAUI_ENA_IN6(x) ((x) << S_RXLAUI_ENA_IN6)
+#define F_RXLAUI_ENA_IN6 V_RXLAUI_ENA_IN6(1U)
+
+#define S_RXLAUI_ENA_IN4 6
+#define V_RXLAUI_ENA_IN4(x) ((x) << S_RXLAUI_ENA_IN4)
+#define F_RXLAUI_ENA_IN4 V_RXLAUI_ENA_IN4(1U)
+
+#define S_RXLAUI_ENA_IN2 5
+#define V_RXLAUI_ENA_IN2(x) ((x) << S_RXLAUI_ENA_IN2)
+#define F_RXLAUI_ENA_IN2 V_RXLAUI_ENA_IN2(1U)
+
+#define S_RXLAUI_ENA_IN0 4
+#define V_RXLAUI_ENA_IN0(x) ((x) << S_RXLAUI_ENA_IN0)
+#define F_RXLAUI_ENA_IN0 V_RXLAUI_ENA_IN0(1U)
+
+#define S_FEC91_LANE_IN6 3
+#define V_FEC91_LANE_IN6(x) ((x) << S_FEC91_LANE_IN6)
+#define F_FEC91_LANE_IN6 V_FEC91_LANE_IN6(1U)
+
+#define S_FEC91_LANE_IN4 2
+#define V_FEC91_LANE_IN4(x) ((x) << S_FEC91_LANE_IN4)
+#define F_FEC91_LANE_IN4 V_FEC91_LANE_IN4(1U)
+
+#define S_FEC91_LANE_IN2 1
+#define V_FEC91_LANE_IN2(x) ((x) << S_FEC91_LANE_IN2)
+#define F_FEC91_LANE_IN2 V_FEC91_LANE_IN2(1U)
+
+#define S_FEC91_LANE_IN0 0
+#define V_FEC91_LANE_IN0(x) ((x) << S_FEC91_LANE_IN0)
+#define F_FEC91_LANE_IN0 V_FEC91_LANE_IN0(1U)
+
+#define A_MAC_PCS_CONFIG_2 0x3802c
+
+#define S_SGPCS_EN_3 29
+#define V_SGPCS_EN_3(x) ((x) << S_SGPCS_EN_3)
+#define F_SGPCS_EN_3 V_SGPCS_EN_3(1U)
+
+#define S_SGPCS_EN_2 28
+#define V_SGPCS_EN_2(x) ((x) << S_SGPCS_EN_2)
+#define F_SGPCS_EN_2 V_SGPCS_EN_2(1U)
+
+#define S_SGPCS_EN_1 27
+#define V_SGPCS_EN_1(x) ((x) << S_SGPCS_EN_1)
+#define F_SGPCS_EN_1 V_SGPCS_EN_1(1U)
+
+#define S_SGPCS_EN_0 26
+#define V_SGPCS_EN_0(x) ((x) << S_SGPCS_EN_0)
+#define F_SGPCS_EN_0 V_SGPCS_EN_0(1U)
+
+#define S_CFG_CLOCK_RATE 22
+#define M_CFG_CLOCK_RATE 0xfU
+#define V_CFG_CLOCK_RATE(x) ((x) << S_CFG_CLOCK_RATE)
+#define G_CFG_CLOCK_RATE(x) (((x) >> S_CFG_CLOCK_RATE) & M_CFG_CLOCK_RATE)
+
+#define S_FEC_ERR_ENA 14
+#define M_FEC_ERR_ENA 0xffU
+#define V_FEC_ERR_ENA(x) ((x) << S_FEC_ERR_ENA)
+#define G_FEC_ERR_ENA(x) (((x) >> S_FEC_ERR_ENA) & M_FEC_ERR_ENA)
+
+#define S_FEC_ENA 6
+#define M_FEC_ENA 0xffU
+#define V_FEC_ENA(x) ((x) << S_FEC_ENA)
+#define G_FEC_ENA(x) (((x) >> S_FEC_ENA) & M_FEC_ENA)
+
+#define S_PCS001_TX_AM_SF 3
+#define M_PCS001_TX_AM_SF 0x7U
+#define V_PCS001_TX_AM_SF(x) ((x) << S_PCS001_TX_AM_SF)
+#define G_PCS001_TX_AM_SF(x) (((x) >> S_PCS001_TX_AM_SF) & M_PCS001_TX_AM_SF)
+
+#define S_PCS000_TX_AM_SF 0
+#define M_PCS000_TX_AM_SF 0x7U
+#define V_PCS000_TX_AM_SF(x) ((x) << S_PCS000_TX_AM_SF)
+#define G_PCS000_TX_AM_SF(x) (((x) >> S_PCS000_TX_AM_SF) & M_PCS000_TX_AM_SF)
+
+#define A_MAC_PCS_STATUS_0 0x38030
+
+#define S_PCS000_ALIGN_LOCK 30
+#define M_PCS000_ALIGN_LOCK 0x3U
+#define V_PCS000_ALIGN_LOCK(x) ((x) << S_PCS000_ALIGN_LOCK)
+#define G_PCS000_ALIGN_LOCK(x) (((x) >> S_PCS000_ALIGN_LOCK) & M_PCS000_ALIGN_LOCK)
+
+#define S_PCS000_HI_SER 28
+#define M_PCS000_HI_SER 0x3U
+#define V_PCS000_HI_SER(x) ((x) << S_PCS000_HI_SER)
+#define G_PCS000_HI_SER(x) (((x) >> S_PCS000_HI_SER) & M_PCS000_HI_SER)
+
+#define S_BER_TIMER_DONE 20
+#define M_BER_TIMER_DONE 0xffU
+#define V_BER_TIMER_DONE(x) ((x) << S_BER_TIMER_DONE)
+#define G_BER_TIMER_DONE(x) (((x) >> S_BER_TIMER_DONE) & M_BER_TIMER_DONE)
+
+#define S_T7_AMPS_LOCK 4
+#define M_T7_AMPS_LOCK 0xffffU
+#define V_T7_AMPS_LOCK(x) ((x) << S_T7_AMPS_LOCK)
+#define G_T7_AMPS_LOCK(x) (((x) >> S_T7_AMPS_LOCK) & M_T7_AMPS_LOCK)
+
+#define S_T7_ALIGN_DONE 0
+#define M_T7_ALIGN_DONE 0xfU
+#define V_T7_ALIGN_DONE(x) ((x) << S_T7_ALIGN_DONE)
+#define G_T7_ALIGN_DONE(x) (((x) >> S_T7_ALIGN_DONE) & M_T7_ALIGN_DONE)
+
+#define A_MAC_PCS_STATUS_1 0x38034
+#define A_MAC_PCS_STATUS_2 0x38038
+
+#define S_RSFEC_ALIGNED 24
+#define M_RSFEC_ALIGNED 0xffU
+#define V_RSFEC_ALIGNED(x) ((x) << S_RSFEC_ALIGNED)
+#define G_RSFEC_ALIGNED(x) (((x) >> S_RSFEC_ALIGNED) & M_RSFEC_ALIGNED)
+
+#define S_T7_FEC_LOCKED 8
+#define M_T7_FEC_LOCKED 0xffffU
+#define V_T7_FEC_LOCKED(x) ((x) << S_T7_FEC_LOCKED)
+#define G_T7_FEC_LOCKED(x) (((x) >> S_T7_FEC_LOCKED) & M_T7_FEC_LOCKED)
+
+#define S_T7_BLOCK_LOCK 0
+#define M_T7_BLOCK_LOCK 0xffU
+#define V_T7_BLOCK_LOCK(x) ((x) << S_T7_BLOCK_LOCK)
+#define G_T7_BLOCK_LOCK(x) (((x) >> S_T7_BLOCK_LOCK) & M_T7_BLOCK_LOCK)
+
+#define A_MAC_PCS_STATUS_3 0x3803c
+
+#define S_FEC_NCERR 16
+#define M_FEC_NCERR 0xffffU
+#define V_FEC_NCERR(x) ((x) << S_FEC_NCERR)
+#define G_FEC_NCERR(x) (((x) >> S_FEC_NCERR) & M_FEC_NCERR)
+
+#define S_FEC_CERR 0
+#define M_FEC_CERR 0xffffU
+#define V_FEC_CERR(x) ((x) << S_FEC_CERR)
+#define G_FEC_CERR(x) (((x) >> S_FEC_CERR) & M_FEC_CERR)
+
+#define A_MAC_PCS_STATUS_4 0x38040
+
+#define S_MAC1_RES_SPEED 23
+#define M_MAC1_RES_SPEED 0xffU
+#define V_MAC1_RES_SPEED(x) ((x) << S_MAC1_RES_SPEED)
+#define G_MAC1_RES_SPEED(x) (((x) >> S_MAC1_RES_SPEED) & M_MAC1_RES_SPEED)
+
+#define S_MAC0_RES_SPEED 14
+#define M_MAC0_RES_SPEED 0xffU
+#define V_MAC0_RES_SPEED(x) ((x) << S_MAC0_RES_SPEED)
+#define G_MAC0_RES_SPEED(x) (((x) >> S_MAC0_RES_SPEED) & M_MAC0_RES_SPEED)
+
+#define S_PCS400_ENA_IN_REF 12
+#define M_PCS400_ENA_IN_REF 0x3U
+#define V_PCS400_ENA_IN_REF(x) ((x) << S_PCS400_ENA_IN_REF)
+#define G_PCS400_ENA_IN_REF(x) (((x) >> S_PCS400_ENA_IN_REF) & M_PCS400_ENA_IN_REF)
+
+#define S_PCS000_DEGRADE_SER 10
+#define M_PCS000_DEGRADE_SER 0x3U
+#define V_PCS000_DEGRADE_SER(x) ((x) << S_PCS000_DEGRADE_SER)
+#define G_PCS000_DEGRADE_SER(x) (((x) >> S_PCS000_DEGRADE_SER) & M_PCS000_DEGRADE_SER)
+
+#define S_P4X_SIGNAL_OK 8
+#define M_P4X_SIGNAL_OK 0x3U
+#define V_P4X_SIGNAL_OK(x) ((x) << S_P4X_SIGNAL_OK)
+#define G_P4X_SIGNAL_OK(x) (((x) >> S_P4X_SIGNAL_OK) & M_P4X_SIGNAL_OK)
+
+#define S_MODE200_IND_REF 7
+#define V_MODE200_IND_REF(x) ((x) << S_MODE200_IND_REF)
+#define F_MODE200_IND_REF V_MODE200_IND_REF(1U)
+
+#define S_MODE200_8X26_IND_REF 6
+#define V_MODE200_8X26_IND_REF(x) ((x) << S_MODE200_8X26_IND_REF)
+#define F_MODE200_8X26_IND_REF V_MODE200_8X26_IND_REF(1U)
+
+#define S_PCS001_RX_AM_SF 3
+#define M_PCS001_RX_AM_SF 0x7U
+#define V_PCS001_RX_AM_SF(x) ((x) << S_PCS001_RX_AM_SF)
+#define G_PCS001_RX_AM_SF(x) (((x) >> S_PCS001_RX_AM_SF) & M_PCS001_RX_AM_SF)
+
+#define S_PCS000_RX_AM_SF 0
+#define M_PCS000_RX_AM_SF 0x7U
+#define V_PCS000_RX_AM_SF(x) ((x) << S_PCS000_RX_AM_SF)
+#define G_PCS000_RX_AM_SF(x) (((x) >> S_PCS000_RX_AM_SF) & M_PCS000_RX_AM_SF)
+
+#define A_MAC_PCS_STATUS_5 0x38044
+
+#define S_MAC5_RES_SPEED 24
+#define M_MAC5_RES_SPEED 0xffU
+#define V_MAC5_RES_SPEED(x) ((x) << S_MAC5_RES_SPEED)
+#define G_MAC5_RES_SPEED(x) (((x) >> S_MAC5_RES_SPEED) & M_MAC5_RES_SPEED)
+
+#define S_MAC4_RES_SPEED 16
+#define M_MAC4_RES_SPEED 0xffU
+#define V_MAC4_RES_SPEED(x) ((x) << S_MAC4_RES_SPEED)
+#define G_MAC4_RES_SPEED(x) (((x) >> S_MAC4_RES_SPEED) & M_MAC4_RES_SPEED)
+
+#define S_MAC3_RES_SPEED 8
+#define M_MAC3_RES_SPEED 0xffU
+#define V_MAC3_RES_SPEED(x) ((x) << S_MAC3_RES_SPEED)
+#define G_MAC3_RES_SPEED(x) (((x) >> S_MAC3_RES_SPEED) & M_MAC3_RES_SPEED)
+
+#define S_MAC2_RES_SPEED 0
+#define M_MAC2_RES_SPEED 0xffU
+#define V_MAC2_RES_SPEED(x) ((x) << S_MAC2_RES_SPEED)
+#define G_MAC2_RES_SPEED(x) (((x) >> S_MAC2_RES_SPEED) & M_MAC2_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_6 0x38048
+
+#define S_MARKER_INS_CNT_100_00 16
+#define M_MARKER_INS_CNT_100_00 0x7fffU
+#define V_MARKER_INS_CNT_100_00(x) ((x) << S_MARKER_INS_CNT_100_00)
+#define G_MARKER_INS_CNT_100_00(x) (((x) >> S_MARKER_INS_CNT_100_00) & M_MARKER_INS_CNT_100_00)
+
+#define S_MAC7_RES_SPEED 8
+#define M_MAC7_RES_SPEED 0xffU
+#define V_MAC7_RES_SPEED(x) ((x) << S_MAC7_RES_SPEED)
+#define G_MAC7_RES_SPEED(x) (((x) >> S_MAC7_RES_SPEED) & M_MAC7_RES_SPEED)
+
+#define S_MAC6_RES_SPEED 0
+#define M_MAC6_RES_SPEED 0xffU
+#define V_MAC6_RES_SPEED(x) ((x) << S_MAC6_RES_SPEED)
+#define G_MAC6_RES_SPEED(x) (((x) >> S_MAC6_RES_SPEED) & M_MAC6_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_7 0x3804c
+
+#define S_PCS000_LINK_STATUS 30
+#define M_PCS000_LINK_STATUS 0x3U
+#define V_PCS000_LINK_STATUS(x) ((x) << S_PCS000_LINK_STATUS)
+#define G_PCS000_LINK_STATUS(x) (((x) >> S_PCS000_LINK_STATUS) & M_PCS000_LINK_STATUS)
+
+#define S_MARKER_INS_CNT_100_02 15
+#define M_MARKER_INS_CNT_100_02 0x7fffU
+#define V_MARKER_INS_CNT_100_02(x) ((x) << S_MARKER_INS_CNT_100_02)
+#define G_MARKER_INS_CNT_100_02(x) (((x) >> S_MARKER_INS_CNT_100_02) & M_MARKER_INS_CNT_100_02)
+
+#define S_MARKER_INS_CNT_100_01 0
+#define M_MARKER_INS_CNT_100_01 0x7fffU
+#define V_MARKER_INS_CNT_100_01(x) ((x) << S_MARKER_INS_CNT_100_01)
+#define G_MARKER_INS_CNT_100_01(x) (((x) >> S_MARKER_INS_CNT_100_01) & M_MARKER_INS_CNT_100_01)
+
+#define A_MAC_PCS_STATUS_8 0x38050
+
+#define S_MARKER_INS_CNT_25_1 15
+#define M_MARKER_INS_CNT_25_1 0xffffU
+#define V_MARKER_INS_CNT_25_1(x) ((x) << S_MARKER_INS_CNT_25_1)
+#define G_MARKER_INS_CNT_25_1(x) (((x) >> S_MARKER_INS_CNT_25_1) & M_MARKER_INS_CNT_25_1)
+
+#define S_MARKER_INS_CNT_100_03 0
+#define M_MARKER_INS_CNT_100_03 0x7fffU
+#define V_MARKER_INS_CNT_100_03(x) ((x) << S_MARKER_INS_CNT_100_03)
+#define G_MARKER_INS_CNT_100_03(x) (((x) >> S_MARKER_INS_CNT_100_03) & M_MARKER_INS_CNT_100_03)
+
+#define A_MAC_PCS_STATUS_9 0x38054
+
+#define S_MARKER_INS_CNT_25_5 16
+#define M_MARKER_INS_CNT_25_5 0xffffU
+#define V_MARKER_INS_CNT_25_5(x) ((x) << S_MARKER_INS_CNT_25_5)
+#define G_MARKER_INS_CNT_25_5(x) (((x) >> S_MARKER_INS_CNT_25_5) & M_MARKER_INS_CNT_25_5)
+
+#define S_MARKER_INS_CNT_25_3 0
+#define M_MARKER_INS_CNT_25_3 0xffffU
+#define V_MARKER_INS_CNT_25_3(x) ((x) << S_MARKER_INS_CNT_25_3)
+#define G_MARKER_INS_CNT_25_3(x) (((x) >> S_MARKER_INS_CNT_25_3) & M_MARKER_INS_CNT_25_3)
+
+#define A_MAC_PCS_STATUS_10 0x38058
+
+#define S_MARKER_INS_CNT_25_50_2 16
+#define M_MARKER_INS_CNT_25_50_2 0xffffU
+#define V_MARKER_INS_CNT_25_50_2(x) ((x) << S_MARKER_INS_CNT_25_50_2)
+#define G_MARKER_INS_CNT_25_50_2(x) (((x) >> S_MARKER_INS_CNT_25_50_2) & M_MARKER_INS_CNT_25_50_2)
+
+#define S_MARKER_INS_CNT_25_50_0 0
+#define M_MARKER_INS_CNT_25_50_0 0xffffU
+#define V_MARKER_INS_CNT_25_50_0(x) ((x) << S_MARKER_INS_CNT_25_50_0)
+#define G_MARKER_INS_CNT_25_50_0(x) (((x) >> S_MARKER_INS_CNT_25_50_0) & M_MARKER_INS_CNT_25_50_0)
+
+#define A_MAC_PCS_STATUS_11 0x3805c
+
+#define S_MARKER_INS_CNT_25_50_6 16
+#define M_MARKER_INS_CNT_25_50_6 0xffffU
+#define V_MARKER_INS_CNT_25_50_6(x) ((x) << S_MARKER_INS_CNT_25_50_6)
+#define G_MARKER_INS_CNT_25_50_6(x) (((x) >> S_MARKER_INS_CNT_25_50_6) & M_MARKER_INS_CNT_25_50_6)
+
+#define S_MARKER_INS_CNT_25_50_4 0
+#define M_MARKER_INS_CNT_25_50_4 0xffffU
+#define V_MARKER_INS_CNT_25_50_4(x) ((x) << S_MARKER_INS_CNT_25_50_4)
+#define G_MARKER_INS_CNT_25_50_4(x) (((x) >> S_MARKER_INS_CNT_25_50_4) & M_MARKER_INS_CNT_25_50_4)
+
+#define A_MAC_PCS_STATUS_12 0x38060
+
+#define S_T7_LINK_STATUS 24
+#define M_T7_LINK_STATUS 0xffU
+#define V_T7_LINK_STATUS(x) ((x) << S_T7_LINK_STATUS)
+#define G_T7_LINK_STATUS(x) (((x) >> S_T7_LINK_STATUS) & M_T7_LINK_STATUS)
+
+#define S_T7_HI_BER 16
+#define M_T7_HI_BER 0xffU
+#define V_T7_HI_BER(x) ((x) << S_T7_HI_BER)
+#define G_T7_HI_BER(x) (((x) >> S_T7_HI_BER) & M_T7_HI_BER)
+
+#define S_MARKER_INS_CNT_25_7 0
+#define M_MARKER_INS_CNT_25_7 0xffffU
+#define V_MARKER_INS_CNT_25_7(x) ((x) << S_MARKER_INS_CNT_25_7)
+#define G_MARKER_INS_CNT_25_7(x) (((x) >> S_MARKER_INS_CNT_25_7) & M_MARKER_INS_CNT_25_7)
+
+#define A_MAC_MAC200G400G_0_CONFIG_0 0x38064
+#define A_MAC_MAC200G400G_0_CONFIG_1 0x38068
+
+#define S_FF_TX_CRC_OVR 11
+#define V_FF_TX_CRC_OVR(x) ((x) << S_FF_TX_CRC_OVR)
+#define F_FF_TX_CRC_OVR V_FF_TX_CRC_OVR(1U)
+
+#define S_TX_SMHOLD 2
+#define V_TX_SMHOLD(x) ((x) << S_TX_SMHOLD)
+#define F_TX_SMHOLD V_TX_SMHOLD(1U)
+
+#define A_MAC_MAC200G400G_0_CONFIG_2 0x3806c
+#define A_MAC_MAC200G400G_0_CONFIG_3 0x38070
+#define A_MAC_MAC200G400G_0_CONFIG_4 0x38074
+
+#define S_FRC_DELTA 0
+#define M_FRC_DELTA 0xffffU
+#define V_FRC_DELTA(x) ((x) << S_FRC_DELTA)
+#define G_FRC_DELTA(x) (((x) >> S_FRC_DELTA) & M_FRC_DELTA)
+
+#define A_MAC_MAC200G400G_0_STATUS 0x38078
+
+#define S_T7_LOOP_ENA 4
+#define V_T7_LOOP_ENA(x) ((x) << S_T7_LOOP_ENA)
+#define F_T7_LOOP_ENA V_T7_LOOP_ENA(1U)
+
+#define S_T7_LOC_FAULT 3
+#define V_T7_LOC_FAULT(x) ((x) << S_T7_LOC_FAULT)
+#define F_T7_LOC_FAULT V_T7_LOC_FAULT(1U)
+
+#define S_FRM_DROP 2
+#define V_FRM_DROP(x) ((x) << S_FRM_DROP)
+#define F_FRM_DROP V_FRM_DROP(1U)
+
+#define S_FF_TX_CREDIT 1
+#define V_FF_TX_CREDIT(x) ((x) << S_FF_TX_CREDIT)
+#define F_FF_TX_CREDIT V_FF_TX_CREDIT(1U)
+
+#define A_MAC_MAC200G400G_1_CONFIG_0 0x3807c
+#define A_MAC_MAC200G400G_1_CONFIG_1 0x38080
+#define A_MAC_MAC200G400G_1_CONFIG_2 0x38084
+#define A_MAC_MAC200G400G_1_CONFIG_3 0x38088
+#define A_MAC_MAC200G400G_1_CONFIG_4 0x3808c
+#define A_MAC_MAC200G400G_1_STATUS 0x38090
+#define A_MAC_AN_CFG_0 0x38094
+
+#define S_T7_AN_DATA_CTL 24
+#define M_T7_AN_DATA_CTL 0xffU
+#define V_T7_AN_DATA_CTL(x) ((x) << S_T7_AN_DATA_CTL)
+#define G_T7_AN_DATA_CTL(x) (((x) >> S_T7_AN_DATA_CTL) & M_T7_AN_DATA_CTL)
+
+#define S_T7_AN_ENA 16
+#define M_T7_AN_ENA 0xffU
+#define V_T7_AN_ENA(x) ((x) << S_T7_AN_ENA)
+#define G_T7_AN_ENA(x) (((x) >> S_T7_AN_ENA) & M_T7_AN_ENA)
+
+#define A_MAC_AN_CFG_1 0x38098
+
+#define S_AN_DIS_TIMER_AN_7 7
+#define V_AN_DIS_TIMER_AN_7(x) ((x) << S_AN_DIS_TIMER_AN_7)
+#define F_AN_DIS_TIMER_AN_7 V_AN_DIS_TIMER_AN_7(1U)
+
+#define S_AN_DIS_TIMER_AN_6 6
+#define V_AN_DIS_TIMER_AN_6(x) ((x) << S_AN_DIS_TIMER_AN_6)
+#define F_AN_DIS_TIMER_AN_6 V_AN_DIS_TIMER_AN_6(1U)
+
+#define S_AN_DIS_TIMER_AN_5 5
+#define V_AN_DIS_TIMER_AN_5(x) ((x) << S_AN_DIS_TIMER_AN_5)
+#define F_AN_DIS_TIMER_AN_5 V_AN_DIS_TIMER_AN_5(1U)
+
+#define S_AN_DIS_TIMER_AN_4 4
+#define V_AN_DIS_TIMER_AN_4(x) ((x) << S_AN_DIS_TIMER_AN_4)
+#define F_AN_DIS_TIMER_AN_4 V_AN_DIS_TIMER_AN_4(1U)
+
+#define S_AN_DIS_TIMER_AN_3 3
+#define V_AN_DIS_TIMER_AN_3(x) ((x) << S_AN_DIS_TIMER_AN_3)
+#define F_AN_DIS_TIMER_AN_3 V_AN_DIS_TIMER_AN_3(1U)
+
+#define S_AN_DIS_TIMER_AN_2 2
+#define V_AN_DIS_TIMER_AN_2(x) ((x) << S_AN_DIS_TIMER_AN_2)
+#define F_AN_DIS_TIMER_AN_2 V_AN_DIS_TIMER_AN_2(1U)
+
+#define S_AN_DIS_TIMER_AN_1 1
+#define V_AN_DIS_TIMER_AN_1(x) ((x) << S_AN_DIS_TIMER_AN_1)
+#define F_AN_DIS_TIMER_AN_1 V_AN_DIS_TIMER_AN_1(1U)
+
+#define S_AN_DIS_TIMER_AN_0 0
+#define V_AN_DIS_TIMER_AN_0(x) ((x) << S_AN_DIS_TIMER_AN_0)
+#define F_AN_DIS_TIMER_AN_0 V_AN_DIS_TIMER_AN_0(1U)
+
+#define A_MAC_AN_SERDES25G_ENA 0x3809c
+
+#define S_AN_SD25_TX_ENA_7 15
+#define V_AN_SD25_TX_ENA_7(x) ((x) << S_AN_SD25_TX_ENA_7)
+#define F_AN_SD25_TX_ENA_7 V_AN_SD25_TX_ENA_7(1U)
+
+#define S_AN_SD25_TX_ENA_6 14
+#define V_AN_SD25_TX_ENA_6(x) ((x) << S_AN_SD25_TX_ENA_6)
+#define F_AN_SD25_TX_ENA_6 V_AN_SD25_TX_ENA_6(1U)
+
+#define S_AN_SD25_TX_ENA_5 13
+#define V_AN_SD25_TX_ENA_5(x) ((x) << S_AN_SD25_TX_ENA_5)
+#define F_AN_SD25_TX_ENA_5 V_AN_SD25_TX_ENA_5(1U)
+
+#define S_AN_SD25_TX_ENA_4 12
+#define V_AN_SD25_TX_ENA_4(x) ((x) << S_AN_SD25_TX_ENA_4)
+#define F_AN_SD25_TX_ENA_4 V_AN_SD25_TX_ENA_4(1U)
+
+#define S_AN_SD25_TX_ENA_3 11
+#define V_AN_SD25_TX_ENA_3(x) ((x) << S_AN_SD25_TX_ENA_3)
+#define F_AN_SD25_TX_ENA_3 V_AN_SD25_TX_ENA_3(1U)
+
+#define S_AN_SD25_TX_ENA_2 10
+#define V_AN_SD25_TX_ENA_2(x) ((x) << S_AN_SD25_TX_ENA_2)
+#define F_AN_SD25_TX_ENA_2 V_AN_SD25_TX_ENA_2(1U)
+
+#define S_AN_SD25_TX_ENA_1 9
+#define V_AN_SD25_TX_ENA_1(x) ((x) << S_AN_SD25_TX_ENA_1)
+#define F_AN_SD25_TX_ENA_1 V_AN_SD25_TX_ENA_1(1U)
+
+#define S_AN_SD25_TX_ENA_0 8
+#define V_AN_SD25_TX_ENA_0(x) ((x) << S_AN_SD25_TX_ENA_0)
+#define F_AN_SD25_TX_ENA_0 V_AN_SD25_TX_ENA_0(1U)
+
+#define S_AN_SD25_RX_ENA_7 7
+#define V_AN_SD25_RX_ENA_7(x) ((x) << S_AN_SD25_RX_ENA_7)
+#define F_AN_SD25_RX_ENA_7 V_AN_SD25_RX_ENA_7(1U)
+
+#define S_AN_SD25_RX_ENA_6 6
+#define V_AN_SD25_RX_ENA_6(x) ((x) << S_AN_SD25_RX_ENA_6)
+#define F_AN_SD25_RX_ENA_6 V_AN_SD25_RX_ENA_6(1U)
+
+#define S_AN_SD25_RX_ENA_5 5
+#define V_AN_SD25_RX_ENA_5(x) ((x) << S_AN_SD25_RX_ENA_5)
+#define F_AN_SD25_RX_ENA_5 V_AN_SD25_RX_ENA_5(1U)
+
+#define S_AN_SD25_RX_ENA_4 4
+#define V_AN_SD25_RX_ENA_4(x) ((x) << S_AN_SD25_RX_ENA_4)
+#define F_AN_SD25_RX_ENA_4 V_AN_SD25_RX_ENA_4(1U)
+
+#define S_AN_SD25_RX_ENA_3 3
+#define V_AN_SD25_RX_ENA_3(x) ((x) << S_AN_SD25_RX_ENA_3)
+#define F_AN_SD25_RX_ENA_3 V_AN_SD25_RX_ENA_3(1U)
+
+#define S_AN_SD25_RX_ENA_2 2
+#define V_AN_SD25_RX_ENA_2(x) ((x) << S_AN_SD25_RX_ENA_2)
+#define F_AN_SD25_RX_ENA_2 V_AN_SD25_RX_ENA_2(1U)
+
+#define S_AN_SD25_RX_ENA_1 1
+#define V_AN_SD25_RX_ENA_1(x) ((x) << S_AN_SD25_RX_ENA_1)
+#define F_AN_SD25_RX_ENA_1 V_AN_SD25_RX_ENA_1(1U)
+
+#define S_AN_SD25_RX_ENA_0 0
+#define V_AN_SD25_RX_ENA_0(x) ((x) << S_AN_SD25_RX_ENA_0)
+#define F_AN_SD25_RX_ENA_0 V_AN_SD25_RX_ENA_0(1U)
+
+#define A_MAC_PLL_CFG_0 0x380a0
+
+#define S_USE_RX_CDR_CLK_FOR_TX 7
+#define V_USE_RX_CDR_CLK_FOR_TX(x) ((x) << S_USE_RX_CDR_CLK_FOR_TX)
+#define F_USE_RX_CDR_CLK_FOR_TX V_USE_RX_CDR_CLK_FOR_TX(1U)
+
+#define S_HSSPLLSEL0 5
+#define M_HSSPLLSEL0 0x3U
+#define V_HSSPLLSEL0(x) ((x) << S_HSSPLLSEL0)
+#define G_HSSPLLSEL0(x) (((x) >> S_HSSPLLSEL0) & M_HSSPLLSEL0)
+
+#define S_HSSTXDIV2CLK_SEL0 3
+#define M_HSSTXDIV2CLK_SEL0 0x3U
+#define V_HSSTXDIV2CLK_SEL0(x) ((x) << S_HSSTXDIV2CLK_SEL0)
+#define G_HSSTXDIV2CLK_SEL0(x) (((x) >> S_HSSTXDIV2CLK_SEL0) & M_HSSTXDIV2CLK_SEL0)
+
+#define S_HSS_RESET0 2
+#define V_HSS_RESET0(x) ((x) << S_HSS_RESET0)
+#define F_HSS_RESET0 V_HSS_RESET0(1U)
+
+#define S_APB_RESET0 1
+#define V_APB_RESET0(x) ((x) << S_APB_RESET0)
+#define F_APB_RESET0 V_APB_RESET0(1U)
+
+#define S_HSSCLK32DIV2_RESET0 0
+#define V_HSSCLK32DIV2_RESET0(x) ((x) << S_HSSCLK32DIV2_RESET0)
+#define F_HSSCLK32DIV2_RESET0 V_HSSCLK32DIV2_RESET0(1U)
+
+#define A_MAC_PLL_CFG_1 0x380a4
+
+#define S_HSSPLLSEL1 5
+#define M_HSSPLLSEL1 0x3U
+#define V_HSSPLLSEL1(x) ((x) << S_HSSPLLSEL1)
+#define G_HSSPLLSEL1(x) (((x) >> S_HSSPLLSEL1) & M_HSSPLLSEL1)
+
+#define S_HSSTXDIV2CLK_SEL1 3
+#define M_HSSTXDIV2CLK_SEL1 0x3U
+#define V_HSSTXDIV2CLK_SEL1(x) ((x) << S_HSSTXDIV2CLK_SEL1)
+#define G_HSSTXDIV2CLK_SEL1(x) (((x) >> S_HSSTXDIV2CLK_SEL1) & M_HSSTXDIV2CLK_SEL1)
+
+#define S_HSS_RESET1 2
+#define V_HSS_RESET1(x) ((x) << S_HSS_RESET1)
+#define F_HSS_RESET1 V_HSS_RESET1(1U)
+
+#define S_APB_RESET1 1
+#define V_APB_RESET1(x) ((x) << S_APB_RESET1)
+#define F_APB_RESET1 V_APB_RESET1(1U)
+
+#define S_HSSCLK32DIV2_RESET1 0
+#define V_HSSCLK32DIV2_RESET1(x) ((x) << S_HSSCLK32DIV2_RESET1)
+#define F_HSSCLK32DIV2_RESET1 V_HSSCLK32DIV2_RESET1(1U)
+
+#define A_MAC_PLL_CFG_2 0x380a8
+
+#define S_HSSPLLSEL2 5
+#define M_HSSPLLSEL2 0x3U
+#define V_HSSPLLSEL2(x) ((x) << S_HSSPLLSEL2)
+#define G_HSSPLLSEL2(x) (((x) >> S_HSSPLLSEL2) & M_HSSPLLSEL2)
+
+#define S_HSSTXDIV2CLK_SEL2 3
+#define M_HSSTXDIV2CLK_SEL2 0x3U
+#define V_HSSTXDIV2CLK_SEL2(x) ((x) << S_HSSTXDIV2CLK_SEL2)
+#define G_HSSTXDIV2CLK_SEL2(x) (((x) >> S_HSSTXDIV2CLK_SEL2) & M_HSSTXDIV2CLK_SEL2)
+
+#define S_HSS_RESET2 2
+#define V_HSS_RESET2(x) ((x) << S_HSS_RESET2)
+#define F_HSS_RESET2 V_HSS_RESET2(1U)
+
+#define S_APB_RESET2 1
+#define V_APB_RESET2(x) ((x) << S_APB_RESET2)
+#define F_APB_RESET2 V_APB_RESET2(1U)
+
+#define S_HSSCLK32DIV2_RESET2 0
+#define V_HSSCLK32DIV2_RESET2(x) ((x) << S_HSSCLK32DIV2_RESET2)
+#define F_HSSCLK32DIV2_RESET2 V_HSSCLK32DIV2_RESET2(1U)
+
+#define A_MAC_PLL_CFG_3 0x380ac
+
+#define S_HSSPLLSEL3 5
+#define M_HSSPLLSEL3 0x3U
+#define V_HSSPLLSEL3(x) ((x) << S_HSSPLLSEL3)
+#define G_HSSPLLSEL3(x) (((x) >> S_HSSPLLSEL3) & M_HSSPLLSEL3)
+
+#define S_HSSTXDIV2CLK_SEL3 3
+#define M_HSSTXDIV2CLK_SEL3 0x3U
+#define V_HSSTXDIV2CLK_SEL3(x) ((x) << S_HSSTXDIV2CLK_SEL3)
+#define G_HSSTXDIV2CLK_SEL3(x) (((x) >> S_HSSTXDIV2CLK_SEL3) & M_HSSTXDIV2CLK_SEL3)
+
+#define S_HSS_RESET3 2
+#define V_HSS_RESET3(x) ((x) << S_HSS_RESET3)
+#define F_HSS_RESET3 V_HSS_RESET3(1U)
+
+#define S_APB_RESET3 1
+#define V_APB_RESET3(x) ((x) << S_APB_RESET3)
+#define F_APB_RESET3 V_APB_RESET3(1U)
+
+#define S_HSSCLK32DIV2_RESET3 0
+#define V_HSSCLK32DIV2_RESET3(x) ((x) << S_HSSCLK32DIV2_RESET3)
+#define F_HSSCLK32DIV2_RESET3 V_HSSCLK32DIV2_RESET3(1U)
+
+#define A_MAC_HSS_STATUS 0x380b0
+
+#define S_TX_LANE_PLL_SEL_3 30
+#define M_TX_LANE_PLL_SEL_3 0x3U
+#define V_TX_LANE_PLL_SEL_3(x) ((x) << S_TX_LANE_PLL_SEL_3)
+#define G_TX_LANE_PLL_SEL_3(x) (((x) >> S_TX_LANE_PLL_SEL_3) & M_TX_LANE_PLL_SEL_3)
+
+#define S_TX_LANE_PLL_SEL_2 28
+#define M_TX_LANE_PLL_SEL_2 0x3U
+#define V_TX_LANE_PLL_SEL_2(x) ((x) << S_TX_LANE_PLL_SEL_2)
+#define G_TX_LANE_PLL_SEL_2(x) (((x) >> S_TX_LANE_PLL_SEL_2) & M_TX_LANE_PLL_SEL_2)
+
+#define S_TX_LANE_PLL_SEL_1 26
+#define M_TX_LANE_PLL_SEL_1 0x3U
+#define V_TX_LANE_PLL_SEL_1(x) ((x) << S_TX_LANE_PLL_SEL_1)
+#define G_TX_LANE_PLL_SEL_1(x) (((x) >> S_TX_LANE_PLL_SEL_1) & M_TX_LANE_PLL_SEL_1)
+
+#define S_TX_LANE_PLL_SEL_0 24
+#define M_TX_LANE_PLL_SEL_0 0x3U
+#define V_TX_LANE_PLL_SEL_0(x) ((x) << S_TX_LANE_PLL_SEL_0)
+#define G_TX_LANE_PLL_SEL_0(x) (((x) >> S_TX_LANE_PLL_SEL_0) & M_TX_LANE_PLL_SEL_0)
+
+#define S_HSSPLLLOCKB_HSS3 7
+#define V_HSSPLLLOCKB_HSS3(x) ((x) << S_HSSPLLLOCKB_HSS3)
+#define F_HSSPLLLOCKB_HSS3 V_HSSPLLLOCKB_HSS3(1U)
+
+#define S_HSSPLLLOCKA_HSS3 6
+#define V_HSSPLLLOCKA_HSS3(x) ((x) << S_HSSPLLLOCKA_HSS3)
+#define F_HSSPLLLOCKA_HSS3 V_HSSPLLLOCKA_HSS3(1U)
+
+#define S_HSSPLLLOCKB_HSS2 5
+#define V_HSSPLLLOCKB_HSS2(x) ((x) << S_HSSPLLLOCKB_HSS2)
+#define F_HSSPLLLOCKB_HSS2 V_HSSPLLLOCKB_HSS2(1U)
+
+#define S_HSSPLLLOCKA_HSS2 4
+#define V_HSSPLLLOCKA_HSS2(x) ((x) << S_HSSPLLLOCKA_HSS2)
+#define F_HSSPLLLOCKA_HSS2 V_HSSPLLLOCKA_HSS2(1U)
+
+#define S_HSSPLLLOCKB_HSS1 3
+#define V_HSSPLLLOCKB_HSS1(x) ((x) << S_HSSPLLLOCKB_HSS1)
+#define F_HSSPLLLOCKB_HSS1 V_HSSPLLLOCKB_HSS1(1U)
+
+#define S_HSSPLLLOCKA_HSS1 2
+#define V_HSSPLLLOCKA_HSS1(x) ((x) << S_HSSPLLLOCKA_HSS1)
+#define F_HSSPLLLOCKA_HSS1 V_HSSPLLLOCKA_HSS1(1U)
+
+#define S_HSSPLLLOCKB_HSS0 1
+#define V_HSSPLLLOCKB_HSS0(x) ((x) << S_HSSPLLLOCKB_HSS0)
+#define F_HSSPLLLOCKB_HSS0 V_HSSPLLLOCKB_HSS0(1U)
+
+#define S_HSSPLLLOCKA_HSS0 0
+#define V_HSSPLLLOCKA_HSS0(x) ((x) << S_HSSPLLLOCKA_HSS0)
+#define F_HSSPLLLOCKA_HSS0 V_HSSPLLLOCKA_HSS0(1U)
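
Because each F_ macro is itself the field's mask, lock status can be tested with a plain AND. A polling sketch for the two HSS0 PLL lock bits, assuming t4_read_reg() and the kernel's DELAY() busy-wait; the retry count and 10us cadence are illustrative only:

	/* Wait for both HSS0 PLLs in A_MAC_HSS_STATUS to report lock. */
	static int
	wait_hss0_plls(struct adapter *sc, int tries)
	{
		const uint32_t want = F_HSSPLLLOCKA_HSS0 | F_HSSPLLLOCKB_HSS0;

		while (tries-- > 0) {
			if ((t4_read_reg(sc, A_MAC_HSS_STATUS) & want) == want)
				return (0);
			DELAY(10);
		}
		return (ETIMEDOUT);
	}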
+
+#define A_MAC_HSS_SIGDET_STATUS 0x380b4
+
+#define S_HSS3_SIGDET 6
+#define M_HSS3_SIGDET 0x3U
+#define V_HSS3_SIGDET(x) ((x) << S_HSS3_SIGDET)
+#define G_HSS3_SIGDET(x) (((x) >> S_HSS3_SIGDET) & M_HSS3_SIGDET)
+
+#define S_HSS2_SIGDET 4
+#define M_HSS2_SIGDET 0x3U
+#define V_HSS2_SIGDET(x) ((x) << S_HSS2_SIGDET)
+#define G_HSS2_SIGDET(x) (((x) >> S_HSS2_SIGDET) & M_HSS2_SIGDET)
+
+#define S_HSS1_SIGDET 2
+#define M_HSS1_SIGDET 0x3U
+#define V_HSS1_SIGDET(x) ((x) << S_HSS1_SIGDET)
+#define G_HSS1_SIGDET(x) (((x) >> S_HSS1_SIGDET) & M_HSS1_SIGDET)
+
+#define S_HSS0_SIGDET 0
+#define M_HSS0_SIGDET 0x3U
+#define V_HSS0_SIGDET(x) ((x) << S_HSS0_SIGDET)
+#define G_HSS0_SIGDET(x) (((x) >> S_HSS0_SIGDET) & M_HSS0_SIGDET)
+
+#define A_MAC_FPGA_CFG_0 0x380b8
+#define A_MAC_PMD_STATUS 0x380bc
+
+#define S_SIGNAL_DETECT 0
+#define M_SIGNAL_DETECT 0xffU
+#define V_SIGNAL_DETECT(x) ((x) << S_SIGNAL_DETECT)
+#define G_SIGNAL_DETECT(x) (((x) >> S_SIGNAL_DETECT) & M_SIGNAL_DETECT)
+
+#define A_MAC_PMD_AN_CONFIG0 0x380c0
+
+#define S_AN3_RATE_SELECT 25
+#define M_AN3_RATE_SELECT 0x1fU
+#define V_AN3_RATE_SELECT(x) ((x) << S_AN3_RATE_SELECT)
+#define G_AN3_RATE_SELECT(x) (((x) >> S_AN3_RATE_SELECT) & M_AN3_RATE_SELECT)
+
+#define S_AN3_STATUS 24
+#define V_AN3_STATUS(x) ((x) << S_AN3_STATUS)
+#define F_AN3_STATUS V_AN3_STATUS(1U)
+
+#define S_AN2_RATE_SELECT 17
+#define M_AN2_RATE_SELECT 0x1fU
+#define V_AN2_RATE_SELECT(x) ((x) << S_AN2_RATE_SELECT)
+#define G_AN2_RATE_SELECT(x) (((x) >> S_AN2_RATE_SELECT) & M_AN2_RATE_SELECT)
+
+#define S_AN2_STATUS 16
+#define V_AN2_STATUS(x) ((x) << S_AN2_STATUS)
+#define F_AN2_STATUS V_AN2_STATUS(1U)
+
+#define S_AN1_RATE_SELECT 9
+#define M_AN1_RATE_SELECT 0x1fU
+#define V_AN1_RATE_SELECT(x) ((x) << S_AN1_RATE_SELECT)
+#define G_AN1_RATE_SELECT(x) (((x) >> S_AN1_RATE_SELECT) & M_AN1_RATE_SELECT)
+
+#define S_AN1_STATUS 8
+#define V_AN1_STATUS(x) ((x) << S_AN1_STATUS)
+#define F_AN1_STATUS V_AN1_STATUS(1U)
+
+#define S_AN0_RATE_SELECT 1
+#define M_AN0_RATE_SELECT 0x1fU
+#define V_AN0_RATE_SELECT(x) ((x) << S_AN0_RATE_SELECT)
+#define G_AN0_RATE_SELECT(x) (((x) >> S_AN0_RATE_SELECT) & M_AN0_RATE_SELECT)
+
+#define S_AN0_STATUS 0
+#define V_AN0_STATUS(x) ((x) << S_AN0_STATUS)
+#define F_AN0_STATUS V_AN0_STATUS(1U)
+
+#define A_MAC_PMD_AN_CONFIG1 0x380c4
+
+#define S_AN7_RATE_SELECT 25
+#define M_AN7_RATE_SELECT 0x1fU
+#define V_AN7_RATE_SELECT(x) ((x) << S_AN7_RATE_SELECT)
+#define G_AN7_RATE_SELECT(x) (((x) >> S_AN7_RATE_SELECT) & M_AN7_RATE_SELECT)
+
+#define S_AN7_STATUS 24
+#define V_AN7_STATUS(x) ((x) << S_AN7_STATUS)
+#define F_AN7_STATUS V_AN7_STATUS(1U)
+
+#define S_AN6_RATE_SELECT 17
+#define M_AN6_RATE_SELECT 0x1fU
+#define V_AN6_RATE_SELECT(x) ((x) << S_AN6_RATE_SELECT)
+#define G_AN6_RATE_SELECT(x) (((x) >> S_AN6_RATE_SELECT) & M_AN6_RATE_SELECT)
+
+#define S_AN6_STATUS 16
+#define V_AN6_STATUS(x) ((x) << S_AN6_STATUS)
+#define F_AN6_STATUS V_AN6_STATUS(1U)
+
+#define S_AN5_RATE_SELECT 9
+#define M_AN5_RATE_SELECT 0x1fU
+#define V_AN5_RATE_SELECT(x) ((x) << S_AN5_RATE_SELECT)
+#define G_AN5_RATE_SELECT(x) (((x) >> S_AN5_RATE_SELECT) & M_AN5_RATE_SELECT)
+
+#define S_AN5_STATUS 8
+#define V_AN5_STATUS(x) ((x) << S_AN5_STATUS)
+#define F_AN5_STATUS V_AN5_STATUS(1U)
+
+#define S_AN4_RATE_SELECT 1
+#define M_AN4_RATE_SELECT 0x1fU
+#define V_AN4_RATE_SELECT(x) ((x) << S_AN4_RATE_SELECT)
+#define G_AN4_RATE_SELECT(x) (((x) >> S_AN4_RATE_SELECT) & M_AN4_RATE_SELECT)
+
+#define S_AN4_STATUS 0
+#define V_AN4_STATUS(x) ((x) << S_AN4_STATUS)
+#define F_AN4_STATUS V_AN4_STATUS(1U)
+
+#define A_MAC_INT_EN_CMN 0x380c8
+
+#define S_HSS3PLL1_LOCK_LOST_INT_EN 21
+#define V_HSS3PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_EN)
+#define F_HSS3PLL1_LOCK_LOST_INT_EN V_HSS3PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL1_LOCK_INT_EN 20
+#define V_HSS3PLL1_LOCK_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_INT_EN)
+#define F_HSS3PLL1_LOCK_INT_EN V_HSS3PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_EN 19
+#define V_HSS3PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_EN)
+#define F_HSS3PLL0_LOCK_LOST_INT_EN V_HSS3PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_INT_EN 18
+#define V_HSS3PLL0_LOCK_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_INT_EN)
+#define F_HSS3PLL0_LOCK_INT_EN V_HSS3PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_INT_EN 17
+#define V_HSS2PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_LOST_INT_EN)
+#define F_HSS2PLL1_LOCK_LOST_INT_EN V_HSS2PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_INT_EN 16
+#define V_HSS2PLL1_LOCK_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_INT_EN)
+#define F_HSS2PLL1_LOCK_INT_EN V_HSS2PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_EN 15
+#define V_HSS2PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_EN)
+#define F_HSS2PLL0_LOCK_LOST_INT_EN V_HSS2PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_INT_EN 14
+#define V_HSS2PLL0_LOCK_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_INT_EN)
+#define F_HSS2PLL0_LOCK_INT_EN V_HSS2PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_EN 13
+#define V_HSS1PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_EN)
+#define F_HSS1PLL1_LOCK_LOST_INT_EN V_HSS1PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_INT_EN 12
+#define V_HSS1PLL1_LOCK_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_INT_EN)
+#define F_HSS1PLL1_LOCK_INT_EN V_HSS1PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_EN 11
+#define V_HSS1PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_EN)
+#define F_HSS1PLL0_LOCK_LOST_INT_EN V_HSS1PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_INT_EN 10
+#define V_HSS1PLL0_LOCK_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_INT_EN)
+#define F_HSS1PLL0_LOCK_INT_EN V_HSS1PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_EN 9
+#define V_HSS0PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_EN)
+#define F_HSS0PLL1_LOCK_LOST_INT_EN V_HSS0PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_INT_EN 8
+#define V_HSS0PLL1_LOCK_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_INT_EN)
+#define F_HSS0PLL1_LOCK_INT_EN V_HSS0PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_EN 7
+#define V_HSS0PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_EN)
+#define F_HSS0PLL0_LOCK_LOST_INT_EN V_HSS0PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_INT_EN 6
+#define V_HSS0PLL0_LOCK_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_INT_EN)
+#define F_HSS0PLL0_LOCK_INT_EN V_HSS0PLL0_LOCK_INT_EN(1U)
+
+#define S_FLOCK_ASSERTED 5
+#define V_FLOCK_ASSERTED(x) ((x) << S_FLOCK_ASSERTED)
+#define F_FLOCK_ASSERTED V_FLOCK_ASSERTED(1U)
+
+#define S_FLOCK_LOST 4
+#define V_FLOCK_LOST(x) ((x) << S_FLOCK_LOST)
+#define F_FLOCK_LOST V_FLOCK_LOST(1U)
+
+#define S_PHASE_LOCK_ASSERTED 3
+#define V_PHASE_LOCK_ASSERTED(x) ((x) << S_PHASE_LOCK_ASSERTED)
+#define F_PHASE_LOCK_ASSERTED V_PHASE_LOCK_ASSERTED(1U)
+
+#define S_PHASE_LOCK_LOST 2
+#define V_PHASE_LOCK_LOST(x) ((x) << S_PHASE_LOCK_LOST)
+#define F_PHASE_LOCK_LOST V_PHASE_LOCK_LOST(1U)
+
+#define S_LOCK_ASSERTED 1
+#define V_LOCK_ASSERTED(x) ((x) << S_LOCK_ASSERTED)
+#define F_LOCK_ASSERTED V_LOCK_ASSERTED(1U)
+
+#define S_LOCK_LOST 0
+#define V_LOCK_LOST(x) ((x) << S_LOCK_LOST)
+#define F_LOCK_LOST V_LOCK_LOST(1U)
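
The enable register and the cause register below share a bit layout, so an interrupt set is built by OR-ing the F_*_INT_EN flags together and written in one shot. A sketch, assuming t4_write_reg() is the matching 32-bit write helper; which events a driver actually unmasks is policy and not prescribed here:

	/* Unmask lock/lock-lost interrupts for HSS0 PLL0 only. */
	static void
	mac_enable_hss0_pll0_intr(struct adapter *sc)
	{
		t4_write_reg(sc, A_MAC_INT_EN_CMN,
		    F_HSS0PLL0_LOCK_INT_EN | F_HSS0PLL0_LOCK_LOST_INT_EN);
	}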
+
+#define A_MAC_INT_CAUSE_CMN 0x380cc
+
+#define S_HSS3PLL1_LOCK_LOST_INT_CAUSE 21
+#define V_HSS3PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_LOST_INT_CAUSE V_HSS3PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL1_LOCK_INT_CAUSE 20
+#define V_HSS3PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_INT_CAUSE V_HSS3PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_CAUSE 19
+#define V_HSS3PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_LOST_INT_CAUSE V_HSS3PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_INT_CAUSE 18
+#define V_HSS3PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_INT_CAUSE V_HSS3PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_INT_CAUSE 17
+#define V_HSS2PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS2PLL1_LOCK_LOST_INT_CAUSE V_HSS2PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS2PLL1_LOCK_INT_CAUSE 16
+#define V_HSS2PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_INT_CAUSE)
+#define F_HSS2PLL1_LOCK_INT_CAUSE V_HSS2PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_CAUSE 15
+#define V_HSS2PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_LOST_INT_CAUSE V_HSS2PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_INT_CAUSE 14
+#define V_HSS2PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_INT_CAUSE V_HSS2PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_CAUSE 13
+#define V_HSS1PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_LOST_INT_CAUSE V_HSS1PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_INT_CAUSE 12
+#define V_HSS1PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_INT_CAUSE V_HSS1PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_CAUSE 11
+#define V_HSS1PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_LOST_INT_CAUSE V_HSS1PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_INT_CAUSE 10
+#define V_HSS1PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_INT_CAUSE V_HSS1PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_CAUSE 9
+#define V_HSS0PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_LOST_INT_CAUSE V_HSS0PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_INT_CAUSE 8
+#define V_HSS0PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_INT_CAUSE V_HSS0PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_CAUSE 7
+#define V_HSS0PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_LOST_INT_CAUSE V_HSS0PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_INT_CAUSE 6
+#define V_HSS0PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_INT_CAUSE V_HSS0PLL0_LOCK_INT_CAUSE(1U)
+
+#define A_MAC_PERR_INT_EN_MTIP 0x380d0
+
+#define S_PERR_MAC0_TX 19
+#define V_PERR_MAC0_TX(x) ((x) << S_PERR_MAC0_TX)
+#define F_PERR_MAC0_TX V_PERR_MAC0_TX(1U)
+
+#define S_PERR_MAC1_TX 18
+#define V_PERR_MAC1_TX(x) ((x) << S_PERR_MAC1_TX)
+#define F_PERR_MAC1_TX V_PERR_MAC1_TX(1U)
+
+#define S_PERR_MAC2_TX 17
+#define V_PERR_MAC2_TX(x) ((x) << S_PERR_MAC2_TX)
+#define F_PERR_MAC2_TX V_PERR_MAC2_TX(1U)
+
+#define S_PERR_MAC3_TX 16
+#define V_PERR_MAC3_TX(x) ((x) << S_PERR_MAC3_TX)
+#define F_PERR_MAC3_TX V_PERR_MAC3_TX(1U)
+
+#define S_PERR_MAC4_TX 15
+#define V_PERR_MAC4_TX(x) ((x) << S_PERR_MAC4_TX)
+#define F_PERR_MAC4_TX V_PERR_MAC4_TX(1U)
+
+#define S_PERR_MAC5_TX 14
+#define V_PERR_MAC5_TX(x) ((x) << S_PERR_MAC5_TX)
+#define F_PERR_MAC5_TX V_PERR_MAC5_TX(1U)
+
+#define S_PERR_MAC0_RX 13
+#define V_PERR_MAC0_RX(x) ((x) << S_PERR_MAC0_RX)
+#define F_PERR_MAC0_RX V_PERR_MAC0_RX(1U)
+
+#define S_PERR_MAC1_RX 12
+#define V_PERR_MAC1_RX(x) ((x) << S_PERR_MAC1_RX)
+#define F_PERR_MAC1_RX V_PERR_MAC1_RX(1U)
+
+#define S_PERR_MAC2_RX 11
+#define V_PERR_MAC2_RX(x) ((x) << S_PERR_MAC2_RX)
+#define F_PERR_MAC2_RX V_PERR_MAC2_RX(1U)
+
+#define S_PERR_MAC3_RX 10
+#define V_PERR_MAC3_RX(x) ((x) << S_PERR_MAC3_RX)
+#define F_PERR_MAC3_RX V_PERR_MAC3_RX(1U)
+
+#define S_PERR_MAC4_RX 9
+#define V_PERR_MAC4_RX(x) ((x) << S_PERR_MAC4_RX)
+#define F_PERR_MAC4_RX V_PERR_MAC4_RX(1U)
+
+#define S_PERR_MAC5_RX 8
+#define V_PERR_MAC5_RX(x) ((x) << S_PERR_MAC5_RX)
+#define F_PERR_MAC5_RX V_PERR_MAC5_RX(1U)
+
+#define S_PERR_MAC_STAT2_RX 7
+#define V_PERR_MAC_STAT2_RX(x) ((x) << S_PERR_MAC_STAT2_RX)
+#define F_PERR_MAC_STAT2_RX V_PERR_MAC_STAT2_RX(1U)
+
+#define S_PERR_MAC_STAT3_RX 6
+#define V_PERR_MAC_STAT3_RX(x) ((x) << S_PERR_MAC_STAT3_RX)
+#define F_PERR_MAC_STAT3_RX V_PERR_MAC_STAT3_RX(1U)
+
+#define S_PERR_MAC_STAT4_RX 5
+#define V_PERR_MAC_STAT4_RX(x) ((x) << S_PERR_MAC_STAT4_RX)
+#define F_PERR_MAC_STAT4_RX V_PERR_MAC_STAT4_RX(1U)
+
+#define S_PERR_MAC_STAT5_RX 4
+#define V_PERR_MAC_STAT5_RX(x) ((x) << S_PERR_MAC_STAT5_RX)
+#define F_PERR_MAC_STAT5_RX V_PERR_MAC_STAT5_RX(1U)
+
+#define S_PERR_MAC_STAT2_TX 3
+#define V_PERR_MAC_STAT2_TX(x) ((x) << S_PERR_MAC_STAT2_TX)
+#define F_PERR_MAC_STAT2_TX V_PERR_MAC_STAT2_TX(1U)
+
+#define S_PERR_MAC_STAT3_TX 2
+#define V_PERR_MAC_STAT3_TX(x) ((x) << S_PERR_MAC_STAT3_TX)
+#define F_PERR_MAC_STAT3_TX V_PERR_MAC_STAT3_TX(1U)
+
+#define S_PERR_MAC_STAT4_TX 1
+#define V_PERR_MAC_STAT4_TX(x) ((x) << S_PERR_MAC_STAT4_TX)
+#define F_PERR_MAC_STAT4_TX V_PERR_MAC_STAT4_TX(1U)
+
+#define S_PERR_MAC_STAT5_TX 0
+#define V_PERR_MAC_STAT5_TX(x) ((x) << S_PERR_MAC_STAT5_TX)
+#define F_PERR_MAC_STAT5_TX V_PERR_MAC_STAT5_TX(1U)
+
+#define A_MAC_PERR_INT_CAUSE_MTIP 0x380d4
+
+#define S_PERR_MAC_STAT_RX 7
+#define V_PERR_MAC_STAT_RX(x) ((x) << S_PERR_MAC_STAT_RX)
+#define F_PERR_MAC_STAT_RX V_PERR_MAC_STAT_RX(1U)
+
+#define S_PERR_MAC_STAT_TX 3
+#define V_PERR_MAC_STAT_TX(x) ((x) << S_PERR_MAC_STAT_TX)
+#define F_PERR_MAC_STAT_TX V_PERR_MAC_STAT_TX(1U)
+
+#define S_PERR_MAC_STAT_CAP 2
+#define V_PERR_MAC_STAT_CAP(x) ((x) << S_PERR_MAC_STAT_CAP)
+#define F_PERR_MAC_STAT_CAP V_PERR_MAC_STAT_CAP(1U)
+
+#define A_MAC_PERR_ENABLE_MTIP 0x380d8
+#define A_MAC_PCS_1G_CONFIG_0 0x380dc
+
+#define S_SEQ_ENA_3 19
+#define V_SEQ_ENA_3(x) ((x) << S_SEQ_ENA_3)
+#define F_SEQ_ENA_3 V_SEQ_ENA_3(1U)
+
+#define S_SEQ_ENA_2 18
+#define V_SEQ_ENA_2(x) ((x) << S_SEQ_ENA_2)
+#define F_SEQ_ENA_2 V_SEQ_ENA_2(1U)
+
+#define S_SEQ_ENA_1 17
+#define V_SEQ_ENA_1(x) ((x) << S_SEQ_ENA_1)
+#define F_SEQ_ENA_1 V_SEQ_ENA_1(1U)
+
+#define S_SEQ_ENA_0 16
+#define V_SEQ_ENA_0(x) ((x) << S_SEQ_ENA_0)
+#define F_SEQ_ENA_0 V_SEQ_ENA_0(1U)
+
+#define S_TX_LANE_THRESH_3 12
+#define M_TX_LANE_THRESH_3 0xfU
+#define V_TX_LANE_THRESH_3(x) ((x) << S_TX_LANE_THRESH_3)
+#define G_TX_LANE_THRESH_3(x) (((x) >> S_TX_LANE_THRESH_3) & M_TX_LANE_THRESH_3)
+
+#define S_TX_LANE_THRESH_2 8
+#define M_TX_LANE_THRESH_2 0xfU
+#define V_TX_LANE_THRESH_2(x) ((x) << S_TX_LANE_THRESH_2)
+#define G_TX_LANE_THRESH_2(x) (((x) >> S_TX_LANE_THRESH_2) & M_TX_LANE_THRESH_2)
+
+#define S_TX_LANE_THRESH_1 4
+#define M_TX_LANE_THRESH_1 0xfU
+#define V_TX_LANE_THRESH_1(x) ((x) << S_TX_LANE_THRESH_1)
+#define G_TX_LANE_THRESH_1(x) (((x) >> S_TX_LANE_THRESH_1) & M_TX_LANE_THRESH_1)
+
+#define S_TX_LANE_THRESH_0 0
+#define M_TX_LANE_THRESH_0 0xfU
+#define V_TX_LANE_THRESH_0(x) ((x) << S_TX_LANE_THRESH_0)
+#define G_TX_LANE_THRESH_0(x) (((x) >> S_TX_LANE_THRESH_0) & M_TX_LANE_THRESH_0)
+
+#define A_MAC_PCS_1G_CONFIG_1 0x380e0
+
+#define S_TX_LANE_CKMULT_3 9
+#define M_TX_LANE_CKMULT_3 0x7U
+#define V_TX_LANE_CKMULT_3(x) ((x) << S_TX_LANE_CKMULT_3)
+#define G_TX_LANE_CKMULT_3(x) (((x) >> S_TX_LANE_CKMULT_3) & M_TX_LANE_CKMULT_3)
+
+#define S_TX_LANE_CKMULT_2 6
+#define M_TX_LANE_CKMULT_2 0x7U
+#define V_TX_LANE_CKMULT_2(x) ((x) << S_TX_LANE_CKMULT_2)
+#define G_TX_LANE_CKMULT_2(x) (((x) >> S_TX_LANE_CKMULT_2) & M_TX_LANE_CKMULT_2)
+
+#define S_TX_LANE_CKMULT_1 3
+#define M_TX_LANE_CKMULT_1 0x7U
+#define V_TX_LANE_CKMULT_1(x) ((x) << S_TX_LANE_CKMULT_1)
+#define G_TX_LANE_CKMULT_1(x) (((x) >> S_TX_LANE_CKMULT_1) & M_TX_LANE_CKMULT_1)
+
+#define S_TX_LANE_CKMULT_0 0
+#define M_TX_LANE_CKMULT_0 0x7U
+#define V_TX_LANE_CKMULT_0(x) ((x) << S_TX_LANE_CKMULT_0)
+#define G_TX_LANE_CKMULT_0(x) (((x) >> S_TX_LANE_CKMULT_0) & M_TX_LANE_CKMULT_0)
+
+#define A_MAC_PTP_TIMER_RD0_LO 0x380e4
+#define A_MAC_PTP_TIMER_RD0_HI 0x380e8
+#define A_MAC_PTP_TIMER_RD1_LO 0x380ec
+#define A_MAC_PTP_TIMER_RD1_HI 0x380f0
+#define A_MAC_PTP_TIMER_WR_LO 0x380f4
+#define A_MAC_PTP_TIMER_WR_HI 0x380f8
+#define A_MAC_PTP_TIMER_OFFSET_0 0x380fc
+#define A_MAC_PTP_TIMER_OFFSET_1 0x38100
+#define A_MAC_PTP_TIMER_OFFSET_2 0x38104
+#define A_MAC_PTP_SUM_LO 0x38108
+#define A_MAC_PTP_SUM_HI 0x3810c
+#define A_MAC_PTP_TIMER_INCR0 0x38110
+#define A_MAC_PTP_TIMER_INCR1 0x38114
+#define A_MAC_PTP_DRIFT_ADJUST_COUNT 0x38118
+#define A_MAC_PTP_OFFSET_ADJUST_FINE 0x3811c
+#define A_MAC_PTP_OFFSET_ADJUST_TOTAL 0x38120
+#define A_MAC_PTP_CFG 0x38124
+#define A_MAC_PTP_PPS 0x38128
+#define A_MAC_PTP_SINGLE_ALARM 0x3812c
+#define A_MAC_PTP_PERIODIC_ALARM 0x38130
+#define A_MAC_PTP_STATUS 0x38134
+#define A_MAC_STS_GPIO_SEL 0x38140
+
+#define S_STSOUTSEL 1
+#define V_STSOUTSEL(x) ((x) << S_STSOUTSEL)
+#define F_STSOUTSEL V_STSOUTSEL(1U)
+
+#define S_STSINSEL 0
+#define V_STSINSEL(x) ((x) << S_STSINSEL)
+#define F_STSINSEL V_STSINSEL(1U)
+
+#define A_MAC_CERR_INT_EN_MTIP 0x38150
+
+#define S_CERR_MAC0_TX 11
+#define V_CERR_MAC0_TX(x) ((x) << S_CERR_MAC0_TX)
+#define F_CERR_MAC0_TX V_CERR_MAC0_TX(1U)
+
+#define S_CERR_MAC1_TX 10
+#define V_CERR_MAC1_TX(x) ((x) << S_CERR_MAC1_TX)
+#define F_CERR_MAC1_TX V_CERR_MAC1_TX(1U)
+
+#define S_CERR_MAC2_TX 9
+#define V_CERR_MAC2_TX(x) ((x) << S_CERR_MAC2_TX)
+#define F_CERR_MAC2_TX V_CERR_MAC2_TX(1U)
+
+#define S_CERR_MAC3_TX 8
+#define V_CERR_MAC3_TX(x) ((x) << S_CERR_MAC3_TX)
+#define F_CERR_MAC3_TX V_CERR_MAC3_TX(1U)
+
+#define S_CERR_MAC4_TX 7
+#define V_CERR_MAC4_TX(x) ((x) << S_CERR_MAC4_TX)
+#define F_CERR_MAC4_TX V_CERR_MAC4_TX(1U)
+
+#define S_CERR_MAC5_TX 6
+#define V_CERR_MAC5_TX(x) ((x) << S_CERR_MAC5_TX)
+#define F_CERR_MAC5_TX V_CERR_MAC5_TX(1U)
+
+#define S_CERR_MAC0_RX 5
+#define V_CERR_MAC0_RX(x) ((x) << S_CERR_MAC0_RX)
+#define F_CERR_MAC0_RX V_CERR_MAC0_RX(1U)
+
+#define S_CERR_MAC1_RX 4
+#define V_CERR_MAC1_RX(x) ((x) << S_CERR_MAC1_RX)
+#define F_CERR_MAC1_RX V_CERR_MAC1_RX(1U)
+
+#define S_CERR_MAC2_RX 3
+#define V_CERR_MAC2_RX(x) ((x) << S_CERR_MAC2_RX)
+#define F_CERR_MAC2_RX V_CERR_MAC2_RX(1U)
+
+#define S_CERR_MAC3_RX 2
+#define V_CERR_MAC3_RX(x) ((x) << S_CERR_MAC3_RX)
+#define F_CERR_MAC3_RX V_CERR_MAC3_RX(1U)
+
+#define S_CERR_MAC4_RX 1
+#define V_CERR_MAC4_RX(x) ((x) << S_CERR_MAC4_RX)
+#define F_CERR_MAC4_RX V_CERR_MAC4_RX(1U)
+
+#define S_CERR_MAC5_RX 0
+#define V_CERR_MAC5_RX(x) ((x) << S_CERR_MAC5_RX)
+#define F_CERR_MAC5_RX V_CERR_MAC5_RX(1U)
+
+#define A_MAC_CERR_INT_CAUSE_MTIP 0x38154
+#define A_MAC_1G_PCS0_STATUS 0x38160
+
+#define S_1G_PCS0_LOOPBACK 12
+#define V_1G_PCS0_LOOPBACK(x) ((x) << S_1G_PCS0_LOOPBACK)
+#define F_1G_PCS0_LOOPBACK V_1G_PCS0_LOOPBACK(1U)
+
+#define S_1G_PCS0_LINK_STATUS 11
+#define V_1G_PCS0_LINK_STATUS(x) ((x) << S_1G_PCS0_LINK_STATUS)
+#define F_1G_PCS0_LINK_STATUS V_1G_PCS0_LINK_STATUS(1U)
+
+#define S_1G_PCS0_RX_SYNC 10
+#define V_1G_PCS0_RX_SYNC(x) ((x) << S_1G_PCS0_RX_SYNC)
+#define F_1G_PCS0_RX_SYNC V_1G_PCS0_RX_SYNC(1U)
+
+#define S_1G_PCS0_AN_DONE 9
+#define V_1G_PCS0_AN_DONE(x) ((x) << S_1G_PCS0_AN_DONE)
+#define F_1G_PCS0_AN_DONE V_1G_PCS0_AN_DONE(1U)
+
+#define S_1G_PCS0_PGRCVD 8
+#define V_1G_PCS0_PGRCVD(x) ((x) << S_1G_PCS0_PGRCVD)
+#define F_1G_PCS0_PGRCVD V_1G_PCS0_PGRCVD(1U)
+
+#define S_1G_PCS0_SPEED_SEL 6
+#define M_1G_PCS0_SPEED_SEL 0x3U
+#define V_1G_PCS0_SPEED_SEL(x) ((x) << S_1G_PCS0_SPEED_SEL)
+#define G_1G_PCS0_SPEED_SEL(x) (((x) >> S_1G_PCS0_SPEED_SEL) & M_1G_PCS0_SPEED_SEL)
+
+#define S_1G_PCS0_HALF_DUPLEX 5
+#define V_1G_PCS0_HALF_DUPLEX(x) ((x) << S_1G_PCS0_HALF_DUPLEX)
+#define F_1G_PCS0_HALF_DUPLEX V_1G_PCS0_HALF_DUPLEX(1U)
+
+#define S_1G_PCS0_TX_MODE_QUIET 4
+#define V_1G_PCS0_TX_MODE_QUIET(x) ((x) << S_1G_PCS0_TX_MODE_QUIET)
+#define F_1G_PCS0_TX_MODE_QUIET V_1G_PCS0_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_TX_LPI_ACTIVE 3
+#define V_1G_PCS0_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_TX_LPI_ACTIVE)
+#define F_1G_PCS0_TX_LPI_ACTIVE V_1G_PCS0_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_MODE_QUIET 2
+#define V_1G_PCS0_RX_MODE_QUIET(x) ((x) << S_1G_PCS0_RX_MODE_QUIET)
+#define F_1G_PCS0_RX_MODE_QUIET V_1G_PCS0_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_RX_LPI_ACTIVE 1
+#define V_1G_PCS0_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_RX_LPI_ACTIVE)
+#define F_1G_PCS0_RX_LPI_ACTIVE V_1G_PCS0_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_WAKE_ERR 0
+#define V_1G_PCS0_RX_WAKE_ERR(x) ((x) << S_1G_PCS0_RX_WAKE_ERR)
+#define F_1G_PCS0_RX_WAKE_ERR V_1G_PCS0_RX_WAKE_ERR(1U)
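
The four 1G PCS status registers are laid out identically at a 4-byte stride (0x38160 through 0x3816c), so the PCS0 field macros can serve every instance. A per-instance link check sketch under that assumption:

	/* Link status for 1G PCS instance i (0..3); layouts are identical. */
	static bool
	mac_1g_pcs_link_up(struct adapter *sc, int i)
	{
		uint32_t v = t4_read_reg(sc, A_MAC_1G_PCS0_STATUS + 4 * i);

		return ((v & F_1G_PCS0_LINK_STATUS) != 0);
	}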
+
+#define A_MAC_1G_PCS1_STATUS 0x38164
+
+#define S_1G_PCS1_LOOPBACK 12
+#define V_1G_PCS1_LOOPBACK(x) ((x) << S_1G_PCS1_LOOPBACK)
+#define F_1G_PCS1_LOOPBACK V_1G_PCS1_LOOPBACK(1U)
+
+#define S_1G_PCS1_LINK_STATUS 11
+#define V_1G_PCS1_LINK_STATUS(x) ((x) << S_1G_PCS1_LINK_STATUS)
+#define F_1G_PCS1_LINK_STATUS V_1G_PCS1_LINK_STATUS(1U)
+
+#define S_1G_PCS1_RX_SYNC 10
+#define V_1G_PCS1_RX_SYNC(x) ((x) << S_1G_PCS1_RX_SYNC)
+#define F_1G_PCS1_RX_SYNC V_1G_PCS1_RX_SYNC(1U)
+
+#define S_1G_PCS1_AN_DONE 9
+#define V_1G_PCS1_AN_DONE(x) ((x) << S_1G_PCS1_AN_DONE)
+#define F_1G_PCS1_AN_DONE V_1G_PCS1_AN_DONE(1U)
+
+#define S_1G_PCS1_PGRCVD 8
+#define V_1G_PCS1_PGRCVD(x) ((x) << S_1G_PCS1_PGRCVD)
+#define F_1G_PCS1_PGRCVD V_1G_PCS1_PGRCVD(1U)
+
+#define S_1G_PCS1_SPEED_SEL 6
+#define M_1G_PCS1_SPEED_SEL 0x3U
+#define V_1G_PCS1_SPEED_SEL(x) ((x) << S_1G_PCS1_SPEED_SEL)
+#define G_1G_PCS1_SPEED_SEL(x) (((x) >> S_1G_PCS1_SPEED_SEL) & M_1G_PCS1_SPEED_SEL)
+
+#define S_1G_PCS1_HALF_DUPLEX 5
+#define V_1G_PCS1_HALF_DUPLEX(x) ((x) << S_1G_PCS1_HALF_DUPLEX)
+#define F_1G_PCS1_HALF_DUPLEX V_1G_PCS1_HALF_DUPLEX(1U)
+
+#define S_1G_PCS1_TX_MODE_QUIET 4
+#define V_1G_PCS1_TX_MODE_QUIET(x) ((x) << S_1G_PCS1_TX_MODE_QUIET)
+#define F_1G_PCS1_TX_MODE_QUIET V_1G_PCS1_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_TX_LPI_ACTIVE 3
+#define V_1G_PCS1_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_TX_LPI_ACTIVE)
+#define F_1G_PCS1_TX_LPI_ACTIVE V_1G_PCS1_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_MODE_QUIET 2
+#define V_1G_PCS1_RX_MODE_QUIET(x) ((x) << S_1G_PCS1_RX_MODE_QUIET)
+#define F_1G_PCS1_RX_MODE_QUIET V_1G_PCS1_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_RX_LPI_ACTIVE 1
+#define V_1G_PCS1_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_RX_LPI_ACTIVE)
+#define F_1G_PCS1_RX_LPI_ACTIVE V_1G_PCS1_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_WAKE_ERR 0
+#define V_1G_PCS1_RX_WAKE_ERR(x) ((x) << S_1G_PCS1_RX_WAKE_ERR)
+#define F_1G_PCS1_RX_WAKE_ERR V_1G_PCS1_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS2_STATUS 0x38168
+
+#define S_1G_PCS2_LOOPBACK 12
+#define V_1G_PCS2_LOOPBACK(x) ((x) << S_1G_PCS2_LOOPBACK)
+#define F_1G_PCS2_LOOPBACK V_1G_PCS2_LOOPBACK(1U)
+
+#define S_1G_PCS2_LINK_STATUS 11
+#define V_1G_PCS2_LINK_STATUS(x) ((x) << S_1G_PCS2_LINK_STATUS)
+#define F_1G_PCS2_LINK_STATUS V_1G_PCS2_LINK_STATUS(1U)
+
+#define S_1G_PCS2_RX_SYNC 10
+#define V_1G_PCS2_RX_SYNC(x) ((x) << S_1G_PCS2_RX_SYNC)
+#define F_1G_PCS2_RX_SYNC V_1G_PCS2_RX_SYNC(1U)
+
+#define S_1G_PCS2_AN_DONE 9
+#define V_1G_PCS2_AN_DONE(x) ((x) << S_1G_PCS2_AN_DONE)
+#define F_1G_PCS2_AN_DONE V_1G_PCS2_AN_DONE(1U)
+
+#define S_1G_PCS2_PGRCVD 8
+#define V_1G_PCS2_PGRCVD(x) ((x) << S_1G_PCS2_PGRCVD)
+#define F_1G_PCS2_PGRCVD V_1G_PCS2_PGRCVD(1U)
+
+#define S_1G_PCS2_SPEED_SEL 6
+#define M_1G_PCS2_SPEED_SEL 0x3U
+#define V_1G_PCS2_SPEED_SEL(x) ((x) << S_1G_PCS2_SPEED_SEL)
+#define G_1G_PCS2_SPEED_SEL(x) (((x) >> S_1G_PCS2_SPEED_SEL) & M_1G_PCS2_SPEED_SEL)
+
+#define S_1G_PCS2_HALF_DUPLEX 5
+#define V_1G_PCS2_HALF_DUPLEX(x) ((x) << S_1G_PCS2_HALF_DUPLEX)
+#define F_1G_PCS2_HALF_DUPLEX V_1G_PCS2_HALF_DUPLEX(1U)
+
+#define S_1G_PCS2_TX_MODE_QUIET 4
+#define V_1G_PCS2_TX_MODE_QUIET(x) ((x) << S_1G_PCS2_TX_MODE_QUIET)
+#define F_1G_PCS2_TX_MODE_QUIET V_1G_PCS2_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_TX_LPI_ACTIVE 3
+#define V_1G_PCS2_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_TX_LPI_ACTIVE)
+#define F_1G_PCS2_TX_LPI_ACTIVE V_1G_PCS2_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_MODE_QUIET 2
+#define V_1G_PCS2_RX_MODE_QUIET(x) ((x) << S_1G_PCS2_RX_MODE_QUIET)
+#define F_1G_PCS2_RX_MODE_QUIET V_1G_PCS2_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_RX_LPI_ACTIVE 1
+#define V_1G_PCS2_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_RX_LPI_ACTIVE)
+#define F_1G_PCS2_RX_LPI_ACTIVE V_1G_PCS2_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_WAKE_ERR 0
+#define V_1G_PCS2_RX_WAKE_ERR(x) ((x) << S_1G_PCS2_RX_WAKE_ERR)
+#define F_1G_PCS2_RX_WAKE_ERR V_1G_PCS2_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS3_STATUS 0x3816c
+
+#define S_1G_PCS3_LOOPBACK 12
+#define V_1G_PCS3_LOOPBACK(x) ((x) << S_1G_PCS3_LOOPBACK)
+#define F_1G_PCS3_LOOPBACK V_1G_PCS3_LOOPBACK(1U)
+
+#define S_1G_PCS3_LINK_STATUS 11
+#define V_1G_PCS3_LINK_STATUS(x) ((x) << S_1G_PCS3_LINK_STATUS)
+#define F_1G_PCS3_LINK_STATUS V_1G_PCS3_LINK_STATUS(1U)
+
+#define S_1G_PCS3_RX_SYNC 10
+#define V_1G_PCS3_RX_SYNC(x) ((x) << S_1G_PCS3_RX_SYNC)
+#define F_1G_PCS3_RX_SYNC V_1G_PCS3_RX_SYNC(1U)
+
+#define S_1G_PCS3_AN_DONE 9
+#define V_1G_PCS3_AN_DONE(x) ((x) << S_1G_PCS3_AN_DONE)
+#define F_1G_PCS3_AN_DONE V_1G_PCS3_AN_DONE(1U)
+
+#define S_1G_PCS3_PGRCVD 8
+#define V_1G_PCS3_PGRCVD(x) ((x) << S_1G_PCS3_PGRCVD)
+#define F_1G_PCS3_PGRCVD V_1G_PCS3_PGRCVD(1U)
+
+#define S_1G_PCS3_SPEED_SEL 6
+#define M_1G_PCS3_SPEED_SEL 0x3U
+#define V_1G_PCS3_SPEED_SEL(x) ((x) << S_1G_PCS3_SPEED_SEL)
+#define G_1G_PCS3_SPEED_SEL(x) (((x) >> S_1G_PCS3_SPEED_SEL) & M_1G_PCS3_SPEED_SEL)
+
+#define S_1G_PCS3_HALF_DUPLEX 5
+#define V_1G_PCS3_HALF_DUPLEX(x) ((x) << S_1G_PCS3_HALF_DUPLEX)
+#define F_1G_PCS3_HALF_DUPLEX V_1G_PCS3_HALF_DUPLEX(1U)
+
+#define S_1G_PCS3_TX_MODE_QUIET 4
+#define V_1G_PCS3_TX_MODE_QUIET(x) ((x) << S_1G_PCS3_TX_MODE_QUIET)
+#define F_1G_PCS3_TX_MODE_QUIET V_1G_PCS3_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_TX_LPI_ACTIVE 3
+#define V_1G_PCS3_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_TX_LPI_ACTIVE)
+#define F_1G_PCS3_TX_LPI_ACTIVE V_1G_PCS3_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_MODE_QUIET 2
+#define V_1G_PCS3_RX_MODE_QUIET(x) ((x) << S_1G_PCS3_RX_MODE_QUIET)
+#define F_1G_PCS3_RX_MODE_QUIET V_1G_PCS3_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_RX_LPI_ACTIVE 1
+#define V_1G_PCS3_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_RX_LPI_ACTIVE)
+#define F_1G_PCS3_RX_LPI_ACTIVE V_1G_PCS3_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_WAKE_ERR 0
+#define V_1G_PCS3_RX_WAKE_ERR(x) ((x) << S_1G_PCS3_RX_WAKE_ERR)
+#define F_1G_PCS3_RX_WAKE_ERR V_1G_PCS3_RX_WAKE_ERR(1U)
+
+#define A_MAC_PCS_LPI_STATUS_0 0x38170
+
+#define S_TX_LPI_STATE 0
+#define M_TX_LPI_STATE 0xffffffU
+#define V_TX_LPI_STATE(x) ((x) << S_TX_LPI_STATE)
+#define G_TX_LPI_STATE(x) (((x) >> S_TX_LPI_STATE) & M_TX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_1 0x38174
+
+#define S_TX_LPI_MODE 0
+#define M_TX_LPI_MODE 0xffffU
+#define V_TX_LPI_MODE(x) ((x) << S_TX_LPI_MODE)
+#define G_TX_LPI_MODE(x) (((x) >> S_TX_LPI_MODE) & M_TX_LPI_MODE)
+
+#define A_MAC_PCS_LPI_STATUS_2 0x38178
+
+#define S_RX_LPI_MODE 24
+#define M_RX_LPI_MODE 0xffU
+#define V_RX_LPI_MODE(x) ((x) << S_RX_LPI_MODE)
+#define G_RX_LPI_MODE(x) (((x) >> S_RX_LPI_MODE) & M_RX_LPI_MODE)
+
+#define S_RX_LPI_STATE 0
+#define M_RX_LPI_STATE 0xffffffU
+#define V_RX_LPI_STATE(x) ((x) << S_RX_LPI_STATE)
+#define G_RX_LPI_STATE(x) (((x) >> S_RX_LPI_STATE) & M_RX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_3 0x3817c
+
+#define S_T7_RX_LPI_ACTIVE 0
+#define M_T7_RX_LPI_ACTIVE 0xffU
+#define V_T7_RX_LPI_ACTIVE(x) ((x) << S_T7_RX_LPI_ACTIVE)
+#define G_T7_RX_LPI_ACTIVE(x) (((x) >> S_T7_RX_LPI_ACTIVE) & M_T7_RX_LPI_ACTIVE)
+
+#define A_MAC_TX0_CLK_DIV 0x38180
+#define A_MAC_TX1_CLK_DIV 0x38184
+#define A_MAC_TX2_CLK_DIV 0x38188
+#define A_MAC_TX3_CLK_DIV 0x3818c
+#define A_MAC_TX4_CLK_DIV 0x38190
+#define A_MAC_TX5_CLK_DIV 0x38194
+#define A_MAC_TX6_CLK_DIV 0x38198
+#define A_MAC_TX7_CLK_DIV 0x3819c
+#define A_MAC_RX0_CLK_DIV 0x381a0
+#define A_MAC_RX1_CLK_DIV 0x381a4
+#define A_MAC_RX2_CLK_DIV 0x381a8
+#define A_MAC_RX3_CLK_DIV 0x381ac
+#define A_MAC_RX4_CLK_DIV 0x381b0
+#define A_MAC_RX5_CLK_DIV 0x381b4
+#define A_MAC_RX6_CLK_DIV 0x381b8
+#define A_MAC_RX7_CLK_DIV 0x381bc
+#define A_MAC_SYNC_E_CDR_LANE_SEL 0x381c0
+
+#define S_CML_MUX_SEL 11
+#define V_CML_MUX_SEL(x) ((x) << S_CML_MUX_SEL)
+#define F_CML_MUX_SEL V_CML_MUX_SEL(1U)
+
+#define S_CMOS_OUT_EN 10
+#define V_CMOS_OUT_EN(x) ((x) << S_CMOS_OUT_EN)
+#define F_CMOS_OUT_EN V_CMOS_OUT_EN(1U)
+
+#define S_CML_OUT_EN 9
+#define V_CML_OUT_EN(x) ((x) << S_CML_OUT_EN)
+#define F_CML_OUT_EN V_CML_OUT_EN(1U)
+
+#define S_LOC_FAULT_PORT_SEL 6
+#define M_LOC_FAULT_PORT_SEL 0x3U
+#define V_LOC_FAULT_PORT_SEL(x) ((x) << S_LOC_FAULT_PORT_SEL)
+#define G_LOC_FAULT_PORT_SEL(x) (((x) >> S_LOC_FAULT_PORT_SEL) & M_LOC_FAULT_PORT_SEL)
+
+#define S_TX_CDR_LANE_SEL 3
+#define M_TX_CDR_LANE_SEL 0x7U
+#define V_TX_CDR_LANE_SEL(x) ((x) << S_TX_CDR_LANE_SEL)
+#define G_TX_CDR_LANE_SEL(x) (((x) >> S_TX_CDR_LANE_SEL) & M_TX_CDR_LANE_SEL)
+
+#define S_RX_CDR_LANE_SEL 0
+#define M_RX_CDR_LANE_SEL 0x7U
+#define V_RX_CDR_LANE_SEL(x) ((x) << S_RX_CDR_LANE_SEL)
+#define G_RX_CDR_LANE_SEL(x) (((x) >> S_RX_CDR_LANE_SEL) & M_RX_CDR_LANE_SEL)
+
+#define A_MAC_DEBUG_PL_IF_1 0x381c4
+#define A_MAC_SIGNAL_DETECT_CTRL 0x381f0
+
+#define S_SIGNAL_DET_LN7 15
+#define V_SIGNAL_DET_LN7(x) ((x) << S_SIGNAL_DET_LN7)
+#define F_SIGNAL_DET_LN7 V_SIGNAL_DET_LN7(1U)
+
+#define S_SIGNAL_DET_LN6 14
+#define V_SIGNAL_DET_LN6(x) ((x) << S_SIGNAL_DET_LN6)
+#define F_SIGNAL_DET_LN6 V_SIGNAL_DET_LN6(1U)
+
+#define S_SIGNAL_DET_LN5 13
+#define V_SIGNAL_DET_LN5(x) ((x) << S_SIGNAL_DET_LN5)
+#define F_SIGNAL_DET_LN5 V_SIGNAL_DET_LN5(1U)
+
+#define S_SIGNAL_DET_LN4 12
+#define V_SIGNAL_DET_LN4(x) ((x) << S_SIGNAL_DET_LN4)
+#define F_SIGNAL_DET_LN4 V_SIGNAL_DET_LN4(1U)
+
+#define S_SIGNAL_DET_LN3 11
+#define V_SIGNAL_DET_LN3(x) ((x) << S_SIGNAL_DET_LN3)
+#define F_SIGNAL_DET_LN3 V_SIGNAL_DET_LN3(1U)
+
+#define S_SIGNAL_DET_LN2 10
+#define V_SIGNAL_DET_LN2(x) ((x) << S_SIGNAL_DET_LN2)
+#define F_SIGNAL_DET_LN2 V_SIGNAL_DET_LN2(1U)
+
+#define S_SIGNAL_DET_LN1 9
+#define V_SIGNAL_DET_LN1(x) ((x) << S_SIGNAL_DET_LN1)
+#define F_SIGNAL_DET_LN1 V_SIGNAL_DET_LN1(1U)
+
+#define S_SIGNAL_DET_LN0 8
+#define V_SIGNAL_DET_LN0(x) ((x) << S_SIGNAL_DET_LN0)
+#define F_SIGNAL_DET_LN0 V_SIGNAL_DET_LN0(1U)
+
+#define S_SIGDETCTRL_LN7 7
+#define V_SIGDETCTRL_LN7(x) ((x) << S_SIGDETCTRL_LN7)
+#define F_SIGDETCTRL_LN7 V_SIGDETCTRL_LN7(1U)
+
+#define S_SIGDETCTRL_LN6 6
+#define V_SIGDETCTRL_LN6(x) ((x) << S_SIGDETCTRL_LN6)
+#define F_SIGDETCTRL_LN6 V_SIGDETCTRL_LN6(1U)
+
+#define S_SIGDETCTRL_LN5 5
+#define V_SIGDETCTRL_LN5(x) ((x) << S_SIGDETCTRL_LN5)
+#define F_SIGDETCTRL_LN5 V_SIGDETCTRL_LN5(1U)
+
+#define S_SIGDETCTRL_LN4 4
+#define V_SIGDETCTRL_LN4(x) ((x) << S_SIGDETCTRL_LN4)
+#define F_SIGDETCTRL_LN4 V_SIGDETCTRL_LN4(1U)
+
+#define S_SIGDETCTRL_LN3 3
+#define V_SIGDETCTRL_LN3(x) ((x) << S_SIGDETCTRL_LN3)
+#define F_SIGDETCTRL_LN3 V_SIGDETCTRL_LN3(1U)
+
+#define S_SIGDETCTRL_LN2 2
+#define V_SIGDETCTRL_LN2(x) ((x) << S_SIGDETCTRL_LN2)
+#define F_SIGDETCTRL_LN2 V_SIGDETCTRL_LN2(1U)
+
+#define S_SIGDETCTRL_LN1 1
+#define V_SIGDETCTRL_LN1(x) ((x) << S_SIGDETCTRL_LN1)
+#define F_SIGDETCTRL_LN1 V_SIGDETCTRL_LN1(1U)
+
+#define S_SIGDETCTRL_LN0 0
+#define V_SIGDETCTRL_LN0(x) ((x) << S_SIGDETCTRL_LN0)
+#define F_SIGDETCTRL_LN0 V_SIGDETCTRL_LN0(1U)
+
+#define A_MAC_FPGA_STATUS_FRM_BOARD 0x381f4
+
+#define S_SFP3_RX_LOS 15
+#define V_SFP3_RX_LOS(x) ((x) << S_SFP3_RX_LOS)
+#define F_SFP3_RX_LOS V_SFP3_RX_LOS(1U)
+
+#define S_SFP3_TX_FAULT 14
+#define V_SFP3_TX_FAULT(x) ((x) << S_SFP3_TX_FAULT)
+#define F_SFP3_TX_FAULT V_SFP3_TX_FAULT(1U)
+
+#define S_SFP3_MOD_PRES 13
+#define V_SFP3_MOD_PRES(x) ((x) << S_SFP3_MOD_PRES)
+#define F_SFP3_MOD_PRES V_SFP3_MOD_PRES(1U)
+
+#define S_SFP2_RX_LOS 12
+#define V_SFP2_RX_LOS(x) ((x) << S_SFP2_RX_LOS)
+#define F_SFP2_RX_LOS V_SFP2_RX_LOS(1U)
+
+#define S_SFP2_TX_FAULT 11
+#define V_SFP2_TX_FAULT(x) ((x) << S_SFP2_TX_FAULT)
+#define F_SFP2_TX_FAULT V_SFP2_TX_FAULT(1U)
+
+#define S_SFP2_MOD_PRES 10
+#define V_SFP2_MOD_PRES(x) ((x) << S_SFP2_MOD_PRES)
+#define F_SFP2_MOD_PRES V_SFP2_MOD_PRES(1U)
+
+#define S_SFP1_RX_LOS 9
+#define V_SFP1_RX_LOS(x) ((x) << S_SFP1_RX_LOS)
+#define F_SFP1_RX_LOS V_SFP1_RX_LOS(1U)
+
+#define S_SFP1_TX_FAULT 8
+#define V_SFP1_TX_FAULT(x) ((x) << S_SFP1_TX_FAULT)
+#define F_SFP1_TX_FAULT V_SFP1_TX_FAULT(1U)
+
+#define S_SFP1_MOD_PRES 7
+#define V_SFP1_MOD_PRES(x) ((x) << S_SFP1_MOD_PRES)
+#define F_SFP1_MOD_PRES V_SFP1_MOD_PRES(1U)
+
+#define S_SFP0_RX_LOS 6
+#define V_SFP0_RX_LOS(x) ((x) << S_SFP0_RX_LOS)
+#define F_SFP0_RX_LOS V_SFP0_RX_LOS(1U)
+
+#define S_SFP0_TX_FAULT 5
+#define V_SFP0_TX_FAULT(x) ((x) << S_SFP0_TX_FAULT)
+#define F_SFP0_TX_FAULT V_SFP0_TX_FAULT(1U)
+
+#define S_SFP0_MOD_PRES 4
+#define V_SFP0_MOD_PRES(x) ((x) << S_SFP0_MOD_PRES)
+#define F_SFP0_MOD_PRES V_SFP0_MOD_PRES(1U)
+
+#define S_QSFP1_INT_L 3
+#define V_QSFP1_INT_L(x) ((x) << S_QSFP1_INT_L)
+#define F_QSFP1_INT_L V_QSFP1_INT_L(1U)
+
+#define S_QSFP1_MOD_PRES 2
+#define V_QSFP1_MOD_PRES(x) ((x) << S_QSFP1_MOD_PRES)
+#define F_QSFP1_MOD_PRES V_QSFP1_MOD_PRES(1U)
+
+#define S_QSFP0_INT_L 1
+#define V_QSFP0_INT_L(x) ((x) << S_QSFP0_INT_L)
+#define F_QSFP0_INT_L V_QSFP0_INT_L(1U)
+
+#define S_QSFP0_MOD_PRES 0
+#define V_QSFP0_MOD_PRES(x) ((x) << S_QSFP0_MOD_PRES)
+#define F_QSFP0_MOD_PRES V_QSFP0_MOD_PRES(1U)
+
+#define A_MAC_FPGA_CONTROL_TO_BOARD 0x381f8
+
+#define S_T7_1_LB_MODE 10
+#define M_T7_1_LB_MODE 0x3U
+#define V_T7_1_LB_MODE(x) ((x) << S_T7_1_LB_MODE)
+#define G_T7_1_LB_MODE(x) (((x) >> S_T7_1_LB_MODE) & M_T7_1_LB_MODE)
+
+#define S_SFP3_TX_DISABLE 9
+#define V_SFP3_TX_DISABLE(x) ((x) << S_SFP3_TX_DISABLE)
+#define F_SFP3_TX_DISABLE V_SFP3_TX_DISABLE(1U)
+
+#define S_SFP2_TX_DISABLE 8
+#define V_SFP2_TX_DISABLE(x) ((x) << S_SFP2_TX_DISABLE)
+#define F_SFP2_TX_DISABLE V_SFP2_TX_DISABLE(1U)
+
+#define S_SFP1_TX_DISABLE 7
+#define V_SFP1_TX_DISABLE(x) ((x) << S_SFP1_TX_DISABLE)
+#define F_SFP1_TX_DISABLE V_SFP1_TX_DISABLE(1U)
+
+#define S_SFP0_TX_DISABLE 6
+#define V_SFP0_TX_DISABLE(x) ((x) << S_SFP0_TX_DISABLE)
+#define F_SFP0_TX_DISABLE V_SFP0_TX_DISABLE(1U)
+
+#define S_QSFP1_LPMODE 5
+#define V_QSFP1_LPMODE(x) ((x) << S_QSFP1_LPMODE)
+#define F_QSFP1_LPMODE V_QSFP1_LPMODE(1U)
+
+#define S_QSFP1_MODSEL_L 4
+#define V_QSFP1_MODSEL_L(x) ((x) << S_QSFP1_MODSEL_L)
+#define F_QSFP1_MODSEL_L V_QSFP1_MODSEL_L(1U)
+
+#define S_QSFP1_RESET_L 3
+#define V_QSFP1_RESET_L(x) ((x) << S_QSFP1_RESET_L)
+#define F_QSFP1_RESET_L V_QSFP1_RESET_L(1U)
+
+#define S_QSFP0_LPMODE 2
+#define V_QSFP0_LPMODE(x) ((x) << S_QSFP0_LPMODE)
+#define F_QSFP0_LPMODE V_QSFP0_LPMODE(1U)
+
+#define S_QSFP0_MODSEL_L 1
+#define V_QSFP0_MODSEL_L(x) ((x) << S_QSFP0_MODSEL_L)
+#define F_QSFP0_MODSEL_L V_QSFP0_MODSEL_L(1U)
+
+#define S_QSFP0_RESET_L 0
+#define V_QSFP0_RESET_L(x) ((x) << S_QSFP0_RESET_L)
+#define F_QSFP0_RESET_L V_QSFP0_RESET_L(1U)
+
+#define A_MAC_FPGA_LINK_STATUS 0x381fc
+
+#define S_PORT3_FPGA_LINK_UP 3
+#define V_PORT3_FPGA_LINK_UP(x) ((x) << S_PORT3_FPGA_LINK_UP)
+#define F_PORT3_FPGA_LINK_UP V_PORT3_FPGA_LINK_UP(1U)
+
+#define S_PORT2_FPGA_LINK_UP 2
+#define V_PORT2_FPGA_LINK_UP(x) ((x) << S_PORT2_FPGA_LINK_UP)
+#define F_PORT2_FPGA_LINK_UP V_PORT2_FPGA_LINK_UP(1U)
+
+#define S_PORT1_FPGA_LINK_UP 1
+#define V_PORT1_FPGA_LINK_UP(x) ((x) << S_PORT1_FPGA_LINK_UP)
+#define F_PORT1_FPGA_LINK_UP V_PORT1_FPGA_LINK_UP(1U)
+
+#define S_PORT0_FPGA_LINK_UP 0
+#define V_PORT0_FPGA_LINK_UP(x) ((x) << S_PORT0_FPGA_LINK_UP)
+#define F_PORT0_FPGA_LINK_UP V_PORT0_FPGA_LINK_UP(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_REVISION 0x38200
+
+#define S_MTIP_REV_400G_0 0
+#define M_MTIP_REV_400G_0 0xffU
+#define V_MTIP_REV_400G_0(x) ((x) << S_MTIP_REV_400G_0)
+#define G_MTIP_REV_400G_0(x) (((x) >> S_MTIP_REV_400G_0) & M_MTIP_REV_400G_0)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_SCRATCH 0x38204
+#define A_MAC_MTIP_MAC400G_0_MTIP_COMMAND_CONFIG 0x38208
+
+#define S_INV_LOOP 31
+#define V_INV_LOOP(x) ((x) << S_INV_LOOP)
+#define F_INV_LOOP V_INV_LOOP(1U)
+
+#define S_TX_FLUSH_ENABLE_400G_0 22
+#define V_TX_FLUSH_ENABLE_400G_0(x) ((x) << S_TX_FLUSH_ENABLE_400G_0)
+#define F_TX_FLUSH_ENABLE_400G_0 V_TX_FLUSH_ENABLE_400G_0(1U)
+
+#define S_PHY_LOOPBACK_EN_400G 10
+#define V_PHY_LOOPBACK_EN_400G(x) ((x) << S_PHY_LOOPBACK_EN_400G)
+#define F_PHY_LOOPBACK_EN_400G V_PHY_LOOPBACK_EN_400G(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_0 0x3820c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_1 0x38210
+#define A_MAC_MTIP_MAC400G_0_MTIP_FRM_LENGTH 0x38214
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_SECTIONS 0x3821c
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_SECTIONS 0x38220
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_ALMOST_F_E 0x38224
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_ALMOST_F_E 0x38228
+#define A_MAC_MTIP_MAC400G_0_MTIP_HASHTABLE_LOAD 0x3822c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_STATUS 0x38240
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_IPG_LENGTH 0x38244
+
+#define S_T7_IPG 19
+#define M_T7_IPG 0x1fffU
+#define V_T7_IPG(x) ((x) << S_T7_IPG)
+#define G_T7_IPG(x) (((x) >> S_T7_IPG) & M_T7_IPG)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA 0x38254
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA 0x38258
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA 0x3825c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA 0x38260
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38264
+
+#define S_CL1_PAUSE_QUANTA_THRESH 16
+#define M_CL1_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL1_PAUSE_QUANTA_THRESH(x) ((x) << S_CL1_PAUSE_QUANTA_THRESH)
+#define G_CL1_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL1_PAUSE_QUANTA_THRESH) & M_CL1_PAUSE_QUANTA_THRESH)
+
+#define S_CL0_PAUSE_QUANTA_THRESH 0
+#define M_CL0_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL0_PAUSE_QUANTA_THRESH(x) ((x) << S_CL0_PAUSE_QUANTA_THRESH)
+#define G_CL0_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL0_PAUSE_QUANTA_THRESH) & M_CL0_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38268
+
+#define S_CL3_PAUSE_QUANTA_THRESH 16
+#define M_CL3_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL3_PAUSE_QUANTA_THRESH(x) ((x) << S_CL3_PAUSE_QUANTA_THRESH)
+#define G_CL3_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL3_PAUSE_QUANTA_THRESH) & M_CL3_PAUSE_QUANTA_THRESH)
+
+#define S_CL2_PAUSE_QUANTA_THRESH 0
+#define M_CL2_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL2_PAUSE_QUANTA_THRESH(x) ((x) << S_CL2_PAUSE_QUANTA_THRESH)
+#define G_CL2_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL2_PAUSE_QUANTA_THRESH) & M_CL2_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3826c
+
+#define S_CL5_PAUSE_QUANTA_THRESH 16
+#define M_CL5_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL5_PAUSE_QUANTA_THRESH(x) ((x) << S_CL5_PAUSE_QUANTA_THRESH)
+#define G_CL5_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL5_PAUSE_QUANTA_THRESH) & M_CL5_PAUSE_QUANTA_THRESH)
+
+#define S_CL4_PAUSE_QUANTA_THRESH 0
+#define M_CL4_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL4_PAUSE_QUANTA_THRESH(x) ((x) << S_CL4_PAUSE_QUANTA_THRESH)
+#define G_CL4_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL4_PAUSE_QUANTA_THRESH) & M_CL4_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38270
+
+#define S_CL7_PAUSE_QUANTA_THRESH 16
+#define M_CL7_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL7_PAUSE_QUANTA_THRESH(x) ((x) << S_CL7_PAUSE_QUANTA_THRESH)
+#define G_CL7_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL7_PAUSE_QUANTA_THRESH) & M_CL7_PAUSE_QUANTA_THRESH)
+
+#define S_CL6_PAUSE_QUANTA_THRESH 0
+#define M_CL6_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL6_PAUSE_QUANTA_THRESH(x) ((x) << S_CL6_PAUSE_QUANTA_THRESH)
+#define G_CL6_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL6_PAUSE_QUANTA_THRESH) & M_CL6_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_PAUSE_STATUS 0x38274
+
+#define S_RX_PAUSE_STATUS 0
+#define M_RX_PAUSE_STATUS 0xffU
+#define V_RX_PAUSE_STATUS(x) ((x) << S_RX_PAUSE_STATUS)
+#define G_RX_PAUSE_STATUS(x) (((x) >> S_RX_PAUSE_STATUS) & M_RX_PAUSE_STATUS)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_TS_TIMESTAMP 0x3827c
+#define A_MAC_MTIP_MAC400G_0_MTIP_XIF_MODE 0x38280
+#define A_MAC_MTIP_MAC400G_1_MTIP_REVISION 0x38300
+
+#define S_MTIP_REV_400G_1 0
+#define M_MTIP_REV_400G_1 0xffU
+#define V_MTIP_REV_400G_1(x) ((x) << S_MTIP_REV_400G_1)
+#define G_MTIP_REV_400G_1(x) (((x) >> S_MTIP_REV_400G_1) & M_MTIP_REV_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_SCRATCH 0x38304
+#define A_MAC_MTIP_MAC400G_1_MTIP_COMMAND_CONFIG 0x38308
+
+#define S_TX_FLUSH_ENABLE_400G_1 22
+#define V_TX_FLUSH_ENABLE_400G_1(x) ((x) << S_TX_FLUSH_ENABLE_400G_1)
+#define F_TX_FLUSH_ENABLE_400G_1 V_TX_FLUSH_ENABLE_400G_1(1U)
+
+#define S_PHY_LOOPBACK_EN_400G_1 10
+#define V_PHY_LOOPBACK_EN_400G_1(x) ((x) << S_PHY_LOOPBACK_EN_400G_1)
+#define F_PHY_LOOPBACK_EN_400G_1 V_PHY_LOOPBACK_EN_400G_1(1U)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_0 0x3830c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_1 0x38310
+#define A_MAC_MTIP_MAC400G_1_MTIP_FRM_LENGTH 0x38314
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_SECTIONS 0x3831c
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_SECTIONS 0x38320
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_ALMOST_F_E 0x38324
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_ALMOST_F_E 0x38328
+#define A_MAC_MTIP_MAC400G_1_MTIP_HASHTABLE_LOAD 0x3832c
+
+#define S_ENABLE_MCAST_RX_400G_1 8
+#define V_ENABLE_MCAST_RX_400G_1(x) ((x) << S_ENABLE_MCAST_RX_400G_1)
+#define F_ENABLE_MCAST_RX_400G_1 V_ENABLE_MCAST_RX_400G_1(1U)
+
+#define S_HASHTABLE_ADDR_400G_1 0
+#define M_HASHTABLE_ADDR_400G_1 0x3fU
+#define V_HASHTABLE_ADDR_400G_1(x) ((x) << S_HASHTABLE_ADDR_400G_1)
+#define G_HASHTABLE_ADDR_400G_1(x) (((x) >> S_HASHTABLE_ADDR_400G_1) & M_HASHTABLE_ADDR_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_STATUS 0x38340
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_IPG_LENGTH 0x38344
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA 0x38354
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA 0x38358
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA 0x3835c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA 0x38360
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38364
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38368
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3836c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38370
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_PAUSE_STATUS 0x38374
+#define A_MAC_MTIP_MAC400G_1_MTIP_TS_TIMESTAMP 0x3837c
+#define A_MAC_MTIP_MAC400G_1_MTIP_XIF_MODE 0x38380
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_1 0x38400
+
+#define S_T7_SPEED_SELECTION 2
+#define V_T7_SPEED_SELECTION(x) ((x) << S_T7_SPEED_SELECTION)
+#define F_T7_SPEED_SELECTION V_T7_SPEED_SELECTION(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_1 0x38404
+
+#define S_400G_RX_LINK_STATUS 2
+#define V_400G_RX_LINK_STATUS(x) ((x) << S_400G_RX_LINK_STATUS)
+#define F_400G_RX_LINK_STATUS V_400G_RX_LINK_STATUS(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID0 0x38408
+
+#define S_400G_DEVICE_ID0_0 0
+#define M_400G_DEVICE_ID0_0 0xffffU
+#define V_400G_DEVICE_ID0_0(x) ((x) << S_400G_DEVICE_ID0_0)
+#define G_400G_DEVICE_ID0_0(x) (((x) >> S_400G_DEVICE_ID0_0) & M_400G_DEVICE_ID0_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID1 0x3840c
+
+#define S_400G_DEVICE_ID1_0 0
+#define M_400G_DEVICE_ID1_0 0xffffU
+#define V_400G_DEVICE_ID1_0(x) ((x) << S_400G_DEVICE_ID1_0)
+#define G_400G_DEVICE_ID1_0(x) (((x) >> S_400G_DEVICE_ID1_0) & M_400G_DEVICE_ID1_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SPEED_ABILITY 0x38410
+
+#define S_400G_CAPABLE_0 9
+#define V_400G_CAPABLE_0(x) ((x) << S_400G_CAPABLE_0)
+#define F_400G_CAPABLE_0 V_400G_CAPABLE_0(1U)
+
+#define S_200G_CAPABLE_0 8
+#define V_200G_CAPABLE_0(x) ((x) << S_200G_CAPABLE_0)
+#define F_200G_CAPABLE_0 V_200G_CAPABLE_0(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG1 0x38414
+
+#define S_DEVICE_PACKAGE 3
+#define V_DEVICE_PACKAGE(x) ((x) << S_DEVICE_PACKAGE)
+#define F_DEVICE_PACKAGE V_DEVICE_PACKAGE(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG2 0x38418
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_2 0x3841c
+
+#define S_400G_PCS_TYPE_SELECTION_0 0
+#define M_400G_PCS_TYPE_SELECTION_0 0xfU
+#define V_400G_PCS_TYPE_SELECTION_0(x) ((x) << S_400G_PCS_TYPE_SELECTION_0)
+#define G_400G_PCS_TYPE_SELECTION_0(x) (((x) >> S_400G_PCS_TYPE_SELECTION_0) & M_400G_PCS_TYPE_SELECTION_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_2 0x38420
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_3 0x38424
+
+#define S_T7_DEVICE_PRESENT 2
+#define M_T7_DEVICE_PRESENT 0x3fffU
+#define V_T7_DEVICE_PRESENT(x) ((x) << S_T7_DEVICE_PRESENT)
+#define G_T7_DEVICE_PRESENT(x) (((x) >> S_T7_DEVICE_PRESENT) & M_T7_DEVICE_PRESENT)
+
+#define S_400GBASE_R 1
+#define V_400GBASE_R(x) ((x) << S_400GBASE_R)
+#define F_400GBASE_R V_400GBASE_R(1U)
+
+#define S_200GBASE_R 0
+#define V_200GBASE_R(x) ((x) << S_200GBASE_R)
+#define F_200GBASE_R V_200GBASE_R(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID0 0x38438
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID1 0x3843c
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_1 0x38480
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_2 0x38484
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_CONTROL 0x384a8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_ERR_CNT 0x384ac
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BER_HIGH_ORDER_CNT 0x384b0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x384b4
+
+#define S_HIGH_ORDER 15
+#define V_HIGH_ORDER(x) ((x) << S_HIGH_ORDER)
+#define F_HIGH_ORDER V_HIGH_ORDER(1U)
+
+#define S_ERROR_BLOCK_COUNTER 0
+#define M_ERROR_BLOCK_COUNTER 0x3fffU
+#define V_ERROR_BLOCK_COUNTER(x) ((x) << S_ERROR_BLOCK_COUNTER)
+#define G_ERROR_BLOCK_COUNTER(x) (((x) >> S_ERROR_BLOCK_COUNTER) & M_ERROR_BLOCK_COUNTER)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x384c8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x384cc
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x384d0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x384d4
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_0_MAPPING 0x384d8
+
+#define S_T7_LANE_0_MAPPING 0
+#define M_T7_LANE_0_MAPPING 0xfU
+#define V_T7_LANE_0_MAPPING(x) ((x) << S_T7_LANE_0_MAPPING)
+#define G_T7_LANE_0_MAPPING(x) (((x) >> S_T7_LANE_0_MAPPING) & M_T7_LANE_0_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_1_MAPPING 0x384dc
+
+#define S_T7_LANE_1_MAPPING 0
+#define M_T7_LANE_1_MAPPING 0xfU
+#define V_T7_LANE_1_MAPPING(x) ((x) << S_T7_LANE_1_MAPPING)
+#define G_T7_LANE_1_MAPPING(x) (((x) >> S_T7_LANE_1_MAPPING) & M_T7_LANE_1_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_2_MAPPING 0x384e0
+
+#define S_T7_LANE_2_MAPPING 0
+#define M_T7_LANE_2_MAPPING 0xfU
+#define V_T7_LANE_2_MAPPING(x) ((x) << S_T7_LANE_2_MAPPING)
+#define G_T7_LANE_2_MAPPING(x) (((x) >> S_T7_LANE_2_MAPPING) & M_T7_LANE_2_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_3_MAPPING 0x384e4
+
+#define S_T7_LANE_3_MAPPING 0
+#define M_T7_LANE_3_MAPPING 0xfU
+#define V_T7_LANE_3_MAPPING(x) ((x) << S_T7_LANE_3_MAPPING)
+#define G_T7_LANE_3_MAPPING(x) (((x) >> S_T7_LANE_3_MAPPING) & M_T7_LANE_3_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_4_MAPPING 0x384e8
+
+#define S_T7_LANE_4_MAPPING 0
+#define M_T7_LANE_4_MAPPING 0xfU
+#define V_T7_LANE_4_MAPPING(x) ((x) << S_T7_LANE_4_MAPPING)
+#define G_T7_LANE_4_MAPPING(x) (((x) >> S_T7_LANE_4_MAPPING) & M_T7_LANE_4_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_5_MAPPING 0x384ec
+
+#define S_T7_LANE_5_MAPPING 0
+#define M_T7_LANE_5_MAPPING 0xfU
+#define V_T7_LANE_5_MAPPING(x) ((x) << S_T7_LANE_5_MAPPING)
+#define G_T7_LANE_5_MAPPING(x) (((x) >> S_T7_LANE_5_MAPPING) & M_T7_LANE_5_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_6_MAPPING 0x384f0
+
+#define S_T7_LANE_6_MAPPING 0
+#define M_T7_LANE_6_MAPPING 0xfU
+#define V_T7_LANE_6_MAPPING(x) ((x) << S_T7_LANE_6_MAPPING)
+#define G_T7_LANE_6_MAPPING(x) (((x) >> S_T7_LANE_6_MAPPING) & M_T7_LANE_6_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_7_MAPPING 0x384f4
+
+#define S_T7_LANE_7_MAPPING 0
+#define M_T7_LANE_7_MAPPING 0xfU
+#define V_T7_LANE_7_MAPPING(x) ((x) << S_T7_LANE_7_MAPPING)
+#define G_T7_LANE_7_MAPPING(x) (((x) >> S_T7_LANE_7_MAPPING) & M_T7_LANE_7_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_8_MAPPING 0x384f8
+
+#define S_T7_LANE_8_MAPPING 0
+#define M_T7_LANE_8_MAPPING 0xfU
+#define V_T7_LANE_8_MAPPING(x) ((x) << S_T7_LANE_8_MAPPING)
+#define G_T7_LANE_8_MAPPING(x) (((x) >> S_T7_LANE_8_MAPPING) & M_T7_LANE_8_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_9_MAPPING 0x384fc
+
+#define S_T7_LANE_9_MAPPING 0
+#define M_T7_LANE_9_MAPPING 0xfU
+#define V_T7_LANE_9_MAPPING(x) ((x) << S_T7_LANE_9_MAPPING)
+#define G_T7_LANE_9_MAPPING(x) (((x) >> S_T7_LANE_9_MAPPING) & M_T7_LANE_9_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_10_MAPPING 0x38500
+
+#define S_T7_LANE_10_MAPPING 0
+#define M_T7_LANE_10_MAPPING 0xfU
+#define V_T7_LANE_10_MAPPING(x) ((x) << S_T7_LANE_10_MAPPING)
+#define G_T7_LANE_10_MAPPING(x) (((x) >> S_T7_LANE_10_MAPPING) & M_T7_LANE_10_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_11_MAPPING 0x38504
+
+#define S_T7_LANE_11_MAPPING 0
+#define M_T7_LANE_11_MAPPING 0xfU
+#define V_T7_LANE_11_MAPPING(x) ((x) << S_T7_LANE_11_MAPPING)
+#define G_T7_LANE_11_MAPPING(x) (((x) >> S_T7_LANE_11_MAPPING) & M_T7_LANE_11_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_12_MAPPING 0x38508
+
+#define S_T7_LANE_12_MAPPING 0
+#define M_T7_LANE_12_MAPPING 0xfU
+#define V_T7_LANE_12_MAPPING(x) ((x) << S_T7_LANE_12_MAPPING)
+#define G_T7_LANE_12_MAPPING(x) (((x) >> S_T7_LANE_12_MAPPING) & M_T7_LANE_12_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_13_MAPPING 0x3850c
+
+#define S_T7_LANE_13_MAPPING 0
+#define M_T7_LANE_13_MAPPING 0xfU
+#define V_T7_LANE_13_MAPPING(x) ((x) << S_T7_LANE_13_MAPPING)
+#define G_T7_LANE_13_MAPPING(x) (((x) >> S_T7_LANE_13_MAPPING) & M_T7_LANE_13_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_14_MAPPING 0x38510
+
+#define S_T7_LANE_14_MAPPING 0
+#define M_T7_LANE_14_MAPPING 0xfU
+#define V_T7_LANE_14_MAPPING(x) ((x) << S_T7_LANE_14_MAPPING)
+#define G_T7_LANE_14_MAPPING(x) (((x) >> S_T7_LANE_14_MAPPING) & M_T7_LANE_14_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_15_MAPPING 0x38514
+
+#define S_T7_LANE_15_MAPPING 0
+#define M_T7_LANE_15_MAPPING 0xfU
+#define V_T7_LANE_15_MAPPING(x) ((x) << S_T7_LANE_15_MAPPING)
+#define G_T7_LANE_15_MAPPING(x) (((x) >> S_T7_LANE_15_MAPPING) & M_T7_LANE_15_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SCRATCH 0x38600
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CORE_REVISION 0x38604
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CL_INTVL 0x38608
+
+#define S_T7_VL_INTVL 0
+#define M_T7_VL_INTVL 0xffffU
+#define V_T7_VL_INTVL(x) ((x) << S_T7_VL_INTVL)
+#define G_T7_VL_INTVL(x) (((x) >> S_T7_VL_INTVL) & M_T7_VL_INTVL)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_LANE_THRESH 0x3860c
+
+#define S_TX_LANE_THRESH 0
+#define M_TX_LANE_THRESH 0xfU
+#define V_TX_LANE_THRESH(x) ((x) << S_TX_LANE_THRESH)
+#define G_TX_LANE_THRESH(x) (((x) >> S_TX_LANE_THRESH) & M_TX_LANE_THRESH)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_CDMII_PACE 0x3861c
+
+#define S_TX_CDMII_PACE 0
+#define M_TX_CDMII_PACE 0xfU
+#define V_TX_CDMII_PACE(x) ((x) << S_TX_CDMII_PACE)
+#define G_TX_CDMII_PACE(x) (((x) >> S_TX_CDMII_PACE) & M_TX_CDMII_PACE)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_0 0x38620
+
+#define S_AM_0 0
+#define M_AM_0 0xffffU
+#define V_AM_0(x) ((x) << S_AM_0)
+#define G_AM_0(x) (((x) >> S_AM_0) & M_AM_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_1 0x38624
+
+#define S_AM_1 0
+#define M_AM_1 0xffffU
+#define V_AM_1(x) ((x) << S_AM_1)
+#define G_AM_1(x) (((x) >> S_AM_1) & M_AM_1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO0 0x38800
+
+#define S_DBGINFO0 0
+#define M_DBGINFO0 0xffffU
+#define V_DBGINFO0(x) ((x) << S_DBGINFO0)
+#define G_DBGINFO0(x) (((x) >> S_DBGINFO0) & M_DBGINFO0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO1 0x38804
+
+#define S_DBGINFO1 0
+#define M_DBGINFO1 0xffffU
+#define V_DBGINFO1(x) ((x) << S_DBGINFO1)
+#define G_DBGINFO1(x) (((x) >> S_DBGINFO1) & M_DBGINFO1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO2 0x38808
+
+#define S_DBGINFO2 0
+#define M_DBGINFO2 0xffffU
+#define V_DBGINFO2(x) ((x) << S_DBGINFO2)
+#define G_DBGINFO2(x) (((x) >> S_DBGINFO2) & M_DBGINFO2)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO3 0x3880c
+
+#define S_DBGINFO3 0
+#define M_DBGINFO3 0xffffU
+#define V_DBGINFO3(x) ((x) << S_DBGINFO3)
+#define G_DBGINFO3(x) (((x) >> S_DBGINFO3) & M_DBGINFO3)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_1 0x38900
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_1 0x38904
+
+#define S_400G_RX_LINK_STATUS_1 2
+#define V_400G_RX_LINK_STATUS_1(x) ((x) << S_400G_RX_LINK_STATUS_1)
+#define F_400G_RX_LINK_STATUS_1 V_400G_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID0 0x38908
+
+#define S_400G_DEVICE_ID0_1 0
+#define M_400G_DEVICE_ID0_1 0xffffU
+#define V_400G_DEVICE_ID0_1(x) ((x) << S_400G_DEVICE_ID0_1)
+#define G_400G_DEVICE_ID0_1(x) (((x) >> S_400G_DEVICE_ID0_1) & M_400G_DEVICE_ID0_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID1 0x3890c
+
+#define S_400G_DEVICE_ID1_1 0
+#define M_400G_DEVICE_ID1_1 0xffffU
+#define V_400G_DEVICE_ID1_1(x) ((x) << S_400G_DEVICE_ID1_1)
+#define G_400G_DEVICE_ID1_1(x) (((x) >> S_400G_DEVICE_ID1_1) & M_400G_DEVICE_ID1_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SPEED_ABILITY 0x38910
+
+#define S_400G_CAPABLE_1 9
+#define V_400G_CAPABLE_1(x) ((x) << S_400G_CAPABLE_1)
+#define F_400G_CAPABLE_1 V_400G_CAPABLE_1(1U)
+
+#define S_200G_CAPABLE_1 8
+#define V_200G_CAPABLE_1(x) ((x) << S_200G_CAPABLE_1)
+#define F_200G_CAPABLE_1 V_200G_CAPABLE_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG1 0x38914
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG2 0x38918
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_2 0x3891c
+
+#define S_400G_PCS_TYPE_SELECTION_1 0
+#define M_400G_PCS_TYPE_SELECTION_1 0xfU
+#define V_400G_PCS_TYPE_SELECTION_1(x) ((x) << S_400G_PCS_TYPE_SELECTION_1)
+#define G_400G_PCS_TYPE_SELECTION_1(x) (((x) >> S_400G_PCS_TYPE_SELECTION_1) & M_400G_PCS_TYPE_SELECTION_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_2 0x38920
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_3 0x38924
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID0 0x38938
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID1 0x3893c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_1 0x38980
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_2 0x38984
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_CONTROL 0x389a8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_ERR_CNT 0x389ac
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BER_HIGH_ORDER_CNT 0x389b0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x389b4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x389c8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x389cc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x389d0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x389d4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_0_MAPPING 0x389d8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_1_MAPPING 0x389dc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_2_MAPPING 0x389e0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_3_MAPPING 0x389e4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_4_MAPPING 0x389e8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_5_MAPPING 0x389ec
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_6_MAPPING 0x389f0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_7_MAPPING 0x389f4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_8_MAPPING 0x389f8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_9_MAPPING 0x389fc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_10_MAPPING 0x38a00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_11_MAPPING 0x38a04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_12_MAPPING 0x38a08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_13_MAPPING 0x38a0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_14_MAPPING 0x38a10
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_15_MAPPING 0x38a14
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SCRATCH 0x38b00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CORE_REVISION 0x38b04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CL_INTVL 0x38b08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_LANE_THRESH 0x38b0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_CDMII_PACE 0x38b1c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_0 0x38b20
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_1 0x38b24
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO0 0x38d00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO1 0x38d04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO2 0x38d08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO3 0x38d0c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_0 0x38e00
+
+#define S_TC_PAD_ALTER 10
+#define V_TC_PAD_ALTER(x) ((x) << S_TC_PAD_ALTER)
+#define F_TC_PAD_ALTER V_TC_PAD_ALTER(1U)
+
+#define S_TC_PAD_VALUE 9
+#define V_TC_PAD_VALUE(x) ((x) << S_TC_PAD_VALUE)
+#define F_TC_PAD_VALUE V_TC_PAD_VALUE(1U)
+
+#define S_KP_ENABLE 8
+#define V_KP_ENABLE(x) ((x) << S_KP_ENABLE)
+#define F_KP_ENABLE V_KP_ENABLE(1U)
+
+#define S_AM16_COPY_DIS 3
+#define V_AM16_COPY_DIS(x) ((x) << S_AM16_COPY_DIS)
+#define F_AM16_COPY_DIS V_AM16_COPY_DIS(1U)
+
+#define S_RS_FEC_DEGRADE_OPTION_ENA 2
+#define V_RS_FEC_DEGRADE_OPTION_ENA(x) ((x) << S_RS_FEC_DEGRADE_OPTION_ENA)
+#define F_RS_FEC_DEGRADE_OPTION_ENA V_RS_FEC_DEGRADE_OPTION_ENA(1U)
+
+#define A_MAC_MTIP_RS_FEC_STATUS_0_0 0x38e04
+
+#define S_FEC_STATUS_0_14 14
+#define V_FEC_STATUS_0_14(x) ((x) << S_FEC_STATUS_0_14)
+#define F_FEC_STATUS_0_14 V_FEC_STATUS_0_14(1U)
+
+#define S_FEC_STATUS_0_11 8
+#define M_FEC_STATUS_0_11 0xfU
+#define V_FEC_STATUS_0_11(x) ((x) << S_FEC_STATUS_0_11)
+#define G_FEC_STATUS_0_11(x) (((x) >> S_FEC_STATUS_0_11) & M_FEC_STATUS_0_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_0 V_RS_FEC_DEGRADE_SER_RECEIVED0_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_1 V_RS_FEC_DEGRADE_SER_RECEIVED0_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_2 V_RS_FEC_DEGRADE_SER_RECEIVED0_2(1U)
+
+#define S_FEC_STATUS_0_4 4
+#define V_FEC_STATUS_0_4(x) ((x) << S_FEC_STATUS_0_4)
+#define F_FEC_STATUS_0_4 V_FEC_STATUS_0_4(1U)
+
+#define S_FEC_STATUS_0_3 3
+#define V_FEC_STATUS_0_3(x) ((x) << S_FEC_STATUS_0_3)
+#define F_FEC_STATUS_0_3 V_FEC_STATUS_0_3(1U)
+
+#define S_FEC_STATUS_0_2 2
+#define V_FEC_STATUS_0_2(x) ((x) << S_FEC_STATUS_0_2)
+#define F_FEC_STATUS_0_2 V_FEC_STATUS_0_2(1U)
+
+#define S_FEC_STATUS_0_1 1
+#define V_FEC_STATUS_0_1(x) ((x) << S_FEC_STATUS_0_1)
+#define F_FEC_STATUS_0_1 V_FEC_STATUS_0_1(1U)
+
+#define S_FEC_STATUS_0_0 0
+#define V_FEC_STATUS_0_0(x) ((x) << S_FEC_STATUS_0_0)
+#define F_FEC_STATUS_0_0 V_FEC_STATUS_0_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_0 0x38e08
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_0 0x38e0c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_0 0x38e10
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_0 0x38e14
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_0 0x38e18
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_0 0x38e1c
+
+#define S_DEC_TRESH 0
+#define M_DEC_TRESH 0x3fU
+#define V_DEC_TRESH(x) ((x) << S_DEC_TRESH)
+#define G_DEC_TRESH(x) (((x) >> S_DEC_TRESH) & M_DEC_TRESH)
+
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_1 0x38e20
+#define A_MAC_MTIP_RS_FEC_STATUS_0_1 0x38e24
+
+#define S_FEC_STATUS_1_14 14
+#define V_FEC_STATUS_1_14(x) ((x) << S_FEC_STATUS_1_14)
+#define F_FEC_STATUS_1_14 V_FEC_STATUS_1_14(1U)
+
+#define S_FEC_STATUS_1_11 8
+#define M_FEC_STATUS_1_11 0xfU
+#define V_FEC_STATUS_1_11(x) ((x) << S_FEC_STATUS_1_11)
+#define G_FEC_STATUS_1_11(x) (((x) >> S_FEC_STATUS_1_11) & M_FEC_STATUS_1_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_0 V_RS_FEC_DEGRADE_SER_RECEIVED1_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_1 V_RS_FEC_DEGRADE_SER_RECEIVED1_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_2 V_RS_FEC_DEGRADE_SER_RECEIVED1_2(1U)
+
+#define S_FEC_STATUS_1_4 4
+#define V_FEC_STATUS_1_4(x) ((x) << S_FEC_STATUS_1_4)
+#define F_FEC_STATUS_1_4 V_FEC_STATUS_1_4(1U)
+
+#define S_FEC_STATUS_1_3 3
+#define V_FEC_STATUS_1_3(x) ((x) << S_FEC_STATUS_1_3)
+#define F_FEC_STATUS_1_3 V_FEC_STATUS_1_3(1U)
+
+#define S_FEC_STATUS_1_2 2
+#define V_FEC_STATUS_1_2(x) ((x) << S_FEC_STATUS_1_2)
+#define F_FEC_STATUS_1_2 V_FEC_STATUS_1_2(1U)
+
+#define S_FEC_STATUS_1_1 1
+#define V_FEC_STATUS_1_1(x) ((x) << S_FEC_STATUS_1_1)
+#define F_FEC_STATUS_1_1 V_FEC_STATUS_1_1(1U)
+
+#define S_FEC_STATUS_1_0 0
+#define V_FEC_STATUS_1_0(x) ((x) << S_FEC_STATUS_1_0)
+#define F_FEC_STATUS_1_0 V_FEC_STATUS_1_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_1 0x38e28
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_1 0x38e2c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_1 0x38e30
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_1 0x38e34
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_1 0x38e38
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_1 0x38e3c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_2 0x38e40
+#define A_MAC_MTIP_RS_FEC_STATUS_0_2 0x38e44
+
+#define S_FEC_STATUS_2_14 14
+#define V_FEC_STATUS_2_14(x) ((x) << S_FEC_STATUS_2_14)
+#define F_FEC_STATUS_2_14 V_FEC_STATUS_2_14(1U)
+
+#define S_FEC_STATUS_2_11 8
+#define M_FEC_STATUS_2_11 0xfU
+#define V_FEC_STATUS_2_11(x) ((x) << S_FEC_STATUS_2_11)
+#define G_FEC_STATUS_2_11(x) (((x) >> S_FEC_STATUS_2_11) & M_FEC_STATUS_2_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_0 V_RS_FEC_DEGRADE_SER_RECEIVED2_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_1 V_RS_FEC_DEGRADE_SER_RECEIVED2_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_2 V_RS_FEC_DEGRADE_SER_RECEIVED2_2(1U)
+
+#define S_FEC_STATUS_2_4 4
+#define V_FEC_STATUS_2_4(x) ((x) << S_FEC_STATUS_2_4)
+#define F_FEC_STATUS_2_4 V_FEC_STATUS_2_4(1U)
+
+#define S_FEC_STATUS_2_3 3
+#define V_FEC_STATUS_2_3(x) ((x) << S_FEC_STATUS_2_3)
+#define F_FEC_STATUS_2_3 V_FEC_STATUS_2_3(1U)
+
+#define S_FEC_STATUS_2_2 2
+#define V_FEC_STATUS_2_2(x) ((x) << S_FEC_STATUS_2_2)
+#define F_FEC_STATUS_2_2 V_FEC_STATUS_2_2(1U)
+
+#define S_FEC_STATUS_2_1 1
+#define V_FEC_STATUS_2_1(x) ((x) << S_FEC_STATUS_2_1)
+#define F_FEC_STATUS_2_1 V_FEC_STATUS_2_1(1U)
+
+#define S_FEC_STATUS_2_0 0
+#define V_FEC_STATUS_2_0(x) ((x) << S_FEC_STATUS_2_0)
+#define F_FEC_STATUS_2_0 V_FEC_STATUS_2_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_2 0x38e48
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_2 0x38e4c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_2 0x38e50
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_2 0x38e54
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_2 0x38e58
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_2 0x38e5c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_3 0x38e60
+#define A_MAC_MTIP_RS_FEC_STATUS_0_3 0x38e64
+
+#define S_FEC_STATUS_3_14 14
+#define V_FEC_STATUS_3_14(x) ((x) << S_FEC_STATUS_3_14)
+#define F_FEC_STATUS_3_14 V_FEC_STATUS_3_14(1U)
+
+#define S_FEC_STATUS_3_11 8
+#define M_FEC_STATUS_3_11 0xfU
+#define V_FEC_STATUS_3_11(x) ((x) << S_FEC_STATUS_3_11)
+#define G_FEC_STATUS_3_11(x) (((x) >> S_FEC_STATUS_3_11) & M_FEC_STATUS_3_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_0 V_RS_FEC_DEGRADE_SER_RECEIVED3_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_1 V_RS_FEC_DEGRADE_SER_RECEIVED3_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_2 V_RS_FEC_DEGRADE_SER_RECEIVED3_2(1U)
+
+#define S_FEC_STATUS_3_4 4
+#define V_FEC_STATUS_3_4(x) ((x) << S_FEC_STATUS_3_4)
+#define F_FEC_STATUS_3_4 V_FEC_STATUS_3_4(1U)
+
+#define S_FEC_STATUS_3_3 3
+#define V_FEC_STATUS_3_3(x) ((x) << S_FEC_STATUS_3_3)
+#define F_FEC_STATUS_3_3 V_FEC_STATUS_3_3(1U)
+
+#define S_FEC_STATUS_3_2 2
+#define V_FEC_STATUS_3_2(x) ((x) << S_FEC_STATUS_3_2)
+#define F_FEC_STATUS_3_2 V_FEC_STATUS_3_2(1U)
+
+#define S_FEC_STATUS_3_1 1
+#define V_FEC_STATUS_3_1(x) ((x) << S_FEC_STATUS_3_1)
+#define F_FEC_STATUS_3_1 V_FEC_STATUS_3_1(1U)
+
+#define S_FEC_STATUS_3_0 0
+#define V_FEC_STATUS_3_0(x) ((x) << S_FEC_STATUS_3_0)
+#define F_FEC_STATUS_3_0 V_FEC_STATUS_3_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_3 0x38e68
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_3 0x38e6c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_3 0x38e70
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_3 0x38e74
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_3 0x38e78
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_3 0x38e7c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_4 0x38e80
+#define A_MAC_MTIP_RS_FEC_STATUS_0_4 0x38e84
+
+#define S_FEC_STATUS_4_14 14
+#define V_FEC_STATUS_4_14(x) ((x) << S_FEC_STATUS_4_14)
+#define F_FEC_STATUS_4_14 V_FEC_STATUS_4_14(1U)
+
+#define S_FEC_STATUS_4_11 8
+#define M_FEC_STATUS_4_11 0xfU
+#define V_FEC_STATUS_4_11(x) ((x) << S_FEC_STATUS_4_11)
+#define G_FEC_STATUS_4_11(x) (((x) >> S_FEC_STATUS_4_11) & M_FEC_STATUS_4_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_0 V_RS_FEC_DEGRADE_SER_RECEIVED4_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_1 V_RS_FEC_DEGRADE_SER_RECEIVED4_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_2 V_RS_FEC_DEGRADE_SER_RECEIVED4_2(1U)
+
+#define S_FEC_STATUS_4_4 4
+#define V_FEC_STATUS_4_4(x) ((x) << S_FEC_STATUS_4_4)
+#define F_FEC_STATUS_4_4 V_FEC_STATUS_4_4(1U)
+
+#define S_FEC_STATUS_4_3 3
+#define V_FEC_STATUS_4_3(x) ((x) << S_FEC_STATUS_4_3)
+#define F_FEC_STATUS_4_3 V_FEC_STATUS_4_3(1U)
+
+#define S_FEC_STATUS_4_2 2
+#define V_FEC_STATUS_4_2(x) ((x) << S_FEC_STATUS_4_2)
+#define F_FEC_STATUS_4_2 V_FEC_STATUS_4_2(1U)
+
+#define S_FEC_STATUS_4_1 1
+#define V_FEC_STATUS_4_1(x) ((x) << S_FEC_STATUS_4_1)
+#define F_FEC_STATUS_4_1 V_FEC_STATUS_4_1(1U)
+
+#define S_FEC_STATUS_4_0 0
+#define V_FEC_STATUS_4_0(x) ((x) << S_FEC_STATUS_4_0)
+#define F_FEC_STATUS_4_0 V_FEC_STATUS_4_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_4 0x38e88
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_4 0x38e8c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_4 0x38e90
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_4 0x38e94
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_4 0x38e98
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_4 0x38e9c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_5 0x38ea0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_5 0x38ea4
+
+#define S_FEC_STATUS_5_14 14
+#define V_FEC_STATUS_5_14(x) ((x) << S_FEC_STATUS_5_14)
+#define F_FEC_STATUS_5_14 V_FEC_STATUS_5_14(1U)
+
+#define S_FEC_STATUS_5_11 8
+#define M_FEC_STATUS_5_11 0xfU
+#define V_FEC_STATUS_5_11(x) ((x) << S_FEC_STATUS_5_11)
+#define G_FEC_STATUS_5_11(x) (((x) >> S_FEC_STATUS_5_11) & M_FEC_STATUS_5_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_0 V_RS_FEC_DEGRADE_SER_RECEIVED5_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_1 V_RS_FEC_DEGRADE_SER_RECEIVED5_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_2 V_RS_FEC_DEGRADE_SER_RECEIVED5_2(1U)
+
+#define S_FEC_STATUS_5_4 4
+#define V_FEC_STATUS_5_4(x) ((x) << S_FEC_STATUS_5_4)
+#define F_FEC_STATUS_5_4 V_FEC_STATUS_5_4(1U)
+
+#define S_FEC_STATUS_5_3 3
+#define V_FEC_STATUS_5_3(x) ((x) << S_FEC_STATUS_5_3)
+#define F_FEC_STATUS_5_3 V_FEC_STATUS_5_3(1U)
+
+#define S_FEC_STATUS_5_2 2
+#define V_FEC_STATUS_5_2(x) ((x) << S_FEC_STATUS_5_2)
+#define F_FEC_STATUS_5_2 V_FEC_STATUS_5_2(1U)
+
+#define S_FEC_STATUS_5_1 1
+#define V_FEC_STATUS_5_1(x) ((x) << S_FEC_STATUS_5_1)
+#define F_FEC_STATUS_5_1 V_FEC_STATUS_5_1(1U)
+
+#define S_FEC_STATUS_5_0 0
+#define V_FEC_STATUS_5_0(x) ((x) << S_FEC_STATUS_5_0)
+#define F_FEC_STATUS_5_0 V_FEC_STATUS_5_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_5 0x38ea8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_5 0x38eac
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_5 0x38eb0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_5 0x38eb4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_5 0x38eb8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_5 0x38ebc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_6 0x38ec0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_6 0x38ec4
+
+#define S_FEC_STATUS_6_14 14
+#define V_FEC_STATUS_6_14(x) ((x) << S_FEC_STATUS_6_14)
+#define F_FEC_STATUS_6_14 V_FEC_STATUS_6_14(1U)
+
+#define S_FEC_STATUS_6_11 8
+#define M_FEC_STATUS_6_11 0xfU
+#define V_FEC_STATUS_6_11(x) ((x) << S_FEC_STATUS_6_11)
+#define G_FEC_STATUS_6_11(x) (((x) >> S_FEC_STATUS_6_11) & M_FEC_STATUS_6_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_0 V_RS_FEC_DEGRADE_SER_RECEIVED6_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_1 V_RS_FEC_DEGRADE_SER_RECEIVED6_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_2 V_RS_FEC_DEGRADE_SER_RECEIVED6_2(1U)
+
+#define S_FEC_STATUS_6_4 4
+#define V_FEC_STATUS_6_4(x) ((x) << S_FEC_STATUS_6_4)
+#define F_FEC_STATUS_6_4 V_FEC_STATUS_6_4(1U)
+
+#define S_FEC_STATUS_6_3 3
+#define V_FEC_STATUS_6_3(x) ((x) << S_FEC_STATUS_6_3)
+#define F_FEC_STATUS_6_3 V_FEC_STATUS_6_3(1U)
+
+#define S_FEC_STATUS_6_2 2
+#define V_FEC_STATUS_6_2(x) ((x) << S_FEC_STATUS_6_2)
+#define F_FEC_STATUS_6_2 V_FEC_STATUS_6_2(1U)
+
+#define S_FEC_STATUS_6_1 1
+#define V_FEC_STATUS_6_1(x) ((x) << S_FEC_STATUS_6_1)
+#define F_FEC_STATUS_6_1 V_FEC_STATUS_6_1(1U)
+
+#define S_FEC_STATUS_6_0 0
+#define V_FEC_STATUS_6_0(x) ((x) << S_FEC_STATUS_6_0)
+#define F_FEC_STATUS_6_0 V_FEC_STATUS_6_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_6 0x38ec8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_6 0x38ecc
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_6 0x38ed0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_6 0x38ed4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_6 0x38ed8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_6 0x38edc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_7 0x38ee0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_7 0x38ee4
+
+#define S_FEC_STATUS_7_14 14
+#define V_FEC_STATUS_7_14(x) ((x) << S_FEC_STATUS_7_14)
+#define F_FEC_STATUS_7_14 V_FEC_STATUS_7_14(1U)
+
+#define S_FEC_STATUS_7_11 8
+#define M_FEC_STATUS_7_11 0xfU
+#define V_FEC_STATUS_7_11(x) ((x) << S_FEC_STATUS_7_11)
+#define G_FEC_STATUS_7_11(x) (((x) >> S_FEC_STATUS_7_11) & M_FEC_STATUS_7_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_0 V_RS_FEC_DEGRADE_SER_RECEIVED7_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_1 V_RS_FEC_DEGRADE_SER_RECEIVED7_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_2 V_RS_FEC_DEGRADE_SER_RECEIVED7_2(1U)
+
+#define S_FEC_STATUS_7_4 4
+#define V_FEC_STATUS_7_4(x) ((x) << S_FEC_STATUS_7_4)
+#define F_FEC_STATUS_7_4 V_FEC_STATUS_7_4(1U)
+
+#define S_FEC_STATUS_7_3 3
+#define V_FEC_STATUS_7_3(x) ((x) << S_FEC_STATUS_7_3)
+#define F_FEC_STATUS_7_3 V_FEC_STATUS_7_3(1U)
+
+#define S_FEC_STATUS_7_2 2
+#define V_FEC_STATUS_7_2(x) ((x) << S_FEC_STATUS_7_2)
+#define F_FEC_STATUS_7_2 V_FEC_STATUS_7_2(1U)
+
+#define S_FEC_STATUS_7_1 1
+#define V_FEC_STATUS_7_1(x) ((x) << S_FEC_STATUS_7_1)
+#define F_FEC_STATUS_7_1 V_FEC_STATUS_7_1(1U)
+
+#define S_FEC_STATUS_7_0 0
+#define V_FEC_STATUS_7_0(x) ((x) << S_FEC_STATUS_7_0)
+#define F_FEC_STATUS_7_0 V_FEC_STATUS_7_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_7 0x38ee8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_7 0x38eec
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_7 0x38ef0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_7 0x38ef4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_7 0x38ef8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_7 0x38efc
+#define A_MAC_MTIP_RS_FEC_HISER_CW 0x38f00
+
+#define S_HISER_CW 0
+#define M_HISER_CW 0xffffU
+#define V_HISER_CW(x) ((x) << S_HISER_CW)
+#define G_HISER_CW(x) (((x) >> S_HISER_CW) & M_HISER_CW)
+
+#define A_MAC_MTIP_RS_FEC_HISER_THRESH 0x38f04
+
+#define S_HISER_THRESH 0
+#define M_HISER_THRESH 0xffffU
+#define V_HISER_THRESH(x) ((x) << S_HISER_THRESH)
+#define G_HISER_THRESH(x) (((x) >> S_HISER_THRESH) & M_HISER_THRESH)
+
+#define A_MAC_MTIP_RS_FEC_HISER_TIME 0x38f08
+
+#define S_HISER_TIME 0
+#define M_HISER_TIME 0xffffU
+#define V_HISER_TIME(x) ((x) << S_HISER_TIME)
+#define G_HISER_TIME(x) (((x) >> S_HISER_TIME) & M_HISER_TIME)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW 0x38f10
+
+#define S_DEGRADE_SET_CW 0
+#define M_DEGRADE_SET_CW 0xffffU
+#define V_DEGRADE_SET_CW(x) ((x) << S_DEGRADE_SET_CW)
+#define G_DEGRADE_SET_CW(x) (((x) >> S_DEGRADE_SET_CW) & M_DEGRADE_SET_CW)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW_HI 0x38f14
+
+#define S_DEGRADE_SET_CW_HI 0
+#define M_DEGRADE_SET_CW_HI 0xffffU
+#define V_DEGRADE_SET_CW_HI(x) ((x) << S_DEGRADE_SET_CW_HI)
+#define G_DEGRADE_SET_CW_HI(x) (((x) >> S_DEGRADE_SET_CW_HI) & M_DEGRADE_SET_CW_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH 0x38f18
+
+#define S_DEGRADE_SET_THRESH 0
+#define M_DEGRADE_SET_THRESH 0xffffU
+#define V_DEGRADE_SET_THRESH(x) ((x) << S_DEGRADE_SET_THRESH)
+#define G_DEGRADE_SET_THRESH(x) (((x) >> S_DEGRADE_SET_THRESH) & M_DEGRADE_SET_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH_HI 0x38f1c
+
+#define S_DEGRADE_SET_THRESH_HI 0
+#define M_DEGRADE_SET_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_THRESH_HI(x) ((x) << S_DEGRADE_SET_THRESH_HI)
+#define G_DEGRADE_SET_THRESH_HI(x) (((x) >> S_DEGRADE_SET_THRESH_HI) & M_DEGRADE_SET_THRESH_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR 0x38f20
+
+#define S_DEGRADE_SET_CLEAR 0
+#define M_DEGRADE_SET_CLEAR 0xffffU
+#define V_DEGRADE_SET_CLEAR(x) ((x) << S_DEGRADE_SET_CLEAR)
+#define G_DEGRADE_SET_CLEAR(x) (((x) >> S_DEGRADE_SET_CLEAR) & M_DEGRADE_SET_CLEAR)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_HI 0x38f24
+
+#define S_DEGRADE_SET_CLEAR_HI 0
+#define M_DEGRADE_SET_CLEAR_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_HI(x) ((x) << S_DEGRADE_SET_CLEAR_HI)
+#define G_DEGRADE_SET_CLEAR_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_HI) & M_DEGRADE_SET_CLEAR_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR_THRESH 0x38f28
+
+#define S_DEGRADE_SET_CLEAR_THRESH 0
+#define M_DEGRADE_SET_CLEAR_THRESH 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH)
+#define G_DEGRADE_SET_CLEAR_THRESH(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH) & M_DEGRADE_SET_CLEAR_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_THRESH_HI 0x38f2c
+
+#define S_DEGRADE_SET_CLEAR_THRESH_HI 0
+#define M_DEGRADE_SET_CLEAR_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH_HI(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH_HI)
+#define G_DEGRADE_SET_CLEAR_THRESH_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH_HI) & M_DEGRADE_SET_CLEAR_THRESH_HI)
+
+#define A_MAC_MTIP_RS_VL0_0 0x38f80
+#define A_MAC_MTIP_RS_VL0_1 0x38f84
+#define A_MAC_MTIP_RS_VL1_0 0x38f88
+#define A_MAC_MTIP_RS_VL1_1 0x38f8c
+#define A_MAC_MTIP_RS_VL2_0 0x38f90
+#define A_MAC_MTIP_RS_VL2_1 0x38f94
+#define A_MAC_MTIP_RS_VL3_0 0x38f98
+#define A_MAC_MTIP_RS_VL3_1 0x38f9c
+#define A_MAC_MTIP_RS_VL4_0 0x38fa0
+#define A_MAC_MTIP_RS_VL4_1 0x38fa4
+#define A_MAC_MTIP_RS_VL5_0 0x38fa8
+#define A_MAC_MTIP_RS_VL5_1 0x38fac
+#define A_MAC_MTIP_RS_VL6_0 0x38fb0
+#define A_MAC_MTIP_RS_VL6_1 0x38fb4
+#define A_MAC_MTIP_RS_VL7_0 0x38fb8
+#define A_MAC_MTIP_RS_VL7_1 0x38fbc
+#define A_MAC_MTIP_RS_VL8_0 0x38fc0
+#define A_MAC_MTIP_RS_VL8_1 0x38fc4
+#define A_MAC_MTIP_RS_VL9_0 0x38fc8
+#define A_MAC_MTIP_RS_VL9_1 0x38fcc
+#define A_MAC_MTIP_RS_VL10_0 0x38fd0
+#define A_MAC_MTIP_RS_VL10_1 0x38fd4
+#define A_MAC_MTIP_RS_VL11_0 0x38fd8
+#define A_MAC_MTIP_RS_VL11_1 0x38fdc
+#define A_MAC_MTIP_RS_VL12_0 0x38fe0
+#define A_MAC_MTIP_RS_VL12_1 0x38fe4
+#define A_MAC_MTIP_RS_VL13_0 0x38fe8
+#define A_MAC_MTIP_RS_VL13_1 0x38fec
+#define A_MAC_MTIP_RS_VL14_0 0x38ff0
+#define A_MAC_MTIP_RS_VL14_1 0x38ff4
+#define A_MAC_MTIP_RS_VL15_0 0x38ff8
+#define A_MAC_MTIP_RS_VL15_1 0x38ffc
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_LO 0x39000
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_HI 0x39004
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_LO 0x39008
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_HI 0x3900c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_LO 0x39010
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_HI 0x39014
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_LO 0x39018
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_HI 0x3901c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_LO 0x39020
+
+#define S_RS_FEC_SYMBLERR4_LO 0
+#define V_RS_FEC_SYMBLERR4_LO(x) ((x) << S_RS_FEC_SYMBLERR4_LO)
+#define F_RS_FEC_SYMBLERR4_LO V_RS_FEC_SYMBLERR4_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_HI 0x39024
+
+#define S_RS_FEC_SYMBLERR4_HI 0
+#define V_RS_FEC_SYMBLERR4_HI(x) ((x) << S_RS_FEC_SYMBLERR4_HI)
+#define F_RS_FEC_SYMBLERR4_HI V_RS_FEC_SYMBLERR4_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_LO 0x39028
+
+#define S_RS_FEC_SYMBLERR5_LO 0
+#define V_RS_FEC_SYMBLERR5_LO(x) ((x) << S_RS_FEC_SYMBLERR5_LO)
+#define F_RS_FEC_SYMBLERR5_LO V_RS_FEC_SYMBLERR5_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_HI 0x3902c
+
+#define S_RS_FEC_SYMBLERR5_HI 0
+#define V_RS_FEC_SYMBLERR5_HI(x) ((x) << S_RS_FEC_SYMBLERR5_HI)
+#define F_RS_FEC_SYMBLERR5_HI V_RS_FEC_SYMBLERR5_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_LO 0x39030
+
+#define S_RS_FEC_SYMBLERR6_LO 0
+#define V_RS_FEC_SYMBLERR6_LO(x) ((x) << S_RS_FEC_SYMBLERR6_LO)
+#define F_RS_FEC_SYMBLERR6_LO V_RS_FEC_SYMBLERR6_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_HI 0x39034
+
+#define S_RS_FEC_SYMBLERR6_HI 0
+#define V_RS_FEC_SYMBLERR6_HI(x) ((x) << S_RS_FEC_SYMBLERR6_HI)
+#define F_RS_FEC_SYMBLERR6_HI V_RS_FEC_SYMBLERR6_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_LO 0x39038
+
+#define S_RS_FEC_SYMBLERR7_LO 0
+#define V_RS_FEC_SYMBLERR7_LO(x) ((x) << S_RS_FEC_SYMBLERR7_LO)
+#define F_RS_FEC_SYMBLERR7_LO V_RS_FEC_SYMBLERR7_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_HI 0x3903c
+
+#define S_RS_FEC_SYMBLERR7_HI 0
+#define V_RS_FEC_SYMBLERR7_HI(x) ((x) << S_RS_FEC_SYMBLERR7_HI)
+#define F_RS_FEC_SYMBLERR7_HI V_RS_FEC_SYMBLERR7_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_LO 0x39040
+
+#define S_RS_FEC_SYMBLERR8_LO 0
+#define V_RS_FEC_SYMBLERR8_LO(x) ((x) << S_RS_FEC_SYMBLERR8_LO)
+#define F_RS_FEC_SYMBLERR8_LO V_RS_FEC_SYMBLERR8_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_HI 0x39044
+
+#define S_RS_FEC_SYMBLERR8_HI 0
+#define V_RS_FEC_SYMBLERR8_HI(x) ((x) << S_RS_FEC_SYMBLERR8_HI)
+#define F_RS_FEC_SYMBLERR8_HI V_RS_FEC_SYMBLERR8_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_LO 0x39048
+
+#define S_RS_FEC_SYMBLERR9_LO 0
+#define V_RS_FEC_SYMBLERR9_LO(x) ((x) << S_RS_FEC_SYMBLERR9_LO)
+#define F_RS_FEC_SYMBLERR9_LO V_RS_FEC_SYMBLERR9_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_HI 0x3904c
+
+#define S_RS_FEC_SYMBLERR9_HI 0
+#define V_RS_FEC_SYMBLERR9_HI(x) ((x) << S_RS_FEC_SYMBLERR9_HI)
+#define F_RS_FEC_SYMBLERR9_HI V_RS_FEC_SYMBLERR9_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_LO 0x39050
+
+#define S_RS_FEC_SYMBLERR10_LO 0
+#define V_RS_FEC_SYMBLERR10_LO(x) ((x) << S_RS_FEC_SYMBLERR10_LO)
+#define F_RS_FEC_SYMBLERR10_LO V_RS_FEC_SYMBLERR10_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_HI 0x39054
+
+#define S_RS_FEC_SYMBLERR10_HI 0
+#define V_RS_FEC_SYMBLERR10_HI(x) ((x) << S_RS_FEC_SYMBLERR10_HI)
+#define F_RS_FEC_SYMBLERR10_HI V_RS_FEC_SYMBLERR10_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_LO 0x39058
+
+#define S_RS_FEC_SYMBLERR11_LO 0
+#define V_RS_FEC_SYMBLERR11_LO(x) ((x) << S_RS_FEC_SYMBLERR11_LO)
+#define F_RS_FEC_SYMBLERR11_LO V_RS_FEC_SYMBLERR11_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_HI 0x3905c
+
+#define S_RS_FEC_SYMBLERR11_HI 0
+#define V_RS_FEC_SYMBLERR11_HI(x) ((x) << S_RS_FEC_SYMBLERR11_HI)
+#define F_RS_FEC_SYMBLERR11_HI V_RS_FEC_SYMBLERR11_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_LO 0x39060
+
+#define S_RS_FEC_SYMBLERR12_LO 0
+#define V_RS_FEC_SYMBLERR12_LO(x) ((x) << S_RS_FEC_SYMBLERR12_LO)
+#define F_RS_FEC_SYMBLERR12_LO V_RS_FEC_SYMBLERR12_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_HI 0x39064
+
+#define S_RS_FEC_SYMBLERR12_HI 0
+#define V_RS_FEC_SYMBLERR12_HI(x) ((x) << S_RS_FEC_SYMBLERR12_HI)
+#define F_RS_FEC_SYMBLERR12_HI V_RS_FEC_SYMBLERR12_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_LO 0x39068
+
+#define S_RS_FEC_SYMBLERR13_LO 0
+#define V_RS_FEC_SYMBLERR13_LO(x) ((x) << S_RS_FEC_SYMBLERR13_LO)
+#define F_RS_FEC_SYMBLERR13_LO V_RS_FEC_SYMBLERR13_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_HI 0x3906c
+
+#define S_RS_FEC_SYMBLERR13_HI 0
+#define V_RS_FEC_SYMBLERR13_HI(x) ((x) << S_RS_FEC_SYMBLERR13_HI)
+#define F_RS_FEC_SYMBLERR13_HI V_RS_FEC_SYMBLERR13_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_LO 0x39070
+
+#define S_RS_FEC_SYMBLERR14_LO 0
+#define V_RS_FEC_SYMBLERR14_LO(x) ((x) << S_RS_FEC_SYMBLERR14_LO)
+#define F_RS_FEC_SYMBLERR14_LO V_RS_FEC_SYMBLERR14_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_HI 0x39074
+
+#define S_RS_FEC_SYMBLERR14_HI 0
+#define V_RS_FEC_SYMBLERR14_HI(x) ((x) << S_RS_FEC_SYMBLERR14_HI)
+#define F_RS_FEC_SYMBLERR14_HI V_RS_FEC_SYMBLERR14_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_LO 0x39078
+
+#define S_RS_FEC_SYMBLERR15_LO 0
+#define V_RS_FEC_SYMBLERR15_LO(x) ((x) << S_RS_FEC_SYMBLERR15_LO)
+#define F_RS_FEC_SYMBLERR15_LO V_RS_FEC_SYMBLERR15_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_HI 0x3907c
+
+#define S_RS_FEC_SYMBLERR15_HI 0
+#define V_RS_FEC_SYMBLERR15_HI(x) ((x) << S_RS_FEC_SYMBLERR15_HI)
+#define F_RS_FEC_SYMBLERR15_HI V_RS_FEC_SYMBLERR15_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_CONTROL 0x39080
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_1 0x39084
+
+#define S_VENDOR_INFO_1_AMPS_LOCK 0
+#define V_VENDOR_INFO_1_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_1_AMPS_LOCK)
+#define F_VENDOR_INFO_1_AMPS_LOCK V_VENDOR_INFO_1_AMPS_LOCK(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_2 0x39088
+
+#define S_VENDOR_INFO_2_AMPS_LOCK 0
+#define M_VENDOR_INFO_2_AMPS_LOCK 0xffffU
+#define V_VENDOR_INFO_2_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_2_AMPS_LOCK)
+#define G_VENDOR_INFO_2_AMPS_LOCK(x) (((x) >> S_VENDOR_INFO_2_AMPS_LOCK) & M_VENDOR_INFO_2_AMPS_LOCK)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_REVISION 0x3908c
+#define A_MAC_MTIP_RS_FEC_VENDOR_ALIGN_STATUS 0x39090
+
+#define S_RS_FEC_VENDOR_ALIGN_STATUS 0
+#define M_RS_FEC_VENDOR_ALIGN_STATUS 0xffffU
+#define V_RS_FEC_VENDOR_ALIGN_STATUS(x) ((x) << S_RS_FEC_VENDOR_ALIGN_STATUS)
+#define G_RS_FEC_VENDOR_ALIGN_STATUS(x) (((x) >> S_RS_FEC_VENDOR_ALIGN_STATUS) & M_RS_FEC_VENDOR_ALIGN_STATUS)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_0 0x39100
+
+#define S_FEC74_FEC_ABILITY_0_B1 1
+#define V_FEC74_FEC_ABILITY_0_B1(x) ((x) << S_FEC74_FEC_ABILITY_0_B1)
+#define F_FEC74_FEC_ABILITY_0_B1 V_FEC74_FEC_ABILITY_0_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_0_B0 0
+#define V_FEC74_FEC_ABILITY_0_B0(x) ((x) << S_FEC74_FEC_ABILITY_0_B0)
+#define F_FEC74_FEC_ABILITY_0_B0 V_FEC74_FEC_ABILITY_0_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_0 0x39104
+
+#define S_FEC_ENABLE_ERROR_INDICATION 1
+#define V_FEC_ENABLE_ERROR_INDICATION(x) ((x) << S_FEC_ENABLE_ERROR_INDICATION)
+#define F_FEC_ENABLE_ERROR_INDICATION V_FEC_ENABLE_ERROR_INDICATION(1U)
+
+#define S_T7_FEC_ENABLE 0
+#define V_T7_FEC_ENABLE(x) ((x) << S_T7_FEC_ENABLE)
+#define F_T7_FEC_ENABLE V_T7_FEC_ENABLE(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_STATUS_0 0x39108
+
+#define S_FEC_LOCKED_1 1
+#define V_FEC_LOCKED_1(x) ((x) << S_FEC_LOCKED_1)
+#define F_FEC_LOCKED_1 V_FEC_LOCKED_1(1U)
+
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_0 0x3910c
+
+#define S_VL0_CCW_LO 0
+#define M_VL0_CCW_LO 0xffffU
+#define V_VL0_CCW_LO(x) ((x) << S_VL0_CCW_LO)
+#define G_VL0_CCW_LO(x) (((x) >> S_VL0_CCW_LO) & M_VL0_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_0 0x39110
+
+#define S_VL0_NCCW_LO 0
+#define M_VL0_NCCW_LO 0xffffU
+#define V_VL0_NCCW_LO(x) ((x) << S_VL0_NCCW_LO)
+#define G_VL0_NCCW_LO(x) (((x) >> S_VL0_NCCW_LO) & M_VL0_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_0 0x39114
+
+#define S_VL1_CCW_LO 0
+#define M_VL1_CCW_LO 0xffffU
+#define V_VL1_CCW_LO(x) ((x) << S_VL1_CCW_LO)
+#define G_VL1_CCW_LO(x) (((x) >> S_VL1_CCW_LO) & M_VL1_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_0 0x39118
+
+#define S_VL1_NCCW_LO 0
+#define M_VL1_NCCW_LO 0xffffU
+#define V_VL1_NCCW_LO(x) ((x) << S_VL1_NCCW_LO)
+#define G_VL1_NCCW_LO(x) (((x) >> S_VL1_NCCW_LO) & M_VL1_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_COUNTER_HI_0 0x3911c
+
+#define S_COUNTER_HI 0
+#define M_COUNTER_HI 0xffffU
+#define V_COUNTER_HI(x) ((x) << S_COUNTER_HI)
+#define G_COUNTER_HI(x) (((x) >> S_COUNTER_HI) & M_COUNTER_HI)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_1 0x39120
+
+#define S_FEC74_FEC_ABILITY_1_B1 1
+#define V_FEC74_FEC_ABILITY_1_B1(x) ((x) << S_FEC74_FEC_ABILITY_1_B1)
+#define F_FEC74_FEC_ABILITY_1_B1 V_FEC74_FEC_ABILITY_1_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_1_B0 0
+#define V_FEC74_FEC_ABILITY_1_B0(x) ((x) << S_FEC74_FEC_ABILITY_1_B0)
+#define F_FEC74_FEC_ABILITY_1_B0 V_FEC74_FEC_ABILITY_1_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_1 0x39124
+#define A_MAC_MTIP_FEC74_FEC_STATUS_1 0x39128
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_1 0x3912c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_1 0x39130
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_1 0x39134
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_1 0x39138
+#define A_MAC_MTIP_FEC74_COUNTER_HI_1 0x3913c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_2 0x39140
+
+#define S_FEC74_FEC_ABILITY_2_B1 1
+#define V_FEC74_FEC_ABILITY_2_B1(x) ((x) << S_FEC74_FEC_ABILITY_2_B1)
+#define F_FEC74_FEC_ABILITY_2_B1 V_FEC74_FEC_ABILITY_2_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_2_B0 0
+#define V_FEC74_FEC_ABILITY_2_B0(x) ((x) << S_FEC74_FEC_ABILITY_2_B0)
+#define F_FEC74_FEC_ABILITY_2_B0 V_FEC74_FEC_ABILITY_2_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_2 0x39144
+#define A_MAC_MTIP_FEC74_FEC_STATUS_2 0x39148
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_2 0x3914c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_2 0x39150
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_2 0x39154
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_2 0x39158
+#define A_MAC_MTIP_FEC74_COUNTER_HI_2 0x3915c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_3 0x39160
+
+#define S_FEC74_FEC_ABILITY_3_B1 1
+#define V_FEC74_FEC_ABILITY_3_B1(x) ((x) << S_FEC74_FEC_ABILITY_3_B1)
+#define F_FEC74_FEC_ABILITY_3_B1 V_FEC74_FEC_ABILITY_3_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_3_B0 0
+#define V_FEC74_FEC_ABILITY_3_B0(x) ((x) << S_FEC74_FEC_ABILITY_3_B0)
+#define F_FEC74_FEC_ABILITY_3_B0 V_FEC74_FEC_ABILITY_3_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_3 0x39164
+#define A_MAC_MTIP_FEC74_FEC_STATUS_3 0x39168
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_3 0x3916c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_3 0x39170
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_3 0x39174
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_3 0x39178
+#define A_MAC_MTIP_FEC74_COUNTER_HI_3 0x3917c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_4 0x39180
+
+#define S_FEC74_FEC_ABILITY_4_B1 1
+#define V_FEC74_FEC_ABILITY_4_B1(x) ((x) << S_FEC74_FEC_ABILITY_4_B1)
+#define F_FEC74_FEC_ABILITY_4_B1 V_FEC74_FEC_ABILITY_4_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_4_B0 0
+#define V_FEC74_FEC_ABILITY_4_B0(x) ((x) << S_FEC74_FEC_ABILITY_4_B0)
+#define F_FEC74_FEC_ABILITY_4_B0 V_FEC74_FEC_ABILITY_4_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_4 0x39184
+#define A_MAC_MTIP_FEC74_FEC_STATUS_4 0x39188
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_4 0x3918c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_4 0x39190
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_4 0x39194
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_4 0x39198
+#define A_MAC_MTIP_FEC74_COUNTER_HI_4 0x3919c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_5 0x391a0
+
+#define S_FEC74_FEC_ABILITY_5_B1 1
+#define V_FEC74_FEC_ABILITY_5_B1(x) ((x) << S_FEC74_FEC_ABILITY_5_B1)
+#define F_FEC74_FEC_ABILITY_5_B1 V_FEC74_FEC_ABILITY_5_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_5_B0 0
+#define V_FEC74_FEC_ABILITY_5_B0(x) ((x) << S_FEC74_FEC_ABILITY_5_B0)
+#define F_FEC74_FEC_ABILITY_5_B0 V_FEC74_FEC_ABILITY_5_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_5 0x391a4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_5 0x391a8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_5 0x391ac
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_5 0x391b0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_5 0x391b4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_5 0x391b8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_5 0x391bc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_6 0x391c0
+
+#define S_FEC74_FEC_ABILITY_6_B1 1
+#define V_FEC74_FEC_ABILITY_6_B1(x) ((x) << S_FEC74_FEC_ABILITY_6_B1)
+#define F_FEC74_FEC_ABILITY_6_B1 V_FEC74_FEC_ABILITY_6_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_6_B0 0
+#define V_FEC74_FEC_ABILITY_6_B0(x) ((x) << S_FEC74_FEC_ABILITY_6_B0)
+#define F_FEC74_FEC_ABILITY_6_B0 V_FEC74_FEC_ABILITY_6_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_6 0x391c4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_6 0x391c8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_6 0x391cc
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_6 0x391d0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_6 0x391d4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_6 0x391d8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_6 0x391dc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_7 0x391e0
+
+#define S_FEC74_FEC_ABILITY_7_B1 1
+#define V_FEC74_FEC_ABILITY_7_B1(x) ((x) << S_FEC74_FEC_ABILITY_7_B1)
+#define F_FEC74_FEC_ABILITY_7_B1 V_FEC74_FEC_ABILITY_7_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_7_B0 0
+#define V_FEC74_FEC_ABILITY_7_B0(x) ((x) << S_FEC74_FEC_ABILITY_7_B0)
+#define F_FEC74_FEC_ABILITY_7_B0 V_FEC74_FEC_ABILITY_7_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_7 0x391e4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_7 0x391e8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_7 0x391ec
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_7 0x391f0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_7 0x391f4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_7 0x391f8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_7 0x391fc
+#define A_MAC_BEAN0_CTL 0x39200
+#define A_MAC_BEAN0_STATUS 0x39204
+#define A_MAC_BEAN0_ABILITY_0 0x39208
+
+#define S_BEAN0_REM_FAULT 13
+#define V_BEAN0_REM_FAULT(x) ((x) << S_BEAN0_REM_FAULT)
+#define F_BEAN0_REM_FAULT V_BEAN0_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_ABILITY_1 0x3920c
+#define A_MAC_BEAN0_ABILITY_2 0x39210
+
+#define S_BEAN0_AB_2_15_12 12
+#define M_BEAN0_AB_2_15_12 0xfU
+#define V_BEAN0_AB_2_15_12(x) ((x) << S_BEAN0_AB_2_15_12)
+#define G_BEAN0_AB_2_15_12(x) (((x) >> S_BEAN0_AB_2_15_12) & M_BEAN0_AB_2_15_12)
+
+#define S_BEAN0_AB_2_11_0 0
+#define M_BEAN0_AB_2_11_0 0xfffU
+#define V_BEAN0_AB_2_11_0(x) ((x) << S_BEAN0_AB_2_11_0)
+#define G_BEAN0_AB_2_11_0(x) (((x) >> S_BEAN0_AB_2_11_0) & M_BEAN0_AB_2_11_0)
+
+#define A_MAC_BEAN0_REM_ABILITY_0 0x39214
+
+#define S_BEAN0_ABL_REM_FAULT 13
+#define V_BEAN0_ABL_REM_FAULT(x) ((x) << S_BEAN0_ABL_REM_FAULT)
+#define F_BEAN0_ABL_REM_FAULT V_BEAN0_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_REM_ABILITY_1 0x39218
+#define A_MAC_BEAN0_REM_ABILITY_2 0x3921c
+
+#define S_BEAN0_REM_AB_15_12 12
+#define M_BEAN0_REM_AB_15_12 0xfU
+#define V_BEAN0_REM_AB_15_12(x) ((x) << S_BEAN0_REM_AB_15_12)
+#define G_BEAN0_REM_AB_15_12(x) (((x) >> S_BEAN0_REM_AB_15_12) & M_BEAN0_REM_AB_15_12)
+
+#define S_BEAN0_REM_AB_11_0 0
+#define M_BEAN0_REM_AB_11_0 0xfffU
+#define V_BEAN0_REM_AB_11_0(x) ((x) << S_BEAN0_REM_AB_11_0)
+#define G_BEAN0_REM_AB_11_0(x) (((x) >> S_BEAN0_REM_AB_11_0) & M_BEAN0_REM_AB_11_0)
+
+#define A_MAC_BEAN0_MS_COUNT 0x39220
+#define A_MAC_BEAN0_XNP_0 0x39224
+#define A_MAC_BEAN0_XNP_1 0x39228
+#define A_MAC_BEAN0_XNP_2 0x3922c
+#define A_MAC_LP_BEAN0_XNP_0 0x39230
+#define A_MAC_LP_BEAN0_XNP_1 0x39234
+#define A_MAC_LP_BEAN0_XNP_2 0x39238
+#define A_MAC_BEAN0_ETH_STATUS 0x3923c
+
+#define S_5GKR 15
+#define V_5GKR(x) ((x) << S_5GKR)
+#define F_5GKR V_5GKR(1U)
+
+#define S_2P5GKX 14
+#define V_2P5GKX(x) ((x) << S_2P5GKX)
+#define F_2P5GKX V_2P5GKX(1U)
+
+#define S_25G_KR 13
+#define V_25G_KR(x) ((x) << S_25G_KR)
+#define F_25G_KR V_25G_KR(1U)
+
+#define S_25G_KR_S 12
+#define V_25G_KR_S(x) ((x) << S_25G_KR_S)
+#define F_25G_KR_S V_25G_KR_S(1U)
+
+#define S_RS_FEC 7
+#define V_RS_FEC(x) ((x) << S_RS_FEC)
+#define F_RS_FEC V_RS_FEC(1U)
+
+#define S_FC_FEC 4
+#define V_FC_FEC(x) ((x) << S_FC_FEC)
+#define F_FC_FEC V_FC_FEC(1U)
+
+#define A_MAC_BEAN0_ETH_STATUS_2 0x39240
+
+#define S_RS_FEC_NEGOTIATED 6
+#define V_RS_FEC_NEGOTIATED(x) ((x) << S_RS_FEC_NEGOTIATED)
+#define F_RS_FEC_NEGOTIATED V_RS_FEC_NEGOTIATED(1U)
+
+#define S_400GKR4CR4 5
+#define V_400GKR4CR4(x) ((x) << S_400GKR4CR4)
+#define F_400GKR4CR4 V_400GKR4CR4(1U)
+
+#define S_200GKR2CR2 4
+#define V_200GKR2CR2(x) ((x) << S_200GKR2CR2)
+#define F_200GKR2CR2 V_200GKR2CR2(1U)
+
+#define S_100GKR1CR1 3
+#define V_100GKR1CR1(x) ((x) << S_100GKR1CR1)
+#define F_100GKR1CR1 V_100GKR1CR1(1U)
+
+#define S_200GKR4CR4 2
+#define V_200GKR4CR4(x) ((x) << S_200GKR4CR4)
+#define F_200GKR4CR4 V_200GKR4CR4(1U)
+
+#define S_100GKR2CR2 1
+#define V_100GKR2CR2(x) ((x) << S_100GKR2CR2)
+#define F_100GKR2CR2 V_100GKR2CR2(1U)
+
+#define S_50GKRCR 0
+#define V_50GKRCR(x) ((x) << S_50GKRCR)
+#define F_50GKRCR V_50GKRCR(1U)
+
+#define A_MAC_BEAN1_CTL 0x39300
+#define A_MAC_BEAN1_STATUS 0x39304
+#define A_MAC_BEAN1_ABILITY_0 0x39308
+
+#define S_BEAN1_REM_FAULT 13
+#define V_BEAN1_REM_FAULT(x) ((x) << S_BEAN1_REM_FAULT)
+#define F_BEAN1_REM_FAULT V_BEAN1_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_ABILITY_1 0x3930c
+#define A_MAC_BEAN1_ABILITY_2 0x39310
+
+#define S_BEAN1_AB_2_15_12 12
+#define M_BEAN1_AB_2_15_12 0xfU
+#define V_BEAN1_AB_2_15_12(x) ((x) << S_BEAN1_AB_2_15_12)
+#define G_BEAN1_AB_2_15_12(x) (((x) >> S_BEAN1_AB_2_15_12) & M_BEAN1_AB_2_15_12)
+
+#define S_BEAN1_AB_2_11_0 0
+#define M_BEAN1_AB_2_11_0 0xfffU
+#define V_BEAN1_AB_2_11_0(x) ((x) << S_BEAN1_AB_2_11_0)
+#define G_BEAN1_AB_2_11_0(x) (((x) >> S_BEAN1_AB_2_11_0) & M_BEAN1_AB_2_11_0)
+
+#define A_MAC_BEAN1_REM_ABILITY_0 0x39314
+
+#define S_BEAN1_ABL_REM_FAULT 13
+#define V_BEAN1_ABL_REM_FAULT(x) ((x) << S_BEAN1_ABL_REM_FAULT)
+#define F_BEAN1_ABL_REM_FAULT V_BEAN1_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_REM_ABILITY_1 0x39318
+#define A_MAC_BEAN1_REM_ABILITY_2 0x3931c
+
+#define S_BEAN1_REM_AB_15_12 12
+#define M_BEAN1_REM_AB_15_12 0xfU
+#define V_BEAN1_REM_AB_15_12(x) ((x) << S_BEAN1_REM_AB_15_12)
+#define G_BEAN1_REM_AB_15_12(x) (((x) >> S_BEAN1_REM_AB_15_12) & M_BEAN1_REM_AB_15_12)
+
+#define S_BEAN1_REM_AB_11_0 0
+#define M_BEAN1_REM_AB_11_0 0xfffU
+#define V_BEAN1_REM_AB_11_0(x) ((x) << S_BEAN1_REM_AB_11_0)
+#define G_BEAN1_REM_AB_11_0(x) (((x) >> S_BEAN1_REM_AB_11_0) & M_BEAN1_REM_AB_11_0)
+
+#define A_MAC_BEAN1_MS_COUNT 0x39320
+#define A_MAC_BEAN1_XNP_0 0x39324
+#define A_MAC_BEAN1_XNP_1 0x39328
+#define A_MAC_BEAN1_XNP_2 0x3932c
+#define A_MAC_LP_BEAN1_XNP_0 0x39330
+#define A_MAC_LP_BEAN1_XNP_1 0x39334
+#define A_MAC_LP_BEAN1_XNP_2 0x39338
+#define A_MAC_BEAN1_ETH_STATUS 0x3933c
+#define A_MAC_BEAN1_ETH_STATUS_2 0x39340
+#define A_MAC_BEAN2_CTL 0x39400
+#define A_MAC_BEAN2_STATUS 0x39404
+#define A_MAC_BEAN2_ABILITY_0 0x39408
+
+#define S_BEAN2_REM_FAULT 13
+#define V_BEAN2_REM_FAULT(x) ((x) << S_BEAN2_REM_FAULT)
+#define F_BEAN2_REM_FAULT V_BEAN2_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_ABILITY_1 0x3940c
+#define A_MAC_BEAN2_ABILITY_2 0x39410
+
+#define S_BEAN2_AB_2_15_12 12
+#define M_BEAN2_AB_2_15_12 0xfU
+#define V_BEAN2_AB_2_15_12(x) ((x) << S_BEAN2_AB_2_15_12)
+#define G_BEAN2_AB_2_15_12(x) (((x) >> S_BEAN2_AB_2_15_12) & M_BEAN2_AB_2_15_12)
+
+#define S_BEAN2_AB_2_11_0 0
+#define M_BEAN2_AB_2_11_0 0xfffU
+#define V_BEAN2_AB_2_11_0(x) ((x) << S_BEAN2_AB_2_11_0)
+#define G_BEAN2_AB_2_11_0(x) (((x) >> S_BEAN2_AB_2_11_0) & M_BEAN2_AB_2_11_0)
+
+#define A_MAC_BEAN2_REM_ABILITY_0 0x39414
+
+#define S_BEAN2_ABL_REM_FAULT 13
+#define V_BEAN2_ABL_REM_FAULT(x) ((x) << S_BEAN2_ABL_REM_FAULT)
+#define F_BEAN2_ABL_REM_FAULT V_BEAN2_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_REM_ABILITY_1 0x39418
+#define A_MAC_BEAN2_REM_ABILITY_2 0x3941c
+
+#define S_BEAN2_REM_AB_15_12 12
+#define M_BEAN2_REM_AB_15_12 0xfU
+#define V_BEAN2_REM_AB_15_12(x) ((x) << S_BEAN2_REM_AB_15_12)
+#define G_BEAN2_REM_AB_15_12(x) (((x) >> S_BEAN2_REM_AB_15_12) & M_BEAN2_REM_AB_15_12)
+
+#define S_BEAN2_REM_AB_11_0 0
+#define M_BEAN2_REM_AB_11_0 0xfffU
+#define V_BEAN2_REM_AB_11_0(x) ((x) << S_BEAN2_REM_AB_11_0)
+#define G_BEAN2_REM_AB_11_0(x) (((x) >> S_BEAN2_REM_AB_11_0) & M_BEAN2_REM_AB_11_0)
+
+#define A_MAC_BEAN2_MS_COUNT 0x39420
+#define A_MAC_BEAN2_XNP_0 0x39424
+#define A_MAC_BEAN2_XNP_1 0x39428
+#define A_MAC_BEAN2_XNP_2 0x3942c
+#define A_MAC_LP_BEAN2_XNP_0 0x39430
+#define A_MAC_LP_BEAN2_XNP_1 0x39434
+#define A_MAC_LP_BEAN2_XNP_2 0x39438
+#define A_MAC_BEAN2_ETH_STATUS 0x3943c
+#define A_MAC_BEAN2_ETH_STATUS_2 0x39440
+#define A_MAC_BEAN3_CTL 0x39500
+#define A_MAC_BEAN3_STATUS 0x39504
+#define A_MAC_BEAN3_ABILITY_0 0x39508
+
+#define S_BEAN3_REM_FAULT 13
+#define V_BEAN3_REM_FAULT(x) ((x) << S_BEAN3_REM_FAULT)
+#define F_BEAN3_REM_FAULT V_BEAN3_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_ABILITY_1 0x3950c
+#define A_MAC_BEAN3_ABILITY_2 0x39510
+
+#define S_BEAN3_AB_2_15_12 12
+#define M_BEAN3_AB_2_15_12 0xfU
+#define V_BEAN3_AB_2_15_12(x) ((x) << S_BEAN3_AB_2_15_12)
+#define G_BEAN3_AB_2_15_12(x) (((x) >> S_BEAN3_AB_2_15_12) & M_BEAN3_AB_2_15_12)
+
+#define S_BEAN3_AB_2_11_0 0
+#define M_BEAN3_AB_2_11_0 0xfffU
+#define V_BEAN3_AB_2_11_0(x) ((x) << S_BEAN3_AB_2_11_0)
+#define G_BEAN3_AB_2_11_0(x) (((x) >> S_BEAN3_AB_2_11_0) & M_BEAN3_AB_2_11_0)
+
+#define A_MAC_BEAN3_REM_ABILITY_0 0x39514
+
+#define S_BEAN3_ABL_REM_FAULT 13
+#define V_BEAN3_ABL_REM_FAULT(x) ((x) << S_BEAN3_ABL_REM_FAULT)
+#define F_BEAN3_ABL_REM_FAULT V_BEAN3_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_REM_ABILITY_1 0x39518
+#define A_MAC_BEAN3_REM_ABILITY_2 0x3951c
+
+#define S_BEAN3_REM_AB_15_12 12
+#define M_BEAN3_REM_AB_15_12 0xfU
+#define V_BEAN3_REM_AB_15_12(x) ((x) << S_BEAN3_REM_AB_15_12)
+#define G_BEAN3_REM_AB_15_12(x) (((x) >> S_BEAN3_REM_AB_15_12) & M_BEAN3_REM_AB_15_12)
+
+#define S_BEAN3_REM_AB_11_0 0
+#define M_BEAN3_REM_AB_11_0 0xfffU
+#define V_BEAN3_REM_AB_11_0(x) ((x) << S_BEAN3_REM_AB_11_0)
+#define G_BEAN3_REM_AB_11_0(x) (((x) >> S_BEAN3_REM_AB_11_0) & M_BEAN3_REM_AB_11_0)
+
+#define A_MAC_BEAN3_MS_COUNT 0x39520
+#define A_MAC_BEAN3_XNP_0 0x39524
+#define A_MAC_BEAN3_XNP_1 0x39528
+#define A_MAC_BEAN3_XNP_2 0x3952c
+#define A_MAC_LP_BEAN3_XNP_0 0x39530
+#define A_MAC_LP_BEAN3_XNP_1 0x39534
+#define A_MAC_LP_BEAN3_XNP_2 0x39538
+#define A_MAC_BEAN3_ETH_STATUS 0x3953c
+#define A_MAC_BEAN3_ETH_STATUS_2 0x39540
+#define A_MAC_BEAN4_CTL 0x39600
+#define A_MAC_BEAN4_STATUS 0x39604
+#define A_MAC_BEAN4_ABILITY_0 0x39608
+
+#define S_BEAN4_REM_FAULT 13
+#define V_BEAN4_REM_FAULT(x) ((x) << S_BEAN4_REM_FAULT)
+#define F_BEAN4_REM_FAULT V_BEAN4_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_ABILITY_1 0x3960c
+#define A_MAC_BEAN4_ABILITY_2 0x39610
+
+#define S_BEAN4_AB_2_15_12 12
+#define M_BEAN4_AB_2_15_12 0xfU
+#define V_BEAN4_AB_2_15_12(x) ((x) << S_BEAN4_AB_2_15_12)
+#define G_BEAN4_AB_2_15_12(x) (((x) >> S_BEAN4_AB_2_15_12) & M_BEAN4_AB_2_15_12)
+
+#define S_BEAN4_AB_2_11_0 0
+#define M_BEAN4_AB_2_11_0 0xfffU
+#define V_BEAN4_AB_2_11_0(x) ((x) << S_BEAN4_AB_2_11_0)
+#define G_BEAN4_AB_2_11_0(x) (((x) >> S_BEAN4_AB_2_11_0) & M_BEAN4_AB_2_11_0)
+
+#define A_MAC_BEAN4_REM_ABILITY_0 0x39614
+
+#define S_BEAN4_ABL_REM_FAULT 13
+#define V_BEAN4_ABL_REM_FAULT(x) ((x) << S_BEAN4_ABL_REM_FAULT)
+#define F_BEAN4_ABL_REM_FAULT V_BEAN4_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_REM_ABILITY_1 0x39618
+#define A_MAC_BEAN4_REM_ABILITY_2 0x3961c
+
+#define S_BEAN4_REM_AB_15_12 12
+#define M_BEAN4_REM_AB_15_12 0xfU
+#define V_BEAN4_REM_AB_15_12(x) ((x) << S_BEAN4_REM_AB_15_12)
+#define G_BEAN4_REM_AB_15_12(x) (((x) >> S_BEAN4_REM_AB_15_12) & M_BEAN4_REM_AB_15_12)
+
+#define S_BEAN4_REM_AB_11_0 0
+#define M_BEAN4_REM_AB_11_0 0xfffU
+#define V_BEAN4_REM_AB_11_0(x) ((x) << S_BEAN4_REM_AB_11_0)
+#define G_BEAN4_REM_AB_11_0(x) (((x) >> S_BEAN4_REM_AB_11_0) & M_BEAN4_REM_AB_11_0)
+
+#define A_MAC_BEAN4_MS_COUNT 0x39620
+#define A_MAC_BEAN4_XNP_0 0x39624
+#define A_MAC_BEAN4_XNP_1 0x39628
+#define A_MAC_BEAN4_XNP_2 0x3962c
+#define A_MAC_LP_BEAN4_XNP_0 0x39630
+#define A_MAC_LP_BEAN4_XNP_1 0x39634
+#define A_MAC_LP_BEAN4_XNP_2 0x39638
+#define A_MAC_BEAN4_ETH_STATUS 0x3963c
+#define A_MAC_BEAN4_ETH_STATUS_2 0x39640
+#define A_MAC_BEAN5_CTL 0x39700
+#define A_MAC_BEAN5_STATUS 0x39704
+#define A_MAC_BEAN5_ABILITY_0 0x39708
+
+#define S_BEAN5_REM_FAULT 13
+#define V_BEAN5_REM_FAULT(x) ((x) << S_BEAN5_REM_FAULT)
+#define F_BEAN5_REM_FAULT V_BEAN5_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_ABILITY_1 0x3970c
+#define A_MAC_BEAN5_ABILITY_2 0x39710
+
+#define S_BEAN5_AB_2_15_12 12
+#define M_BEAN5_AB_2_15_12 0xfU
+#define V_BEAN5_AB_2_15_12(x) ((x) << S_BEAN5_AB_2_15_12)
+#define G_BEAN5_AB_2_15_12(x) (((x) >> S_BEAN5_AB_2_15_12) & M_BEAN5_AB_2_15_12)
+
+#define S_BEAN5_AB_2_11_0 0
+#define M_BEAN5_AB_2_11_0 0xfffU
+#define V_BEAN5_AB_2_11_0(x) ((x) << S_BEAN5_AB_2_11_0)
+#define G_BEAN5_AB_2_11_0(x) (((x) >> S_BEAN5_AB_2_11_0) & M_BEAN5_AB_2_11_0)
+
+#define A_MAC_BEAN5_REM_ABILITY_0 0x39714
+
+#define S_BEAN5_ABL_REM_FAULT 13
+#define V_BEAN5_ABL_REM_FAULT(x) ((x) << S_BEAN5_ABL_REM_FAULT)
+#define F_BEAN5_ABL_REM_FAULT V_BEAN5_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_REM_ABILITY_1 0x39718
+#define A_MAC_BEAN5_REM_ABILITY_2 0x3971c
+
+#define S_BEAN5_REM_AB_15_12 12
+#define M_BEAN5_REM_AB_15_12 0xfU
+#define V_BEAN5_REM_AB_15_12(x) ((x) << S_BEAN5_REM_AB_15_12)
+#define G_BEAN5_REM_AB_15_12(x) (((x) >> S_BEAN5_REM_AB_15_12) & M_BEAN5_REM_AB_15_12)
+
+#define S_BEAN5_REM_AB_11_0 0
+#define M_BEAN5_REM_AB_11_0 0xfffU
+#define V_BEAN5_REM_AB_11_0(x) ((x) << S_BEAN5_REM_AB_11_0)
+#define G_BEAN5_REM_AB_11_0(x) (((x) >> S_BEAN5_REM_AB_11_0) & M_BEAN5_REM_AB_11_0)
+
+#define A_MAC_BEAN5_MS_COUNT 0x39720
+#define A_MAC_BEAN5_XNP_0 0x39724
+#define A_MAC_BEAN5_XNP_1 0x39728
+#define A_MAC_BEAN5_XNP_2 0x3972c
+#define A_MAC_LP_BEAN5_XNP_0 0x39730
+#define A_MAC_LP_BEAN5_XNP_1 0x39734
+#define A_MAC_LP_BEAN5_XNP_2 0x39738
+#define A_MAC_BEAN5_ETH_STATUS 0x3973c
+#define A_MAC_BEAN5_ETH_STATUS_2 0x39740
+#define A_MAC_BEAN6_CTL 0x39800
+#define A_MAC_BEAN6_STATUS 0x39804
+#define A_MAC_BEAN6_ABILITY_0 0x39808
+
+#define S_BEAN6_REM_FAULT 13
+#define V_BEAN6_REM_FAULT(x) ((x) << S_BEAN6_REM_FAULT)
+#define F_BEAN6_REM_FAULT V_BEAN6_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_ABILITY_1 0x3980c
+#define A_MAC_BEAN6_ABILITY_2 0x39810
+
+#define S_BEAN6_AB_2_15_12 12
+#define M_BEAN6_AB_2_15_12 0xfU
+#define V_BEAN6_AB_2_15_12(x) ((x) << S_BEAN6_AB_2_15_12)
+#define G_BEAN6_AB_2_15_12(x) (((x) >> S_BEAN6_AB_2_15_12) & M_BEAN6_AB_2_15_12)
+
+#define S_BEAN6_AB_2_11_0 0
+#define M_BEAN6_AB_2_11_0 0xfffU
+#define V_BEAN6_AB_2_11_0(x) ((x) << S_BEAN6_AB_2_11_0)
+#define G_BEAN6_AB_2_11_0(x) (((x) >> S_BEAN6_AB_2_11_0) & M_BEAN6_AB_2_11_0)
+
+#define A_MAC_BEAN6_REM_ABILITY_0 0x39814
+
+#define S_BEAN6_ABL_REM_FAULT 13
+#define V_BEAN6_ABL_REM_FAULT(x) ((x) << S_BEAN6_ABL_REM_FAULT)
+#define F_BEAN6_ABL_REM_FAULT V_BEAN6_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_REM_ABILITY_1 0x39818
+#define A_MAC_BEAN6_REM_ABILITY_2 0x3981c
+
+#define S_BEAN6_REM_AB_15_12 12
+#define M_BEAN6_REM_AB_15_12 0xfU
+#define V_BEAN6_REM_AB_15_12(x) ((x) << S_BEAN6_REM_AB_15_12)
+#define G_BEAN6_REM_AB_15_12(x) (((x) >> S_BEAN6_REM_AB_15_12) & M_BEAN6_REM_AB_15_12)
+
+#define S_BEAN6_REM_AB_11_0 0
+#define M_BEAN6_REM_AB_11_0 0xfffU
+#define V_BEAN6_REM_AB_11_0(x) ((x) << S_BEAN6_REM_AB_11_0)
+#define G_BEAN6_REM_AB_11_0(x) (((x) >> S_BEAN6_REM_AB_11_0) & M_BEAN6_REM_AB_11_0)
+
+#define A_MAC_BEAN6_MS_COUNT 0x39820
+#define A_MAC_BEAN6_XNP_0 0x39824
+#define A_MAC_BEAN6_XNP_1 0x39828
+#define A_MAC_BEAN6_XNP_2 0x3982c
+#define A_MAC_LP_BEAN6_XNP_0 0x39830
+#define A_MAC_LP_BEAN6_XNP_1 0x39834
+#define A_MAC_LP_BEAN6_XNP_2 0x39838
+#define A_MAC_BEAN6_ETH_STATUS 0x3983c
+#define A_MAC_BEAN6_ETH_STATUS_2 0x39840
+#define A_MAC_BEAN7_CTL 0x39900
+#define A_MAC_BEAN7_STATUS 0x39904
+#define A_MAC_BEAN7_ABILITY_0 0x39908
+
+#define S_BEAN7_REM_FAULT 13
+#define V_BEAN7_REM_FAULT(x) ((x) << S_BEAN7_REM_FAULT)
+#define F_BEAN7_REM_FAULT V_BEAN7_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_ABILITY_1 0x3990c
+#define A_MAC_BEAN7_ABILITY_2 0x39910
+
+#define S_BEAN7_AB_2_15_12 12
+#define M_BEAN7_AB_2_15_12 0xfU
+#define V_BEAN7_AB_2_15_12(x) ((x) << S_BEAN7_AB_2_15_12)
+#define G_BEAN7_AB_2_15_12(x) (((x) >> S_BEAN7_AB_2_15_12) & M_BEAN7_AB_2_15_12)
+
+#define S_BEAN7_AB_2_11_0 0
+#define M_BEAN7_AB_2_11_0 0xfffU
+#define V_BEAN7_AB_2_11_0(x) ((x) << S_BEAN7_AB_2_11_0)
+#define G_BEAN7_AB_2_11_0(x) (((x) >> S_BEAN7_AB_2_11_0) & M_BEAN7_AB_2_11_0)
+
+#define A_MAC_BEAN7_REM_ABILITY_0 0x39914
+
+#define S_BEAN7_ABL_REM_FAULT 13
+#define V_BEAN7_ABL_REM_FAULT(x) ((x) << S_BEAN7_ABL_REM_FAULT)
+#define F_BEAN7_ABL_REM_FAULT V_BEAN7_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_REM_ABILITY_1 0x39918
+#define A_MAC_BEAN7_REM_ABILITY_2 0x3991c
+
+#define S_BEAN7_REM_AB_15_12 12
+#define M_BEAN7_REM_AB_15_12 0xfU
+#define V_BEAN7_REM_AB_15_12(x) ((x) << S_BEAN7_REM_AB_15_12)
+#define G_BEAN7_REM_AB_15_12(x) (((x) >> S_BEAN7_REM_AB_15_12) & M_BEAN7_REM_AB_15_12)
+
+#define S_BEAN7_REM_AB_11_0 0
+#define M_BEAN7_REM_AB_11_0 0xfffU
+#define V_BEAN7_REM_AB_11_0(x) ((x) << S_BEAN7_REM_AB_11_0)
+#define G_BEAN7_REM_AB_11_0(x) (((x) >> S_BEAN7_REM_AB_11_0) & M_BEAN7_REM_AB_11_0)
+
+#define A_MAC_BEAN7_MS_COUNT 0x39920
+#define A_MAC_BEAN7_XNP_0 0x39924
+#define A_MAC_BEAN7_XNP_1 0x39928
+#define A_MAC_BEAN7_XNP_2 0x3992c
+#define A_MAC_LP_BEAN7_XNP_0 0x39930
+#define A_MAC_LP_BEAN7_XNP_1 0x39934
+#define A_MAC_LP_BEAN7_XNP_2 0x39938
+#define A_MAC_BEAN7_ETH_STATUS 0x3993c
+#define A_MAC_BEAN7_ETH_STATUS_2 0x39940
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI 0x39a00
+#define A_MAC_MTIP_ETHERSTATS_STATN_STATUS 0x39a04
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONFIG 0x39a08
+
+#define S_T7_RESET 31
+#define V_T7_RESET(x) ((x) << S_T7_RESET)
+#define F_T7_RESET V_T7_RESET(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONTROL 0x39a0c
+
+#define S_CMD_CLEAR_TX 31
+#define V_CMD_CLEAR_TX(x) ((x) << S_CMD_CLEAR_TX)
+#define F_CMD_CLEAR_TX V_CMD_CLEAR_TX(1U)
+
+#define S_CMD_CLEAR_RX 30
+#define V_CMD_CLEAR_RX(x) ((x) << S_CMD_CLEAR_RX)
+#define F_CMD_CLEAR_RX V_CMD_CLEAR_RX(1U)
+
+#define S_CLEAR_PRE 29
+#define V_CLEAR_PRE(x) ((x) << S_CLEAR_PRE)
+#define F_CLEAR_PRE V_CLEAR_PRE(1U)
+
+#define S_CMD_CAPTURE_TX 28
+#define V_CMD_CAPTURE_TX(x) ((x) << S_CMD_CAPTURE_TX)
+#define F_CMD_CAPTURE_TX V_CMD_CAPTURE_TX(1U)
+
+#define S_CMD_CAPTURE_RX 27
+#define V_CMD_CAPTURE_RX(x) ((x) << S_CMD_CAPTURE_RX)
+#define F_CMD_CAPTURE_RX V_CMD_CAPTURE_RX(1U)
+
+#define S_PORTMASK 0
+#define M_PORTMASK 0xffU
+#define V_PORTMASK(x) ((x) << S_PORTMASK)
+#define G_PORTMASK(x) (((x) >> S_PORTMASK) & M_PORTMASK)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_LO 0x39a10
+
+#define S_STATN_CLEARVALUE_LO 0
+#define V_STATN_CLEARVALUE_LO(x) ((x) << S_STATN_CLEARVALUE_LO)
+#define F_STATN_CLEARVALUE_LO V_STATN_CLEARVALUE_LO(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_HI 0x39a14
+
+#define S_STATN_CLEARVALUE_HI 0
+#define V_STATN_CLEARVALUE_HI(x) ((x) << S_STATN_CLEARVALUE_HI)
+#define F_STATN_CLEARVALUE_HI V_STATN_CLEARVALUE_HI(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI_1 0x39a1c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_0 0x39a20
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_1 0x39a24
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_2 0x39a28
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_3 0x39a2c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_4 0x39a30
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_5 0x39a34
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_6 0x39a38
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_7 0x39a3c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_8 0x39a40
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_9 0x39a44
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_10 0x39a48
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_11 0x39a4c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_12 0x39a50
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_13 0x39a54
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_14 0x39a58
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_15 0x39a5c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_16 0x39a60
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_17 0x39a64
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_18 0x39a68
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_19 0x39a6c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_20 0x39a70
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_21 0x39a74
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_22 0x39a78
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_23 0x39a7c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_24 0x39a80
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_25 0x39a84
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_26 0x39a88
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_27 0x39a8c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_28 0x39a90
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_29 0x39a94
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_30 0x39a98
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_31 0x39a9c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_32 0x39aa0
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_33 0x39aa4
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_34 0x39aa8
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS 0x39b00
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSRECEIVEDOK 0x39b04
+#define A_MAC_MTIP_ETHERSTATS0_AALIGNMENTERRORS 0x39b08
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESRECEIVED 0x39b0c
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMETOOLONGERRORS 0x39b10
+#define A_MAC_MTIP_ETHERSTATS0_AINRANGELENGTHERRORS 0x39b14
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESRECEIVEDOK 0x39b18
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMECHECKSEQUENCEERRORS 0x39b1c
+#define A_MAC_MTIP_ETHERSTATS0_VLANRECEIVEDOK 0x39b20
+#define A_MAC_MTIP_ETHERSTATS0_IFINERRORS_RX 0x39b24
+#define A_MAC_MTIP_ETHERSTATS0_IFINUCASTPKTS_RX 0x39b28
+#define A_MAC_MTIP_ETHERSTATS0_IFINMULTICASTPKTS_RX 0x39b2c
+#define A_MAC_MTIP_ETHERSTATS0_IFINBROADCASTPKTS_RX 0x39b30
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSDROPEVENTS_RX 0x39b34
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_RX 0x39b38
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSUNDERSIZEPKTS_RX 0x39b3c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_RX 0x39b40
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_RX 0x39b44
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_RX 0x39b48
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_RX 0x39b4c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39b50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39b54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39b58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOVERSIZEPKTS_RX 0x39b5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSJABBERS_RX 0x39b60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSFRAGMENTS_RX 0x39b64
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39b68
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39b6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39b70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39b74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39b78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39b7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39b80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39b84
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESRECEIVED_RX 0x39b88
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS 0x39b8c
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSRECEIVEDOK 0x39b90
+#define A_MAC_MTIP_ETHERSTATS1_AALIGNMENTERRORS 0x39b94
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESRECEIVED 0x39b98
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMETOOLONGERRORS 0x39b9c
+#define A_MAC_MTIP_ETHERSTATS1_AINRANGELENGTHERRORS 0x39ba0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESRECEIVEDOK 0x39ba4
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMECHECKSEQUENCEERRORS 0x39ba8
+#define A_MAC_MTIP_ETHERSTATS1_VLANRECEIVEDOK 0x39bac
+#define A_MAC_MTIP_ETHERSTATS1_IFINERRORS_RX 0x39bb0
+#define A_MAC_MTIP_ETHERSTATS1_IFINUCASTPKTS_RX 0x39bb4
+#define A_MAC_MTIP_ETHERSTATS1_IFINMULTICASTPKTS_RX 0x39bb8
+#define A_MAC_MTIP_ETHERSTATS1_IFINBROADCASTPKTS_RX 0x39bbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSDROPEVENTS_RX 0x39bc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_RX 0x39bc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSUNDERSIZEPKTS_RX 0x39bc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_RX 0x39bcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_RX 0x39bd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_RX 0x39bd4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_RX 0x39bd8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39bdc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39be0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39be4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOVERSIZEPKTS_RX 0x39be8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSJABBERS_RX 0x39bec
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSFRAGMENTS_RX 0x39bf0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39bf4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39bf8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39bfc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c00
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c04
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c08
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c0c
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c10
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESRECEIVED_RX 0x39c14
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS 0x39c18
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSRECEIVEDOK 0x39c1c
+#define A_MAC_MTIP_ETHERSTATS2_AALIGNMENTERRORS 0x39c20
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESRECEIVED 0x39c24
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMETOOLONGERRORS 0x39c28
+#define A_MAC_MTIP_ETHERSTATS2_AINRANGELENGTHERRORS 0x39c2c
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESRECEIVEDOK 0x39c30
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMECHECKSEQUENCEERRORS 0x39c34
+#define A_MAC_MTIP_ETHERSTATS2_VLANRECEIVEDOK 0x39c38
+#define A_MAC_MTIP_ETHERSTATS2_IFINERRORS_RX 0x39c3c
+#define A_MAC_MTIP_ETHERSTATS2_IFINUCASTPKTS_RX 0x39c40
+#define A_MAC_MTIP_ETHERSTATS2_IFINMULTICASTPKTS_RX 0x39c44
+#define A_MAC_MTIP_ETHERSTATS2_IFINBROADCASTPKTS_RX 0x39c48
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSDROPEVENTS_RX 0x39c4c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_RX 0x39c50
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSUNDERSIZEPKTS_RX 0x39c54
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_RX 0x39c58
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_RX 0x39c5c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_RX 0x39c60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_RX 0x39c64
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39c68
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39c6c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39c70
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOVERSIZEPKTS_RX 0x39c74
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSJABBERS_RX 0x39c78
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSFRAGMENTS_RX 0x39c7c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39c80
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39c84
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39c88
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c8c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c90
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c94
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c98
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c9c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESRECEIVED_RX 0x39ca0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS 0x39ca4
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSRECEIVEDOK 0x39ca8
+#define A_MAC_MTIP_ETHERSTATS3_AALIGNMENTERRORS 0x39cac
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESRECEIVED 0x39cb0
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMETOOLONGERRORS 0x39cb4
+#define A_MAC_MTIP_ETHERSTATS3_AINRANGELENGTHERRORS 0x39cb8
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESRECEIVEDOK 0x39cbc
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMECHECKSEQUENCEERRORS 0x39cc0
+#define A_MAC_MTIP_ETHERSTATS3_VLANRECEIVEDOK 0x39cc4
+#define A_MAC_MTIP_ETHERSTATS3_IFINERRORS_RX 0x39cc8
+#define A_MAC_MTIP_ETHERSTATS3_IFINUCASTPKTS_RX 0x39ccc
+#define A_MAC_MTIP_ETHERSTATS3_IFINMULTICASTPKTS_RX 0x39cd0
+#define A_MAC_MTIP_ETHERSTATS3_IFINBROADCASTPKTS_RX 0x39cd4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSDROPEVENTS_RX 0x39cd8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_RX 0x39cdc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSUNDERSIZEPKTS_RX 0x39ce0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_RX 0x39ce4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_RX 0x39ce8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_RX 0x39cec
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_RX 0x39cf0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39cf4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39cf8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39cfc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOVERSIZEPKTS_RX 0x39d00
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSJABBERS_RX 0x39d04
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSFRAGMENTS_RX 0x39d08
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39d0c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39d10
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39d14
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39d18
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39d1c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39d20
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39d24
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39d28
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESRECEIVED_RX 0x39d2c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS_TX 0x39d30
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSTRANSMITTEDOK_TX 0x39d34
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39d38
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESTRANSMITTEDOK_TX 0x39d3c
+#define A_MAC_MTIP_ETHERSTATS0_VLANTRANSMITTEDOK_TX 0x39d40
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTERRORS_TX 0x39d44
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTUCASTPKTS_TX 0x39d48
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTMULTICASTPKTS_TX 0x39d4c
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTBROADCASTPKTS_TX 0x39d50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_TX 0x39d54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_TX 0x39d58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_TX 0x39d5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_TX 0x39d60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39d64
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39d68
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39d6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39d70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39d74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39d78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39d7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39d80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39d84
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39d88
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39d8c
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESTRANSMITTED_TX 0x39d90
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_TX 0x39d94
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS_TX 0x39d98
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSTRANSMITTEDOK_TX 0x39d9c
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39da0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESTRANSMITTEDOK_TX 0x39da4
+#define A_MAC_MTIP_ETHERSTATS1_VLANTRANSMITTEDOK_TX 0x39da8
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTERRORS_TX 0x39dac
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTUCASTPKTS_TX 0x39db0
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTMULTICASTPKTS_TX 0x39db4
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTBROADCASTPKTS_TX 0x39db8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_TX 0x39dbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_TX 0x39dc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_TX 0x39dc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_TX 0x39dc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39dcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39dd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39dd4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39dd8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39ddc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39de0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39de4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39de8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39dec
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39df0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39df4
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESTRANSMITTED_TX 0x39df8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_TX 0x39dfc
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS_TX 0x39e00
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSTRANSMITTEDOK_TX 0x39e04
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e08
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESTRANSMITTEDOK_TX 0x39e0c
+#define A_MAC_MTIP_ETHERSTATS2_VLANTRANSMITTEDOK_TX 0x39e10
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTERRORS_TX 0x39e14
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTUCASTPKTS_TX 0x39e18
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTMULTICASTPKTS_TX 0x39e1c
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTBROADCASTPKTS_TX 0x39e20
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_TX 0x39e24
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e28
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e2c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e30
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e34
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39e38
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39e3c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39e40
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39e44
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39e48
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39e4c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39e50
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39e54
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39e58
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39e5c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESTRANSMITTED_TX 0x39e60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_TX 0x39e64
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS_TX 0x39e68
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSTRANSMITTEDOK_TX 0x39e6c
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e70
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESTRANSMITTEDOK_TX 0x39e74
+#define A_MAC_MTIP_ETHERSTATS3_VLANTRANSMITTEDOK_TX 0x39e78
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTERRORS_TX 0x39e7c
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTUCASTPKTS_TX 0x39e80
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTMULTICASTPKTS_TX 0x39e84
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTBROADCASTPKTS_TX 0x39e88
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_TX 0x39e8c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e90
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e94
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e98
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e9c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39ea0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39ea4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39ea8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39eac
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39eb0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39eb4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39eb8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39ebc
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39ec0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39ec4
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESTRANSMITTED_TX 0x39ec8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_TX 0x39ecc
+#define A_MAC_IOS_CTRL 0x3a000
+
+#define S_SUB_BLOCK_SEL 28
+#define M_SUB_BLOCK_SEL 0x7U
+#define V_SUB_BLOCK_SEL(x) ((x) << S_SUB_BLOCK_SEL)
+#define G_SUB_BLOCK_SEL(x) (((x) >> S_SUB_BLOCK_SEL) & M_SUB_BLOCK_SEL)
+
+#define S_QUAD_BROADCAST_EN 24
+#define V_QUAD_BROADCAST_EN(x) ((x) << S_QUAD_BROADCAST_EN)
+#define F_QUAD_BROADCAST_EN V_QUAD_BROADCAST_EN(1U)
+
+#define S_AUTO_INCR 20
+#define V_AUTO_INCR(x) ((x) << S_AUTO_INCR)
+#define F_AUTO_INCR V_AUTO_INCR(1U)
+
+#define S_T7_2_ADDR 0
+#define M_T7_2_ADDR 0x7ffffU
+#define V_T7_2_ADDR(x) ((x) << S_T7_2_ADDR)
+#define G_T7_2_ADDR(x) (((x) >> S_T7_2_ADDR) & M_T7_2_ADDR)
+
+#define A_MAC_IOS_DATA 0x3a004
+#define A_MAC_IOS_BGR_RST 0x3a050
+
+#define S_BGR_RSTN 0
+#define V_BGR_RSTN(x) ((x) << S_BGR_RSTN)
+#define F_BGR_RSTN V_BGR_RSTN(1U)
+
+#define A_MAC_IOS_BGR_CFG 0x3a054
+
+#define S_SOC_REFCLK_EN 0
+#define V_SOC_REFCLK_EN(x) ((x) << S_SOC_REFCLK_EN)
+#define F_SOC_REFCLK_EN V_SOC_REFCLK_EN(1U)
+
+#define A_MAC_IOS_QUAD0_CFG 0x3a058
+
+#define S_QUAD0_CH3_RSTN 5
+#define V_QUAD0_CH3_RSTN(x) ((x) << S_QUAD0_CH3_RSTN)
+#define F_QUAD0_CH3_RSTN V_QUAD0_CH3_RSTN(1U)
+
+#define S_QUAD0_CH2_RSTN 4
+#define V_QUAD0_CH2_RSTN(x) ((x) << S_QUAD0_CH2_RSTN)
+#define F_QUAD0_CH2_RSTN V_QUAD0_CH2_RSTN(1U)
+
+#define S_QUAD0_CH1_RSTN 3
+#define V_QUAD0_CH1_RSTN(x) ((x) << S_QUAD0_CH1_RSTN)
+#define F_QUAD0_CH1_RSTN V_QUAD0_CH1_RSTN(1U)
+
+#define S_QUAD0_CH0_RSTN 2
+#define V_QUAD0_CH0_RSTN(x) ((x) << S_QUAD0_CH0_RSTN)
+#define F_QUAD0_CH0_RSTN V_QUAD0_CH0_RSTN(1U)
+
+#define S_QUAD0_RSTN 1
+#define V_QUAD0_RSTN(x) ((x) << S_QUAD0_RSTN)
+#define F_QUAD0_RSTN V_QUAD0_RSTN(1U)
+
+#define S_PLL0_RSTN 0
+#define V_PLL0_RSTN(x) ((x) << S_PLL0_RSTN)
+#define F_PLL0_RSTN V_PLL0_RSTN(1U)
+
+#define A_MAC_IOS_QUAD1_CFG 0x3a05c
+
+#define S_QUAD1_CH3_RSTN 5
+#define V_QUAD1_CH3_RSTN(x) ((x) << S_QUAD1_CH3_RSTN)
+#define F_QUAD1_CH3_RSTN V_QUAD1_CH3_RSTN(1U)
+
+#define S_QUAD1_CH2_RSTN 4
+#define V_QUAD1_CH2_RSTN(x) ((x) << S_QUAD1_CH2_RSTN)
+#define F_QUAD1_CH2_RSTN V_QUAD1_CH2_RSTN(1U)
+
+#define S_QUAD1_CH1_RSTN 3
+#define V_QUAD1_CH1_RSTN(x) ((x) << S_QUAD1_CH1_RSTN)
+#define F_QUAD1_CH1_RSTN V_QUAD1_CH1_RSTN(1U)
+
+#define S_QUAD1_CH0_RSTN 2
+#define V_QUAD1_CH0_RSTN(x) ((x) << S_QUAD1_CH0_RSTN)
+#define F_QUAD1_CH0_RSTN V_QUAD1_CH0_RSTN(1U)
+
+#define S_QUAD1_RSTN 1
+#define V_QUAD1_RSTN(x) ((x) << S_QUAD1_RSTN)
+#define F_QUAD1_RSTN V_QUAD1_RSTN(1U)
+
+#define S_PLL1_RSTN 0
+#define V_PLL1_RSTN(x) ((x) << S_PLL1_RSTN)
+#define F_PLL1_RSTN V_PLL1_RSTN(1U)
+
+#define A_MAC_IOS_SCRATCHPAD0 0x3a060
+#define A_MAC_IOS_SCRATCHPAD1 0x3a064
+#define A_MAC_IOS_SCRATCHPAD2 0x3a068
+#define A_MAC_IOS_SCRATCHPAD3 0x3a06c
+
+#define S_DATA0 1
+#define M_DATA0 0x7fffffffU
+#define V_DATA0(x) ((x) << S_DATA0)
+#define G_DATA0(x) (((x) >> S_DATA0) & M_DATA0)
+
+#define S_I2C_MODE 0
+#define V_I2C_MODE(x) ((x) << S_I2C_MODE)
+#define F_I2C_MODE V_I2C_MODE(1U)
+
+#define A_MAC_IOS_BGR_DBG_COUNTER 0x3a070
+#define A_MAC_IOS_QUAD0_DBG_COUNTER 0x3a074
+#define A_MAC_IOS_PLL0_DBG_COUNTER 0x3a078
+#define A_MAC_IOS_QUAD1_DBG_COUNTER 0x3a07c
+#define A_MAC_IOS_PLL1_DBG_COUNTER 0x3a080
+#define A_MAC_IOS_DBG_CLK_CFG 0x3a084
+
+#define S_DBG_CLK_MUX_GPIO 3
+#define V_DBG_CLK_MUX_GPIO(x) ((x) << S_DBG_CLK_MUX_GPIO)
+#define F_DBG_CLK_MUX_GPIO V_DBG_CLK_MUX_GPIO(1U)
+
+#define S_DBG_CLK_MUX_SEL 0
+#define M_DBG_CLK_MUX_SEL 0x7U
+#define V_DBG_CLK_MUX_SEL(x) ((x) << S_DBG_CLK_MUX_SEL)
+#define G_DBG_CLK_MUX_SEL(x) (((x) >> S_DBG_CLK_MUX_SEL) & M_DBG_CLK_MUX_SEL)
+
+#define A_MAC_IOS_INTR_EN_QUAD0 0x3a090
+
+#define S_Q0_MAILBOX_INT_ASSERT 24
+#define V_Q0_MAILBOX_INT_ASSERT(x) ((x) << S_Q0_MAILBOX_INT_ASSERT)
+#define F_Q0_MAILBOX_INT_ASSERT V_Q0_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q0_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_3_ASSERT)
+#define F_Q0_TRAINING_FAILURE_3_ASSERT V_Q0_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q0_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_2_ASSERT)
+#define F_Q0_TRAINING_FAILURE_2_ASSERT V_Q0_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q0_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_1_ASSERT)
+#define F_Q0_TRAINING_FAILURE_1_ASSERT V_Q0_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q0_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_0_ASSERT)
+#define F_Q0_TRAINING_FAILURE_0_ASSERT V_Q0_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q0_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_3_ASSERT V_Q0_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q0_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_2_ASSERT V_Q0_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q0_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_1_ASSERT V_Q0_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q0_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_0_ASSERT V_Q0_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_3_ASSERT 15
+#define V_Q0_AN_TX_INT_3_ASSERT(x) ((x) << S_Q0_AN_TX_INT_3_ASSERT)
+#define F_Q0_AN_TX_INT_3_ASSERT V_Q0_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_2_ASSERT 14
+#define V_Q0_AN_TX_INT_2_ASSERT(x) ((x) << S_Q0_AN_TX_INT_2_ASSERT)
+#define F_Q0_AN_TX_INT_2_ASSERT V_Q0_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_1_ASSERT 13
+#define V_Q0_AN_TX_INT_1_ASSERT(x) ((x) << S_Q0_AN_TX_INT_1_ASSERT)
+#define F_Q0_AN_TX_INT_1_ASSERT V_Q0_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_0_ASSERT 12
+#define V_Q0_AN_TX_INT_0_ASSERT(x) ((x) << S_Q0_AN_TX_INT_0_ASSERT)
+#define F_Q0_AN_TX_INT_0_ASSERT V_Q0_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q0_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_3_ASSERT)
+#define F_Q0_SIGNAL_DETECT_3_ASSERT V_Q0_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q0_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_2_ASSERT)
+#define F_Q0_SIGNAL_DETECT_2_ASSERT V_Q0_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q0_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_1_ASSERT)
+#define F_Q0_SIGNAL_DETECT_1_ASSERT V_Q0_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q0_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_0_ASSERT)
+#define F_Q0_SIGNAL_DETECT_0_ASSERT V_Q0_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_3_ASSERT 7
+#define V_Q0_CDR_LOL_3_ASSERT(x) ((x) << S_Q0_CDR_LOL_3_ASSERT)
+#define F_Q0_CDR_LOL_3_ASSERT V_Q0_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_2_ASSERT 6
+#define V_Q0_CDR_LOL_2_ASSERT(x) ((x) << S_Q0_CDR_LOL_2_ASSERT)
+#define F_Q0_CDR_LOL_2_ASSERT V_Q0_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_1_ASSERT 5
+#define V_Q0_CDR_LOL_1_ASSERT(x) ((x) << S_Q0_CDR_LOL_1_ASSERT)
+#define F_Q0_CDR_LOL_1_ASSERT V_Q0_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_0_ASSERT 4
+#define V_Q0_CDR_LOL_0_ASSERT(x) ((x) << S_Q0_CDR_LOL_0_ASSERT)
+#define F_Q0_CDR_LOL_0_ASSERT V_Q0_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q0_LOS_3_ASSERT 3
+#define V_Q0_LOS_3_ASSERT(x) ((x) << S_Q0_LOS_3_ASSERT)
+#define F_Q0_LOS_3_ASSERT V_Q0_LOS_3_ASSERT(1U)
+
+#define S_Q0_LOS_2_ASSERT 2
+#define V_Q0_LOS_2_ASSERT(x) ((x) << S_Q0_LOS_2_ASSERT)
+#define F_Q0_LOS_2_ASSERT V_Q0_LOS_2_ASSERT(1U)
+
+#define S_Q0_LOS_1_ASSERT 1
+#define V_Q0_LOS_1_ASSERT(x) ((x) << S_Q0_LOS_1_ASSERT)
+#define F_Q0_LOS_1_ASSERT V_Q0_LOS_1_ASSERT(1U)
+
+#define S_Q0_LOS_0_ASSERT 0
+#define V_Q0_LOS_0_ASSERT(x) ((x) << S_Q0_LOS_0_ASSERT)
+#define F_Q0_LOS_0_ASSERT V_Q0_LOS_0_ASSERT(1U)
+
+#define A_MAC_IOS_INTR_CAUSE_QUAD0 0x3a094
+#define A_MAC_IOS_INTR_EN_QUAD1 0x3a098
+
+#define S_Q1_MAILBOX_INT_ASSERT 24
+#define V_Q1_MAILBOX_INT_ASSERT(x) ((x) << S_Q1_MAILBOX_INT_ASSERT)
+#define F_Q1_MAILBOX_INT_ASSERT V_Q1_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q1_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_3_ASSERT)
+#define F_Q1_TRAINING_FAILURE_3_ASSERT V_Q1_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q1_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_2_ASSERT)
+#define F_Q1_TRAINING_FAILURE_2_ASSERT V_Q1_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q1_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_1_ASSERT)
+#define F_Q1_TRAINING_FAILURE_1_ASSERT V_Q1_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q1_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_0_ASSERT)
+#define F_Q1_TRAINING_FAILURE_0_ASSERT V_Q1_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q1_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_3_ASSERT V_Q1_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q1_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_2_ASSERT V_Q1_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q1_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_1_ASSERT V_Q1_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q1_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_0_ASSERT V_Q1_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_3_ASSERT 15
+#define V_Q1_AN_TX_INT_3_ASSERT(x) ((x) << S_Q1_AN_TX_INT_3_ASSERT)
+#define F_Q1_AN_TX_INT_3_ASSERT V_Q1_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_2_ASSERT 14
+#define V_Q1_AN_TX_INT_2_ASSERT(x) ((x) << S_Q1_AN_TX_INT_2_ASSERT)
+#define F_Q1_AN_TX_INT_2_ASSERT V_Q1_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_1_ASSERT 13
+#define V_Q1_AN_TX_INT_1_ASSERT(x) ((x) << S_Q1_AN_TX_INT_1_ASSERT)
+#define F_Q1_AN_TX_INT_1_ASSERT V_Q1_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_0_ASSERT 12
+#define V_Q1_AN_TX_INT_0_ASSERT(x) ((x) << S_Q1_AN_TX_INT_0_ASSERT)
+#define F_Q1_AN_TX_INT_0_ASSERT V_Q1_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q1_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_3_ASSERT)
+#define F_Q1_SIGNAL_DETECT_3_ASSERT V_Q1_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q1_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_2_ASSERT)
+#define F_Q1_SIGNAL_DETECT_2_ASSERT V_Q1_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q1_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_1_ASSERT)
+#define F_Q1_SIGNAL_DETECT_1_ASSERT V_Q1_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q1_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_0_ASSERT)
+#define F_Q1_SIGNAL_DETECT_0_ASSERT V_Q1_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_3_ASSERT 7
+#define V_Q1_CDR_LOL_3_ASSERT(x) ((x) << S_Q1_CDR_LOL_3_ASSERT)
+#define F_Q1_CDR_LOL_3_ASSERT V_Q1_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_2_ASSERT 6
+#define V_Q1_CDR_LOL_2_ASSERT(x) ((x) << S_Q1_CDR_LOL_2_ASSERT)
+#define F_Q1_CDR_LOL_2_ASSERT V_Q1_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_1_ASSERT 5
+#define V_Q1_CDR_LOL_1_ASSERT(x) ((x) << S_Q1_CDR_LOL_1_ASSERT)
+#define F_Q1_CDR_LOL_1_ASSERT V_Q1_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_0_ASSERT 4
+#define V_Q1_CDR_LOL_0_ASSERT(x) ((x) << S_Q1_CDR_LOL_0_ASSERT)
+#define F_Q1_CDR_LOL_0_ASSERT V_Q1_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q1_LOS_3_ASSERT 3
+#define V_Q1_LOS_3_ASSERT(x) ((x) << S_Q1_LOS_3_ASSERT)
+#define F_Q1_LOS_3_ASSERT V_Q1_LOS_3_ASSERT(1U)
+
+#define S_Q1_LOS_2_ASSERT 2
+#define V_Q1_LOS_2_ASSERT(x) ((x) << S_Q1_LOS_2_ASSERT)
+#define F_Q1_LOS_2_ASSERT V_Q1_LOS_2_ASSERT(1U)
+
+#define S_Q1_LOS_1_ASSERT 1
+#define V_Q1_LOS_1_ASSERT(x) ((x) << S_Q1_LOS_1_ASSERT)
+#define F_Q1_LOS_1_ASSERT V_Q1_LOS_1_ASSERT(1U)
+
+#define S_Q1_LOS_0_ASSERT 0
+#define V_Q1_LOS_0_ASSERT(x) ((x) << S_Q1_LOS_0_ASSERT)
+#define F_Q1_LOS_0_ASSERT V_Q1_LOS_0_ASSERT(1U)
+
+#define A_MAC_IOS_INTR_CAUSE_QUAD1 0x3a09c
+#define A_MAC_MTIP_PCS_1G_0_CONTROL 0x3e000
+
+#define S_SPEED_SEL_1 13
+#define V_SPEED_SEL_1(x) ((x) << S_SPEED_SEL_1)
+#define F_SPEED_SEL_1 V_SPEED_SEL_1(1U)
+
+#define S_AUTO_NEG_ENA 12
+#define V_AUTO_NEG_ENA(x) ((x) << S_AUTO_NEG_ENA)
+#define F_AUTO_NEG_ENA V_AUTO_NEG_ENA(1U)
+
+#define S_T7_POWER_DOWN 11
+#define V_T7_POWER_DOWN(x) ((x) << S_T7_POWER_DOWN)
+#define F_T7_POWER_DOWN V_T7_POWER_DOWN(1U)
+
+#define S_RESTART_AUTO_NEG 9
+#define V_RESTART_AUTO_NEG(x) ((x) << S_RESTART_AUTO_NEG)
+#define F_RESTART_AUTO_NEG V_RESTART_AUTO_NEG(1U)
+
+#define S_SPEED_SEL_0 6
+#define V_SPEED_SEL_0(x) ((x) << S_SPEED_SEL_0)
+#define F_SPEED_SEL_0 V_SPEED_SEL_0(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_STATUS 0x3e004
+
+#define S_100BASE_T4 15
+#define V_100BASE_T4(x) ((x) << S_100BASE_T4)
+#define F_100BASE_T4 V_100BASE_T4(1U)
+
+#define S_100BASE_X_FULL_DUPLEX 14
+#define V_100BASE_X_FULL_DUPLEX(x) ((x) << S_100BASE_X_FULL_DUPLEX)
+#define F_100BASE_X_FULL_DUPLEX V_100BASE_X_FULL_DUPLEX(1U)
+
+#define S_100BASE_X_HALF_DUPLEX 13
+#define V_100BASE_X_HALF_DUPLEX(x) ((x) << S_100BASE_X_HALF_DUPLEX)
+#define F_100BASE_X_HALF_DUPLEX V_100BASE_X_HALF_DUPLEX(1U)
+
+#define S_10MBPS_FULL_DUPLEX 12
+#define V_10MBPS_FULL_DUPLEX(x) ((x) << S_10MBPS_FULL_DUPLEX)
+#define F_10MBPS_FULL_DUPLEX V_10MBPS_FULL_DUPLEX(1U)
+
+#define S_10MBPS_HALF_DUPLEX 11
+#define V_10MBPS_HALF_DUPLEX(x) ((x) << S_10MBPS_HALF_DUPLEX)
+#define F_10MBPS_HALF_DUPLEX V_10MBPS_HALF_DUPLEX(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX1 10
+#define V_100BASE_T2_HALF_DUPLEX1(x) ((x) << S_100BASE_T2_HALF_DUPLEX1)
+#define F_100BASE_T2_HALF_DUPLEX1 V_100BASE_T2_HALF_DUPLEX1(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX0 9
+#define V_100BASE_T2_HALF_DUPLEX0(x) ((x) << S_100BASE_T2_HALF_DUPLEX0)
+#define F_100BASE_T2_HALF_DUPLEX0 V_100BASE_T2_HALF_DUPLEX0(1U)
+
+#define S_T7_EXTENDED_STATUS 8
+#define V_T7_EXTENDED_STATUS(x) ((x) << S_T7_EXTENDED_STATUS)
+#define F_T7_EXTENDED_STATUS V_T7_EXTENDED_STATUS(1U)
+
+#define S_AUTO_NEG_COMPLETE 5
+#define V_AUTO_NEG_COMPLETE(x) ((x) << S_AUTO_NEG_COMPLETE)
+#define F_AUTO_NEG_COMPLETE V_AUTO_NEG_COMPLETE(1U)
+
+#define S_T7_REMOTE_FAULT 4
+#define V_T7_REMOTE_FAULT(x) ((x) << S_T7_REMOTE_FAULT)
+#define F_T7_REMOTE_FAULT V_T7_REMOTE_FAULT(1U)
+
+#define S_AUTO_NEG_ABILITY 3
+#define V_AUTO_NEG_ABILITY(x) ((x) << S_AUTO_NEG_ABILITY)
+#define F_AUTO_NEG_ABILITY V_AUTO_NEG_ABILITY(1U)
+
+#define S_JABBER_DETECT 1
+#define V_JABBER_DETECT(x) ((x) << S_JABBER_DETECT)
+#define F_JABBER_DETECT V_JABBER_DETECT(1U)
+
+#define S_EXTENDED_CAPABILITY 0
+#define V_EXTENDED_CAPABILITY(x) ((x) << S_EXTENDED_CAPABILITY)
+#define F_EXTENDED_CAPABILITY V_EXTENDED_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_0 0x3e008
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_1 0x3e00c
+#define A_MAC_MTIP_PCS_1G_0_DEV_ABILITY 0x3e010
+
+#define S_EEE_CLOCK_STOP_ENABLE 8
+#define V_EEE_CLOCK_STOP_ENABLE(x) ((x) << S_EEE_CLOCK_STOP_ENABLE)
+#define F_EEE_CLOCK_STOP_ENABLE V_EEE_CLOCK_STOP_ENABLE(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PARTNER_ABILITY 0x3e014
+
+#define S_COPPER_LINK_STATUS 15
+#define V_COPPER_LINK_STATUS(x) ((x) << S_COPPER_LINK_STATUS)
+#define F_COPPER_LINK_STATUS V_COPPER_LINK_STATUS(1U)
+
+#define S_COPPER_DUPLEX_STATUS 12
+#define V_COPPER_DUPLEX_STATUS(x) ((x) << S_COPPER_DUPLEX_STATUS)
+#define F_COPPER_DUPLEX_STATUS V_COPPER_DUPLEX_STATUS(1U)
+
+#define S_COPPER_SPEED 10
+#define M_COPPER_SPEED 0x3U
+#define V_COPPER_SPEED(x) ((x) << S_COPPER_SPEED)
+#define G_COPPER_SPEED(x) (((x) >> S_COPPER_SPEED) & M_COPPER_SPEED)
+
+#define S_EEE_CAPABILITY 9
+#define V_EEE_CAPABILITY(x) ((x) << S_EEE_CAPABILITY)
+#define F_EEE_CAPABILITY V_EEE_CAPABILITY(1U)
+
+#define S_EEE_CLOCK_STOP_CAPABILITY 8
+#define V_EEE_CLOCK_STOP_CAPABILITY(x) ((x) << S_EEE_CLOCK_STOP_CAPABILITY)
+#define F_EEE_CLOCK_STOP_CAPABILITY V_EEE_CLOCK_STOP_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_AN_EXPANSION 0x3e018
+#define A_MAC_MTIP_PCS_1G_0_NP_TX 0x3e01c
+#define A_MAC_MTIP_PCS_1G_0_LP_NP_RX 0x3e020
+
+#define S_T7_DATA 0
+#define M_T7_DATA 0x7ffU
+#define V_T7_DATA(x) ((x) << S_T7_DATA)
+#define G_T7_DATA(x) (((x) >> S_T7_DATA) & M_T7_DATA)
+
+#define A_MAC_MTIP_PCS_1G_0_EXTENDED_STATUS 0x3e03c
+#define A_MAC_MTIP_PCS_1G_0_SCRATCH 0x3e040
+#define A_MAC_MTIP_PCS_1G_0_REV 0x3e044
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_0 0x3e048
+
+#define S_LINK_TIMER_VAL 0
+#define M_LINK_TIMER_VAL 0xffffU
+#define V_LINK_TIMER_VAL(x) ((x) << S_LINK_TIMER_VAL)
+#define G_LINK_TIMER_VAL(x) (((x) >> S_LINK_TIMER_VAL) & M_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_1 0x3e04c
+
+#define S_T7_LINK_TIMER_VAL 0
+#define M_T7_LINK_TIMER_VAL 0x1fU
+#define V_T7_LINK_TIMER_VAL(x) ((x) << S_T7_LINK_TIMER_VAL)
+#define G_T7_LINK_TIMER_VAL(x) (((x) >> S_T7_LINK_TIMER_VAL) & M_T7_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_IF_MODE 0x3e050
+#define A_MAC_MTIP_PCS_1G_0_DEC_ERR_CNT 0x3e054
+#define A_MAC_MTIP_PCS_1G_0_VENDOR_CONTROL 0x3e058
+
+#define S_SGPCS_ENA_ST 15
+#define V_SGPCS_ENA_ST(x) ((x) << S_SGPCS_ENA_ST)
+#define F_SGPCS_ENA_ST V_SGPCS_ENA_ST(1U)
+
+#define S_T7_CFG_CLOCK_RATE 4
+#define M_T7_CFG_CLOCK_RATE 0xfU
+#define V_T7_CFG_CLOCK_RATE(x) ((x) << S_T7_CFG_CLOCK_RATE)
+#define G_T7_CFG_CLOCK_RATE(x) (((x) >> S_T7_CFG_CLOCK_RATE) & M_T7_CFG_CLOCK_RATE)
+
+#define S_SGPCS_ENA_R 0
+#define V_SGPCS_ENA_R(x) ((x) << S_SGPCS_ENA_R)
+#define F_SGPCS_ENA_R V_SGPCS_ENA_R(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_SD_BIT_SLIP 0x3e05c
+
+#define S_SD_BIT_SLIP 0
+#define M_SD_BIT_SLIP 0xfU
+#define V_SD_BIT_SLIP(x) ((x) << S_SD_BIT_SLIP)
+#define G_SD_BIT_SLIP(x) (((x) >> S_SD_BIT_SLIP) & M_SD_BIT_SLIP)
+
+#define A_MAC_MTIP_PCS_1G_1_CONTROL 0x3e100
+#define A_MAC_MTIP_PCS_1G_1_STATUS 0x3e104
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_0 0x3e108
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_1 0x3e10c
+#define A_MAC_MTIP_PCS_1G_1_DEV_ABILITY 0x3e110
+#define A_MAC_MTIP_PCS_1G_1_PARTNER_ABILITY 0x3e114
+#define A_MAC_MTIP_PCS_1G_1_AN_EXPANSION 0x3e118
+#define A_MAC_MTIP_PCS_1G_1_NP_TX 0x3e11c
+#define A_MAC_MTIP_PCS_1G_1_LP_NP_RX 0x3e120
+#define A_MAC_MTIP_PCS_1G_1_EXTENDED_STATUS 0x3e13c
+#define A_MAC_MTIP_PCS_1G_1_SCRATCH 0x3e140
+#define A_MAC_MTIP_PCS_1G_1_REV 0x3e144
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_0 0x3e148
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_1 0x3e14c
+#define A_MAC_MTIP_PCS_1G_1_IF_MODE 0x3e150
+#define A_MAC_MTIP_PCS_1G_1_DEC_ERR_CNT 0x3e154
+#define A_MAC_MTIP_PCS_1G_1_VENDOR_CONTROL 0x3e158
+#define A_MAC_MTIP_PCS_1G_1_SD_BIT_SLIP 0x3e15c
+#define A_MAC_MTIP_PCS_1G_2_CONTROL 0x3e200
+#define A_MAC_MTIP_PCS_1G_2_STATUS 0x3e204
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_0 0x3e208
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_1 0x3e20c
+#define A_MAC_MTIP_PCS_1G_2_DEV_ABILITY 0x3e210
+#define A_MAC_MTIP_PCS_1G_2_PARTNER_ABILITY 0x3e214
+#define A_MAC_MTIP_PCS_1G_2_AN_EXPANSION 0x3e218
+#define A_MAC_MTIP_PCS_1G_2_NP_TX 0x3e21c
+#define A_MAC_MTIP_PCS_1G_2_LP_NP_RX 0x3e220
+#define A_MAC_MTIP_PCS_1G_2_EXTENDED_STATUS 0x3e23c
+#define A_MAC_MTIP_PCS_1G_2_SCRATCH 0x3e240
+#define A_MAC_MTIP_PCS_1G_2_REV 0x3e244
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_0 0x3e248
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_1 0x3e24c
+#define A_MAC_MTIP_PCS_1G_2_IF_MODE 0x3e250
+#define A_MAC_MTIP_PCS_1G_2_DEC_ERR_CNT 0x3e254
+#define A_MAC_MTIP_PCS_1G_2_VENDOR_CONTROL 0x3e258
+#define A_MAC_MTIP_PCS_1G_2_SD_BIT_SLIP 0x3e25c
+#define A_MAC_MTIP_PCS_1G_3_CONTROL 0x3e300
+#define A_MAC_MTIP_PCS_1G_3_STATUS 0x3e304
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_0 0x3e308
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_1 0x3e30c
+#define A_MAC_MTIP_PCS_1G_3_DEV_ABILITY 0x3e310
+#define A_MAC_MTIP_PCS_1G_3_PARTNER_ABILITY 0x3e314
+#define A_MAC_MTIP_PCS_1G_3_AN_EXPANSION 0x3e318
+#define A_MAC_MTIP_PCS_1G_3_NP_TX 0x3e31c
+#define A_MAC_MTIP_PCS_1G_3_LP_NP_RX 0x3e320
+#define A_MAC_MTIP_PCS_1G_3_EXTENDED_STATUS 0x3e33c
+#define A_MAC_MTIP_PCS_1G_3_SCRATCH 0x3e340
+#define A_MAC_MTIP_PCS_1G_3_REV 0x3e344
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_0 0x3e348
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_1 0x3e34c
+#define A_MAC_MTIP_PCS_1G_3_IF_MODE 0x3e350
+#define A_MAC_MTIP_PCS_1G_3_DEC_ERR_CNT 0x3e354
+#define A_MAC_MTIP_PCS_1G_3_VENDOR_CONTROL 0x3e358
+#define A_MAC_MTIP_PCS_1G_3_SD_BIT_SLIP 0x3e35c
+#define A_MAC_DPLL_CTRL_0 0x3f000
+
+#define S_LOCAL_FAULT_OVRD 18
+#define V_LOCAL_FAULT_OVRD(x) ((x) << S_LOCAL_FAULT_OVRD)
+#define F_LOCAL_FAULT_OVRD V_LOCAL_FAULT_OVRD(1U)
+
+#define S_LOCAL_FAULT_HOLD_EN 17
+#define V_LOCAL_FAULT_HOLD_EN(x) ((x) << S_LOCAL_FAULT_HOLD_EN)
+#define F_LOCAL_FAULT_HOLD_EN V_LOCAL_FAULT_HOLD_EN(1U)
+
+#define S_DPLL_RST 16
+#define V_DPLL_RST(x) ((x) << S_DPLL_RST)
+#define F_DPLL_RST V_DPLL_RST(1U)
+
+#define S_CNTOFFSET 0
+#define M_CNTOFFSET 0xffffU
+#define V_CNTOFFSET(x) ((x) << S_CNTOFFSET)
+#define G_CNTOFFSET(x) (((x) >> S_CNTOFFSET) & M_CNTOFFSET)
+
+#define A_MAC_DPLL_CTRL_1 0x3f004
+
+#define S_DELAYK 0
+#define M_DELAYK 0xffffffU
+#define V_DELAYK(x) ((x) << S_DELAYK)
+#define G_DELAYK(x) (((x) >> S_DELAYK) & M_DELAYK)
+
+#define A_MAC_DPLL_CTRL_2 0x3f008
+
+#define S_DIVFFB 16
+#define M_DIVFFB 0xffffU
+#define V_DIVFFB(x) ((x) << S_DIVFFB)
+#define G_DIVFFB(x) (((x) >> S_DIVFFB) & M_DIVFFB)
+
+#define S_DIVFIN 0
+#define M_DIVFIN 0xffffU
+#define V_DIVFIN(x) ((x) << S_DIVFIN)
+#define G_DIVFIN(x) (((x) >> S_DIVFIN) & M_DIVFIN)
+
+#define A_MAC_DPLL_CTRL_3 0x3f00c
+
+#define S_ISHIFT_HOLD 28
+#define M_ISHIFT_HOLD 0xfU
+#define V_ISHIFT_HOLD(x) ((x) << S_ISHIFT_HOLD)
+#define G_ISHIFT_HOLD(x) (((x) >> S_ISHIFT_HOLD) & M_ISHIFT_HOLD)
+
+#define S_ISHIFT 24
+#define M_ISHIFT 0xfU
+#define V_ISHIFT(x) ((x) << S_ISHIFT)
+#define G_ISHIFT(x) (((x) >> S_ISHIFT) & M_ISHIFT)
+
+#define S_INT_PRESET 12
+#define M_INT_PRESET 0xfffU
+#define V_INT_PRESET(x) ((x) << S_INT_PRESET)
+#define G_INT_PRESET(x) (((x) >> S_INT_PRESET) & M_INT_PRESET)
+
+#define S_FMI 4
+#define M_FMI 0xffU
+#define V_FMI(x) ((x) << S_FMI)
+#define G_FMI(x) (((x) >> S_FMI) & M_FMI)
+
+#define S_DPLL_PROGRAM 3
+#define V_DPLL_PROGRAM(x) ((x) << S_DPLL_PROGRAM)
+#define F_DPLL_PROGRAM V_DPLL_PROGRAM(1U)
+
+#define S_PRESET_EN 2
+#define V_PRESET_EN(x) ((x) << S_PRESET_EN)
+#define F_PRESET_EN V_PRESET_EN(1U)
+
+#define S_ONTARGETOV 1
+#define V_ONTARGETOV(x) ((x) << S_ONTARGETOV)
+#define F_ONTARGETOV V_ONTARGETOV(1U)
+
+#define S_FDONLY 0
+#define V_FDONLY(x) ((x) << S_FDONLY)
+#define F_FDONLY V_FDONLY(1U)
+
+#define A_MAC_DPLL_CTRL_4 0x3f010
+
+#define S_FKI 24
+#define M_FKI 0x1fU
+#define V_FKI(x) ((x) << S_FKI)
+#define G_FKI(x) (((x) >> S_FKI) & M_FKI)
+
+#define S_FRAC_PRESET 0
+#define M_FRAC_PRESET 0xffffffU
+#define V_FRAC_PRESET(x) ((x) << S_FRAC_PRESET)
+#define G_FRAC_PRESET(x) (((x) >> S_FRAC_PRESET) & M_FRAC_PRESET)
+
+#define A_MAC_DPLL_CTRL_5 0x3f014
+
+#define S_PH_STEP_CNT_HOLD 24
+#define M_PH_STEP_CNT_HOLD 0x1fU
+#define V_PH_STEP_CNT_HOLD(x) ((x) << S_PH_STEP_CNT_HOLD)
+#define G_PH_STEP_CNT_HOLD(x) (((x) >> S_PH_STEP_CNT_HOLD) & M_PH_STEP_CNT_HOLD)
+
+#define S_CFG_RESET 23
+#define V_CFG_RESET(x) ((x) << S_CFG_RESET)
+#define F_CFG_RESET V_CFG_RESET(1U)
+
+#define S_PH_STEP_CNT 16
+#define M_PH_STEP_CNT 0x1fU
+#define V_PH_STEP_CNT(x) ((x) << S_PH_STEP_CNT)
+#define G_PH_STEP_CNT(x) (((x) >> S_PH_STEP_CNT) & M_PH_STEP_CNT)
+
+#define S_OTDLY 0
+#define M_OTDLY 0xffffU
+#define V_OTDLY(x) ((x) << S_OTDLY)
+#define G_OTDLY(x) (((x) >> S_OTDLY) & M_OTDLY)
+
+#define A_MAC_DPLL_CTRL_6 0x3f018
+
+#define S_TARGETCNT 16
+#define M_TARGETCNT 0xffffU
+#define V_TARGETCNT(x) ((x) << S_TARGETCNT)
+#define G_TARGETCNT(x) (((x) >> S_TARGETCNT) & M_TARGETCNT)
+
+#define S_PKP 8
+#define M_PKP 0x1fU
+#define V_PKP(x) ((x) << S_PKP)
+#define G_PKP(x) (((x) >> S_PKP) & M_PKP)
+
+#define S_PMP 0
+#define M_PMP 0xffU
+#define V_PMP(x) ((x) << S_PMP)
+#define G_PMP(x) (((x) >> S_PMP) & M_PMP)
+
+#define A_MAC_DPLL_CTRL_7 0x3f01c
+#define A_MAC_DPLL_STATUS_0 0x3f020
+
+#define S_FRAC 0
+#define M_FRAC 0xffffffU
+#define V_FRAC(x) ((x) << S_FRAC)
+#define G_FRAC(x) (((x) >> S_FRAC) & M_FRAC)
+
+#define A_MAC_DPLL_STATUS_1 0x3f024
+
+#define S_FRAC_PD_OUT 0
+#define M_FRAC_PD_OUT 0xffffffU
+#define V_FRAC_PD_OUT(x) ((x) << S_FRAC_PD_OUT)
+#define G_FRAC_PD_OUT(x) (((x) >> S_FRAC_PD_OUT) & M_FRAC_PD_OUT)
+
+#define A_MAC_DPLL_STATUS_2 0x3f028
+
+#define S_INT 12
+#define M_INT 0xfffU
+#define V_INT(x) ((x) << S_INT)
+#define G_INT(x) (((x) >> S_INT) & M_INT)
+
+#define S_INT_PD_OUT 0
+#define M_INT_PD_OUT 0xfffU
+#define V_INT_PD_OUT(x) ((x) << S_INT_PD_OUT)
+#define G_INT_PD_OUT(x) (((x) >> S_INT_PD_OUT) & M_INT_PD_OUT)
+
+#define A_MAC_FRAC_N_PLL_CTRL_0 0x3f02c
+
+#define S_FRAC_N_DSKEWCALCNT 29
+#define M_FRAC_N_DSKEWCALCNT 0x7U
+#define V_FRAC_N_DSKEWCALCNT(x) ((x) << S_FRAC_N_DSKEWCALCNT)
+#define G_FRAC_N_DSKEWCALCNT(x) (((x) >> S_FRAC_N_DSKEWCALCNT) & M_FRAC_N_DSKEWCALCNT)
+
+#define S_PLLEN 28
+#define V_PLLEN(x) ((x) << S_PLLEN)
+#define F_PLLEN V_PLLEN(1U)
+
+#define S_T7_BYPASS 24
+#define M_T7_BYPASS 0xfU
+#define V_T7_BYPASS(x) ((x) << S_T7_BYPASS)
+#define G_T7_BYPASS(x) (((x) >> S_T7_BYPASS) & M_T7_BYPASS)
+
+#define S_POSTDIV3A 21
+#define M_POSTDIV3A 0x7U
+#define V_POSTDIV3A(x) ((x) << S_POSTDIV3A)
+#define G_POSTDIV3A(x) (((x) >> S_POSTDIV3A) & M_POSTDIV3A)
+
+#define S_POSTDIV3B 18
+#define M_POSTDIV3B 0x7U
+#define V_POSTDIV3B(x) ((x) << S_POSTDIV3B)
+#define G_POSTDIV3B(x) (((x) >> S_POSTDIV3B) & M_POSTDIV3B)
+
+#define S_POSTDIV2A 15
+#define M_POSTDIV2A 0x7U
+#define V_POSTDIV2A(x) ((x) << S_POSTDIV2A)
+#define G_POSTDIV2A(x) (((x) >> S_POSTDIV2A) & M_POSTDIV2A)
+
+#define S_POSTDIV2B 12
+#define M_POSTDIV2B 0x7U
+#define V_POSTDIV2B(x) ((x) << S_POSTDIV2B)
+#define G_POSTDIV2B(x) (((x) >> S_POSTDIV2B) & M_POSTDIV2B)
+
+#define S_POSTDIV1A 9
+#define M_POSTDIV1A 0x7U
+#define V_POSTDIV1A(x) ((x) << S_POSTDIV1A)
+#define G_POSTDIV1A(x) (((x) >> S_POSTDIV1A) & M_POSTDIV1A)
+
+#define S_POSTDIV1B 6
+#define M_POSTDIV1B 0x7U
+#define V_POSTDIV1B(x) ((x) << S_POSTDIV1B)
+#define G_POSTDIV1B(x) (((x) >> S_POSTDIV1B) & M_POSTDIV1B)
+
+#define S_POSTDIV0A 3
+#define M_POSTDIV0A 0x7U
+#define V_POSTDIV0A(x) ((x) << S_POSTDIV0A)
+#define G_POSTDIV0A(x) (((x) >> S_POSTDIV0A) & M_POSTDIV0A)
+
+#define S_POSTDIV0B 0
+#define M_POSTDIV0B 0x7U
+#define V_POSTDIV0B(x) ((x) << S_POSTDIV0B)
+#define G_POSTDIV0B(x) (((x) >> S_POSTDIV0B) & M_POSTDIV0B)
+
+#define A_MAC_FRAC_N_PLL_CTRL_1 0x3f030
+
+#define S_FRAC_N_FRAC_N_FOUTEN 28
+#define M_FRAC_N_FRAC_N_FOUTEN 0xfU
+#define V_FRAC_N_FRAC_N_FOUTEN(x) ((x) << S_FRAC_N_FRAC_N_FOUTEN)
+#define G_FRAC_N_FRAC_N_FOUTEN(x) (((x) >> S_FRAC_N_FRAC_N_FOUTEN) & M_FRAC_N_FRAC_N_FOUTEN)
+
+#define S_FRAC_N_DSKEWCALIN 16
+#define M_FRAC_N_DSKEWCALIN 0xfffU
+#define V_FRAC_N_DSKEWCALIN(x) ((x) << S_FRAC_N_DSKEWCALIN)
+#define G_FRAC_N_DSKEWCALIN(x) (((x) >> S_FRAC_N_DSKEWCALIN) & M_FRAC_N_DSKEWCALIN)
+
+#define S_FRAC_N_REFDIV 10
+#define M_FRAC_N_REFDIV 0x3fU
+#define V_FRAC_N_REFDIV(x) ((x) << S_FRAC_N_REFDIV)
+#define G_FRAC_N_REFDIV(x) (((x) >> S_FRAC_N_REFDIV) & M_FRAC_N_REFDIV)
+
+#define S_FRAC_N_DSMEN 9
+#define V_FRAC_N_DSMEN(x) ((x) << S_FRAC_N_DSMEN)
+#define F_FRAC_N_DSMEN V_FRAC_N_DSMEN(1U)
+
+#define S_FRAC_N_PLLEN 8
+#define V_FRAC_N_PLLEN(x) ((x) << S_FRAC_N_PLLEN)
+#define F_FRAC_N_PLLEN V_FRAC_N_PLLEN(1U)
+
+#define S_FRAC_N_DACEN 7
+#define V_FRAC_N_DACEN(x) ((x) << S_FRAC_N_DACEN)
+#define F_FRAC_N_DACEN V_FRAC_N_DACEN(1U)
+
+#define S_FRAC_N_POSTDIV0PRE 6
+#define V_FRAC_N_POSTDIV0PRE(x) ((x) << S_FRAC_N_POSTDIV0PRE)
+#define F_FRAC_N_POSTDIV0PRE V_FRAC_N_POSTDIV0PRE(1U)
+
+#define S_FRAC_N_DSKEWCALBYP 5
+#define V_FRAC_N_DSKEWCALBYP(x) ((x) << S_FRAC_N_DSKEWCALBYP)
+#define F_FRAC_N_DSKEWCALBYP V_FRAC_N_DSKEWCALBYP(1U)
+
+#define S_FRAC_N_DSKEWFASTCAL 4
+#define V_FRAC_N_DSKEWFASTCAL(x) ((x) << S_FRAC_N_DSKEWFASTCAL)
+#define F_FRAC_N_DSKEWFASTCAL V_FRAC_N_DSKEWFASTCAL(1U)
+
+#define S_FRAC_N_DSKEWCALEN 3
+#define V_FRAC_N_DSKEWCALEN(x) ((x) << S_FRAC_N_DSKEWCALEN)
+#define F_FRAC_N_DSKEWCALEN V_FRAC_N_DSKEWCALEN(1U)
+
+#define S_FRAC_N_FREFCMLEN 2
+#define V_FRAC_N_FREFCMLEN(x) ((x) << S_FRAC_N_FREFCMLEN)
+#define F_FRAC_N_FREFCMLEN V_FRAC_N_FREFCMLEN(1U)
+
+#define A_MAC_FRAC_N_PLL_STATUS_0 0x3f034
+
+#define S_DSKEWCALLOCK 12
+#define V_DSKEWCALLOCK(x) ((x) << S_DSKEWCALLOCK)
+#define F_DSKEWCALLOCK V_DSKEWCALLOCK(1U)
+
+#define S_DSKEWCALOUT 0
+#define M_DSKEWCALOUT 0xfffU
+#define V_DSKEWCALOUT(x) ((x) << S_DSKEWCALOUT)
+#define G_DSKEWCALOUT(x) (((x) >> S_DSKEWCALOUT) & M_DSKEWCALOUT)
+
+#define A_MAC_MTIP_PCS_STATUS_0 0x3f100
+
+#define S_XLGMII7_TX_TSU 22
+#define M_XLGMII7_TX_TSU 0x3U
+#define V_XLGMII7_TX_TSU(x) ((x) << S_XLGMII7_TX_TSU)
+#define G_XLGMII7_TX_TSU(x) (((x) >> S_XLGMII7_TX_TSU) & M_XLGMII7_TX_TSU)
+
+#define S_XLGMII6_TX_TSU 20
+#define M_XLGMII6_TX_TSU 0x3U
+#define V_XLGMII6_TX_TSU(x) ((x) << S_XLGMII6_TX_TSU)
+#define G_XLGMII6_TX_TSU(x) (((x) >> S_XLGMII6_TX_TSU) & M_XLGMII6_TX_TSU)
+
+#define S_XLGMII5_TX_TSU 18
+#define M_XLGMII5_TX_TSU 0x3U
+#define V_XLGMII5_TX_TSU(x) ((x) << S_XLGMII5_TX_TSU)
+#define G_XLGMII5_TX_TSU(x) (((x) >> S_XLGMII5_TX_TSU) & M_XLGMII5_TX_TSU)
+
+#define S_XLGMII4_TX_TSU 16
+#define M_XLGMII4_TX_TSU 0x3U
+#define V_XLGMII4_TX_TSU(x) ((x) << S_XLGMII4_TX_TSU)
+#define G_XLGMII4_TX_TSU(x) (((x) >> S_XLGMII4_TX_TSU) & M_XLGMII4_TX_TSU)
+
+#define S_XLGMII3_TX_TSU 14
+#define M_XLGMII3_TX_TSU 0x3U
+#define V_XLGMII3_TX_TSU(x) ((x) << S_XLGMII3_TX_TSU)
+#define G_XLGMII3_TX_TSU(x) (((x) >> S_XLGMII3_TX_TSU) & M_XLGMII3_TX_TSU)
+
+#define S_XLGMII2_TX_TSU 12
+#define M_XLGMII2_TX_TSU 0x3U
+#define V_XLGMII2_TX_TSU(x) ((x) << S_XLGMII2_TX_TSU)
+#define G_XLGMII2_TX_TSU(x) (((x) >> S_XLGMII2_TX_TSU) & M_XLGMII2_TX_TSU)
+
+#define S_XLGMII1_TX_TSU 10
+#define M_XLGMII1_TX_TSU 0x3U
+#define V_XLGMII1_TX_TSU(x) ((x) << S_XLGMII1_TX_TSU)
+#define G_XLGMII1_TX_TSU(x) (((x) >> S_XLGMII1_TX_TSU) & M_XLGMII1_TX_TSU)
+
+#define S_XLGMII0_TX_TSU 8
+#define M_XLGMII0_TX_TSU 0x3U
+#define V_XLGMII0_TX_TSU(x) ((x) << S_XLGMII0_TX_TSU)
+#define G_XLGMII0_TX_TSU(x) (((x) >> S_XLGMII0_TX_TSU) & M_XLGMII0_TX_TSU)
+
+#define S_CGMII3_TX_TSU 6
+#define M_CGMII3_TX_TSU 0x3U
+#define V_CGMII3_TX_TSU(x) ((x) << S_CGMII3_TX_TSU)
+#define G_CGMII3_TX_TSU(x) (((x) >> S_CGMII3_TX_TSU) & M_CGMII3_TX_TSU)
+
+#define S_CGMII2_TX_TSU 4
+#define M_CGMII2_TX_TSU 0x3U
+#define V_CGMII2_TX_TSU(x) ((x) << S_CGMII2_TX_TSU)
+#define G_CGMII2_TX_TSU(x) (((x) >> S_CGMII2_TX_TSU) & M_CGMII2_TX_TSU)
+
+#define S_CGMII1_TX_TSU 2
+#define M_CGMII1_TX_TSU 0x3U
+#define V_CGMII1_TX_TSU(x) ((x) << S_CGMII1_TX_TSU)
+#define G_CGMII1_TX_TSU(x) (((x) >> S_CGMII1_TX_TSU) & M_CGMII1_TX_TSU)
+
+#define S_CGMII0_TX_TSU 0
+#define M_CGMII0_TX_TSU 0x3U
+#define V_CGMII0_TX_TSU(x) ((x) << S_CGMII0_TX_TSU)
+#define G_CGMII0_TX_TSU(x) (((x) >> S_CGMII0_TX_TSU) & M_CGMII0_TX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_1 0x3f104
+
+#define S_CDMII1_RX_TSU 26
+#define M_CDMII1_RX_TSU 0x3U
+#define V_CDMII1_RX_TSU(x) ((x) << S_CDMII1_RX_TSU)
+#define G_CDMII1_RX_TSU(x) (((x) >> S_CDMII1_RX_TSU) & M_CDMII1_RX_TSU)
+
+#define S_CDMII0_RX_TSU 24
+#define M_CDMII0_RX_TSU 0x3U
+#define V_CDMII0_RX_TSU(x) ((x) << S_CDMII0_RX_TSU)
+#define G_CDMII0_RX_TSU(x) (((x) >> S_CDMII0_RX_TSU) & M_CDMII0_RX_TSU)
+
+#define S_XLGMII7_RX_TSU 22
+#define M_XLGMII7_RX_TSU 0x3U
+#define V_XLGMII7_RX_TSU(x) ((x) << S_XLGMII7_RX_TSU)
+#define G_XLGMII7_RX_TSU(x) (((x) >> S_XLGMII7_RX_TSU) & M_XLGMII7_RX_TSU)
+
+#define S_XLGMII6_RX_TSU 20
+#define M_XLGMII6_RX_TSU 0x3U
+#define V_XLGMII6_RX_TSU(x) ((x) << S_XLGMII6_RX_TSU)
+#define G_XLGMII6_RX_TSU(x) (((x) >> S_XLGMII6_RX_TSU) & M_XLGMII6_RX_TSU)
+
+#define S_XLGMII5_RX_TSU 18
+#define M_XLGMII5_RX_TSU 0x3U
+#define V_XLGMII5_RX_TSU(x) ((x) << S_XLGMII5_RX_TSU)
+#define G_XLGMII5_RX_TSU(x) (((x) >> S_XLGMII5_RX_TSU) & M_XLGMII5_RX_TSU)
+
+#define S_XLGMII4_RX_TSU 16
+#define M_XLGMII4_RX_TSU 0x3U
+#define V_XLGMII4_RX_TSU(x) ((x) << S_XLGMII4_RX_TSU)
+#define G_XLGMII4_RX_TSU(x) (((x) >> S_XLGMII4_RX_TSU) & M_XLGMII4_RX_TSU)
+
+#define S_XLGMII3_RX_TSU 14
+#define M_XLGMII3_RX_TSU 0x3U
+#define V_XLGMII3_RX_TSU(x) ((x) << S_XLGMII3_RX_TSU)
+#define G_XLGMII3_RX_TSU(x) (((x) >> S_XLGMII3_RX_TSU) & M_XLGMII3_RX_TSU)
+
+#define S_XLGMII2_RX_TSU 12
+#define M_XLGMII2_RX_TSU 0x3U
+#define V_XLGMII2_RX_TSU(x) ((x) << S_XLGMII2_RX_TSU)
+#define G_XLGMII2_RX_TSU(x) (((x) >> S_XLGMII2_RX_TSU) & M_XLGMII2_RX_TSU)
+
+#define S_XLGMII1_RX_TSU 10
+#define M_XLGMII1_RX_TSU 0x3U
+#define V_XLGMII1_RX_TSU(x) ((x) << S_XLGMII1_RX_TSU)
+#define G_XLGMII1_RX_TSU(x) (((x) >> S_XLGMII1_RX_TSU) & M_XLGMII1_RX_TSU)
+
+#define S_XLGMII0_RX_TSU 8
+#define M_XLGMII0_RX_TSU 0x3U
+#define V_XLGMII0_RX_TSU(x) ((x) << S_XLGMII0_RX_TSU)
+#define G_XLGMII0_RX_TSU(x) (((x) >> S_XLGMII0_RX_TSU) & M_XLGMII0_RX_TSU)
+
+#define S_CGMII3_RX_TSU 6
+#define M_CGMII3_RX_TSU 0x3U
+#define V_CGMII3_RX_TSU(x) ((x) << S_CGMII3_RX_TSU)
+#define G_CGMII3_RX_TSU(x) (((x) >> S_CGMII3_RX_TSU) & M_CGMII3_RX_TSU)
+
+#define S_CGMII2_RX_TSU 4
+#define M_CGMII2_RX_TSU 0x3U
+#define V_CGMII2_RX_TSU(x) ((x) << S_CGMII2_RX_TSU)
+#define G_CGMII2_RX_TSU(x) (((x) >> S_CGMII2_RX_TSU) & M_CGMII2_RX_TSU)
+
+#define S_CGMII1_RX_TSU 2
+#define M_CGMII1_RX_TSU 0x3U
+#define V_CGMII1_RX_TSU(x) ((x) << S_CGMII1_RX_TSU)
+#define G_CGMII1_RX_TSU(x) (((x) >> S_CGMII1_RX_TSU) & M_CGMII1_RX_TSU)
+
+#define S_CGMII0_RX_TSU 0
+#define M_CGMII0_RX_TSU 0x3U
+#define V_CGMII0_RX_TSU(x) ((x) << S_CGMII0_RX_TSU)
+#define G_CGMII0_RX_TSU(x) (((x) >> S_CGMII0_RX_TSU) & M_CGMII0_RX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_2 0x3f108
+
+#define S_SD_BIT_SLIP_0 0
+#define M_SD_BIT_SLIP_0 0x3fffffffU
+#define V_SD_BIT_SLIP_0(x) ((x) << S_SD_BIT_SLIP_0)
+#define G_SD_BIT_SLIP_0(x) (((x) >> S_SD_BIT_SLIP_0) & M_SD_BIT_SLIP_0)
+
+#define A_MAC_MTIP_PCS_STATUS_3 0x3f10c
+
+#define S_SD_BIT_SLIP_1 0
+#define M_SD_BIT_SLIP_1 0x3ffffU
+#define V_SD_BIT_SLIP_1(x) ((x) << S_SD_BIT_SLIP_1)
+#define G_SD_BIT_SLIP_1(x) (((x) >> S_SD_BIT_SLIP_1) & M_SD_BIT_SLIP_1)
+
+#define A_MAC_MTIP_PCS_STATUS_4 0x3f110
+
+#define S_TSU_RX_SD 0
+#define M_TSU_RX_SD 0xffffU
+#define V_TSU_RX_SD(x) ((x) << S_TSU_RX_SD)
+#define G_TSU_RX_SD(x) (((x) >> S_TSU_RX_SD) & M_TSU_RX_SD)
+
+#define A_MAC_MTIP_PCS_STATUS_5 0x3f114
+
+#define S_RSFEC_XSTATS_STRB 0
+#define M_RSFEC_XSTATS_STRB 0xffffffU
+#define V_RSFEC_XSTATS_STRB(x) ((x) << S_RSFEC_XSTATS_STRB)
+#define G_RSFEC_XSTATS_STRB(x) (((x) >> S_RSFEC_XSTATS_STRB) & M_RSFEC_XSTATS_STRB)
+
+#define A_MAC_MTIP_PCS_STATUS_6 0x3f118
+#define A_MAC_MTIP_PCS_STATUS_7 0x3f11c
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_0 0x3f120
+
+#define S_TSV_XON_STB_2 24
+#define M_TSV_XON_STB_2 0xffU
+#define V_TSV_XON_STB_2(x) ((x) << S_TSV_XON_STB_2)
+#define G_TSV_XON_STB_2(x) (((x) >> S_TSV_XON_STB_2) & M_TSV_XON_STB_2)
+
+#define S_TSV_XOFF_STB_2 16
+#define M_TSV_XOFF_STB_2 0xffU
+#define V_TSV_XOFF_STB_2(x) ((x) << S_TSV_XOFF_STB_2)
+#define G_TSV_XOFF_STB_2(x) (((x) >> S_TSV_XOFF_STB_2) & M_TSV_XOFF_STB_2)
+
+#define S_RSV_XON_STB_2 8
+#define M_RSV_XON_STB_2 0xffU
+#define V_RSV_XON_STB_2(x) ((x) << S_RSV_XON_STB_2)
+#define G_RSV_XON_STB_2(x) (((x) >> S_RSV_XON_STB_2) & M_RSV_XON_STB_2)
+
+#define S_RSV_XOFF_STB_2 0
+#define M_RSV_XOFF_STB_2 0xffU
+#define V_RSV_XOFF_STB_2(x) ((x) << S_RSV_XOFF_STB_2)
+#define G_RSV_XOFF_STB_2(x) (((x) >> S_RSV_XOFF_STB_2) & M_RSV_XOFF_STB_2)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_1 0x3f124
+
+#define S_TSV_XON_STB_3 24
+#define M_TSV_XON_STB_3 0xffU
+#define V_TSV_XON_STB_3(x) ((x) << S_TSV_XON_STB_3)
+#define G_TSV_XON_STB_3(x) (((x) >> S_TSV_XON_STB_3) & M_TSV_XON_STB_3)
+
+#define S_TSV_XOFF_STB_3 16
+#define M_TSV_XOFF_STB_3 0xffU
+#define V_TSV_XOFF_STB_3(x) ((x) << S_TSV_XOFF_STB_3)
+#define G_TSV_XOFF_STB_3(x) (((x) >> S_TSV_XOFF_STB_3) & M_TSV_XOFF_STB_3)
+
+#define S_RSV_XON_STB_3 8
+#define M_RSV_XON_STB_3 0xffU
+#define V_RSV_XON_STB_3(x) ((x) << S_RSV_XON_STB_3)
+#define G_RSV_XON_STB_3(x) (((x) >> S_RSV_XON_STB_3) & M_RSV_XON_STB_3)
+
+#define S_RSV_XOFF_STB_3 0
+#define M_RSV_XOFF_STB_3 0xffU
+#define V_RSV_XOFF_STB_3(x) ((x) << S_RSV_XOFF_STB_3)
+#define G_RSV_XOFF_STB_3(x) (((x) >> S_RSV_XOFF_STB_3) & M_RSV_XOFF_STB_3)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_2 0x3f128
+
+#define S_TSV_XON_STB_4 24
+#define M_TSV_XON_STB_4 0xffU
+#define V_TSV_XON_STB_4(x) ((x) << S_TSV_XON_STB_4)
+#define G_TSV_XON_STB_4(x) (((x) >> S_TSV_XON_STB_4) & M_TSV_XON_STB_4)
+
+#define S_TSV_XOFF_STB_4 16
+#define M_TSV_XOFF_STB_4 0xffU
+#define V_TSV_XOFF_STB_4(x) ((x) << S_TSV_XOFF_STB_4)
+#define G_TSV_XOFF_STB_4(x) (((x) >> S_TSV_XOFF_STB_4) & M_TSV_XOFF_STB_4)
+
+#define S_RSV_XON_STB_4 8
+#define M_RSV_XON_STB_4 0xffU
+#define V_RSV_XON_STB_4(x) ((x) << S_RSV_XON_STB_4)
+#define G_RSV_XON_STB_4(x) (((x) >> S_RSV_XON_STB_4) & M_RSV_XON_STB_4)
+
+#define S_RSV_XOFF_STB_4 0
+#define M_RSV_XOFF_STB_4 0xffU
+#define V_RSV_XOFF_STB_4(x) ((x) << S_RSV_XOFF_STB_4)
+#define G_RSV_XOFF_STB_4(x) (((x) >> S_RSV_XOFF_STB_4) & M_RSV_XOFF_STB_4)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_3 0x3f12c
+
+#define S_TSV_XON_STB_5 24
+#define M_TSV_XON_STB_5 0xffU
+#define V_TSV_XON_STB_5(x) ((x) << S_TSV_XON_STB_5)
+#define G_TSV_XON_STB_5(x) (((x) >> S_TSV_XON_STB_5) & M_TSV_XON_STB_5)
+
+#define S_TSV_XOFF_STB_5 16
+#define M_TSV_XOFF_STB_5 0xffU
+#define V_TSV_XOFF_STB_5(x) ((x) << S_TSV_XOFF_STB_5)
+#define G_TSV_XOFF_STB_5(x) (((x) >> S_TSV_XOFF_STB_5) & M_TSV_XOFF_STB_5)
+
+#define S_RSV_XON_STB_5 8
+#define M_RSV_XON_STB_5 0xffU
+#define V_RSV_XON_STB_5(x) ((x) << S_RSV_XON_STB_5)
+#define G_RSV_XON_STB_5(x) (((x) >> S_RSV_XON_STB_5) & M_RSV_XON_STB_5)
+
+#define S_RSV_XOFF_STB_5 0
+#define M_RSV_XOFF_STB_5 0xffU
+#define V_RSV_XOFF_STB_5(x) ((x) << S_RSV_XOFF_STB_5)
+#define G_RSV_XOFF_STB_5(x) (((x) >> S_RSV_XOFF_STB_5) & M_RSV_XOFF_STB_5)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_4 0x3f130
+
+#define S_TX_SFD_O_5 19
+#define V_TX_SFD_O_5(x) ((x) << S_TX_SFD_O_5)
+#define F_TX_SFD_O_5 V_TX_SFD_O_5(1U)
+
+#define S_TX_SFD_O_4 18
+#define V_TX_SFD_O_4(x) ((x) << S_TX_SFD_O_4)
+#define F_TX_SFD_O_4 V_TX_SFD_O_4(1U)
+
+#define S_TX_SFD_O_3 17
+#define V_TX_SFD_O_3(x) ((x) << S_TX_SFD_O_3)
+#define F_TX_SFD_O_3 V_TX_SFD_O_3(1U)
+
+#define S_TX_SFD_O_2 16
+#define V_TX_SFD_O_2(x) ((x) << S_TX_SFD_O_2)
+#define F_TX_SFD_O_2 V_TX_SFD_O_2(1U)
+
+#define S_RX_SFD_O_5 15
+#define V_RX_SFD_O_5(x) ((x) << S_RX_SFD_O_5)
+#define F_RX_SFD_O_5 V_RX_SFD_O_5(1U)
+
+#define S_RX_SFD_O_4 14
+#define V_RX_SFD_O_4(x) ((x) << S_RX_SFD_O_4)
+#define F_RX_SFD_O_4 V_RX_SFD_O_4(1U)
+
+#define S_RX_SFD_O_3 13
+#define V_RX_SFD_O_3(x) ((x) << S_RX_SFD_O_3)
+#define F_RX_SFD_O_3 V_RX_SFD_O_3(1U)
+
+#define S_RX_SFD_O_2 12
+#define V_RX_SFD_O_2(x) ((x) << S_RX_SFD_O_2)
+#define F_RX_SFD_O_2 V_RX_SFD_O_2(1U)
+
+#define S_RX_SFD_SHIFT_O_5 11
+#define V_RX_SFD_SHIFT_O_5(x) ((x) << S_RX_SFD_SHIFT_O_5)
+#define F_RX_SFD_SHIFT_O_5 V_RX_SFD_SHIFT_O_5(1U)
+
+#define S_RX_SFD_SHIFT_O_4 10
+#define V_RX_SFD_SHIFT_O_4(x) ((x) << S_RX_SFD_SHIFT_O_4)
+#define F_RX_SFD_SHIFT_O_4 V_RX_SFD_SHIFT_O_4(1U)
+
+#define S_RX_SFD_SHIFT_O_3 9
+#define V_RX_SFD_SHIFT_O_3(x) ((x) << S_RX_SFD_SHIFT_O_3)
+#define F_RX_SFD_SHIFT_O_3 V_RX_SFD_SHIFT_O_3(1U)
+
+#define S_RX_SFD_SHIFT_O_2 8
+#define V_RX_SFD_SHIFT_O_2(x) ((x) << S_RX_SFD_SHIFT_O_2)
+#define F_RX_SFD_SHIFT_O_2 V_RX_SFD_SHIFT_O_2(1U)
+
+#define S_TX_SFD_SHIFT_O_5 7
+#define V_TX_SFD_SHIFT_O_5(x) ((x) << S_TX_SFD_SHIFT_O_5)
+#define F_TX_SFD_SHIFT_O_5 V_TX_SFD_SHIFT_O_5(1U)
+
+#define S_TX_SFD_SHIFT_O_4 6
+#define V_TX_SFD_SHIFT_O_4(x) ((x) << S_TX_SFD_SHIFT_O_4)
+#define F_TX_SFD_SHIFT_O_4 V_TX_SFD_SHIFT_O_4(1U)
+
+#define S_TX_SFD_SHIFT_O_3 5
+#define V_TX_SFD_SHIFT_O_3(x) ((x) << S_TX_SFD_SHIFT_O_3)
+#define F_TX_SFD_SHIFT_O_3 V_TX_SFD_SHIFT_O_3(1U)
+
+#define S_TX_SFD_SHIFT_O_2 4
+#define V_TX_SFD_SHIFT_O_2(x) ((x) << S_TX_SFD_SHIFT_O_2)
+#define F_TX_SFD_SHIFT_O_2 V_TX_SFD_SHIFT_O_2(1U)
+
+#define S_TS_SFD_ENA_5 3
+#define V_TS_SFD_ENA_5(x) ((x) << S_TS_SFD_ENA_5)
+#define F_TS_SFD_ENA_5 V_TS_SFD_ENA_5(1U)
+
+#define S_TS_SFD_ENA_4 2
+#define V_TS_SFD_ENA_4(x) ((x) << S_TS_SFD_ENA_4)
+#define F_TS_SFD_ENA_4 V_TS_SFD_ENA_4(1U)
+
+#define S_TS_SFD_ENA_3 1
+#define V_TS_SFD_ENA_3(x) ((x) << S_TS_SFD_ENA_3)
+#define F_TS_SFD_ENA_3 V_TS_SFD_ENA_3(1U)
+
+#define S_TS_SFD_ENA_2 0
+#define V_TS_SFD_ENA_2(x) ((x) << S_TS_SFD_ENA_2)
+#define F_TS_SFD_ENA_2 V_TS_SFD_ENA_2(1U)
+
+#define A_MAC_STS_CONFIG 0x3f200
+
+#define S_STS_ENA 30
+#define V_STS_ENA(x) ((x) << S_STS_ENA)
+#define F_STS_ENA V_STS_ENA(1U)
+
+#define S_N_PPS_ENA 29
+#define V_N_PPS_ENA(x) ((x) << S_N_PPS_ENA)
+#define F_N_PPS_ENA V_N_PPS_ENA(1U)
+
+#define S_STS_RESET 28
+#define V_STS_RESET(x) ((x) << S_STS_RESET)
+#define F_STS_RESET V_STS_RESET(1U)
+
+#define S_DEBOUNCE_CNT 0
+#define M_DEBOUNCE_CNT 0xfffffffU
+#define V_DEBOUNCE_CNT(x) ((x) << S_DEBOUNCE_CNT)
+#define G_DEBOUNCE_CNT(x) (((x) >> S_DEBOUNCE_CNT) & M_DEBOUNCE_CNT)
+
+#define A_MAC_STS_COUNTER 0x3f204
+#define A_MAC_STS_COUNT_1 0x3f208
+#define A_MAC_STS_COUNT_2 0x3f20c
+#define A_MAC_STS_N_PPS_COUNT_HI 0x3f210
+#define A_MAC_STS_N_PPS_COUNT_LO 0x3f214
+#define A_MAC_STS_N_PPS_COUNTER 0x3f218
+#define A_MAC_BGR_PQ0_FIRMWARE_COMMON_0 0x4030
+
+#define S_MAC_BGR_BGR_REG_APB_SEL 0
+#define V_MAC_BGR_BGR_REG_APB_SEL(x) ((x) << S_MAC_BGR_BGR_REG_APB_SEL)
+#define F_MAC_BGR_BGR_REG_APB_SEL V_MAC_BGR_BGR_REG_APB_SEL(1U)
+
+#define A_MAC_BGR_TOP_DIG_CTRL1_REG_LSB 0x4430
+
+#define S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS 15
+#define V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(x) ((x) << S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS)
+#define F_MAC_BGR_BGR_REFCLK_CTRL_BYPASS V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(1U)
+
+#define S_MAC_BGR_BGR_COREREFCLK_SEL 14
+#define V_MAC_BGR_BGR_COREREFCLK_SEL(x) ((x) << S_MAC_BGR_BGR_COREREFCLK_SEL)
+#define F_MAC_BGR_BGR_COREREFCLK_SEL V_MAC_BGR_BGR_COREREFCLK_SEL(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_DIV 8
+#define M_MAC_BGR_BGR_TEST_CLK_DIV 0x7U
+#define V_MAC_BGR_BGR_TEST_CLK_DIV(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_DIV)
+#define G_MAC_BGR_BGR_TEST_CLK_DIV(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_DIV) & M_MAC_BGR_BGR_TEST_CLK_DIV)
+
+#define S_MAC_BGR_BGR_TEST_CLK_EN 7
+#define V_MAC_BGR_BGR_TEST_CLK_EN(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_EN)
+#define F_MAC_BGR_BGR_TEST_CLK_EN V_MAC_BGR_BGR_TEST_CLK_EN(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_BGRSEL 5
+#define M_MAC_BGR_BGR_TEST_CLK_BGRSEL 0x3U
+#define V_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+#define G_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_BGRSEL) & M_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+
+#define S_MAC_BGR_BGR_TEST_CLK_SEL 0
+#define M_MAC_BGR_BGR_TEST_CLK_SEL 0x1fU
+#define V_MAC_BGR_BGR_TEST_CLK_SEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_SEL)
+#define G_MAC_BGR_BGR_TEST_CLK_SEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_SEL) & M_MAC_BGR_BGR_TEST_CLK_SEL)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_0 0x6000
+
+#define S_MAC_BGR_BGR_REG_PRG_EN 0
+#define V_MAC_BGR_BGR_REG_PRG_EN(x) ((x) << S_MAC_BGR_BGR_REG_PRG_EN)
+#define F_MAC_BGR_BGR_REG_PRG_EN V_MAC_BGR_BGR_REG_PRG_EN(1U)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_BGR_BGR_REG_GPO 0
+#define V_MAC_BGR_BGR_REG_GPO(x) ((x) << S_MAC_BGR_BGR_REG_GPO)
+#define F_MAC_BGR_BGR_REG_GPO V_MAC_BGR_BGR_REG_GPO(1U)
+
+#define A_MAC_BGR_MGMT_SPINE_MACRO_PMA_0 0x40000
+
+#define S_MAC_BGR_CUREFCLKSEL1 0
+#define M_MAC_BGR_CUREFCLKSEL1 0x3U
+#define V_MAC_BGR_CUREFCLKSEL1(x) ((x) << S_MAC_BGR_CUREFCLKSEL1)
+#define G_MAC_BGR_CUREFCLKSEL1(x) (((x) >> S_MAC_BGR_CUREFCLKSEL1) & M_MAC_BGR_CUREFCLKSEL1)
+
+#define A_MAC_BGR_REFCLK_CONTROL_1 0x40004
+
+#define S_MAC_BGR_IM_CUREFCLKLR_EN 0
+#define V_MAC_BGR_IM_CUREFCLKLR_EN(x) ((x) << S_MAC_BGR_IM_CUREFCLKLR_EN)
+#define F_MAC_BGR_IM_CUREFCLKLR_EN V_MAC_BGR_IM_CUREFCLKLR_EN(1U)
+
+#define A_MAC_BGR_REFCLK_CONTROL_2 0x40080
+
+#define S_MAC_BGR_IM_REF_EN 0
+#define V_MAC_BGR_IM_REF_EN(x) ((x) << S_MAC_BGR_IM_REF_EN)
+#define F_MAC_BGR_IM_REF_EN V_MAC_BGR_IM_REF_EN(1U)
+
+#define A_MAC_PLL0_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL0_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL0_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL2_LOCK_STATUS)
+#define F_MAC_PLL0_PLL2_LOCK_STATUS V_MAC_PLL0_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL0_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL1_LOCK_STATUS)
+#define F_MAC_PLL0_PLL1_LOCK_STATUS V_MAC_PLL0_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL0_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL0_LOCK_STATUS)
+#define F_MAC_PLL0_PLL0_LOCK_STATUS V_MAC_PLL0_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL0_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL0_PLL_PRG_EN 0
+#define M_MAC_PLL0_PLL_PRG_EN 0xfU
+#define V_MAC_PLL0_PLL_PRG_EN(x) ((x) << S_MAC_PLL0_PLL_PRG_EN)
+#define G_MAC_PLL0_PLL_PRG_EN(x) (((x) >> S_MAC_PLL0_PLL_PRG_EN) & M_MAC_PLL0_PLL_PRG_EN)
+
+#define A_MAC_PLL0_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL0_PMA_MACRO_SELECT 0
+#define M_MAC_PLL0_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL0_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL0_PMA_MACRO_SELECT)
+#define G_MAC_PLL0_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL0_PMA_MACRO_SELECT) & M_MAC_PLL0_PMA_MACRO_SELECT)
+
+#define A_MAC_PLL1_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL1_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL1_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL2_LOCK_STATUS)
+#define F_MAC_PLL1_PLL2_LOCK_STATUS V_MAC_PLL1_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL1_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL1_LOCK_STATUS)
+#define F_MAC_PLL1_PLL1_LOCK_STATUS V_MAC_PLL1_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL1_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL0_LOCK_STATUS)
+#define F_MAC_PLL1_PLL0_LOCK_STATUS V_MAC_PLL1_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL1_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL1_PLL_PRG_EN 0
+#define M_MAC_PLL1_PLL_PRG_EN 0xfU
+#define V_MAC_PLL1_PLL_PRG_EN(x) ((x) << S_MAC_PLL1_PLL_PRG_EN)
+#define G_MAC_PLL1_PLL_PRG_EN(x) (((x) >> S_MAC_PLL1_PLL_PRG_EN) & M_MAC_PLL1_PLL_PRG_EN)
+
+#define A_MAC_PLL1_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL1_PMA_MACRO_SELECT 0
+#define M_MAC_PLL1_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL1_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL1_PMA_MACRO_SELECT)
+#define G_MAC_PLL1_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL1_PMA_MACRO_SELECT) & M_MAC_PLL1_PMA_MACRO_SELECT)
+
+/* registers for module CRYPTO_0 */
+#define CRYPTO_0_BASE_ADDR 0x44000
+
+#define A_TLS_TX_CH_CONFIG 0x44000
+
+#define S_SMALL_LEN_THRESH 16
+#define M_SMALL_LEN_THRESH 0xffffU
+#define V_SMALL_LEN_THRESH(x) ((x) << S_SMALL_LEN_THRESH)
+#define G_SMALL_LEN_THRESH(x) (((x) >> S_SMALL_LEN_THRESH) & M_SMALL_LEN_THRESH)
+
+#define S_CIPH0_CTL_SEL 12
+#define M_CIPH0_CTL_SEL 0x7U
+#define V_CIPH0_CTL_SEL(x) ((x) << S_CIPH0_CTL_SEL)
+#define G_CIPH0_CTL_SEL(x) (((x) >> S_CIPH0_CTL_SEL) & M_CIPH0_CTL_SEL)
+
+#define S_CIPHN_CTL_SEL 9
+#define M_CIPHN_CTL_SEL 0x7U
+#define V_CIPHN_CTL_SEL(x) ((x) << S_CIPHN_CTL_SEL)
+#define G_CIPHN_CTL_SEL(x) (((x) >> S_CIPHN_CTL_SEL) & M_CIPHN_CTL_SEL)
+
+#define S_MAC_CTL_SEL 6
+#define M_MAC_CTL_SEL 0x7U
+#define V_MAC_CTL_SEL(x) ((x) << S_MAC_CTL_SEL)
+#define G_MAC_CTL_SEL(x) (((x) >> S_MAC_CTL_SEL) & M_MAC_CTL_SEL)
+
+#define S_CIPH0_XOR_SEL 5
+#define V_CIPH0_XOR_SEL(x) ((x) << S_CIPH0_XOR_SEL)
+#define F_CIPH0_XOR_SEL V_CIPH0_XOR_SEL(1U)
+
+#define S_CIPHN_XOR_SEL 4
+#define V_CIPHN_XOR_SEL(x) ((x) << S_CIPHN_XOR_SEL)
+#define F_CIPHN_XOR_SEL V_CIPHN_XOR_SEL(1U)
+
+#define S_MAC_XOR_SEL 3
+#define V_MAC_XOR_SEL(x) ((x) << S_MAC_XOR_SEL)
+#define F_MAC_XOR_SEL V_MAC_XOR_SEL(1U)
+
+#define S_CIPH0_DP_SEL 2
+#define V_CIPH0_DP_SEL(x) ((x) << S_CIPH0_DP_SEL)
+#define F_CIPH0_DP_SEL V_CIPH0_DP_SEL(1U)
+
+#define S_CIPHN_DP_SEL 1
+#define V_CIPHN_DP_SEL(x) ((x) << S_CIPHN_DP_SEL)
+#define F_CIPHN_DP_SEL V_CIPHN_DP_SEL(1U)
+
+#define S_MAC_DP_SEL 0
+#define V_MAC_DP_SEL(x) ((x) << S_MAC_DP_SEL)
+#define F_MAC_DP_SEL V_MAC_DP_SEL(1U)
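+
+/*
+ * Sketch: a config word for A_TLS_TX_CH_CONFIG composed from the field
+ * macros above; every value chosen here is illustrative only, not a
+ * recommended setting:
+ *
+ *	cfg = V_SMALL_LEN_THRESH(256) | V_CIPH0_CTL_SEL(1) |
+ *	    V_CIPHN_CTL_SEL(1) | V_MAC_CTL_SEL(1) | F_MAC_DP_SEL;
+ */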
+
+#define A_TLS_TX_CH_PERR_INJECT 0x44004
+#define A_TLS_TX_CH_INT_ENABLE 0x44008
+
+#define S_KEYLENERR 3
+#define V_KEYLENERR(x) ((x) << S_KEYLENERR)
+#define F_KEYLENERR V_KEYLENERR(1U)
+
+#define S_INTF1_PERR 2
+#define V_INTF1_PERR(x) ((x) << S_INTF1_PERR)
+#define F_INTF1_PERR V_INTF1_PERR(1U)
+
+#define S_INTF0_PERR 1
+#define V_INTF0_PERR(x) ((x) << S_INTF0_PERR)
+#define F_INTF0_PERR V_INTF0_PERR(1U)
+
+#define A_TLS_TX_CH_INT_CAUSE 0x4400c
+
+#define S_KEX_CERR 4
+#define V_KEX_CERR(x) ((x) << S_KEX_CERR)
+#define F_KEX_CERR V_KEX_CERR(1U)
+
+#define A_TLS_TX_CH_PERR_ENABLE 0x44010
+#define A_TLS_TX_CH_DEBUG_FLAGS 0x44014
+#define A_TLS_TX_CH_HMACCTRL_CFG 0x44020
+#define A_TLS_TX_CH_ERR_RSP_HDR 0x44024
+#define A_TLS_TX_CH_HANG_TIMEOUT 0x44028
+
+#define S_T7_TIMEOUT 0
+#define M_T7_TIMEOUT 0xffU
+#define V_T7_TIMEOUT(x) ((x) << S_T7_TIMEOUT)
+#define G_T7_TIMEOUT(x) (((x) >> S_T7_TIMEOUT) & M_T7_TIMEOUT)
+
+#define A_TLS_TX_CH_DBG_STEP_CTRL 0x44030
+
+#define S_DBG_STEP_CTRL 1
+#define V_DBG_STEP_CTRL(x) ((x) << S_DBG_STEP_CTRL)
+#define F_DBG_STEP_CTRL V_DBG_STEP_CTRL(1U)
+
+#define S_DBG_STEP_EN 0
+#define V_DBG_STEP_EN(x) ((x) << S_DBG_STEP_EN)
+#define F_DBG_STEP_EN V_DBG_STEP_EN(1U)
+
+#define A_TLS_TX_DBG_SELL_DATA 0x44714
+#define A_TLS_TX_DBG_SELH_DATA 0x44718
+#define A_TLS_TX_DBG_SEL_CTRL 0x44730
+#define A_TLS_TX_GLOBAL_CONFIG 0x447c0
+
+#define S_QUIC_EN 2
+#define V_QUIC_EN(x) ((x) << S_QUIC_EN)
+#define F_QUIC_EN V_QUIC_EN(1U)
+
+#define S_IPSEC_IDX_UPD_EN 1
+#define V_IPSEC_IDX_UPD_EN(x) ((x) << S_IPSEC_IDX_UPD_EN)
+#define F_IPSEC_IDX_UPD_EN V_IPSEC_IDX_UPD_EN(1U)
+
+#define S_IPSEC_IDX_CTL 0
+#define V_IPSEC_IDX_CTL(x) ((x) << S_IPSEC_IDX_CTL)
+#define F_IPSEC_IDX_CTL V_IPSEC_IDX_CTL(1U)
+
+#define A_TLS_TX_CGEN 0x447f0
+
+#define S_CHCGEN 0
+#define M_CHCGEN 0x3fU
+#define V_CHCGEN(x) ((x) << S_CHCGEN)
+#define G_CHCGEN(x) (((x) >> S_CHCGEN) & M_CHCGEN)
+
+#define A_TLS_TX_IND_ADDR 0x447f8
+
+#define S_T7_3_ADDR 0
+#define M_T7_3_ADDR 0xfffU
+#define V_T7_3_ADDR(x) ((x) << S_T7_3_ADDR)
+#define G_T7_3_ADDR(x) (((x) >> S_T7_3_ADDR) & M_T7_3_ADDR)
+
+#define A_TLS_TX_IND_DATA 0x447fc
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_LO 0x0
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_HI 0x1
+#define A_TLS_TX_CH_IND_ING_PKT_CNT 0x2
+#define A_TLS_TX_CH_IND_DISPATCH_PKT_CNT 0x4
+#define A_TLS_TX_CH_IND_ERROR_CNTS0 0x5
+#define A_TLS_TX_CH_IND_DEC_ERROR_CNTS 0x7
+#define A_TLS_TX_CH_IND_DBG_SPP_CFG 0x1f
+
+#define S_DIS_IF_ERR 11
+#define V_DIS_IF_ERR(x) ((x) << S_DIS_IF_ERR)
+#define F_DIS_IF_ERR V_DIS_IF_ERR(1U)
+
+#define S_DIS_ERR_MSG 10
+#define V_DIS_ERR_MSG(x) ((x) << S_DIS_ERR_MSG)
+#define F_DIS_ERR_MSG V_DIS_ERR_MSG(1U)
+
+#define S_DIS_BP_SEQF 9
+#define V_DIS_BP_SEQF(x) ((x) << S_DIS_BP_SEQF)
+#define F_DIS_BP_SEQF V_DIS_BP_SEQF(1U)
+
+#define S_DIS_BP_LENF 8
+#define V_DIS_BP_LENF(x) ((x) << S_DIS_BP_LENF)
+#define F_DIS_BP_LENF V_DIS_BP_LENF(1U)
+
+#define S_DIS_KEX_ERR 6
+#define V_DIS_KEX_ERR(x) ((x) << S_DIS_KEX_ERR)
+#define F_DIS_KEX_ERR V_DIS_KEX_ERR(1U)
+
+#define S_CLR_STS 5
+#define V_CLR_STS(x) ((x) << S_CLR_STS)
+#define F_CLR_STS V_CLR_STS(1U)
+
+#define S_TGL_CNT 4
+#define V_TGL_CNT(x) ((x) << S_TGL_CNT)
+#define F_TGL_CNT V_TGL_CNT(1U)
+
+#define S_ENB_PAZ 3
+#define V_ENB_PAZ(x) ((x) << S_ENB_PAZ)
+#define F_ENB_PAZ V_ENB_PAZ(1U)
+
+#define S_DIS_NOP 2
+#define V_DIS_NOP(x) ((x) << S_DIS_NOP)
+#define F_DIS_NOP V_DIS_NOP(1U)
+
+#define S_DIS_CPL_ERR 1
+#define V_DIS_CPL_ERR(x) ((x) << S_DIS_CPL_ERR)
+#define F_DIS_CPL_ERR V_DIS_CPL_ERR(1U)
+
+#define S_DIS_OFF_ERR 0
+#define V_DIS_OFF_ERR(x) ((x) << S_DIS_OFF_ERR)
+#define F_DIS_OFF_ERR V_DIS_OFF_ERR(1U)
+
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID0 0x20
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID1 0x21
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID2 0x22
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID3 0x23
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID4 0x24
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID5 0x25
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID6 0x26
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID7 0x27
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W0 0x28
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W1 0x29
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W2 0x2a
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W3 0x2b
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W0 0x2c
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W1 0x2d
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W2 0x2e
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W3 0x2f
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_ERR 0x30
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_BP 0x31
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_M 0x32
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_L 0x33
+#define A_TLS_TX_CH_IND_DBG_PKT_STAT 0x3f
+
+/* registers for module CRYPTO_1 */
+#define CRYPTO_1_BASE_ADDR 0x45000
+
+/* registers for module CRYPTO_KEY */
+#define CRYPTO_KEY_BASE_ADDR 0x46000
+
+#define A_CRYPTO_KEY_CONFIG 0x46000
+
+#define S_ESNWIN 1
+#define M_ESNWIN 0x7U
+#define V_ESNWIN(x) ((x) << S_ESNWIN)
+#define G_ESNWIN(x) (((x) >> S_ESNWIN) & M_ESNWIN)
+
+#define S_INGKEY96 0
+#define V_INGKEY96(x) ((x) << S_INGKEY96)
+#define F_INGKEY96 V_INGKEY96(1U)
+
+#define A_CRYPTO_KEY_RST 0x46004
+
+#define S_CORE1RST 1
+#define V_CORE1RST(x) ((x) << S_CORE1RST)
+#define F_CORE1RST V_CORE1RST(1U)
+
+#define S_CORE0RST 0
+#define V_CORE0RST(x) ((x) << S_CORE0RST)
+#define F_CORE0RST V_CORE0RST(1U)
+
+#define A_CRYPTO_KEY_INT_ENABLE 0x46008
+
+#define S_MA_FIFO_PERR 22
+#define V_MA_FIFO_PERR(x) ((x) << S_MA_FIFO_PERR)
+#define F_MA_FIFO_PERR V_MA_FIFO_PERR(1U)
+
+#define S_MA_RSP_PERR 21
+#define V_MA_RSP_PERR(x) ((x) << S_MA_RSP_PERR)
+#define F_MA_RSP_PERR V_MA_RSP_PERR(1U)
+
+#define S_ING_CACHE_DATA_PERR 19
+#define V_ING_CACHE_DATA_PERR(x) ((x) << S_ING_CACHE_DATA_PERR)
+#define F_ING_CACHE_DATA_PERR V_ING_CACHE_DATA_PERR(1U)
+
+#define S_ING_CACHE_TAG_PERR 18
+#define V_ING_CACHE_TAG_PERR(x) ((x) << S_ING_CACHE_TAG_PERR)
+#define F_ING_CACHE_TAG_PERR V_ING_CACHE_TAG_PERR(1U)
+
+#define S_LKP_KEY_REQ_PERR 17
+#define V_LKP_KEY_REQ_PERR(x) ((x) << S_LKP_KEY_REQ_PERR)
+#define F_LKP_KEY_REQ_PERR V_LKP_KEY_REQ_PERR(1U)
+
+#define S_LKP_CLIP_TCAM_PERR 16
+#define V_LKP_CLIP_TCAM_PERR(x) ((x) << S_LKP_CLIP_TCAM_PERR)
+#define F_LKP_CLIP_TCAM_PERR V_LKP_CLIP_TCAM_PERR(1U)
+
+#define S_LKP_MAIN_TCAM_PERR 15
+#define V_LKP_MAIN_TCAM_PERR(x) ((x) << S_LKP_MAIN_TCAM_PERR)
+#define F_LKP_MAIN_TCAM_PERR V_LKP_MAIN_TCAM_PERR(1U)
+
+#define S_EGR_KEY_REQ_PERR 14
+#define V_EGR_KEY_REQ_PERR(x) ((x) << S_EGR_KEY_REQ_PERR)
+#define F_EGR_KEY_REQ_PERR V_EGR_KEY_REQ_PERR(1U)
+
+#define S_EGR_CACHE_DATA_PERR 13
+#define V_EGR_CACHE_DATA_PERR(x) ((x) << S_EGR_CACHE_DATA_PERR)
+#define F_EGR_CACHE_DATA_PERR V_EGR_CACHE_DATA_PERR(1U)
+
+#define S_EGR_CACHE_TAG_PERR 12
+#define V_EGR_CACHE_TAG_PERR(x) ((x) << S_EGR_CACHE_TAG_PERR)
+#define F_EGR_CACHE_TAG_PERR V_EGR_CACHE_TAG_PERR(1U)
+
+#define S_CIM_PERR 11
+#define V_CIM_PERR(x) ((x) << S_CIM_PERR)
+#define F_CIM_PERR V_CIM_PERR(1U)
+
+#define S_MA_INV_RSP_TAG 10
+#define V_MA_INV_RSP_TAG(x) ((x) << S_MA_INV_RSP_TAG)
+#define F_MA_INV_RSP_TAG V_MA_INV_RSP_TAG(1U)
+
+#define S_ING_KEY_RANGE_ERR 9
+#define V_ING_KEY_RANGE_ERR(x) ((x) << S_ING_KEY_RANGE_ERR)
+#define F_ING_KEY_RANGE_ERR V_ING_KEY_RANGE_ERR(1U)
+
+#define S_ING_MFIFO_OVFL 8
+#define V_ING_MFIFO_OVFL(x) ((x) << S_ING_MFIFO_OVFL)
+#define F_ING_MFIFO_OVFL V_ING_MFIFO_OVFL(1U)
+
+#define S_LKP_REQ_OVFL 7
+#define V_LKP_REQ_OVFL(x) ((x) << S_LKP_REQ_OVFL)
+#define F_LKP_REQ_OVFL V_LKP_REQ_OVFL(1U)
+
+#define S_EOK_WAIT_ERR 6
+#define V_EOK_WAIT_ERR(x) ((x) << S_EOK_WAIT_ERR)
+#define F_EOK_WAIT_ERR V_EOK_WAIT_ERR(1U)
+
+#define S_EGR_KEY_RANGE_ERR 5
+#define V_EGR_KEY_RANGE_ERR(x) ((x) << S_EGR_KEY_RANGE_ERR)
+#define F_EGR_KEY_RANGE_ERR V_EGR_KEY_RANGE_ERR(1U)
+
+#define S_EGR_MFIFO_OVFL 4
+#define V_EGR_MFIFO_OVFL(x) ((x) << S_EGR_MFIFO_OVFL)
+#define F_EGR_MFIFO_OVFL V_EGR_MFIFO_OVFL(1U)
+
+#define S_SEQ_WRAP_HP_OVFL 3
+#define V_SEQ_WRAP_HP_OVFL(x) ((x) << S_SEQ_WRAP_HP_OVFL)
+#define F_SEQ_WRAP_HP_OVFL V_SEQ_WRAP_HP_OVFL(1U)
+
+#define S_SEQ_WRAP_LP_OVFL 2
+#define V_SEQ_WRAP_LP_OVFL(x) ((x) << S_SEQ_WRAP_LP_OVFL)
+#define F_SEQ_WRAP_LP_OVFL V_SEQ_WRAP_LP_OVFL(1U)
+
+#define S_EGR_SEQ_WRAP_HP 1
+#define V_EGR_SEQ_WRAP_HP(x) ((x) << S_EGR_SEQ_WRAP_HP)
+#define F_EGR_SEQ_WRAP_HP V_EGR_SEQ_WRAP_HP(1U)
+
+#define S_EGR_SEQ_WRAP_LP 0
+#define V_EGR_SEQ_WRAP_LP(x) ((x) << S_EGR_SEQ_WRAP_LP)
+#define F_EGR_SEQ_WRAP_LP V_EGR_SEQ_WRAP_LP(1U)
+
+#define A_CRYPTO_KEY_INT_CAUSE 0x4600c
+#define A_CRYPTO_KEY_PERR_ENABLE 0x46010
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_LP_KEY_ID 0x46018
+
+#define S_KEY_VALID 31
+#define V_KEY_VALID(x) ((x) << S_KEY_VALID)
+#define F_KEY_VALID V_KEY_VALID(1U)
+
+#define S_KEY_ID 0
+#define M_KEY_ID 0x7fffffffU
+#define V_KEY_ID(x) ((x) << S_KEY_ID)
+#define G_KEY_ID(x) (((x) >> S_KEY_ID) & M_KEY_ID)
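+
+/*
+ * Sketch: decoding A_CRYPTO_KEY_EGR_SEQ_WRAP_LP_KEY_ID with the macros
+ * above; read_reg is an assumed accessor, not defined in this file:
+ *
+ *	v = read_reg(sc, A_CRYPTO_KEY_EGR_SEQ_WRAP_LP_KEY_ID);
+ *	if (v & F_KEY_VALID)
+ *		key = G_KEY_ID(v);
+ */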
+
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_HP_KEY_ID 0x4601c
+#define A_CRYPTO_KEY_TCAM_DATA0 0x46020
+#define A_CRYPTO_KEY_TCAM_DATA1 0x46024
+#define A_CRYPTO_KEY_TCAM_DATA2 0x46028
+#define A_CRYPTO_KEY_TCAM_DATA3 0x4602c
+#define A_CRYPTO_KEY_TCAM_CTL 0x46030
+
+#define S_SRCHMHIT 21
+#define V_SRCHMHIT(x) ((x) << S_SRCHMHIT)
+#define F_SRCHMHIT V_SRCHMHIT(1U)
+
+#define S_T7_BUSY 20
+#define V_T7_BUSY(x) ((x) << S_T7_BUSY)
+#define F_T7_BUSY V_T7_BUSY(1U)
+
+#define S_SRCHHIT 19
+#define V_SRCHHIT(x) ((x) << S_SRCHHIT)
+#define F_SRCHHIT V_SRCHHIT(1U)
+
+#define S_IPVERSION 18
+#define V_IPVERSION(x) ((x) << S_IPVERSION)
+#define F_IPVERSION V_IPVERSION(1U)
+
+#define S_BITSEL 17
+#define V_BITSEL(x) ((x) << S_BITSEL)
+#define F_BITSEL V_BITSEL(1U)
+
+#define S_TCAMSEL 16
+#define V_TCAMSEL(x) ((x) << S_TCAMSEL)
+#define F_TCAMSEL V_TCAMSEL(1U)
+
+#define S_CMDTYPE 14
+#define M_CMDTYPE 0x3U
+#define V_CMDTYPE(x) ((x) << S_CMDTYPE)
+#define G_CMDTYPE(x) (((x) >> S_CMDTYPE) & M_CMDTYPE)
+
+#define S_TCAMINDEX 0
+#define M_TCAMINDEX 0x3fffU
+#define V_TCAMINDEX(x) ((x) << S_TCAMINDEX)
+#define G_TCAMINDEX(x) (((x) >> S_TCAMINDEX) & M_TCAMINDEX)
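+
+/*
+ * Sketch: a TCAM operation through A_CRYPTO_KEY_TCAM_CTL presumably
+ * writes a command and index, then polls the busy flag.  The CMDTYPE
+ * encodings are not defined in this file, so cmd/idx are placeholders
+ * and the accessors are assumed helpers:
+ *
+ *	write_reg(sc, A_CRYPTO_KEY_TCAM_CTL,
+ *	    V_CMDTYPE(cmd) | V_TCAMINDEX(idx));
+ *	while (read_reg(sc, A_CRYPTO_KEY_TCAM_CTL) & F_T7_BUSY)
+ *		;
+ */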
+
+#define A_CRYPTO_KEY_TCAM_CONFIG 0x46034
+
+#define S_T7_CLTCAMDEEPSLEEP_STAT 3
+#define V_T7_CLTCAMDEEPSLEEP_STAT(x) ((x) << S_T7_CLTCAMDEEPSLEEP_STAT)
+#define F_T7_CLTCAMDEEPSLEEP_STAT V_T7_CLTCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_TCAMDEEPSLEEP_STAT 2
+#define V_T7_TCAMDEEPSLEEP_STAT(x) ((x) << S_T7_TCAMDEEPSLEEP_STAT)
+#define F_T7_TCAMDEEPSLEEP_STAT V_T7_TCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_CLTCAMDEEPSLEEP 1
+#define V_T7_CLTCAMDEEPSLEEP(x) ((x) << S_T7_CLTCAMDEEPSLEEP)
+#define F_T7_CLTCAMDEEPSLEEP V_T7_CLTCAMDEEPSLEEP(1U)
+
+#define S_T7_TCAMDEEPSLEEP 0
+#define V_T7_TCAMDEEPSLEEP(x) ((x) << S_T7_TCAMDEEPSLEEP)
+#define F_T7_TCAMDEEPSLEEP V_T7_TCAMDEEPSLEEP(1U)
+
+#define A_CRYPTO_KEY_TX_CMM_CONFIG 0x46040
+#define A_CRYPTO_KEY_TX_TNL_BASE 0x46044
+#define A_CRYPTO_KEY_TX_TRN_BASE 0x46048
+#define A_CRYPTO_KEY_TX_MAX_KEYS 0x4604c
+
+#define S_TNL_MAX 16
+#define M_TNL_MAX 0xffffU
+#define V_TNL_MAX(x) ((x) << S_TNL_MAX)
+#define G_TNL_MAX(x) (((x) >> S_TNL_MAX) & M_TNL_MAX)
+
+#define S_TRN_MAX 0
+#define M_TRN_MAX 0xffffU
+#define V_TRN_MAX(x) ((x) << S_TRN_MAX)
+#define G_TRN_MAX(x) (((x) >> S_TRN_MAX) & M_TRN_MAX)
+
+#define A_CRYPTO_KEY_TX_SEQ_STAT 0x46050
+
+#define S_ESN 24
+#define V_ESN(x) ((x) << S_ESN)
+#define F_ESN V_ESN(1U)
+
+#define S_SEQHI 20
+#define M_SEQHI 0xfU
+#define V_SEQHI(x) ((x) << S_SEQHI)
+#define G_SEQHI(x) (((x) >> S_SEQHI) & M_SEQHI)
+
+#define S_KEYID 0
+#define M_KEYID 0xfffffU
+#define V_KEYID(x) ((x) << S_KEYID)
+#define G_KEYID(x) (((x) >> S_KEYID) & M_KEYID)
+
+#define A_CRYPTO_KEY_RX_CMM_CONFIG 0x46060
+#define A_CRYPTO_KEY_RX_BASE 0x46064
+#define A_CRYPTO_KEY_RX_MAX_KEYS 0x46068
+
+#define S_MAXKEYS 0
+#define M_MAXKEYS 0xffffU
+#define V_MAXKEYS(x) ((x) << S_MAXKEYS)
+#define G_MAXKEYS(x) (((x) >> S_MAXKEYS) & M_MAXKEYS)
+
+#define A_CRYPTO_KEY_CRYPTO_REVISION 0x4606c
+#define A_CRYPTO_KEY_RX_SEQ_STAT 0x46070
+#define A_CRYPTO_KEY_TCAM_BIST_CTRL 0x46074
+#define A_CRYPTO_KEY_TCAM_BIST_CB_PASS 0x46078
+#define A_CRYPTO_KEY_TCAM_BIST_CB_BUSY 0x4607c
+#define A_CRYPTO_KEY_DBG_SEL_CTRL 0x46080
+
+#define S_SEL_OVR_EN 16
+#define V_SEL_OVR_EN(x) ((x) << S_SEL_OVR_EN)
+#define F_SEL_OVR_EN V_SEL_OVR_EN(1U)
+
+#define S_T7_1_SELH 8
+#define M_T7_1_SELH 0xffU
+#define V_T7_1_SELH(x) ((x) << S_T7_1_SELH)
+#define G_T7_1_SELH(x) (((x) >> S_T7_1_SELH) & M_T7_1_SELH)
+
+#define S_T7_1_SELL 0
+#define M_T7_1_SELL 0xffU
+#define V_T7_1_SELL(x) ((x) << S_T7_1_SELL)
+#define G_T7_1_SELL(x) (((x) >> S_T7_1_SELL) & M_T7_1_SELL)
+
+#define A_CRYPTO_KEY_DBG_SELL_DATA 0x46084
+#define A_CRYPTO_KEY_DBG_SELH_DATA 0x46088
+
+/* registers for module ARM */
+#define ARM_BASE_ADDR 0x47000
+
+#define A_ARM_CPU_POR_RST 0x47000
+
+#define S_CPUPORRSTN3 3
+#define V_CPUPORRSTN3(x) ((x) << S_CPUPORRSTN3)
+#define F_CPUPORRSTN3 V_CPUPORRSTN3(1U)
+
+#define S_CPUPORRSTN2 2
+#define V_CPUPORRSTN2(x) ((x) << S_CPUPORRSTN2)
+#define F_CPUPORRSTN2 V_CPUPORRSTN2(1U)
+
+#define S_CPUPORRSTN1 1
+#define V_CPUPORRSTN1(x) ((x) << S_CPUPORRSTN1)
+#define F_CPUPORRSTN1 V_CPUPORRSTN1(1U)
+
+#define S_CPUPORRSTN0 0
+#define V_CPUPORRSTN0(x) ((x) << S_CPUPORRSTN0)
+#define F_CPUPORRSTN0 V_CPUPORRSTN0(1U)
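+
+/*
+ * Sketch: the RSTN suffix on the bits above suggests active-low,
+ * per-core reset controls; e.g. releasing core 0 from power-on reset
+ * (the accessors are assumed helpers, and the polarity is an inference
+ * from the naming, not documented here):
+ *
+ *	write_reg(sc, A_ARM_CPU_POR_RST,
+ *	    read_reg(sc, A_ARM_CPU_POR_RST) | F_CPUPORRSTN0);
+ */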
+
+#define A_ARM_CPU_CORE_RST 0x47004
+
+#define S_CPUCORERSTN3 3
+#define V_CPUCORERSTN3(x) ((x) << S_CPUCORERSTN3)
+#define F_CPUCORERSTN3 V_CPUCORERSTN3(1U)
+
+#define S_CPUCORERSTN2 2
+#define V_CPUCORERSTN2(x) ((x) << S_CPUCORERSTN2)
+#define F_CPUCORERSTN2 V_CPUCORERSTN2(1U)
+
+#define S_CPUCORERSTN1 1
+#define V_CPUCORERSTN1(x) ((x) << S_CPUCORERSTN1)
+#define F_CPUCORERSTN1 V_CPUCORERSTN1(1U)
+
+#define S_CPUCORERSTN0 0
+#define V_CPUCORERSTN0(x) ((x) << S_CPUCORERSTN0)
+#define F_CPUCORERSTN0 V_CPUCORERSTN0(1U)
+
+#define A_ARM_CPU_WARM_RST_REQ 0x47008
+
+#define S_CPUWARMRSTREQ3 3
+#define V_CPUWARMRSTREQ3(x) ((x) << S_CPUWARMRSTREQ3)
+#define F_CPUWARMRSTREQ3 V_CPUWARMRSTREQ3(1U)
+
+#define S_CPUWARMRSTREQ2 2
+#define V_CPUWARMRSTREQ2(x) ((x) << S_CPUWARMRSTREQ2)
+#define F_CPUWARMRSTREQ2 V_CPUWARMRSTREQ2(1U)
+
+#define S_CPUWARMRSTREQ1 1
+#define V_CPUWARMRSTREQ1(x) ((x) << S_CPUWARMRSTREQ1)
+#define F_CPUWARMRSTREQ1 V_CPUWARMRSTREQ1(1U)
+
+#define S_CPUWARMRSTREQ0 0
+#define V_CPUWARMRSTREQ0(x) ((x) << S_CPUWARMRSTREQ0)
+#define F_CPUWARMRSTREQ0 V_CPUWARMRSTREQ0(1U)
+
+#define A_ARM_CPU_L2_RST 0x4700c
+
+#define S_CPUL2RSTN 0
+#define V_CPUL2RSTN(x) ((x) << S_CPUL2RSTN)
+#define F_CPUL2RSTN V_CPUL2RSTN(1U)
+
+#define A_ARM_CPU_L2_RST_DIS 0x47010
+
+#define S_CPUL2RSTDISABLE 0
+#define V_CPUL2RSTDISABLE(x) ((x) << S_CPUL2RSTDISABLE)
+#define F_CPUL2RSTDISABLE V_CPUL2RSTDISABLE(1U)
+
+#define A_ARM_CPU_PRESET_DBG 0x47014
+
+#define S_CPUPRESETDBGN 0
+#define V_CPUPRESETDBGN(x) ((x) << S_CPUPRESETDBGN)
+#define F_CPUPRESETDBGN V_CPUPRESETDBGN(1U)
+
+#define A_ARM_PL_DMA_AW_OFFSET 0x47018
+
+#define S_PL_DMA_AW_OFFSET 0
+#define M_PL_DMA_AW_OFFSET 0x3fffffffU
+#define V_PL_DMA_AW_OFFSET(x) ((x) << S_PL_DMA_AW_OFFSET)
+#define G_PL_DMA_AW_OFFSET(x) (((x) >> S_PL_DMA_AW_OFFSET) & M_PL_DMA_AW_OFFSET)
+
+#define A_ARM_PL_DMA_AR_OFFSET 0x4701c
+
+#define S_PL_DMA_AR_OFFSET 0
+#define M_PL_DMA_AR_OFFSET 0x3fffffffU
+#define V_PL_DMA_AR_OFFSET(x) ((x) << S_PL_DMA_AR_OFFSET)
+#define G_PL_DMA_AR_OFFSET(x) (((x) >> S_PL_DMA_AR_OFFSET) & M_PL_DMA_AR_OFFSET)
+
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR0 0x47020
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR1 0x47024
+
+#define S_CPURESETVECBA1 0
+#define M_CPURESETVECBA1 0x3ffU
+#define V_CPURESETVECBA1(x) ((x) << S_CPURESETVECBA1)
+#define G_CPURESETVECBA1(x) (((x) >> S_CPURESETVECBA1) & M_CPURESETVECBA1)
+
+#define A_ARM_CPU_PMU_EVENT 0x47028
+
+#define S_CPUPMUEVENT 0
+#define M_CPUPMUEVENT 0x1ffffffU
+#define V_CPUPMUEVENT(x) ((x) << S_CPUPMUEVENT)
+#define G_CPUPMUEVENT(x) (((x) >> S_CPUPMUEVENT) & M_CPUPMUEVENT)
+
+#define A_ARM_DMA_RST 0x4702c
+
+#define S_DMA_PL_RST_N 0
+#define V_DMA_PL_RST_N(x) ((x) << S_DMA_PL_RST_N)
+#define F_DMA_PL_RST_N V_DMA_PL_RST_N(1U)
+
+#define A_ARM_PLM_RID_CFG 0x4703c
+#define A_ARM_PLM_EROM_CFG 0x47040
+#define A_ARM_PL_ARM_HDR_CFG 0x4704c
+#define A_ARM_RC_INT_STATUS 0x4705c
+
+#define S_RC_INT_STATUS_REG 0
+#define M_RC_INT_STATUS_REG 0x3fU
+#define V_RC_INT_STATUS_REG(x) ((x) << S_RC_INT_STATUS_REG)
+#define G_RC_INT_STATUS_REG(x) (((x) >> S_RC_INT_STATUS_REG) & M_RC_INT_STATUS_REG)
+
+#define A_ARM_CPU_DBG_PWR_UP_REQ 0x47060
+
+#define S_CPUDBGPWRUPREQ3 3
+#define V_CPUDBGPWRUPREQ3(x) ((x) << S_CPUDBGPWRUPREQ3)
+#define F_CPUDBGPWRUPREQ3 V_CPUDBGPWRUPREQ3(1U)
+
+#define S_CPUDBGPWRUPREQ2 2
+#define V_CPUDBGPWRUPREQ2(x) ((x) << S_CPUDBGPWRUPREQ2)
+#define F_CPUDBGPWRUPREQ2 V_CPUDBGPWRUPREQ2(1U)
+
+#define S_CPUDBGPWRUPREQ1 1
+#define V_CPUDBGPWRUPREQ1(x) ((x) << S_CPUDBGPWRUPREQ1)
+#define F_CPUDBGPWRUPREQ1 V_CPUDBGPWRUPREQ1(1U)
+
+#define S_CPUDBGPWRUPREQ0 0
+#define V_CPUDBGPWRUPREQ0(x) ((x) << S_CPUDBGPWRUPREQ0)
+#define F_CPUDBGPWRUPREQ0 V_CPUDBGPWRUPREQ0(1U)
+
+#define A_ARM_CPU_STANDBY_WFE_WFI 0x47064
+
+#define S_CPUSTANDBYWFIL2 8
+#define V_CPUSTANDBYWFIL2(x) ((x) << S_CPUSTANDBYWFIL2)
+#define F_CPUSTANDBYWFIL2 V_CPUSTANDBYWFIL2(1U)
+
+#define S_CPUSTANDBYWFI3 7
+#define V_CPUSTANDBYWFI3(x) ((x) << S_CPUSTANDBYWFI3)
+#define F_CPUSTANDBYWFI3 V_CPUSTANDBYWFI3(1U)
+
+#define S_CPUSTANDBYWFI2 6
+#define V_CPUSTANDBYWFI2(x) ((x) << S_CPUSTANDBYWFI2)
+#define F_CPUSTANDBYWFI2 V_CPUSTANDBYWFI2(1U)
+
+#define S_CPUSTANDBYWFI1 5
+#define V_CPUSTANDBYWFI1(x) ((x) << S_CPUSTANDBYWFI1)
+#define F_CPUSTANDBYWFI1 V_CPUSTANDBYWFI1(1U)
+
+#define S_CPUSTANDBYWFI0 4
+#define V_CPUSTANDBYWFI0(x) ((x) << S_CPUSTANDBYWFI0)
+#define F_CPUSTANDBYWFI0 V_CPUSTANDBYWFI0(1U)
+
+#define S_CPUSTANDBYWFE3 3
+#define V_CPUSTANDBYWFE3(x) ((x) << S_CPUSTANDBYWFE3)
+#define F_CPUSTANDBYWFE3 V_CPUSTANDBYWFE3(1U)
+
+#define S_CPUSTANDBYWFE2 2
+#define V_CPUSTANDBYWFE2(x) ((x) << S_CPUSTANDBYWFE2)
+#define F_CPUSTANDBYWFE2 V_CPUSTANDBYWFE2(1U)
+
+#define S_CPUSTANDBYWFE1 1
+#define V_CPUSTANDBYWFE1(x) ((x) << S_CPUSTANDBYWFE1)
+#define F_CPUSTANDBYWFE1 V_CPUSTANDBYWFE1(1U)
+
+#define S_CPUSTANDBYWFE0 0
+#define V_CPUSTANDBYWFE0(x) ((x) << S_CPUSTANDBYWFE0)
+#define F_CPUSTANDBYWFE0 V_CPUSTANDBYWFE0(1U)
+
+#define A_ARM_CPU_SMPEN 0x47068
+
+#define S_CPUSMPEN3 3
+#define V_CPUSMPEN3(x) ((x) << S_CPUSMPEN3)
+#define F_CPUSMPEN3 V_CPUSMPEN3(1U)
+
+#define S_CPUSMPEN2 2
+#define V_CPUSMPEN2(x) ((x) << S_CPUSMPEN2)
+#define F_CPUSMPEN2 V_CPUSMPEN2(1U)
+
+#define S_CPUSMPEN1 1
+#define V_CPUSMPEN1(x) ((x) << S_CPUSMPEN1)
+#define F_CPUSMPEN1 V_CPUSMPEN1(1U)
+
+#define S_CPUSMPEN0 0
+#define V_CPUSMPEN0(x) ((x) << S_CPUSMPEN0)
+#define F_CPUSMPEN0 V_CPUSMPEN0(1U)
+
+#define A_ARM_CPU_QACTIVE 0x4706c
+
+#define S_CPUQACTIVE3 3
+#define V_CPUQACTIVE3(x) ((x) << S_CPUQACTIVE3)
+#define F_CPUQACTIVE3 V_CPUQACTIVE3(1U)
+
+#define S_CPUQACTIVE2 2
+#define V_CPUQACTIVE2(x) ((x) << S_CPUQACTIVE2)
+#define F_CPUQACTIVE2 V_CPUQACTIVE2(1U)
+
+#define S_CPUQACTIVE1 1
+#define V_CPUQACTIVE1(x) ((x) << S_CPUQACTIVE1)
+#define F_CPUQACTIVE1 V_CPUQACTIVE1(1U)
+
+#define S_CPUQACTIVE0 0
+#define V_CPUQACTIVE0(x) ((x) << S_CPUQACTIVE0)
+#define F_CPUQACTIVE0 V_CPUQACTIVE0(1U)
+
+#define A_ARM_CPU_QREQ 0x47070
+
+#define S_CPUL2FLUSHREQ 5
+#define V_CPUL2FLUSHREQ(x) ((x) << S_CPUL2FLUSHREQ)
+#define F_CPUL2FLUSHREQ V_CPUL2FLUSHREQ(1U)
+
+#define S_CPUL2QREQN 4
+#define V_CPUL2QREQN(x) ((x) << S_CPUL2QREQN)
+#define F_CPUL2QREQN V_CPUL2QREQN(1U)
+
+#define S_CPUQREQ3N 3
+#define V_CPUQREQ3N(x) ((x) << S_CPUQREQ3N)
+#define F_CPUQREQ3N V_CPUQREQ3N(1U)
+
+#define S_CPUQREQ2N 2
+#define V_CPUQREQ2N(x) ((x) << S_CPUQREQ2N)
+#define F_CPUQREQ2N V_CPUQREQ2N(1U)
+
+#define S_CPUQREQ1N 1
+#define V_CPUQREQ1N(x) ((x) << S_CPUQREQ1N)
+#define F_CPUQREQ1N V_CPUQREQ1N(1U)
+
+#define S_CPUQREQ0N 0
+#define V_CPUQREQ0N(x) ((x) << S_CPUQREQ0N)
+#define F_CPUQREQ0N V_CPUQREQ0N(1U)
+
+#define A_ARM_CPU_QREQ_STATUS 0x47074
+
+#define S_CPUL2FLUSHDONE 10
+#define V_CPUL2FLUSHDONE(x) ((x) << S_CPUL2FLUSHDONE)
+#define F_CPUL2FLUSHDONE V_CPUL2FLUSHDONE(1U)
+
+#define S_CPUL2QDENY 9
+#define V_CPUL2QDENY(x) ((x) << S_CPUL2QDENY)
+#define F_CPUL2QDENY V_CPUL2QDENY(1U)
+
+#define S_CPUL2QACCEPTN 8
+#define V_CPUL2QACCEPTN(x) ((x) << S_CPUL2QACCEPTN)
+#define F_CPUL2QACCEPTN V_CPUL2QACCEPTN(1U)
+
+#define S_CPUQDENY3 7
+#define V_CPUQDENY3(x) ((x) << S_CPUQDENY3)
+#define F_CPUQDENY3 V_CPUQDENY3(1U)
+
+#define S_CPUQDENY2 6
+#define V_CPUQDENY2(x) ((x) << S_CPUQDENY2)
+#define F_CPUQDENY2 V_CPUQDENY2(1U)
+
+#define S_CPUQDENY1 5
+#define V_CPUQDENY1(x) ((x) << S_CPUQDENY1)
+#define F_CPUQDENY1 V_CPUQDENY1(1U)
+
+#define S_CPUQDENY0 4
+#define V_CPUQDENY0(x) ((x) << S_CPUQDENY0)
+#define F_CPUQDENY0 V_CPUQDENY0(1U)
+
+#define S_CPUQACCEPT3N 3
+#define V_CPUQACCEPT3N(x) ((x) << S_CPUQACCEPT3N)
+#define F_CPUQACCEPT3N V_CPUQACCEPT3N(1U)
+
+#define S_CPUQACCEPT2N 2
+#define V_CPUQACCEPT2N(x) ((x) << S_CPUQACCEPT2N)
+#define F_CPUQACCEPT2N V_CPUQACCEPT2N(1U)
+
+#define S_CPUQACCEPT1N 1
+#define V_CPUQACCEPT1N(x) ((x) << S_CPUQACCEPT1N)
+#define F_CPUQACCEPT1N V_CPUQACCEPT1N(1U)
+
+#define S_CPUQACCEPT0N 0
+#define V_CPUQACCEPT0N(x) ((x) << S_CPUQACCEPT0N)
+#define F_CPUQACCEPT0N V_CPUQACCEPT0N(1U)
+
+#define A_ARM_CPU_DBG_EN 0x47078
+
+#define S_CPUDBGL1RSTDISABLE 28
+#define V_CPUDBGL1RSTDISABLE(x) ((x) << S_CPUDBGL1RSTDISABLE)
+#define F_CPUDBGL1RSTDISABLE V_CPUDBGL1RSTDISABLE(1U)
+
+#define S_CPUDBGRSTREQ3 27
+#define V_CPUDBGRSTREQ3(x) ((x) << S_CPUDBGRSTREQ3)
+#define F_CPUDBGRSTREQ3 V_CPUDBGRSTREQ3(1U)
+
+#define S_CPUDBGRSTREQ2 26
+#define V_CPUDBGRSTREQ2(x) ((x) << S_CPUDBGRSTREQ2)
+#define F_CPUDBGRSTREQ2 V_CPUDBGRSTREQ2(1U)
+
+#define S_CPUDBGRSTREQ1 25
+#define V_CPUDBGRSTREQ1(x) ((x) << S_CPUDBGRSTREQ1)
+#define F_CPUDBGRSTREQ1 V_CPUDBGRSTREQ1(1U)
+
+#define S_CPUDBGRSTREQ0 24
+#define V_CPUDBGRSTREQ0(x) ((x) << S_CPUDBGRSTREQ0)
+#define F_CPUDBGRSTREQ0 V_CPUDBGRSTREQ0(1U)
+
+#define S_CPUDBGPWRDUP3 23
+#define V_CPUDBGPWRDUP3(x) ((x) << S_CPUDBGPWRDUP3)
+#define F_CPUDBGPWRDUP3 V_CPUDBGPWRDUP3(1U)
+
+#define S_CPUDBGPWRDUP2 22
+#define V_CPUDBGPWRDUP2(x) ((x) << S_CPUDBGPWRDUP2)
+#define F_CPUDBGPWRDUP2 V_CPUDBGPWRDUP2(1U)
+
+#define S_CPUDBGPWRDUP1 21
+#define V_CPUDBGPWRDUP1(x) ((x) << S_CPUDBGPWRDUP1)
+#define F_CPUDBGPWRDUP1 V_CPUDBGPWRDUP1(1U)
+
+#define S_CPUDBGPWRDUP0 20
+#define V_CPUDBGPWRDUP0(x) ((x) << S_CPUDBGPWRDUP0)
+#define F_CPUDBGPWRDUP0 V_CPUDBGPWRDUP0(1U)
+
+#define S_CPUEXTDBGREQ3 19
+#define V_CPUEXTDBGREQ3(x) ((x) << S_CPUEXTDBGREQ3)
+#define F_CPUEXTDBGREQ3 V_CPUEXTDBGREQ3(1U)
+
+#define S_CPUEXTDBGREQ2 18
+#define V_CPUEXTDBGREQ2(x) ((x) << S_CPUEXTDBGREQ2)
+#define F_CPUEXTDBGREQ2 V_CPUEXTDBGREQ2(1U)
+
+#define S_CPUEXTDBGREQ1 17
+#define V_CPUEXTDBGREQ1(x) ((x) << S_CPUEXTDBGREQ1)
+#define F_CPUEXTDBGREQ1 V_CPUEXTDBGREQ1(1U)
+
+#define S_CPUEXTDBGREQ0 16
+#define V_CPUEXTDBGREQ0(x) ((x) << S_CPUEXTDBGREQ0)
+#define F_CPUEXTDBGREQ0 V_CPUEXTDBGREQ0(1U)
+
+#define S_CPUSPNIDEN3 15
+#define V_CPUSPNIDEN3(x) ((x) << S_CPUSPNIDEN3)
+#define F_CPUSPNIDEN3 V_CPUSPNIDEN3(1U)
+
+#define S_CPUSPNIDEN2 14
+#define V_CPUSPNIDEN2(x) ((x) << S_CPUSPNIDEN2)
+#define F_CPUSPNIDEN2 V_CPUSPNIDEN2(1U)
+
+#define S_CPUSPNIDEN1 13
+#define V_CPUSPNIDEN1(x) ((x) << S_CPUSPNIDEN1)
+#define F_CPUSPNIDEN1 V_CPUSPNIDEN1(1U)
+
+#define S_CPUSPNIDEN0 12
+#define V_CPUSPNIDEN0(x) ((x) << S_CPUSPNIDEN0)
+#define F_CPUSPNIDEN0 V_CPUSPNIDEN0(1U)
+
+#define S_CPUSPDBGEN3 11
+#define V_CPUSPDBGEN3(x) ((x) << S_CPUSPDBGEN3)
+#define F_CPUSPDBGEN3 V_CPUSPDBGEN3(1U)
+
+#define S_CPUSPDBGEN2 10
+#define V_CPUSPDBGEN2(x) ((x) << S_CPUSPDBGEN2)
+#define F_CPUSPDBGEN2 V_CPUSPDBGEN2(1U)
+
+#define S_CPUSPDBGEN1 9
+#define V_CPUSPDBGEN1(x) ((x) << S_CPUSPDBGEN1)
+#define F_CPUSPDBGEN1 V_CPUSPDBGEN1(1U)
+
+#define S_CPUSPDBGEN0 8
+#define V_CPUSPDBGEN0(x) ((x) << S_CPUSPDBGEN0)
+#define F_CPUSPDBGEN0 V_CPUSPDBGEN0(1U)
+
+#define S_CPUNIDEN3 7
+#define V_CPUNIDEN3(x) ((x) << S_CPUNIDEN3)
+#define F_CPUNIDEN3 V_CPUNIDEN3(1U)
+
+#define S_CPUNIDEN2 6
+#define V_CPUNIDEN2(x) ((x) << S_CPUNIDEN2)
+#define F_CPUNIDEN2 V_CPUNIDEN2(1U)
+
+#define S_CPUNIDEN1 5
+#define V_CPUNIDEN1(x) ((x) << S_CPUNIDEN1)
+#define F_CPUNIDEN1 V_CPUNIDEN1(1U)
+
+#define S_CPUNIDEN0 4
+#define V_CPUNIDEN0(x) ((x) << S_CPUNIDEN0)
+#define F_CPUNIDEN0 V_CPUNIDEN0(1U)
+
+#define S_CPUDBGEN3 3
+#define V_CPUDBGEN3(x) ((x) << S_CPUDBGEN3)
+#define F_CPUDBGEN3 V_CPUDBGEN3(1U)
+
+#define S_CPUDBGEN2 2
+#define V_CPUDBGEN2(x) ((x) << S_CPUDBGEN2)
+#define F_CPUDBGEN2 V_CPUDBGEN2(1U)
+
+#define S_CPUDBGEN1 1
+#define V_CPUDBGEN1(x) ((x) << S_CPUDBGEN1)
+#define F_CPUDBGEN1 V_CPUDBGEN1(1U)
+
+#define S_CPUDBGEN0 0
+#define V_CPUDBGEN0(x) ((x) << S_CPUDBGEN0)
+#define F_CPUDBGEN0 V_CPUDBGEN0(1U)
+
+#define A_ARM_CPU_DBG_ACK 0x4707c
+
+#define S_CPUDBGNOPWRDWN3 11
+#define V_CPUDBGNOPWRDWN3(x) ((x) << S_CPUDBGNOPWRDWN3)
+#define F_CPUDBGNOPWRDWN3 V_CPUDBGNOPWRDWN3(1U)
+
+#define S_CPUDBGNOPWRDWN2 10
+#define V_CPUDBGNOPWRDWN2(x) ((x) << S_CPUDBGNOPWRDWN2)
+#define F_CPUDBGNOPWRDWN2 V_CPUDBGNOPWRDWN2(1U)
+
+#define S_CPUDBGNOPWRDWN1 9
+#define V_CPUDBGNOPWRDWN1(x) ((x) << S_CPUDBGNOPWRDWN1)
+#define F_CPUDBGNOPWRDWN1 V_CPUDBGNOPWRDWN1(1U)
+
+#define S_CPUDBGNOPWRDWN0 8
+#define V_CPUDBGNOPWRDWN0(x) ((x) << S_CPUDBGNOPWRDWN0)
+#define F_CPUDBGNOPWRDWN0 V_CPUDBGNOPWRDWN0(1U)
+
+#define S_CPUDGNRSTREQ3 7
+#define V_CPUDGNRSTREQ3(x) ((x) << S_CPUDGNRSTREQ3)
+#define F_CPUDGNRSTREQ3 V_CPUDGNRSTREQ3(1U)
+
+#define S_CPUDGNRSTREQ2 6
+#define V_CPUDGNRSTREQ2(x) ((x) << S_CPUDGNRSTREQ2)
+#define F_CPUDGNRSTREQ2 V_CPUDGNRSTREQ2(1U)
+
+#define S_CPUDGNRSTREQ1 5
+#define V_CPUDGNRSTREQ1(x) ((x) << S_CPUDGNRSTREQ1)
+#define F_CPUDGNRSTREQ1 V_CPUDGNRSTREQ1(1U)
+
+#define S_CPUDGNRSTREQ0 4
+#define V_CPUDGNRSTREQ0(x) ((x) << S_CPUDGNRSTREQ0)
+#define F_CPUDGNRSTREQ0 V_CPUDGNRSTREQ0(1U)
+
+#define S_CPUDBGACK3 3
+#define V_CPUDBGACK3(x) ((x) << S_CPUDBGACK3)
+#define F_CPUDBGACK3 V_CPUDBGACK3(1U)
+
+#define S_CPUDBGACK2 2
+#define V_CPUDBGACK2(x) ((x) << S_CPUDBGACK2)
+#define F_CPUDBGACK2 V_CPUDBGACK2(1U)
+
+#define S_CPUDBGACK1 1
+#define V_CPUDBGACK1(x) ((x) << S_CPUDBGACK1)
+#define F_CPUDBGACK1 V_CPUDBGACK1(1U)
+
+#define S_CPUDBGACK0 0
+#define V_CPUDBGACK0(x) ((x) << S_CPUDBGACK0)
+#define F_CPUDBGACK0 V_CPUDBGACK0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_REQ 0x47080
+
+#define S_CPUPMUSNAPSHOTREQ3 3
+#define V_CPUPMUSNAPSHOTREQ3(x) ((x) << S_CPUPMUSNAPSHOTREQ3)
+#define F_CPUPMUSNAPSHOTREQ3 V_CPUPMUSNAPSHOTREQ3(1U)
+
+#define S_CPUPMUSNAPSHOTREQ2 2
+#define V_CPUPMUSNAPSHOTREQ2(x) ((x) << S_CPUPMUSNAPSHOTREQ2)
+#define F_CPUPMUSNAPSHOTREQ2 V_CPUPMUSNAPSHOTREQ2(1U)
+
+#define S_CPUPMUSNAPSHOTREQ1 1
+#define V_CPUPMUSNAPSHOTREQ1(x) ((x) << S_CPUPMUSNAPSHOTREQ1)
+#define F_CPUPMUSNAPSHOTREQ1 V_CPUPMUSNAPSHOTREQ1(1U)
+
+#define S_CPUPMUSNAPSHOTREQ0 0
+#define V_CPUPMUSNAPSHOTREQ0(x) ((x) << S_CPUPMUSNAPSHOTREQ0)
+#define F_CPUPMUSNAPSHOTREQ0 V_CPUPMUSNAPSHOTREQ0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_ACK 0x47084
+
+#define S_CPUPMUSNAPSHOTACK3 3
+#define V_CPUPMUSNAPSHOTACK3(x) ((x) << S_CPUPMUSNAPSHOTACK3)
+#define F_CPUPMUSNAPSHOTACK3 V_CPUPMUSNAPSHOTACK3(1U)
+
+#define S_CPUPMUSNAPSHOTACK2 2
+#define V_CPUPMUSNAPSHOTACK2(x) ((x) << S_CPUPMUSNAPSHOTACK2)
+#define F_CPUPMUSNAPSHOTACK2 V_CPUPMUSNAPSHOTACK2(1U)
+
+#define S_CPUPMUSNAPSHOTACK1 1
+#define V_CPUPMUSNAPSHOTACK1(x) ((x) << S_CPUPMUSNAPSHOTACK1)
+#define F_CPUPMUSNAPSHOTACK1 V_CPUPMUSNAPSHOTACK1(1U)
+
+#define S_CPUPMUSNAPSHOTACK0 0
+#define V_CPUPMUSNAPSHOTACK0(x) ((x) << S_CPUPMUSNAPSHOTACK0)
+#define F_CPUPMUSNAPSHOTACK0 V_CPUPMUSNAPSHOTACK0(1U)
+
+#define A_ARM_EMMC_CTRL 0x47088
+
+#define S_EMMC_DATA_P2 24
+#define M_EMMC_DATA_P2 0xffU
+#define V_EMMC_DATA_P2(x) ((x) << S_EMMC_DATA_P2)
+#define G_EMMC_DATA_P2(x) (((x) >> S_EMMC_DATA_P2) & M_EMMC_DATA_P2)
+
+#define S_EMMC_DATA_P1 16
+#define M_EMMC_DATA_P1 0xffU
+#define V_EMMC_DATA_P1(x) ((x) << S_EMMC_DATA_P1)
+#define G_EMMC_DATA_P1(x) (((x) >> S_EMMC_DATA_P1) & M_EMMC_DATA_P1)
+
+#define S_EMMC_CMD_P2 15
+#define V_EMMC_CMD_P2(x) ((x) << S_EMMC_CMD_P2)
+#define F_EMMC_CMD_P2 V_EMMC_CMD_P2(1U)
+
+#define S_EMMC_CMD_P1 14
+#define V_EMMC_CMD_P1(x) ((x) << S_EMMC_CMD_P1)
+#define F_EMMC_CMD_P1 V_EMMC_CMD_P1(1U)
+
+#define S_EMMC_RST_P2 13
+#define V_EMMC_RST_P2(x) ((x) << S_EMMC_RST_P2)
+#define F_EMMC_RST_P2 V_EMMC_RST_P2(1U)
+
+#define S_EMMC_RST_P1 12
+#define V_EMMC_RST_P1(x) ((x) << S_EMMC_RST_P1)
+#define F_EMMC_RST_P1 V_EMMC_RST_P1(1U)
+
+#define S_EMMC_GP_IN_P2 10
+#define M_EMMC_GP_IN_P2 0x3U
+#define V_EMMC_GP_IN_P2(x) ((x) << S_EMMC_GP_IN_P2)
+#define G_EMMC_GP_IN_P2(x) (((x) >> S_EMMC_GP_IN_P2) & M_EMMC_GP_IN_P2)
+
+#define S_EMMC_GP_IN_P1 8
+#define M_EMMC_GP_IN_P1 0x3U
+#define V_EMMC_GP_IN_P1(x) ((x) << S_EMMC_GP_IN_P1)
+#define G_EMMC_GP_IN_P1(x) (((x) >> S_EMMC_GP_IN_P1) & M_EMMC_GP_IN_P1)
+
+#define S_EMMC_CLK_SEL 0
+#define M_EMMC_CLK_SEL 0xffU
+#define V_EMMC_CLK_SEL(x) ((x) << S_EMMC_CLK_SEL)
+#define G_EMMC_CLK_SEL(x) (((x) >> S_EMMC_CLK_SEL) & M_EMMC_CLK_SEL)
+
+#define A_ARM_CPU_CFG_END_VINI_TE 0x4708c
+
+#define S_CPUSYSBARDISABLE 23
+#define V_CPUSYSBARDISABLE(x) ((x) << S_CPUSYSBARDISABLE)
+#define F_CPUSYSBARDISABLE V_CPUSYSBARDISABLE(1U)
+
+#define S_CPUBROADCACHEMAIN 22
+#define V_CPUBROADCACHEMAIN(x) ((x) << S_CPUBROADCACHEMAIN)
+#define F_CPUBROADCACHEMAIN V_CPUBROADCACHEMAIN(1U)
+
+#define S_CPUBROADOUTER 21
+#define V_CPUBROADOUTER(x) ((x) << S_CPUBROADOUTER)
+#define F_CPUBROADOUTER V_CPUBROADOUTER(1U)
+
+#define S_CPUBROADINNER 20
+#define V_CPUBROADINNER(x) ((x) << S_CPUBROADINNER)
+#define F_CPUBROADINNER V_CPUBROADINNER(1U)
+
+#define S_CPUCRYPTODISABLE3 19
+#define V_CPUCRYPTODISABLE3(x) ((x) << S_CPUCRYPTODISABLE3)
+#define F_CPUCRYPTODISABLE3 V_CPUCRYPTODISABLE3(1U)
+
+#define S_CPUCRYPTODISABLE2 18
+#define V_CPUCRYPTODISABLE2(x) ((x) << S_CPUCRYPTODISABLE2)
+#define F_CPUCRYPTODISABLE2 V_CPUCRYPTODISABLE2(1U)
+
+#define S_CPUCRYPTODISABLE1 17
+#define V_CPUCRYPTODISABLE1(x) ((x) << S_CPUCRYPTODISABLE1)
+#define F_CPUCRYPTODISABLE1 V_CPUCRYPTODISABLE1(1U)
+
+#define S_CPUCRYPTODISABLE0 16
+#define V_CPUCRYPTODISABLE0(x) ((x) << S_CPUCRYPTODISABLE0)
+#define F_CPUCRYPTODISABLE0 V_CPUCRYPTODISABLE0(1U)
+
+#define S_CPUAA64NAA323 15
+#define V_CPUAA64NAA323(x) ((x) << S_CPUAA64NAA323)
+#define F_CPUAA64NAA323 V_CPUAA64NAA323(1U)
+
+#define S_CPUAA64NAA322 14
+#define V_CPUAA64NAA322(x) ((x) << S_CPUAA64NAA322)
+#define F_CPUAA64NAA322 V_CPUAA64NAA322(1U)
+
+#define S_CPUAA64NAA321 13
+#define V_CPUAA64NAA321(x) ((x) << S_CPUAA64NAA321)
+#define F_CPUAA64NAA321 V_CPUAA64NAA321(1U)
+
+#define S_CPUAA64NAA320 12
+#define V_CPUAA64NAA320(x) ((x) << S_CPUAA64NAA320)
+#define F_CPUAA64NAA320 V_CPUAA64NAA320(1U)
+
+#define S_CPUCFGTE3 11
+#define V_CPUCFGTE3(x) ((x) << S_CPUCFGTE3)
+#define F_CPUCFGTE3 V_CPUCFGTE3(1U)
+
+#define S_CPUCFGTE2 10
+#define V_CPUCFGTE2(x) ((x) << S_CPUCFGTE2)
+#define F_CPUCFGTE2 V_CPUCFGTE2(1U)
+
+#define S_CPUCFGTE1 9
+#define V_CPUCFGTE1(x) ((x) << S_CPUCFGTE1)
+#define F_CPUCFGTE1 V_CPUCFGTE1(1U)
+
+#define S_CPUCFGTE0 8
+#define V_CPUCFGTE0(x) ((x) << S_CPUCFGTE0)
+#define F_CPUCFGTE0 V_CPUCFGTE0(1U)
+
+#define S_CPUVINIHI3 7
+#define V_CPUVINIHI3(x) ((x) << S_CPUVINIHI3)
+#define F_CPUVINIHI3 V_CPUVINIHI3(1U)
+
+#define S_CPUVINIHI2 6
+#define V_CPUVINIHI2(x) ((x) << S_CPUVINIHI2)
+#define F_CPUVINIHI2 V_CPUVINIHI2(1U)
+
+#define S_CPUVINIHI1 5
+#define V_CPUVINIHI1(x) ((x) << S_CPUVINIHI1)
+#define F_CPUVINIHI1 V_CPUVINIHI1(1U)
+
+#define S_CPUVINIHI0 4
+#define V_CPUVINIHI0(x) ((x) << S_CPUVINIHI0)
+#define F_CPUVINIHI0 V_CPUVINIHI0(1U)
+
+#define S_CPUCFGEND3 3
+#define V_CPUCFGEND3(x) ((x) << S_CPUCFGEND3)
+#define F_CPUCFGEND3 V_CPUCFGEND3(1U)
+
+#define S_CPUCFGEND2 2
+#define V_CPUCFGEND2(x) ((x) << S_CPUCFGEND2)
+#define F_CPUCFGEND2 V_CPUCFGEND2(1U)
+
+#define S_CPUCFGEND1 1
+#define V_CPUCFGEND1(x) ((x) << S_CPUCFGEND1)
+#define F_CPUCFGEND1 V_CPUCFGEND1(1U)
+
+#define S_CPUCFGEND0 0
+#define V_CPUCFGEND0(x) ((x) << S_CPUCFGEND0)
+#define F_CPUCFGEND0 V_CPUCFGEND0(1U)
+
+#define A_ARM_CPU_CP15_SDISABLE 0x47090
+
+#define S_CPUCP15SDISABLE3 3
+#define V_CPUCP15SDISABLE3(x) ((x) << S_CPUCP15SDISABLE3)
+#define F_CPUCP15SDISABLE3 V_CPUCP15SDISABLE3(1U)
+
+#define S_CPUCP15SDISABLE2 2
+#define V_CPUCP15SDISABLE2(x) ((x) << S_CPUCP15SDISABLE2)
+#define F_CPUCP15SDISABLE2 V_CPUCP15SDISABLE2(1U)
+
+#define S_CPUCP15SDISABLE1 1
+#define V_CPUCP15SDISABLE1(x) ((x) << S_CPUCP15SDISABLE1)
+#define F_CPUCP15SDISABLE1 V_CPUCP15SDISABLE1(1U)
+
+#define S_CPUCP15SDISABLE0 0
+#define V_CPUCP15SDISABLE0(x) ((x) << S_CPUCP15SDISABLE0)
+#define F_CPUCP15SDISABLE0 V_CPUCP15SDISABLE0(1U)
+
+#define A_ARM_CPU_CLUSTER_ID_AFF 0x47094
+
+#define S_CPUCLUSTERIDAFF2 8
+#define M_CPUCLUSTERIDAFF2 0xffU
+#define V_CPUCLUSTERIDAFF2(x) ((x) << S_CPUCLUSTERIDAFF2)
+#define G_CPUCLUSTERIDAFF2(x) (((x) >> S_CPUCLUSTERIDAFF2) & M_CPUCLUSTERIDAFF2)
+
+#define S_CPUCLUSTERIDAFF1 0
+#define M_CPUCLUSTERIDAFF1 0xffU
+#define V_CPUCLUSTERIDAFF1(x) ((x) << S_CPUCLUSTERIDAFF1)
+#define G_CPUCLUSTERIDAFF1(x) (((x) >> S_CPUCLUSTERIDAFF1) & M_CPUCLUSTERIDAFF1)
+
+#define A_ARM_CPU_CLK_CFG 0x47098
+
+#define S_CPUACINACTIVEM 1
+#define V_CPUACINACTIVEM(x) ((x) << S_CPUACINACTIVEM)
+#define F_CPUACINACTIVEM V_CPUACINACTIVEM(1U)
+
+#define S_CPUACLKENM 0
+#define V_CPUACLKENM(x) ((x) << S_CPUACLKENM)
+#define F_CPUACLKENM V_CPUACLKENM(1U)
+
+#define A_ARM_NVME_DB_EMU_INT_CAUSE 0x4709c
+
+#define S_INVALID_BRESP 3
+#define V_INVALID_BRESP(x) ((x) << S_INVALID_BRESP)
+#define F_INVALID_BRESP V_INVALID_BRESP(1U)
+
+#define S_DATA_LEN_OF 2
+#define V_DATA_LEN_OF(x) ((x) << S_DATA_LEN_OF)
+#define F_DATA_LEN_OF V_DATA_LEN_OF(1U)
+
+#define S_INVALID_EMU_ADDR 1
+#define V_INVALID_EMU_ADDR(x) ((x) << S_INVALID_EMU_ADDR)
+#define F_INVALID_EMU_ADDR V_INVALID_EMU_ADDR(1U)
+
+#define S_INVALID_AXI_ADDR_CFG 0
+#define V_INVALID_AXI_ADDR_CFG(x) ((x) << S_INVALID_AXI_ADDR_CFG)
+#define F_INVALID_AXI_ADDR_CFG V_INVALID_AXI_ADDR_CFG(1U)
+
+#define A_ARM_CS_RST 0x470c0
+
+#define S_ATCLKEN 9
+#define V_ATCLKEN(x) ((x) << S_ATCLKEN)
+#define F_ATCLKEN V_ATCLKEN(1U)
+
+#define S_CXAPBICRSTN 8
+#define V_CXAPBICRSTN(x) ((x) << S_CXAPBICRSTN)
+#define F_CXAPBICRSTN V_CXAPBICRSTN(1U)
+
+#define S_CSDBGEN 7
+#define V_CSDBGEN(x) ((x) << S_CSDBGEN)
+#define F_CSDBGEN V_CSDBGEN(1U)
+
+#define S_JTAGNPOTRST 6
+#define V_JTAGNPOTRST(x) ((x) << S_JTAGNPOTRST)
+#define F_JTAGNPOTRST V_JTAGNPOTRST(1U)
+
+#define S_JTAGNTRST 5
+#define V_JTAGNTRST(x) ((x) << S_JTAGNTRST)
+#define F_JTAGNTRST V_JTAGNTRST(1U)
+
+#define S_PADDR31S0 4
+#define V_PADDR31S0(x) ((x) << S_PADDR31S0)
+#define F_PADDR31S0 V_PADDR31S0(1U)
+
+#define S_CTICLKEN 3
+#define V_CTICLKEN(x) ((x) << S_CTICLKEN)
+#define F_CTICLKEN V_CTICLKEN(1U)
+
+#define S_PCLKENDBG 2
+#define V_PCLKENDBG(x) ((x) << S_PCLKENDBG)
+#define F_PCLKENDBG V_PCLKENDBG(1U)
+
+#define S_CPU_NIDEN 1
+#define V_CPU_NIDEN(x) ((x) << S_CPU_NIDEN)
+#define F_CPU_NIDEN V_CPU_NIDEN(1U)
+
+#define S_CPU_DBGEN 0
+#define V_CPU_DBGEN(x) ((x) << S_CPU_DBGEN)
+#define F_CPU_DBGEN V_CPU_DBGEN(1U)
+
+#define A_ARM_CS_ADDRL 0x470c4
+#define A_ARM_CS_ADDRH 0x470c8
+#define A_ARM_CS_DFT_CONTROL 0x470cc
+
+#define S_DFTMBISTADDR 5
+#define M_DFTMBISTADDR 0x7ffU
+#define V_DFTMBISTADDR(x) ((x) << S_DFTMBISTADDR)
+#define G_DFTMBISTADDR(x) (((x) >> S_DFTMBISTADDR) & M_DFTMBISTADDR)
+
+#define S_DFTMTESTON 3
+#define V_DFTMTESTON(x) ((x) << S_DFTMTESTON)
+#define F_DFTMTESTON V_DFTMTESTON(1U)
+
+#define S_DFTMBISTCE 2
+#define V_DFTMBISTCE(x) ((x) << S_DFTMBISTCE)
+#define F_DFTMBISTCE V_DFTMBISTCE(1U)
+
+#define S_DFTMBITWR 1
+#define V_DFTMBITWR(x) ((x) << S_DFTMBITWR)
+#define F_DFTMBITWR V_DFTMBITWR(1U)
+
+#define S_DFTSE 0
+#define V_DFTSE(x) ((x) << S_DFTSE)
+#define F_DFTSE V_DFTSE(1U)
+
+#define A_ARM_CS_DFT_IN 0x470d0
+#define A_ARM_CS_DFT_OUT 0x470d4
+#define A_ARM_CPU_EVENT_I 0x47100
+
+#define S_CPUEVENTI 0
+#define V_CPUEVENTI(x) ((x) << S_CPUEVENTI)
+#define F_CPUEVENTI V_CPUEVENTI(1U)
+
+#define A_ARM_CPU_EVENT_O 0x47104
+
+#define S_CPUEVENTO 0
+#define V_CPUEVENTO(x) ((x) << S_CPUEVENTO)
+#define F_CPUEVENTO V_CPUEVENTO(1U)
+
+#define A_ARM_CPU_CLR_EXMON_REQ 0x47108
+
+#define S_CPUCLREXMONREQ 0
+#define V_CPUCLREXMONREQ(x) ((x) << S_CPUCLREXMONREQ)
+#define F_CPUCLREXMONREQ V_CPUCLREXMONREQ(1U)
+
+#define A_ARM_CPU_CLR_EXMON_ACK 0x4710c
+
+#define S_CPUCLREXMONACK 0
+#define V_CPUCLREXMONACK(x) ((x) << S_CPUCLREXMONACK)
+#define F_CPUCLREXMONACK V_CPUCLREXMONACK(1U)
+
+#define A_ARM_UART_MSTR_RXD 0x47110
+#define A_ARM_UART_MSTR_RXC 0x47114
+
+#define S_UART_MSTR_RXC 0
+#define V_UART_MSTR_RXC(x) ((x) << S_UART_MSTR_RXC)
+#define F_UART_MSTR_RXC V_UART_MSTR_RXC(1U)
+
+#define A_ARM_UART_MSTR_TXD 0x47118
+#define A_ARM_UART_MSTR_TXC 0x4711c
+
+#define S_T7_INT 1
+#define V_T7_INT(x) ((x) << S_T7_INT)
+#define F_T7_INT V_T7_INT(1U)
+
+#define S_UART_MSTC_TXC 0
+#define V_UART_MSTC_TXC(x) ((x) << S_UART_MSTC_TXC)
+#define F_UART_MSTC_TXC V_UART_MSTC_TXC(1U)
+
+#define A_ARM_UART_SLV_SEL 0x47120
+
+#define S_UART_SLV_SEL 0
+#define V_UART_SLV_SEL(x) ((x) << S_UART_SLV_SEL)
+#define F_UART_SLV_SEL V_UART_SLV_SEL(1U)
+
+#define A_ARM_CPU_PERIPH_BASE 0x47124
+#define A_ARM_PERR_INT_ENB2 0x47128
+#define A_ARM_PERR_ENABLE2 0x4712c
+#define A_ARM_UART_CONFIG 0x47130
+#define A_ARM_UART_STAT 0x47134
+
+#define S_RSV1 6
+#define M_RSV1 0x3ffffffU
+#define V_RSV1(x) ((x) << S_RSV1)
+#define G_RSV1(x) (((x) >> S_RSV1) & M_RSV1)
+
+#define S_RXFRMERR 5
+#define V_RXFRMERR(x) ((x) << S_RXFRMERR)
+#define F_RXFRMERR V_RXFRMERR(1U)
+
+#define S_RXPARERR 4
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RXOVRN 3
+#define V_RXOVRN(x) ((x) << S_RXOVRN)
+#define F_RXOVRN V_RXOVRN(1U)
+
+#define S_CTL_RXRDY 2
+#define V_CTL_RXRDY(x) ((x) << S_CTL_RXRDY)
+#define F_CTL_RXRDY V_CTL_RXRDY(1U)
+
+#define S_TXOVRN 1
+#define V_TXOVRN(x) ((x) << S_TXOVRN)
+#define F_TXOVRN V_TXOVRN(1U)
+
+#define S_CTL_TXRDY 0
+#define V_CTL_TXRDY(x) ((x) << S_CTL_TXRDY)
+#define F_CTL_TXRDY V_CTL_TXRDY(1U)
+
+#define A_ARM_UART_TX_DATA 0x47138
+
+#define S_TX_DATA 0
+#define M_TX_DATA 0xffU
+#define V_TX_DATA(x) ((x) << S_TX_DATA)
+#define G_TX_DATA(x) (((x) >> S_TX_DATA) & M_TX_DATA)
+
+#define A_ARM_UART_RX_DATA 0x4713c
+
+#define S_RX_DATA 0
+#define M_RX_DATA 0xffU
+#define V_RX_DATA(x) ((x) << S_RX_DATA)
+#define G_RX_DATA(x) (((x) >> S_RX_DATA) & M_RX_DATA)
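
The UART status and data registers above compose into the obvious polled I/O loop. A short sketch, reusing the hypothetical mmio helpers from the earlier example; the busy-wait structure is assumed from the CTL_TXRDY/CTL_RXRDY names, not stated by the diff.

/* Polled TX/RX over the debug UART (illustrative sketch). */
static void
uart_putc(volatile uint32_t *base, uint8_t c)
{
	/* Spin until the transmitter reports ready. */
	while ((mmio_read32(base, A_ARM_UART_STAT) & F_CTL_TXRDY) == 0)
		;
	mmio_write32(base, A_ARM_UART_TX_DATA, V_TX_DATA(c));
}

static uint8_t
uart_getc(volatile uint32_t *base)
{
	/* Spin until a received byte is available. */
	while ((mmio_read32(base, A_ARM_UART_STAT) & F_CTL_RXRDY) == 0)
		;
	return (G_RX_DATA(mmio_read32(base, A_ARM_UART_RX_DATA)));
}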
+
+#define A_ARM_UART_DBG0 0x47140
+#define A_ARM_UART_DBG1 0x47144
+#define A_ARM_UART_DBG2 0x47148
+#define A_ARM_UART_DBG3 0x4714c
+#define A_ARM_ARM_CPU_PC0 0x47150
+#define A_ARM_ARM_CPU_PC1 0x47154
+#define A_ARM_ARM_UART_INT_CAUSE 0x47158
+
+#define S_RX_FIFO_NOT_EMPTY 1
+#define V_RX_FIFO_NOT_EMPTY(x) ((x) << S_RX_FIFO_NOT_EMPTY)
+#define F_RX_FIFO_NOT_EMPTY V_RX_FIFO_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_EMPTY 0
+#define V_TX_FIFO_EMPTY(x) ((x) << S_TX_FIFO_EMPTY)
+#define F_TX_FIFO_EMPTY V_TX_FIFO_EMPTY(1U)
+
+#define A_ARM_ARM_UART_INT_EN 0x4715c
+
+#define S_RX_FIFO_INT_NOT_EMPTY 1
+#define V_RX_FIFO_INT_NOT_EMPTY(x) ((x) << S_RX_FIFO_INT_NOT_EMPTY)
+#define F_RX_FIFO_INT_NOT_EMPTY V_RX_FIFO_INT_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_INT_EMPTY 0
+#define V_TX_FIFO_INT_EMPTY(x) ((x) << S_TX_FIFO_INT_EMPTY)
+#define F_TX_FIFO_INT_EMPTY V_TX_FIFO_INT_EMPTY(1U)
+
+#define A_ARM_ARM_UART_GPIO_SEL 0x47160
+
+#define S_PC_SEL 1
+#define M_PC_SEL 0x7U
+#define V_PC_SEL(x) ((x) << S_PC_SEL)
+#define G_PC_SEL(x) (((x) >> S_PC_SEL) & M_PC_SEL)
+
+#define S_UART_GPIO_SEL 0
+#define V_UART_GPIO_SEL(x) ((x) << S_UART_GPIO_SEL)
+#define F_UART_GPIO_SEL V_UART_GPIO_SEL(1U)
+
+#define A_ARM_ARM_SCRATCH_PAD0 0x47164
+#define A_ARM_ARM_SCRATCH_PAD1 0x47168
+#define A_ARM_ARM_SCRATCH_PAD2 0x4716c
+#define A_ARM_PERR_INT_CAUSE0 0x47170
+
+#define S_INIC_WRDATA_FIFO_PERR 31
+#define V_INIC_WRDATA_FIFO_PERR(x) ((x) << S_INIC_WRDATA_FIFO_PERR)
+#define F_INIC_WRDATA_FIFO_PERR V_INIC_WRDATA_FIFO_PERR(1U)
+
+#define S_INIC_RDATA_FIFO_PERR 30
+#define V_INIC_RDATA_FIFO_PERR(x) ((x) << S_INIC_RDATA_FIFO_PERR)
+#define F_INIC_RDATA_FIFO_PERR V_INIC_RDATA_FIFO_PERR(1U)
+
+#define S_MSI_MEM_PERR 29
+#define V_MSI_MEM_PERR(x) ((x) << S_MSI_MEM_PERR)
+#define F_MSI_MEM_PERR V_MSI_MEM_PERR(1U)
+
+#define S_ARM_DB_SRAM_PERR 27
+#define M_ARM_DB_SRAM_PERR 0x3U
+#define V_ARM_DB_SRAM_PERR(x) ((x) << S_ARM_DB_SRAM_PERR)
+#define G_ARM_DB_SRAM_PERR(x) (((x) >> S_ARM_DB_SRAM_PERR) & M_ARM_DB_SRAM_PERR)
+
+#define S_EMMC_FIFOPARINT 26
+#define V_EMMC_FIFOPARINT(x) ((x) << S_EMMC_FIFOPARINT)
+#define F_EMMC_FIFOPARINT V_EMMC_FIFOPARINT(1U)
+
+#define S_ICB_RAM_PERR 25
+#define V_ICB_RAM_PERR(x) ((x) << S_ICB_RAM_PERR)
+#define F_ICB_RAM_PERR V_ICB_RAM_PERR(1U)
+
+#define S_MESS2AXI4_WRFIFO_PERR 24
+#define V_MESS2AXI4_WRFIFO_PERR(x) ((x) << S_MESS2AXI4_WRFIFO_PERR)
+#define F_MESS2AXI4_WRFIFO_PERR V_MESS2AXI4_WRFIFO_PERR(1U)
+
+#define S_RC_WFIFO_OUTPERR 23
+#define V_RC_WFIFO_OUTPERR(x) ((x) << S_RC_WFIFO_OUTPERR)
+#define F_RC_WFIFO_OUTPERR V_RC_WFIFO_OUTPERR(1U)
+
+#define S_RC_SRAM_PERR 21
+#define M_RC_SRAM_PERR 0x3U
+#define V_RC_SRAM_PERR(x) ((x) << S_RC_SRAM_PERR)
+#define G_RC_SRAM_PERR(x) (((x) >> S_RC_SRAM_PERR) & M_RC_SRAM_PERR)
+
+#define S_MSI_FIFO_PAR_ERR 20
+#define V_MSI_FIFO_PAR_ERR(x) ((x) << S_MSI_FIFO_PAR_ERR)
+#define F_MSI_FIFO_PAR_ERR V_MSI_FIFO_PAR_ERR(1U)
+
+#define S_INIC2MA_INTFPERR 19
+#define V_INIC2MA_INTFPERR(x) ((x) << S_INIC2MA_INTFPERR)
+#define F_INIC2MA_INTFPERR V_INIC2MA_INTFPERR(1U)
+
+#define S_RDATAFIFO0_PERR 18
+#define V_RDATAFIFO0_PERR(x) ((x) << S_RDATAFIFO0_PERR)
+#define F_RDATAFIFO0_PERR V_RDATAFIFO0_PERR(1U)
+
+#define S_RDATAFIFO1_PERR 17
+#define V_RDATAFIFO1_PERR(x) ((x) << S_RDATAFIFO1_PERR)
+#define F_RDATAFIFO1_PERR V_RDATAFIFO1_PERR(1U)
+
+#define S_WRDATAFIFO0_PERR 16
+#define V_WRDATAFIFO0_PERR(x) ((x) << S_WRDATAFIFO0_PERR)
+#define F_WRDATAFIFO0_PERR V_WRDATAFIFO0_PERR(1U)
+
+#define S_WRDATAFIFO1_PERR 15
+#define V_WRDATAFIFO1_PERR(x) ((x) << S_WRDATAFIFO1_PERR)
+#define F_WRDATAFIFO1_PERR V_WRDATAFIFO1_PERR(1U)
+
+#define S_WR512DATAFIFO0_PERR 14
+#define V_WR512DATAFIFO0_PERR(x) ((x) << S_WR512DATAFIFO0_PERR)
+#define F_WR512DATAFIFO0_PERR V_WR512DATAFIFO0_PERR(1U)
+
+#define S_WR512DATAFIFO1_PERR 13
+#define V_WR512DATAFIFO1_PERR(x) ((x) << S_WR512DATAFIFO1_PERR)
+#define F_WR512DATAFIFO1_PERR V_WR512DATAFIFO1_PERR(1U)
+
+#define S_ROBUFF_PARERR3 12
+#define V_ROBUFF_PARERR3(x) ((x) << S_ROBUFF_PARERR3)
+#define F_ROBUFF_PARERR3 V_ROBUFF_PARERR3(1U)
+
+#define S_ROBUFF_PARERR2 11
+#define V_ROBUFF_PARERR2(x) ((x) << S_ROBUFF_PARERR2)
+#define F_ROBUFF_PARERR2 V_ROBUFF_PARERR2(1U)
+
+#define S_ROBUFF_PARERR1 10
+#define V_ROBUFF_PARERR1(x) ((x) << S_ROBUFF_PARERR1)
+#define F_ROBUFF_PARERR1 V_ROBUFF_PARERR1(1U)
+
+#define S_ROBUFF_PARERR0 9
+#define V_ROBUFF_PARERR0(x) ((x) << S_ROBUFF_PARERR0)
+#define F_ROBUFF_PARERR0 V_ROBUFF_PARERR0(1U)
+
+#define S_MA2AXI_REQDATAPARERR 8
+#define V_MA2AXI_REQDATAPARERR(x) ((x) << S_MA2AXI_REQDATAPARERR)
+#define F_MA2AXI_REQDATAPARERR V_MA2AXI_REQDATAPARERR(1U)
+
+#define S_MA2AXI_REQCTLPARERR 7
+#define V_MA2AXI_REQCTLPARERR(x) ((x) << S_MA2AXI_REQCTLPARERR)
+#define F_MA2AXI_REQCTLPARERR V_MA2AXI_REQCTLPARERR(1U)
+
+#define S_MA_RSPPERR 6
+#define V_MA_RSPPERR(x) ((x) << S_MA_RSPPERR)
+#define F_MA_RSPPERR V_MA_RSPPERR(1U)
+
+#define S_PCIE2MA_REQCTLPARERR 5
+#define V_PCIE2MA_REQCTLPARERR(x) ((x) << S_PCIE2MA_REQCTLPARERR)
+#define F_PCIE2MA_REQCTLPARERR V_PCIE2MA_REQCTLPARERR(1U)
+
+#define S_PCIE2MA_REQDATAPARERR 4
+#define V_PCIE2MA_REQDATAPARERR(x) ((x) << S_PCIE2MA_REQDATAPARERR)
+#define F_PCIE2MA_REQDATAPARERR V_PCIE2MA_REQDATAPARERR(1U)
+
+#define S_INIC2MA_REQCTLPARERR 3
+#define V_INIC2MA_REQCTLPARERR(x) ((x) << S_INIC2MA_REQCTLPARERR)
+#define F_INIC2MA_REQCTLPARERR V_INIC2MA_REQCTLPARERR(1U)
+
+#define S_INIC2MA_REQDATAPARERR 2
+#define V_INIC2MA_REQDATAPARERR(x) ((x) << S_INIC2MA_REQDATAPARERR)
+#define F_INIC2MA_REQDATAPARERR V_INIC2MA_REQDATAPARERR(1U)
+
+#define S_MA_RSPUE 1
+#define V_MA_RSPUE(x) ((x) << S_MA_RSPUE)
+#define F_MA_RSPUE V_MA_RSPUE(1U)
+
+#define S_APB2PL_RSPDATAPERR 0
+#define V_APB2PL_RSPDATAPERR(x) ((x) << S_APB2PL_RSPDATAPERR)
+#define F_APB2PL_RSPDATAPERR V_APB2PL_RSPDATAPERR(1U)
+
+#define A_ARM_PERR_INT_ENB0 0x47174
+#define A_ARM_SCRATCH_PAD3 0x47178
+
+#define S_ECO_43187 31
+#define V_ECO_43187(x) ((x) << S_ECO_43187)
+#define F_ECO_43187 V_ECO_43187(1U)
+
+#define S_TIMER_SEL 28
+#define M_TIMER_SEL 0x7U
+#define V_TIMER_SEL(x) ((x) << S_TIMER_SEL)
+#define G_TIMER_SEL(x) (((x) >> S_TIMER_SEL) & M_TIMER_SEL)
+
+#define S_TIMER 4
+#define M_TIMER 0xffffffU
+#define V_TIMER(x) ((x) << S_TIMER)
+#define G_TIMER(x) (((x) >> S_TIMER) & M_TIMER)
+
+#define S_T7_1_INT 0
+#define M_T7_1_INT 0x3U
+#define V_T7_1_INT(x) ((x) << S_T7_1_INT)
+#define G_T7_1_INT(x) (((x) >> S_T7_1_INT) & M_T7_1_INT)
+
+#define A_ARM_PERR_INT_CAUSE2 0x4717c
+
+#define S_INIC_WSTRB_FIFO_PERR 31
+#define V_INIC_WSTRB_FIFO_PERR(x) ((x) << S_INIC_WSTRB_FIFO_PERR)
+#define F_INIC_WSTRB_FIFO_PERR V_INIC_WSTRB_FIFO_PERR(1U)
+
+#define S_INIC_BID_FIFO_PERR 30
+#define V_INIC_BID_FIFO_PERR(x) ((x) << S_INIC_BID_FIFO_PERR)
+#define F_INIC_BID_FIFO_PERR V_INIC_BID_FIFO_PERR(1U)
+
+#define S_CC_SRAM_PKA_PERR 29
+#define V_CC_SRAM_PKA_PERR(x) ((x) << S_CC_SRAM_PKA_PERR)
+#define F_CC_SRAM_PKA_PERR V_CC_SRAM_PKA_PERR(1U)
+
+#define S_CC_SRAM_SEC_PERR 28
+#define V_CC_SRAM_SEC_PERR(x) ((x) << S_CC_SRAM_SEC_PERR)
+#define F_CC_SRAM_SEC_PERR V_CC_SRAM_SEC_PERR(1U)
+
+#define S_MESS2AXI4_PARERR 27
+#define V_MESS2AXI4_PARERR(x) ((x) << S_MESS2AXI4_PARERR)
+#define F_MESS2AXI4_PARERR V_MESS2AXI4_PARERR(1U)
+
+#define S_CCI2INIC_INTF_PARERR 26
+#define V_CCI2INIC_INTF_PARERR(x) ((x) << S_CCI2INIC_INTF_PARERR)
+#define F_CCI2INIC_INTF_PARERR V_CCI2INIC_INTF_PARERR(1U)
+
+#define A_ARM_MA2AXI_AW_ATTR 0x47180
+
+#define S_AWLOCKR1 29
+#define V_AWLOCKR1(x) ((x) << S_AWLOCKR1)
+#define F_AWLOCKR1 V_AWLOCKR1(1U)
+
+#define S_AWCACHER1 25
+#define M_AWCACHER1 0xfU
+#define V_AWCACHER1(x) ((x) << S_AWCACHER1)
+#define G_AWCACHER1(x) (((x) >> S_AWCACHER1) & M_AWCACHER1)
+
+#define S_AWPROTR1 21
+#define M_AWPROTR1 0xfU
+#define V_AWPROTR1(x) ((x) << S_AWPROTR1)
+#define G_AWPROTR1(x) (((x) >> S_AWPROTR1) & M_AWPROTR1)
+
+#define S_AWSNOOPR1 18
+#define M_AWSNOOPR1 0x7U
+#define V_AWSNOOPR1(x) ((x) << S_AWSNOOPR1)
+#define G_AWSNOOPR1(x) (((x) >> S_AWSNOOPR1) & M_AWSNOOPR1)
+
+#define S_AWDOMAINR1 16
+#define M_AWDOMAINR1 0x3U
+#define V_AWDOMAINR1(x) ((x) << S_AWDOMAINR1)
+#define G_AWDOMAINR1(x) (((x) >> S_AWDOMAINR1) & M_AWDOMAINR1)
+
+#define S_AWLOCKR0 13
+#define V_AWLOCKR0(x) ((x) << S_AWLOCKR0)
+#define F_AWLOCKR0 V_AWLOCKR0(1U)
+
+#define S_AWCACHER0 9
+#define M_AWCACHER0 0xfU
+#define V_AWCACHER0(x) ((x) << S_AWCACHER0)
+#define G_AWCACHER0(x) (((x) >> S_AWCACHER0) & M_AWCACHER0)
+
+#define S_AWPROTR0 5
+#define M_AWPROTR0 0xfU
+#define V_AWPROTR0(x) ((x) << S_AWPROTR0)
+#define G_AWPROTR0(x) (((x) >> S_AWPROTR0) & M_AWPROTR0)
+
+#define S_AWSNOOPR0 2
+#define M_AWSNOOPR0 0x7U
+#define V_AWSNOOPR0(x) ((x) << S_AWSNOOPR0)
+#define G_AWSNOOPR0(x) (((x) >> S_AWSNOOPR0) & M_AWSNOOPR0)
+
+#define S_AWDOMAINR0 0
+#define M_AWDOMAINR0 0x3U
+#define V_AWDOMAINR0(x) ((x) << S_AWDOMAINR0)
+#define G_AWDOMAINR0(x) (((x) >> S_AWDOMAINR0) & M_AWDOMAINR0)
+
+#define A_ARM_MA2AXI_AR_ATTR 0x47184
+
+#define S_ARLOCKR1 29
+#define V_ARLOCKR1(x) ((x) << S_ARLOCKR1)
+#define F_ARLOCKR1 V_ARLOCKR1(1U)
+
+#define S_ARCACHER1 25
+#define M_ARCACHER1 0xfU
+#define V_ARCACHER1(x) ((x) << S_ARCACHER1)
+#define G_ARCACHER1(x) (((x) >> S_ARCACHER1) & M_ARCACHER1)
+
+#define S_ARPROTR1 21
+#define M_ARPROTR1 0xfU
+#define V_ARPROTR1(x) ((x) << S_ARPROTR1)
+#define G_ARPROTR1(x) (((x) >> S_ARPROTR1) & M_ARPROTR1)
+
+#define S_ARSNOOPR1 18
+#define M_ARSNOOPR1 0x7U
+#define V_ARSNOOPR1(x) ((x) << S_ARSNOOPR1)
+#define G_ARSNOOPR1(x) (((x) >> S_ARSNOOPR1) & M_ARSNOOPR1)
+
+#define S_ARDOMAINR1 16
+#define M_ARDOMAINR1 0x3U
+#define V_ARDOMAINR1(x) ((x) << S_ARDOMAINR1)
+#define G_ARDOMAINR1(x) (((x) >> S_ARDOMAINR1) & M_ARDOMAINR1)
+
+#define S_ARLOCKR0 13
+#define V_ARLOCKR0(x) ((x) << S_ARLOCKR0)
+#define F_ARLOCKR0 V_ARLOCKR0(1U)
+
+#define S_ARCACHER0 9
+#define M_ARCACHER0 0xfU
+#define V_ARCACHER0(x) ((x) << S_ARCACHER0)
+#define G_ARCACHER0(x) (((x) >> S_ARCACHER0) & M_ARCACHER0)
+
+#define S_ARPROTR0 5
+#define M_ARPROTR0 0xfU
+#define V_ARPROTR0(x) ((x) << S_ARPROTR0)
+#define G_ARPROTR0(x) (((x) >> S_ARPROTR0) & M_ARPROTR0)
+
+#define S_ARSNOOPR0 2
+#define M_ARSNOOPR0 0x7U
+#define V_ARSNOOPR0(x) ((x) << S_ARSNOOPR0)
+#define G_ARSNOOPR0(x) (((x) >> S_ARSNOOPR0) & M_ARSNOOPR0)
+
+#define S_ARDOMAINR0 0
+#define M_ARDOMAINR0 0x3U
+#define V_ARDOMAINR0(x) ((x) << S_ARDOMAINR0)
+#define G_ARDOMAINR0(x) (((x) >> S_ARDOMAINR0) & M_ARDOMAINR0)
+
+#define A_ARM_MA2AXI_SNOOP_RGN 0x47188
+
+#define S_SNOOP_END 16
+#define M_SNOOP_END 0xffffU
+#define V_SNOOP_END(x) ((x) << S_SNOOP_END)
+#define G_SNOOP_END(x) (((x) >> S_SNOOP_END) & M_SNOOP_END)
+
+#define S_SNOOP_START 0
+#define M_SNOOP_START 0xffffU
+#define V_SNOOP_START(x) ((x) << S_SNOOP_START)
+#define G_SNOOP_START(x) (((x) >> S_SNOOP_START) & M_SNOOP_START)
+
+#define A_ARM_PERIPHERAL_INT_CAUSE 0x4718c
+
+#define S_TIMER_INT 5
+#define V_TIMER_INT(x) ((x) << S_TIMER_INT)
+#define F_TIMER_INT V_TIMER_INT(1U)
+
+#define S_NVME_INT 4
+#define V_NVME_INT(x) ((x) << S_NVME_INT)
+#define F_NVME_INT V_NVME_INT(1U)
+
+#define S_EMMC_WAKEUP_INT 3
+#define V_EMMC_WAKEUP_INT(x) ((x) << S_EMMC_WAKEUP_INT)
+#define F_EMMC_WAKEUP_INT V_EMMC_WAKEUP_INT(1U)
+
+#define S_EMMC_INT 2
+#define V_EMMC_INT(x) ((x) << S_EMMC_INT)
+#define F_EMMC_INT V_EMMC_INT(1U)
+
+#define S_USB_MC_INT 1
+#define V_USB_MC_INT(x) ((x) << S_USB_MC_INT)
+#define F_USB_MC_INT V_USB_MC_INT(1U)
+
+#define S_USB_DMA_INT 0
+#define V_USB_DMA_INT(x) ((x) << S_USB_DMA_INT)
+#define F_USB_DMA_INT V_USB_DMA_INT(1U)
+
+#define A_ARM_SCRATCH_PAD4 0x47190
+
+#define S_PAD4 15
+#define M_PAD4 0x1ffffU
+#define V_PAD4(x) ((x) << S_PAD4)
+#define G_PAD4(x) (((x) >> S_PAD4) & M_PAD4)
+
+#define S_ARM_DB_CNT 0
+#define M_ARM_DB_CNT 0x7fffU
+#define V_ARM_DB_CNT(x) ((x) << S_ARM_DB_CNT)
+#define G_ARM_DB_CNT(x) (((x) >> S_ARM_DB_CNT) & M_ARM_DB_CNT)
+
+#define A_ARM_SCRATCH_PAD5 0x47194
+#define A_ARM_SCRATCH_PAD6 0x47198
+#define A_ARM_SCRATCH_PAD7 0x4719c
+#define A_ARM_NVME_DB_EMU_INDEX 0x471a0
+#define A_ARM_NVME_DB_EMU_REGION_CTL 0x471a4
+
+#define S_WINDOW_EN 4
+#define V_WINDOW_EN(x) ((x) << S_WINDOW_EN)
+#define F_WINDOW_EN V_WINDOW_EN(1U)
+
+#define S_RGN2_INT_EN 3
+#define V_RGN2_INT_EN(x) ((x) << S_RGN2_INT_EN)
+#define F_RGN2_INT_EN V_RGN2_INT_EN(1U)
+
+#define S_RGN1_INT_EN 2
+#define V_RGN1_INT_EN(x) ((x) << S_RGN1_INT_EN)
+#define F_RGN1_INT_EN V_RGN1_INT_EN(1U)
+
+#define S_QUEUE_EN 1
+#define V_QUEUE_EN(x) ((x) << S_QUEUE_EN)
+#define F_QUEUE_EN V_QUEUE_EN(1U)
+
+#define S_RGN0_INT_EN 0
+#define V_RGN0_INT_EN(x) ((x) << S_RGN0_INT_EN)
+#define F_RGN0_INT_EN V_RGN0_INT_EN(1U)
+
+#define A_ARM_NVME_DB_EMU_DEVICE_CTL 0x471a8
+
+#define S_DEVICE_SIZE 8
+#define M_DEVICE_SIZE 0xfU
+#define V_DEVICE_SIZE(x) ((x) << S_DEVICE_SIZE)
+#define G_DEVICE_SIZE(x) (((x) >> S_DEVICE_SIZE) & M_DEVICE_SIZE)
+
+#define S_RGN1_SIZE 4
+#define M_RGN1_SIZE 0xfU
+#define V_RGN1_SIZE(x) ((x) << S_RGN1_SIZE)
+#define G_RGN1_SIZE(x) (((x) >> S_RGN1_SIZE) & M_RGN1_SIZE)
+
+#define S_RGN0_SIZE 0
+#define M_RGN0_SIZE 0xfU
+#define V_RGN0_SIZE(x) ((x) << S_RGN0_SIZE)
+#define G_RGN0_SIZE(x) (((x) >> S_RGN0_SIZE) & M_RGN0_SIZE)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_START_ADDR 0x471b0
+
+#define S_T7_4_ADDR 0
+#define M_T7_4_ADDR 0xfffffffU
+#define V_T7_4_ADDR(x) ((x) << S_T7_4_ADDR)
+#define G_T7_4_ADDR(x) (((x) >> S_T7_4_ADDR) & M_T7_4_ADDR)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_END_ADDR 0x471b4
+#define A_ARM_NVME_DB_EMU_QBASE_ADDR 0x471b8
+#define A_ARM_NVME_DB_EMU_QUEUE_CID 0x471bc
+
+#define S_T7_CID 0
+#define M_T7_CID 0x1ffffU
+#define V_T7_CID(x) ((x) << S_T7_CID)
+#define G_T7_CID(x) (((x) >> S_T7_CID) & M_T7_CID)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL 0x471c0
+
+#define S_INT_EN 27
+#define V_INT_EN(x) ((x) << S_INT_EN)
+#define F_INT_EN V_INT_EN(1U)
+
+#define S_THRESHOLD 10
+#define M_THRESHOLD 0x1ffffU
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+#define G_THRESHOLD(x) (((x) >> S_THRESHOLD) & M_THRESHOLD)
+
+#define S_T7_1_SIZE 0
+#define M_T7_1_SIZE 0x3ffU
+#define V_T7_1_SIZE(x) ((x) << S_T7_1_SIZE)
+#define G_T7_1_SIZE(x) (((x) >> S_T7_1_SIZE) & M_T7_1_SIZE)
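
Control registers like the doorbell-emulation queue control above are typically written by OR-ing V_/F_ terms into one value. A hedged sketch, with the field semantics inferred from the macro names only:

/*
 * Compose A_ARM_NVME_DB_EMU_QUEUE_CTL from its fields; semantics
 * are inferred from the names, and mmio_write32() is the same
 * hypothetical helper used in the sketches above.
 */
static void
db_emu_queue_setup(volatile uint32_t *base, uint32_t threshold, uint32_t size)
{
	uint32_t v = F_INT_EN |
	    V_THRESHOLD(threshold & M_THRESHOLD) |
	    V_T7_1_SIZE(size & M_T7_1_SIZE);

	mmio_write32(base, A_ARM_NVME_DB_EMU_QUEUE_CTL, v);
}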
+
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_L 0x471c4
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_H 0x471c8
+#define A_ARM_NVME_DB_EMU_MSIX_OFFSET 0x471cc
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_L 0x471d0
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_H 0x471d4
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_OFFSET 0x471d8
+#define A_ARM_CERR_INT_CAUSE0 0x471dc
+
+#define S_WRDATA_FIFO0_CERR 31
+#define V_WRDATA_FIFO0_CERR(x) ((x) << S_WRDATA_FIFO0_CERR)
+#define F_WRDATA_FIFO0_CERR V_WRDATA_FIFO0_CERR(1U)
+
+#define S_WRDATA_FIFO1_CERR 30
+#define V_WRDATA_FIFO1_CERR(x) ((x) << S_WRDATA_FIFO1_CERR)
+#define F_WRDATA_FIFO1_CERR V_WRDATA_FIFO1_CERR(1U)
+
+#define S_WR512DATAFIFO0_CERR 29
+#define V_WR512DATAFIFO0_CERR(x) ((x) << S_WR512DATAFIFO0_CERR)
+#define F_WR512DATAFIFO0_CERR V_WR512DATAFIFO0_CERR(1U)
+
+#define S_WR512DATAFIFO1_CERR 28
+#define V_WR512DATAFIFO1_CERR(x) ((x) << S_WR512DATAFIFO1_CERR)
+#define F_WR512DATAFIFO1_CERR V_WR512DATAFIFO1_CERR(1U)
+
+#define S_RDATAFIFO0_CERR 27
+#define V_RDATAFIFO0_CERR(x) ((x) << S_RDATAFIFO0_CERR)
+#define F_RDATAFIFO0_CERR V_RDATAFIFO0_CERR(1U)
+
+#define S_RDATAFIFO1_CERR 26
+#define V_RDATAFIFO1_CERR(x) ((x) << S_RDATAFIFO1_CERR)
+#define F_RDATAFIFO1_CERR V_RDATAFIFO1_CERR(1U)
+
+#define S_ROBUFF_CORERR0 25
+#define V_ROBUFF_CORERR0(x) ((x) << S_ROBUFF_CORERR0)
+#define F_ROBUFF_CORERR0 V_ROBUFF_CORERR0(1U)
+
+#define S_ROBUFF_CORERR1 24
+#define V_ROBUFF_CORERR1(x) ((x) << S_ROBUFF_CORERR1)
+#define F_ROBUFF_CORERR1 V_ROBUFF_CORERR1(1U)
+
+#define S_ROBUFF_CORERR2 23
+#define V_ROBUFF_CORERR2(x) ((x) << S_ROBUFF_CORERR2)
+#define F_ROBUFF_CORERR2 V_ROBUFF_CORERR2(1U)
+
+#define S_ROBUFF_CORERR3 22
+#define V_ROBUFF_CORERR3(x) ((x) << S_ROBUFF_CORERR3)
+#define F_ROBUFF_CORERR3 V_ROBUFF_CORERR3(1U)
+
+#define S_MA2AXI_RSPDATACORERR 21
+#define V_MA2AXI_RSPDATACORERR(x) ((x) << S_MA2AXI_RSPDATACORERR)
+#define F_MA2AXI_RSPDATACORERR V_MA2AXI_RSPDATACORERR(1U)
+
+#define S_RC_SRAM_CERR 19
+#define M_RC_SRAM_CERR 0x3U
+#define V_RC_SRAM_CERR(x) ((x) << S_RC_SRAM_CERR)
+#define G_RC_SRAM_CERR(x) (((x) >> S_RC_SRAM_CERR) & M_RC_SRAM_CERR)
+
+#define S_RC_WFIFO_OUTCERR 18
+#define V_RC_WFIFO_OUTCERR(x) ((x) << S_RC_WFIFO_OUTCERR)
+#define F_RC_WFIFO_OUTCERR V_RC_WFIFO_OUTCERR(1U)
+
+#define S_RC_RSPFIFO_CERR 17
+#define V_RC_RSPFIFO_CERR(x) ((x) << S_RC_RSPFIFO_CERR)
+#define F_RC_RSPFIFO_CERR V_RC_RSPFIFO_CERR(1U)
+
+#define S_MSI_MEM_CERR 16
+#define V_MSI_MEM_CERR(x) ((x) << S_MSI_MEM_CERR)
+#define F_MSI_MEM_CERR V_MSI_MEM_CERR(1U)
+
+#define S_INIC_WRDATA_FIFO_CERR 15
+#define V_INIC_WRDATA_FIFO_CERR(x) ((x) << S_INIC_WRDATA_FIFO_CERR)
+#define F_INIC_WRDATA_FIFO_CERR V_INIC_WRDATA_FIFO_CERR(1U)
+
+#define S_INIC_RDATAFIFO_CERR 14
+#define V_INIC_RDATAFIFO_CERR(x) ((x) << S_INIC_RDATAFIFO_CERR)
+#define F_INIC_RDATAFIFO_CERR V_INIC_RDATAFIFO_CERR(1U)
+
+#define S_ARM_DB_SRAM_CERR 12
+#define M_ARM_DB_SRAM_CERR 0x3U
+#define V_ARM_DB_SRAM_CERR(x) ((x) << S_ARM_DB_SRAM_CERR)
+#define G_ARM_DB_SRAM_CERR(x) (((x) >> S_ARM_DB_SRAM_CERR) & M_ARM_DB_SRAM_CERR)
+
+#define S_ICB_RAM_CERR 11
+#define V_ICB_RAM_CERR(x) ((x) << S_ICB_RAM_CERR)
+#define F_ICB_RAM_CERR V_ICB_RAM_CERR(1U)
+
+#define S_CC_SRAM_PKA_CERR 10
+#define V_CC_SRAM_PKA_CERR(x) ((x) << S_CC_SRAM_PKA_CERR)
+#define F_CC_SRAM_PKA_CERR V_CC_SRAM_PKA_CERR(1U)
+
+#define S_CC_SRAM_SEC_CERR 9
+#define V_CC_SRAM_SEC_CERR(x) ((x) << S_CC_SRAM_SEC_CERR)
+#define F_CC_SRAM_SEC_CERR V_CC_SRAM_SEC_CERR(1U)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL_2 0x471e0
+
+#define S_INTERRUPT_CLEAR 0
+#define V_INTERRUPT_CLEAR(x) ((x) << S_INTERRUPT_CLEAR)
+#define F_INTERRUPT_CLEAR V_INTERRUPT_CLEAR(1U)
+
+#define A_ARM_PERIPHERAL_INT_ENB 0x471e4
+#define A_ARM_CERR_INT_ENB0 0x471e8
+#define A_ARM_CPU_DBG_ROM_ADDR0 0x47200
+
+#define S_CPUDBGROMADDR0 0
+#define M_CPUDBGROMADDR0 0xfffffU
+#define V_CPUDBGROMADDR0(x) ((x) << S_CPUDBGROMADDR0)
+#define G_CPUDBGROMADDR0(x) (((x) >> S_CPUDBGROMADDR0) & M_CPUDBGROMADDR0)
+
+#define A_ARM_CPU_DBG_ROM_ADDR1 0x47204
+
+#define S_CPUDBGROMADDR1 0
+#define M_CPUDBGROMADDR1 0x3ffU
+#define V_CPUDBGROMADDR1(x) ((x) << S_CPUDBGROMADDR1)
+#define G_CPUDBGROMADDR1(x) (((x) >> S_CPUDBGROMADDR1) & M_CPUDBGROMADDR1)
+
+#define A_ARM_CPU_DBG_ROM_ADDR_VALID 0x47208
+
+#define S_CPUDBGROMADDRVALID 0
+#define V_CPUDBGROMADDRVALID(x) ((x) << S_CPUDBGROMADDRVALID)
+#define F_CPUDBGROMADDRVALID V_CPUDBGROMADDRVALID(1U)
+
+#define A_ARM_PERR_ENABLE0 0x4720c
+#define A_ARM_SRAM2_WRITE_DATA3 0x47210
+#define A_ARM_SRAM2_READ_DATA3 0x4721c
+#define A_ARM_CPU_DFT_CFG 0x47220
+
+#define S_CPUMBISTREQ 11
+#define V_CPUMBISTREQ(x) ((x) << S_CPUMBISTREQ)
+#define F_CPUMBISTREQ V_CPUMBISTREQ(1U)
+
+#define S_CPUMBISTRSTN 10
+#define V_CPUMBISTRSTN(x) ((x) << S_CPUMBISTRSTN)
+#define F_CPUMBISTRSTN V_CPUMBISTRSTN(1U)
+
+#define S_CPUDFTDFTSE 9
+#define V_CPUDFTDFTSE(x) ((x) << S_CPUDFTDFTSE)
+#define F_CPUDFTDFTSE V_CPUDFTDFTSE(1U)
+
+#define S_CPUDFTRSTDISABLE 8
+#define V_CPUDFTRSTDISABLE(x) ((x) << S_CPUDFTRSTDISABLE)
+#define F_CPUDFTRSTDISABLE V_CPUDFTRSTDISABLE(1U)
+
+#define S_CPUDFTRAMDISABLE 7
+#define V_CPUDFTRAMDISABLE(x) ((x) << S_CPUDFTRAMDISABLE)
+#define F_CPUDFTRAMDISABLE V_CPUDFTRAMDISABLE(1U)
+
+#define S_CPUDFTMCPDISABLE 6
+#define V_CPUDFTMCPDISABLE(x) ((x) << S_CPUDFTMCPDISABLE)
+#define F_CPUDFTMCPDISABLE V_CPUDFTMCPDISABLE(1U)
+
+#define S_CPUDFTL2CLKDISABLE 5
+#define V_CPUDFTL2CLKDISABLE(x) ((x) << S_CPUDFTL2CLKDISABLE)
+#define F_CPUDFTL2CLKDISABLE V_CPUDFTL2CLKDISABLE(1U)
+
+#define S_CPUDFTCLKDISABLE3 4
+#define V_CPUDFTCLKDISABLE3(x) ((x) << S_CPUDFTCLKDISABLE3)
+#define F_CPUDFTCLKDISABLE3 V_CPUDFTCLKDISABLE3(1U)
+
+#define S_CPUDFTCLKDISABLE2 3
+#define V_CPUDFTCLKDISABLE2(x) ((x) << S_CPUDFTCLKDISABLE2)
+#define F_CPUDFTCLKDISABLE2 V_CPUDFTCLKDISABLE2(1U)
+
+#define S_CPUDFTCLKDISABLE1 2
+#define V_CPUDFTCLKDISABLE1(x) ((x) << S_CPUDFTCLKDISABLE1)
+#define F_CPUDFTCLKDISABLE1 V_CPUDFTCLKDISABLE1(1U)
+
+#define S_CPUDFTCLKDISABLE0 1
+#define V_CPUDFTCLKDISABLE0(x) ((x) << S_CPUDFTCLKDISABLE0)
+#define F_CPUDFTCLKDISABLE0 V_CPUDFTCLKDISABLE0(1U)
+
+#define S_CPUDFTCLKBYPASS 0
+#define V_CPUDFTCLKBYPASS(x) ((x) << S_CPUDFTCLKBYPASS)
+#define F_CPUDFTCLKBYPASS V_CPUDFTCLKBYPASS(1U)
+
+#define A_ARM_APB_CFG 0x47224
+
+#define S_APB_CFG 0
+#define M_APB_CFG 0x3ffffU
+#define V_APB_CFG(x) ((x) << S_APB_CFG)
+#define G_APB_CFG(x) (((x) >> S_APB_CFG) & M_APB_CFG)
+
+#define A_ARM_EMMC_BUFS 0x47228
+
+#define S_EMMC_BUFS_OEN 2
+#define M_EMMC_BUFS_OEN 0x3U
+#define V_EMMC_BUFS_OEN(x) ((x) << S_EMMC_BUFS_OEN)
+#define G_EMMC_BUFS_OEN(x) (((x) >> S_EMMC_BUFS_OEN) & M_EMMC_BUFS_OEN)
+
+#define S_EMMC_BUFS_I 0
+#define M_EMMC_BUFS_I 0x3U
+#define V_EMMC_BUFS_I(x) ((x) << S_EMMC_BUFS_I)
+#define G_EMMC_BUFS_I(x) (((x) >> S_EMMC_BUFS_I) & M_EMMC_BUFS_I)
+
+#define A_ARM_SWP_EN 0x4722c
+#define A_ARM_ADB_PWR_DWN_REQ_N 0x47230
+
+#define S_ADBPWRDWNREQN 0
+#define V_ADBPWRDWNREQN(x) ((x) << S_ADBPWRDWNREQN)
+#define F_ADBPWRDWNREQN V_ADBPWRDWNREQN(1U)
+
+#define A_ARM_GIC_USER 0x47238
+
+#define S_CPU_GIC_USER 0
+#define M_CPU_GIC_USER 0x7fU
+#define V_CPU_GIC_USER(x) ((x) << S_CPU_GIC_USER)
+#define G_CPU_GIC_USER(x) (((x) >> S_CPU_GIC_USER) & M_CPU_GIC_USER)
+
+#define A_ARM_DBPROC_SRAM_TH_ADDR 0x47240
+
+#define S_DBPROC_TH_ADDR 0
+#define M_DBPROC_TH_ADDR 0x1ffU
+#define V_DBPROC_TH_ADDR(x) ((x) << S_DBPROC_TH_ADDR)
+#define G_DBPROC_TH_ADDR(x) (((x) >> S_DBPROC_TH_ADDR) & M_DBPROC_TH_ADDR)
+
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA0 0x47244
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA1 0x47248
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA2 0x4724c
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA3 0x47250
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA0 0x47254
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA1 0x47258
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA2 0x4725c
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA3 0x47260
+#define A_ARM_SWP_EN_2 0x47264
+
+#define S_SWP_EN_2 0
+#define M_SWP_EN_2 0x3U
+#define V_SWP_EN_2(x) ((x) << S_SWP_EN_2)
+#define G_SWP_EN_2(x) (((x) >> S_SWP_EN_2) & M_SWP_EN_2)
+
+#define A_ARM_GIC_ERR 0x47268
+
+#define S_ECC_FATAL 1
+#define V_ECC_FATAL(x) ((x) << S_ECC_FATAL)
+#define F_ECC_FATAL V_ECC_FATAL(1U)
+
+#define S_AXIM_ERR 0
+#define V_AXIM_ERR(x) ((x) << S_AXIM_ERR)
+#define F_AXIM_ERR V_AXIM_ERR(1U)
+
+#define A_ARM_CPU_STAT 0x4726c
+
+#define S_CPU_L2_QACTIVE 12
+#define V_CPU_L2_QACTIVE(x) ((x) << S_CPU_L2_QACTIVE)
+#define F_CPU_L2_QACTIVE V_CPU_L2_QACTIVE(1U)
+
+#define S_WAKEUPM_O_ADB 11
+#define V_WAKEUPM_O_ADB(x) ((x) << S_WAKEUPM_O_ADB)
+#define F_WAKEUPM_O_ADB V_WAKEUPM_O_ADB(1U)
+
+#define S_PWRQACTIVEM_ADB 10
+#define V_PWRQACTIVEM_ADB(x) ((x) << S_PWRQACTIVEM_ADB)
+#define F_PWRQACTIVEM_ADB V_PWRQACTIVEM_ADB(1U)
+
+#define S_CLKQACTIVEM_ADB 9
+#define V_CLKQACTIVEM_ADB(x) ((x) << S_CLKQACTIVEM_ADB)
+#define F_CLKQACTIVEM_ADB V_CLKQACTIVEM_ADB(1U)
+
+#define S_CLKQDENYM_ADB 8
+#define V_CLKQDENYM_ADB(x) ((x) << S_CLKQDENYM_ADB)
+#define F_CLKQDENYM_ADB V_CLKQDENYM_ADB(1U)
+
+#define S_CLKQACCEPTNM_ADB 7
+#define V_CLKQACCEPTNM_ADB(x) ((x) << S_CLKQACCEPTNM_ADB)
+#define F_CLKQACCEPTNM_ADB V_CLKQACCEPTNM_ADB(1U)
+
+#define S_WAKEUPS_O_ADB 6
+#define V_WAKEUPS_O_ADB(x) ((x) << S_WAKEUPS_O_ADB)
+#define F_WAKEUPS_O_ADB V_WAKEUPS_O_ADB(1U)
+
+#define S_PWRQACTIVES_ADB 5
+#define V_PWRQACTIVES_ADB(x) ((x) << S_PWRQACTIVES_ADB)
+#define F_PWRQACTIVES_ADB V_PWRQACTIVES_ADB(1U)
+
+#define S_CLKQACTIVES_ADB 4
+#define V_CLKQACTIVES_ADB(x) ((x) << S_CLKQACTIVES_ADB)
+#define F_CLKQACTIVES_ADB V_CLKQACTIVES_ADB(1U)
+
+#define S_CLKQDENYS_ADB 3
+#define V_CLKQDENYS_ADB(x) ((x) << S_CLKQDENYS_ADB)
+#define F_CLKQDENYS_ADB V_CLKQDENYS_ADB(1U)
+
+#define S_CLKQACCEPTNS_ADB 2
+#define V_CLKQACCEPTNS_ADB(x) ((x) << S_CLKQACCEPTNS_ADB)
+#define F_CLKQACCEPTNS_ADB V_CLKQACCEPTNS_ADB(1U)
+
+#define S_PWRQDENYS_ADB 1
+#define V_PWRQDENYS_ADB(x) ((x) << S_PWRQDENYS_ADB)
+#define F_PWRQDENYS_ADB V_PWRQDENYS_ADB(1U)
+
+#define S_PWRQACCEPTNS_ADB 0
+#define V_PWRQACCEPTNS_ADB(x) ((x) << S_PWRQACCEPTNS_ADB)
+#define F_PWRQACCEPTNS_ADB V_PWRQACCEPTNS_ADB(1U)
+
+#define A_ARM_DEBUG_INT_WRITE_DATA 0x47270
+
+#define S_DEBUG_INT_WRITE_DATA 0
+#define M_DEBUG_INT_WRITE_DATA 0xfffU
+#define V_DEBUG_INT_WRITE_DATA(x) ((x) << S_DEBUG_INT_WRITE_DATA)
+#define G_DEBUG_INT_WRITE_DATA(x) (((x) >> S_DEBUG_INT_WRITE_DATA) & M_DEBUG_INT_WRITE_DATA)
+
+#define A_ARM_DEBUG_INT_STAT 0x47274
+
+#define S_DEBUG_INT_STATUS_REG 0
+#define M_DEBUG_INT_STATUS_REG 0xfffU
+#define V_DEBUG_INT_STATUS_REG(x) ((x) << S_DEBUG_INT_STATUS_REG)
+#define G_DEBUG_INT_STATUS_REG(x) (((x) >> S_DEBUG_INT_STATUS_REG) & M_DEBUG_INT_STATUS_REG)
+
+#define A_ARM_DEBUG_STAT 0x47278
+
+#define S_ARM_DEBUG_STAT 0
+#define M_ARM_DEBUG_STAT 0x3fffU
+#define V_ARM_DEBUG_STAT(x) ((x) << S_ARM_DEBUG_STAT)
+#define G_ARM_DEBUG_STAT(x) (((x) >> S_ARM_DEBUG_STAT) & M_ARM_DEBUG_STAT)
+
+#define A_ARM_SIZE_STAT 0x4727c
+
+#define S_ARM_SIZE_STAT 0
+#define M_ARM_SIZE_STAT 0x3fffffffU
+#define V_ARM_SIZE_STAT(x) ((x) << S_ARM_SIZE_STAT)
+#define G_ARM_SIZE_STAT(x) (((x) >> S_ARM_SIZE_STAT) & M_ARM_SIZE_STAT)
+
+#define A_ARM_CCI_CFG0 0x47280
+
+#define S_CCIBROADCASTCACHEMAINT 28
+#define M_CCIBROADCASTCACHEMAINT 0x7U
+#define V_CCIBROADCASTCACHEMAINT(x) ((x) << S_CCIBROADCASTCACHEMAINT)
+#define G_CCIBROADCASTCACHEMAINT(x) (((x) >> S_CCIBROADCASTCACHEMAINT) & M_CCIBROADCASTCACHEMAINT)
+
+#define S_CCISTRIPINGGRANULE 25
+#define M_CCISTRIPINGGRANULE 0x7U
+#define V_CCISTRIPINGGRANULE(x) ((x) << S_CCISTRIPINGGRANULE)
+#define G_CCISTRIPINGGRANULE(x) (((x) >> S_CCISTRIPINGGRANULE) & M_CCISTRIPINGGRANULE)
+
+#define S_CCIPERIPHBASE 0
+#define M_CCIPERIPHBASE 0x1ffffffU
+#define V_CCIPERIPHBASE(x) ((x) << S_CCIPERIPHBASE)
+#define G_CCIPERIPHBASE(x) (((x) >> S_CCIPERIPHBASE) & M_CCIPERIPHBASE)
+
+#define A_ARM_CCI_CFG1 0x47284
+
+#define S_CCIDFTRSTDISABLE 18
+#define V_CCIDFTRSTDISABLE(x) ((x) << S_CCIDFTRSTDISABLE)
+#define F_CCIDFTRSTDISABLE V_CCIDFTRSTDISABLE(1U)
+
+#define S_CCISPNIDEN 17
+#define V_CCISPNIDEN(x) ((x) << S_CCISPNIDEN)
+#define F_CCISPNIDEN V_CCISPNIDEN(1U)
+
+#define S_CCINIDEN 16
+#define V_CCINIDEN(x) ((x) << S_CCINIDEN)
+#define F_CCINIDEN V_CCINIDEN(1U)
+
+#define S_CCIACCHANNELN 11
+#define M_CCIACCHANNELN 0x1fU
+#define V_CCIACCHANNELN(x) ((x) << S_CCIACCHANNELN)
+#define G_CCIACCHANNELN(x) (((x) >> S_CCIACCHANNELN) & M_CCIACCHANNELN)
+
+#define S_CCIQOSOVERRIDE 6
+#define M_CCIQOSOVERRIDE 0x1fU
+#define V_CCIQOSOVERRIDE(x) ((x) << S_CCIQOSOVERRIDE)
+#define G_CCIQOSOVERRIDE(x) (((x) >> S_CCIQOSOVERRIDE) & M_CCIQOSOVERRIDE)
+
+#define S_CCIBUFFERABLEOVERRIDE 3
+#define M_CCIBUFFERABLEOVERRIDE 0x7U
+#define V_CCIBUFFERABLEOVERRIDE(x) ((x) << S_CCIBUFFERABLEOVERRIDE)
+#define G_CCIBUFFERABLEOVERRIDE(x) (((x) >> S_CCIBUFFERABLEOVERRIDE) & M_CCIBUFFERABLEOVERRIDE)
+
+#define S_CCIBARRIERTERMINATE 0
+#define M_CCIBARRIERTERMINATE 0x7U
+#define V_CCIBARRIERTERMINATE(x) ((x) << S_CCIBARRIERTERMINATE)
+#define G_CCIBARRIERTERMINATE(x) (((x) >> S_CCIBARRIERTERMINATE) & M_CCIBARRIERTERMINATE)
+
+#define A_ARM_CCI_CFG2 0x47288
+
+#define S_CCIADDRMAP15 30
+#define M_CCIADDRMAP15 0x3U
+#define V_CCIADDRMAP15(x) ((x) << S_CCIADDRMAP15)
+#define G_CCIADDRMAP15(x) (((x) >> S_CCIADDRMAP15) & M_CCIADDRMAP15)
+
+#define S_CCIADDRMAP14 28
+#define M_CCIADDRMAP14 0x3U
+#define V_CCIADDRMAP14(x) ((x) << S_CCIADDRMAP14)
+#define G_CCIADDRMAP14(x) (((x) >> S_CCIADDRMAP14) & M_CCIADDRMAP14)
+
+#define S_CCIADDRMAP13 26
+#define M_CCIADDRMAP13 0x3U
+#define V_CCIADDRMAP13(x) ((x) << S_CCIADDRMAP13)
+#define G_CCIADDRMAP13(x) (((x) >> S_CCIADDRMAP13) & M_CCIADDRMAP13)
+
+#define S_CCIADDRMAP12 24
+#define M_CCIADDRMAP12 0x3U
+#define V_CCIADDRMAP12(x) ((x) << S_CCIADDRMAP12)
+#define G_CCIADDRMAP12(x) (((x) >> S_CCIADDRMAP12) & M_CCIADDRMAP12)
+
+#define S_CCIADDRMAP11 22
+#define M_CCIADDRMAP11 0x3U
+#define V_CCIADDRMAP11(x) ((x) << S_CCIADDRMAP11)
+#define G_CCIADDRMAP11(x) (((x) >> S_CCIADDRMAP11) & M_CCIADDRMAP11)
+
+#define S_CCIADDRMAP10 20
+#define M_CCIADDRMAP10 0x3U
+#define V_CCIADDRMAP10(x) ((x) << S_CCIADDRMAP10)
+#define G_CCIADDRMAP10(x) (((x) >> S_CCIADDRMAP10) & M_CCIADDRMAP10)
+
+#define S_CCIADDRMAP9 18
+#define M_CCIADDRMAP9 0x3U
+#define V_CCIADDRMAP9(x) ((x) << S_CCIADDRMAP9)
+#define G_CCIADDRMAP9(x) (((x) >> S_CCIADDRMAP9) & M_CCIADDRMAP9)
+
+#define S_CCIADDRMAP8 16
+#define M_CCIADDRMAP8 0x3U
+#define V_CCIADDRMAP8(x) ((x) << S_CCIADDRMAP8)
+#define G_CCIADDRMAP8(x) (((x) >> S_CCIADDRMAP8) & M_CCIADDRMAP8)
+
+#define S_CCIADDRMAP7 14
+#define M_CCIADDRMAP7 0x3U
+#define V_CCIADDRMAP7(x) ((x) << S_CCIADDRMAP7)
+#define G_CCIADDRMAP7(x) (((x) >> S_CCIADDRMAP7) & M_CCIADDRMAP7)
+
+#define S_CCIADDRMAP6 12
+#define M_CCIADDRMAP6 0x3U
+#define V_CCIADDRMAP6(x) ((x) << S_CCIADDRMAP6)
+#define G_CCIADDRMAP6(x) (((x) >> S_CCIADDRMAP6) & M_CCIADDRMAP6)
+
+#define S_CCIADDRMAP5 10
+#define M_CCIADDRMAP5 0x3U
+#define V_CCIADDRMAP5(x) ((x) << S_CCIADDRMAP5)
+#define G_CCIADDRMAP5(x) (((x) >> S_CCIADDRMAP5) & M_CCIADDRMAP5)
+
+#define S_CCIADDRMAP4 8
+#define M_CCIADDRMAP4 0x3U
+#define V_CCIADDRMAP4(x) ((x) << S_CCIADDRMAP4)
+#define G_CCIADDRMAP4(x) (((x) >> S_CCIADDRMAP4) & M_CCIADDRMAP4)
+
+#define S_CCIADDRMAP3 6
+#define M_CCIADDRMAP3 0x3U
+#define V_CCIADDRMAP3(x) ((x) << S_CCIADDRMAP3)
+#define G_CCIADDRMAP3(x) (((x) >> S_CCIADDRMAP3) & M_CCIADDRMAP3)
+
+#define S_CCIADDRMAP2 4
+#define M_CCIADDRMAP2 0x3U
+#define V_CCIADDRMAP2(x) ((x) << S_CCIADDRMAP2)
+#define G_CCIADDRMAP2(x) (((x) >> S_CCIADDRMAP2) & M_CCIADDRMAP2)
+
+#define S_CCIADDRMAP1 2
+#define M_CCIADDRMAP1 0x3U
+#define V_CCIADDRMAP1(x) ((x) << S_CCIADDRMAP1)
+#define G_CCIADDRMAP1(x) (((x) >> S_CCIADDRMAP1) & M_CCIADDRMAP1)
+
+#define S_CCIADDRMAP0 0
+#define M_CCIADDRMAP0 0x3U
+#define V_CCIADDRMAP0(x) ((x) << S_CCIADDRMAP0)
+#define G_CCIADDRMAP0(x) (((x) >> S_CCIADDRMAP0) & M_CCIADDRMAP0)
+
+#define A_ARM_CCI_STATUS 0x4728c
+
+#define S_CCICACTIVE 6
+#define V_CCICACTIVE(x) ((x) << S_CCICACTIVE)
+#define F_CCICACTIVE V_CCICACTIVE(1U)
+
+#define S_CCICSYSACK 5
+#define V_CCICSYSACK(x) ((x) << S_CCICSYSACK)
+#define F_CCICSYSACK V_CCICSYSACK(1U)
+
+#define S_CCINEVNTCNTOVERFLOW 0
+#define M_CCINEVNTCNTOVERFLOW 0x1fU
+#define V_CCINEVNTCNTOVERFLOW(x) ((x) << S_CCINEVNTCNTOVERFLOW)
+#define G_CCINEVNTCNTOVERFLOW(x) (((x) >> S_CCINEVNTCNTOVERFLOW) & M_CCINEVNTCNTOVERFLOW)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_CFG 0x47290
+
+#define S_CCIVWREADYVN3M 20
+#define V_CCIVWREADYVN3M(x) ((x) << S_CCIVWREADYVN3M)
+#define F_CCIVWREADYVN3M V_CCIVWREADYVN3M(1U)
+
+#define S_CCIVAWREADYVN3M 19
+#define V_CCIVAWREADYVN3M(x) ((x) << S_CCIVAWREADYVN3M)
+#define F_CCIVAWREADYVN3M V_CCIVAWREADYVN3M(1U)
+
+#define S_CCIVARREADYVN3M 18
+#define V_CCIVARREADYVN3M(x) ((x) << S_CCIVARREADYVN3M)
+#define F_CCIVARREADYVN3M V_CCIVARREADYVN3M(1U)
+
+#define S_CCIVWREADYVN2M 17
+#define V_CCIVWREADYVN2M(x) ((x) << S_CCIVWREADYVN2M)
+#define F_CCIVWREADYVN2M V_CCIVWREADYVN2M(1U)
+
+#define S_CCIVAWREADYVN2M 16
+#define V_CCIVAWREADYVN2M(x) ((x) << S_CCIVAWREADYVN2M)
+#define F_CCIVAWREADYVN2M V_CCIVAWREADYVN2M(1U)
+
+#define S_CCIVARREADYVN2M 15
+#define V_CCIVARREADYVN2M(x) ((x) << S_CCIVARREADYVN2M)
+#define F_CCIVARREADYVN2M V_CCIVARREADYVN2M(1U)
+
+#define S_CCIVWREADYVN1M 14
+#define V_CCIVWREADYVN1M(x) ((x) << S_CCIVWREADYVN1M)
+#define F_CCIVWREADYVN1M V_CCIVWREADYVN1M(1U)
+
+#define S_CCIVAWREADYVN1M 13
+#define V_CCIVAWREADYVN1M(x) ((x) << S_CCIVAWREADYVN1M)
+#define F_CCIVAWREADYVN1M V_CCIVAWREADYVN1M(1U)
+
+#define S_CCIVARREADYVN1M 12
+#define V_CCIVARREADYVN1M(x) ((x) << S_CCIVARREADYVN1M)
+#define F_CCIVARREADYVN1M V_CCIVARREADYVN1M(1U)
+
+#define S_CCIVWREADYVN0M 11
+#define V_CCIVWREADYVN0M(x) ((x) << S_CCIVWREADYVN0M)
+#define F_CCIVWREADYVN0M V_CCIVWREADYVN0M(1U)
+
+#define S_CCIVAWREADYVN0M 10
+#define V_CCIVAWREADYVN0M(x) ((x) << S_CCIVAWREADYVN0M)
+#define F_CCIVAWREADYVN0M V_CCIVAWREADYVN0M(1U)
+
+#define S_CCIVARREADYVN0M 9
+#define V_CCIVARREADYVN0M(x) ((x) << S_CCIVARREADYVN0M)
+#define F_CCIVARREADYVN0M V_CCIVARREADYVN0M(1U)
+
+#define S_CCIQVNPREALLOCWM 5
+#define M_CCIQVNPREALLOCWM 0xfU
+#define V_CCIQVNPREALLOCWM(x) ((x) << S_CCIQVNPREALLOCWM)
+#define G_CCIQVNPREALLOCWM(x) (((x) >> S_CCIQVNPREALLOCWM) & M_CCIQVNPREALLOCWM)
+
+#define S_CCIQVNPREALLOCRM 1
+#define M_CCIQVNPREALLOCRM 0xfU
+#define V_CCIQVNPREALLOCRM(x) ((x) << S_CCIQVNPREALLOCRM)
+#define G_CCIQVNPREALLOCRM(x) (((x) >> S_CCIQVNPREALLOCRM) & M_CCIQVNPREALLOCRM)
+
+#define S_CCIQVNENABLEM 0
+#define V_CCIQVNENABLEM(x) ((x) << S_CCIQVNENABLEM)
+#define F_CCIQVNENABLEM V_CCIQVNENABLEM(1U)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_STATUS 0x47294
+
+#define S_CCIVWVALIDN3M 31
+#define V_CCIVWVALIDN3M(x) ((x) << S_CCIVWVALIDN3M)
+#define F_CCIVWVALIDN3M V_CCIVWVALIDN3M(1U)
+
+#define S_CCIVAWVALIDN3M 30
+#define V_CCIVAWVALIDN3M(x) ((x) << S_CCIVAWVALIDN3M)
+#define F_CCIVAWVALIDN3M V_CCIVAWVALIDN3M(1U)
+
+#define S_CCIVAWQOSN3M 29
+#define V_CCIVAWQOSN3M(x) ((x) << S_CCIVAWQOSN3M)
+#define F_CCIVAWQOSN3M V_CCIVAWQOSN3M(1U)
+
+#define S_CCIVARVALIDN3M 28
+#define V_CCIVARVALIDN3M(x) ((x) << S_CCIVARVALIDN3M)
+#define F_CCIVARVALIDN3M V_CCIVARVALIDN3M(1U)
+
+#define S_CCIVARQOSN3M 24
+#define M_CCIVARQOSN3M 0xfU
+#define V_CCIVARQOSN3M(x) ((x) << S_CCIVARQOSN3M)
+#define G_CCIVARQOSN3M(x) (((x) >> S_CCIVARQOSN3M) & M_CCIVARQOSN3M)
+
+#define S_CCIVWVALIDN2M 23
+#define V_CCIVWVALIDN2M(x) ((x) << S_CCIVWVALIDN2M)
+#define F_CCIVWVALIDN2M V_CCIVWVALIDN2M(1U)
+
+#define S_CCIVAWVALIDN2M 22
+#define V_CCIVAWVALIDN2M(x) ((x) << S_CCIVAWVALIDN2M)
+#define F_CCIVAWVALIDN2M V_CCIVAWVALIDN2M(1U)
+
+#define S_CCIVAWQOSN2M 21
+#define V_CCIVAWQOSN2M(x) ((x) << S_CCIVAWQOSN2M)
+#define F_CCIVAWQOSN2M V_CCIVAWQOSN2M(1U)
+
+#define S_CCIVARVALIDN2M 20
+#define V_CCIVARVALIDN2M(x) ((x) << S_CCIVARVALIDN2M)
+#define F_CCIVARVALIDN2M V_CCIVARVALIDN2M(1U)
+
+#define S_CCIVARQOSN2M 16
+#define M_CCIVARQOSN2M 0xfU
+#define V_CCIVARQOSN2M(x) ((x) << S_CCIVARQOSN2M)
+#define G_CCIVARQOSN2M(x) (((x) >> S_CCIVARQOSN2M) & M_CCIVARQOSN2M)
+
+#define S_CCIVWVALIDN1M 15
+#define V_CCIVWVALIDN1M(x) ((x) << S_CCIVWVALIDN1M)
+#define F_CCIVWVALIDN1M V_CCIVWVALIDN1M(1U)
+
+#define S_CCIVAWVALIDN1M 14
+#define V_CCIVAWVALIDN1M(x) ((x) << S_CCIVAWVALIDN1M)
+#define F_CCIVAWVALIDN1M V_CCIVAWVALIDN1M(1U)
+
+#define S_CCIVAWQOSN1M 13
+#define V_CCIVAWQOSN1M(x) ((x) << S_CCIVAWQOSN1M)
+#define F_CCIVAWQOSN1M V_CCIVAWQOSN1M(1U)
+
+#define S_CCIVARVALIDN1M 12
+#define V_CCIVARVALIDN1M(x) ((x) << S_CCIVARVALIDN1M)
+#define F_CCIVARVALIDN1M V_CCIVARVALIDN1M(1U)
+
+#define S_CCIVARQOSN1M 8
+#define M_CCIVARQOSN1M 0xfU
+#define V_CCIVARQOSN1M(x) ((x) << S_CCIVARQOSN1M)
+#define G_CCIVARQOSN1M(x) (((x) >> S_CCIVARQOSN1M) & M_CCIVARQOSN1M)
+
+#define S_CCIVWVALIDN0M 7
+#define V_CCIVWVALIDN0M(x) ((x) << S_CCIVWVALIDN0M)
+#define F_CCIVWVALIDN0M V_CCIVWVALIDN0M(1U)
+
+#define S_CCIVAWVALIDN0M 6
+#define V_CCIVAWVALIDN0M(x) ((x) << S_CCIVAWVALIDN0M)
+#define F_CCIVAWVALIDN0M V_CCIVAWVALIDN0M(1U)
+
+#define S_CCIVAWQOSN0M 5
+#define V_CCIVAWQOSN0M(x) ((x) << S_CCIVAWQOSN0M)
+#define F_CCIVAWQOSN0M V_CCIVAWQOSN0M(1U)
+
+#define S_CCIVARVALIDN0M 4
+#define V_CCIVARVALIDN0M(x) ((x) << S_CCIVARVALIDN0M)
+#define F_CCIVARVALIDN0M V_CCIVARVALIDN0M(1U)
+
+#define S_CCIVARQOSN0M 0
+#define M_CCIVARQOSN0M 0xfU
+#define V_CCIVARQOSN0M(x) ((x) << S_CCIVARQOSN0M)
+#define G_CCIVARQOSN0M(x) (((x) >> S_CCIVARQOSN0M) & M_CCIVARQOSN0M)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_CFG 0x472d0
+
+#define S_CCIQVNVNETS 0
+#define M_CCIQVNVNETS 0x3U
+#define V_CCIQVNVNETS(x) ((x) << S_CCIQVNVNETS)
+#define G_CCIQVNVNETS(x) (((x) >> S_CCIQVNVNETS) & M_CCIQVNVNETS)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_STATUS 0x472d4
+
+#define S_CCIEVNTAWQOS 4
+#define M_CCIEVNTAWQOS 0xfU
+#define V_CCIEVNTAWQOS(x) ((x) << S_CCIEVNTAWQOS)
+#define G_CCIEVNTAWQOS(x) (((x) >> S_CCIEVNTAWQOS) & M_CCIEVNTAWQOS)
+
+#define S_CCIEVNTARQOS 0
+#define M_CCIEVNTARQOS 0xfU
+#define V_CCIEVNTARQOS(x) ((x) << S_CCIEVNTARQOS)
+#define G_CCIEVNTARQOS(x) (((x) >> S_CCIEVNTARQOS) & M_CCIEVNTARQOS)
+
+#define A_ARM_CCI_EVNTBUS 0x47300
+#define A_ARM_CCI_RST_N 0x47318
+
+#define S_CCIRSTN 0
+#define V_CCIRSTN(x) ((x) << S_CCIRSTN)
+#define F_CCIRSTN V_CCIRSTN(1U)
+
+#define A_ARM_CCI_CSYREQ 0x4731c
+
+#define S_CCICSYSREQ 0
+#define V_CCICSYSREQ(x) ((x) << S_CCICSYSREQ)
+#define F_CCICSYSREQ V_CCICSYSREQ(1U)
+
+#define A_ARM_CCI_TR_DEBUGS0 0x47320
+
+#define S_CCIS0RCNT 24
+#define M_CCIS0RCNT 0xffU
+#define V_CCIS0RCNT(x) ((x) << S_CCIS0RCNT)
+#define G_CCIS0RCNT(x) (((x) >> S_CCIS0RCNT) & M_CCIS0RCNT)
+
+#define S_CCIS0ARCNT 16
+#define M_CCIS0ARCNT 0xffU
+#define V_CCIS0ARCNT(x) ((x) << S_CCIS0ARCNT)
+#define G_CCIS0ARCNT(x) (((x) >> S_CCIS0ARCNT) & M_CCIS0ARCNT)
+
+#define S_CCIS0WCNT 8
+#define M_CCIS0WCNT 0xffU
+#define V_CCIS0WCNT(x) ((x) << S_CCIS0WCNT)
+#define G_CCIS0WCNT(x) (((x) >> S_CCIS0WCNT) & M_CCIS0WCNT)
+
+#define S_CCIS0AWCNT 0
+#define M_CCIS0AWCNT 0xffU
+#define V_CCIS0AWCNT(x) ((x) << S_CCIS0AWCNT)
+#define G_CCIS0AWCNT(x) (((x) >> S_CCIS0AWCNT) & M_CCIS0AWCNT)
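
Read-side registers such as the CCI transaction-debug words above pack four 8-bit counters per 32-bit register; G_ unpacks each in turn. A brief sketch for slave port 0 (same hypothetical helpers as before):

/* Unpack the four 8-bit CCI slave-0 transaction counters. */
static void
cci_s0_counts(volatile uint32_t *base, uint8_t cnt[4])
{
	uint32_t v = mmio_read32(base, A_ARM_CCI_TR_DEBUGS0);

	cnt[0] = G_CCIS0AWCNT(v);
	cnt[1] = G_CCIS0WCNT(v);
	cnt[2] = G_CCIS0ARCNT(v);
	cnt[3] = G_CCIS0RCNT(v);
}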
+
+#define A_ARM_CCI_TR_DEBUGS1 0x47324
+
+#define S_CCIS1RCNT 24
+#define M_CCIS1RCNT 0xffU
+#define V_CCIS1RCNT(x) ((x) << S_CCIS1RCNT)
+#define G_CCIS1RCNT(x) (((x) >> S_CCIS1RCNT) & M_CCIS1RCNT)
+
+#define S_CCIS1ARCNT 16
+#define M_CCIS1ARCNT 0xffU
+#define V_CCIS1ARCNT(x) ((x) << S_CCIS1ARCNT)
+#define G_CCIS1ARCNT(x) (((x) >> S_CCIS1ARCNT) & M_CCIS1ARCNT)
+
+#define S_CCIS1WCNT 8
+#define M_CCIS1WCNT 0xffU
+#define V_CCIS1WCNT(x) ((x) << S_CCIS1WCNT)
+#define G_CCIS1WCNT(x) (((x) >> S_CCIS1WCNT) & M_CCIS1WCNT)
+
+#define S_CCIS1AWCNT 0
+#define M_CCIS1AWCNT 0xffU
+#define V_CCIS1AWCNT(x) ((x) << S_CCIS1AWCNT)
+#define G_CCIS1AWCNT(x) (((x) >> S_CCIS1AWCNT) & M_CCIS1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS2 0x47328
+
+#define S_CCIS2RCNT 24
+#define M_CCIS2RCNT 0xffU
+#define V_CCIS2RCNT(x) ((x) << S_CCIS2RCNT)
+#define G_CCIS2RCNT(x) (((x) >> S_CCIS2RCNT) & M_CCIS2RCNT)
+
+#define S_CCIS2ARCNT 16
+#define M_CCIS2ARCNT 0xffU
+#define V_CCIS2ARCNT(x) ((x) << S_CCIS2ARCNT)
+#define G_CCIS2ARCNT(x) (((x) >> S_CCIS2ARCNT) & M_CCIS2ARCNT)
+
+#define S_CCIS2WCNT 8
+#define M_CCIS2WCNT 0xffU
+#define V_CCIS2WCNT(x) ((x) << S_CCIS2WCNT)
+#define G_CCIS2WCNT(x) (((x) >> S_CCIS2WCNT) & M_CCIS2WCNT)
+
+#define S_CCIS2AWCNT 0
+#define M_CCIS2AWCNT 0xffU
+#define V_CCIS2AWCNT(x) ((x) << S_CCIS2AWCNT)
+#define G_CCIS2AWCNT(x) (((x) >> S_CCIS2AWCNT) & M_CCIS2AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS3 0x4732c
+
+#define S_CCIS3RCNT 24
+#define M_CCIS3RCNT 0xffU
+#define V_CCIS3RCNT(x) ((x) << S_CCIS3RCNT)
+#define G_CCIS3RCNT(x) (((x) >> S_CCIS3RCNT) & M_CCIS3RCNT)
+
+#define S_CCIS3ARCNT 16
+#define M_CCIS3ARCNT 0xffU
+#define V_CCIS3ARCNT(x) ((x) << S_CCIS3ARCNT)
+#define G_CCIS3ARCNT(x) (((x) >> S_CCIS3ARCNT) & M_CCIS3ARCNT)
+
+#define S_CCIS3WCNT 8
+#define M_CCIS3WCNT 0xffU
+#define V_CCIS3WCNT(x) ((x) << S_CCIS3WCNT)
+#define G_CCIS3WCNT(x) (((x) >> S_CCIS3WCNT) & M_CCIS3WCNT)
+
+#define S_CCIS3AWCNT 0
+#define M_CCIS3AWCNT 0xffU
+#define V_CCIS3AWCNT(x) ((x) << S_CCIS3AWCNT)
+#define G_CCIS3AWCNT(x) (((x) >> S_CCIS3AWCNT) & M_CCIS3AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS4 0x47330
+
+#define S_CCIS4RCNT 24
+#define M_CCIS4RCNT 0xffU
+#define V_CCIS4RCNT(x) ((x) << S_CCIS4RCNT)
+#define G_CCIS4RCNT(x) (((x) >> S_CCIS4RCNT) & M_CCIS4RCNT)
+
+#define S_CCIS4ARCNT 16
+#define M_CCIS4ARCNT 0xffU
+#define V_CCIS4ARCNT(x) ((x) << S_CCIS4ARCNT)
+#define G_CCIS4ARCNT(x) (((x) >> S_CCIS4ARCNT) & M_CCIS4ARCNT)
+
+#define S_CCIS4WCNT 8
+#define M_CCIS4WCNT 0xffU
+#define V_CCIS4WCNT(x) ((x) << S_CCIS4WCNT)
+#define G_CCIS4WCNT(x) (((x) >> S_CCIS4WCNT) & M_CCIS4WCNT)
+
+#define S_CCIS4AWCNT 0
+#define M_CCIS4AWCNT 0xffU
+#define V_CCIS4AWCNT(x) ((x) << S_CCIS4AWCNT)
+#define G_CCIS4AWCNT(x) (((x) >> S_CCIS4AWCNT) & M_CCIS4AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS34 0x47334
+
+#define S_CCIS4RSPCNT 24
+#define M_CCIS4RSPCNT 0xffU
+#define V_CCIS4RSPCNT(x) ((x) << S_CCIS4RSPCNT)
+#define G_CCIS4RSPCNT(x) (((x) >> S_CCIS4RSPCNT) & M_CCIS4RSPCNT)
+
+#define S_CCIS4ACCNT 16
+#define M_CCIS4ACCNT 0xffU
+#define V_CCIS4ACCNT(x) ((x) << S_CCIS4ACCNT)
+#define G_CCIS4ACCNT(x) (((x) >> S_CCIS4ACCNT) & M_CCIS4ACCNT)
+
+#define S_CCIS3RSPCNT 8
+#define M_CCIS3RSPCNT 0xffU
+#define V_CCIS3RSPCNT(x) ((x) << S_CCIS3RSPCNT)
+#define G_CCIS3RSPCNT(x) (((x) >> S_CCIS3RSPCNT) & M_CCIS3RSPCNT)
+
+#define S_CCIS3ACCNT 0
+#define M_CCIS3ACCNT 0xffU
+#define V_CCIS3ACCNT(x) ((x) << S_CCIS3ACCNT)
+#define G_CCIS3ACCNT(x) (((x) >> S_CCIS3ACCNT) & M_CCIS3ACCNT)
+
+#define A_ARM_CCI_TR_DEBUGM0 0x47338
+
+#define S_CCIM0RCNT 24
+#define M_CCIM0RCNT 0xffU
+#define V_CCIM0RCNT(x) ((x) << S_CCIM0RCNT)
+#define G_CCIM0RCNT(x) (((x) >> S_CCIM0RCNT) & M_CCIM0RCNT)
+
+#define S_CCIM0ARCNT 16
+#define M_CCIM0ARCNT 0xffU
+#define V_CCIM0ARCNT(x) ((x) << S_CCIM0ARCNT)
+#define G_CCIM0ARCNT(x) (((x) >> S_CCIM0ARCNT) & M_CCIM0ARCNT)
+
+#define S_CCIM0WCNT 8
+#define M_CCIM0WCNT 0xffU
+#define V_CCIM0WCNT(x) ((x) << S_CCIM0WCNT)
+#define G_CCIM0WCNT(x) (((x) >> S_CCIM0WCNT) & M_CCIM0WCNT)
+
+#define S_CCIM0AWCNT 0
+#define M_CCIM0AWCNT 0xffU
+#define V_CCIM0AWCNT(x) ((x) << S_CCIM0AWCNT)
+#define G_CCIM0AWCNT(x) (((x) >> S_CCIM0AWCNT) & M_CCIM0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM1 0x4733c
+
+#define S_CCIM1RCNT 24
+#define M_CCIM1RCNT 0xffU
+#define V_CCIM1RCNT(x) ((x) << S_CCIM1RCNT)
+#define G_CCIM1RCNT(x) (((x) >> S_CCIM1RCNT) & M_CCIM1RCNT)
+
+#define S_CCIM1ARCNT 16
+#define M_CCIM1ARCNT 0xffU
+#define V_CCIM1ARCNT(x) ((x) << S_CCIM1ARCNT)
+#define G_CCIM1ARCNT(x) (((x) >> S_CCIM1ARCNT) & M_CCIM1ARCNT)
+
+#define S_CCIM1WCNT 8
+#define M_CCIM1WCNT 0xffU
+#define V_CCIM1WCNT(x) ((x) << S_CCIM1WCNT)
+#define G_CCIM1WCNT(x) (((x) >> S_CCIM1WCNT) & M_CCIM1WCNT)
+
+#define S_CCIM1AWCNT 0
+#define M_CCIM1AWCNT 0xffU
+#define V_CCIM1AWCNT(x) ((x) << S_CCIM1AWCNT)
+#define G_CCIM1AWCNT(x) (((x) >> S_CCIM1AWCNT) & M_CCIM1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM2 0x47340
+
+#define S_CCIM2RCNT 24
+#define M_CCIM2RCNT 0xffU
+#define V_CCIM2RCNT(x) ((x) << S_CCIM2RCNT)
+#define G_CCIM2RCNT(x) (((x) >> S_CCIM2RCNT) & M_CCIM2RCNT)
+
+#define S_CCIM2ARCNT 16
+#define M_CCIM2ARCNT 0xffU
+#define V_CCIM2ARCNT(x) ((x) << S_CCIM2ARCNT)
+#define G_CCIM2ARCNT(x) (((x) >> S_CCIM2ARCNT) & M_CCIM2ARCNT)
+
+#define S_CCIM2WCNT 8
+#define M_CCIM2WCNT 0xffU
+#define V_CCIM2WCNT(x) ((x) << S_CCIM2WCNT)
+#define G_CCIM2WCNT(x) (((x) >> S_CCIM2WCNT) & M_CCIM2WCNT)
+
+#define S_CCIM2AWCNT 0
+#define M_CCIM2AWCNT 0xffU
+#define V_CCIM2AWCNT(x) ((x) << S_CCIM2AWCNT)
+#define G_CCIM2AWCNT(x) (((x) >> S_CCIM2AWCNT) & M_CCIM2AWCNT)
+
+#define A_ARM_MA_TR_DEBUG 0x47344
+
+#define S_MA1_RD_CNT 24
+#define M_MA1_RD_CNT 0xffU
+#define V_MA1_RD_CNT(x) ((x) << S_MA1_RD_CNT)
+#define G_MA1_RD_CNT(x) (((x) >> S_MA1_RD_CNT) & M_MA1_RD_CNT)
+
+#define S_MA1_WR_CNT 16
+#define M_MA1_WR_CNT 0xffU
+#define V_MA1_WR_CNT(x) ((x) << S_MA1_WR_CNT)
+#define G_MA1_WR_CNT(x) (((x) >> S_MA1_WR_CNT) & M_MA1_WR_CNT)
+
+#define S_MA0_RD_CNT 8
+#define M_MA0_RD_CNT 0xffU
+#define V_MA0_RD_CNT(x) ((x) << S_MA0_RD_CNT)
+#define G_MA0_RD_CNT(x) (((x) >> S_MA0_RD_CNT) & M_MA0_RD_CNT)
+
+#define S_MA0_WR_CNT 0
+#define M_MA0_WR_CNT 0xffU
+#define V_MA0_WR_CNT(x) ((x) << S_MA0_WR_CNT)
+#define G_MA0_WR_CNT(x) (((x) >> S_MA0_WR_CNT) & M_MA0_WR_CNT)
+
+#define A_ARM_GP_INT 0x47348
+
+#define S_GP_INT 0
+#define M_GP_INT 0xffU
+#define V_GP_INT(x) ((x) << S_GP_INT)
+#define G_GP_INT(x) (((x) >> S_GP_INT) & M_GP_INT)
+
+#define A_ARM_DMA_CFG0 0x47350
+#define A_ARM_DMA_CFG1 0x47354
+
+#define S_DMABOOTPERIPHNS 16
+#define M_DMABOOTPERIPHNS 0x3ffU
+#define V_DMABOOTPERIPHNS(x) ((x) << S_DMABOOTPERIPHNS)
+#define G_DMABOOTPERIPHNS(x) (((x) >> S_DMABOOTPERIPHNS) & M_DMABOOTPERIPHNS)
+
+#define S_DMABOOTIRQNS 4
+#define M_DMABOOTIRQNS 0x3ffU
+#define V_DMABOOTIRQNS(x) ((x) << S_DMABOOTIRQNS)
+#define G_DMABOOTIRQNS(x) (((x) >> S_DMABOOTIRQNS) & M_DMABOOTIRQNS)
+
+#define S_DMABOOTMANAGERNS 1
+#define V_DMABOOTMANAGERNS(x) ((x) << S_DMABOOTMANAGERNS)
+#define F_DMABOOTMANAGERNS V_DMABOOTMANAGERNS(1U)
+
+#define S_DMABOOTFROMPC 0
+#define V_DMABOOTFROMPC(x) ((x) << S_DMABOOTFROMPC)
+#define F_DMABOOTFROMPC V_DMABOOTFROMPC(1U)
+
+#define A_ARM_ARM_CFG0 0x47380
+
+#define S_MESSAGEBYPASS_DATA 2
+#define V_MESSAGEBYPASS_DATA(x) ((x) << S_MESSAGEBYPASS_DATA)
+#define F_MESSAGEBYPASS_DATA V_MESSAGEBYPASS_DATA(1U)
+
+#define S_MESSAGEBYPASS 1
+#define V_MESSAGEBYPASS(x) ((x) << S_MESSAGEBYPASS)
+#define F_MESSAGEBYPASS V_MESSAGEBYPASS(1U)
+
+#define S_PCIEBYPASS 0
+#define V_PCIEBYPASS(x) ((x) << S_PCIEBYPASS)
+#define F_PCIEBYPASS V_PCIEBYPASS(1U)
+
+#define A_ARM_ARM_CFG1 0x47384
+#define A_ARM_ARM_CFG2 0x47390
+#define A_ARM_PCIE_MA_ADDR_REGION0 0x47400
+
+#define S_ADDRREG0 0
+#define M_ADDRREG0 0xfffffffU
+#define V_ADDRREG0(x) ((x) << S_ADDRREG0)
+#define G_ADDRREG0(x) (((x) >> S_ADDRREG0) & M_ADDRREG0)
+
+#define A_ARM_PCIE_MA_ADDR_REGION1 0x47404
+
+#define S_ADDRREG1 0
+#define M_ADDRREG1 0xfffffffU
+#define V_ADDRREG1(x) ((x) << S_ADDRREG1)
+#define G_ADDRREG1(x) (((x) >> S_ADDRREG1) & M_ADDRREG1)
+
+#define A_ARM_PCIE_MA_ADDR_REGION2 0x47408
+
+#define S_ADDRREG2 0
+#define M_ADDRREG2 0xfffffffU
+#define V_ADDRREG2(x) ((x) << S_ADDRREG2)
+#define G_ADDRREG2(x) (((x) >> S_ADDRREG2) & M_ADDRREG2)
+
+#define A_ARM_PCIE_MA_ADDR_REGION3 0x4740c
+
+#define S_ADDRREG3 0
+#define M_ADDRREG3 0xfffffffU
+#define V_ADDRREG3(x) ((x) << S_ADDRREG3)
+#define G_ADDRREG3(x) (((x) >> S_ADDRREG3) & M_ADDRREG3)
+
+#define A_ARM_PCIE_MA_ADDR_REGION4 0x47410
+
+#define S_ADDRREG4 0
+#define M_ADDRREG4 0xfffffffU
+#define V_ADDRREG4(x) ((x) << S_ADDRREG4)
+#define G_ADDRREG4(x) (((x) >> S_ADDRREG4) & M_ADDRREG4)
+
+#define A_ARM_PCIE_MA_ADDR_REGION5 0x47414
+
+#define S_ADDRREG5 0
+#define M_ADDRREG5 0xfffffffU
+#define V_ADDRREG5(x) ((x) << S_ADDRREG5)
+#define G_ADDRREG5(x) (((x) >> S_ADDRREG5) & M_ADDRREG5)
+
+#define A_ARM_PCIE_MA_ADDR_REGION6 0x47418
+
+#define S_ADDRREG6 0
+#define M_ADDRREG6 0xfffffffU
+#define V_ADDRREG6(x) ((x) << S_ADDRREG6)
+#define G_ADDRREG6(x) (((x) >> S_ADDRREG6) & M_ADDRREG6)
+
+#define A_ARM_PCIE_MA_ADDR_REGION7 0x4741c
+
+#define S_ADDRREG7 0
+#define M_ADDRREG7 0xfffffffU
+#define V_ADDRREG7(x) ((x) << S_ADDRREG7)
+#define G_ADDRREG7(x) (((x) >> S_ADDRREG7) & M_ADDRREG7)
+
+#define A_ARM_INTERRUPT_GEN 0x47420
+
+#define S_INT_GEN 0
+#define M_INT_GEN 0x3U
+#define V_INT_GEN(x) ((x) << S_INT_GEN)
+#define G_INT_GEN(x) (((x) >> S_INT_GEN) & M_INT_GEN)
+
+#define A_ARM_INTERRUPT_CLEAR 0x47424
+
+#define S_INT_CLEAR 0
+#define M_INT_CLEAR 0x3U
+#define V_INT_CLEAR(x) ((x) << S_INT_CLEAR)
+#define G_INT_CLEAR(x) (((x) >> S_INT_CLEAR) & M_INT_CLEAR)
+
+#define A_ARM_DEBUG_STATUS_0 0x47428
+#define A_ARM_DBPROC_CONTROL 0x4742c
+
+#define S_NO_OF_INTERRUPTS 0
+#define M_NO_OF_INTERRUPTS 0x3U
+#define V_NO_OF_INTERRUPTS(x) ((x) << S_NO_OF_INTERRUPTS)
+#define G_NO_OF_INTERRUPTS(x) (((x) >> S_NO_OF_INTERRUPTS) & M_NO_OF_INTERRUPTS)
+
+#define A_ARM_PERR_INT_CAUSE1 0x47430
+
+#define S_ARWFIFO0_PERR 31
+#define V_ARWFIFO0_PERR(x) ((x) << S_ARWFIFO0_PERR)
+#define F_ARWFIFO0_PERR V_ARWFIFO0_PERR(1U)
+
+#define S_ARWFIFO1_PERR 30
+#define V_ARWFIFO1_PERR(x) ((x) << S_ARWFIFO1_PERR)
+#define F_ARWFIFO1_PERR V_ARWFIFO1_PERR(1U)
+
+#define S_ARWIDFIFO0_PERR 29
+#define V_ARWIDFIFO0_PERR(x) ((x) << S_ARWIDFIFO0_PERR)
+#define F_ARWIDFIFO0_PERR V_ARWIDFIFO0_PERR(1U)
+
+#define S_ARWIDFIFO1_PERR 28
+#define V_ARWIDFIFO1_PERR(x) ((x) << S_ARWIDFIFO1_PERR)
+#define F_ARWIDFIFO1_PERR V_ARWIDFIFO1_PERR(1U)
+
+#define S_ARIDFIFO0_PERR 27
+#define V_ARIDFIFO0_PERR(x) ((x) << S_ARIDFIFO0_PERR)
+#define F_ARIDFIFO0_PERR V_ARIDFIFO0_PERR(1U)
+
+#define S_ARIDFIFO1_PERR 26
+#define V_ARIDFIFO1_PERR(x) ((x) << S_ARIDFIFO1_PERR)
+#define F_ARIDFIFO1_PERR V_ARIDFIFO1_PERR(1U)
+
+#define S_RRSPADDR_FIFO0_PERR 25
+#define V_RRSPADDR_FIFO0_PERR(x) ((x) << S_RRSPADDR_FIFO0_PERR)
+#define F_RRSPADDR_FIFO0_PERR V_RRSPADDR_FIFO0_PERR(1U)
+
+#define S_RRSPADDR_FIFO1_PERR 24
+#define V_RRSPADDR_FIFO1_PERR(x) ((x) << S_RRSPADDR_FIFO1_PERR)
+#define F_RRSPADDR_FIFO1_PERR V_RRSPADDR_FIFO1_PERR(1U)
+
+#define S_WRSTRB_FIFO0_PERR 23
+#define V_WRSTRB_FIFO0_PERR(x) ((x) << S_WRSTRB_FIFO0_PERR)
+#define F_WRSTRB_FIFO0_PERR V_WRSTRB_FIFO0_PERR(1U)
+
+#define S_WRSTRB_FIFO1_PERR 22
+#define V_WRSTRB_FIFO1_PERR(x) ((x) << S_WRSTRB_FIFO1_PERR)
+#define F_WRSTRB_FIFO1_PERR V_WRSTRB_FIFO1_PERR(1U)
+
+#define S_MA2AXI_RSPDATAPARERR 21
+#define V_MA2AXI_RSPDATAPARERR(x) ((x) << S_MA2AXI_RSPDATAPARERR)
+#define F_MA2AXI_RSPDATAPARERR V_MA2AXI_RSPDATAPARERR(1U)
+
+#define S_MA2AXI_DATA_PAR_ERR 20
+#define V_MA2AXI_DATA_PAR_ERR(x) ((x) << S_MA2AXI_DATA_PAR_ERR)
+#define F_MA2AXI_DATA_PAR_ERR V_MA2AXI_DATA_PAR_ERR(1U)
+
+#define S_MA2AXI_WR_ORD_FIFO_PARERR 19
+#define V_MA2AXI_WR_ORD_FIFO_PARERR(x) ((x) << S_MA2AXI_WR_ORD_FIFO_PARERR)
+#define F_MA2AXI_WR_ORD_FIFO_PARERR V_MA2AXI_WR_ORD_FIFO_PARERR(1U)
+
+#define S_NVME_DB_EMU_TRACKER_FIFO_PERR 18
+#define V_NVME_DB_EMU_TRACKER_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_TRACKER_FIFO_PERR)
+#define F_NVME_DB_EMU_TRACKER_FIFO_PERR V_NVME_DB_EMU_TRACKER_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR 17
+#define V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR)
+#define F_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR 16
+#define V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR)
+#define F_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO0_PERR 15
+#define V_NVME_DB_EMU_ID_FIFO0_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO0_PERR)
+#define F_NVME_DB_EMU_ID_FIFO0_PERR V_NVME_DB_EMU_ID_FIFO0_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO1_PERR 14
+#define V_NVME_DB_EMU_ID_FIFO1_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO1_PERR)
+#define F_NVME_DB_EMU_ID_FIFO1_PERR V_NVME_DB_EMU_ID_FIFO1_PERR(1U)
+
+#define S_RC_ARWFIFO_PERR 13
+#define V_RC_ARWFIFO_PERR(x) ((x) << S_RC_ARWFIFO_PERR)
+#define F_RC_ARWFIFO_PERR V_RC_ARWFIFO_PERR(1U)
+
+#define S_RC_ARIDBURSTADDRFIFO_PERR 12
+#define V_RC_ARIDBURSTADDRFIFO_PERR(x) ((x) << S_RC_ARIDBURSTADDRFIFO_PERR)
+#define F_RC_ARIDBURSTADDRFIFO_PERR V_RC_ARIDBURSTADDRFIFO_PERR(1U)
+
+#define S_RC_CFG_FIFO_PERR 11
+#define V_RC_CFG_FIFO_PERR(x) ((x) << S_RC_CFG_FIFO_PERR)
+#define F_RC_CFG_FIFO_PERR V_RC_CFG_FIFO_PERR(1U)
+
+#define S_RC_RSPFIFO_PERR 10
+#define V_RC_RSPFIFO_PERR(x) ((x) << S_RC_RSPFIFO_PERR)
+#define F_RC_RSPFIFO_PERR V_RC_RSPFIFO_PERR(1U)
+
+#define S_INIC_ARIDFIFO_PERR 9
+#define V_INIC_ARIDFIFO_PERR(x) ((x) << S_INIC_ARIDFIFO_PERR)
+#define F_INIC_ARIDFIFO_PERR V_INIC_ARIDFIFO_PERR(1U)
+
+#define S_INIC_ARWFIFO_PERR 8
+#define V_INIC_ARWFIFO_PERR(x) ((x) << S_INIC_ARWFIFO_PERR)
+#define F_INIC_ARWFIFO_PERR V_INIC_ARWFIFO_PERR(1U)
+
+#define S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR 7
+#define V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR 6
+#define V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR 5
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR 4
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(1U)
+
+#define S_ARM_MA_512B_ARB_FIFO_PERR 3
+#define V_ARM_MA_512B_ARB_FIFO_PERR(x) ((x) << S_ARM_MA_512B_ARB_FIFO_PERR)
+#define F_ARM_MA_512B_ARB_FIFO_PERR V_ARM_MA_512B_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_MA_ARB_FIFO_PERR 2
+#define V_PCIE_INIC_MA_ARB_FIFO_PERR(x) ((x) << S_PCIE_INIC_MA_ARB_FIFO_PERR)
+#define F_PCIE_INIC_MA_ARB_FIFO_PERR V_PCIE_INIC_MA_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_ARB_RSPPERR 1
+#define V_PCIE_INIC_ARB_RSPPERR(x) ((x) << S_PCIE_INIC_ARB_RSPPERR)
+#define F_PCIE_INIC_ARB_RSPPERR V_PCIE_INIC_ARB_RSPPERR(1U)
+
+#define S_ITE_CACHE_PERR 0
+#define V_ITE_CACHE_PERR(x) ((x) << S_ITE_CACHE_PERR)
+#define F_ITE_CACHE_PERR V_ITE_CACHE_PERR(1U)
+
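+/*
+ * Usage sketch (editorial): decoding the cause register above with its
+ * F_ flags.  Writing the cause value back to ack it is the usual cxgbe
+ * interrupt idiom; whether this register is in fact write-1-to-clear
+ * is an assumption.
+ */
+#if 0
+static inline void
+arm_perr_intr1(struct adapter *sc)
+{
+	uint32_t cause = t4_read_reg(sc, A_ARM_PERR_INT_CAUSE1);
+
+	if (cause & F_ARWFIFO0_PERR)
+		printf("ARM: ARW FIFO0 parity error\n");
+	if (cause & F_ITE_CACHE_PERR)
+		printf("ARM: ITE cache parity error\n");
+	t4_write_reg(sc, A_ARM_PERR_INT_CAUSE1, cause);	/* assumed W1C */
+}
+#endif
+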
+#define A_ARM_PERR_INT_ENB1 0x47434
+#define A_ARM_PERR_ENABLE1 0x47438
+#define A_ARM_DEBUG_STATUS_1 0x4743c
+#define A_ARM_PCIE_MA_ADDR_REGION_DST 0x47440
+
+#define S_ADDRREGDST 0
+#define M_ADDRREGDST 0x1ffU
+#define V_ADDRREGDST(x) ((x) << S_ADDRREGDST)
+#define G_ADDRREGDST(x) (((x) >> S_ADDRREGDST) & M_ADDRREGDST)
+
+#define A_ARM_ERR_INT_CAUSE0 0x47444
+
+#define S_STRB0_ERROR 31
+#define V_STRB0_ERROR(x) ((x) << S_STRB0_ERROR)
+#define F_STRB0_ERROR V_STRB0_ERROR(1U)
+
+#define S_STRB1_ERROR 30
+#define V_STRB1_ERROR(x) ((x) << S_STRB1_ERROR)
+#define F_STRB1_ERROR V_STRB1_ERROR(1U)
+
+#define S_PCIE_INIC_MA_ARB_INV_RSP_TAG 29
+#define V_PCIE_INIC_MA_ARB_INV_RSP_TAG(x) ((x) << S_PCIE_INIC_MA_ARB_INV_RSP_TAG)
+#define F_PCIE_INIC_MA_ARB_INV_RSP_TAG V_PCIE_INIC_MA_ARB_INV_RSP_TAG(1U)
+
+#define S_ERROR0_NOCMD_DATA 28
+#define V_ERROR0_NOCMD_DATA(x) ((x) << S_ERROR0_NOCMD_DATA)
+#define F_ERROR0_NOCMD_DATA V_ERROR0_NOCMD_DATA(1U)
+
+#define S_ERROR1_NOCMD_DATA 27
+#define V_ERROR1_NOCMD_DATA(x) ((x) << S_ERROR1_NOCMD_DATA)
+#define F_ERROR1_NOCMD_DATA V_ERROR1_NOCMD_DATA(1U)
+
+#define S_INIC_STRB_ERROR 26
+#define V_INIC_STRB_ERROR(x) ((x) << S_INIC_STRB_ERROR)
+#define F_INIC_STRB_ERROR V_INIC_STRB_ERROR(1U)
+
+#define A_ARM_ERR_INT_ENB0 0x47448
+#define A_ARM_DEBUG_INDEX 0x47450
+#define A_ARM_DEBUG_DATA_HIGH 0x47454
+#define A_ARM_DEBUG_DATA_LOW 0x47458
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA0 0x47500
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA1 0x47504
+
+#define S_BASEADDRESS 0
+#define M_BASEADDRESS 0x3U
+#define V_BASEADDRESS(x) ((x) << S_BASEADDRESS)
+#define G_BASEADDRESS(x) (((x) >> S_BASEADDRESS) & M_BASEADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG0 0x47508
+
+#define S_WATERMARK 16
+#define M_WATERMARK 0x3ffU
+#define V_WATERMARK(x) ((x) << S_WATERMARK)
+#define G_WATERMARK(x) (((x) >> S_WATERMARK) & M_WATERMARK)
+
+#define S_SIZEMAX 0
+#define M_SIZEMAX 0x3ffU
+#define V_SIZEMAX(x) ((x) << S_SIZEMAX)
+#define G_SIZEMAX(x) (((x) >> S_SIZEMAX) & M_SIZEMAX)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG1 0x4750c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG2 0x47510
+
+#define S_CPUREADADDRESS 0
+#define M_CPUREADADDRESS 0x3ffU
+#define V_CPUREADADDRESS(x) ((x) << S_CPUREADADDRESS)
+#define G_CPUREADADDRESS(x) (((x) >> S_CPUREADADDRESS) & M_CPUREADADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG3 0x47514
+
+#define S_CPUREADADDRESSVLD 0
+#define V_CPUREADADDRESSVLD(x) ((x) << S_CPUREADADDRESSVLD)
+#define F_CPUREADADDRESSVLD V_CPUREADADDRESSVLD(1U)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 0x47518
+#define A_ARM_APB2MSI_INTERRUPT_0_STATUS 0x47600
+#define A_ARM_APB2MSI_INTERRUPT_1_STATUS 0x47604
+#define A_ARM_APB2MSI_INTERRUPT_2_STATUS 0x47608
+#define A_ARM_APB2MSI_INTERRUPT_3_STATUS 0x4760c
+#define A_ARM_APB2MSI_INTERRUPT_0_ENABLE 0x47610
+#define A_ARM_APB2MSI_INTERRUPT_1_ENABLE 0x47614
+#define A_ARM_APB2MSI_INTERRUPT_2_ENABLE 0x47618
+#define A_ARM_APB2MSI_INTERRUPT_3_ENABLE 0x4761c
+#define A_ARM_APB2MSI_INTERRUPT_PRIORITY_LEVEL 0x47620
+
+#define S_ARM_APB2MSI_INT_PRIORITY_LEVEL 0
+#define M_ARM_APB2MSI_INT_PRIORITY_LEVEL 0x7U
+#define V_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) ((x) << S_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+#define G_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) (((x) >> S_ARM_APB2MSI_INT_PRIORITY_LEVEL) & M_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+
+#define A_ARM_APB2MSI_MEM_READ_ADDR 0x47624
+
+#define S_ARM_APB2MSI_MEM_READ_ADDR 0
+#define M_ARM_APB2MSI_MEM_READ_ADDR 0x7fU
+#define V_ARM_APB2MSI_MEM_READ_ADDR(x) ((x) << S_ARM_APB2MSI_MEM_READ_ADDR)
+#define G_ARM_APB2MSI_MEM_READ_ADDR(x) (((x) >> S_ARM_APB2MSI_MEM_READ_ADDR) & M_ARM_APB2MSI_MEM_READ_ADDR)
+
+#define A_ARM_MSI_MEMORY_DATA 0x47628
+#define A_ARM_MSI_MEMORY_ADDR 0x4762c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG5 0x47630
+
+#define S_CONFIGDONE 0
+#define V_CONFIGDONE(x) ((x) << S_CONFIGDONE)
+#define F_CONFIGDONE V_CONFIGDONE(1U)
+
+#define A_ARM_AXI2MA_TIMERCNT 0x47640
+#define A_ARM_AXI2MA_TRTYPE 0x47644
+
+#define S_ARMMA2AXI1ARTRTYPE 3
+#define V_ARMMA2AXI1ARTRTYPE(x) ((x) << S_ARMMA2AXI1ARTRTYPE)
+#define F_ARMMA2AXI1ARTRTYPE V_ARMMA2AXI1ARTRTYPE(1U)
+
+#define S_ARMMA2AXI1AWTRTYPE 2
+#define V_ARMMA2AXI1AWTRTYPE(x) ((x) << S_ARMMA2AXI1AWTRTYPE)
+#define F_ARMMA2AXI1AWTRTYPE V_ARMMA2AXI1AWTRTYPE(1U)
+
+#define S_ARMMA2AXI0ARTRTYPE 1
+#define V_ARMMA2AXI0ARTRTYPE(x) ((x) << S_ARMMA2AXI0ARTRTYPE)
+#define F_ARMMA2AXI0ARTRTYPE V_ARMMA2AXI0ARTRTYPE(1U)
+
+#define S_ARMMA2AXI0AWTRTYPE 0
+#define V_ARMMA2AXI0AWTRTYPE(x) ((x) << S_ARMMA2AXI0AWTRTYPE)
+#define F_ARMMA2AXI0AWTRTYPE V_ARMMA2AXI0AWTRTYPE(1U)
+
+#define A_ARM_AXI2PCIE_VENDOR 0x47660
+
+#define S_T7_VENDORID 4
+#define M_T7_VENDORID 0xffffU
+#define V_T7_VENDORID(x) ((x) << S_T7_VENDORID)
+#define G_T7_VENDORID(x) (((x) >> S_T7_VENDORID) & M_T7_VENDORID)
+
+#define S_OBFFCODE 0
+#define M_OBFFCODE 0xfU
+#define V_OBFFCODE(x) ((x) << S_OBFFCODE)
+#define G_OBFFCODE(x) (((x) >> S_OBFFCODE) & M_OBFFCODE)
+
+#define A_ARM_AXI2PCIE_VENMSGHDR_DW3 0x47664
+#define A_ARM_CLUSTER_SEL 0x47668
+
+#define S_ARM_CLUSTER_SEL 0
+#define V_ARM_CLUSTER_SEL(x) ((x) << S_ARM_CLUSTER_SEL)
+#define F_ARM_CLUSTER_SEL V_ARM_CLUSTER_SEL(1U)
+
+#define A_ARM_PWRREQ_PERMIT_ADB 0x4766c
+
+#define S_PWRQ_PERMIT_DENY_SAR 1
+#define V_PWRQ_PERMIT_DENY_SAR(x) ((x) << S_PWRQ_PERMIT_DENY_SAR)
+#define F_PWRQ_PERMIT_DENY_SAR V_PWRQ_PERMIT_DENY_SAR(1U)
+
+#define S_PWRQREQNS_ADB 0
+#define V_PWRQREQNS_ADB(x) ((x) << S_PWRQREQNS_ADB)
+#define F_PWRQREQNS_ADB V_PWRQREQNS_ADB(1U)
+
+#define A_ARM_CLK_REQ_ADB 0x47670
+
+#define S_CLKQREQNS_ADB 0
+#define V_CLKQREQNS_ADB(x) ((x) << S_CLKQREQNS_ADB)
+#define F_CLKQREQNS_ADB V_CLKQREQNS_ADB(1U)
+
+#define A_ARM_WAKEUPM 0x47674
+
+#define S_DFTRSTDISABLEM_ADB 2
+#define V_DFTRSTDISABLEM_ADB(x) ((x) << S_DFTRSTDISABLEM_ADB)
+#define F_DFTRSTDISABLEM_ADB V_DFTRSTDISABLEM_ADB(1U)
+
+#define S_DFTRSTDISABLES_ADB 1
+#define V_DFTRSTDISABLES_ADB(x) ((x) << S_DFTRSTDISABLES_ADB)
+#define F_DFTRSTDISABLES_ADB V_DFTRSTDISABLES_ADB(1U)
+
+#define S_WAKEUPM_I_ADB 0
+#define V_WAKEUPM_I_ADB(x) ((x) << S_WAKEUPM_I_ADB)
+#define F_WAKEUPM_I_ADB V_WAKEUPM_I_ADB(1U)
+
+#define A_ARM_CC_APB_FILTERING 0x47678
+
+#define S_CC_DFTSCANMODE 11
+#define V_CC_DFTSCANMODE(x) ((x) << S_CC_DFTSCANMODE)
+#define F_CC_DFTSCANMODE V_CC_DFTSCANMODE(1U)
+
+#define S_CC_OTP_FILTERING_DISABLE 10
+#define V_CC_OTP_FILTERING_DISABLE(x) ((x) << S_CC_OTP_FILTERING_DISABLE)
+#define F_CC_OTP_FILTERING_DISABLE V_CC_OTP_FILTERING_DISABLE(1U)
+
+#define S_CC_APB_FILTERING 0
+#define M_CC_APB_FILTERING 0x3ffU
+#define V_CC_APB_FILTERING(x) ((x) << S_CC_APB_FILTERING)
+#define G_CC_APB_FILTERING(x) (((x) >> S_CC_APB_FILTERING) & M_CC_APB_FILTERING)
+
+#define A_ARM_DCU_EN0 0x4767c
+#define A_ARM_DCU_EN1 0x47680
+#define A_ARM_DCU_EN2 0x47684
+#define A_ARM_DCU_EN3 0x47688
+#define A_ARM_DCU_LOCK0 0x4768c
+#define A_ARM_DCU_LOCK1 0x47690
+#define A_ARM_DCU_LOCK2 0x47694
+#define A_ARM_DCU_LOCK3 0x47698
+#define A_ARM_GPPC 0x4769c
+
+#define S_CC_SEC_DEBUG_RESET 24
+#define V_CC_SEC_DEBUG_RESET(x) ((x) << S_CC_SEC_DEBUG_RESET)
+#define F_CC_SEC_DEBUG_RESET V_CC_SEC_DEBUG_RESET(1U)
+
+#define S_CC_DFTSE 23
+#define V_CC_DFTSE(x) ((x) << S_CC_DFTSE)
+#define F_CC_DFTSE V_CC_DFTSE(1U)
+
+#define S_CC_DFTCGEN 22
+#define V_CC_DFTCGEN(x) ((x) << S_CC_DFTCGEN)
+#define F_CC_DFTCGEN V_CC_DFTCGEN(1U)
+
+#define S_CC_DFTRAMHOLD 21
+#define V_CC_DFTRAMHOLD(x) ((x) << S_CC_DFTRAMHOLD)
+#define F_CC_DFTRAMHOLD V_CC_DFTRAMHOLD(1U)
+
+#define S_CC_LOCK_BITS 12
+#define M_CC_LOCK_BITS 0x1ffU
+#define V_CC_LOCK_BITS(x) ((x) << S_CC_LOCK_BITS)
+#define G_CC_LOCK_BITS(x) (((x) >> S_CC_LOCK_BITS) & M_CC_LOCK_BITS)
+
+#define S_CC_LCS_IS_VALID 11
+#define V_CC_LCS_IS_VALID(x) ((x) << S_CC_LCS_IS_VALID)
+#define F_CC_LCS_IS_VALID V_CC_LCS_IS_VALID(1U)
+
+#define S_CC_LCS 8
+#define M_CC_LCS 0x7U
+#define V_CC_LCS(x) ((x) << S_CC_LCS)
+#define G_CC_LCS(x) (((x) >> S_CC_LCS) & M_CC_LCS)
+
+#define S_CC_GPPC 0
+#define M_CC_GPPC 0xffU
+#define V_CC_GPPC(x) ((x) << S_CC_GPPC)
+#define G_CC_GPPC(x) (((x) >> S_CC_GPPC) & M_CC_GPPC)
+
+#define A_ARM_EMMC 0x47700
+
+#define S_EMMC_CARD_CLK_EN 31
+#define V_EMMC_CARD_CLK_EN(x) ((x) << S_EMMC_CARD_CLK_EN)
+#define F_EMMC_CARD_CLK_EN V_EMMC_CARD_CLK_EN(1U)
+
+#define S_EMMC_LED_CONTROL 30
+#define V_EMMC_LED_CONTROL(x) ((x) << S_EMMC_LED_CONTROL)
+#define F_EMMC_LED_CONTROL V_EMMC_LED_CONTROL(1U)
+
+#define S_EMMC_UHS1_SWVOLT_EN 29
+#define V_EMMC_UHS1_SWVOLT_EN(x) ((x) << S_EMMC_UHS1_SWVOLT_EN)
+#define F_EMMC_UHS1_SWVOLT_EN V_EMMC_UHS1_SWVOLT_EN(1U)
+
+#define S_EMMC_UHS1_DRV_STH 27
+#define M_EMMC_UHS1_DRV_STH 0x3U
+#define V_EMMC_UHS1_DRV_STH(x) ((x) << S_EMMC_UHS1_DRV_STH)
+#define G_EMMC_UHS1_DRV_STH(x) (((x) >> S_EMMC_UHS1_DRV_STH) & M_EMMC_UHS1_DRV_STH)
+
+#define S_EMMC_SD_VDD1_ON 26
+#define V_EMMC_SD_VDD1_ON(x) ((x) << S_EMMC_SD_VDD1_ON)
+#define F_EMMC_SD_VDD1_ON V_EMMC_SD_VDD1_ON(1U)
+
+#define S_EMMC_SD_VDD1_SEL 23
+#define M_EMMC_SD_VDD1_SEL 0x7U
+#define V_EMMC_SD_VDD1_SEL(x) ((x) << S_EMMC_SD_VDD1_SEL)
+#define G_EMMC_SD_VDD1_SEL(x) (((x) >> S_EMMC_SD_VDD1_SEL) & M_EMMC_SD_VDD1_SEL)
+
+#define S_EMMC_INTCLK_EN 22
+#define V_EMMC_INTCLK_EN(x) ((x) << S_EMMC_INTCLK_EN)
+#define F_EMMC_INTCLK_EN V_EMMC_INTCLK_EN(1U)
+
+#define S_EMMC_CARD_CLK_FREQ_SEL 12
+#define M_EMMC_CARD_CLK_FREQ_SEL 0x3ffU
+#define V_EMMC_CARD_CLK_FREQ_SEL(x) ((x) << S_EMMC_CARD_CLK_FREQ_SEL)
+#define G_EMMC_CARD_CLK_FREQ_SEL(x) (((x) >> S_EMMC_CARD_CLK_FREQ_SEL) & M_EMMC_CARD_CLK_FREQ_SEL)
+
+#define S_EMMC_CARD_CLK_GEN_SEL 11
+#define V_EMMC_CARD_CLK_GEN_SEL(x) ((x) << S_EMMC_CARD_CLK_GEN_SEL)
+#define F_EMMC_CARD_CLK_GEN_SEL V_EMMC_CARD_CLK_GEN_SEL(1U)
+
+#define S_EMMC_CLK2CARD_ON 10
+#define V_EMMC_CLK2CARD_ON(x) ((x) << S_EMMC_CLK2CARD_ON)
+#define F_EMMC_CLK2CARD_ON V_EMMC_CLK2CARD_ON(1U)
+
+#define S_EMMC_CARD_CLK_STABLE 9
+#define V_EMMC_CARD_CLK_STABLE(x) ((x) << S_EMMC_CARD_CLK_STABLE)
+#define F_EMMC_CARD_CLK_STABLE V_EMMC_CARD_CLK_STABLE(1U)
+
+#define S_EMMC_INT_BCLK_STABLE 8
+#define V_EMMC_INT_BCLK_STABLE(x) ((x) << S_EMMC_INT_BCLK_STABLE)
+#define F_EMMC_INT_BCLK_STABLE V_EMMC_INT_BCLK_STABLE(1U)
+
+#define S_EMMC_INT_ACLK_STABLE 7
+#define V_EMMC_INT_ACLK_STABLE(x) ((x) << S_EMMC_INT_ACLK_STABLE)
+#define F_EMMC_INT_ACLK_STABLE V_EMMC_INT_ACLK_STABLE(1U)
+
+#define S_EMMC_INT_TMCLK_STABLE 6
+#define V_EMMC_INT_TMCLK_STABLE(x) ((x) << S_EMMC_INT_TMCLK_STABLE)
+#define F_EMMC_INT_TMCLK_STABLE V_EMMC_INT_TMCLK_STABLE(1U)
+
+#define S_EMMC_HOST_REG_VOL_STABLE 5
+#define V_EMMC_HOST_REG_VOL_STABLE(x) ((x) << S_EMMC_HOST_REG_VOL_STABLE)
+#define F_EMMC_HOST_REG_VOL_STABLE V_EMMC_HOST_REG_VOL_STABLE(1U)
+
+#define S_EMMC_CARD_DETECT_N 4
+#define V_EMMC_CARD_DETECT_N(x) ((x) << S_EMMC_CARD_DETECT_N)
+#define F_EMMC_CARD_DETECT_N V_EMMC_CARD_DETECT_N(1U)
+
+#define S_EMMC_CARD_WRITE_PROT 3
+#define V_EMMC_CARD_WRITE_PROT(x) ((x) << S_EMMC_CARD_WRITE_PROT)
+#define F_EMMC_CARD_WRITE_PROT V_EMMC_CARD_WRITE_PROT(1U)
+
+#define S_EMMC_GP_IN 2
+#define V_EMMC_GP_IN(x) ((x) << S_EMMC_GP_IN)
+#define F_EMMC_GP_IN V_EMMC_GP_IN(1U)
+
+#define S_EMMC_TEST_SCAN_MODE 1
+#define V_EMMC_TEST_SCAN_MODE(x) ((x) << S_EMMC_TEST_SCAN_MODE)
+#define F_EMMC_TEST_SCAN_MODE V_EMMC_TEST_SCAN_MODE(1U)
+
+#define S_EMMC_FIFOINJDATAERR 0
+#define V_EMMC_FIFOINJDATAERR(x) ((x) << S_EMMC_FIFOINJDATAERR)
+#define F_EMMC_FIFOINJDATAERR V_EMMC_FIFOINJDATAERR(1U)
+
+#define A_ARM_WAKEUPS 0x47704
+
+#define S_WAKEUPS_I_ADB 0
+#define V_WAKEUPS_I_ADB(x) ((x) << S_WAKEUPS_I_ADB)
+#define F_WAKEUPS_I_ADB V_WAKEUPS_I_ADB(1U)
+
+#define A_ARM_CLKREQNM_ADB 0x47708
+
+#define S_CLKQREQNM_ADB 0
+#define V_CLKQREQNM_ADB(x) ((x) << S_CLKQREQNM_ADB)
+#define F_CLKQREQNM_ADB V_CLKQREQNM_ADB(1U)
+
+#define A_ARM_ATOMICDATA0_0 0x4770c
+#define A_ARM_ATOMICDATA1_0 0x47710
+#define A_ARM_NVME_DB_EMU_INT_ENABLE 0x47740
+#define A_ARM_TCAM_WRITE_DATA 0x47744
+
+#define S_TCAM_WRITE_DATA 0
+#define M_TCAM_WRITE_DATA 0x3fffffffU
+#define V_TCAM_WRITE_DATA(x) ((x) << S_TCAM_WRITE_DATA)
+#define G_TCAM_WRITE_DATA(x) (((x) >> S_TCAM_WRITE_DATA) & M_TCAM_WRITE_DATA)
+
+#define A_ARM_TCAM_WRITE_ADDR 0x47748
+
+#define S_TCAM_WRITE_ADDR 0
+#define M_TCAM_WRITE_ADDR 0x1ffU
+#define V_TCAM_WRITE_ADDR(x) ((x) << S_TCAM_WRITE_ADDR)
+#define G_TCAM_WRITE_ADDR(x) (((x) >> S_TCAM_WRITE_ADDR) & M_TCAM_WRITE_ADDR)
+
+#define A_ARM_TCAM_READ_ADDR 0x4774c
+
+#define S_TCAM_READ_ADDR 0
+#define M_TCAM_READ_ADDR 0x1ffU
+#define V_TCAM_READ_ADDR(x) ((x) << S_TCAM_READ_ADDR)
+#define G_TCAM_READ_ADDR(x) (((x) >> S_TCAM_READ_ADDR) & M_TCAM_READ_ADDR)
+
+#define A_ARM_TCAM_CTL 0x47750
+
+#define S_TCAMCBBUSY 6
+#define V_TCAMCBBUSY(x) ((x) << S_TCAMCBBUSY)
+#define F_TCAMCBBUSY V_TCAMCBBUSY(1U)
+
+#define S_TCAMCBPASS 5
+#define V_TCAMCBPASS(x) ((x) << S_TCAMCBPASS)
+#define F_TCAMCBPASS V_TCAMCBPASS(1U)
+
+#define S_TCAMCBSTART 4
+#define V_TCAMCBSTART(x) ((x) << S_TCAMCBSTART)
+#define F_TCAMCBSTART V_TCAMCBSTART(1U)
+
+#define S_TCAMRSTCB 3
+#define V_TCAMRSTCB(x) ((x) << S_TCAMRSTCB)
+#define F_TCAMRSTCB V_TCAMRSTCB(1U)
+
+#define S_TCAM_REQBITPOS 2
+#define V_TCAM_REQBITPOS(x) ((x) << S_TCAM_REQBITPOS)
+#define F_TCAM_REQBITPOS V_TCAM_REQBITPOS(1U)
+
+#define S_TCAM_WRITE 1
+#define V_TCAM_WRITE(x) ((x) << S_TCAM_WRITE)
+#define F_TCAM_WRITE V_TCAM_WRITE(1U)
+
+#define S_TCAM_ENABLE 0
+#define V_TCAM_ENABLE(x) ((x) << S_TCAM_ENABLE)
+#define F_TCAM_ENABLE V_TCAM_ENABLE(1U)
+
+#define A_ARM_TCAM_READ_DATA 0x4775c
+
+#define S_TCAM_READ_DATA 0
+#define M_TCAM_READ_DATA 0x3fffffffU
+#define V_TCAM_READ_DATA(x) ((x) << S_TCAM_READ_DATA)
+#define G_TCAM_READ_DATA(x) (((x) >> S_TCAM_READ_DATA) & M_TCAM_READ_DATA)
+
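+/*
+ * Usage sketch (editorial): TCAM_WRITE_DATA/ADDR, TCAM_READ_ADDR,
+ * TCAM_CTL and TCAM_READ_DATA look like a conventional indirect access
+ * window.  The exact handshake (whether TCAM_ENABLE is pulsed or held,
+ * and how completion is signalled) is a guess; only the macro
+ * composition is shown.
+ */
+#if 0
+static inline uint32_t
+arm_tcam_read(struct adapter *sc, u_int addr)
+{
+	t4_write_reg(sc, A_ARM_TCAM_READ_ADDR, V_TCAM_READ_ADDR(addr));
+	t4_write_reg(sc, A_ARM_TCAM_CTL, F_TCAM_ENABLE); /* TCAM_WRITE=0: read */
+	return (G_TCAM_READ_DATA(t4_read_reg(sc, A_ARM_TCAM_READ_DATA)));
+}
+#endif
+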
+#define A_ARM_SRAM1_WRITE_DATA 0x47760
+
+#define S_SRAM1_WRITE_DATA 0
+#define M_SRAM1_WRITE_DATA 0x7fffffU
+#define V_SRAM1_WRITE_DATA(x) ((x) << S_SRAM1_WRITE_DATA)
+#define G_SRAM1_WRITE_DATA(x) (((x) >> S_SRAM1_WRITE_DATA) & M_SRAM1_WRITE_DATA)
+
+#define A_ARM_SRAM1_WRITE_ADDR 0x47764
+
+#define S_SRAM1_WRITE_ADDR 0
+#define M_SRAM1_WRITE_ADDR 0x1ffU
+#define V_SRAM1_WRITE_ADDR(x) ((x) << S_SRAM1_WRITE_ADDR)
+#define G_SRAM1_WRITE_ADDR(x) (((x) >> S_SRAM1_WRITE_ADDR) & M_SRAM1_WRITE_ADDR)
+
+#define A_ARM_SRAM1_READ_ADDR 0x47768
+
+#define S_SRAM1_READ_ADDR 0
+#define M_SRAM1_READ_ADDR 0x1ffU
+#define V_SRAM1_READ_ADDR(x) ((x) << S_SRAM1_READ_ADDR)
+#define G_SRAM1_READ_ADDR(x) (((x) >> S_SRAM1_READ_ADDR) & M_SRAM1_READ_ADDR)
+
+#define A_ARM_SRAM1_CTL 0x4776c
+
+#define S_SRAM1_WRITE 1
+#define V_SRAM1_WRITE(x) ((x) << S_SRAM1_WRITE)
+#define F_SRAM1_WRITE V_SRAM1_WRITE(1U)
+
+#define S_SRAM1_ENABLE 0
+#define V_SRAM1_ENABLE(x) ((x) << S_SRAM1_ENABLE)
+#define F_SRAM1_ENABLE V_SRAM1_ENABLE(1U)
+
+#define A_ARM_SRAM1_READ_DATA 0x47770
+
+#define S_SRAM1_READ_DATA 0
+#define M_SRAM1_READ_DATA 0x7fffffU
+#define V_SRAM1_READ_DATA(x) ((x) << S_SRAM1_READ_DATA)
+#define G_SRAM1_READ_DATA(x) (((x) >> S_SRAM1_READ_DATA) & M_SRAM1_READ_DATA)
+
+#define A_ARM_SRAM2_WRITE_DATA0 0x47774
+#define A_ARM_SRAM2_WRITE_DATA1 0x47778
+#define A_ARM_SRAM2_WRITE_DATA2 0x4777c
+#define A_ARM_SRAM2_WRITE_ADDR 0x47780
+
+#define S_SRAM2_WRITE_ADDR 0
+#define M_SRAM2_WRITE_ADDR 0x1fffU
+#define V_SRAM2_WRITE_ADDR(x) ((x) << S_SRAM2_WRITE_ADDR)
+#define G_SRAM2_WRITE_ADDR(x) (((x) >> S_SRAM2_WRITE_ADDR) & M_SRAM2_WRITE_ADDR)
+
+#define A_ARM_SRAM2_READ_ADDR 0x47784
+
+#define S_SRAM2_READ_ADDR 0
+#define M_SRAM2_READ_ADDR 0x1fffU
+#define V_SRAM2_READ_ADDR(x) ((x) << S_SRAM2_READ_ADDR)
+#define G_SRAM2_READ_ADDR(x) (((x) >> S_SRAM2_READ_ADDR) & M_SRAM2_READ_ADDR)
+
+#define A_ARM_SRAM2_CTL 0x47788
+
+#define S_SRAM2_WRITE 1
+#define V_SRAM2_WRITE(x) ((x) << S_SRAM2_WRITE)
+#define F_SRAM2_WRITE V_SRAM2_WRITE(1U)
+
+#define S_SRAM2_ENABLE 0
+#define V_SRAM2_ENABLE(x) ((x) << S_SRAM2_ENABLE)
+#define F_SRAM2_ENABLE V_SRAM2_ENABLE(1U)
+
+#define A_ARM_SRAM2_READ_DATA0 0x4778c
+#define A_ARM_SRAM2_READ_DATA1 0x47790
+#define A_ARM_SRAM2_READ_DATA2 0x47794
+#define A_ARM_DBPROC_SRAM_CTL 0x47798
+
+#define S_DBPROC_RD_EN 0
+#define V_DBPROC_RD_EN(x) ((x) << S_DBPROC_RD_EN)
+#define F_DBPROC_RD_EN V_DBPROC_RD_EN(1U)
+
+#define A_ARM_DBPROC_SRAM_READ_ADDR 0x4779c
+
+#define S_DBPROC_RD_ADDR 0
+#define M_DBPROC_RD_ADDR 0x1ffU
+#define V_DBPROC_RD_ADDR(x) ((x) << S_DBPROC_RD_ADDR)
+#define G_DBPROC_RD_ADDR(x) (((x) >> S_DBPROC_RD_ADDR) & M_DBPROC_RD_ADDR)
+
+#define A_ARM_DBPROC_SRAM_READ_DATA0 0x477a0
+#define A_ARM_DBPROC_SRAM_READ_DATA1 0x477a4
+#define A_ARM_DBPROC_SRAM_READ_DATA2 0x477a8
+#define A_ARM_DBPROC_SRAM_READ_DATA3 0x477ac
+#define A_ARM_ATOMICDATA0_1 0x477b0
+#define A_ARM_ATOMICDATA1_1 0x477b4
+#define A_ARM_SPIDEN 0x477b8
+
+#define S_SPIDEN 0
+#define V_SPIDEN(x) ((x) << S_SPIDEN)
+#define F_SPIDEN V_SPIDEN(1U)
+
+#define A_ARM_RC_INT_WRITE_DATA 0x477bc
+
+#define S_RC_INT_STATUS_WRITE_DATA 0
+#define M_RC_INT_STATUS_WRITE_DATA 0x3fU
+#define V_RC_INT_STATUS_WRITE_DATA(x) ((x) << S_RC_INT_STATUS_WRITE_DATA)
+#define G_RC_INT_STATUS_WRITE_DATA(x) (((x) >> S_RC_INT_STATUS_WRITE_DATA) & M_RC_INT_STATUS_WRITE_DATA)
+
+#define A_ARM_DFT_MBI 0x477c4
+
+#define S_MBISTREQ 3
+#define V_MBISTREQ(x) ((x) << S_MBISTREQ)
+#define F_MBISTREQ V_MBISTREQ(1U)
+
+#define S_MBISTRESETN 2
+#define V_MBISTRESETN(x) ((x) << S_MBISTRESETN)
+#define F_MBISTRESETN V_MBISTRESETN(1U)
+
+#define S_DFTRAMHOLD 1
+#define V_DFTRAMHOLD(x) ((x) << S_DFTRAMHOLD)
+#define F_DFTRAMHOLD V_DFTRAMHOLD(1U)
+
+#define S_DFTCGEN 0
+#define V_DFTCGEN(x) ((x) << S_DFTCGEN)
+#define F_DFTCGEN V_DFTCGEN(1U)
+
+#define A_ARM_DBPROC_SRAM_TH_CTL 0x477c8
+
+#define S_DBPROC_TH_WR_EN 1
+#define V_DBPROC_TH_WR_EN(x) ((x) << S_DBPROC_TH_WR_EN)
+#define F_DBPROC_TH_WR_EN V_DBPROC_TH_WR_EN(1U)
+
+#define S_DBPROC_TH_RD_EN 0
+#define V_DBPROC_TH_RD_EN(x) ((x) << S_DBPROC_TH_RD_EN)
+#define F_DBPROC_TH_RD_EN V_DBPROC_TH_RD_EN(1U)
+
+#define A_ARM_MBISTACK 0x477d4
+
+#define S_MBISTACK 0
+#define V_MBISTACK(x) ((x) << S_MBISTACK)
+#define F_MBISTACK V_MBISTACK(1U)
+
+#define A_ARM_MBISTADDR 0x477d8
+
+#define S_MBISTADDR 0
+#define M_MBISTADDR 0xfffU
+#define V_MBISTADDR(x) ((x) << S_MBISTADDR)
+#define G_MBISTADDR(x) (((x) >> S_MBISTADDR) & M_MBISTADDR)
+
+#define A_ARM_MBISTREADEN 0x477dc
+
+#define S_MBISTREADEN 0
+#define V_MBISTREADEN(x) ((x) << S_MBISTREADEN)
+#define F_MBISTREADEN V_MBISTREADEN(1U)
+
+#define A_ARM_MBISTWRITEEN 0x477e0
+
+#define S_MBISTWRITEEN 0
+#define V_MBISTWRITEEN(x) ((x) << S_MBISTWRITEEN)
+#define F_MBISTWRITEEN V_MBISTWRITEEN(1U)
+
+#define A_ARM_MBISTARRAY 0x477e4
+
+#define S_MBISTARRAY 0
+#define M_MBISTARRAY 0x3U
+#define V_MBISTARRAY(x) ((x) << S_MBISTARRAY)
+#define G_MBISTARRAY(x) (((x) >> S_MBISTARRAY) & M_MBISTARRAY)
+
+#define A_ARM_MBISTCFG 0x477e8
+
+#define S_MBISTCFG 0
+#define V_MBISTCFG(x) ((x) << S_MBISTCFG)
+#define F_MBISTCFG V_MBISTCFG(1U)
+
+#define A_ARM_MBISTINDATA0 0x477ec
+#define A_ARM_MBISTINDATA1 0x477f0
+#define A_ARM_MBISTOUTDATA1 0x477f4
+#define A_ARM_MBISTOUTDATA0 0x477f8
+#define A_ARM_NVME_DB_EMU_EN 0x477fc
+
+#define S_NVME_DB_EN 0
+#define V_NVME_DB_EN(x) ((x) << S_NVME_DB_EN)
+#define F_NVME_DB_EN V_NVME_DB_EN(1U)
+
+/* registers for module MC_T70 */
+#define MC_T70_BASE_ADDR 0x48000
+
+#define A_MC_IND_ADDR 0x48000
+
+#define S_T7_AUTOINCR 30
+#define M_T7_AUTOINCR 0x3U
+#define V_T7_AUTOINCR(x) ((x) << S_T7_AUTOINCR)
+#define G_T7_AUTOINCR(x) (((x) >> S_T7_AUTOINCR) & M_T7_AUTOINCR)
+
+#define S_IND_ADDR_ADDR 0
+#define M_IND_ADDR_ADDR 0x1ffffffU
+#define V_IND_ADDR_ADDR(x) ((x) << S_IND_ADDR_ADDR)
+#define G_IND_ADDR_ADDR(x) (((x) >> S_IND_ADDR_ADDR) & M_IND_ADDR_ADDR)
+
+#define A_MC_IND_DATA 0x48004
+#define A_MC_DBG_CTL 0x48018
+#define A_MC_DBG_DATA 0x4801c
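+
+/*
+ * Usage sketch (editorial): MC_IND_ADDR/MC_IND_DATA form an indirect
+ * window into the memory controller.  The encoding of the two AUTOINCR
+ * bits (here 0, assumed to mean no post-increment) is not documented
+ * in this file.
+ */
+#if 0
+static inline uint32_t
+mc_ind_read(struct adapter *sc, uint32_t addr)
+{
+	t4_write_reg(sc, A_MC_IND_ADDR,
+	    V_T7_AUTOINCR(0) | V_IND_ADDR_ADDR(addr));
+	return (t4_read_reg(sc, A_MC_IND_DATA));
+}
+#endif
+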
+#define A_T7_MC_P_DDRPHY_RST_CTRL 0x49300
+#define A_T7_MC_P_PERFORMANCE_CTRL 0x49304
+#define A_T7_MC_P_ECC_CTRL 0x49308
+
+#define S_BISTECCHBWCTL 7
+#define M_BISTECCHBWCTL 0x3U
+#define V_BISTECCHBWCTL(x) ((x) << S_BISTECCHBWCTL)
+#define G_BISTECCHBWCTL(x) (((x) >> S_BISTECCHBWCTL) & M_BISTECCHBWCTL)
+
+#define S_BISTTESTMODE 6
+#define V_BISTTESTMODE(x) ((x) << S_BISTTESTMODE)
+#define F_BISTTESTMODE V_BISTTESTMODE(1U)
+
+#define S_RMW_CTL_CFG 4
+#define M_RMW_CTL_CFG 0x3U
+#define V_RMW_CTL_CFG(x) ((x) << S_RMW_CTL_CFG)
+#define G_RMW_CTL_CFG(x) (((x) >> S_RMW_CTL_CFG) & M_RMW_CTL_CFG)
+
+#define A_MC_P_DDRCTL_INT_ENABLE 0x4930c
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_DCH1_ENABLE 4
+#define V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_DCH1_ENABLE V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE 3
+#define V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_DCH1_ENABLE V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define A_MC_P_DDRCTL_INT_CAUSE 0x49310
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 25
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH1_CAUSE 24
+#define V_WR_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH1_CAUSE V_WR_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE 23
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 22
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE 21
+#define V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH1_CAUSE 20
+#define V_SWCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH1_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH1_CAUSE V_SWCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH1_CAUSE 19
+#define V_DUCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH1_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH1_CAUSE V_DUCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH1_CAUSE 18
+#define V_LCCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH1_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH1_CAUSE V_LCCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH1_CAUSE 17
+#define V_CTRLUPD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH1_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH1_CAUSE V_CTRLUPD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH1_CAUSE 16
+#define V_RFM_ALERT_INTR_DCH1_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH1_CAUSE)
+#define F_RFM_ALERT_INTR_DCH1_CAUSE V_RFM_ALERT_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 15
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH0_CAUSE 14
+#define V_WR_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH0_CAUSE V_WR_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE 13
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 12
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE 11
+#define V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH0_CAUSE 10
+#define V_SWCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH0_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH0_CAUSE V_SWCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH0_CAUSE 9
+#define V_DUCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH0_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH0_CAUSE V_DUCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH0_CAUSE 8
+#define V_LCCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH0_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH0_CAUSE V_LCCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH0_CAUSE 7
+#define V_CTRLUPD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH0_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH0_CAUSE V_CTRLUPD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH0_CAUSE 6
+#define V_RFM_ALERT_INTR_DCH0_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH0_CAUSE)
+#define F_RFM_ALERT_INTR_DCH0_CAUSE V_RFM_ALERT_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE 4
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE 3
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define A_T7_MC_P_PAR_ENABLE 0x49314
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_ENABLE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_ENABLE V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_ENABLE V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_ENABLE V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_ENABLE V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_ENABLE V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_ENABLE 8
+#define V_WDATARAM_PARERR_DCH1_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH1_ENABLE)
+#define F_WDATARAM_PARERR_DCH1_ENABLE V_WDATARAM_PARERR_DCH1_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_ENABLE 7
+#define V_WDATARAM_PARERR_DCH0_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH0_ENABLE)
+#define F_WDATARAM_PARERR_DCH0_ENABLE V_WDATARAM_PARERR_DCH0_ENABLE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_ENABLE 6
+#define V_BIST_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ADDR_FIFO_PARERR_ENABLE V_BIST_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_ENABLE V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_ENABLE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_ENABLE V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(1U)
+
+#define A_T7_MC_P_PAR_CAUSE 0x49318
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_CAUSE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_CAUSE V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_CAUSE V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_CAUSE V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_CAUSE V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_CAUSE V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_CAUSE 8
+#define V_WDATARAM_PARERR_DCH1_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH1_CAUSE)
+#define F_WDATARAM_PARERR_DCH1_CAUSE V_WDATARAM_PARERR_DCH1_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_CAUSE 7
+#define V_WDATARAM_PARERR_DCH0_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH0_CAUSE)
+#define F_WDATARAM_PARERR_DCH0_CAUSE V_WDATARAM_PARERR_DCH0_CAUSE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_CAUSE 6
+#define V_BIST_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ADDR_FIFO_PARERR_CAUSE V_BIST_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_CAUSE V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_CAUSE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_CAUSE V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(1U)
+
+#define A_T7_MC_P_INT_ENABLE 0x4931c
+
+#define S_DDRPHY_INT_ENABLE 4
+#define V_DDRPHY_INT_ENABLE(x) ((x) << S_DDRPHY_INT_ENABLE)
+#define F_DDRPHY_INT_ENABLE V_DDRPHY_INT_ENABLE(1U)
+
+#define S_DDRCTL_INT_ENABLE 3
+#define V_DDRCTL_INT_ENABLE(x) ((x) << S_DDRCTL_INT_ENABLE)
+#define F_DDRCTL_INT_ENABLE V_DDRCTL_INT_ENABLE(1U)
+
+#define S_T7_ECC_CE_INT_ENABLE 2
+#define V_T7_ECC_CE_INT_ENABLE(x) ((x) << S_T7_ECC_CE_INT_ENABLE)
+#define F_T7_ECC_CE_INT_ENABLE V_T7_ECC_CE_INT_ENABLE(1U)
+
+#define S_T7_ECC_UE_INT_ENABLE 1
+#define V_T7_ECC_UE_INT_ENABLE(x) ((x) << S_T7_ECC_UE_INT_ENABLE)
+#define F_T7_ECC_UE_INT_ENABLE V_T7_ECC_UE_INT_ENABLE(1U)
+
+#define A_T7_MC_P_INT_CAUSE 0x49320
+
+#define S_DDRPHY_INT_CAUSE 4
+#define V_DDRPHY_INT_CAUSE(x) ((x) << S_DDRPHY_INT_CAUSE)
+#define F_DDRPHY_INT_CAUSE V_DDRPHY_INT_CAUSE(1U)
+
+#define S_DDRCTL_INT_CAUSE 3
+#define V_DDRCTL_INT_CAUSE(x) ((x) << S_DDRCTL_INT_CAUSE)
+#define F_DDRCTL_INT_CAUSE V_DDRCTL_INT_CAUSE(1U)
+
+#define S_T7_ECC_CE_INT_CAUSE 2
+#define V_T7_ECC_CE_INT_CAUSE(x) ((x) << S_T7_ECC_CE_INT_CAUSE)
+#define F_T7_ECC_CE_INT_CAUSE V_T7_ECC_CE_INT_CAUSE(1U)
+
+#define S_T7_ECC_UE_INT_CAUSE 1
+#define V_T7_ECC_UE_INT_CAUSE(x) ((x) << S_T7_ECC_UE_INT_CAUSE)
+#define F_T7_ECC_UE_INT_CAUSE V_T7_ECC_UE_INT_CAUSE(1U)
+
+#define A_MC_P_ECC_UE_INT_ENABLE 0x49324
+
+#define S_BIST_RSP_SRAM_UERR_ENABLE 0
+#define V_BIST_RSP_SRAM_UERR_ENABLE(x) ((x) << S_BIST_RSP_SRAM_UERR_ENABLE)
+#define F_BIST_RSP_SRAM_UERR_ENABLE V_BIST_RSP_SRAM_UERR_ENABLE(1U)
+
+#define A_MC_P_ECC_UE_INT_CAUSE 0x49328
+
+#define S_BIST_RSP_SRAM_UERR_CAUSE 0
+#define V_BIST_RSP_SRAM_UERR_CAUSE(x) ((x) << S_BIST_RSP_SRAM_UERR_CAUSE)
+#define F_BIST_RSP_SRAM_UERR_CAUSE V_BIST_RSP_SRAM_UERR_CAUSE(1U)
+
+#define A_T7_MC_P_ECC_STATUS 0x4932c
+#define A_T7_MC_P_PHY_CTRL 0x49330
+#define A_T7_MC_P_STATIC_CFG_STATUS 0x49334
+
+#define S_DFIFREQRATIO 27
+#define V_DFIFREQRATIO(x) ((x) << S_DFIFREQRATIO)
+#define F_DFIFREQRATIO V_DFIFREQRATIO(1U)
+
+#define S_STATIC_DDR5_HBW_CHANNEL 3
+#define V_STATIC_DDR5_HBW_CHANNEL(x) ((x) << S_STATIC_DDR5_HBW_CHANNEL)
+#define F_STATIC_DDR5_HBW_CHANNEL V_STATIC_DDR5_HBW_CHANNEL(1U)
+
+#define S_STATIC_DDR5_HBW 2
+#define V_STATIC_DDR5_HBW(x) ((x) << S_STATIC_DDR5_HBW)
+#define F_STATIC_DDR5_HBW V_STATIC_DDR5_HBW(1U)
+
+#define S_T7_STATIC_WIDTH 1
+#define V_T7_STATIC_WIDTH(x) ((x) << S_T7_STATIC_WIDTH)
+#define F_T7_STATIC_WIDTH V_T7_STATIC_WIDTH(1U)
+
+#define A_T7_MC_P_CORE_PCTL_STAT 0x49338
+#define A_T7_MC_P_DEBUG_CNT 0x4933c
+#define A_T7_MC_CE_ERR_DATA_RDATA 0x49340
+#define A_T7_MC_UE_ERR_DATA_RDATA 0x49380
+#define A_T7_MC_CE_ADDR 0x493c0
+#define A_T7_MC_UE_ADDR 0x493c4
+#define A_T7_MC_P_DEEP_SLEEP 0x493c8
+#define A_T7_MC_P_FPGA_BONUS 0x493cc
+#define A_T7_MC_P_DEBUG_CFG 0x493d0
+#define A_T7_MC_P_DEBUG_RPT 0x493d4
+#define A_T7_MC_P_PHY_ADR_CK_EN 0x493d8
+#define A_MC_P_WDATARAM_INIT 0x493dc
+
+#define S_ENABLE_DCH1 1
+#define V_ENABLE_DCH1(x) ((x) << S_ENABLE_DCH1)
+#define F_ENABLE_DCH1 V_ENABLE_DCH1(1U)
+
+#define S_ENABLE_DCH0 0
+#define V_ENABLE_DCH0(x) ((x) << S_ENABLE_DCH0)
+#define F_ENABLE_DCH0 V_ENABLE_DCH0(1U)
+
+#define A_T7_MC_CE_ERR_ECC_DATA0 0x493e0
+#define A_T7_MC_CE_ERR_ECC_DATA1 0x493e4
+#define A_T7_MC_UE_ERR_ECC_DATA0 0x493e8
+#define A_T7_MC_UE_ERR_ECC_DATA1 0x493ec
+#define A_T7_MC_P_RMW_PRIO 0x493f0
+#define A_T7_MC_P_BIST_CMD 0x49400
+
+#define S_FIFO_ERROR_FLAG 30
+#define V_FIFO_ERROR_FLAG(x) ((x) << S_FIFO_ERROR_FLAG)
+#define F_FIFO_ERROR_FLAG V_FIFO_ERROR_FLAG(1U)
+
+#define A_T7_MC_P_BIST_CMD_ADDR 0x49404
+
+#define S_T7_VALUE 0
+#define M_T7_VALUE 0x1fffffffU
+#define V_T7_VALUE(x) ((x) << S_T7_VALUE)
+#define G_T7_VALUE(x) (((x) >> S_T7_VALUE) & M_T7_VALUE)
+
+#define A_MC_P_BIST_NUM_BURST 0x49408
+#define A_T7_MC_P_BIST_DATA_PATTERN 0x4940c
+
+#define S_DATA_TYPE 0
+#define M_DATA_TYPE 0xfU
+#define V_DATA_TYPE(x) ((x) << S_DATA_TYPE)
+#define G_DATA_TYPE(x) (((x) >> S_DATA_TYPE) & M_DATA_TYPE)
+
+#define A_T7_MC_P_BIST_CRC_SEED 0x49410
+#define A_T7_MC_P_BIST_NUM_ERR 0x49460
+#define A_MC_P_BIST_ERR_ADDR 0x49464
+
+#define S_ERROR_ADDR 0
+#define M_ERROR_ADDR 0x3fffffffU
+#define V_ERROR_ADDR(x) ((x) << S_ERROR_ADDR)
+#define G_ERROR_ADDR(x) (((x) >> S_ERROR_ADDR) & M_ERROR_ADDR)
+
+#define A_MC_P_BIST_USER_RWEDATA 0x49468
+#define A_MC_REGB_DDRC_CH0_SCHED0 0x10380
+
+#define S_OPT_VPRW_SCH 31
+#define V_OPT_VPRW_SCH(x) ((x) << S_OPT_VPRW_SCH)
+#define F_OPT_VPRW_SCH V_OPT_VPRW_SCH(1U)
+
+#define S_DIS_SPECULATIVE_ACT 30
+#define V_DIS_SPECULATIVE_ACT(x) ((x) << S_DIS_SPECULATIVE_ACT)
+#define F_DIS_SPECULATIVE_ACT V_DIS_SPECULATIVE_ACT(1U)
+
+#define S_OPT_ACT_LAT 27
+#define V_OPT_ACT_LAT(x) ((x) << S_OPT_ACT_LAT)
+#define F_OPT_ACT_LAT V_OPT_ACT_LAT(1U)
+
+#define S_LPR_NUM_ENTRIES 8
+#define M_LPR_NUM_ENTRIES 0x3fU
+#define V_LPR_NUM_ENTRIES(x) ((x) << S_LPR_NUM_ENTRIES)
+#define G_LPR_NUM_ENTRIES(x) (((x) >> S_LPR_NUM_ENTRIES) & M_LPR_NUM_ENTRIES)
+
+#define S_AUTOPRE_RMW 7
+#define V_AUTOPRE_RMW(x) ((x) << S_AUTOPRE_RMW)
+#define F_AUTOPRE_RMW V_AUTOPRE_RMW(1U)
+
+#define S_DIS_OPT_NTT_BY_PRE 6
+#define V_DIS_OPT_NTT_BY_PRE(x) ((x) << S_DIS_OPT_NTT_BY_PRE)
+#define F_DIS_OPT_NTT_BY_PRE V_DIS_OPT_NTT_BY_PRE(1U)
+
+#define S_DIS_OPT_NTT_BY_ACT 5
+#define V_DIS_OPT_NTT_BY_ACT(x) ((x) << S_DIS_OPT_NTT_BY_ACT)
+#define F_DIS_OPT_NTT_BY_ACT V_DIS_OPT_NTT_BY_ACT(1U)
+
+#define S_OPT_WRCAM_FILL_LEVEL 4
+#define V_OPT_WRCAM_FILL_LEVEL(x) ((x) << S_OPT_WRCAM_FILL_LEVEL)
+#define F_OPT_WRCAM_FILL_LEVEL V_OPT_WRCAM_FILL_LEVEL(1U)
+
+#define S_PAGECLOSE 2
+#define V_PAGECLOSE(x) ((x) << S_PAGECLOSE)
+#define F_PAGECLOSE V_PAGECLOSE(1U)
+
+#define S_PREFER_WRITE 1
+#define V_PREFER_WRITE(x) ((x) << S_PREFER_WRITE)
+#define F_PREFER_WRITE V_PREFER_WRITE(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG0 0x10600
+
+#define S_DIS_SCRUB 23
+#define V_DIS_SCRUB(x) ((x) << S_DIS_SCRUB)
+#define F_DIS_SCRUB V_DIS_SCRUB(1U)
+
+#define S_ECC_TYPE 4
+#define M_ECC_TYPE 0x3U
+#define V_ECC_TYPE(x) ((x) << S_ECC_TYPE)
+#define G_ECC_TYPE(x) (((x) >> S_ECC_TYPE) & M_ECC_TYPE)
+
+#define S_TEST_MODE 3
+#define V_TEST_MODE(x) ((x) << S_TEST_MODE)
+#define F_TEST_MODE V_TEST_MODE(1U)
+
+#define S_ECC_MODE 0
+#define M_ECC_MODE 0x7U
+#define V_ECC_MODE(x) ((x) << S_ECC_MODE)
+#define G_ECC_MODE(x) (((x) >> S_ECC_MODE) & M_ECC_MODE)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG1 0x10604
+
+#define S_DATA_POISON_BIT 1
+#define V_DATA_POISON_BIT(x) ((x) << S_DATA_POISON_BIT)
+#define F_DATA_POISON_BIT V_DATA_POISON_BIT(1U)
+
+#define S_DATA_POISON_EN 0
+#define V_DATA_POISON_EN(x) ((x) << S_DATA_POISON_EN)
+#define F_DATA_POISON_EN V_DATA_POISON_EN(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCSTAT 0x10608
+
+#define S_ECC_UNCORRECTED_ERR 16
+#define M_ECC_UNCORRECTED_ERR 0xffU
+#define V_ECC_UNCORRECTED_ERR(x) ((x) << S_ECC_UNCORRECTED_ERR)
+#define G_ECC_UNCORRECTED_ERR(x) (((x) >> S_ECC_UNCORRECTED_ERR) & M_ECC_UNCORRECTED_ERR)
+
+#define S_ECC_CORRECTED_ERR 8
+#define M_ECC_CORRECTED_ERR 0xffU
+#define V_ECC_CORRECTED_ERR(x) ((x) << S_ECC_CORRECTED_ERR)
+#define G_ECC_CORRECTED_ERR(x) (((x) >> S_ECC_CORRECTED_ERR) & M_ECC_CORRECTED_ERR)
+
+#define S_ECC_CORRECTED_BIT_NUM 0
+#define M_ECC_CORRECTED_BIT_NUM 0x7fU
+#define V_ECC_CORRECTED_BIT_NUM(x) ((x) << S_ECC_CORRECTED_BIT_NUM)
+#define G_ECC_CORRECTED_BIT_NUM(x) (((x) >> S_ECC_CORRECTED_BIT_NUM) & M_ECC_CORRECTED_BIT_NUM)
+
+#define A_MC_REGB_DDRC_CH0_ECCCTL 0x1060c
+
+#define S_ECC_UNCORRECTED_ERR_INTR_FORCE 17
+#define V_ECC_UNCORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_FORCE)
+#define F_ECC_UNCORRECTED_ERR_INTR_FORCE V_ECC_UNCORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_FORCE 16
+#define V_ECC_CORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_CORRECTED_ERR_INTR_FORCE)
+#define F_ECC_CORRECTED_ERR_INTR_FORCE V_ECC_CORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_UNCORRECTED_ERR_INTR_EN 9
+#define V_ECC_UNCORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_EN)
+#define F_ECC_UNCORRECTED_ERR_INTR_EN V_ECC_UNCORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_EN 8
+#define V_ECC_CORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_CORRECTED_ERR_INTR_EN)
+#define F_ECC_CORRECTED_ERR_INTR_EN V_ECC_CORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_UNCORR_ERR_CNT_CLR 3
+#define V_ECC_UNCORR_ERR_CNT_CLR(x) ((x) << S_ECC_UNCORR_ERR_CNT_CLR)
+#define F_ECC_UNCORR_ERR_CNT_CLR V_ECC_UNCORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_CORR_ERR_CNT_CLR 2
+#define V_ECC_CORR_ERR_CNT_CLR(x) ((x) << S_ECC_CORR_ERR_CNT_CLR)
+#define F_ECC_CORR_ERR_CNT_CLR V_ECC_CORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_UNCORRECTED_ERR_CLR 1
+#define V_ECC_UNCORRECTED_ERR_CLR(x) ((x) << S_ECC_UNCORRECTED_ERR_CLR)
+#define F_ECC_UNCORRECTED_ERR_CLR V_ECC_UNCORRECTED_ERR_CLR(1U)
+
+#define S_ECC_CORRECTED_ERR_CLR 0
+#define V_ECC_CORRECTED_ERR_CLR(x) ((x) << S_ECC_CORRECTED_ERR_CLR)
+#define F_ECC_CORRECTED_ERR_CLR V_ECC_CORRECTED_ERR_CLR(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCERRCNT 0x10610
+
+#define S_ECC_UNCORR_ERR_CNT 16
+#define M_ECC_UNCORR_ERR_CNT 0xffffU
+#define V_ECC_UNCORR_ERR_CNT(x) ((x) << S_ECC_UNCORR_ERR_CNT)
+#define G_ECC_UNCORR_ERR_CNT(x) (((x) >> S_ECC_UNCORR_ERR_CNT) & M_ECC_UNCORR_ERR_CNT)
+
+#define S_ECC_CORR_ERR_CNT 0
+#define M_ECC_CORR_ERR_CNT 0xffffU
+#define V_ECC_CORR_ERR_CNT(x) ((x) << S_ECC_CORR_ERR_CNT)
+#define G_ECC_CORR_ERR_CNT(x) (((x) >> S_ECC_CORR_ERR_CNT) & M_ECC_CORR_ERR_CNT)
+
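+/*
+ * Usage sketch (editorial): both 16-bit ECC error counters share the
+ * one register above, so a single read yields both.  If the DDRC CH0
+ * block is only reachable through the MC indirect window rather than
+ * directly, substitute the indirect accessor for t4_read_reg().
+ */
+#if 0
+static inline void
+mc_ecc_err_counts(struct adapter *sc, u_int *corr, u_int *uncorr)
+{
+	uint32_t v = t4_read_reg(sc, A_MC_REGB_DDRC_CH0_ECCERRCNT);
+
+	*corr = G_ECC_CORR_ERR_CNT(v);
+	*uncorr = G_ECC_UNCORR_ERR_CNT(v);
+}
+#endif
+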
+#define A_MC_REGB_DDRC_CH0_ECCCADDR0 0x10614
+
+#define S_ECC_CORR_RANK 24
+#define V_ECC_CORR_RANK(x) ((x) << S_ECC_CORR_RANK)
+#define F_ECC_CORR_RANK V_ECC_CORR_RANK(1U)
+
+#define S_ECC_CORR_ROW 0
+#define M_ECC_CORR_ROW 0x3ffffU
+#define V_ECC_CORR_ROW(x) ((x) << S_ECC_CORR_ROW)
+#define G_ECC_CORR_ROW(x) (((x) >> S_ECC_CORR_ROW) & M_ECC_CORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCCADDR1 0x10618
+
+#define S_ECC_CORR_BG 24
+#define M_ECC_CORR_BG 0x7U
+#define V_ECC_CORR_BG(x) ((x) << S_ECC_CORR_BG)
+#define G_ECC_CORR_BG(x) (((x) >> S_ECC_CORR_BG) & M_ECC_CORR_BG)
+
+#define S_ECC_CORR_BANK 16
+#define M_ECC_CORR_BANK 0x3U
+#define V_ECC_CORR_BANK(x) ((x) << S_ECC_CORR_BANK)
+#define G_ECC_CORR_BANK(x) (((x) >> S_ECC_CORR_BANK) & M_ECC_CORR_BANK)
+
+#define S_ECC_CORR_COL 0
+#define M_ECC_CORR_COL 0x7ffU
+#define V_ECC_CORR_COL(x) ((x) << S_ECC_CORR_COL)
+#define G_ECC_CORR_COL(x) (((x) >> S_ECC_CORR_COL) & M_ECC_CORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCCSYN0 0x1061c
+#define A_MC_REGB_DDRC_CH0_ECCCSYN1 0x10620
+#define A_MC_REGB_DDRC_CH0_ECCCSYN2 0x10624
+
+#define S_CB_CORR_SYNDROME 16
+#define M_CB_CORR_SYNDROME 0xffU
+#define V_CB_CORR_SYNDROME(x) ((x) << S_CB_CORR_SYNDROME)
+#define G_CB_CORR_SYNDROME(x) (((x) >> S_CB_CORR_SYNDROME) & M_CB_CORR_SYNDROME)
+
+#define S_ECC_CORR_SYNDROMES_71_64 0
+#define M_ECC_CORR_SYNDROMES_71_64 0xffU
+#define V_ECC_CORR_SYNDROMES_71_64(x) ((x) << S_ECC_CORR_SYNDROMES_71_64)
+#define G_ECC_CORR_SYNDROMES_71_64(x) (((x) >> S_ECC_CORR_SYNDROMES_71_64) & M_ECC_CORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK0 0x10628
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK1 0x1062c
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK2 0x10630
+
+#define S_ECC_CORR_BIT_MASK_71_64 0
+#define M_ECC_CORR_BIT_MASK_71_64 0xffU
+#define V_ECC_CORR_BIT_MASK_71_64(x) ((x) << S_ECC_CORR_BIT_MASK_71_64)
+#define G_ECC_CORR_BIT_MASK_71_64(x) (((x) >> S_ECC_CORR_BIT_MASK_71_64) & M_ECC_CORR_BIT_MASK_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR0 0x10634
+
+#define S_ECC_UNCORR_RANK 24
+#define V_ECC_UNCORR_RANK(x) ((x) << S_ECC_UNCORR_RANK)
+#define F_ECC_UNCORR_RANK V_ECC_UNCORR_RANK(1U)
+
+#define S_ECC_UNCORR_ROW 0
+#define M_ECC_UNCORR_ROW 0x3ffffU
+#define V_ECC_UNCORR_ROW(x) ((x) << S_ECC_UNCORR_ROW)
+#define G_ECC_UNCORR_ROW(x) (((x) >> S_ECC_UNCORR_ROW) & M_ECC_UNCORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR1 0x10638
+
+#define S_ECC_UNCORR_BG 24
+#define M_ECC_UNCORR_BG 0x7U
+#define V_ECC_UNCORR_BG(x) ((x) << S_ECC_UNCORR_BG)
+#define G_ECC_UNCORR_BG(x) (((x) >> S_ECC_UNCORR_BG) & M_ECC_UNCORR_BG)
+
+#define S_ECC_UNCORR_BANK 16
+#define M_ECC_UNCORR_BANK 0x3U
+#define V_ECC_UNCORR_BANK(x) ((x) << S_ECC_UNCORR_BANK)
+#define G_ECC_UNCORR_BANK(x) (((x) >> S_ECC_UNCORR_BANK) & M_ECC_UNCORR_BANK)
+
+#define S_ECC_UNCORR_COL 0
+#define M_ECC_UNCORR_COL 0x7ffU
+#define V_ECC_UNCORR_COL(x) ((x) << S_ECC_UNCORR_COL)
+#define G_ECC_UNCORR_COL(x) (((x) >> S_ECC_UNCORR_COL) & M_ECC_UNCORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCUSYN0 0x1063c
+#define A_MC_REGB_DDRC_CH0_ECCUSYN1 0x10640
+#define A_MC_REGB_DDRC_CH0_ECCUSYN2 0x10644
+
+#define S_CB_UNCORR_SYNDROME 16
+#define M_CB_UNCORR_SYNDROME 0xffU
+#define V_CB_UNCORR_SYNDROME(x) ((x) << S_CB_UNCORR_SYNDROME)
+#define G_CB_UNCORR_SYNDROME(x) (((x) >> S_CB_UNCORR_SYNDROME) & M_CB_UNCORR_SYNDROME)
+
+#define S_ECC_UNCORR_SYNDROMES_71_64 0
+#define M_ECC_UNCORR_SYNDROMES_71_64 0xffU
+#define V_ECC_UNCORR_SYNDROMES_71_64(x) ((x) << S_ECC_UNCORR_SYNDROMES_71_64)
+#define G_ECC_UNCORR_SYNDROMES_71_64(x) (((x) >> S_ECC_UNCORR_SYNDROMES_71_64) & M_ECC_UNCORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR0 0x10648
+
+#define S_ECC_POISON_RANK 24
+#define V_ECC_POISON_RANK(x) ((x) << S_ECC_POISON_RANK)
+#define F_ECC_POISON_RANK V_ECC_POISON_RANK(1U)
+
+#define S_ECC_POISON_COL 0
+#define M_ECC_POISON_COL 0xfffU
+#define V_ECC_POISON_COL(x) ((x) << S_ECC_POISON_COL)
+#define G_ECC_POISON_COL(x) (((x) >> S_ECC_POISON_COL) & M_ECC_POISON_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR1 0x1064c
+
+#define S_ECC_POISON_BG 28
+#define M_ECC_POISON_BG 0x7U
+#define V_ECC_POISON_BG(x) ((x) << S_ECC_POISON_BG)
+#define G_ECC_POISON_BG(x) (((x) >> S_ECC_POISON_BG) & M_ECC_POISON_BG)
+
+#define S_ECC_POISON_BANK 24
+#define M_ECC_POISON_BANK 0x3U
+#define V_ECC_POISON_BANK(x) ((x) << S_ECC_POISON_BANK)
+#define G_ECC_POISON_BANK(x) (((x) >> S_ECC_POISON_BANK) & M_ECC_POISON_BANK)
+
+#define S_ECC_POISON_ROW 0
+#define M_ECC_POISON_ROW 0x3ffffU
+#define V_ECC_POISON_ROW(x) ((x) << S_ECC_POISON_ROW)
+#define G_ECC_POISON_ROW(x) (((x) >> S_ECC_POISON_ROW) & M_ECC_POISON_ROW)
+
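+/*
+ * Usage sketch (editorial): a poison address is composed by OR-ing
+ * several V_ fields into each of the two registers above.  Register
+ * and field names are from this file; the helper itself is
+ * hypothetical.
+ */
+#if 0
+static inline void
+mc_ecc_set_poison_addr(struct adapter *sc, u_int rank, u_int bg,
+    u_int bank, u_int row, u_int col)
+{
+	t4_write_reg(sc, A_MC_REGB_DDRC_CH0_ECCPOISONADDR0,
+	    V_ECC_POISON_RANK(rank) | V_ECC_POISON_COL(col));
+	t4_write_reg(sc, A_MC_REGB_DDRC_CH0_ECCPOISONADDR1,
+	    V_ECC_POISON_BG(bg) | V_ECC_POISON_BANK(bank) |
+	    V_ECC_POISON_ROW(row));
+}
+#endif
+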
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT0 0x10658
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT1 0x1065c
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT2 0x10660
+
+#define S_ECC_POISON_DATA_71_64 0
+#define M_ECC_POISON_DATA_71_64 0xffU
+#define V_ECC_POISON_DATA_71_64(x) ((x) << S_ECC_POISON_DATA_71_64)
+#define G_ECC_POISON_DATA_71_64(x) (((x) >> S_ECC_POISON_DATA_71_64) & M_ECC_POISON_DATA_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG2 0x10668
+
+#define S_FLIP_BIT_POS1 24
+#define M_FLIP_BIT_POS1 0x7fU
+#define V_FLIP_BIT_POS1(x) ((x) << S_FLIP_BIT_POS1)
+#define G_FLIP_BIT_POS1(x) (((x) >> S_FLIP_BIT_POS1) & M_FLIP_BIT_POS1)
+
+#define S_FLIP_BIT_POS0 16
+#define M_FLIP_BIT_POS0 0x7fU
+#define V_FLIP_BIT_POS0(x) ((x) << S_FLIP_BIT_POS0)
+#define G_FLIP_BIT_POS0(x) (((x) >> S_FLIP_BIT_POS0) & M_FLIP_BIT_POS0)
+
+#define A_MC_REGB_DDRC_CH1_ECCCTL 0x1160c
+#define A_MC_REGB_DDRC_CH1_ECCERRCNT 0x11610
+#define A_MC_REGB_DDRC_CH1_ECCCADDR0 0x11614
+#define A_MC_REGB_DDRC_CH1_ECCCADDR1 0x11618
+#define A_MC_REGB_DDRC_CH1_ECCCSYN0 0x1161c
+#define A_MC_REGB_DDRC_CH1_ECCCSYN1 0x11620
+#define A_MC_REGB_DDRC_CH1_ECCCSYN2 0x11624
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK0 0x11628
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK1 0x1162c
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK2 0x11630
+#define A_MC_REGB_DDRC_CH1_ECCUADDR0 0x11634
+#define A_MC_REGB_DDRC_CH1_ECCUADDR1 0x11638
+#define A_MC_REGB_DDRC_CH1_ECCUSYN0 0x1163c
+#define A_MC_REGB_DDRC_CH1_ECCUSYN1 0x11640
+#define A_MC_REGB_DDRC_CH1_ECCUSYN2 0x11644
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTENABLE 0x20100
+
+#define S_PHYSTICKYUNLOCKEN 15
+#define V_PHYSTICKYUNLOCKEN(x) ((x) << S_PHYSTICKYUNLOCKEN)
+#define F_PHYSTICKYUNLOCKEN V_PHYSTICKYUNLOCKEN(1U)
+
+#define S_PHYBSIEN 14
+#define V_PHYBSIEN(x) ((x) << S_PHYBSIEN)
+#define F_PHYBSIEN V_PHYBSIEN(1U)
+
+#define S_PHYANIBRCVERREN 13
+#define V_PHYANIBRCVERREN(x) ((x) << S_PHYANIBRCVERREN)
+#define F_PHYANIBRCVERREN V_PHYANIBRCVERREN(1U)
+
+#define S_PHYD5ACSM1PARITYEN 12
+#define V_PHYD5ACSM1PARITYEN(x) ((x) << S_PHYD5ACSM1PARITYEN)
+#define F_PHYD5ACSM1PARITYEN V_PHYD5ACSM1PARITYEN(1U)
+
+#define S_PHYD5ACSM0PARITYEN 11
+#define V_PHYD5ACSM0PARITYEN(x) ((x) << S_PHYD5ACSM0PARITYEN)
+#define F_PHYD5ACSM0PARITYEN V_PHYD5ACSM0PARITYEN(1U)
+
+#define S_PHYRXFIFOCHECKEN 10
+#define V_PHYRXFIFOCHECKEN(x) ((x) << S_PHYRXFIFOCHECKEN)
+#define F_PHYRXFIFOCHECKEN V_PHYRXFIFOCHECKEN(1U)
+
+#define S_PHYTXPPTEN 9
+#define V_PHYTXPPTEN(x) ((x) << S_PHYTXPPTEN)
+#define F_PHYTXPPTEN V_PHYTXPPTEN(1U)
+
+#define S_PHYECCEN 8
+#define V_PHYECCEN(x) ((x) << S_PHYECCEN)
+#define F_PHYECCEN V_PHYECCEN(1U)
+
+#define S_PHYFWRESERVEDEN 3
+#define M_PHYFWRESERVEDEN 0x1fU
+#define V_PHYFWRESERVEDEN(x) ((x) << S_PHYFWRESERVEDEN)
+#define G_PHYFWRESERVEDEN(x) (((x) >> S_PHYFWRESERVEDEN) & M_PHYFWRESERVEDEN)
+
+#define S_PHYTRNGFAILEN 2
+#define V_PHYTRNGFAILEN(x) ((x) << S_PHYTRNGFAILEN)
+#define F_PHYTRNGFAILEN V_PHYTRNGFAILEN(1U)
+
+#define S_PHYINITCMPLTEN 1
+#define V_PHYINITCMPLTEN(x) ((x) << S_PHYINITCMPLTEN)
+#define F_PHYINITCMPLTEN V_PHYINITCMPLTEN(1U)
+
+#define S_PHYTRNGCMPLTEN 0
+#define V_PHYTRNGCMPLTEN(x) ((x) << S_PHYTRNGCMPLTEN)
+#define F_PHYTRNGCMPLTEN V_PHYTRNGCMPLTEN(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTFWCONTROL 0x20101
+
+#define S_PHYFWRESERVEDFW 3
+#define M_PHYFWRESERVEDFW 0x1fU
+#define V_PHYFWRESERVEDFW(x) ((x) << S_PHYFWRESERVEDFW)
+#define G_PHYFWRESERVEDFW(x) (((x) >> S_PHYFWRESERVEDFW) & M_PHYFWRESERVEDFW)
+
+#define S_PHYTRNGFAILFW 2
+#define V_PHYTRNGFAILFW(x) ((x) << S_PHYTRNGFAILFW)
+#define F_PHYTRNGFAILFW V_PHYTRNGFAILFW(1U)
+
+#define S_PHYINITCMPLTFW 1
+#define V_PHYINITCMPLTFW(x) ((x) << S_PHYINITCMPLTFW)
+#define F_PHYINITCMPLTFW V_PHYINITCMPLTFW(1U)
+
+#define S_PHYTRNGCMPLTFW 0
+#define V_PHYTRNGCMPLTFW(x) ((x) << S_PHYTRNGCMPLTFW)
+#define F_PHYTRNGCMPLTFW V_PHYTRNGCMPLTFW(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTMASK 0x20102
+
+#define S_PHYSTICKYUNLOCKMSK 15
+#define V_PHYSTICKYUNLOCKMSK(x) ((x) << S_PHYSTICKYUNLOCKMSK)
+#define F_PHYSTICKYUNLOCKMSK V_PHYSTICKYUNLOCKMSK(1U)
+
+#define S_PHYBSIMSK 14
+#define V_PHYBSIMSK(x) ((x) << S_PHYBSIMSK)
+#define F_PHYBSIMSK V_PHYBSIMSK(1U)
+
+#define S_PHYANIBRCVERRMSK 13
+#define V_PHYANIBRCVERRMSK(x) ((x) << S_PHYANIBRCVERRMSK)
+#define F_PHYANIBRCVERRMSK V_PHYANIBRCVERRMSK(1U)
+
+#define S_PHYD5ACSM1PARITYMSK 12
+#define V_PHYD5ACSM1PARITYMSK(x) ((x) << S_PHYD5ACSM1PARITYMSK)
+#define F_PHYD5ACSM1PARITYMSK V_PHYD5ACSM1PARITYMSK(1U)
+
+#define S_PHYD5ACSM0PARITYMSK 11
+#define V_PHYD5ACSM0PARITYMSK(x) ((x) << S_PHYD5ACSM0PARITYMSK)
+#define F_PHYD5ACSM0PARITYMSK V_PHYD5ACSM0PARITYMSK(1U)
+
+#define S_PHYRXFIFOCHECKMSK 10
+#define V_PHYRXFIFOCHECKMSK(x) ((x) << S_PHYRXFIFOCHECKMSK)
+#define F_PHYRXFIFOCHECKMSK V_PHYRXFIFOCHECKMSK(1U)
+
+#define S_PHYTXPPTMSK 9
+#define V_PHYTXPPTMSK(x) ((x) << S_PHYTXPPTMSK)
+#define F_PHYTXPPTMSK V_PHYTXPPTMSK(1U)
+
+#define S_PHYECCMSK 8
+#define V_PHYECCMSK(x) ((x) << S_PHYECCMSK)
+#define F_PHYECCMSK V_PHYECCMSK(1U)
+
+#define S_PHYFWRESERVEDMSK 3
+#define M_PHYFWRESERVEDMSK 0x1fU
+#define V_PHYFWRESERVEDMSK(x) ((x) << S_PHYFWRESERVEDMSK)
+#define G_PHYFWRESERVEDMSK(x) (((x) >> S_PHYFWRESERVEDMSK) & M_PHYFWRESERVEDMSK)
+
+#define S_PHYTRNGFAILMSK 2
+#define V_PHYTRNGFAILMSK(x) ((x) << S_PHYTRNGFAILMSK)
+#define F_PHYTRNGFAILMSK V_PHYTRNGFAILMSK(1U)
+
+#define S_PHYINITCMPLTMSK 1
+#define V_PHYINITCMPLTMSK(x) ((x) << S_PHYINITCMPLTMSK)
+#define F_PHYINITCMPLTMSK V_PHYINITCMPLTMSK(1U)
+
+#define S_PHYTRNGCMPLTMSK 0
+#define V_PHYTRNGCMPLTMSK(x) ((x) << S_PHYTRNGCMPLTMSK)
+#define F_PHYTRNGCMPLTMSK V_PHYTRNGCMPLTMSK(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTCLEAR 0x20103
+
+#define S_PHYSTICKYUNLOCKCLR 15
+#define V_PHYSTICKYUNLOCKCLR(x) ((x) << S_PHYSTICKYUNLOCKCLR)
+#define F_PHYSTICKYUNLOCKCLR V_PHYSTICKYUNLOCKCLR(1U)
+
+#define S_PHYBSICLR 14
+#define V_PHYBSICLR(x) ((x) << S_PHYBSICLR)
+#define F_PHYBSICLR V_PHYBSICLR(1U)
+
+#define S_PHYANIBRCVERRCLR 13
+#define V_PHYANIBRCVERRCLR(x) ((x) << S_PHYANIBRCVERRCLR)
+#define F_PHYANIBRCVERRCLR V_PHYANIBRCVERRCLR(1U)
+
+#define S_PHYD5ACSM1PARITYCLR 12
+#define V_PHYD5ACSM1PARITYCLR(x) ((x) << S_PHYD5ACSM1PARITYCLR)
+#define F_PHYD5ACSM1PARITYCLR V_PHYD5ACSM1PARITYCLR(1U)
+
+#define S_PHYD5ACSM0PARITYCLR 11
+#define V_PHYD5ACSM0PARITYCLR(x) ((x) << S_PHYD5ACSM0PARITYCLR)
+#define F_PHYD5ACSM0PARITYCLR V_PHYD5ACSM0PARITYCLR(1U)
+
+#define S_PHYRXFIFOCHECKCLR 10
+#define V_PHYRXFIFOCHECKCLR(x) ((x) << S_PHYRXFIFOCHECKCLR)
+#define F_PHYRXFIFOCHECKCLR V_PHYRXFIFOCHECKCLR(1U)
+
+#define S_PHYTXPPTCLR 9
+#define V_PHYTXPPTCLR(x) ((x) << S_PHYTXPPTCLR)
+#define F_PHYTXPPTCLR V_PHYTXPPTCLR(1U)
+
+#define S_PHYECCCLR 8
+#define V_PHYECCCLR(x) ((x) << S_PHYECCCLR)
+#define F_PHYECCCLR V_PHYECCCLR(1U)
+
+#define S_PHYFWRESERVEDCLR 3
+#define M_PHYFWRESERVEDCLR 0x1fU
+#define V_PHYFWRESERVEDCLR(x) ((x) << S_PHYFWRESERVEDCLR)
+#define G_PHYFWRESERVEDCLR(x) (((x) >> S_PHYFWRESERVEDCLR) & M_PHYFWRESERVEDCLR)
+
+#define S_PHYTRNGFAILCLR 2
+#define V_PHYTRNGFAILCLR(x) ((x) << S_PHYTRNGFAILCLR)
+#define F_PHYTRNGFAILCLR V_PHYTRNGFAILCLR(1U)
+
+#define S_PHYINITCMPLTCLR 1
+#define V_PHYINITCMPLTCLR(x) ((x) << S_PHYINITCMPLTCLR)
+#define F_PHYINITCMPLTCLR V_PHYINITCMPLTCLR(1U)
+
+#define S_PHYTRNGCMPLTCLR 0
+#define V_PHYTRNGCMPLTCLR(x) ((x) << S_PHYTRNGCMPLTCLR)
+#define F_PHYTRNGCMPLTCLR V_PHYTRNGCMPLTCLR(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTSTATUS 0x20104
+
+#define S_PHYSTICKYUNLOCKERR 15
+#define V_PHYSTICKYUNLOCKERR(x) ((x) << S_PHYSTICKYUNLOCKERR)
+#define F_PHYSTICKYUNLOCKERR V_PHYSTICKYUNLOCKERR(1U)
+
+#define S_PHYBSIINT 14
+#define V_PHYBSIINT(x) ((x) << S_PHYBSIINT)
+#define F_PHYBSIINT V_PHYBSIINT(1U)
+
+#define S_PHYANIBRCVERR 13
+#define V_PHYANIBRCVERR(x) ((x) << S_PHYANIBRCVERR)
+#define F_PHYANIBRCVERR V_PHYANIBRCVERR(1U)
+
+#define S_PHYD5ACSM1PARITYERR 12
+#define V_PHYD5ACSM1PARITYERR(x) ((x) << S_PHYD5ACSM1PARITYERR)
+#define F_PHYD5ACSM1PARITYERR V_PHYD5ACSM1PARITYERR(1U)
+
+#define S_PHYD5ACSM0PARITYERR 11
+#define V_PHYD5ACSM0PARITYERR(x) ((x) << S_PHYD5ACSM0PARITYERR)
+#define F_PHYD5ACSM0PARITYERR V_PHYD5ACSM0PARITYERR(1U)
+
+#define S_PHYRXFIFOCHECKERR 10
+#define V_PHYRXFIFOCHECKERR(x) ((x) << S_PHYRXFIFOCHECKERR)
+#define F_PHYRXFIFOCHECKERR V_PHYRXFIFOCHECKERR(1U)
+
+#define S_PHYRXTXPPTERR 9
+#define V_PHYRXTXPPTERR(x) ((x) << S_PHYRXTXPPTERR)
+#define F_PHYRXTXPPTERR V_PHYRXTXPPTERR(1U)
+
+#define S_PHYECCERR 8
+#define V_PHYECCERR(x) ((x) << S_PHYECCERR)
+#define F_PHYECCERR V_PHYECCERR(1U)
+
+#define S_PHYFWRESERVED 3
+#define M_PHYFWRESERVED 0x1fU
+#define V_PHYFWRESERVED(x) ((x) << S_PHYFWRESERVED)
+#define G_PHYFWRESERVED(x) (((x) >> S_PHYFWRESERVED) & M_PHYFWRESERVED)
+
+#define S_PHYTRNGFAIL 2
+#define V_PHYTRNGFAIL(x) ((x) << S_PHYTRNGFAIL)
+#define F_PHYTRNGFAIL V_PHYTRNGFAIL(1U)
+
+#define S_PHYINITCMPLT 1
+#define V_PHYINITCMPLT(x) ((x) << S_PHYINITCMPLT)
+#define F_PHYINITCMPLT V_PHYINITCMPLT(1U)
+
+#define S_PHYTRNGCMPLT 0
+#define V_PHYTRNGCMPLT(x) ((x) << S_PHYTRNGCMPLT)
+#define F_PHYTRNGCMPLT V_PHYTRNGCMPLT(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTOVERRIDE 0x20107
+
+#define S_PHYINTERRUPTOVERRIDE 0
+#define M_PHYINTERRUPTOVERRIDE 0xffffU
+#define V_PHYINTERRUPTOVERRIDE(x) ((x) << S_PHYINTERRUPTOVERRIDE)
+#define G_PHYINTERRUPTOVERRIDE(x) (((x) >> S_PHYINTERRUPTOVERRIDE) & M_PHYINTERRUPTOVERRIDE)
+
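The MASK (0x20102), CLEAR (0x20103), and STATUS (0x20104) registers above share one bit layout, so the F_* flags compose across all three. A minimal illustrative value, assuming (not confirmed by this patch) that a set mask bit suppresses the corresponding interrupt:

	/* Illustrative: mask all PHY interrupts except training complete/fail. */
	uint32_t msk = 0xffffU & ~(F_PHYTRNGCMPLTMSK | F_PHYTRNGFAILMSK);
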
+/* registers for module MC_T71 */
+#define MC_T71_BASE_ADDR 0x58000
+
+/* registers for module GCACHE */
+#define GCACHE_BASE_ADDR 0x51400
+
+#define A_GCACHE_MODE_SEL0 0x51400
+
+#define S_GC_MA_RSP 16
+#define V_GC_MA_RSP(x) ((x) << S_GC_MA_RSP)
+#define F_GC_MA_RSP V_GC_MA_RSP(1U)
+
+#define A_GCACHE_MEMZONE0_REGION1 0x51404
+
+#define S_REGION_EN1 18
+#define V_REGION_EN1(x) ((x) << S_REGION_EN1)
+#define F_REGION_EN1 V_REGION_EN1(1U)
+
+#define S_EDC_REGION1 17
+#define V_EDC_REGION1(x) ((x) << S_EDC_REGION1)
+#define F_EDC_REGION1 V_EDC_REGION1(1U)
+
+#define S_CACHE_REGION1 16
+#define V_CACHE_REGION1(x) ((x) << S_CACHE_REGION1)
+#define F_CACHE_REGION1 V_CACHE_REGION1(1U)
+
+#define S_END1 0
+#define M_END1 0xffffU
+#define V_END1(x) ((x) << S_END1)
+#define G_END1(x) (((x) >> S_END1) & M_END1)
+
+#define A_GCACHE_MEMZONE0_REGION2 0x51408
+
+#define S_REGION_EN2 18
+#define V_REGION_EN2(x) ((x) << S_REGION_EN2)
+#define F_REGION_EN2 V_REGION_EN2(1U)
+
+#define S_EDC_REGION2 17
+#define V_EDC_REGION2(x) ((x) << S_EDC_REGION2)
+#define F_EDC_REGION2 V_EDC_REGION2(1U)
+
+#define S_CACHE_REGION2 16
+#define V_CACHE_REGION2(x) ((x) << S_CACHE_REGION2)
+#define F_CACHE_REGION2 V_CACHE_REGION2(1U)
+
+#define S_END2 0
+#define M_END2 0xffffU
+#define V_END2(x) ((x) << S_END2)
+#define G_END2(x) (((x) >> S_END2) & M_END2)
+
+#define A_GCACHE_MEMZONE0_REGION3 0x5140c
+
+#define S_REGION_EN3 18
+#define V_REGION_EN3(x) ((x) << S_REGION_EN3)
+#define F_REGION_EN3 V_REGION_EN3(1U)
+
+#define S_EDC_REGION3 17
+#define V_EDC_REGION3(x) ((x) << S_EDC_REGION3)
+#define F_EDC_REGION3 V_EDC_REGION3(1U)
+
+#define S_CACHE_REGION3 16
+#define V_CACHE_REGION3(x) ((x) << S_CACHE_REGION3)
+#define F_CACHE_REGION3 V_CACHE_REGION3(1U)
+
+#define S_END3 0
+#define M_END3 0xffffU
+#define V_END3(x) ((x) << S_END3)
+#define G_END3(x) (((x) >> S_END3) & M_END3)
+
+#define A_GCACHE_MEMZONE0_REGION4 0x51410
+
+#define S_REGION_EN4 18
+#define V_REGION_EN4(x) ((x) << S_REGION_EN4)
+#define F_REGION_EN4 V_REGION_EN4(1U)
+
+#define S_EDC_REGION4 17
+#define V_EDC_REGION4(x) ((x) << S_EDC_REGION4)
+#define F_EDC_REGION4 V_EDC_REGION4(1U)
+
+#define S_CACHE_REGION4 16
+#define V_CACHE_REGION4(x) ((x) << S_CACHE_REGION4)
+#define F_CACHE_REGION4 V_CACHE_REGION4(1U)
+
+#define S_END4 0
+#define M_END4 0xffffU
+#define V_END4(x) ((x) << S_END4)
+#define G_END4(x) (((x) >> S_END4) & M_END4)
+
+#define A_GCACHE_MEMZONE0_REGION5 0x51414
+
+#define S_REGION_EN5 18
+#define V_REGION_EN5(x) ((x) << S_REGION_EN5)
+#define F_REGION_EN5 V_REGION_EN5(1U)
+
+#define S_EDC_REGION5 17
+#define V_EDC_REGION5(x) ((x) << S_EDC_REGION5)
+#define F_EDC_REGION5 V_EDC_REGION5(1U)
+
+#define S_CACHE_REGION5 16
+#define V_CACHE_REGION5(x) ((x) << S_CACHE_REGION5)
+#define F_CACHE_REGION5 V_CACHE_REGION5(1U)
+
+#define S_END5 0
+#define M_END5 0xffffU
+#define V_END5(x) ((x) << S_END5)
+#define G_END5(x) (((x) >> S_END5) & M_END5)
+
+#define A_GCACHE_MEMZONE0_REGION6 0x51418
+
+#define S_REGION_EN6 18
+#define V_REGION_EN6(x) ((x) << S_REGION_EN6)
+#define F_REGION_EN6 V_REGION_EN6(1U)
+
+#define S_EDC_REGION6 17
+#define V_EDC_REGION6(x) ((x) << S_EDC_REGION6)
+#define F_EDC_REGION6 V_EDC_REGION6(1U)
+
+#define S_CACHE_REGION6 16
+#define V_CACHE_REGION6(x) ((x) << S_CACHE_REGION6)
+#define F_CACHE_REGION6 V_CACHE_REGION6(1U)
+
+#define S_END6 0
+#define M_END6 0xffffU
+#define V_END6(x) ((x) << S_END6)
+#define G_END6(x) (((x) >> S_END6) & M_END6)
+
+#define A_GCACHE_MEMZONE0_REGION7 0x5141c
+
+#define S_REGION_EN7 18
+#define V_REGION_EN7(x) ((x) << S_REGION_EN7)
+#define F_REGION_EN7 V_REGION_EN7(1U)
+
+#define S_EDC_REGION7 17
+#define V_EDC_REGION7(x) ((x) << S_EDC_REGION7)
+#define F_EDC_REGION7 V_EDC_REGION7(1U)
+
+#define S_CACHE_REGION7 16
+#define V_CACHE_REGION7(x) ((x) << S_CACHE_REGION7)
+#define F_CACHE_REGION7 V_CACHE_REGION7(1U)
+
+#define S_END7 0
+#define M_END7 0xffffU
+#define V_END7(x) ((x) << S_END7)
+#define G_END7(x) (((x) >> S_END7) & M_END7)
+
+#define A_GCACHE_MEMZONE0_REGION8 0x51420
+
+#define S_REGION_EN8 18
+#define V_REGION_EN8(x) ((x) << S_REGION_EN8)
+#define F_REGION_EN8 V_REGION_EN8(1U)
+
+#define S_EDC_REGION8 17
+#define V_EDC_REGION8(x) ((x) << S_EDC_REGION8)
+#define F_EDC_REGION8 V_EDC_REGION8(1U)
+
+#define S_CACHE_REGION8 16
+#define V_CACHE_REGION8(x) ((x) << S_CACHE_REGION8)
+#define F_CACHE_REGION8 V_CACHE_REGION8(1U)
+
+#define S_END8 0
+#define M_END8 0xffffU
+#define V_END8(x) ((x) << S_END8)
+#define G_END8(x) (((x) >> S_END8) & M_END8)
+
+#define A_GCACHE_REG0_BASE_MSB 0x51424
+#define A_GCACHE_MEMZONE0_REGION1_MSB 0x51428
+
+#define S_START1 0
+#define M_START1 0xffffU
+#define V_START1(x) ((x) << S_START1)
+#define G_START1(x) (((x) >> S_START1) & M_START1)
+
+#define A_GCACHE_MEMZONE0_REGION2_MSB 0x5142c
+
+#define S_START2 0
+#define M_START2 0xffffU
+#define V_START2(x) ((x) << S_START2)
+#define G_START2(x) (((x) >> S_START2) & M_START2)
+
+#define A_GCACHE_MEMZONE0_REGION3_MSB 0x51430
+
+#define S_START3 0
+#define M_START3 0xffffU
+#define V_START3(x) ((x) << S_START3)
+#define G_START3(x) (((x) >> S_START3) & M_START3)
+
+#define A_GCACHE_MEMZONE0_REGION4_MSB 0x51434
+
+#define S_START4 0
+#define M_START4 0xffffU
+#define V_START4(x) ((x) << S_START4)
+#define G_START4(x) (((x) >> S_START4) & M_START4)
+
+#define A_GCACHE_MEMZONE0_REGION5_MSB 0x51438
+
+#define S_START5 0
+#define M_START5 0xffffU
+#define V_START5(x) ((x) << S_START5)
+#define G_START5(x) (((x) >> S_START5) & M_START5)
+
+#define A_GCACHE_MEMZONE0_REGION6_MSB 0x5143c
+
+#define S_START6 0
+#define M_START6 0xffffU
+#define V_START6(x) ((x) << S_START6)
+#define G_START6(x) (((x) >> S_START6) & M_START6)
+
+#define A_GCACHE_MEMZONE0_REGION7_MSB 0x51440
+
+#define S_START7 0
+#define M_START7 0xffffU
+#define V_START7(x) ((x) << S_START7)
+#define G_START7(x) (((x) >> S_START7) & M_START7)
+
+#define A_GCACHE_MEMZONE0_REGION8_MSB 0x51444
+
+#define S_START8 0
+#define M_START8 0xffffU
+#define V_START8(x) ((x) << S_START8)
+#define G_START8(x) (((x) >> S_START8) & M_START8)
+
+#define A_GCACHE_MODE_SEL1 0x51448
+#define A_GCACHE_MEMZONE1_REGION1 0x5144c
+#define A_GCACHE_MEMZONE1_REGION2 0x51450
+#define A_GCACHE_MEMZONE1_REGION3 0x51454
+#define A_GCACHE_MEMZONE1_REGION4 0x51458
+#define A_GCACHE_MEMZONE1_REGION5 0x5145c
+#define A_GCACHE_MEMZONE1_REGION6 0x51460
+#define A_GCACHE_MEMZONE1_REGION7 0x51464
+#define A_GCACHE_MEMZONE1_REGION8 0x51468
+#define A_GCACHE_MEMZONE1_REGION1_MSB 0x5146c
+#define A_GCACHE_MEMZONE1_REGION2_MSB 0x51470
+#define A_GCACHE_MEMZONE1_REGION3_MSB 0x51474
+#define A_GCACHE_MEMZONE1_REGION4_MSB 0x51478
+#define A_GCACHE_MEMZONE1_REGION5_MSB 0x5147c
+#define A_GCACHE_MEMZONE1_REGION6_MSB 0x51480
+#define A_GCACHE_MEMZONE1_REGION7_MSB 0x51484
+#define A_GCACHE_MEMZONE1_REGION8_MSB 0x51488
+#define A_GCACHE_HMA_MC1_EN 0x5148c
+
+#define S_MC1_EN 1
+#define V_MC1_EN(x) ((x) << S_MC1_EN)
+#define F_MC1_EN V_MC1_EN(1U)
+
+#define S_HMA_EN 0
+#define V_HMA_EN(x) ((x) << S_HMA_EN)
+#define F_HMA_EN V_HMA_EN(1U)
+
+#define A_GCACHE_P_BIST_CMD 0x51490
+#define A_GCACHE_P_BIST_CMD_ADDR 0x51494
+#define A_GCACHE_P_BIST_CMD_LEN 0x51498
+#define A_GCACHE_P_BIST_DATA_PATTERN 0x5149c
+#define A_GCACHE_P_BIST_USER_WDATA0 0x514a0
+#define A_GCACHE_P_BIST_USER_WDATA1 0x514a4
+#define A_GCACHE_P_BIST_USER_WDATA2 0x514a8
+#define A_GCACHE_P_BIST_NUM_ERR 0x514ac
+#define A_GCACHE_P_BIST_ERR_FIRST_ADDR 0x514b0
+#define A_GCACHE_P_BIST_STATUS_RDATA 0x514b4
+#define A_GCACHE_P_BIST_CRC_SEED 0x514fc
+#define A_GCACHE_CACHE_SIZE 0x51500
+
+#define S_HMA_2MB 1
+#define V_HMA_2MB(x) ((x) << S_HMA_2MB)
+#define F_HMA_2MB V_HMA_2MB(1U)
+
+#define S_MC0_2MB 0
+#define V_MC0_2MB(x) ((x) << S_MC0_2MB)
+#define F_MC0_2MB V_MC0_2MB(1U)
+
+#define A_GCACHE_HINT_MAPPING 0x51504
+
+#define S_CLIENT_HINT_EN 16
+#define M_CLIENT_HINT_EN 0x7fffU
+#define V_CLIENT_HINT_EN(x) ((x) << S_CLIENT_HINT_EN)
+#define G_CLIENT_HINT_EN(x) (((x) >> S_CLIENT_HINT_EN) & M_CLIENT_HINT_EN)
+
+#define S_HINT_ADDR_SPLIT_EN 8
+#define V_HINT_ADDR_SPLIT_EN(x) ((x) << S_HINT_ADDR_SPLIT_EN)
+#define F_HINT_ADDR_SPLIT_EN V_HINT_ADDR_SPLIT_EN(1U)
+
+#define S_TP_HINT_HMA_MC 2
+#define V_TP_HINT_HMA_MC(x) ((x) << S_TP_HINT_HMA_MC)
+#define F_TP_HINT_HMA_MC V_TP_HINT_HMA_MC(1U)
+
+#define S_CIM_HINT_HMA_MC 1
+#define V_CIM_HINT_HMA_MC(x) ((x) << S_CIM_HINT_HMA_MC)
+#define F_CIM_HINT_HMA_MC V_CIM_HINT_HMA_MC(1U)
+
+#define S_LE_HINT_HMA_MC 0
+#define V_LE_HINT_HMA_MC(x) ((x) << S_LE_HINT_HMA_MC)
+#define F_LE_HINT_HMA_MC V_LE_HINT_HMA_MC(1U)
+
+#define A_GCACHE_PERF_EN 0x51508
+
+#define S_PERF_CLEAR_GC1 3
+#define V_PERF_CLEAR_GC1(x) ((x) << S_PERF_CLEAR_GC1)
+#define F_PERF_CLEAR_GC1 V_PERF_CLEAR_GC1(1U)
+
+#define S_PERF_CLEAR_GC0 2
+#define V_PERF_CLEAR_GC0(x) ((x) << S_PERF_CLEAR_GC0)
+#define F_PERF_CLEAR_GC0 V_PERF_CLEAR_GC0(1U)
+
+#define S_PERF_EN_GC1 1
+#define V_PERF_EN_GC1(x) ((x) << S_PERF_EN_GC1)
+#define F_PERF_EN_GC1 V_PERF_EN_GC1(1U)
+
+#define S_PERF_EN_GC0 0
+#define V_PERF_EN_GC0(x) ((x) << S_PERF_EN_GC0)
+#define F_PERF_EN_GC0 V_PERF_EN_GC0(1U)
+
+#define A_GCACHE_PERF_GC0_RD_HIT 0x5150c
+#define A_GCACHE_PERF_GC1_RD_HIT 0x51510
+#define A_GCACHE_PERF_GC0_WR_HIT 0x51514
+#define A_GCACHE_PERF_GC1_WR_HIT 0x51518
+#define A_GCACHE_PERF_GC0_RD_MISS 0x5151c
+#define A_GCACHE_PERF_GC1_RD_MISS 0x51520
+#define A_GCACHE_PERF_GC0_WR_MISS 0x51524
+#define A_GCACHE_PERF_GC1_WR_MISS 0x51528
+#define A_GCACHE_PERF_GC0_RD_REQ 0x5152c
+#define A_GCACHE_PERF_GC1_RD_REQ 0x51530
+#define A_GCACHE_PERF_GC0_WR_REQ 0x51534
+#define A_GCACHE_PERF_GC1_WR_REQ 0x51538
+#define A_GCACHE_PAR_CAUSE 0x5153c
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_CAUSE V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_CAUSE V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_PAR_CAUSE 19
+#define V_GC1_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC1_RSP_PERR_PAR_CAUSE)
+#define F_GC1_RSP_PERR_PAR_CAUSE V_GC1_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_PAR_CAUSE 18
+#define V_GC0_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC0_RSP_PERR_PAR_CAUSE)
+#define F_GC0_RSP_PERR_PAR_CAUSE V_GC0_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_PAR_CAUSE 17
+#define V_GC1_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_UERR_PAR_CAUSE)
+#define F_GC1_LRU_UERR_PAR_CAUSE V_GC1_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_PAR_CAUSE 16
+#define V_GC0_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_UERR_PAR_CAUSE)
+#define F_GC0_LRU_UERR_PAR_CAUSE V_GC0_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_PAR_CAUSE 15
+#define V_GC1_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_UERR_PAR_CAUSE)
+#define F_GC1_TAG_UERR_PAR_CAUSE V_GC1_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_PAR_CAUSE 14
+#define V_GC0_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_UERR_PAR_CAUSE)
+#define F_GC0_TAG_UERR_PAR_CAUSE V_GC0_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_PAR_CAUSE 13
+#define V_GC1_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_CERR_PAR_CAUSE)
+#define F_GC1_LRU_CERR_PAR_CAUSE V_GC1_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_PAR_CAUSE 12
+#define V_GC0_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_CERR_PAR_CAUSE)
+#define F_GC0_LRU_CERR_PAR_CAUSE V_GC0_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_PAR_CAUSE 11
+#define V_GC1_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_CERR_PAR_CAUSE)
+#define F_GC1_TAG_CERR_PAR_CAUSE V_GC1_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_PAR_CAUSE 10
+#define V_GC0_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_CERR_PAR_CAUSE)
+#define F_GC0_TAG_CERR_PAR_CAUSE V_GC0_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_CE_PAR_CAUSE 9
+#define V_GC1_CE_PAR_CAUSE(x) ((x) << S_GC1_CE_PAR_CAUSE)
+#define F_GC1_CE_PAR_CAUSE V_GC1_CE_PAR_CAUSE(1U)
+
+#define S_GC0_CE_PAR_CAUSE 8
+#define V_GC0_CE_PAR_CAUSE(x) ((x) << S_GC0_CE_PAR_CAUSE)
+#define F_GC0_CE_PAR_CAUSE V_GC0_CE_PAR_CAUSE(1U)
+
+#define S_GC1_UE_PAR_CAUSE 7
+#define V_GC1_UE_PAR_CAUSE(x) ((x) << S_GC1_UE_PAR_CAUSE)
+#define F_GC1_UE_PAR_CAUSE V_GC1_UE_PAR_CAUSE(1U)
+
+#define S_GC0_UE_PAR_CAUSE 6
+#define V_GC0_UE_PAR_CAUSE(x) ((x) << S_GC0_UE_PAR_CAUSE)
+#define F_GC0_UE_PAR_CAUSE V_GC0_UE_PAR_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_CAUSE 5
+#define V_GC1_CMD_PAR_CAUSE(x) ((x) << S_GC1_CMD_PAR_CAUSE)
+#define F_GC1_CMD_PAR_CAUSE V_GC1_CMD_PAR_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_CAUSE 4
+#define V_GC1_DATA_PAR_CAUSE(x) ((x) << S_GC1_DATA_PAR_CAUSE)
+#define F_GC1_DATA_PAR_CAUSE V_GC1_DATA_PAR_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_CAUSE 3
+#define V_GC0_CMD_PAR_CAUSE(x) ((x) << S_GC0_CMD_PAR_CAUSE)
+#define F_GC0_CMD_PAR_CAUSE V_GC0_CMD_PAR_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_CAUSE 2
+#define V_GC0_DATA_PAR_CAUSE(x) ((x) << S_GC0_DATA_PAR_CAUSE)
+#define F_GC0_DATA_PAR_CAUSE V_GC0_DATA_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_PAR_CAUSE 1
+#define V_ILLADDRACCESS1_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS1_PAR_CAUSE)
+#define F_ILLADDRACCESS1_PAR_CAUSE V_ILLADDRACCESS1_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_PAR_CAUSE 0
+#define V_ILLADDRACCESS0_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS0_PAR_CAUSE)
+#define F_ILLADDRACCESS0_PAR_CAUSE V_ILLADDRACCESS0_PAR_CAUSE(1U)
+
+#define A_GCACHE_PAR_ENABLE 0x51540
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_ENABLE V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_ENABLE V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_PAR_ENABLE 19
+#define V_GC1_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC1_RSP_PERR_PAR_ENABLE)
+#define F_GC1_RSP_PERR_PAR_ENABLE V_GC1_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_PAR_ENABLE 18
+#define V_GC0_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC0_RSP_PERR_PAR_ENABLE)
+#define F_GC0_RSP_PERR_PAR_ENABLE V_GC0_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_PAR_ENABLE 17
+#define V_GC1_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_UERR_PAR_ENABLE)
+#define F_GC1_LRU_UERR_PAR_ENABLE V_GC1_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_PAR_ENABLE 16
+#define V_GC0_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_UERR_PAR_ENABLE)
+#define F_GC0_LRU_UERR_PAR_ENABLE V_GC0_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_PAR_ENABLE 15
+#define V_GC1_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_UERR_PAR_ENABLE)
+#define F_GC1_TAG_UERR_PAR_ENABLE V_GC1_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_PAR_ENABLE 14
+#define V_GC0_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_UERR_PAR_ENABLE)
+#define F_GC0_TAG_UERR_PAR_ENABLE V_GC0_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_PAR_ENABLE 13
+#define V_GC1_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_CERR_PAR_ENABLE)
+#define F_GC1_LRU_CERR_PAR_ENABLE V_GC1_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_PAR_ENABLE 12
+#define V_GC0_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_CERR_PAR_ENABLE)
+#define F_GC0_LRU_CERR_PAR_ENABLE V_GC0_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_PAR_ENABLE 11
+#define V_GC1_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_CERR_PAR_ENABLE)
+#define F_GC1_TAG_CERR_PAR_ENABLE V_GC1_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_PAR_ENABLE 10
+#define V_GC0_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_CERR_PAR_ENABLE)
+#define F_GC0_TAG_CERR_PAR_ENABLE V_GC0_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_CE_PAR_ENABLE 9
+#define V_GC1_CE_PAR_ENABLE(x) ((x) << S_GC1_CE_PAR_ENABLE)
+#define F_GC1_CE_PAR_ENABLE V_GC1_CE_PAR_ENABLE(1U)
+
+#define S_GC0_CE_PAR_ENABLE 8
+#define V_GC0_CE_PAR_ENABLE(x) ((x) << S_GC0_CE_PAR_ENABLE)
+#define F_GC0_CE_PAR_ENABLE V_GC0_CE_PAR_ENABLE(1U)
+
+#define S_GC1_UE_PAR_ENABLE 7
+#define V_GC1_UE_PAR_ENABLE(x) ((x) << S_GC1_UE_PAR_ENABLE)
+#define F_GC1_UE_PAR_ENABLE V_GC1_UE_PAR_ENABLE(1U)
+
+#define S_GC0_UE_PAR_ENABLE 6
+#define V_GC0_UE_PAR_ENABLE(x) ((x) << S_GC0_UE_PAR_ENABLE)
+#define F_GC0_UE_PAR_ENABLE V_GC0_UE_PAR_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_ENABLE 5
+#define V_GC1_CMD_PAR_ENABLE(x) ((x) << S_GC1_CMD_PAR_ENABLE)
+#define F_GC1_CMD_PAR_ENABLE V_GC1_CMD_PAR_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_ENABLE 4
+#define V_GC1_DATA_PAR_ENABLE(x) ((x) << S_GC1_DATA_PAR_ENABLE)
+#define F_GC1_DATA_PAR_ENABLE V_GC1_DATA_PAR_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_ENABLE 3
+#define V_GC0_CMD_PAR_ENABLE(x) ((x) << S_GC0_CMD_PAR_ENABLE)
+#define F_GC0_CMD_PAR_ENABLE V_GC0_CMD_PAR_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_ENABLE 2
+#define V_GC0_DATA_PAR_ENABLE(x) ((x) << S_GC0_DATA_PAR_ENABLE)
+#define F_GC0_DATA_PAR_ENABLE V_GC0_DATA_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_PAR_ENABLE 1
+#define V_ILLADDRACCESS1_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS1_PAR_ENABLE)
+#define F_ILLADDRACCESS1_PAR_ENABLE V_ILLADDRACCESS1_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_PAR_ENABLE 0
+#define V_ILLADDRACCESS0_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS0_PAR_ENABLE)
+#define F_ILLADDRACCESS0_PAR_ENABLE V_ILLADDRACCESS0_PAR_ENABLE(1U)
+
+#define A_GCACHE_INT_ENABLE 0x51544
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_ENABLE V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_ENABLE V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_INT_ENABLE 19
+#define V_GC1_RSP_PERR_INT_ENABLE(x) ((x) << S_GC1_RSP_PERR_INT_ENABLE)
+#define F_GC1_RSP_PERR_INT_ENABLE V_GC1_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_INT_ENABLE 18
+#define V_GC0_RSP_PERR_INT_ENABLE(x) ((x) << S_GC0_RSP_PERR_INT_ENABLE)
+#define F_GC0_RSP_PERR_INT_ENABLE V_GC0_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_INT_ENABLE 17
+#define V_GC1_LRU_UERR_INT_ENABLE(x) ((x) << S_GC1_LRU_UERR_INT_ENABLE)
+#define F_GC1_LRU_UERR_INT_ENABLE V_GC1_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_INT_ENABLE 16
+#define V_GC0_LRU_UERR_INT_ENABLE(x) ((x) << S_GC0_LRU_UERR_INT_ENABLE)
+#define F_GC0_LRU_UERR_INT_ENABLE V_GC0_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_INT_ENABLE 15
+#define V_GC1_TAG_UERR_INT_ENABLE(x) ((x) << S_GC1_TAG_UERR_INT_ENABLE)
+#define F_GC1_TAG_UERR_INT_ENABLE V_GC1_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_INT_ENABLE 14
+#define V_GC0_TAG_UERR_INT_ENABLE(x) ((x) << S_GC0_TAG_UERR_INT_ENABLE)
+#define F_GC0_TAG_UERR_INT_ENABLE V_GC0_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_INT_ENABLE 13
+#define V_GC1_LRU_CERR_INT_ENABLE(x) ((x) << S_GC1_LRU_CERR_INT_ENABLE)
+#define F_GC1_LRU_CERR_INT_ENABLE V_GC1_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_INT_ENABLE 12
+#define V_GC0_LRU_CERR_INT_ENABLE(x) ((x) << S_GC0_LRU_CERR_INT_ENABLE)
+#define F_GC0_LRU_CERR_INT_ENABLE V_GC0_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_INT_ENABLE 11
+#define V_GC1_TAG_CERR_INT_ENABLE(x) ((x) << S_GC1_TAG_CERR_INT_ENABLE)
+#define F_GC1_TAG_CERR_INT_ENABLE V_GC1_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_INT_ENABLE 10
+#define V_GC0_TAG_CERR_INT_ENABLE(x) ((x) << S_GC0_TAG_CERR_INT_ENABLE)
+#define F_GC0_TAG_CERR_INT_ENABLE V_GC0_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC1_CE_INT_ENABLE 9
+#define V_GC1_CE_INT_ENABLE(x) ((x) << S_GC1_CE_INT_ENABLE)
+#define F_GC1_CE_INT_ENABLE V_GC1_CE_INT_ENABLE(1U)
+
+#define S_GC0_CE_INT_ENABLE 8
+#define V_GC0_CE_INT_ENABLE(x) ((x) << S_GC0_CE_INT_ENABLE)
+#define F_GC0_CE_INT_ENABLE V_GC0_CE_INT_ENABLE(1U)
+
+#define S_GC1_UE_INT_ENABLE 7
+#define V_GC1_UE_INT_ENABLE(x) ((x) << S_GC1_UE_INT_ENABLE)
+#define F_GC1_UE_INT_ENABLE V_GC1_UE_INT_ENABLE(1U)
+
+#define S_GC0_UE_INT_ENABLE 6
+#define V_GC0_UE_INT_ENABLE(x) ((x) << S_GC0_UE_INT_ENABLE)
+#define F_GC0_UE_INT_ENABLE V_GC0_UE_INT_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_INT_ENABLE 5
+#define V_GC1_CMD_PAR_INT_ENABLE(x) ((x) << S_GC1_CMD_PAR_INT_ENABLE)
+#define F_GC1_CMD_PAR_INT_ENABLE V_GC1_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_INT_ENABLE 4
+#define V_GC1_DATA_PAR_INT_ENABLE(x) ((x) << S_GC1_DATA_PAR_INT_ENABLE)
+#define F_GC1_DATA_PAR_INT_ENABLE V_GC1_DATA_PAR_INT_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_INT_ENABLE 3
+#define V_GC0_CMD_PAR_INT_ENABLE(x) ((x) << S_GC0_CMD_PAR_INT_ENABLE)
+#define F_GC0_CMD_PAR_INT_ENABLE V_GC0_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_INT_ENABLE 2
+#define V_GC0_DATA_PAR_INT_ENABLE(x) ((x) << S_GC0_DATA_PAR_INT_ENABLE)
+#define F_GC0_DATA_PAR_INT_ENABLE V_GC0_DATA_PAR_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_INT_ENABLE 1
+#define V_ILLADDRACCESS1_INT_ENABLE(x) ((x) << S_ILLADDRACCESS1_INT_ENABLE)
+#define F_ILLADDRACCESS1_INT_ENABLE V_ILLADDRACCESS1_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_INT_ENABLE 0
+#define V_ILLADDRACCESS0_INT_ENABLE(x) ((x) << S_ILLADDRACCESS0_INT_ENABLE)
+#define F_ILLADDRACCESS0_INT_ENABLE V_ILLADDRACCESS0_INT_ENABLE(1U)
+
+#define A_GCACHE_INT_CAUSE 0x51548
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_CAUSE V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_CAUSE V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_INT_CAUSE 19
+#define V_GC1_RSP_PERR_INT_CAUSE(x) ((x) << S_GC1_RSP_PERR_INT_CAUSE)
+#define F_GC1_RSP_PERR_INT_CAUSE V_GC1_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_INT_CAUSE 18
+#define V_GC0_RSP_PERR_INT_CAUSE(x) ((x) << S_GC0_RSP_PERR_INT_CAUSE)
+#define F_GC0_RSP_PERR_INT_CAUSE V_GC0_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_INT_CAUSE 17
+#define V_GC1_LRU_UERR_INT_CAUSE(x) ((x) << S_GC1_LRU_UERR_INT_CAUSE)
+#define F_GC1_LRU_UERR_INT_CAUSE V_GC1_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_INT_CAUSE 16
+#define V_GC0_LRU_UERR_INT_CAUSE(x) ((x) << S_GC0_LRU_UERR_INT_CAUSE)
+#define F_GC0_LRU_UERR_INT_CAUSE V_GC0_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_INT_CAUSE 15
+#define V_GC1_TAG_UERR_INT_CAUSE(x) ((x) << S_GC1_TAG_UERR_INT_CAUSE)
+#define F_GC1_TAG_UERR_INT_CAUSE V_GC1_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_INT_CAUSE 14
+#define V_GC0_TAG_UERR_INT_CAUSE(x) ((x) << S_GC0_TAG_UERR_INT_CAUSE)
+#define F_GC0_TAG_UERR_INT_CAUSE V_GC0_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_INT_CAUSE 13
+#define V_GC1_LRU_CERR_INT_CAUSE(x) ((x) << S_GC1_LRU_CERR_INT_CAUSE)
+#define F_GC1_LRU_CERR_INT_CAUSE V_GC1_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_INT_CAUSE 12
+#define V_GC0_LRU_CERR_INT_CAUSE(x) ((x) << S_GC0_LRU_CERR_INT_CAUSE)
+#define F_GC0_LRU_CERR_INT_CAUSE V_GC0_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_INT_CAUSE 11
+#define V_GC1_TAG_CERR_INT_CAUSE(x) ((x) << S_GC1_TAG_CERR_INT_CAUSE)
+#define F_GC1_TAG_CERR_INT_CAUSE V_GC1_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_INT_CAUSE 10
+#define V_GC0_TAG_CERR_INT_CAUSE(x) ((x) << S_GC0_TAG_CERR_INT_CAUSE)
+#define F_GC0_TAG_CERR_INT_CAUSE V_GC0_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC1_CE_INT_CAUSE 9
+#define V_GC1_CE_INT_CAUSE(x) ((x) << S_GC1_CE_INT_CAUSE)
+#define F_GC1_CE_INT_CAUSE V_GC1_CE_INT_CAUSE(1U)
+
+#define S_GC0_CE_INT_CAUSE 8
+#define V_GC0_CE_INT_CAUSE(x) ((x) << S_GC0_CE_INT_CAUSE)
+#define F_GC0_CE_INT_CAUSE V_GC0_CE_INT_CAUSE(1U)
+
+#define S_GC1_UE_INT_CAUSE 7
+#define V_GC1_UE_INT_CAUSE(x) ((x) << S_GC1_UE_INT_CAUSE)
+#define F_GC1_UE_INT_CAUSE V_GC1_UE_INT_CAUSE(1U)
+
+#define S_GC0_UE_INT_CAUSE 6
+#define V_GC0_UE_INT_CAUSE(x) ((x) << S_GC0_UE_INT_CAUSE)
+#define F_GC0_UE_INT_CAUSE V_GC0_UE_INT_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_INT_CAUSE 5
+#define V_GC1_CMD_PAR_INT_CAUSE(x) ((x) << S_GC1_CMD_PAR_INT_CAUSE)
+#define F_GC1_CMD_PAR_INT_CAUSE V_GC1_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_INT_CAUSE 4
+#define V_GC1_DATA_PAR_INT_CAUSE(x) ((x) << S_GC1_DATA_PAR_INT_CAUSE)
+#define F_GC1_DATA_PAR_INT_CAUSE V_GC1_DATA_PAR_INT_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_INT_CAUSE 3
+#define V_GC0_CMD_PAR_INT_CAUSE(x) ((x) << S_GC0_CMD_PAR_INT_CAUSE)
+#define F_GC0_CMD_PAR_INT_CAUSE V_GC0_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_INT_CAUSE 2
+#define V_GC0_DATA_PAR_INT_CAUSE(x) ((x) << S_GC0_DATA_PAR_INT_CAUSE)
+#define F_GC0_DATA_PAR_INT_CAUSE V_GC0_DATA_PAR_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_INT_CAUSE 1
+#define V_ILLADDRACCESS1_INT_CAUSE(x) ((x) << S_ILLADDRACCESS1_INT_CAUSE)
+#define F_ILLADDRACCESS1_INT_CAUSE V_ILLADDRACCESS1_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_INT_CAUSE 0
+#define V_ILLADDRACCESS0_INT_CAUSE(x) ((x) << S_ILLADDRACCESS0_INT_CAUSE)
+#define F_ILLADDRACCESS0_INT_CAUSE V_ILLADDRACCESS0_INT_CAUSE(1U)
+
+#define A_GCACHE_DBG_SEL_CTRL 0x51550
+
+#define S_DBG_SEL_CTRLSEL_OVR_EN 31
+#define V_DBG_SEL_CTRLSEL_OVR_EN(x) ((x) << S_DBG_SEL_CTRLSEL_OVR_EN)
+#define F_DBG_SEL_CTRLSEL_OVR_EN V_DBG_SEL_CTRLSEL_OVR_EN(1U)
+
+#define S_T7_DEBUG_HI 16
+#define V_T7_DEBUG_HI(x) ((x) << S_T7_DEBUG_HI)
+#define F_T7_DEBUG_HI V_T7_DEBUG_HI(1U)
+
+#define S_DBG_SEL_CTRLSELH 8
+#define M_DBG_SEL_CTRLSELH 0xffU
+#define V_DBG_SEL_CTRLSELH(x) ((x) << S_DBG_SEL_CTRLSELH)
+#define G_DBG_SEL_CTRLSELH(x) (((x) >> S_DBG_SEL_CTRLSELH) & M_DBG_SEL_CTRLSELH)
+
+#define S_DBG_SEL_CTRLSELL 0
+#define M_DBG_SEL_CTRLSELL 0xffU
+#define V_DBG_SEL_CTRLSELL(x) ((x) << S_DBG_SEL_CTRLSELL)
+#define G_DBG_SEL_CTRLSELL(x) (((x) >> S_DBG_SEL_CTRLSELL) & M_DBG_SEL_CTRLSELL)
+
+#define A_GCACHE_LOCAL_DEBUG_RPT 0x51554
+#define A_GCACHE_DBG_ILL_ACC 0x5155c
+#define A_GCACHE_DBG_ILL_ADDR0 0x51560
+#define A_GCACHE_DBG_ILL_ADDR1 0x51564
+#define A_GCACHE_GC0_DBG_ADDR_0_32 0x51568
+#define A_GCACHE_GC0_DBG_ADDR_32_32 0x5156c
+#define A_GCACHE_GC0_DBG_ADDR_64_32 0x51570
+#define A_GCACHE_GC0_DBG_ADDR_96_32 0x51574
+#define A_GCACHE_GC0_DBG_ADDR_0_64 0x51578
+#define A_GCACHE_GC0_DBG_ADDR_64_64 0x5157c
+#define A_GCACHE_GC0_DBG_ADDR_0_96 0x51580
+#define A_GCACHE_GC0_DBG_ADDR_32_96 0x51584
+#define A_GCACHE_GC1_DBG_ADDR_0_32 0x5158c
+#define A_GCACHE_GC1_DBG_ADDR_32_32 0x51590
+#define A_GCACHE_GC1_DBG_ADDR_64_32 0x51594
+#define A_GCACHE_GC1_DBG_ADDR_96_32 0x51598
+#define A_GCACHE_GC1_DBG_ADDR_0_64 0x5159c
+#define A_GCACHE_GC1_DBG_ADDR_64_64 0x515a0
+#define A_GCACHE_GC1_DBG_ADDR_0_96 0x515a4
+#define A_GCACHE_GC1_DBG_ADDR_32_96 0x515a8
+#define A_GCACHE_GC0_DBG_ADDR_32_64 0x515ac
+#define A_GCACHE_GC1_DBG_ADDR_32_64 0x515b0
+#define A_GCACHE_PERF_GC0_EVICT 0x515b4
+#define A_GCACHE_PERF_GC1_EVICT 0x515b8
+#define A_GCACHE_PERF_GC0_CE_COUNT 0x515bc
+#define A_GCACHE_PERF_GC1_CE_COUNT 0x515c0
+#define A_GCACHE_PERF_GC0_UE_COUNT 0x515c4
+#define A_GCACHE_PERF_GC1_UE_COUNT 0x515c8
+#define A_GCACHE_DBG_CTL 0x515f0
+#define A_GCACHE_DBG_DATA 0x515f4
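As elsewhere in this header, each field is described by up to four macros: S_* (bit shift), M_* (width mask), V_*(x) (shift a value into the field), G_*(x) (extract the field), plus F_* for single-bit fields. A minimal sketch of decoding A_GCACHE_INT_CAUSE with these accessors; t4_read_reg()/t4_write_reg() are the driver's existing MMIO helpers, but the function itself is hypothetical and write-1-to-clear semantics are assumed:

	static void
	gcache_intr_sketch(struct adapter *sc)
	{
		uint32_t cause = t4_read_reg(sc, A_GCACHE_INT_CAUSE);

		if (cause & F_GC0_UE_INT_CAUSE)
			device_printf(sc->dev, "GC0 uncorrectable error\n");
		if (cause & (F_ILLADDRACCESS0_INT_CAUSE | F_ILLADDRACCESS1_INT_CAUSE))
			device_printf(sc->dev, "GCACHE illegal address access\n");

		t4_write_reg(sc, A_GCACHE_INT_CAUSE, cause);	/* assumed W1C */
	}
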
diff --git a/sys/dev/cxgbe/common/t4_regs_values.h b/sys/dev/cxgbe/common/t4_regs_values.h
index 830828097802..6485fa50bd08 100644
--- a/sys/dev/cxgbe/common/t4_regs_values.h
+++ b/sys/dev/cxgbe/common/t4_regs_values.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -269,6 +268,7 @@
#define X_WINDOW_SHIFT 10
#define X_PCIEOFST_SHIFT 10
+#define X_T7_MEMOFST_SHIFT 4
/*
* TP definitions.
@@ -284,6 +284,10 @@
#define S_FT_FIRST S_FCOE
#define S_FT_LAST S_FRAGMENTATION
+#define S_T7_FT_FIRST S_IPSECIDX
+#define S_T7_FT_LAST S_TCPFLAGS
+
+#define W_FT_IPSECIDX 12
#define W_FT_FCOE 1
#define W_FT_PORT 3
#define W_FT_VNIC_ID 17
@@ -294,17 +298,9 @@
#define W_FT_MACMATCH 9
#define W_FT_MPSHITTYPE 3
#define W_FT_FRAGMENTATION 1
-
-#define M_FT_FCOE ((1ULL << W_FT_FCOE) - 1)
-#define M_FT_PORT ((1ULL << W_FT_PORT) - 1)
-#define M_FT_VNIC_ID ((1ULL << W_FT_VNIC_ID) - 1)
-#define M_FT_VLAN ((1ULL << W_FT_VLAN) - 1)
-#define M_FT_TOS ((1ULL << W_FT_TOS) - 1)
-#define M_FT_PROTOCOL ((1ULL << W_FT_PROTOCOL) - 1)
-#define M_FT_ETHERTYPE ((1ULL << W_FT_ETHERTYPE) - 1)
-#define M_FT_MACMATCH ((1ULL << W_FT_MACMATCH) - 1)
-#define M_FT_MPSHITTYPE ((1ULL << W_FT_MPSHITTYPE) - 1)
-#define M_FT_FRAGMENTATION ((1ULL << W_FT_FRAGMENTATION) - 1)
+#define W_FT_ROCE 1
+#define W_FT_SYNONLY 1
+#define W_FT_TCPFLAGS 12
/*
* Some of the Compressed Filter Tuple fields have internal structure. These
@@ -327,6 +323,6 @@
#define S_FT_VNID_ID_VLD 16
#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
-#define F_FT_VNID_ID_VLD(x) V_FT_VNID_ID_VLD(1U)
+#define F_FT_VNID_ID_VLD V_FT_VNID_ID_VLD(1U)
#endif /* __T4_REGS_VALUES_H__ */
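
The M_FT_* masks deleted above were pure functions of the W_FT_* widths, so a consumer that still needs one can rebuild it the same way, e.g. for the new 12-bit W_FT_TCPFLAGS field. A sketch (the helper name is illustrative):

	/* Illustrative: all-ones mask for a W_FT_* filter-tuple field width. */
	static inline uint64_t
	ft_field_mask(unsigned int width)
	{
		return ((1ULL << width) - 1);	/* W_FT_TCPFLAGS = 12 -> 0xfff */
	}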
diff --git a/sys/dev/cxgbe/common/t4_tcb.h b/sys/dev/cxgbe/common/t4_tcb.h
index f9631ba58418..8bff15f04e7a 100644
--- a/sys/dev/cxgbe/common/t4_tcb.h
+++ b/sys/dev/cxgbe/common/t4_tcb.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -340,10 +339,9 @@
/* 1023:1020 */
#define W_TCB_ULP_EXT 31
-#define S_TCP_ULP_EXT 28
+#define S_TCB_ULP_EXT 28
#define M_TCB_ULP_EXT 0xfULL
-#define V_TCB_ULP_EXT(x) ((x) << S_TCP_ULP_EXT)
-
+#define V_TCB_ULP_EXT(x) ((x) << S_TCB_ULP_EXT)
/* 840:832 */
#define W_TCB_IRS_ULP 26
@@ -495,31 +493,31 @@
#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-/* 855:832 */
+/* 855:832 */
#define W_TCB_RX_TLS_BUF_OFFSET 26
#define S_TCB_RX_TLS_BUF_OFFSET 0
#define M_TCB_RX_TLS_BUF_OFFSET 0xffffffULL
#define V_TCB_RX_TLS_BUF_OFFSET(x) ((x) << S_TCB_RX_TLS_BUF_OFFSET)
-/* 876:856 */
+/* 879:856 */
#define W_TCB_RX_TLS_BUF_LEN 26
#define S_TCB_RX_TLS_BUF_LEN 24
#define M_TCB_RX_TLS_BUF_LEN 0xffffffULL
#define V_TCB_RX_TLS_BUF_LEN(x) ((__u64)(x) << S_TCB_RX_TLS_BUF_LEN)
-/* 895:880 */
-#define W_TCB_RX_TLS_FLAGS 26
-#define S_TCB_RX_TLS_FLAGS 48
+/* 895:880 */
+#define W_TCB_RX_TLS_FLAGS 27
+#define S_TCB_RX_TLS_FLAGS 16
#define M_TCB_RX_TLS_FLAGS 0xffffULL
#define V_TCB_RX_TLS_FLAGS(x) ((__u64)(x) << S_TCB_RX_TLS_FLAGS)
-/* 959:896 */
-#define W_TCB_TLS_SEQ 28
-#define S_TCB_TLS_SEQ 0
-#define M_TCB_TLS_SEQ 0xffffffffffffffffULL
-#define V_TCB_TLS_SEQ(x) ((__u64)(x) << S_TCB_TLS_SEQ)
+/* 959:896 */
+#define W_TCB_RX_TLS_SEQ 28
+#define S_TCB_RX_TLS_SEQ 0
+#define M_TCB_RX_TLS_SEQ 0xffffffffffffffffULL
+#define V_TCB_RX_TLS_SEQ(x) ((__u64)(x) << S_TCB_RX_TLS_SEQ)
-/* 991:960 */
+/* 991:960 */
#define W_TCB_RX_TLS_BUF_TAG 30
#define S_TCB_RX_TLS_BUF_TAG 0
#define M_TCB_RX_TLS_BUF_TAG 0xffffffffULL
@@ -531,17 +529,113 @@
#define M_TCB_RX_TLS_KEY_TAG 0xffffffffULL
#define V_TCB_RX_TLS_KEY_TAG(x) ((x) << S_TCB_RX_TLS_KEY_TAG)
+#define S_TF_TLS_ENABLE 0
+#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+
+#define S_TF_TLS_ACTIVE 1
+#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+
+#define S_TF_TLS_CONTROL 2
+#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+
#define S_TF_TLS_KEY_SIZE 7
#define V_TF_TLS_KEY_SIZE(x) ((x) << S_TF_TLS_KEY_SIZE)
-#define S_TF_TLS_CONTROL 2
-#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+/* 853:832 */
+#define W_TCB_TPT_OFFSET 26
+#define S_TCB_TPT_OFFSET 0
+#define M_TCB_TPT_OFFSET 0x3fffffULL
+#define V_TCB_TPT_OFFSET(x) ((x) << S_TCB_TPT_OFFSET)
+
+/* 863:854 */
+#define W_TCB_T10_CONFIG 26
+#define S_TCB_T10_CONFIG 22
+#define M_TCB_T10_CONFIG 0x3ffULL
+#define V_TCB_T10_CONFIG(x) ((x) << S_TCB_T10_CONFIG)
+
+/* 871:864 */
+#define W_TCB_PDU_HLEN 27
+#define S_TCB_PDU_HLEN 0
+#define M_TCB_PDU_HLEN 0xffULL
+#define V_TCB_PDU_HLEN(x) ((x) << S_TCB_PDU_HLEN)
+
+/* 879:872 */
+#define W_TCB_PDU_PDO 27
+#define S_TCB_PDU_PDO 8
+#define M_TCB_PDU_PDO 0xffULL
+#define V_TCB_PDU_PDO(x) ((x) << S_TCB_PDU_PDO)
-#define S_TF_TLS_ACTIVE 1
-#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+/* 895:880 */
+#define W_TCB_N_CQ_IDX_RQ 27
+#define S_TCB_N_CQ_IDX_RQ 16
+#define M_TCB_N_CQ_IDX_RQ 0xffffULL
+#define V_TCB_N_CQ_IDX_RQ(x) ((x) << S_TCB_N_CQ_IDX_RQ)
+
+/* 900:896 */
+#define W_TCB_NVMT_PDA 28
+#define S_TCB_NVMT_PDA 0
+#define M_TCB_NVMT_PDA 0x1fULL
+#define V_TCB_NVMT_PDA(x) ((x) << S_TCB_NVMT_PDA)
+
+/* 911:901 */
+#define W_TCB_RSVD 28
+#define S_TCB_RSVD 5
+#define M_TCB_RSVD 0x7ffULL
+#define V_TCB_RSVD(x) ((x) << S_TCB_RSVD)
-#define S_TF_TLS_ENABLE 0
-#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+/* 927:912 */
+#define W_TCB_N_PD_ID 28
+#define S_TCB_N_PD_ID 16
+#define M_TCB_N_PD_ID 0xffffULL
+#define V_TCB_N_PD_ID(x) ((x) << S_TCB_N_PD_ID)
+
+/* 929:928 */
+#define W_TCB_CMP_IMM_SZ 29
+#define S_TCB_CMP_IMM_SZ 0
+#define M_TCB_CMP_IMM_SZ 0x3ULL
+#define V_TCB_CMP_IMM_SZ(x) ((x) << S_TCB_CMP_IMM_SZ)
+
+/* 931:930 */
+#define W_TCB_PDU_DGST_FLAGS 29
+#define S_TCB_PDU_DGST_FLAGS 2
+#define M_TCB_PDU_DGST_FLAGS 0x3ULL
+#define V_TCB_PDU_DGST_FLAGS(x) ((x) << S_TCB_PDU_DGST_FLAGS)
+
+/* 959:932 */
+#define W_TCB_RSVD1 29
+#define S_TCB_RSVD1 4
+#define M_TCB_RSVD1 0xfffffffULL
+#define V_TCB_RSVD1(x) ((x) << S_TCB_RSVD1)
+
+/* 985:960 */
+#define W_TCB_N_RQ_START 30
+#define S_TCB_N_RQ_START 0
+#define M_TCB_N_RQ_START 0x3ffffffULL
+#define V_TCB_N_RQ_START(x) ((x) << S_TCB_N_RQ_START)
+
+/* 998:986 */
+#define W_TCB_N_RQ_MSN 30
+#define S_TCB_N_RQ_MSN 26
+#define M_TCB_N_RQ_MSN 0x1fffULL
+#define V_TCB_N_RQ_MSN(x) ((__u64)(x) << S_TCB_N_RQ_MSN)
+
+/* 1002:999 */
+#define W_TCB_N_RQ_MAX_OFFSET 31
+#define S_TCB_N_RQ_MAX_OFFSET 7
+#define M_TCB_N_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_N_RQ_MAX_OFFSET(x) ((x) << S_TCB_N_RQ_MAX_OFFSET)
+
+/* 1015:1003 */
+#define W_TCB_N_RQ_WRITE_PTR 31
+#define S_TCB_N_RQ_WRITE_PTR 11
+#define M_TCB_N_RQ_WRITE_PTR 0x1fffULL
+#define V_TCB_N_RQ_WRITE_PTR(x) ((x) << S_TCB_N_RQ_WRITE_PTR)
+
+/* 1023:1016 */
+#define W_TCB_N_PDU_TYPE 31
+#define S_TCB_N_PDU_TYPE 24
+#define M_TCB_N_PDU_TYPE 0xffULL
+#define V_TCB_N_PDU_TYPE(x) ((x) << S_TCB_N_PDU_TYPE)
#define S_TF_MIGRATING 0
#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
@@ -549,15 +643,24 @@
#define S_TF_NON_OFFLOAD 1
#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+#define S_TF_FILTER 1
+#define V_TF_FILTER(x) ((x) << S_TF_FILTER)
+
#define S_TF_LOCK_TID 2
#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
#define S_TF_KEEPALIVE 3
#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+#define S_TF_DROP_ENCAPS_HDR 3
+#define V_TF_DROP_ENCAPS_HDR(x) ((x) << S_TF_DROP_ENCAPS_HDR)
+
#define S_TF_DACK 4
#define V_TF_DACK(x) ((x) << S_TF_DACK)
+#define S_TF_COUNT_HITS 4
+#define V_TF_COUNT_HITS(x) ((x) << S_TF_COUNT_HITS)
+
#define S_TF_DACK_MSS 5
#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
@@ -567,6 +670,9 @@
#define S_TF_NAGLE 7
#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+#define S_TF_REMOVE_VLAN 7
+#define V_TF_REMOVE_VLAN(x) ((x) << S_TF_REMOVE_VLAN)
+
#define S_TF_SSWS_DISABLED 8
#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
@@ -576,15 +682,24 @@
#define S_TF_RX_FLOW_CONTROL_DISABLE 10
#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+#define S_TF_NAT_SEQ_CHECK 10
+#define V_TF_NAT_SEQ_CHECK(x) ((x) << S_TF_NAT_SEQ_CHECK)
+
#define S_TF_RX_CHANNEL 11
#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
#define S_TF_TX_CHANNEL0 12
#define V_TF_TX_CHANNEL0(x) ((x) << S_TF_TX_CHANNEL0)
+#define S_TF_LPBK_TX_CHANNEL0 12
+#define V_TF_LPBK_TX_CHANNEL0(x) ((x) << S_TF_LPBK_TX_CHANNEL0)
+
#define S_TF_TX_CHANNEL1 13
#define V_TF_TX_CHANNEL1(x) ((x) << S_TF_TX_CHANNEL1)
+#define S_TF_LPBK_TX_CHANNEL1 13
+#define V_TF_LPBK_TX_CHANNEL1(x) ((x) << S_TF_LPBK_TX_CHANNEL1)
+
#define S_TF_TX_QUIESCE 14
#define V_TF_TX_QUIESCE(x) ((x) << S_TF_TX_QUIESCE)
@@ -607,6 +722,10 @@
#define M_TF_TX_QUEUE 0x7ULL
#define V_TF_TX_QUEUE(x) ((x) << S_TF_TX_QUEUE)
+#define S_TF_NAT_MODE 18
+#define M_TF_NAT_MODE 0x7ULL
+#define V_TF_NAT_MODE(x) ((x) << S_TF_NAT_MODE)
+
#define S_TF_TURBO 21
#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
@@ -652,8 +771,8 @@
#define S_TF_RCV_COALESCE_HEARTBEAT 32
#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((__u64)(x) << S_TF_RCV_COALESCE_HEARTBEAT)
-#define S_TF_INIT 33
-#define V_TF_INIT(x) ((__u64)(x) << S_TF_INIT)
+#define S_TF_RSS_FW 33
+#define V_TF_RSS_FW(x) ((__u64)(x) << S_TF_RSS_FW)
#define S_TF_ACTIVE_OPEN 34
#define V_TF_ACTIVE_OPEN(x) ((__u64)(x) << S_TF_ACTIVE_OPEN)
@@ -712,12 +831,21 @@
#define S_TF_RECV_SCALE 52
#define V_TF_RECV_SCALE(x) ((__u64)(x) << S_TF_RECV_SCALE)
+#define S_TF_NAT_FLAG_CHECK 52
+#define V_TF_NAT_FLAG_CHECK(x) ((__u64)(x) << S_TF_NAT_FLAG_CHECK)
+
#define S_TF_RECV_TSTMP 53
#define V_TF_RECV_TSTMP(x) ((__u64)(x) << S_TF_RECV_TSTMP)
+#define S_TF_LPBK_TX_LPBK 53
+#define V_TF_LPBK_TX_LPBK(x) ((__u64)(x) << S_TF_LPBK_TX_LPBK)
+
#define S_TF_RECV_SACK 54
#define V_TF_RECV_SACK(x) ((__u64)(x) << S_TF_RECV_SACK)
+#define S_TF_SWAP_MAC_ADDR 54
+#define V_TF_SWAP_MAC_ADDR(x) ((__u64)(x) << S_TF_SWAP_MAC_ADDR)
+
#define S_TF_PEND_CTL0 55
#define V_TF_PEND_CTL0(x) ((__u64)(x) << S_TF_PEND_CTL0)
@@ -751,6 +879,9 @@
#define S_TF_CCTRL_RFR 62
#define V_TF_CCTRL_RFR(x) ((__u64)(x) << S_TF_CCTRL_RFR)
+#define S_TF_INSERT_VLAN 62
+#define V_TF_INSERT_VLAN(x) ((__u64)(x) << S_TF_INSERT_VLAN)
+
#define S_TF_CORE_BYPASS 63
#define V_TF_CORE_BYPASS(x) ((__u64)(x) << S_TF_CORE_BYPASS)
@@ -772,6 +903,9 @@
#define S_TF_DDP_RX2TX 21
#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+#define S_TF_DDP_INDICATE_FLL 22
+#define V_TF_DDP_INDICATE_FLL(x) ((x) << S_TF_DDP_INDICATE_FLL)
+
#define S_TF_DDP_BUF0_VALID 24
#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
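
One detail worth noting in the flag macros above: the V_TF_* macros for bits 32-63 cast through __u64 before shifting, while those for bits 0-31 do not, so mixed expressions must be accumulated in a 64-bit type. An illustrative composition using macros from this header:

	/* Bits >= 32 rely on the __u64 cast inside their V_TF_* macro. */
	__u64 t_flags = V_TF_MIGRATING(1) |	/* bit 0 */
	    V_TF_RSS_FW(1) |			/* bit 33 */
	    V_TF_RECV_TSTMP(1);			/* bit 53 */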
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c
index 2c83b10b13d6..80e31b1159fd 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.c
+++ b/sys/dev/cxgbe/crypto/t4_crypto.c
@@ -208,6 +208,7 @@ struct ccr_softc {
counter_u64_t stats_pad_error;
counter_u64_t stats_sglist_error;
counter_u64_t stats_process_error;
+ counter_u64_t stats_pointer_error;
counter_u64_t stats_sw_fallback;
struct sysctl_ctx_list ctx;
@@ -458,8 +459,9 @@ ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DATAMODIFY(0) |
- V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
+ V_T7_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
V_ULP_TXPKT_DEST(0) |
+ (is_t7(sc->adapter) ? V_ULP_TXPKT_CMDMORE(1) : 0) |
V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
crwr->ulptx.len = htobe32(
((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
@@ -545,7 +547,7 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
@@ -705,7 +707,7 @@ ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1006,7 +1008,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1293,7 +1295,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1645,7 +1647,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1883,6 +1885,9 @@ ccr_sysctls(struct ccr_softc *sc)
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
CTLFLAG_RD, &sc->stats_process_error,
"Requests failed during queueing");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pointer_error",
+ CTLFLAG_RD, &sc->stats_pointer_error,
+ "Requests with a misaligned request pointer");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
CTLFLAG_RD, &sc->stats_sw_fallback,
"Requests processed by falling back to software");
@@ -1932,13 +1937,15 @@ ccr_init_port(struct ccr_softc *sc, int port)
"Too many ports to fit in port_mask");
/*
- * Completions for crypto requests on port 1 can sometimes
+ * Completions for crypto requests on port 1 on T6 can sometimes
* return a stale cookie value due to a firmware bug. Disable
* requests on port 1 by default on affected firmware.
*/
- if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
- port == 0)
- sc->port_mask |= 1u << port;
+ if (port != 0 && is_t6(sc->adapter) &&
+ sc->adapter->params.fw_vers < FW_VERSION32(1, 25, 4, 0))
+ return;
+
+ sc->port_mask |= 1u << port;
}
static int
@@ -1988,6 +1995,7 @@ ccr_attach(device_t dev)
sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
sc->stats_process_error = counter_u64_alloc(M_WAITOK);
+ sc->stats_pointer_error = counter_u64_alloc(M_WAITOK);
sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
ccr_sysctls(sc);
@@ -2034,6 +2042,7 @@ ccr_detach(device_t dev)
counter_u64_free(sc->stats_pad_error);
counter_u64_free(sc->stats_sglist_error);
counter_u64_free(sc->stats_process_error);
+ counter_u64_free(sc->stats_pointer_error);
counter_u64_free(sc->stats_sw_fallback);
for_each_port(sc->adapter, i) {
ccr_free_port(sc, i);
@@ -2531,6 +2540,16 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
+ /*
+	 * Request pointers with any of the low cookie bits set can't
+	 * be stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)crp & CPL_FW6_COOKIE_MASK) != 0) {
+ counter_u64_add(sc->stats_pointer_error, 1);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+
mtx_lock(&s->lock);
error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
@@ -2637,6 +2656,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
out:
mtx_unlock(&s->lock);
+out_unlocked:
if (error) {
crp->crp_etype = error;
crypto_done(crp);
@@ -2646,7 +2666,7 @@ out:
}
static int
-do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
+fw6_pld_ccr(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct ccr_softc *sc;
@@ -2661,7 +2681,7 @@ do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
else
cpl = (const void *)(rss + 1);
- crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
+ crp = (struct cryptop *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
s = crypto_get_driver_session(crp->crp_session);
status = be64toh(cpl->data[0]);
if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
@@ -2715,10 +2735,12 @@ ccr_modevent(module_t mod, int cmd, void *arg)
switch (cmd) {
case MOD_LOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, fw6_pld_ccr,
+ CPL_FW6_COOKIE_CCR);
return (0);
case MOD_UNLOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, NULL);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL,
+ CPL_FW6_COOKIE_CCR);
return (0);
default:
return (EOPNOTSUPP);
@@ -2745,7 +2767,9 @@ static driver_t ccr_driver = {
sizeof(struct ccr_softc)
};
-DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, chnex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, t6nex, ccr_driver, NULL, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
+MODULE_DEPEND(ccr, chnex, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
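
The switch to t4_register_shared_cpl_handler() above lets several consumers share CPL_FW6_PLD by tagging the low bits of each request pointer with a per-consumer cookie, which is why ccr_process() now rejects pointers that already have those bits set. A sketch of the idea; CPL_FW6_COOKIE_MASK and CPL_FW6_COOKIE_CCR come from the driver headers, while the helper itself is hypothetical:

	/* Illustrative: pack a consumer cookie into an aligned pointer. */
	static inline uint64_t
	fw6_pld_pack_cookie(const struct cryptop *crp)
	{
		MPASS(((uintptr_t)crp & CPL_FW6_COOKIE_MASK) == 0);
		return ((uintptr_t)crp | CPL_FW6_COOKIE_CCR);
	}

On completion, CPL_FW6_PLD_COOKIE() strips the cookie bits back off to recover the cryptop pointer, as fw6_pld_ccr() does above.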
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.h b/sys/dev/cxgbe/crypto/t4_crypto.h
index 452e48d20dfd..71c9ec3903ef 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.h
+++ b/sys/dev/cxgbe/crypto/t4_crypto.h
@@ -139,6 +139,7 @@ struct phys_sge_pairs {
#define SCMD_PROTO_VERSION_TLS_1_2 0
#define SCMD_PROTO_VERSION_TLS_1_1 1
#define SCMD_PROTO_VERSION_GENERIC 4
+#define SCMD_PROTO_VERSION_TLS_1_3 8
#define SCMD_CIPH_MODE_NOP 0
#define SCMD_CIPH_MODE_AES_CBC 1
diff --git a/sys/dev/cxgbe/crypto/t4_keyctx.c b/sys/dev/cxgbe/crypto/t4_keyctx.c
index 50e339ac2e05..b85e50fd6cb1 100644
--- a/sys/dev/cxgbe/crypto/t4_keyctx.c
+++ b/sys/dev/cxgbe/crypto/t4_keyctx.c
@@ -437,10 +437,16 @@ t4_tls_key_info_size(const struct ktls_session *tls)
int
t4_tls_proto_ver(const struct ktls_session *tls)
{
- if (tls->params.tls_vminor == TLS_MINOR_VER_ONE)
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_ONE:
return (SCMD_PROTO_VERSION_TLS_1_1);
- else
+ case TLS_MINOR_VER_TWO:
return (SCMD_PROTO_VERSION_TLS_1_2);
+ case TLS_MINOR_VER_THREE:
+ return (SCMD_PROTO_VERSION_TLS_1_3);
+ default:
+ __assert_unreachable();
+ }
}
int
@@ -492,6 +498,17 @@ t4_tls_hmac_ctrl(const struct ktls_session *tls)
}
static int
+tls_seqnum_ctrl(const struct ktls_session *tls)
+{
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_THREE:
+ return (0);
+ default:
+ return (3);
+ }
+}
+
+static int
tls_cipher_key_size(const struct ktls_session *tls)
{
switch (tls->params.cipher_key_len) {
@@ -557,7 +574,7 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
kctx->u.rxhdr.authmode_to_rxvalid =
V_TLS_KEYCTX_TX_WR_AUTHMODE(t4_tls_auth_mode(tls)) |
- V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
+ V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(tls_seqnum_ctrl(tls)) |
V_TLS_KEYCTX_TX_WR_RXVALID(1);
kctx->u.rxhdr.ivpresent_to_rxmk_size =
@@ -607,7 +624,8 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
_Static_assert(offsetof(struct tx_keyctx_hdr, txsalt) ==
offsetof(struct rx_keyctx_hdr, rxsalt),
"salt offset mismatch");
- memcpy(kctx->u.txhdr.txsalt, tls->params.iv, SALT_SIZE);
+ memcpy(kctx->u.txhdr.txsalt, tls->params.iv,
+ tls->params.iv_len);
t4_init_gmac_hash(tls->params.cipher_key,
tls->params.cipher_key_len, hash);
} else {
@@ -665,6 +683,10 @@ t4_write_tlskey_wr(const struct ktls_session *tls, int direction, int tid,
kwr->reneg_to_write_rx = V_KEY_GET_LOC(direction == KTLS_TX ?
KEY_WRITE_TX : KEY_WRITE_RX);
+ /* We don't need to use V_T7_ULP_MEMIO_DATA_LEN in this routine. */
+ _Static_assert(V_T7_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5) ==
+ V_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5), "datalen mismatch");
+
/* master command */
kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
diff --git a/sys/dev/cxgbe/crypto/t6_kern_tls.c b/sys/dev/cxgbe/crypto/t6_kern_tls.c
index 04bb6c944050..454b2e264a0e 100644
--- a/sys/dev/cxgbe/crypto/t6_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t6_kern_tls.c
@@ -2003,7 +2003,7 @@ t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
if (tlsp->l2te)
t4_l2t_release(tlsp->l2te);
tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
- vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
+ vlan_tag, tlsp->vi->pi->hw_port, eh->ether_dhost);
if (tlsp->l2te == NULL)
CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
if (ndesc != 0) {
diff --git a/sys/dev/cxgbe/crypto/t7_kern_tls.c b/sys/dev/cxgbe/crypto/t7_kern_tls.c
new file mode 100644
index 000000000000..217459126361
--- /dev/null
+++ b/sys/dev/cxgbe/crypto/t7_kern_tls.c
@@ -0,0 +1,2196 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Chelsio Communications
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_kern_tls.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/ktr.h>
+#include <sys/ktls.h>
+#include <sys/sglist.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockbuf.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp_var.h>
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_tcb.h"
+#include "t4_l2t.h"
+#include "t4_clip.h"
+#include "t4_mp_ring.h"
+#include "crypto/t4_crypto.h"
+
+#if defined(INET) || defined(INET6)
+
+#define TLS_HEADER_LENGTH 5
+
+struct tls_scmd {
+ __be32 seqno_numivs;
+ __be32 ivgen_hdrlen;
+};
+
+struct tlspcb {
+ struct m_snd_tag com;
+ struct vi_info *vi; /* virtual interface */
+ struct adapter *sc;
+ struct sge_txq *txq;
+
+ int tx_key_addr;
+ bool inline_key;
+ bool tls13;
+ unsigned char enc_mode;
+
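+	/*
+	 * Prototype SCMD0 words: scmd0 for full records, scmd0_short
+	 * for short records that bypass the MAC, and scmd0_partial for
+	 * short records sent in partial-GHASH mode.
+	 */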
+ struct tls_scmd scmd0;
+ struct tls_scmd scmd0_partial;
+ struct tls_scmd scmd0_short;
+
+ unsigned int tx_key_info_size;
+
+ uint16_t prev_mss;
+
+ /* Fields used for GCM records using GHASH state. */
+ uint16_t ghash_offset;
+ uint64_t ghash_tls_seqno;
+ char ghash[AES_GMAC_HASH_LEN];
+ bool ghash_valid;
+ bool ghash_pending;
+ bool ghash_lcb;
+ bool queue_mbufs;
+ uint8_t rx_chid;
+ uint16_t rx_qid;
+ struct mbufq pending_mbufs;
+
+ /*
+	 * Outside of setup and teardown, only used for connections
+	 * with inline keys or in partial GCM mode.
+ */
+ struct tls_keyctx keyctx;
+};
+
+static void t7_tls_tag_free(struct m_snd_tag *mst);
+static int ktls_setup_keys(struct tlspcb *tlsp,
+ const struct ktls_session *tls, struct sge_txq *txq);
+
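+/*
+ * A zero-filled buffer used as a placeholder SGL entry when sending
+ * the final fragment of a record in partial-GHASH mode (see
+ * ktls_write_tls_wr()).
+ */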
+static void *zero_buffer;
+static vm_paddr_t zero_buffer_pa;
+
+static const struct if_snd_tag_sw t7_tls_tag_sw = {
+ .snd_tag_free = t7_tls_tag_free,
+ .type = IF_SND_TAG_TYPE_TLS
+};
+
+static inline struct tlspcb *
+mst_to_tls(struct m_snd_tag *t)
+{
+ return (__containerof(t, struct tlspcb, com));
+}
+
+static struct tlspcb *
+alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
+{
+ struct port_info *pi = vi->pi;
+ struct adapter *sc = pi->adapter;
+ struct tlspcb *tlsp;
+
+ tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
+ if (tlsp == NULL)
+ return (NULL);
+
+ m_snd_tag_init(&tlsp->com, ifp, &t7_tls_tag_sw);
+ tlsp->vi = vi;
+ tlsp->sc = sc;
+ tlsp->tx_key_addr = -1;
+ tlsp->ghash_offset = -1;
+ tlsp->rx_chid = pi->rx_chan;
+ tlsp->rx_qid = sc->sge.rxq[pi->vi->first_rxq].iq.abs_id;
+ mbufq_init(&tlsp->pending_mbufs, INT_MAX);
+
+ return (tlsp);
+}
+
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ const struct ktls_session *tls;
+ struct tlspcb *tlsp;
+ struct adapter *sc;
+ struct vi_info *vi;
+ struct inpcb *inp;
+ struct sge_txq *txq;
+ int error, iv_size, keyid, mac_first;
+
+ tls = params->tls.tls;
+
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE)
+ return (EPROTONOSUPPORT);
+
+ /* Sanity check values in *tls. */
+ switch (tls->params.cipher_algorithm) {
+ case CRYPTO_AES_CBC:
+ /* XXX: Explicitly ignore any provided IV. */
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+ switch (tls->params.auth_algorithm) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ iv_size = AES_BLOCK_LEN;
+ mac_first = 1;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ /*
+ * The IV size for TLS 1.2 is the explicit IV in the
+ * record header. For TLS 1.3 it is the size of the
+ * sequence number.
+ */
+ iv_size = 8;
+ mac_first = 0;
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+
+ vi = if_getsoftc(ifp);
+ sc = vi->adapter;
+
+ tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);
+
+ /*
+	 * A pointer with any of the low cookie bits set can't be
+	 * stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)tlsp & CPL_FW6_COOKIE_MASK) != 0) {
+ error = EINVAL;
+ goto failed;
+ }
+
+ tlsp->tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
+
+ if (sc->tlst.inline_keys)
+ keyid = -1;
+ else
+ keyid = t4_alloc_tls_keyid(sc);
+ if (keyid < 0) {
+ CTR(KTR_CXGBE, "%s: %p using immediate key ctx", __func__,
+ tlsp);
+ tlsp->inline_key = true;
+ } else {
+ tlsp->tx_key_addr = keyid;
+ CTR(KTR_CXGBE, "%s: %p allocated TX key addr %#x", __func__,
+ tlsp, tlsp->tx_key_addr);
+ }
+
+ inp = params->tls.inp;
+ INP_RLOCK(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ INP_RUNLOCK(inp);
+ error = ECONNRESET;
+ goto failed;
+ }
+
+ txq = &sc->sge.txq[vi->first_txq];
+ if (inp->inp_flowtype != M_HASHTYPE_NONE)
+ txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
+ vi->rsrv_noflowq);
+ tlsp->txq = txq;
+ INP_RUNLOCK(inp);
+
+ error = ktls_setup_keys(tlsp, tls, txq);
+ if (error)
+ goto failed;
+
+ tlsp->enc_mode = t4_tls_cipher_mode(tls);
+ tlsp->tx_key_info_size = t4_tls_key_info_size(tls);
+
+ /* The SCMD fields used when encrypting a full TLS record. */
+ if (tlsp->tls13)
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0);
+ else
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(3);
+ tlsp->scmd0.seqno_numivs |=
+ V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(iv_size / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0.seqno_numivs = htobe32(tlsp->scmd0.seqno_numivs);
+
+ tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0);
+ if (tlsp->inline_key)
+ tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * (no trailer and possibly a truncated payload).
+ */
+ tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
+ V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
+ else
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(tlsp->enc_mode);
+ tlsp->scmd0_short.seqno_numivs =
+ htobe32(tlsp->scmd0_short.seqno_numivs);
+
+ tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1);
+ if (tlsp->inline_key)
+ tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * using a partial GHASH.
+ */
+ tlsp->scmd0_partial.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0_partial.seqno_numivs =
+ htobe32(tlsp->scmd0_partial.seqno_numivs);
+
+ tlsp->scmd0_partial.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1) |
+ V_SCMD_KEY_CTX_INLINE(1);
+
+ TXQ_LOCK(txq);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ txq->kern_tls_gcm++;
+ else
+ txq->kern_tls_cbc++;
+ TXQ_UNLOCK(txq);
+ *pt = &tlsp->com;
+ return (0);
+
+failed:
+ m_snd_tag_rele(&tlsp->com);
+ return (error);
+}
+
+static int
+ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
+ struct sge_txq *txq)
+{
+ struct tls_key_req *kwr;
+ struct tls_keyctx *kctx;
+ void *items[1];
+ struct mbuf *m;
+ int error;
+
+ /*
+ * Store the salt and keys in the key context. For
+ * connections with an inline key, this key context is passed
+ * as immediate data in each work request. For connections
+ * storing the key in DDR, a work request is used to store a
+ * copy of the key context in DDR.
+ */
+ t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
+ if (tlsp->inline_key)
+ return (0);
+
+ /* Populate key work request. */
+ m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
+ if (m == NULL) {
+ CTR(KTR_CXGBE, "%s: %p failed to alloc WR mbuf", __func__,
+ tlsp);
+ return (ENOMEM);
+ }
+ m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
+ m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
+ kwr = mtod(m, void *);
+ memset(kwr, 0, TLS_KEY_WR_SZ);
+
+ t4_write_tlskey_wr(tls, KTLS_TX, 0, 0, tlsp->tx_key_addr, kwr);
+ kctx = (struct tls_keyctx *)(kwr + 1);
+ memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));
+
+ /*
+ * Place the key work request in the transmit queue. It
+ * should be sent to the NIC before any TLS packets using this
+ * session.
+ */
+ items[0] = m;
+ error = mp_ring_enqueue(txq->r, items, 1, 1);
+ if (error)
+ m_free(m);
+ else
+ CTR(KTR_CXGBE, "%s: %p sent key WR", __func__, tlsp);
+ return (error);
+}
+
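+/*
+ * Fixed overhead of a TLS work request before any optional CPLs,
+ * immediate data, or SGL.  For example, with the key stored in
+ * adapter memory this is 16 + 8 + 8 + 32 + 8 + 8 + 16 = 96 bytes.
+ */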
+static u_int
+ktls_base_wr_size(struct tlspcb *tlsp, bool inline_key)
+{
+ u_int wr_len;
+
+ wr_len = sizeof(struct fw_ulptx_wr); // 16
+ wr_len += sizeof(struct ulp_txpkt); // 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
+ if (inline_key)
+ wr_len += tlsp->tx_key_info_size;
+ else {
+ wr_len += sizeof(struct ulptx_sc_memrd);// 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ }
+ /* SplitMode CPL_RX_PHYS_DSGL here if needed. */
+ /* CPL_TX_*_LSO here if needed. */
+ wr_len += sizeof(struct cpl_tx_pkt_core);// 16
+ return (wr_len);
+}
+
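+/*
+ * Size of a ULPTX DSGL describing nsegs segments.  The first segment
+ * is embedded in struct ulptx_sgl; each later pair of segments adds a
+ * 24-byte sge entry, and a lone trailing segment is padded to 16
+ * bytes.  For example, nsegs = 3 needs 16 + 24 = 40 bytes (5 flits),
+ * matching the nflits computed in write_gl_to_buf().
+ */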
+static u_int
+ktls_sgl_size(u_int nsegs)
+{
+ u_int wr_len;
+
+ /* First segment is part of ulptx_sgl. */
+ nsegs--;
+
+ wr_len = sizeof(struct ulptx_sgl);
+ wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
+ return (wr_len);
+}
+
+/*
+ * A request that doesn't need to generate the TLS trailer is a short
+ * record. For these requests, part of the TLS record payload is
+ * encrypted without invoking the MAC.
+ *
+ * Returns true if this record should be sent as a short record. In
+ * either case, the remaining outputs describe how much of the
+ * TLS record to send as input to the crypto block and the amount of
+ * crypto output to trim via SplitMode:
+ *
+ * *header_len - Number of bytes of TLS header to pass as immediate
+ * data
+ *
+ * *offset - Start offset of TLS record payload to pass as DSGL data
+ *
+ * *plen - Length of TLS record payload to pass as DSGL data
+ *
+ * *leading_waste - amount of non-packet-header bytes to drop at the
+ * start of the crypto output
+ *
+ * *trailing_waste - amount of crypto output to drop from the end
+ */
+static bool
+ktls_is_short_record(struct tlspcb *tlsp, struct mbuf *m_tls, u_int tlen,
+ u_int rlen, u_int *header_len, u_int *offset, u_int *plen,
+ u_int *leading_waste, u_int *trailing_waste, bool send_partial_ghash,
+ bool request_ghash)
+{
+ u_int new_tlen, trailer_len;
+
+ MPASS(tlen > m_tls->m_epg_hdrlen);
+
+ /*
+	 * For TLS 1.3, treat the inner record type, stored as the
+	 * first byte of the trailer, as part of the payload rather
+	 * than as part of the trailer.
+ */
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+
+ /*
+ * Default to sending the full record as input to the crypto
+ * engine and relying on SplitMode to drop any waste.
+ */
+ *header_len = m_tls->m_epg_hdrlen;
+ *offset = 0;
+ *plen = rlen - (m_tls->m_epg_hdrlen + trailer_len);
+ *leading_waste = mtod(m_tls, vm_offset_t);
+ *trailing_waste = rlen - tlen;
+ if (!tlsp->sc->tlst.short_records)
+ return (false);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC) {
+ /*
+ * For AES-CBC we have to send input from the start of
+ * the TLS record payload that is a multiple of the
+ * block size. new_tlen rounds up tlen to the end of
+ * the containing AES block. If this last block
+ * overlaps with the trailer, send the full record to
+ * generate the MAC.
+ */
+ new_tlen = TLS_HEADER_LENGTH +
+ roundup2(tlen - TLS_HEADER_LENGTH, AES_BLOCK_LEN);
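+		/*
+		 * For example, with tlen = 100 and a 5-byte header,
+		 * the 95 payload bytes round up to 96, so new_tlen is
+		 * 101 and a single byte of trailing waste is trimmed.
+		 */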
+ if (rlen - new_tlen < trailer_len)
+ return (false);
+
+ *trailing_waste = new_tlen - tlen;
+ *plen = new_tlen - m_tls->m_epg_hdrlen;
+ } else {
+ if (rlen - tlen < trailer_len ||
+ (rlen - tlen == trailer_len && request_ghash)) {
+ /*
+ * For AES-GCM we have to send the full record
+ * if the end overlaps with the trailer and a
+ * partial GHASH isn't being sent.
+ */
+ if (!send_partial_ghash)
+ return (false);
+
+ /*
+ * Will need to treat any excess trailer bytes as
+ * trailing waste. *trailing_waste is already
+ * correct.
+ */
+ } else {
+ /*
+ * We can use AES-CTR or AES-GCM in partial GHASH
+ * mode to encrypt a partial PDU.
+ *
+ * The last block can be partially encrypted
+ * without any trailing waste.
+ */
+ *trailing_waste = 0;
+ *plen = tlen - m_tls->m_epg_hdrlen;
+ }
+
+ /*
+ * If this request starts at the first byte of the
+ * payload (so the previous request sent the full TLS
+ * header as a tunnel packet) and a partial GHASH is
+ * being requested, the full TLS header must be sent
+ * as input for the GHASH.
+ */
+ if (mtod(m_tls, vm_offset_t) == m_tls->m_epg_hdrlen &&
+ request_ghash)
+ return (true);
+
+ /*
+ * In addition, we can minimize leading waste by
+ * starting encryption at the start of the closest AES
+ * block.
+ */
+ if (mtod(m_tls, vm_offset_t) >= m_tls->m_epg_hdrlen) {
+ *header_len = 0;
+ *offset = mtod(m_tls, vm_offset_t) -
+ m_tls->m_epg_hdrlen;
+ if (*offset >= *plen)
+ *offset = *plen;
+ else
+ *offset = rounddown2(*offset, AES_BLOCK_LEN);
+
+ /*
+ * If the request is just bytes from the trailer,
+ * trim the offset to the end of the payload.
+ */
+ *offset = min(*offset, *plen);
+ *plen -= *offset;
+ *leading_waste -= (m_tls->m_epg_hdrlen + *offset);
+ }
+ }
+ return (true);
+}
+
+/* Size of the AES-GCM TLS AAD for a given connection. */
+static int
+ktls_gcm_aad_len(struct tlspcb *tlsp)
+{
+ return (tlsp->tls13 ? sizeof(struct tls_aead_data_13) :
+ sizeof(struct tls_aead_data));
+}
+
+static int
+ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
+ int *nsegsp)
+{
+ const struct tls_record_layer *hdr;
+ u_int header_len, imm_len, offset, plen, rlen, tlen, wr_len;
+ u_int leading_waste, trailing_waste;
+ bool inline_key, last_ghash_frag, request_ghash, send_partial_ghash;
+ bool short_record;
+
+ M_ASSERTEXTPG(m_tls);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+ wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_core) +
+ roundup2(m->m_len + m_tls->m_len, 16);
+ if (wr_len > SGE_MAX_WR_LEN) {
+ CTR(KTR_CXGBE,
+ "%s: %p TLS header-only packet too long (len %d)",
+ __func__, tlsp, m->m_len + m_tls->m_len);
+ }
+
+ /* This should always be the last TLS record in a chain. */
+ MPASS(m_tls->m_next == NULL);
+ *nsegsp = 0;
+ return (wr_len);
+ }
+
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+ /*
+ * See if this request might make use of GHASH state. This
+ * errs on the side of over-budgeting the WR size.
+ */
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * Might use partial GHASH if this doesn't
+ * send the full record.
+ */
+ if (tlen < rlen) {
+ if (tlen < (rlen - trailer_len))
+ send_partial_ghash = true;
+ request_ghash = true;
+ }
+ } else {
+ send_partial_ghash = true;
+ if (tlen < rlen)
+ request_ghash = true;
+ if (tlen >= (rlen - trailer_len))
+ last_ghash_frag = true;
+ }
+ }
+
+ /*
+ * Assume not sending partial GHASH for this call to get the
+ * larger size.
+ */
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ false, request_ghash);
+
+ inline_key = send_partial_ghash || tlsp->inline_key;
+
+ /* Calculate the size of the work request. */
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash)
+ wr_len += AES_GMAC_HASH_LEN;
+
+ if (leading_waste != 0 || trailing_waste != 0) {
+ /*
+ * Partial records might require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ /* Budget for an LSO header even if we don't use it. */
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ /*
+ * Headers (including the TLS header) are always sent as
+ * immediate data. Short records include a raw AES IV as
+ * immediate data. TLS 1.3 non-short records include a
+ * placeholder for the sequence number as immediate data.
+ * Short records using a partial hash may also need to send
+ * TLS AAD. If a partial hash might be sent, assume a short
+ * record to get the larger size.
+ */
+ imm_len = m->m_len + header_len;
+ if (short_record || send_partial_ghash) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+
+ /*
+ * TLS record payload via DSGL. For partial GCM mode we
+ * might need an extra SG entry for a placeholder.
+ */
+ *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
+ plen);
+ wr_len += ktls_sgl_size(*nsegsp + (last_ghash_frag ? 1 : 0));
+
+ if (request_ghash) {
+ /* AES-GCM records might return a partial hash. */
+ wr_len += sizeof(struct ulp_txpkt);
+ wr_len += sizeof(struct ulptx_idata);
+ wr_len += sizeof(struct cpl_tx_tls_ack);
+ wr_len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ wr_len = roundup2(wr_len, 16);
+ return (wr_len);
+}
+
+/*
+ * Queue the next pending packet.  AES-GCM connections send packets
+ * one at a time while a GHASH update may be outstanding; the next
+ * deferred mbuf is released here, either from ktls_fw6_pld() once
+ * the hash arrives or after a send that didn't request a hash.
+ */
+static void
+ktls_queue_next_packet(struct tlspcb *tlsp, bool enqueue_only)
+{
+#ifdef KTR
+ struct ether_header *eh;
+ struct tcphdr *tcp;
+ tcp_seq tcp_seqno;
+#endif
+ struct mbuf *m;
+ void *items[1];
+ int rc;
+
+ TXQ_LOCK_ASSERT_OWNED(tlsp->txq);
+ KASSERT(tlsp->queue_mbufs, ("%s: mbufs not being queued for %p",
+ __func__, tlsp));
+ for (;;) {
+ m = mbufq_dequeue(&tlsp->pending_mbufs);
+ if (m == NULL) {
+ tlsp->queue_mbufs = false;
+ return;
+ }
+
+#ifdef KTR
+ eh = mtod(m, struct ether_header *);
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+#endif
+#endif
+
+ items[0] = m;
+ if (enqueue_only)
+ rc = mp_ring_enqueue_only(tlsp->txq->r, items, 1);
+ else {
+ TXQ_UNLOCK(tlsp->txq);
+ rc = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ TXQ_LOCK(tlsp->txq);
+ }
+ if (__predict_true(rc == 0))
+ return;
+
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u dropped", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+ m_freem(m);
+ }
+}
+
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ struct tlspcb *tlsp;
+ struct ether_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ void *items[1];
+ int error, nsegs;
+ u_int wr_len, tot_len;
+ uint16_t eh_type;
+
+ /*
+ * Locate headers in initial mbuf.
+ *
+ * XXX: This assumes all of the headers are in the initial mbuf.
+ * Could perhaps use m_advance() like parse_pkt() if that turns
+ * out to not be true.
+ */
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short", __func__, tlsp);
+ return (EINVAL);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ m->m_pkthdr.l2hlen = sizeof(*evh);
+ } else
+ m->m_pkthdr.l2hlen = sizeof(*eh);
+
+ switch (eh_type) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(eh + 1);
+ if (ip->ip_p != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p mbuf not IPPROTO_TCP", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = ip->ip_hl * 4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(eh + 1);
+ if (ip6->ip6_nxt != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p, mbuf not IPPROTO_TCP (%u)",
+ __func__, tlsp, ip6->ip6_nxt);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
+ break;
+ default:
+ CTR(KTR_CXGBE, "%s: %p mbuf not ETHERTYPE_IP{,V6}", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ sizeof(*tcp)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short (2)", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
+ m->m_pkthdr.l4hlen = tcp->th_off * 4;
+
+ /* Bail if there is TCP payload before the TLS record. */
+ if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ m->m_pkthdr.l4hlen) {
+ CTR(KTR_CXGBE,
+ "%s: %p header mbuf bad length (%d + %d + %d != %d)",
+ __func__, tlsp, m->m_pkthdr.l2hlen, m->m_pkthdr.l3hlen,
+ m->m_pkthdr.l4hlen, m->m_len);
+ return (EINVAL);
+ }
+
+ /* Assume all headers are in 'm' for now. */
+ MPASS(m->m_next != NULL);
+ MPASS(m->m_next->m_flags & M_EXTPG);
+
+ tot_len = 0;
+
+ /*
+ * Each of the remaining mbufs in the chain should reference a
+ * TLS record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p wr_len %d nsegs %d", __func__, tlsp,
+ wr_len, nsegs);
+#endif
+ if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
+ return (EFBIG);
+ tot_len += roundup2(wr_len, EQ_ESIZE);
+
+ /*
+ * Store 'nsegs' for the first TLS record in the
+ * header mbuf's metadata.
+ */
+ if (m_tls == m->m_next)
+ set_mbuf_nsegs(m, nsegs);
+ }
+
+ MPASS(tot_len != 0);
+ set_mbuf_len16(m, tot_len / 16);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ /* Defer packets beyond what has been sent so far. */
+ TXQ_LOCK(tlsp->txq);
+ if (tlsp->queue_mbufs) {
+ error = mbufq_enqueue(&tlsp->pending_mbufs, m);
+ if (error == 0) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p len16 %d nsegs %d TCP seq %u deferred",
+ __func__, tlsp, mbuf_len16(m),
+ mbuf_nsegs(m), ntohl(tcp->th_seq));
+#endif
+ }
+ TXQ_UNLOCK(tlsp->txq);
+ return (error);
+ }
+ tlsp->queue_mbufs = true;
+ TXQ_UNLOCK(tlsp->txq);
+ }
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p len16 %d nsegs %d", __func__, tlsp,
+ mbuf_len16(m), mbuf_nsegs(m));
+#endif
+ items[0] = m;
+ error = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ if (__predict_false(error != 0)) {
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ TXQ_LOCK(tlsp->txq);
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+ }
+ }
+ return (error);
+}
+
+static inline bool
+needs_vlan_insertion(struct mbuf *m)
+{
+
+ M_ASSERTPKTHDR(m);
+
+ return (m->m_flags & M_VLANTAG);
+}
+
+static inline uint64_t
+pkt_ctrl1(struct sge_txq *txq, struct mbuf *m, uint16_t eh_type)
+{
+ uint64_t ctrl1;
+
+ /* Checksums are always offloaded */
+ if (eh_type == ETHERTYPE_IP) {
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ } else {
+ MPASS(m->m_pkthdr.l3hlen == sizeof(struct ip6_hdr));
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ }
+ txq->txcsum++;
+
+ /* VLAN tag insertion */
+ if (needs_vlan_insertion(m)) {
+ ctrl1 |= F_TXPKT_VLAN_VLD |
+ V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+ txq->vlan_insertion++;
+ }
+
+ return (ctrl1);
+}
+
+static inline void *
+write_lso_cpl(void *cpl, struct mbuf *m0, uint16_t mss, uint16_t eh_type,
+ int total_len)
+{
+ struct cpl_tx_pkt_lso_core *lso;
+ uint32_t ctrl;
+
+ KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
+ m0->m_pkthdr.l4hlen > 0,
+ ("%s: mbuf %p needs TSO but missing header lengths",
+ __func__, m0));
+
+ ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
+ V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
+ V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
+ if (eh_type == ETHERTYPE_IPV6)
+ ctrl |= F_LSO_IPV6;
+
+ lso = cpl;
+ lso->lso_ctrl = htobe32(ctrl);
+ lso->ipid_ofst = htobe16(0);
+ lso->mss = htobe16(mss);
+ lso->seqno_offset = htobe32(0);
+ lso->len = htobe32(total_len);
+
+ return (lso + 1);
+}
+
+static inline void *
+write_tx_tls_ack(void *dst, u_int rx_chid, u_int hash_len, bool ghash_lcb)
+{
+ struct cpl_tx_tls_ack *cpl;
+ uint32_t flags;
+
+ flags = ghash_lcb ? F_CPL_TX_TLS_ACK_LCB : F_CPL_TX_TLS_ACK_PHASH;
+ cpl = dst;
+ cpl->op_to_Rsvd2 = htobe32(V_CPL_TX_TLS_ACK_OPCODE(CPL_TX_TLS_ACK) |
+ V_T7_CPL_TX_TLS_ACK_RXCHID(rx_chid) | F_CPL_TX_TLS_ACK_ULPTXLPBK |
+ flags);
+
+ /* 32 == AckEncCpl, 16 == LCB */
+ cpl->PldLen = htobe32(V_CPL_TX_TLS_ACK_PLDLEN(32 + 16 + hash_len));
+ cpl->Rsvd3 = 0;
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_fw6_pld(void *dst, u_int rx_chid, u_int rx_qid, u_int hash_len,
+ uint64_t cookie)
+{
+ struct rss_header *rss;
+ struct cpl_fw6_pld *cpl;
+
+ rss = dst;
+ memset(rss, 0, sizeof(*rss));
+ rss->opcode = CPL_FW6_PLD;
+ rss->qid = htobe16(rx_qid);
+ rss->channel = rx_chid;
+
+ cpl = (void *)(rss + 1);
+ memset(cpl, 0, sizeof(*cpl));
+ cpl->opcode = CPL_FW6_PLD;
+ cpl->len = htobe16(hash_len);
+ cpl->data[1] = htobe64(cookie);
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_split_mode_rx_phys(void *dst, struct mbuf *m, struct mbuf *m_tls,
+ u_int crypto_hdr_len, u_int leading_waste, u_int trailing_waste)
+{
+ struct cpl_t7_rx_phys_dsgl *cpl;
+ uint16_t *len;
+ uint8_t numsge;
+
+ /* Forward first (3) and third (1) segments. */
+ numsge = 0xa;
+
+ cpl = dst;
+ cpl->ot.opcode = CPL_RX_PHYS_DSGL;
+ cpl->PhysAddrFields_lo_to_NumSGE =
+ htobe32(F_CPL_T7_RX_PHYS_DSGL_SPLITMODE |
+ V_CPL_T7_RX_PHYS_DSGL_NUMSGE(numsge));
+
+ len = (uint16_t *)(cpl->RSSCopy);
+
+ /*
+ * First segment always contains packet headers as well as
+ * transmit-related CPLs.
+ */
+ len[0] = htobe16(crypto_hdr_len);
+
+ /*
+ * Second segment is "gap" of data to drop at the front of the
+ * TLS record.
+ */
+ len[1] = htobe16(leading_waste);
+
+ /* Third segment is how much of the TLS record to send. */
+ len[2] = htobe16(m_tls->m_len);
+
+ /* Fourth segment is how much data to drop at the end. */
+ len[3] = htobe16(trailing_waste);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: forward %u skip %u forward %u skip %u",
+ __func__, be16toh(len[0]), be16toh(len[1]), be16toh(len[2]),
+ be16toh(len[3]));
+#endif
+ return (cpl + 1);
+}
+
+/*
+ * If the SGL ends at an address that is not 16-byte aligned, this
+ * function appends a zero-filled flit at the end.
+ */
+static void *
+write_gl_to_buf(struct sglist *gl, caddr_t to)
+{
+ struct sglist_seg *seg;
+ __be64 *flitp;
+ struct ulptx_sgl *usgl;
+ int i, nflits, nsegs;
+
+ KASSERT(((uintptr_t)to & 0xf) == 0,
+ ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
+
+ nsegs = gl->sg_nseg;
+ MPASS(nsegs > 0);
+
+ nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
+ flitp = (__be64 *)to;
+ seg = &gl->sg_segs[0];
+ usgl = (void *)flitp;
+
+ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nsegs));
+ usgl->len0 = htobe32(seg->ss_len);
+ usgl->addr0 = htobe64(seg->ss_paddr);
+ seg++;
+
+ for (i = 0; i < nsegs - 1; i++, seg++) {
+ usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
+ usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
+ }
+ if (i & 1)
+ usgl->sge[i / 2].len[1] = htobe32(0);
+ flitp += nflits;
+
+ if (nflits & 1) {
+ MPASS(((uintptr_t)flitp) & 0xf);
+ *flitp++ = 0;
+ }
+
+ MPASS((((uintptr_t)flitp) & 0xf) == 0);
+ return (flitp);
+}
+
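+/*
+ * Copy data into the descriptor ring, wrapping back to the start of
+ * the ring when the copy would run past the end.
+ */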
+static inline void
+copy_to_txd(struct sge_eq *eq, const char *from, caddr_t *to, int len)
+{
+
+ MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
+ MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
+
+ if (__predict_true((uintptr_t)(*to) + len <=
+ (uintptr_t)&eq->desc[eq->sidx])) {
+ bcopy(from, *to, len);
+ (*to) += len;
+ if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
+ (*to) = (caddr_t)eq->desc;
+ } else {
+ int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
+
+ bcopy(from, *to, portion);
+ from += portion;
+ portion = len - portion; /* remaining */
+ bcopy(from, (void *)eq->desc, portion);
+ (*to) = (caddr_t)eq->desc + portion;
+ }
+}
+
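+/*
+ * Send plaintext bytes (a bare TLS header, or trailer bytes recovered
+ * from a completed GHASH) as a plain tunnelled packet that bypasses
+ * the crypto engine.
+ */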
+static int
+ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
+ const void *src, u_int len, u_int available, tcp_seq tcp_seqno, u_int pidx,
+ uint16_t eh_type, bool last_wr)
+{
+ struct tx_sdesc *txsd;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ uint32_t ctrl;
+ int len16, ndesc, pktlen;
+ struct ether_header *eh;
+ struct ip *ip, newip;
+ struct ip6_hdr *ip6, newip6;
+ struct tcphdr *tcp, newtcp;
+ caddr_t out;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+
+ wr = dst;
+ pktlen = m->m_len + len;
+ ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
+ len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
+ ndesc = tx_len16_to_desc(len16);
+ MPASS(ndesc <= available);
+
+ /* Firmware work request header */
+ /* TODO: Handle VF work request. */
+ wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
+
+ ctrl = V_FW_WR_LEN16(len16);
+ wr->equiq_to_len16 = htobe32(ctrl);
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = txq->cpl_ctrl0;
+ cpl->pack = 0;
+ cpl->len = htobe16(pktlen);
+
+ out = (void *)(cpl + 1);
+
+ /* Copy over Ethernet header. */
+ eh = mtod(m, struct ether_header *);
+ copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
+
+ /* Fixup length in IP header and copy out. */
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip = *ip;
+ newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
+ copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
+ if (m->m_pkthdr.l3hlen > sizeof(*ip))
+ copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
+ m->m_pkthdr.l3hlen - sizeof(*ip));
+ } else {
+ ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip6 = *ip6;
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
+ copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
+ MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
+ }
+ cpl->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+
+ /* Set sequence number in TCP header. */
+ tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ newtcp = *tcp;
+ newtcp.th_seq = htonl(tcp_seqno);
+ copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
+
+ /* Copy rest of TCP header. */
+ copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
+ (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
+
+ /* Copy the payload data. */
+ copy_to_txd(&txq->eq, src, &out, len);
+ txq->imm_wrs++;
+
+ txq->txpkt_wrs++;
+
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+static int
+ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
+ void *dst, struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls,
+ u_int available, tcp_seq tcp_seqno, u_int pidx, uint16_t eh_type,
+ uint16_t mss)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tx_sdesc *txsd;
+ struct fw_ulptx_wr *wr;
+ struct ulp_txpkt *txpkt;
+ struct ulptx_sc_memrd *memrd;
+ struct ulptx_idata *idata;
+ struct cpl_tx_sec_pdu *sec_pdu;
+ struct cpl_tx_pkt_core *tx_pkt;
+ const struct tls_record_layer *hdr;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *newtcp;
+ char *iv, *out;
+ u_int aad_start, aad_stop;
+ u_int auth_start, auth_stop, auth_insert;
+ u_int cipher_start, cipher_stop, iv_offset;
+ u_int header_len, offset, plen, rlen, tlen;
+ u_int imm_len, ndesc, nsegs, txpkt_lens[2], wr_len;
+ u_int cpl_len, crypto_hdr_len, post_key_context_len;
+ u_int leading_waste, trailing_waste;
+ u_short ip_len;
+ bool inline_key, ghash_lcb, last_ghash_frag, last_wr, need_lso;
+ bool request_ghash, send_partial_ghash, short_record, split_mode;
+ bool using_scratch;
+
+ MPASS(tlsp->txq == txq);
+ M_ASSERTEXTPG(m_tls);
+
+ /* Final work request for this mbuf chain? */
+ last_wr = (m_tls->m_next == NULL);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p header-only TLS record %u", __func__,
+ tlsp, (u_int)m_tls->m_epg_seqno);
+#endif
+ /* This should always be the last TLS record in a chain. */
+ MPASS(last_wr);
+
+ txq->kern_tls_header++;
+
+ return (ktls_write_tunnel_packet(txq, dst, m,
+ (char *)m_tls->m_epg_hdr + mtod(m_tls, vm_offset_t),
+ m_tls->m_len, available, tcp_seqno, pidx, eh_type,
+ last_wr));
+ }
+
+ /* Locate the TLS header. */
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: offset %lu len %u TCP seq %u TLS record %u",
+ __func__, mtod(m_tls, vm_offset_t), m_tls->m_len, tcp_seqno,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ /* Should this request make use of GHASH state? */
+ ghash_lcb = false;
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+			 * If this is the very first TLS record, or a
+			 * record newer than the one the cached hash
+			 * belongs to, request a partial hash, but not
+			 * if we are going to send the whole record.
+ */
+ if ((tlsp->ghash_tls_seqno == 0 ||
+ tlsp->ghash_tls_seqno < m_tls->m_epg_seqno) &&
+ tlen < rlen) {
+ /*
+ * If we are only missing part or all
+ * of the trailer, send a normal full
+ * record but request the hash.
+ * Otherwise, use partial GHASH mode.
+ */
+ if (tlen >= (rlen - trailer_len))
+ ghash_lcb = true;
+ else
+ send_partial_ghash = true;
+ request_ghash = true;
+ tlsp->ghash_tls_seqno = m_tls->m_epg_seqno;
+ }
+ } else if (tlsp->ghash_tls_seqno == m_tls->m_epg_seqno &&
+ tlsp->ghash_valid) {
+ /*
+ * Compute the offset of the first AES block as
+ * is done in ktls_is_short_record.
+ */
+ if (rlen - tlen < trailer_len)
+ plen = rlen - (m_tls->m_epg_hdrlen +
+ trailer_len);
+ else
+ plen = tlen - m_tls->m_epg_hdrlen;
+ offset = mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen;
+ if (offset >= plen)
+ offset = plen;
+ else
+ offset = rounddown2(offset, AES_BLOCK_LEN);
+ if (tlsp->ghash_offset == offset) {
+ if (offset == plen) {
+ /*
+ * Send a partial trailer as a
+ * tunnelled packet as
+ * immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p trailer-only TLS record %u",
+ __func__, tlsp,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ txq->kern_tls_trailer++;
+
+ offset = mtod(m_tls, vm_offset_t) -
+ (m_tls->m_epg_hdrlen + plen);
+ KASSERT(offset <= AES_GMAC_HASH_LEN,
+ ("offset outside of trailer"));
+ return (ktls_write_tunnel_packet(txq,
+ dst, m, tlsp->ghash + offset,
+ m_tls->m_len, available, tcp_seqno,
+ pidx, eh_type, last_wr));
+ }
+
+ /*
+ * If this request sends the end of
+ * the payload, it is the last
+ * fragment.
+ */
+ if (tlen >= (rlen - trailer_len)) {
+ last_ghash_frag = true;
+ ghash_lcb = true;
+ }
+
+ /*
+ * Only use partial GCM mode (rather
+ * than an AES-CTR short record) if
+ * there is input auth data to pass to
+ * the GHASH. That is true so long as
+ * there is at least one full block of
+ * payload data, or if the remaining
+ * payload data is the final partial
+ * block.
+ */
+ if (plen - offset >= GMAC_BLOCK_LEN ||
+ last_ghash_frag) {
+ send_partial_ghash = true;
+
+ /*
+ * If not sending the complete
+ * end of the record, this is
+ * a middle request so needs
+ * to request an updated
+ * partial hash.
+ */
+ if (tlen < rlen)
+ request_ghash = true;
+ }
+ }
+ }
+ }
+
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ send_partial_ghash, request_ghash);
+
+ if (short_record) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p short TLS record %u hdr %u offs %u plen %u",
+ __func__, tlsp, (u_int)m_tls->m_epg_seqno, header_len,
+ offset, plen);
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ CTR(KTR_CXGBE, "%s: %p sending initial GHASH",
+ __func__, tlsp);
+ else
+ CTR(KTR_CXGBE, "%s: %p sending partial GHASH for offset %u%s",
+ __func__, tlsp, tlsp->ghash_offset,
+ last_ghash_frag ? ", last_frag" : "");
+ }
+#endif
+ KASSERT(send_partial_ghash || !request_ghash,
+ ("requesting but not sending partial hash for short record"));
+ } else {
+ KASSERT(!send_partial_ghash,
+ ("sending partial hash with full record"));
+ }
+
+ if (tlen < rlen && m_tls->m_next == NULL &&
+ (tcp->th_flags & TH_FIN) != 0) {
+ txq->kern_tls_fin_short++;
+#ifdef INVARIANTS
+ panic("%s: FIN on short TLS record", __func__);
+#endif
+ }
+
+ /*
+ * Use cached value for first record in chain if not using
+	 * partial GCM mode.  t7_ktls_parse_pkt() calculates nsegs based
+ * on send_partial_ghash being false.
+ */
+ if (m->m_next == m_tls && !send_partial_ghash)
+ nsegs = mbuf_nsegs(m);
+ else
+ nsegs = sglist_count_mbuf_epg(m_tls,
+ m_tls->m_epg_hdrlen + offset, plen);
+
+ /* Determine if we need an LSO header. */
+ need_lso = (m_tls->m_len > mss);
+
+ /* Calculate the size of the TLS work request. */
+ inline_key = send_partial_ghash || tlsp->inline_key;
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash) {
+ /* Inline key context includes partial hash in OPAD. */
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ /*
+	 * SplitMode is required if there is anything we need to trim
+ * from the crypto output, either at the front or end of the
+ * record. Note that short records might not need trimming.
+ */
+ split_mode = leading_waste != 0 || trailing_waste != 0;
+ if (split_mode) {
+ /*
+ * Partial records require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ if (need_lso)
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ imm_len = m->m_len + header_len;
+ if (short_record) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+ wr_len += ktls_sgl_size(nsegs + (last_ghash_frag ? 1 : 0));
+ wr_len = roundup2(wr_len, 16);
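+	/* The first ULP_TXPKT spans everything after the WR header. */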
+ txpkt_lens[0] = wr_len - sizeof(*wr);
+
+ if (request_ghash) {
+ /*
+ * Requesting the hash entails a second ULP_TX_PKT
+ * containing CPL_TX_TLS_ACK, CPL_FW6_PLD, and space
+ * for the hash.
+ */
+ txpkt_lens[1] = sizeof(struct ulp_txpkt);
+ txpkt_lens[1] += sizeof(struct ulptx_idata);
+ txpkt_lens[1] += sizeof(struct cpl_tx_tls_ack);
+ txpkt_lens[1] += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ txpkt_lens[1] += AES_GMAC_HASH_LEN;
+ wr_len += txpkt_lens[1];
+ } else
+ txpkt_lens[1] = 0;
+
+ ndesc = howmany(wr_len, EQ_ESIZE);
+ MPASS(ndesc <= available);
+
+ /*
+ * Use the per-txq scratch pad if near the end of the ring to
+ * simplify handling of wrap-around.
+ */
+ using_scratch = (eq->sidx - pidx < ndesc);
+ if (using_scratch)
+ wr = (void *)txq->ss;
+ else
+ wr = dst;
+
+ /* FW_ULPTX_WR */
+ wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
+ wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
+ V_FW_WR_LEN16(wr_len / 16));
+ wr->cookie = 0;
+
+ /* ULP_TXPKT */
+ txpkt = (void *)(wr + 1);
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_CMDMORE(request_ghash ? 1 : 0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[0], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = sizeof(struct cpl_tx_sec_pdu);
+
+ /*
+ * After the key context comes CPL_RX_PHYS_DSGL, CPL_TX_*, and
+ * immediate data containing headers. When using an inline
+ * key, these are counted as part of this ULPTX_IDATA. When
+ * reading the key from memory, these are part of a separate
+ * ULPTX_IDATA.
+ */
+ cpl_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ cpl_len += sizeof(struct cpl_tx_pkt_lso_core);
+ if (split_mode)
+ cpl_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ post_key_context_len = cpl_len + imm_len;
+
+ if (inline_key) {
+ idata->len += tlsp->tx_key_info_size + post_key_context_len;
+ if (send_partial_ghash) {
+ /* Partial GHASH in key context. */
+ idata->len += AES_GMAC_HASH_LEN;
+ }
+ }
+ idata->len = htobe32(idata->len);
+
+ /* CPL_TX_SEC_PDU */
+ sec_pdu = (void *)(idata + 1);
+
+ /*
+ * Packet headers are passed through unchanged by the crypto
+ * engine by marking them as header data in SCMD0.
+ */
+ crypto_hdr_len = m->m_len;
+
+ if (send_partial_ghash) {
+ /*
+ * For short records using a partial hash, the TLS
+ * header is counted as header data in SCMD0. TLS AAD
+ * is next (if AAD is present) followed by the AES-CTR
+ * IV. Last is the cipher region for the payload.
+ */
+ if (header_len != 0) {
+ aad_start = 1;
+ aad_stop = ktls_gcm_aad_len(tlsp);
+ } else {
+ aad_start = 0;
+ aad_stop = 0;
+ }
+ iv_offset = aad_stop + 1;
+ cipher_start = iv_offset + AES_BLOCK_LEN;
+ cipher_stop = 0;
+ if (last_ghash_frag) {
+ auth_start = cipher_start;
+ auth_stop = AES_GMAC_HASH_LEN;
+ auth_insert = auth_stop;
+ } else if (plen < GMAC_BLOCK_LEN) {
+ /*
+ * A request that sends part of the first AES
+ * block will only have AAD.
+ */
+ KASSERT(header_len != 0,
+ ("%s: partial GHASH with no auth", __func__));
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ auth_start = cipher_start;
+ auth_stop = plen % GMAC_BLOCK_LEN;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32(aad_stop + AES_BLOCK_LEN + plen +
+ (last_ghash_frag ? AES_GMAC_HASH_LEN : 0));
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_partial.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = tlsp->scmd0_partial.ivgen_hdrlen;
+ if (last_ghash_frag)
+ sec_pdu->ivgen_hdrlen |= V_SCMD_LAST_FRAG(1);
+ else
+ sec_pdu->ivgen_hdrlen |= V_SCMD_MORE_FRAGS(1);
+ sec_pdu->ivgen_hdrlen = htobe32(sec_pdu->ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_partial_ghash++;
+ } else if (short_record) {
+ /*
+ * For short records without a partial hash, the TLS
+ * header is counted as header data in SCMD0 and the
+ * IV is next, followed by a cipher region for the
+ * payload.
+ */
+ aad_start = 0;
+ aad_stop = 0;
+ iv_offset = 1;
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ cipher_start = AES_BLOCK_LEN + 1;
+ cipher_stop = 0;
+
+ sec_pdu->pldlen = htobe32(AES_BLOCK_LEN + plen);
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(
+ tlsp->scmd0_short.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_short++;
+ } else {
+ /*
+ * AAD is TLS header. IV is after AAD for TLS < 1.3.
+ * For TLS 1.3, a placeholder for the TLS sequence
+ * number is provided as an IV before the AAD. The
+ * cipher region starts after the AAD and IV. See
+ * comments in ccr_authenc() and ccr_gmac() in
+ * t4_crypto.c regarding cipher and auth start/stop
+ * values.
+ */
+ if (tlsp->tls13) {
+ iv_offset = 1;
+ aad_start = 1 + sizeof(uint64_t);
+ aad_stop = sizeof(uint64_t) + TLS_HEADER_LENGTH;
+ cipher_start = aad_stop + 1;
+ } else {
+ aad_start = 1;
+ aad_stop = TLS_HEADER_LENGTH;
+ iv_offset = TLS_HEADER_LENGTH + 1;
+ cipher_start = m_tls->m_epg_hdrlen + 1;
+ }
+		/* GCM and CBC use the same values for full records. */
+		cipher_stop = 0;
+		auth_start = cipher_start;
+		auth_stop = 0;
+		auth_insert = 0;
+
+ sec_pdu->pldlen = htobe32((tlsp->tls13 ? sizeof(uint64_t) : 0) +
+ m_tls->m_epg_hdrlen + plen);
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ if (split_mode)
+ txq->kern_tls_partial++;
+ else
+ txq->kern_tls_full++;
+ }
+ sec_pdu->op_ivinsrtofst = htobe32(
+ V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
+ V_CPL_TX_SEC_PDU_CPLLEN(cpl_len / 8) |
+ V_CPL_TX_SEC_PDU_PLACEHOLDER(send_partial_ghash ? 1 : 0) |
+ V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
+ sec_pdu->aadstart_cipherstop_hi = htobe32(
+ V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
+ V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
+ V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
+ sec_pdu->cipherstop_lo_authinsert = htobe32(
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
+ V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
+ V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
+ V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
+
+ if (send_partial_ghash && last_ghash_frag) {
+ uint64_t aad_len, cipher_len;
+
+ aad_len = ktls_gcm_aad_len(tlsp);
+ cipher_len = rlen - (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ sec_pdu->scmd1 = htobe64(aad_len << 44 | cipher_len);
+ } else
+ sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
+
+ /* Key context */
+ out = (void *)(sec_pdu + 1);
+ if (inline_key) {
+ memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
+ if (send_partial_ghash) {
+ struct tls_keyctx *keyctx = (void *)out;
+
+ keyctx->u.txhdr.ctxlen++;
+ keyctx->u.txhdr.dualck_to_txvalid &= ~htobe16(
+ V_KEY_CONTEXT_MK_SIZE(M_KEY_CONTEXT_MK_SIZE));
+ keyctx->u.txhdr.dualck_to_txvalid |= htobe16(
+ F_KEY_CONTEXT_OPAD_PRESENT |
+ V_KEY_CONTEXT_MK_SIZE(0));
+ }
+ out += tlsp->tx_key_info_size;
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ else
+ memcpy(out, tlsp->ghash, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+ }
+ } else {
+ /* ULPTX_SC_MEMRD to read key context. */
+ memrd = (void *)out;
+ memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
+ V_ULP_TX_SC_MORE(1) |
+ V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
+ memrd->addr = htobe32(tlsp->tx_key_addr >> 5);
+
+ /* ULPTX_IDATA for CPL_TX_* and headers. */
+ idata = (void *)(memrd + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(post_key_context_len);
+
+ out = (void *)(idata + 1);
+ }
+
+ /* CPL_RX_PHYS_DSGL */
+ if (split_mode) {
+ crypto_hdr_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ crypto_hdr_len += sizeof(struct cpl_tx_pkt_lso_core);
+ crypto_hdr_len += m->m_len;
+ out = write_split_mode_rx_phys(out, m, m_tls, crypto_hdr_len,
+ leading_waste, trailing_waste);
+ }
+
+ /* CPL_TX_PKT_LSO */
+ if (need_lso) {
+ out = write_lso_cpl(out, m, mss, eh_type, m->m_len +
+ m_tls->m_len);
+ txq->tso_wrs++;
+ }
+
+ /* CPL_TX_PKT_XT */
+ tx_pkt = (void *)out;
+ tx_pkt->ctrl0 = txq->cpl_ctrl0;
+ tx_pkt->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+ tx_pkt->pack = 0;
+ tx_pkt->len = htobe16(m->m_len + m_tls->m_len);
+
+ /* Copy the packet headers. */
+ out = (void *)(tx_pkt + 1);
+ memcpy(out, mtod(m, char *), m->m_len);
+
+ /* Modify the packet length in the IP header. */
+ ip_len = m->m_len + m_tls->m_len - m->m_pkthdr.l2hlen;
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip->ip_len, ip_len);
+ } else {
+ ip6 = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip6->ip6_plen, ip_len - sizeof(*ip6));
+ }
+
+ /* Modify sequence number and flags in TCP header. */
+ newtcp = (void *)(out + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ be32enc(&newtcp->th_seq, tcp_seqno);
+ if (!last_wr)
+ newtcp->th_flags = tcp->th_flags & ~(TH_PUSH | TH_FIN);
+ out += m->m_len;
+
+ /*
+ * Insert placeholder for sequence number as IV for TLS 1.3
+ * non-short records.
+ */
+ if (tlsp->tls13 && !short_record) {
+ memset(out, 0, sizeof(uint64_t));
+ out += sizeof(uint64_t);
+ }
+
+ /* Populate the TLS header */
+ memcpy(out, m_tls->m_epg_hdr, header_len);
+ out += header_len;
+
+ /* TLS AAD for short records using a partial hash. */
+ if (send_partial_ghash && header_len != 0) {
+ if (tlsp->tls13) {
+ struct tls_aead_data_13 ad;
+
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = hdr->tls_length;
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ } else {
+ struct tls_aead_data ad;
+ uint16_t cipher_len;
+
+ cipher_len = rlen -
+ (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ ad.seq = htobe64(m_tls->m_epg_seqno);
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = htons(cipher_len);
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ }
+ }
+
+ /* AES IV for a short record. */
+ if (short_record) {
+ iv = out;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
+ if (tlsp->tls13) {
+ uint64_t value;
+
+ value = be64dec(tlsp->keyctx.u.txhdr.txsalt +
+ 4);
+ value ^= m_tls->m_epg_seqno;
+ be64enc(iv + 4, value);
+ } else
+ memcpy(iv + 4, hdr + 1, 8);
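+			/*
+			 * GCM reserves counter value 1 for the tag,
+			 * so payload encryption normally starts at
+			 * counter 2; partial-GHASH requests instead
+			 * start at 1 plus the block offset.
+			 */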
+ if (send_partial_ghash)
+ be32enc(iv + 12, 1 + offset / AES_BLOCK_LEN);
+ else
+ be32enc(iv + 12, 2 + offset / AES_BLOCK_LEN);
+ } else
+ memcpy(iv, hdr + 1, AES_BLOCK_LEN);
+ out += AES_BLOCK_LEN;
+ }
+
+ if (imm_len % 16 != 0) {
+ if (imm_len % 8 != 0) {
+ /* Zero pad to an 8-byte boundary. */
+ memset(out, 0, 8 - (imm_len % 8));
+ out += 8 - (imm_len % 8);
+ }
+
+ /*
+ * Insert a ULP_TX_SC_NOOP if needed so the SGL is
+ * 16-byte aligned.
+ */
+ if (imm_len % 16 <= 8) {
+ idata = (void *)out;
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(0);
+ out = (void *)(idata + 1);
+ }
+ }
+
+ /* SGL for record payload */
+ sglist_reset(txq->gl);
+ if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
+ plen) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist", __func__);
+#endif
+ }
+ if (last_ghash_frag) {
+ if (sglist_append_phys(txq->gl, zero_buffer_pa,
+ AES_GMAC_HASH_LEN) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist (2)", __func__);
+#endif
+ }
+ }
+ out = write_gl_to_buf(txq->gl, out);
+
+ if (request_ghash) {
+ /* ULP_TXPKT */
+ txpkt = (void *)out;
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[1], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(0));
+ idata->len = sizeof(struct cpl_tx_tls_ack);
+ idata->len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ idata->len += AES_GMAC_HASH_LEN;
+ idata->len = htobe32(idata->len);
+ out = (void *)(idata + 1);
+
+ /* CPL_TX_TLS_ACK */
+ out = write_tx_tls_ack(out, tlsp->rx_chid, AES_GMAC_HASH_LEN,
+ ghash_lcb);
+
+ /* CPL_FW6_PLD */
+ out = write_fw6_pld(out, tlsp->rx_chid, tlsp->rx_qid,
+ AES_GMAC_HASH_LEN, (uintptr_t)tlsp | CPL_FW6_COOKIE_KTLS);
+
+ /* Space for partial hash. */
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+
+ tlsp->ghash_pending = true;
+ tlsp->ghash_valid = false;
+ tlsp->ghash_lcb = ghash_lcb;
+ if (last_ghash_frag)
+ tlsp->ghash_offset = offset + plen;
+ else
+ tlsp->ghash_offset = rounddown2(offset + plen,
+ GMAC_BLOCK_LEN);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p requesting GHASH for offset %u",
+ __func__, tlsp, tlsp->ghash_offset);
+#endif
+ m_snd_tag_ref(&tlsp->com);
+
+ txq->kern_tls_ghash_requested++;
+ }
+
+ if (using_scratch) {
+ out = dst;
+ copy_to_txd(eq, txq->ss, &out, wr_len);
+ }
+
+ txq->kern_tls_records++;
+ txq->kern_tls_octets += m_tls->m_len;
+ if (split_mode) {
+ txq->kern_tls_splitmode++;
+ txq->kern_tls_waste += leading_waste + trailing_waste;
+ }
+ if (need_lso)
+ txq->kern_tls_lso++;
+
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tlspcb *tlsp;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ struct ether_header *eh;
+ tcp_seq tcp_seqno;
+ u_int ndesc, pidx, totdesc;
+ uint16_t eh_type, mss;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ totdesc = 0;
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ }
+
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ pidx = eq->pidx;
+
+ /* Determine MSS. */
+ if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+ mss = m->m_pkthdr.tso_segsz;
+ tlsp->prev_mss = mss;
+ } else if (tlsp->prev_mss != 0)
+ mss = tlsp->prev_mss;
+ else
+ mss = if_getmtu(tlsp->vi->ifp) -
+ (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
+
+ /* Fetch the starting TCP sequence number for this chain. */
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__, m->m_pkthdr.len,
+ tcp_seqno);
+#endif
+ KASSERT(!tlsp->ghash_pending, ("%s: GHASH pending for send", __func__));
+
+ /*
+ * Iterate over each TLS record constructing a work request
+ * for that record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
+ available - totdesc, tcp_seqno, pidx, eh_type, mss);
+ totdesc += ndesc;
+ IDXINCR(pidx, ndesc, eq->sidx);
+ dst = &eq->desc[pidx];
+
+ tcp_seqno += m_tls->m_len;
+ }
+
+ /*
+ * Queue another packet if this was a GCM request that didn't
+ * request a GHASH response.
+ */
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM && !tlsp->ghash_pending)
+ ktls_queue_next_packet(tlsp, true);
+
+ MPASS(totdesc <= available);
+ return (totdesc);
+}
+
+static void
+t7_tls_tag_free(struct m_snd_tag *mst)
+{
+ struct adapter *sc;
+ struct tlspcb *tlsp;
+
+ tlsp = mst_to_tls(mst);
+ sc = tlsp->sc;
+
+ CTR2(KTR_CXGBE, "%s: %p", __func__, tlsp);
+
+ if (tlsp->tx_key_addr >= 0)
+ t4_free_tls_keyid(sc, tlsp->tx_key_addr);
+
+ KASSERT(mbufq_len(&tlsp->pending_mbufs) == 0,
+ ("%s: pending mbufs", __func__));
+
+ zfree(tlsp, M_CXGBE);
+}
+
+static int
+ktls_fw6_pld(struct sge_iq *iq, const struct rss_header *rss,
+ struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ struct tlspcb *tlsp;
+ const void *ghash;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+
+ tlsp = (struct tlspcb *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
+ KASSERT(cpl->data[0] == 0, ("%s: error status returned", __func__));
+
+ TXQ_LOCK(tlsp->txq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p received GHASH for offset %u%s", __func__, tlsp,
+ tlsp->ghash_offset, tlsp->ghash_lcb ? " in LCB" : "");
+#endif
+ if (tlsp->ghash_lcb)
+ ghash = &cpl->data[2];
+ else
+ ghash = cpl + 1;
+ memcpy(tlsp->ghash, ghash, AES_GMAC_HASH_LEN);
+ tlsp->ghash_valid = true;
+ tlsp->ghash_pending = false;
+ tlsp->txq->kern_tls_ghash_received++;
+
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+
+ m_snd_tag_rele(&tlsp->com);
+ m_freem(m);
+ return (0);
+}
+
+void
+t7_ktls_modload(void)
+{
+ zero_buffer = malloc_aligned(AES_GMAC_HASH_LEN, AES_GMAC_HASH_LEN,
+ M_CXGBE, M_ZERO | M_WAITOK);
+ zero_buffer_pa = vtophys(zero_buffer);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, ktls_fw6_pld,
+ CPL_FW6_COOKIE_KTLS);
+}
+
+void
+t7_ktls_modunload(void)
+{
+ free(zero_buffer, M_CXGBE);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL, CPL_FW6_COOKIE_KTLS);
+}
+
+#else
+
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ return (ENXIO);
+}
+
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ return (EINVAL);
+}
+
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ panic("can't happen");
+}
+
+void
+t7_ktls_modload(void)
+{
+}
+
+void
+t7_ktls_modunload(void)
+{
+}
+
+#endif
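
Between t7_ktls_modload() registering ktls_fw6_pld() for CPL_FW6_COOKIE_KTLS and write_fw6_pld() stashing (uintptr_t)tlsp | CPL_FW6_COOKIE_KTLS, the GHASH request and its completion are matched by a tagged pointer carried in the CPL payload. A minimal sketch of that tagging scheme, assuming allocations are at least 8-byte aligned so the low three bits are free; the helpers below are illustrative, not the driver's:

/* Sketch: tag a pointer with a small cookie in its low bits. */
#include <assert.h>
#include <stdint.h>

#define COOKIE_MASK	0x7UL	/* assumes >= 8-byte aligned allocations */

static inline uintptr_t
cookie_tag(void *p, uintptr_t cookie)
{
	assert(((uintptr_t)p & COOKIE_MASK) == 0 && cookie <= COOKIE_MASK);
	return ((uintptr_t)p | cookie);
}

static inline void *
cookie_to_ptr(uintptr_t v)
{
	return ((void *)(v & ~COOKIE_MASK));
}
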
diff --git a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
index b8e6eeba0280..2cd24c635325 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
@@ -32,19 +32,6 @@
#include "cudbg.h"
#include "cudbg_lib_common.h"
-enum {
- SF_ATTEMPTS = 10, /* max retries for SF operations */
-
- /* flash command opcodes */
- SF_PROG_PAGE = 2, /* program page */
- SF_WR_DISABLE = 4, /* disable writes */
- SF_RD_STATUS = 5, /* read status register */
- SF_WR_ENABLE = 6, /* enable writes */
- SF_RD_DATA_FAST = 0xb, /* read flash */
- SF_RD_ID = 0x9f, /* read ID */
- SF_ERASE_SECTOR = 0xd8, /* erase sector */
-};
-
int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size);
int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
u32 start_address);
@@ -56,10 +43,12 @@ update_skip_size(struct cudbg_flash_sec_info *sec_info, u32 size)
}
static
-void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
- int sector_nu, int avail)
+void set_sector_availability(struct adapter *adap,
+ struct cudbg_flash_sec_info *sec_info, int sector_nu, int avail)
{
- sector_nu -= CUDBG_START_SEC;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, NULL);
+
+ sector_nu -= start / SF_SEC_SIZE;
if (avail)
set_dbg_bitmap(sec_info->sec_bitmap, sector_nu);
else
@@ -68,13 +57,17 @@ void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
/* This function will return empty sector available for filling */
static int
-find_empty_sec(struct cudbg_flash_sec_info *sec_info)
+find_empty_sec(struct adapter *adap, struct cudbg_flash_sec_info *sec_info)
{
int i, index, bit;
-
- for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR; i++) {
- index = (i - CUDBG_START_SEC) / 8;
- bit = (i - CUDBG_START_SEC) % 8;
+ unsigned int len = 0;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &len);
+
+ start /= SF_SEC_SIZE; /* addr -> sector */
+ len /= SF_SEC_SIZE;
+ for (i = start; i < start + len; i++) {
+ index = (i - start) / 8;
+ bit = (i - start) % 8;
if (!(sec_info->sec_bitmap[index] & (1 << bit)))
return i;
}
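
find_empty_sec() now scans only the FLASH_LOC_CUDBG window, one bitmap bit per sector, with the byte index and bit position derived from the sector's offset into the window. A standalone sketch of the same scan (names are illustrative; the driver's bitmap lives in sec_info->sec_bitmap):

#include <stdint.h>

/* Return the first clear bit in a bitmap of nbits bits, or -1 if full. */
static int
first_clear_bit(const uint8_t *bitmap, int nbits)
{
	int i, index, bit;

	for (i = 0; i < nbits; i++) {
		index = i / 8;	/* byte holding bit i */
		bit = i % 8;	/* position within that byte */
		if (!(bitmap[index] & (1 << bit)))
			return (i);
	}
	return (-1);
}
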
@@ -102,7 +95,7 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff,
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_hdr = sec_info->sec_data + sec_hdr_start_addr;
flash_hdr = (struct cudbg_flash_hdr *)(sec_hdr);
@@ -166,11 +159,13 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
u32 space_left;
int rc = 0;
int sec;
+ unsigned int cudbg_max_size = 0;
+ t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &cudbg_max_size);
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_data_size = sec_hdr_start_addr;
cudbg_init->print("\tWriting %u bytes to flash\n", cur_entity_size);
@@ -191,12 +186,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
flash_hdr = (struct cudbg_flash_hdr *)(sec_info->sec_data +
sec_hdr_start_addr);
- if (flash_hdr->data_len > CUDBG_FLASH_SIZE) {
+ if (flash_hdr->data_len > cudbg_max_size) {
rc = CUDBG_STATUS_FLASH_FULL;
goto out;
}
- space_left = CUDBG_FLASH_SIZE - flash_hdr->data_len;
+ space_left = cudbg_max_size - flash_hdr->data_len;
if (cur_entity_size > space_left) {
rc = CUDBG_STATUS_FLASH_FULL;
@@ -204,10 +199,11 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
}
while (cur_entity_size > 0) {
- sec = find_empty_sec(sec_info);
+ sec = find_empty_sec(adap, sec_info);
if (sec_info->par_sec) {
sec_data_offset = sec_info->par_sec_offset;
- set_sector_availability(sec_info, sec_info->par_sec, 0);
+ set_sector_availability(adap, sec_info,
+ sec_info->par_sec, 0);
sec_info->par_sec = 0;
sec_info->par_sec_offset = 0;
@@ -230,13 +226,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
(void *)((char *)dbg_buff->data + start_offset),
tmp_size);
- rc = write_flash(adap, sec, sec_info->sec_data,
- CUDBG_SF_SECTOR_SIZE);
+ rc = write_flash(adap, sec, sec_info->sec_data, SF_SEC_SIZE);
if (rc)
goto out;
cur_entity_size -= tmp_size;
- set_sector_availability(sec_info, sec, 1);
+ set_sector_availability(adap, sec_info, sec, 1);
start_offset += tmp_size;
}
out:
@@ -247,19 +242,14 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size)
{
unsigned int addr;
unsigned int i, n;
- unsigned int sf_sec_size;
int rc = 0;
u8 *ptr = (u8 *)data;
- sf_sec_size = adap->params.sf_size/adap->params.sf_nsec;
-
- addr = start_sec * CUDBG_SF_SECTOR_SIZE;
- i = DIV_ROUND_UP(size,/* # of sectors spanned */
- sf_sec_size);
+ addr = start_sec * SF_SEC_SIZE;
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE);
- rc = t4_flash_erase_sectors(adap, start_sec,
- start_sec + i - 1);
+ rc = t4_flash_erase_sectors(adap, start_sec, start_sec + i - 1);
/*
* If size == 0 then we're simply erasing the FLASH sectors associated
* with the on-adapter OptionROM Configuration File.
@@ -337,6 +327,9 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
u32 data_offset = 0;
u32 i, j;
int rc;
+ unsigned int cudbg_len = 0;
+ int cudbg_start_sec = t4_flash_loc_start(adap, FLASH_LOC_CUDBG,
+ &cudbg_len) / SF_SEC_SIZE;
rc = t4_get_flash_params(adap);
if (rc) {
@@ -348,7 +341,7 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
if (!data_flag) {
/* fill header */
@@ -357,14 +350,14 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
* have older filled sector also
*/
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
- rc = read_flash(adap, CUDBG_START_SEC, &flash_hdr,
+ rc = read_flash(adap, cudbg_start_sec, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
if (flash_hdr.signature == CUDBG_FL_SIGNATURE) {
sec_info->max_timestamp = flash_hdr.timestamp;
} else {
- rc = read_flash(adap, CUDBG_START_SEC + 1,
+ rc = read_flash(adap, cudbg_start_sec + 1,
&flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
@@ -383,8 +376,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding max sequence number because max sequenced
* sector has updated header
*/
- for (i = CUDBG_START_SEC; i <
- CUDBG_SF_MAX_SECTOR; i++) {
+ for (i = cudbg_start_sec; i < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; i++) {
memset(&flash_hdr, 0,
sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, i, &flash_hdr,
@@ -423,7 +416,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding sector sequence sorted */
for (i = 1; i <= sec_info->max_seq_no; i++) {
- for (j = CUDBG_START_SEC; j < CUDBG_SF_MAX_SECTOR; j++) {
+ for (j = cudbg_start_sec; j < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; j++) {
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, j, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
@@ -434,10 +428,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
sec_info->max_timestamp ==
flash_hdr.timestamp &&
flash_hdr.sec_seq_no == i) {
- if (size + total_hdr_size >
- CUDBG_SF_SECTOR_SIZE)
- tmp_size = CUDBG_SF_SECTOR_SIZE -
- total_hdr_size;
+ if (size + total_hdr_size > SF_SEC_SIZE)
+ tmp_size = SF_SEC_SIZE - total_hdr_size;
else
tmp_size = size;
@@ -468,7 +460,7 @@ int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
unsigned int addr, i, n;
int rc;
u32 *ptr = (u32 *)data;
- addr = start_sec * CUDBG_SF_SECTOR_SIZE + start_address;
+ addr = start_sec * SF_SEC_SIZE + start_address;
size = size / 4;
for (i = 0; i < size; i += SF_PAGE_SIZE) {
if ((size - i) < SF_PAGE_SIZE)
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.c b/sys/dev/cxgbe/cudbg/cudbg_lib.c
index a36c53f68223..f0273349263a 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib.c
@@ -155,23 +155,25 @@ static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
u32 flash_data_offset;
u32 data_hdr_size;
int rc = -1;
+ unsigned int cudbg_len;
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
+ t4_flash_loc_start(cudbg_init->adap, FLASH_LOC_CUDBG, &cudbg_len);
- flash_data_offset = (FLASH_CUDBG_NSECS *
+ flash_data_offset = ((cudbg_len / SF_SEC_SIZE) *
(sizeof(struct cudbg_flash_hdr) +
data_hdr_size)) +
(cur_entity_data_offset - data_hdr_size);
- if (flash_data_offset > CUDBG_FLASH_SIZE) {
+ if (flash_data_offset > cudbg_len) {
update_skip_size(sec_info, cur_entity_size);
if (cudbg_init->verbose)
cudbg_init->print("Large entity skipping...\n");
return rc;
}
- remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
+ remain_flash_size = cudbg_len - flash_data_offset;
if (cur_entity_size > remain_flash_size) {
update_skip_size(sec_info, cur_entity_size);
@@ -1292,6 +1294,7 @@ static int collect_macstats(struct cudbg_init *pdbg_init,
mac_stats_buff->port_count = n;
for (i = 0; i < mac_stats_buff->port_count; i++)
+ /* Incorrect, should use hport instead of i */
t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
rc = write_compression_hdr(&scratch_buff, dbg_buff);
@@ -1967,7 +1970,7 @@ static int collect_fw_devlog(struct cudbg_init *pdbg_init,
u32 offset;
int rc = 0, i;
- rc = t4_init_devlog_params(padap, 1);
+ rc = t4_init_devlog_ncores_params(padap, 1);
if (rc < 0) {
pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
index 86390eb4399d..b6a85f436db0 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
@@ -59,11 +59,6 @@
#include "common/t4_hw.h"
#endif
-#define CUDBG_SF_MAX_SECTOR (FLASH_CUDBG_START_SEC + FLASH_CUDBG_NSECS)
-#define CUDBG_SF_SECTOR_SIZE SF_SEC_SIZE
-#define CUDBG_START_SEC FLASH_CUDBG_START_SEC
-#define CUDBG_FLASH_SIZE FLASH_CUDBG_MAX_SIZE
-
#define CUDBG_EXT_DATA_BIT 0
#define CUDBG_EXT_DATA_VALID (1 << CUDBG_EXT_DATA_BIT)
@@ -121,7 +116,7 @@ struct cudbg_flash_sec_info {
u32 hdr_data_len; /* Total data */
u32 skip_size; /* Total size of large entities. */
u64 max_timestamp;
- char sec_data[CUDBG_SF_SECTOR_SIZE];
+ char sec_data[SF_SEC_SIZE];
u8 sec_bitmap[8];
};
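
With sec_data now sized by SF_SEC_SIZE directly, the eight-byte sec_bitmap still caps the trackable region at 8 bytes x 8 bits = 64 sectors; at the 64 KB serial-flash sector size used elsewhere in the driver that works out to 64 x 64 KB = 4 MB, which assumes the FLASH_LOC_CUDBG window never spans more than 64 sectors.
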
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index c8592807f843..9cdfd0fb9652 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -976,42 +976,6 @@ icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
return (0);
}
-/*
- * Request/response structure used to find out the adapter offloading a socket.
- */
-struct find_ofld_adapter_rr {
- struct socket *so;
- struct adapter *sc; /* result */
-};
-
-static void
-find_offload_adapter(struct adapter *sc, void *arg)
-{
- struct find_ofld_adapter_rr *fa = arg;
- struct socket *so = fa->so;
- struct tom_data *td = sc->tom_softc;
- struct tcpcb *tp;
- struct inpcb *inp;
-
- /* Non-TCP were filtered out earlier. */
- MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
-
- if (fa->sc != NULL)
- return; /* Found already. */
-
- if (td == NULL)
- return; /* TOE not enabled on this adapter. */
-
- inp = sotoinpcb(so);
- INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) == 0) {
- tp = intotcpcb(inp);
- if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
- fa->sc = sc; /* Found. */
- }
- INP_WUNLOCK(inp);
-}
-
static bool
is_memfree(struct adapter *sc)
{
@@ -1025,46 +989,6 @@ is_memfree(struct adapter *sc)
return (true);
}
-/* XXXNP: move this to t4_tom. */
-static void
-send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
-{
- struct wrqe *wr;
- struct fw_flowc_wr *flowc;
- const u_int nparams = 1;
- u_int flowclen;
- struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
-
- flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
-
- wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
- if (wr == NULL) {
- /* XXX */
- panic("%s: allocation failure.", __func__);
- }
- flowc = wrtod(wr);
- memset(flowc, 0, wr->wr_len);
-
- flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
- V_FW_FLOWC_WR_NPARAMS(nparams));
- flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
- V_FW_WR_FLOWID(toep->tid));
-
- flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
- flowc->mnemval[0].val = htobe32(maxlen);
-
- txsd->tx_credits = howmany(flowclen, 16);
- txsd->plen = 0;
- KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
- ("%s: not enough credits (%d)", __func__, toep->tx_credits));
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
-}
-
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
@@ -1093,7 +1017,6 @@ int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct inpcb *inp;
@@ -1137,15 +1060,11 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
fdrop(fp, curthread);
ICL_CONN_UNLOCK(ic);
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ icc->sc = find_offload_adapter(so);
+ if (icc->sc == NULL) {
error = EINVAL;
goto out;
}
- icc->sc = fa.sc;
max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length;
max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length;
@@ -1203,7 +1122,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
toep->params.ulp_mode = ULP_MODE_ISCSI;
toep->ulpcb = icc;
- send_iscsi_flowc_wr(icc->sc, toep,
+ send_txdataplen_max_flowc_wr(icc->sc, toep,
roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg));
set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
INP_WUNLOCK(inp);
@@ -1776,7 +1695,6 @@ cxgbei_limits(struct adapter *sc, void *arg)
static int
cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
{
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct adapter *sc;
@@ -1799,17 +1717,13 @@ cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
return (EINVAL);
}
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ sc = find_offload_adapter(so);
+ if (sc == NULL) {
fdrop(fp, curthread);
return (ENXIO);
}
fdrop(fp, curthread);
- sc = fa.sc;
error = begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims");
if (error != 0)
return (error);
diff --git a/sys/dev/cxgbe/firmware/t4fw_interface.h b/sys/dev/cxgbe/firmware/t4fw_interface.h
index 2794bae9474b..5874f0343b03 100644
--- a/sys/dev/cxgbe/firmware/t4fw_interface.h
+++ b/sys/dev/cxgbe/firmware/t4fw_interface.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2012-2017 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012-2017, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,8 +66,8 @@ enum fw_retval {
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
- FW_SCSI_UNDER_FLOW_ERR = 139, /* */
- FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
FW_SCSI_DDP_ERR = 141, /* DDP error*/
FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
FW_SCSI_IO_BLOCK = 143, /* IO is going to be blocked due to resource failure */
@@ -85,7 +84,7 @@ enum fw_memtype {
FW_MEMTYPE_FLASH = 0x4,
FW_MEMTYPE_INTERNAL = 0x5,
FW_MEMTYPE_EXTMEM1 = 0x6,
- FW_MEMTYPE_HMA = 0x7,
+ FW_MEMTYPE_HMA = 0x7,
};
/******************************************************************************
@@ -106,10 +105,14 @@ enum fw_wr_opcodes {
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_OFLD_TX_DATA_V2_WR = 0x0f,
FW_CMD_WR = 0x10,
FW_ETH_TX_PKT_VM_WR = 0x11,
FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_RI_RES_WR = 0x0c,
+ FW_QP_RES_WR = FW_RI_RES_WR,
+ /* iwarp wr used from rdma kernel and user space */
+ FW_V2_NVMET_TX_DATA_WR = 0x13,
FW_RI_RDMA_WRITE_WR = 0x14,
FW_RI_SEND_WR = 0x15,
FW_RI_RDMA_READ_WR = 0x16,
@@ -118,6 +121,15 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_RDMA_WRITE_CMPL_WR = 0x21,
+ /* rocev2 wr used from rdma kernel and user space */
+ FW_RI_V2_RDMA_WRITE_WR = 0x22,
+ FW_RI_V2_SEND_WR = 0x23,
+ FW_RI_V2_RDMA_READ_WR = 0x24,
+ FW_RI_V2_BIND_MW_WR = 0x25,
+ FW_RI_V2_FR_NSMR_WR = 0x26,
+ FW_RI_V2_ATOMIC_WR = 0x27,
+ FW_NVMET_V2_FR_NSMR_WR = 0x28,
+ FW_RI_V2_INV_LSTAG_WR = 0x1e,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_RI_SEND_IMMEDIATE_WR = 0x15,
FW_RI_ATOMIC_WR = 0x16,
@@ -138,10 +150,11 @@ enum fw_wr_opcodes {
FW_POFCOE_TCB_WR = 0x42,
FW_POFCOE_ULPTX_WR = 0x43,
FW_ISCSI_TX_DATA_WR = 0x45,
- FW_PTP_TX_PKT_WR = 0x46,
+ FW_PTP_TX_PKT_WR = 0x46,
FW_TLSTX_DATA_WR = 0x68,
FW_TLS_TUNNEL_OFLD_WR = 0x69,
FW_CRYPTO_LOOKASIDE_WR = 0x6d,
+ FW_CRYPTO_UPDATE_SA_WR = 0x6e,
FW_COISCSI_TGT_WR = 0x70,
FW_COISCSI_TGT_CONN_WR = 0x71,
FW_COISCSI_TGT_XMIT_WR = 0x72,
@@ -149,7 +162,8 @@ enum fw_wr_opcodes {
FW_ISNS_WR = 0x75,
FW_ISNS_XMIT_WR = 0x76,
FW_FILTER2_WR = 0x77,
- FW_LASTC2E_WR = 0x80
+ /* FW_LASTC2E_WR = 0x80 */
+ FW_LASTC2E_WR = 0xB0
};
/*
@@ -308,7 +322,7 @@ enum fw_filter_wr_cookie {
enum fw_filter_wr_nat_mode {
FW_FILTER_WR_NATMODE_NONE = 0,
- FW_FILTER_WR_NATMODE_DIP ,
+ FW_FILTER_WR_NATMODE_DIP,
FW_FILTER_WR_NATMODE_DIPDP,
FW_FILTER_WR_NATMODE_DIPDPSIP,
FW_FILTER_WR_NATMODE_DIPDPSP,
@@ -387,7 +401,7 @@ struct fw_filter2_wr {
__u8 newlip[16];
__u8 newfip[16];
__be32 natseqcheck;
- __be32 r9;
+ __be32 rocev2_qpn;
__be64 r10;
__be64 r11;
__be64 r12;
@@ -675,6 +689,19 @@ struct fw_filter2_wr {
#define G_FW_FILTER_WR_MATCHTYPEM(x) \
(((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
+#define S_FW_FILTER2_WR_ROCEV2 31
+#define M_FW_FILTER2_WR_ROCEV2 0x1
+#define V_FW_FILTER2_WR_ROCEV2(x) ((x) << S_FW_FILTER2_WR_ROCEV2)
+#define G_FW_FILTER2_WR_ROCEV2(x) \
+ (((x) >> S_FW_FILTER2_WR_ROCEV2) & M_FW_FILTER2_WR_ROCEV2)
+#define F_FW_FILTER2_WR_ROCEV2 V_FW_FILTER2_WR_ROCEV2(1U)
+
+#define S_FW_FILTER2_WR_QPN 0
+#define M_FW_FILTER2_WR_QPN 0xffffff
+#define V_FW_FILTER2_WR_QPN(x) ((x) << S_FW_FILTER2_WR_QPN)
+#define G_FW_FILTER2_WR_QPN(x) \
+ (((x) >> S_FW_FILTER2_WR_QPN) & M_FW_FILTER2_WR_QPN)
+
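The new definitions follow the header's uniform field-macro convention: S_* is the field's shift, M_* its width mask after shifting, V_*(x) packs a value, G_*(x) extracts one, and F_* is the packed form of a one-bit flag. A hedged example of round-tripping the new RoCEv2 QPN through these macros (assumes t4fw_interface.h is included; the byte-order calls mirror how work-request words are stored, and the helpers themselves are illustrative):

#include <stdint.h>
#include <sys/endian.h>	/* htobe32()/be32toh(); <endian.h> in glibc */

/* Pack a RoCEv2 QPN plus the ROCEV2 flag into a big-endian WR word. */
static uint32_t
pack_rocev2_qpn(uint32_t qpn)
{
	return (htobe32(F_FW_FILTER2_WR_ROCEV2 | V_FW_FILTER2_WR_QPN(qpn)));
}

/* Recover the QPN from the wire-format word. */
static uint32_t
unpack_rocev2_qpn(uint32_t word)
{
	return (G_FW_FILTER2_WR_QPN(be32toh(word)));
}
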
struct fw_ulptx_wr {
__be32 op_to_compl;
__be32 flowid_len16;
@@ -1034,7 +1061,10 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_SND_SCALE = 13,
FW_FLOWC_MNEM_RCV_SCALE = 14,
FW_FLOWC_MNEM_ULP_MODE = 15,
- FW_FLOWC_MNEM_MAX = 16,
+ FW_FLOWC_MNEM_EQID = 16,
+ FW_FLOWC_MNEM_CONG_ALG = 17,
+ FW_FLOWC_MNEM_TXDATAPLEN_MIN = 18,
+ FW_FLOWC_MNEM_MAX = 19,
};
struct fw_flowc_mnemval {
@@ -1153,6 +1183,55 @@ struct fw_ofld_tx_data_wr {
#define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \
(((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
+struct fw_ofld_tx_data_v2_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+};
+
+#define S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 31
+#define M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define G_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define F_FW_OFLD_TX_DATA_V2_WR_LSODISABLE \
+ V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 30
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 29
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_FLAGS 0
+#define M_FW_OFLD_TX_DATA_V2_WR_FLAGS 0xfffffff
+#define V_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+#define G_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_FLAGS) & M_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+
struct fw_cmd_wr {
__be32 op_dma;
__be32 len16_pkd;
@@ -1218,8 +1297,15 @@ enum fw_ri_wr_opcode {
FW_RI_FAST_REGISTER = 0xd,
FW_RI_LOCAL_INV = 0xe,
#endif
+ /* Chelsio specific */
FW_RI_SGE_EC_CR_RETURN = 0xf,
FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT,
+ FW_RI_SEND_IMMEDIATE = FW_RI_RDMA_INIT,
+
+ FW_RI_ROCEV2_SEND = 0x0,
+ FW_RI_ROCEV2_WRITE = 0x0,
+ FW_RI_ROCEV2_SEND_WITH_INV = 0x5,
+ FW_RI_ROCEV2_SEND_IMMEDIATE = 0xa,
};
enum fw_ri_wr_flags {
@@ -1229,7 +1315,8 @@ enum fw_ri_wr_flags {
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
FW_RI_RDMA_READ_INVALIDATE = 0x20,
- FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40,
+ //FW_RI_REPLAYED_WR_FLAG = 0x80,
};
enum fw_ri_mpa_attrs {
@@ -1522,18 +1609,302 @@ struct fw_ri_cqe {
#define G_FW_RI_CQE_TYPE(x) \
(((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE)
-enum fw_ri_res_type {
+enum fw_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
FW_RI_RES_TYPE_SRQ,
+ FW_QP_RES_TYPE_SQ = FW_RI_RES_TYPE_SQ,
+ FW_QP_RES_TYPE_CQ = FW_RI_RES_TYPE_CQ,
};
-enum fw_ri_res_op {
+enum fw_res_op {
FW_RI_RES_OP_WRITE,
FW_RI_RES_OP_RESET,
+ FW_QP_RES_OP_WRITE = FW_RI_RES_OP_WRITE,
+ FW_QP_RES_OP_RESET = FW_RI_RES_OP_RESET,
+};
+
+enum fw_qp_transport_type {
+ FW_QP_TRANSPORT_TYPE_IWARP,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_UD,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_RC,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_INI,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_TGT,
+ FW_QP_TRANSPORT_TYPE_NVMET,
+ FW_QP_TRANSPORT_TYPE_TOE,
+ FW_QP_TRANSPORT_TYPE_ISCSI,
+};
+
+struct fw_qp_res {
+ union fw_qp_restype {
+ struct fw_qp_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_qp_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_qp_res_wr {
+ __be32 op_to_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_qp_res res[0];
+#endif
};
+#define S_FW_QP_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_QP_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_QP_RES_WR_TRANSPORT_TYPE)
+#define G_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_QP_RES_WR_TRANSPORT_TYPE) & M_FW_QP_RES_WR_TRANSPORT_TYPE)
+
+#define S_FW_QP_RES_WR_VFN 8
+#define M_FW_QP_RES_WR_VFN 0xff
+#define V_FW_QP_RES_WR_VFN(x) ((x) << S_FW_QP_RES_WR_VFN)
+#define G_FW_QP_RES_WR_VFN(x) \
+ (((x) >> S_FW_QP_RES_WR_VFN) & M_FW_QP_RES_WR_VFN)
+
+#define S_FW_QP_RES_WR_NRES 0
+#define M_FW_QP_RES_WR_NRES 0xff
+#define V_FW_QP_RES_WR_NRES(x) ((x) << S_FW_QP_RES_WR_NRES)
+#define G_FW_QP_RES_WR_NRES(x) \
+ (((x) >> S_FW_QP_RES_WR_NRES) & M_FW_QP_RES_WR_NRES)
+
+#define S_FW_QP_RES_WR_FETCHSZM 26
+#define M_FW_QP_RES_WR_FETCHSZM 0x1
+#define V_FW_QP_RES_WR_FETCHSZM(x) ((x) << S_FW_QP_RES_WR_FETCHSZM)
+#define G_FW_QP_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHSZM) & M_FW_QP_RES_WR_FETCHSZM)
+#define F_FW_QP_RES_WR_FETCHSZM V_FW_QP_RES_WR_FETCHSZM(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGNS 25
+#define M_FW_QP_RES_WR_STATUSPGNS 0x1
+#define V_FW_QP_RES_WR_STATUSPGNS(x) ((x) << S_FW_QP_RES_WR_STATUSPGNS)
+#define G_FW_QP_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGNS) & M_FW_QP_RES_WR_STATUSPGNS)
+#define F_FW_QP_RES_WR_STATUSPGNS V_FW_QP_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGRO 24
+#define M_FW_QP_RES_WR_STATUSPGRO 0x1
+#define V_FW_QP_RES_WR_STATUSPGRO(x) ((x) << S_FW_QP_RES_WR_STATUSPGRO)
+#define G_FW_QP_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGRO) & M_FW_QP_RES_WR_STATUSPGRO)
+#define F_FW_QP_RES_WR_STATUSPGRO V_FW_QP_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_QP_RES_WR_FETCHNS 23
+#define M_FW_QP_RES_WR_FETCHNS 0x1
+#define V_FW_QP_RES_WR_FETCHNS(x) ((x) << S_FW_QP_RES_WR_FETCHNS)
+#define G_FW_QP_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHNS) & M_FW_QP_RES_WR_FETCHNS)
+#define F_FW_QP_RES_WR_FETCHNS V_FW_QP_RES_WR_FETCHNS(1U)
+
+#define S_FW_QP_RES_WR_FETCHRO 22
+#define M_FW_QP_RES_WR_FETCHRO 0x1
+#define V_FW_QP_RES_WR_FETCHRO(x) ((x) << S_FW_QP_RES_WR_FETCHRO)
+#define G_FW_QP_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHRO) & M_FW_QP_RES_WR_FETCHRO)
+#define F_FW_QP_RES_WR_FETCHRO V_FW_QP_RES_WR_FETCHRO(1U)
+
+#define S_FW_QP_RES_WR_HOSTFCMODE 20
+#define M_FW_QP_RES_WR_HOSTFCMODE 0x3
+#define V_FW_QP_RES_WR_HOSTFCMODE(x) ((x) << S_FW_QP_RES_WR_HOSTFCMODE)
+#define G_FW_QP_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_HOSTFCMODE) & M_FW_QP_RES_WR_HOSTFCMODE)
+
+#define S_FW_QP_RES_WR_CPRIO 19
+#define M_FW_QP_RES_WR_CPRIO 0x1
+#define V_FW_QP_RES_WR_CPRIO(x) ((x) << S_FW_QP_RES_WR_CPRIO)
+#define G_FW_QP_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_CPRIO) & M_FW_QP_RES_WR_CPRIO)
+#define F_FW_QP_RES_WR_CPRIO V_FW_QP_RES_WR_CPRIO(1U)
+
+#define S_FW_QP_RES_WR_ONCHIP 18
+#define M_FW_QP_RES_WR_ONCHIP 0x1
+#define V_FW_QP_RES_WR_ONCHIP(x) ((x) << S_FW_QP_RES_WR_ONCHIP)
+#define G_FW_QP_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_QP_RES_WR_ONCHIP) & M_FW_QP_RES_WR_ONCHIP)
+#define F_FW_QP_RES_WR_ONCHIP V_FW_QP_RES_WR_ONCHIP(1U)
+
+#define S_FW_QP_RES_WR_PCIECHN 16
+#define M_FW_QP_RES_WR_PCIECHN 0x3
+#define V_FW_QP_RES_WR_PCIECHN(x) ((x) << S_FW_QP_RES_WR_PCIECHN)
+#define G_FW_QP_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_QP_RES_WR_PCIECHN) & M_FW_QP_RES_WR_PCIECHN)
+
+#define S_FW_QP_RES_WR_IQID 0
+#define M_FW_QP_RES_WR_IQID 0xffff
+#define V_FW_QP_RES_WR_IQID(x) ((x) << S_FW_QP_RES_WR_IQID)
+#define G_FW_QP_RES_WR_IQID(x) \
+ (((x) >> S_FW_QP_RES_WR_IQID) & M_FW_QP_RES_WR_IQID)
+
+#define S_FW_QP_RES_WR_DCAEN 31
+#define M_FW_QP_RES_WR_DCAEN 0x1
+#define V_FW_QP_RES_WR_DCAEN(x) ((x) << S_FW_QP_RES_WR_DCAEN)
+#define G_FW_QP_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_DCAEN) & M_FW_QP_RES_WR_DCAEN)
+#define F_FW_QP_RES_WR_DCAEN V_FW_QP_RES_WR_DCAEN(1U)
+
+#define S_FW_QP_RES_WR_DCACPU 26
+#define M_FW_QP_RES_WR_DCACPU 0x1f
+#define V_FW_QP_RES_WR_DCACPU(x) ((x) << S_FW_QP_RES_WR_DCACPU)
+#define G_FW_QP_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_DCACPU) & M_FW_QP_RES_WR_DCACPU)
+
+#define S_FW_QP_RES_WR_FBMIN 23
+#define M_FW_QP_RES_WR_FBMIN 0x7
+#define V_FW_QP_RES_WR_FBMIN(x) ((x) << S_FW_QP_RES_WR_FBMIN)
+#define G_FW_QP_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMIN) & M_FW_QP_RES_WR_FBMIN)
+
+#define S_FW_QP_RES_WR_FBMAX 20
+#define M_FW_QP_RES_WR_FBMAX 0x7
+#define V_FW_QP_RES_WR_FBMAX(x) ((x) << S_FW_QP_RES_WR_FBMAX)
+#define G_FW_QP_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMAX) & M_FW_QP_RES_WR_FBMAX)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESHO 19
+#define M_FW_QP_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_QP_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESHO)
+#define G_FW_QP_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESHO) & M_FW_QP_RES_WR_CIDXFTHRESHO)
+#define F_FW_QP_RES_WR_CIDXFTHRESHO V_FW_QP_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESH 16
+#define M_FW_QP_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_QP_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESH)
+#define G_FW_QP_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESH) & M_FW_QP_RES_WR_CIDXFTHRESH)
+
+#define S_FW_QP_RES_WR_EQSIZE 0
+#define M_FW_QP_RES_WR_EQSIZE 0xffff
+#define V_FW_QP_RES_WR_EQSIZE(x) ((x) << S_FW_QP_RES_WR_EQSIZE)
+#define G_FW_QP_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_EQSIZE) & M_FW_QP_RES_WR_EQSIZE)
+
+#define S_FW_QP_RES_WR_IQANDST 15
+#define M_FW_QP_RES_WR_IQANDST 0x1
+#define V_FW_QP_RES_WR_IQANDST(x) ((x) << S_FW_QP_RES_WR_IQANDST)
+#define G_FW_QP_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDST) & M_FW_QP_RES_WR_IQANDST)
+#define F_FW_QP_RES_WR_IQANDST V_FW_QP_RES_WR_IQANDST(1U)
+
+#define S_FW_QP_RES_WR_IQANUS 14
+#define M_FW_QP_RES_WR_IQANUS 0x1
+#define V_FW_QP_RES_WR_IQANUS(x) ((x) << S_FW_QP_RES_WR_IQANUS)
+#define G_FW_QP_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUS) & M_FW_QP_RES_WR_IQANUS)
+#define F_FW_QP_RES_WR_IQANUS V_FW_QP_RES_WR_IQANUS(1U)
+
+#define S_FW_QP_RES_WR_IQANUD 12
+#define M_FW_QP_RES_WR_IQANUD 0x3
+#define V_FW_QP_RES_WR_IQANUD(x) ((x) << S_FW_QP_RES_WR_IQANUD)
+#define G_FW_QP_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUD) & M_FW_QP_RES_WR_IQANUD)
+
+#define S_FW_QP_RES_WR_IQANDSTINDEX 0
+#define M_FW_QP_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_QP_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_QP_RES_WR_IQANDSTINDEX)
+#define G_FW_QP_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDSTINDEX) & M_FW_QP_RES_WR_IQANDSTINDEX)
+
+#define S_FW_QP_RES_WR_IQDROPRSS 15
+#define M_FW_QP_RES_WR_IQDROPRSS 0x1
+#define V_FW_QP_RES_WR_IQDROPRSS(x) ((x) << S_FW_QP_RES_WR_IQDROPRSS)
+#define G_FW_QP_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDROPRSS) & M_FW_QP_RES_WR_IQDROPRSS)
+#define F_FW_QP_RES_WR_IQDROPRSS V_FW_QP_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_QP_RES_WR_IQGTSMODE 14
+#define M_FW_QP_RES_WR_IQGTSMODE 0x1
+#define V_FW_QP_RES_WR_IQGTSMODE(x) ((x) << S_FW_QP_RES_WR_IQGTSMODE)
+#define G_FW_QP_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQGTSMODE) & M_FW_QP_RES_WR_IQGTSMODE)
+#define F_FW_QP_RES_WR_IQGTSMODE V_FW_QP_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_QP_RES_WR_IQPCIECH 12
+#define M_FW_QP_RES_WR_IQPCIECH 0x3
+#define V_FW_QP_RES_WR_IQPCIECH(x) ((x) << S_FW_QP_RES_WR_IQPCIECH)
+#define G_FW_QP_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQPCIECH) & M_FW_QP_RES_WR_IQPCIECH)
+
+#define S_FW_QP_RES_WR_IQDCAEN 11
+#define M_FW_QP_RES_WR_IQDCAEN 0x1
+#define V_FW_QP_RES_WR_IQDCAEN(x) ((x) << S_FW_QP_RES_WR_IQDCAEN)
+#define G_FW_QP_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCAEN) & M_FW_QP_RES_WR_IQDCAEN)
+#define F_FW_QP_RES_WR_IQDCAEN V_FW_QP_RES_WR_IQDCAEN(1U)
+
+#define S_FW_QP_RES_WR_IQDCACPU 6
+#define M_FW_QP_RES_WR_IQDCACPU 0x1f
+#define V_FW_QP_RES_WR_IQDCACPU(x) ((x) << S_FW_QP_RES_WR_IQDCACPU)
+#define G_FW_QP_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCACPU) & M_FW_QP_RES_WR_IQDCACPU)
+
+#define S_FW_QP_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_QP_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_QP_RES_WR_IQINTCNTTHRESH)
+#define G_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQINTCNTTHRESH) & M_FW_QP_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_QP_RES_WR_IQO 3
+#define M_FW_QP_RES_WR_IQO 0x1
+#define V_FW_QP_RES_WR_IQO(x) ((x) << S_FW_QP_RES_WR_IQO)
+#define G_FW_QP_RES_WR_IQO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQO) & M_FW_QP_RES_WR_IQO)
+#define F_FW_QP_RES_WR_IQO V_FW_QP_RES_WR_IQO(1U)
+
+#define S_FW_QP_RES_WR_IQCPRIO 2
+#define M_FW_QP_RES_WR_IQCPRIO 0x1
+#define V_FW_QP_RES_WR_IQCPRIO(x) ((x) << S_FW_QP_RES_WR_IQCPRIO)
+#define G_FW_QP_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQCPRIO) & M_FW_QP_RES_WR_IQCPRIO)
+#define F_FW_QP_RES_WR_IQCPRIO V_FW_QP_RES_WR_IQCPRIO(1U)
+
+#define S_FW_QP_RES_WR_IQESIZE 0
+#define M_FW_QP_RES_WR_IQESIZE 0x3
+#define V_FW_QP_RES_WR_IQESIZE(x) ((x) << S_FW_QP_RES_WR_IQESIZE)
+#define G_FW_QP_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQESIZE) & M_FW_QP_RES_WR_IQESIZE)
+
+#define S_FW_QP_RES_WR_IQNS 31
+#define M_FW_QP_RES_WR_IQNS 0x1
+#define V_FW_QP_RES_WR_IQNS(x) ((x) << S_FW_QP_RES_WR_IQNS)
+#define G_FW_QP_RES_WR_IQNS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQNS) & M_FW_QP_RES_WR_IQNS)
+#define F_FW_QP_RES_WR_IQNS V_FW_QP_RES_WR_IQNS(1U)
+
+#define S_FW_QP_RES_WR_IQRO 30
+#define M_FW_QP_RES_WR_IQRO 0x1
+#define V_FW_QP_RES_WR_IQRO(x) ((x) << S_FW_QP_RES_WR_IQRO)
+#define G_FW_QP_RES_WR_IQRO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQRO) & M_FW_QP_RES_WR_IQRO)
+#define F_FW_QP_RES_WR_IQRO V_FW_QP_RES_WR_IQRO(1U)
+
+
struct fw_ri_res {
union fw_ri_restype {
struct fw_ri_res_sqrq {
@@ -1586,6 +1957,13 @@ struct fw_ri_res_wr {
#endif
};
+#define S_FW_RI_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_RI_RES_WR_TRANSPORT_TYPE)
+#define G_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_RES_WR_TRANSPORT_TYPE) & M_FW_RI_RES_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_RES_WR_VFN 8
#define M_FW_RI_RES_WR_VFN 0xff
#define V_FW_RI_RES_WR_VFN(x) ((x) << S_FW_RI_RES_WR_VFN)
@@ -2092,8 +2470,18 @@ enum fw_ri_init_rqeqid_srq {
FW_RI_INIT_RQEQID_SRQ = 1 << 31,
};
+enum fw_nvmet_ulpsubmode {
+ FW_NVMET_ULPSUBMODE_HCRC = 0x1<<0,
+ FW_NVMET_ULPSUBMODE_DCRC = 0x1<<1,
+ FW_NVMET_ULPSUBMODE_ING_DIR = 0x1<<2,
+ FW_NVMET_ULPSUBMODE_SRQ_ENABLE = 0x1<<3,
+ FW_NVMET_ULPSUBMODE_PER_PDU_CMP = 0x1<<4,
+ FW_NVMET_ULPSUBMODE_PI_ENABLE = 0x1<<5,
+ FW_NVMET_ULPSUBMODE_USER_MODE = 0x1<<6,
+};
+
struct fw_ri_wr {
- __be32 op_compl;
+ __be32 op_compl; /* op_to_transport_type */
__be32 flowid_len16;
__u64 cookie;
union fw_ri {
@@ -2123,6 +2511,55 @@ struct fw_ri_wr {
struct fw_ri_send_wr send;
} u;
} init;
+ struct fw_ri_rocev2_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 rocev2_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 psn_pkd;
+ __be32 epsn_pkd;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be32 q_key;
+ __u8 pkthdrsize;
+ __u8 r;
+ __be16 p_key;
+ //struct cpl_tx_tnl_lso tnl_lso;
+ __u8 tnl_lso[48]; /* cpl_tx_tnl_lso + cpl_tx_pkt_xt */
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_immd pkthdr[0];
+#endif
+ } rocev2_init;
+ struct fw_ri_nvmet_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 nvmt_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 r4[4];
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __u8 ulpsubmode;
+ __u8 nvmt_pda_cmp_imm_sz;
+ __be16 r7;
+ __be32 tpt_offset_t10_config;
+ __be32 r8[2];
+ } nvmet_init;
struct fw_ri_fini {
__u8 type;
__u8 r3[7];
@@ -2137,6 +2574,12 @@ struct fw_ri_wr {
} u;
};
+#define S_FW_RI_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_WR_TRANSPORT_TYPE(x) ((x) << S_FW_RI_WR_TRANSPORT_TYPE)
+#define G_FW_RI_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_WR_TRANSPORT_TYPE) & M_FW_RI_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_WR_MPAREQBIT 7
#define M_FW_RI_WR_MPAREQBIT 0x1
#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
@@ -2157,6 +2600,414 @@ struct fw_ri_wr {
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#define S_FW_RI_WR_PSN 0
+#define M_FW_RI_WR_PSN 0xffffff
+#define V_FW_RI_WR_PSN(x) ((x) << S_FW_RI_WR_PSN)
+#define G_FW_RI_WR_PSN(x) (((x) >> S_FW_RI_WR_PSN) & M_FW_RI_WR_PSN)
+
+#define S_FW_RI_WR_EPSN 0
+#define M_FW_RI_WR_EPSN 0xffffff
+#define V_FW_RI_WR_EPSN(x) ((x) << S_FW_RI_WR_EPSN)
+#define G_FW_RI_WR_EPSN(x) (((x) >> S_FW_RI_WR_EPSN) & M_FW_RI_WR_EPSN)
+
+#define S_FW_RI_WR_NVMT_PDA 3
+#define M_FW_RI_WR_NVMT_PDA 0x1f
+#define V_FW_RI_WR_NVMT_PDA(x) ((x) << S_FW_RI_WR_NVMT_PDA)
+#define G_FW_RI_WR_NVMT_PDA(x) \
+ (((x) >> S_FW_RI_WR_NVMT_PDA) & M_FW_RI_WR_NVMT_PDA)
+
+#define S_FW_RI_WR_CMP_IMM_SZ 1
+#define M_FW_RI_WR_CMP_IMM_SZ 0x3
+#define V_FW_RI_WR_CMP_IMM_SZ(x) ((x) << S_FW_RI_WR_CMP_IMM_SZ)
+#define G_FW_RI_WR_CMP_IMM_SZ(x) \
+ (((x) >> S_FW_RI_WR_CMP_IMM_SZ) & M_FW_RI_WR_CMP_IMM_SZ)
+
+#define S_FW_RI_WR_TPT_OFFSET 10
+#define M_FW_RI_WR_TPT_OFFSET 0x3fffff
+#define V_FW_RI_WR_TPT_OFFSET(x) ((x) << S_FW_RI_WR_TPT_OFFSET)
+#define G_FW_RI_WR_TPT_OFFSET(x) \
+ (((x) >> S_FW_RI_WR_TPT_OFFSET) & M_FW_RI_WR_TPT_OFFSET)
+
+#define S_FW_RI_WR_T10_CONFIG 0
+#define M_FW_RI_WR_T10_CONFIG 0x3ff
+#define V_FW_RI_WR_T10_CONFIG(x) ((x) << S_FW_RI_WR_T10_CONFIG)
+#define G_FW_RI_WR_T10_CONFIG(x) \
+ (((x) >> S_FW_RI_WR_T10_CONFIG) & M_FW_RI_WR_T10_CONFIG)
+
+
+/******************************************************************************
+ * R o C E V 2 W O R K R E Q U E S T s
+ **************************************/
+enum fw_rocev2_wr_opcode {
+ /* RC */
+ FW_ROCEV2_RC_SEND_FIRST = 0x00,
+ FW_ROCEV2_RC_SEND_MIDDLE = 0x01,
+ FW_ROCEV2_RC_SEND_LAST = 0x02,
+ FW_ROCEV2_RC_SEND_LAST_WITH_IMMD = 0x03,
+ FW_ROCEV2_RC_SEND_ONLY = 0x04,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_IMMD = 0x05,
+ FW_ROCEV2_RC_RDMA_WRITE_FIRST = 0x06,
+ FW_ROCEV2_RC_RDMA_WRITE_MIDDLE = 0x07,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST = 0x08,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST_WITH_IMMD = 0x09,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY = 0x0a,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY_WITH_IMMD = 0x0b,
+ FW_ROCEV2_RC_RDMA_READ_REQ = 0x0c,
+ FW_ROCEV2_RC_RDMA_READ_RESP_FIRST = 0x0d,
+ FW_ROCEV2_RC_RDMA_READ_RESP_MIDDLE = 0x0e,
+ FW_ROCEV2_RC_RDMA_READ_RESP_LAST = 0x0f,
+ FW_ROCEV2_RC_RDMA_READ_RESP_ONLY = 0x10,
+ FW_ROCEV2_RC_ACK = 0x11,
+ FW_ROCEV2_RC_ATOMIC_ACK = 0x12,
+ FW_ROCEV2_RC_CMP_SWAP = 0x13,
+ FW_ROCEV2_RC_FETCH_ADD = 0x14,
+ FW_ROCEV2_RC_SEND_LAST_WITH_INV = 0x16,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_INV = 0x17,
+
+ /* XRC */
+ FW_ROCEV2_XRC_SEND_FIRST = 0xa0,
+ FW_ROCEV2_XRC_SEND_MIDDLE = 0xa1,
+ FW_ROCEV2_XRC_SEND_LAST = 0xa2,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_IMMD = 0xa3,
+ FW_ROCEV2_XRC_SEND_ONLY = 0xa4,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_IMMD = 0xa5,
+ FW_ROCEV2_XRC_RDMA_WRITE_FIRST = 0xa6,
+ FW_ROCEV2_XRC_RDMA_WRITE_MIDDLE = 0xa7,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST = 0xa8,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST_WITH_IMMD = 0xa9,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY = 0xaa,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY_WITH_IMMD = 0xab,
+ FW_ROCEV2_XRC_RDMA_READ_REQ = 0xac,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_FIRST = 0xad,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_MIDDLE = 0xae,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_LAST = 0xaf,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_ONLY = 0xb0,
+ FW_ROCEV2_XRC_ACK = 0xb1,
+ FW_ROCEV2_XRC_ATOMIC_ACK = 0xb2,
+ FW_ROCEV2_XRC_CMP_SWAP = 0xb3,
+ FW_ROCEV2_XRC_FETCH_ADD = 0xb4,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_INV = 0xb6,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_INV = 0xb7,
+};
+
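The RC block mirrors the InfiniBand BTH opcode encoding, in which the top three opcode bits select the transport (000b for RC, 101b for XRC, hence the 0xa0 base of the XRC block) and the low five bits select the operation; every XRC value above is its RC counterpart plus 0xa0. A small sketch of that split, assuming the IBTA layout holds here (these helpers are not part of the header):

#include <stdint.h>

#define BTH_TRANSPORT(op)	(((op) >> 5) & 0x7)	/* 0 = RC, 5 = XRC */
#define BTH_OPERATION(op)	((op) & 0x1f)

/*
 * FW_ROCEV2_XRC_SEND_ONLY (0xa4): transport 5, operation 4, matching
 * FW_ROCEV2_RC_SEND_ONLY (0x04) in the RC block.
 */
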
+#if 0
+enum fw_rocev2_cqe_err {
+ /* TODO */
+};
+#endif
+
+struct fw_ri_v2_rdma_write_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be32 r4[2];
+ __be32 r5;
+ __be32 immd_data;
+ __be64 to_sink;
+ __be32 stag_sink;
+ __be32 plen;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_RDMA_WRITE_WR_PSN 0
+#define M_FW_RI_V2_RDMA_WRITE_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_WRITE_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_WRITE_WR_PSN)
+#define G_FW_RI_V2_RDMA_WRITE_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_WRITE_WR_PSN) & M_FW_RI_V2_RDMA_WRITE_WR_PSN)
+
+struct fw_ri_v2_send_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 sendop_psn;
+ __u8 immdlen;
+ __u8 r3[3];
+ __be32 r4;
+ /* CPL_TX_TNL_LSO, CPL_TX_PKT_XT and Eth/IP/UDP/BTH
+ * headers in UD QP case, align size to 16B */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_SEND_WR_SENDOP 24
+#define M_FW_RI_V2_SEND_WR_SENDOP 0xff
+#define V_FW_RI_V2_SEND_WR_SENDOP(x) ((x) << S_FW_RI_V2_SEND_WR_SENDOP)
+#define G_FW_RI_V2_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_SENDOP) & M_FW_RI_V2_SEND_WR_SENDOP)
+
+#define S_FW_RI_V2_SEND_WR_PSN 0
+#define M_FW_RI_V2_SEND_WR_PSN 0xffffff
+#define V_FW_RI_V2_SEND_WR_PSN(x) ((x) << S_FW_RI_V2_SEND_WR_PSN)
+#define G_FW_RI_V2_SEND_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_PSN) & M_FW_RI_V2_SEND_WR_PSN)
+
+struct fw_ri_v2_rdma_read_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be64 to_src;
+ __be32 stag_src;
+ __be32 plen;
+ struct fw_ri_isgl isgl_sink; /* RRQ, max 4 nsge in rocev2, 1 in iwarp */
+};
+
+#define S_FW_RI_V2_RDMA_READ_WR_PSN 0
+#define M_FW_RI_V2_RDMA_READ_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_READ_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_READ_WR_PSN)
+#define G_FW_RI_V2_RDMA_READ_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_READ_WR_PSN) & M_FW_RI_V2_RDMA_READ_WR_PSN)
+
+struct fw_ri_v2_atomic_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 atomicop_psn;
+};
+
+#define S_FW_RI_V2_ATOMIC_WR_ATOMICOP 28
+#define M_FW_RI_V2_ATOMIC_WR_ATOMICOP 0xf
+#define V_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ ((x) << S_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+#define G_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_ATOMICOP) & M_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+
+#define S_FW_RI_V2_ATOMIC_WR_PSN 0
+#define M_FW_RI_V2_ATOMIC_WR_PSN 0xffffff
+#define V_FW_RI_V2_ATOMIC_WR_PSN(x) ((x) << S_FW_RI_V2_ATOMIC_WR_PSN)
+#define G_FW_RI_V2_ATOMIC_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_PSN) & M_FW_RI_V2_ATOMIC_WR_PSN)
+
+struct fw_ri_v2_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r5;
+ __be32 r6[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+
+#define S_FW_RI_V2_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_V2_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_QPBINDE) & M_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_V2_BIND_MW_WR_QPBINDE V_FW_RI_V2_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_NS 5
+#define M_FW_RI_V2_BIND_MW_WR_NS 0x1
+#define V_FW_RI_V2_BIND_MW_WR_NS(x) ((x) << S_FW_RI_V2_BIND_MW_WR_NS)
+#define G_FW_RI_V2_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_NS) & M_FW_RI_V2_BIND_MW_WR_NS)
+#define F_FW_RI_V2_BIND_MW_WR_NS V_FW_RI_V2_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_V2_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_V2_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_V2_BIND_MW_WR_DCACPU)
+#define G_FW_RI_V2_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_DCACPU) & M_FW_RI_V2_BIND_MW_WR_DCACPU)
+
+struct fw_ri_v2_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r3;
+ __be32 r4[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_V2_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_V2_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_QPBINDE) & M_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_V2_FR_NSMR_WR_QPBINDE V_FW_RI_V2_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_NS 5
+#define M_FW_RI_V2_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_NS)
+#define G_FW_RI_V2_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_NS) & M_FW_RI_V2_FR_NSMR_WR_NS)
+#define F_FW_RI_V2_FR_NSMR_WR_NS V_FW_RI_V2_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_V2_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_V2_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_V2_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_DCACPU) & M_FW_RI_V2_FR_NSMR_WR_DCACPU)
+
+/******************************************************************************
+ * N V M E - T C P W O R K R E Q U E S T s
+ *****************************************************************************/
+
+struct fw_nvmet_v2_fr_nsmr_wr {
+ __be32 op_to_wrid;
+ __be32 flowid_len16;
+ __be32 r3;
+ __be32 r4;
+ __be32 mem_write_addr32;
+ __u8 r5;
+ __u8 imm_data_len32;
+ union {
+ __be16 dsgl_data_len32;
+ __be16 reset_mem_len32;
+ };
+ __be64 r6;
+};
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 23
+#define M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define G_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define F_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL \
+ V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 22
+#define M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define G_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define F_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM \
+ V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_WRID 0
+#define M_FW_NVMET_V2_FR_NSMR_WR_WRID 0xffff
+#define V_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_WRID)
+#define G_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_WRID) & M_FW_NVMET_V2_FR_NSMR_WR_WRID)
+
+struct fw_v2_nvmet_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 flags_hi_to_flags_lo;
+ /* optional immdlen data (fw_tx_pi_hdr, iso cpl, nvmet header etc) */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_dsgl dsgl_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 10
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 0x3fffff
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 9
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 8
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 7
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 6
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0x3f
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+
+
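flags_hi_to_flags_lo concatenates a 22-bit high flag field (bits 31:10), the ULP submode bits (ISO, PI, DCRC, HCRC in bits 9:6), and a 6-bit low flag field (bits 5:0). A hedged sketch of assembling that word from the macros above (assumes t4fw_interface.h is included; the field values are placeholders, not known-good settings):

#include <stdbool.h>
#include <stdint.h>
#include <sys/endian.h>	/* htobe32(); <endian.h> in glibc */

static uint32_t
nvmet_tx_flags(uint32_t flags_hi, uint32_t flags_lo, bool hcrc, bool dcrc)
{
	uint32_t w;

	w = V_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(flags_hi) |
	    V_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(flags_lo);
	if (hcrc)
		w |= F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC;
	if (dcrc)
		w |= F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC;
	return (htobe32(w));	/* stored big-endian in the WR */
}
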
/******************************************************************************
* F O i S C S I W O R K R E Q U E S T s
*********************************************/
@@ -3827,17 +4678,17 @@ struct fw_pi_error {
(((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE)
struct fw_tlstx_data_wr {
- __be32 op_to_immdlen;
- __be32 flowid_len16;
- __be32 plen;
- __be32 lsodisable_to_flags;
- __be32 r5;
- __be32 ctxloc_to_exp;
- __be16 mfs;
- __be16 adjustedplen_pkd;
- __be16 expinplenmax_pkd;
- __u8 pdusinplenmax_pkd;
- __u8 r10;
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+ __be32 r5;
+ __be32 ctxloc_to_exp;
+ __be16 mfs;
+ __be16 adjustedplen_pkd;
+ __be16 expinplenmax_pkd;
+ __u8 pdusinplenmax_pkd;
+ __u8 r10;
};
#define S_FW_TLSTX_DATA_WR_OPCODE 24
@@ -4092,6 +4943,265 @@ struct fw_tls_tunnel_ofld_wr {
__be32 r4;
};
+struct fw_crypto_update_sa_wr {
+ __u8 opcode;
+ __u8 saop_to_txrx;
+ __u8 vfn;
+ __u8 r1;
+ __u8 r2[3];
+ __u8 len16;
+ __be64 cookie;
+ __be16 r3;
+ __be16 ipsecidx;
+ __be32 SPI;
+ __be64 dip_hi;
+ __be64 dip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ union fw_crypto_update_sa_sa {
+ struct egress_sa {
+ __be32 valid_SPI_hi;
+ __be32 SPI_lo_eSeqNum_hi;
+ __be32 eSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_keyID;
+ } egress;
+ struct ingress_sa {
+ __be32 valid_to_iSeqNum_hi;
+ __be32 iSeqNum_mi;
+ __be32 iSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_IPVer;
+ } ingress;
+ } sa;
+ union fw_crypto_update_sa_key {
+ struct _aes128 {
+ __u8 key128[16];
+ __u8 H128[16];
+ __u8 rsvd[16];
+ } aes128;
+ struct _aes192 {
+ __u8 key192[24];
+ __be64 r3;
+ __u8 H192[16];
+ } aes192;
+ struct _aes256 {
+ __u8 key256[32];
+ __u8 H256[16];
+ } aes256;
+ } key;
+};
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SAOP 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_SAOP 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SAOP) & M_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SAOP V_FW_CRYPTO_UPDATE_SA_WR_SAOP(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_TXRX 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_TXRX 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define G_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_TXRX) & M_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define F_FW_CRYPTO_UPDATE_SA_WR_TXRX V_FW_CRYPTO_UPDATE_SA_WR_TXRX(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SPI_LO V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 4
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE \
+ V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYID 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYID 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYID) & M_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 12
+#define M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 0xfff
+#define V_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 11
+#define M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN \
+ V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 3
+#define M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNEN 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNEN) & M_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESNEN V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_IPVER 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_IPVER 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define G_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_IPVER) & M_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define F_FW_CRYPTO_UPDATE_SA_WR_IPVER V_FW_CRYPTO_UPDATE_SA_WR_IPVER(1U)
+
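All of the fields above follow this header's accessor convention: for a field FOO, S_FOO is its bit offset, M_FOO the right-justified mask, V_FOO(x) shifts a value into position, G_FOO(x) extracts it from a register word, and F_FOO is the ready-made flag for single-bit fields. A minimal sketch of the pattern, assuming this header and <stdint.h> are included (the function and variable names are illustrative only):

static void
crypto_sa_wr_fields_sketch(void)
{
	uint32_t ctrl = 0;

	ctrl |= V_FW_CRYPTO_UPDATE_SA_WR_SAOP(1);	/* place a value into its field */
	ctrl |= F_FW_CRYPTO_UPDATE_SA_WR_MODE;		/* set a single-bit field */

	if (G_FW_CRYPTO_UPDATE_SA_WR_MODE(ctrl) != 0)	/* read a field back out */
		ctrl |= V_FW_CRYPTO_UPDATE_SA_WR_TXRX(1);
}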
/******************************************************************************
* C O M M A N D s
*********************/
@@ -4157,11 +5267,12 @@ enum fw_cmd_opcodes {
FW_FCOE_SPARAMS_CMD = 0x35,
FW_FCOE_STATS_CMD = 0x37,
FW_FCOE_FCF_CMD = 0x38,
- FW_DCB_IEEE_CMD = 0x3a,
- FW_DIAG_CMD = 0x3d,
+ FW_DCB_IEEE_CMD = 0x3a,
+ FW_DIAG_CMD = 0x3d,
FW_PTP_CMD = 0x3e,
FW_HMA_CMD = 0x3f,
- FW_LASTC2E_CMD = 0x40,
+ FW_JBOF_WIN_REG_CMD = 0x40,
+ FW_LASTC2E_CMD = 0x41,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
};
@@ -4246,7 +5357,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
FW_LDST_ADDRSPC_FUNC_I2C = 0x002A, /* legacy */
- FW_LDST_ADDRSPC_LE = 0x0030,
+ FW_LDST_ADDRSPC_LE = 0x0030,
FW_LDST_ADDRSPC_I2C = 0x0038,
FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040,
FW_LDST_ADDRSPC_PCIE_DBG = 0x0041,
@@ -4665,11 +5776,17 @@ enum fw_caps_config_nic {
enum fw_caps_config_toe {
FW_CAPS_CONFIG_TOE = 0x00000001,
+ FW_CAPS_CONFIG_TOE_SENDPATH = 0x00000002,
};
enum fw_caps_config_rdma {
FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+ FW_CAPS_CONFIG_RDMA_ROCEV2 = 0x00000004,
+};
+
+enum fw_caps_config_nvme {
+ FW_CAPS_CONFIG_NVME_TCP = 0x00000001,
};
enum fw_caps_config_iscsi {
@@ -4687,8 +5804,9 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLSKEYS = 0x00000002,
- FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, /* NIC over ipsecofld */
FW_CAPS_CONFIG_TLS_HW = 0x00000008,
+ FW_CAPS_CONFIG_OFLD_OVER_IPSEC_INLINE = 0x00000010,/* ofld over ipsecofld */
};
enum fw_caps_config_fcoe {
@@ -4716,7 +5834,7 @@ struct fw_caps_config_cmd {
__be16 nbmcaps;
__be16 linkcaps;
__be16 switchcaps;
- __be16 r3;
+ __be16 nvmecaps;
__be16 niccaps;
__be16 toecaps;
__be16 rdmacaps;
@@ -4840,6 +5958,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DEV_512SGL_MR = 0x30,
FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31,
FW_PARAMS_PARAM_DEV_VI_ENABLE_INGRESS_AFTER_LINKUP = 0x32,
+ FW_PARAMS_PARAM_DEV_TID_QID_SEL_MASK = 0x33,
+ FW_PARAMS_PARAM_DEV_TX_TPCHMAP = 0x3A,
};
/*
@@ -4911,6 +6031,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ /* No separate STAG/PBL START/END for the NVMe target;
+ * it uses the same RDMA STAG/PBL memory range. */
FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
@@ -4943,7 +6065,7 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
- FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+ FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36,
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_RSSKEYINFO = 0x38,
@@ -4955,6 +6077,13 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E,
FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F,
FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+ FW_PARAMS_PARAM_PFVF_RRQ_START = 0x41,
+ FW_PARAMS_PARAM_PFVF_RRQ_END = 0x42,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_START = 0x43,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_END = 0x44,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TUNNEL = 0x45,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TRANSPORT = 0x46,
+ FW_PARAMS_PARAM_PFVF_OFLD_NIPSEC_TUNNEL = 0x47,
};
/*
@@ -4984,6 +6113,19 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_FLM_DCA = 0x30
};
+#define S_T7_DMAQ_CONM_CTXT_CNGTPMODE 0
+#define M_T7_DMAQ_CONM_CTXT_CNGTPMODE 0x3
+#define V_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) ((x) << S_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+#define G_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CNGTPMODE) & M_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+
+#define S_T7_DMAQ_CONM_CTXT_CH_VEC 2
+#define M_T7_DMAQ_CONM_CTXT_CH_VEC 0xf
+#define V_T7_DMAQ_CONM_CTXT_CH_VEC(x) ((x) << S_T7_DMAQ_CONM_CTXT_CH_VEC)
+#define G_T7_DMAQ_CONM_CTXT_CH_VEC(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CH_VEC) & M_T7_DMAQ_CONM_CTXT_CH_VEC)
+
+
/*
* chnet parameters
*/
@@ -5199,7 +6341,8 @@ struct fw_pfvf_cmd {
enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
FW_IQ_TYPE_NO_FL_INT_CAP,
- FW_IQ_TYPE_VF_CQ
+ FW_IQ_TYPE_VF_CQ,
+ FW_IQ_TYPE_CQ,
};
enum fw_iq_iqtype {
@@ -5787,6 +6930,12 @@ struct fw_eq_mngt_cmd {
(((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP)
#define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U)
+#define S_FW_EQ_MNGT_CMD_COREGROUP 16
+#define M_FW_EQ_MNGT_CMD_COREGROUP 0x3f
+#define V_FW_EQ_MNGT_CMD_COREGROUP(x) ((x) << S_FW_EQ_MNGT_CMD_COREGROUP)
+#define G_FW_EQ_MNGT_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_COREGROUP) & M_FW_EQ_MNGT_CMD_COREGROUP)
+
#define S_FW_EQ_MNGT_CMD_CMPLIQID 20
#define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff
#define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID)
@@ -5977,6 +7126,12 @@ struct fw_eq_eth_cmd {
(((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP)
#define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U)
+#define S_FW_EQ_ETH_CMD_COREGROUP 16
+#define M_FW_EQ_ETH_CMD_COREGROUP 0x3f
+#define V_FW_EQ_ETH_CMD_COREGROUP(x) ((x) << S_FW_EQ_ETH_CMD_COREGROUP)
+#define G_FW_EQ_ETH_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_COREGROUP) & M_FW_EQ_ETH_CMD_COREGROUP)
+
#define S_FW_EQ_ETH_CMD_EQID 0
#define M_FW_EQ_ETH_CMD_EQID 0xfffff
#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
@@ -6190,6 +7345,12 @@ struct fw_eq_ctrl_cmd {
(((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
#define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U)
+#define S_FW_EQ_CTRL_CMD_COREGROUP 16
+#define M_FW_EQ_CTRL_CMD_COREGROUP 0x3f
+#define V_FW_EQ_CTRL_CMD_COREGROUP(x) ((x) << S_FW_EQ_CTRL_CMD_COREGROUP)
+#define G_FW_EQ_CTRL_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_COREGROUP) & M_FW_EQ_CTRL_CMD_COREGROUP)
+
#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
#define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff
#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
@@ -6377,6 +7538,12 @@ struct fw_eq_ofld_cmd {
(((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP)
#define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U)
+#define S_FW_EQ_OFLD_CMD_COREGROUP 16
+#define M_FW_EQ_OFLD_CMD_COREGROUP 0x3f
+#define V_FW_EQ_OFLD_CMD_COREGROUP(x) ((x) << S_FW_EQ_OFLD_CMD_COREGROUP)
+#define G_FW_EQ_OFLD_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_COREGROUP) & M_FW_EQ_OFLD_CMD_COREGROUP)
+
#define S_FW_EQ_OFLD_CMD_EQID 0
#define M_FW_EQ_OFLD_CMD_EQID 0xfffff
#define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID)
@@ -7285,7 +8452,8 @@ fec_supported(uint32_t caps)
{
return ((caps & (FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_SPEED_50G |
- FW_PORT_CAP32_SPEED_100G)) != 0);
+ FW_PORT_CAP32_SPEED_100G | FW_PORT_CAP32_SPEED_200G |
+ FW_PORT_CAP32_SPEED_400G)) != 0);
}
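With the widened speed mask, fec_supported(FW_PORT_CAP32_SPEED_200G) and fec_supported(FW_PORT_CAP32_SPEED_400G) now return true as well, alongside the existing 25G/50G/100G cases.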
enum fw_port_action {
@@ -7799,6 +8967,8 @@ enum fw_port_type {
FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_KR_XLAUI = 22, /* No, 4, 40G/10G/1G, No AN*/
+ FW_PORT_TYPE_SFP56 = 26,
+ FW_PORT_TYPE_QSFP56 = 27,
FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};
@@ -8862,7 +10032,9 @@ struct fw_devlog_cmd {
__u8 r2[7];
__be32 memtype_devlog_memaddr16_devlog;
__be32 memsize_devlog;
- __be32 r3[2];
+ __u8 num_devlog;
+ __u8 r3[3];
+ __be32 r4;
};
#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28
@@ -9786,6 +10958,45 @@ struct fw_hma_cmd {
#define G_FW_HMA_CMD_ADDR_SIZE(x) \
(((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE)
+struct fw_jbof_win_reg_cmd {
+ __be32 op_pkd;
+ __be32 alloc_to_len16;
+ __be32 window_num_pcie_params;
+ __be32 window_size;
+ __be64 bus_addr;
+ __be64 phy_address;
+};
+
+#define S_FW_JBOF_WIN_REG_CMD_ALLOC 31
+#define M_FW_JBOF_WIN_REG_CMD_ALLOC 0x1
+#define V_FW_JBOF_WIN_REG_CMD_ALLOC(x) ((x) << S_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define G_FW_JBOF_WIN_REG_CMD_ALLOC(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_ALLOC) & M_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define F_FW_JBOF_WIN_REG_CMD_ALLOC V_FW_JBOF_WIN_REG_CMD_ALLOC(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_FREE 30
+#define M_FW_JBOF_WIN_REG_CMD_FREE 0x1
+#define V_FW_JBOF_WIN_REG_CMD_FREE(x) ((x) << S_FW_JBOF_WIN_REG_CMD_FREE)
+#define G_FW_JBOF_WIN_REG_CMD_FREE(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_FREE) & M_FW_JBOF_WIN_REG_CMD_FREE)
+#define F_FW_JBOF_WIN_REG_CMD_FREE V_FW_JBOF_WIN_REG_CMD_FREE(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 7
+#define M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 0xf
+#define V_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+#define G_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM) & \
+ M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+
+#define S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0
+#define M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0x7f
+#define V_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+#define G_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS) & \
+ M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+
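A hedged sketch of filling in the new work request using only the struct and accessors above; the window number, window size, and the detail of how the opcode lands in op_pkd are illustrative assumptions, not taken from this header:

static void
jbof_window_alloc_sketch(struct fw_jbof_win_reg_cmd *c)
{
	memset(c, 0, sizeof(*c));	/* assumes <string.h> or the kernel memset */
	/* Assumption: the driver's usual WR setup packs the opcode
	 * (FW_JBOF_WIN_REG_CMD, 0x40) into op_pkd; not shown here. */
	c->alloc_to_len16 = htobe32(F_FW_JBOF_WIN_REG_CMD_ALLOC);
	c->window_num_pcie_params = htobe32(
	    V_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(0) |
	    V_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(0));
	c->window_size = htobe32(64 * 1024);	/* illustrative window size */
	c->bus_addr = 0;			/* caller-supplied in practice */
	c->phy_address = 0;
}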
/******************************************************************************
* P C I E F W R E G I S T E R
**************************************/
@@ -9914,8 +11125,15 @@ enum pcie_fw_eval {
*/
#define PCIE_FW_PF_DEVLOG 7
+#define S_PCIE_FW_PF_DEVLOG_COUNT_MSB 31
+#define M_PCIE_FW_PF_DEVLOG_COUNT_MSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_MSB) & M_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+
#define S_PCIE_FW_PF_DEVLOG_NENTRIES128 28
-#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0xf
+#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0x7
#define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128)
#define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
@@ -9928,8 +11146,15 @@ enum pcie_fw_eval {
#define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16)
+#define S_PCIE_FW_PF_DEVLOG_COUNT_LSB 3
+#define M_PCIE_FW_PF_DEVLOG_COUNT_LSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_LSB) & M_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+
#define S_PCIE_FW_PF_DEVLOG_MEMTYPE 0
-#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0xf
+#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0x7
#define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x) ((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE)
#define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE)
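Note that the devlog counter added here is split across non-adjacent bits of the same register (MSB at bit 31, LSB at bit 3). A sketch of reassembling it, on the stated assumption that the two bits simply form a plain 2-bit value:

static u_int
devlog_count(uint32_t pf_reg)
{
	/* Assumption: COUNT is a plain 2-bit value, MSB:LSB. */
	return ((G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_reg) << 1) |
	    G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_reg));
}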
@@ -9969,7 +11194,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
FW_HDR_CHIP_T5,
- FW_HDR_CHIP_T6
+ FW_HDR_CHIP_T6,
+ FW_HDR_CHIP_T7
};
#define S_FW_HDR_FW_VER_MAJOR 24
@@ -10015,6 +11241,11 @@ enum {
T6FW_VERSION_MINOR = 27,
T6FW_VERSION_MICRO = 5,
T6FW_VERSION_BUILD = 0,
+
+ T7FW_VERSION_MAJOR = 2,
+ T7FW_VERSION_MINOR = 0,
+ T7FW_VERSION_MICRO = 0,
+ T7FW_VERSION_BUILD = 0,
};
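Presumably, as for the earlier chips, these per-chip fields are packed into a single 32-bit version word with FW_VERSION32 (defined just below), e.g.:

uint32_t t7_ver = FW_VERSION32(T7FW_VERSION_MAJOR, T7FW_VERSION_MINOR,
    T7FW_VERSION_MICRO, T7FW_VERSION_BUILD);	/* 2.0.0.0 */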
enum {
@@ -10050,6 +11281,17 @@ enum {
T6FW_HDR_INTFVER_ISCSI = 0x00,
T6FW_HDR_INTFVER_FCOEPDU= 0x00,
T6FW_HDR_INTFVER_FCOE = 0x00,
+
+ /* T7
+ */
+ T7FW_HDR_INTFVER_NIC = 0x00,
+ T7FW_HDR_INTFVER_VNIC = 0x00,
+ T7FW_HDR_INTFVER_OFLD = 0x00,
+ T7FW_HDR_INTFVER_RI = 0x00,
+ T7FW_HDR_INTFVER_ISCSIPDU= 0x00,
+ T7FW_HDR_INTFVER_ISCSI = 0x00,
+ T7FW_HDR_INTFVER_FCOEPDU= 0x00,
+ T7FW_HDR_INTFVER_FCOE = 0x00,
};
#define FW_VERSION32(MAJOR, MINOR, MICRO, BUILD) ( \
@@ -10085,7 +11327,7 @@ struct fw_ephy_hdr {
enum {
FW_EPHY_HDR_MAGIC = 0x65706879,
};
-
+
struct fw_ifconf_dhcp_info {
__be32 addr;
__be32 mask;
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg.txt b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
new file mode 100644
index 000000000000..499af3675bd9
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
@@ -0,0 +1,644 @@
+# Chelsio T7 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T7-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer applications.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T7 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
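+ # Illustrative reading of the formula above: all eight timers share the
+ # same (sge_dbq_timertick + 1) base, so the entries above give durations
+ # in the ratio 1 : 2 : 4 : 6 : 8 : 10 : 12 : 16.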
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # Regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts)
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts)
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NFLIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in units of 16
+ nipsec_transport16 = 191 # in units of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS * (no. of snmc grp + 1 hw mac) + 1 anmc grp), rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# The following function, 7, is used by the embedded ARM to communicate
+# with the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608-byte ingress buffer that is used for ingress buffering
+# of packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
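+# Worked example for the settings below: hwm = 30 starts pause frames
+# while 3.0 MTUs of buffer remain (30 x 0.1 MTU), lwm = 15 sends the
+# 'unpause' frame at 1.5 MTUs, and dwm = 30 keeps the watermarks at
+# least 3000 bytes apart (30 x 100 bytes).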
+[port "0"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x684e23fb
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
new file mode 100644
index 000000000000..f06f059f4112
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
@@ -0,0 +1,530 @@
+# Chelsio T7 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T7-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer applications.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T7 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 1000 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 1 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 # Crypto core reset
+ reg[0x46000] = 0xa/0xe # 16K ESH Hi Extraction window
+
+ #Tick granularities in kbps
+ tsch_ticks = 1000, 100, 10, 1
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ #gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # Regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts)
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts)
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+[function "0"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NFLIQ_WD
+ nethctrl = 96 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 32 # number of clip region entries
+ nfilter = 48 # number of filter region entries
+ nserver = 48 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
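+ # Taking the "unit of 16" comments above at face value, these two values
+ # provision 64 * 16 = 1024 tunnel-mode and 191 * 16 = 3056 transport-mode
+ # IPsec SAs.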
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "1"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS * (no. of snmc grp + 1 hw mac) + 1 anmc grp) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 2048
+ tp_l2t = 1020
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+
+# MPS features a 196608-byte ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
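+# Example (assuming a 9000-byte MTU): hwm = 30 starts pause frames when
+# 30 * 0.1 * 9000 = 27000 bytes are still available, and lwm = 15 sends the
+# 'unpause' frame when 15 * 0.1 * 9000 = 13500 bytes remain.
+#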
+[port "0"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x22432d98
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues w/ Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
new file mode 100644
index 000000000000..0bca1c194af8
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
@@ -0,0 +1,644 @@
+# Chelsio T7 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T7-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer applications.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 16 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T7 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
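+ # For example (note 1 above): with the 64B Ingress Padding Boundary, a
+ # raw Free List Buffer Size of 9000 would be rounded up to 9024; the
+ # sizes below are already multiples of 64.
+ #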
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiples of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (the following lines enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # which regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
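+#
+# With these definitions the scaling example above works out to
+# NPORTS * NFUNCS * NCPUS = 2 * 3 * 16 = 96 Ingress Queues and MSI-X
+# Vectors on the Unified PF, before the firmware-event extras.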
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts)
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts)
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
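+#
+# Sanity check: NFLIQ_HYPERV = NVI_HYPERV * NVIIQ_HYPERV + NMSIX_HYPERV
+# = 16 * 2 + 8 = 40, i.e. the per-VM VIQs plus the Forwarded Interrupt
+# Queues they are steered to.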
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
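+# For example, NVI_UNIFIED = NVI_NIC + NVI_OFLD + NVI_RDMA + NVI_ISCSI +
+# NVI_FCOE + NVI_HYPERV = 4 + 0 + 0 + 4 + 4 + 16 = 28.
+#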
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we could if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
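+# For example, PF4_INT = 128 below is programmed as 127, matching the
+# N-1 encoding of the PCI-E MSI-X Table Size field.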
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+ niqflint = 170 # NFLIQ_UNIFIED + NFLIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nexactf = 16 # (NPORTS * (no. of snmc grp + 1 hw mac) + 1 anmc grp) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# The following function, 7, is used by the embedded ARM to communicate with
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608-byte ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch. The
+# following params control how the buffer memory is distributed and the L2 flow
+# control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+# (in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x5cab62d4
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues w/ Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index 3c4d269f6c69..4610f91e96ac 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -132,26 +132,21 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.rqt.total = sc->vres.rq.size;
rdev->stats.qid.total = sc->vres.qp.size;
- rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ rc = c4iw_init_resource(rdev, T4_MAX_NUM_PD);
if (rc) {
device_printf(sc->dev, "error %d initializing resources\n", rc);
goto err1;
}
- rc = c4iw_pblpool_create(rdev);
- if (rc) {
- device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
- goto err2;
- }
rc = c4iw_rqtpool_create(rdev);
if (rc) {
device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
- goto err3;
+ goto err2;
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
if (!rdev->status_page) {
rc = -ENOMEM;
- goto err4;
+ goto err3;
}
rdev->status_page->qp_start = sc->vres.qp.start;
rdev->status_page->qp_size = sc->vres.qp.size;
@@ -168,15 +163,13 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
rc = -ENOMEM;
- goto err5;
+ goto err4;
}
return (0);
-err5:
- free_page((unsigned long)rdev->status_page);
err4:
- c4iw_rqtpool_destroy(rdev);
+ free_page((unsigned long)rdev->status_page);
err3:
- c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
err2:
c4iw_destroy_resource(&rdev->resource);
err1:
@@ -186,7 +179,6 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
free_page((unsigned long)rdev->status_page);
- c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index ca2595b65b02..47ce10562c66 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -99,7 +99,6 @@ struct c4iw_id_table {
};
struct c4iw_resource {
- struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
};
@@ -904,11 +903,9 @@ int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
diff --git a/sys/dev/cxgbe/iw_cxgbe/mem.c b/sys/dev/cxgbe/iw_cxgbe/mem.c
index 4a1adc118b7c..ae0aa0edc17a 100644
--- a/sys/dev/cxgbe/iw_cxgbe/mem.c
+++ b/sys/dev/cxgbe/iw_cxgbe/mem.c
@@ -56,46 +56,23 @@ mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, int wait)
+ dma_addr_t data, int wait)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- addr &= 0x7FFFFFF;
-
if (wait)
c4iw_init_wr_wait(&wr_wait);
- wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);
+ wr_len = T4_WRITE_MEM_DMA_LEN;
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- (wait ? F_FW_WR_COMPL : 0));
- ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
- ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
- V_T5_ULP_MEMIO_ORDER(1) |
- V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));
-
- sgl = (struct ulptx_sgl *)(ulpmc + 1);
- sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
- V_ULPTX_NSGE(1));
- sgl->len0 = cpu_to_be32(len);
- sgl->addr0 = cpu_to_be64((u64)data);
-
+ t4_write_mem_dma_wr(sc, wrtod(wr), wr_len, 0, addr, len, data,
+ wait ? (u64)(unsigned long)&wr_wait : 0);
t4_wrq_tx(sc, wr);
if (wait)
@@ -108,70 +85,32 @@ static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_idata *ulpsc;
- u8 wr_len, *to_dp, *from_dp;
+ u8 wr_len, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- u32 cmd;
-
- cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
- cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);
-
- addr &= 0x7FFFFFF;
CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
- num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
+ num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
+ from_dp = data;
for (i = 0; i < num_wqe; i++) {
-
- copy_len = min(len, C4IW_MAX_INLINE_SIZE);
- wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
- roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+ copy_len = min(len, T4_MAX_INLINE_SIZE);
+ wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
-
- if (i == (num_wqe-1)) {
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- F_FW_WR_COMPL);
- ulpmc->wr.wr_lo =
- (__force __be64)(unsigned long) &wr_wait;
- } else
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
- ulpmc->wr.wr_mid = cpu_to_be32(
- V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
-
- ulpmc->cmd = cmd;
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
- DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
- 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
-
- to_dp = (u8 *)(ulpsc + 1);
- from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
- if (data)
- memcpy(to_dp, from_dp, copy_len);
- else
- memset(to_dp, 0, copy_len);
- if (copy_len % T4_ULPTX_MIN_IO)
- memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
- (copy_len % T4_ULPTX_MIN_IO));
+ t4_write_mem_inline_wr(sc, wrtod(wr), wr_len, 0, addr, copy_len,
+ from_dp, i == (num_wqe - 1) ?
+ (__force __be64)(unsigned long) &wr_wait : 0);
t4_wrq_tx(sc, wr);
- len -= C4IW_MAX_INLINE_SIZE;
- }
+ if (from_dp != NULL)
+ from_dp += T4_MAX_INLINE_SIZE;
+ addr += T4_MAX_INLINE_SIZE >> 5;
+ len -= T4_MAX_INLINE_SIZE;
+ }
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
return ret;
}
@@ -201,7 +140,7 @@ _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
- (void *)daddr, !remain);
+ daddr, !remain);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -263,8 +202,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
- stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
- if (!stag_idx) {
+ stag_idx = t4_stag_alloc(rdev->adap, 1);
+ if (stag_idx == T4_STAG_UNSET) {
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
@@ -309,7 +248,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
sizeof(tpt), &tpt);
if (reset_tpt_entry) {
- c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+ t4_stag_free(rdev->adap, stag_idx, 1);
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 0e374bc961c4..cbf4bae00a60 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -1326,6 +1326,8 @@ creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
return (EINVAL);
}
txsd = &toep->txsd[toep->txsd_pidx];
+ KASSERT(howmany(wrsize, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %zu too large", __func__, howmany(wrsize, 16)));
txsd->tx_credits = howmany(wrsize, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
diff --git a/sys/dev/cxgbe/iw_cxgbe/resource.c b/sys/dev/cxgbe/iw_cxgbe/resource.c
index 644ea0c631bf..cd20f1eafdd6 100644
--- a/sys/dev/cxgbe/iw_cxgbe/resource.c
+++ b/sys/dev/cxgbe/iw_cxgbe/resource.c
@@ -59,13 +59,9 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid)
{
int err = 0;
- err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
- C4IW_ID_TABLE_F_RANDOM);
- if (err)
- goto tpt_err;
err = c4iw_init_qid_table(rdev);
if (err)
goto qid_err;
@@ -77,8 +73,6 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
- c4iw_id_table_free(&rdev->resource.tpt_table);
- tpt_err:
return -ENOMEM;
}
@@ -243,7 +237,6 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
- c4iw_id_table_free(&rscp->tpt_table);
c4iw_id_table_free(&rscp->qid_table);
c4iw_id_table_free(&rscp->pdid_table);
}
@@ -254,12 +247,9 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
- unsigned long addr;
+ u32 addr;
- vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
- 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
- M_FIRSTFIT|M_NOWAIT, &addr);
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
+ addr = t4_pblpool_alloc(rdev->adap, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,33 +258,15 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
- return (u32)addr;
+ return addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
- vmem_xfree(rdev->pbl_arena, addr, roundup(size,(1 << MIN_PBL_SHIFT)));
-}
-
-int c4iw_pblpool_create(struct c4iw_rdev *rdev)
-{
- rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
- rdev->adap->vres.pbl.start,
- rdev->adap->vres.pbl.size,
- 1, 0, M_FIRSTFIT| M_NOWAIT);
- if (!rdev->pbl_arena)
- return -ENOMEM;
-
- return 0;
-}
-
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
-{
- vmem_destroy(rdev->pbl_arena);
+ t4_pblpool_free(rdev->adap, addr, size);
}
/* RQT Memory Manager. */
diff --git a/sys/dev/cxgbe/iw_cxgbe/t4.h b/sys/dev/cxgbe/iw_cxgbe/t4.h
index 48f85cf7965b..ffb610420640 100644
--- a/sys/dev/cxgbe/iw_cxgbe/t4.h
+++ b/sys/dev/cxgbe/iw_cxgbe/t4.h
@@ -64,7 +64,6 @@
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
-#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index b57c03f076b5..91a43785aaca 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -229,7 +229,17 @@ struct iw_tunables {
struct tls_tunables {
int inline_keys;
- int combo_wrs;
+ union {
+ struct {
+ /* T6 only. */
+ int combo_wrs;
+ };
+ struct {
+ /* T7 only. */
+ int short_records;
+ int partial_ghash;
+ };
+ };
};
#ifdef TCP_OFFLOAD
diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c
index 8d4552116d96..4b583b67ba07 100644
--- a/sys/dev/cxgbe/t4_filter.c
+++ b/sys/dev/cxgbe/t4_filter.c
@@ -322,48 +322,85 @@ remove_hftid(struct adapter *sc, struct filter_entry *f)
LIST_REMOVE(f, link_tid);
}
-/*
- * Input: driver's 32b filter mode.
- * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
- */
static uint16_t
-mode_to_fconf(uint32_t mode)
+mode_to_fconf_t4(uint32_t mode)
{
uint32_t fconf = 0;
if (mode & T4_FILTER_IP_FRAGMENT)
fconf |= F_FRAGMENTATION;
-
if (mode & T4_FILTER_MPS_HIT_TYPE)
fconf |= F_MPSHITTYPE;
-
if (mode & T4_FILTER_MAC_IDX)
fconf |= F_MACMATCH;
-
if (mode & T4_FILTER_ETH_TYPE)
fconf |= F_ETHERTYPE;
-
if (mode & T4_FILTER_IP_PROTO)
fconf |= F_PROTOCOL;
-
if (mode & T4_FILTER_IP_TOS)
fconf |= F_TOS;
-
if (mode & T4_FILTER_VLAN)
fconf |= F_VLAN;
-
if (mode & T4_FILTER_VNIC)
fconf |= F_VNIC_ID;
-
if (mode & T4_FILTER_PORT)
fconf |= F_PORT;
-
if (mode & T4_FILTER_FCoE)
fconf |= F_FCOE;
return (fconf);
}
+static uint16_t
+mode_to_fconf_t7(uint32_t mode)
+{
+ uint32_t fconf = 0;
+
+ if (mode & T4_FILTER_TCPFLAGS)
+ fconf |= F_TCPFLAGS;
+ if (mode & T4_FILTER_SYNONLY)
+ fconf |= F_SYNONLY;
+ if (mode & T4_FILTER_ROCE)
+ fconf |= F_ROCE;
+ if (mode & T4_FILTER_IP_FRAGMENT)
+ fconf |= F_T7_FRAGMENTATION;
+ if (mode & T4_FILTER_MPS_HIT_TYPE)
+ fconf |= F_T7_MPSHITTYPE;
+ if (mode & T4_FILTER_MAC_IDX)
+ fconf |= F_T7_MACMATCH;
+ if (mode & T4_FILTER_ETH_TYPE)
+ fconf |= F_T7_ETHERTYPE;
+ if (mode & T4_FILTER_IP_PROTO)
+ fconf |= F_T7_PROTOCOL;
+ if (mode & T4_FILTER_IP_TOS)
+ fconf |= F_T7_TOS;
+ if (mode & T4_FILTER_VLAN)
+ fconf |= F_T7_VLAN;
+ if (mode & T4_FILTER_VNIC)
+ fconf |= F_T7_VNIC_ID;
+ if (mode & T4_FILTER_PORT)
+ fconf |= F_T7_PORT;
+ if (mode & T4_FILTER_FCoE)
+ fconf |= F_T7_FCOE;
+ if (mode & T4_FILTER_IPSECIDX)
+ fconf |= F_IPSECIDX;
+
+ return (fconf);
+}
+
+/*
+ * Input: driver's 32b filter mode.
+ * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
+ */
+static uint16_t
+mode_to_fconf(struct adapter *sc, uint32_t mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (mode_to_fconf_t7(mode));
+ else
+ return (mode_to_fconf_t4(mode));
+}
+
/*
* Input: driver's 32b filter mode.
* Returns: hardware vnic mode (ingress config) matching the input.
@@ -389,65 +426,100 @@ check_fspec_against_fconf_iconf(struct adapter *sc,
struct tp_params *tpp = &sc->params.tp;
uint32_t fconf = 0;
- if (fs->val.frag || fs->mask.frag)
- fconf |= F_FRAGMENTATION;
-
- if (fs->val.matchtype || fs->mask.matchtype)
- fconf |= F_MPSHITTYPE;
-
- if (fs->val.macidx || fs->mask.macidx)
- fconf |= F_MACMATCH;
-
- if (fs->val.ethtype || fs->mask.ethtype)
- fconf |= F_ETHERTYPE;
-
- if (fs->val.proto || fs->mask.proto)
- fconf |= F_PROTOCOL;
-
- if (fs->val.tos || fs->mask.tos)
- fconf |= F_TOS;
-
- if (fs->val.vlan_vld || fs->mask.vlan_vld)
- fconf |= F_VLAN;
-
- if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
- if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (fs->val.tcpflags || fs->mask.tcpflags)
+ fconf |= F_TCPFLAGS;
+ if (fs->val.synonly || fs->mask.synonly)
+ fconf |= F_SYNONLY;
+ if (fs->val.roce || fs->mask.roce)
+ fconf |= F_ROCE;
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_T7_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_T7_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_T7_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_T7_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_T7_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_T7_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_T7_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
#ifdef notyet
- if (fs->val.encap_vld || fs->mask.encap_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+#endif
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_T7_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_T7_FCOE;
+ if (fs->val.ipsecidx || fs->mask.ipsecidx)
+ fconf |= F_IPSECIDX;
+ } else {
+ if (fs->val.tcpflags || fs->mask.tcpflags ||
+ fs->val.synonly || fs->mask.synonly ||
+ fs->val.roce || fs->mask.roce ||
+ fs->val.ipsecidx || fs->mask.ipsecidx)
return (EINVAL);
- fconf |= F_VNIC_ID;
- }
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+#ifdef notyet
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
#endif
-
- if (fs->val.iport || fs->mask.iport)
- fconf |= F_PORT;
-
- if (fs->val.fcoe || fs->mask.fcoe)
- fconf |= F_FCOE;
-
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_FCOE;
+ }
if ((tpp->filter_mode | fconf) != tpp->filter_mode)
return (E2BIG);
return (0);
}
-/*
- * Input: hardware filter configuration (filter mode/mask, ingress config).
- * Input: driver's 32b filter mode matching the input.
- */
static uint32_t
-fconf_to_mode(uint16_t hwmode, int vnic_mode)
+fconf_to_mode_t4(uint16_t hwmode, int vnic_mode)
{
uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
@@ -488,6 +560,69 @@ fconf_to_mode(uint16_t hwmode, int vnic_mode)
return (mode);
}
+static uint32_t
+fconf_to_mode_t7(uint16_t hwmode, int vnic_mode)
+{
+ uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
+ T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
+
+ if (hwmode & F_TCPFLAGS)
+ mode |= T4_FILTER_TCPFLAGS;
+ if (hwmode & F_SYNONLY)
+ mode |= T4_FILTER_SYNONLY;
+ if (hwmode & F_ROCE)
+ mode |= T4_FILTER_ROCE;
+ if (hwmode & F_T7_FRAGMENTATION)
+ mode |= T4_FILTER_IP_FRAGMENT;
+ if (hwmode & F_T7_MPSHITTYPE)
+ mode |= T4_FILTER_MPS_HIT_TYPE;
+ if (hwmode & F_T7_MACMATCH)
+ mode |= T4_FILTER_MAC_IDX;
+ if (hwmode & F_T7_ETHERTYPE)
+ mode |= T4_FILTER_ETH_TYPE;
+ if (hwmode & F_T7_PROTOCOL)
+ mode |= T4_FILTER_IP_PROTO;
+ if (hwmode & F_T7_TOS)
+ mode |= T4_FILTER_IP_TOS;
+ if (hwmode & F_T7_VLAN)
+ mode |= T4_FILTER_VLAN;
+ if (hwmode & F_T7_VNIC_ID)
+ mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
+ if (hwmode & F_T7_PORT)
+ mode |= T4_FILTER_PORT;
+ if (hwmode & F_T7_FCOE)
+ mode |= T4_FILTER_FCoE;
+ if (hwmode & F_IPSECIDX)
+ mode |= T4_FILTER_IPSECIDX;
+
+ switch (vnic_mode) {
+ case FW_VNIC_MODE_PF_VF:
+ mode |= T4_FILTER_IC_VNIC;
+ break;
+ case FW_VNIC_MODE_ENCAP_EN:
+ mode |= T4_FILTER_IC_ENCAP;
+ break;
+ case FW_VNIC_MODE_OUTER_VLAN:
+ default:
+ break;
+ }
+
+ return (mode);
+}
+
+/*
+ * Input: hardware filter configuration (filter mode/mask, ingress config).
+ * Output: driver's 32b filter mode matching the input.
+ */
+static inline uint32_t
+fconf_to_mode(struct adapter *sc, uint16_t hwmode, int vnic_mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (fconf_to_mode_t7(hwmode, vnic_mode));
+ else
+ return (fconf_to_mode_t4(hwmode, vnic_mode));
+}
+
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
@@ -499,7 +634,7 @@ get_filter_mode(struct adapter *sc, uint32_t *mode)
/* Non-zero incoming value in mode means "hashfilter mode". */
filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
- *mode = fconf_to_mode(filter_mode, tp->vnic_mode);
+ *mode = fconf_to_mode(sc, filter_mode, tp->vnic_mode);
return (0);
}
@@ -512,7 +647,7 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
uint16_t fconf;
iconf = mode_to_iconf(mode);
- fconf = mode_to_fconf(mode);
+ fconf = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
return (0); /* Nothing to do */
@@ -554,7 +689,7 @@ set_filter_mask(struct adapter *sc, uint32_t mode)
uint16_t fmask;
iconf = mode_to_iconf(mode);
- fmask = mode_to_fconf(mode);
+ fmask = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
return (0); /* Nothing to do */
@@ -811,71 +946,138 @@ hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
struct tp_params *tp = &sc->params.tp;
uint16_t fmask;
- *ftuple = fmask = 0;
-
/*
* Initialize each of the fields which we care about which are present
* in the Compressed Filter Tuple.
*/
- if (tp->vlan_shift >= 0 && fs->mask.vlan) {
- *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
- tp->vlan_shift;
- fmask |= F_VLAN;
- }
-
- if (tp->port_shift >= 0 && fs->mask.iport) {
- *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
- fmask |= F_PORT;
- }
-
- if (tp->protocol_shift >= 0 && fs->mask.proto) {
- *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
- fmask |= F_PROTOCOL;
- }
-
- if (tp->tos_shift >= 0 && fs->mask.tos) {
- *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
- fmask |= F_TOS;
- }
-
- if (tp->vnic_shift >= 0 && fs->mask.vnic) {
- /* vnic_mode was already validated. */
- if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
- MPASS(fs->mask.pfvf_vld);
- else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
- MPASS(fs->mask.ovlan_vld);
+#define SFF(V, S) ((uint64_t)(V) << S) /* Shifted Filter Field. */
+ *ftuple = fmask = 0;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (tp->ipsecidx_shift >= 0 && fs->mask.ipsecidx) {
+ *ftuple |= SFF(fs->val.ipsecidx, tp->ipsecidx_shift);
+ fmask |= F_IPSECIDX;
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_T7_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_T7_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
#ifdef notyet
- else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
- MPASS(fs->mask.encap_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
#endif
- *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
- fmask |= F_VNIC_ID;
- }
-
- if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
- *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
- fmask |= F_MACMATCH;
- }
-
- if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
- *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
- fmask |= F_ETHERTYPE;
- }
-
- if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
- *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
- fmask |= F_MPSHITTYPE;
- }
-
- if (tp->frag_shift >= 0 && fs->mask.frag) {
- *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
- fmask |= F_FRAGMENTATION;
- }
-
- if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
- *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
- fmask |= F_FCOE;
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_T7_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_T7_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_T7_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_T7_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_T7_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_T7_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_T7_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_T7_FRAGMENTATION;
+ }
+ if (tp->roce_shift >= 0 && fs->mask.roce) {
+ *ftuple |= SFF(fs->val.roce, tp->roce_shift);
+ fmask |= F_ROCE;
+ }
+ if (tp->synonly_shift >= 0 && fs->mask.synonly) {
+ *ftuple |= SFF(fs->val.synonly, tp->synonly_shift);
+ fmask |= F_SYNONLY;
+ }
+ if (tp->tcpflags_shift >= 0 && fs->mask.tcpflags) {
+ *ftuple |= SFF(fs->val.tcpflags, tp->tcpflags_shift);
+ fmask |= F_TCPFLAGS;
+ }
+ } else {
+ if (fs->mask.ipsecidx || fs->mask.roce || fs->mask.synonly ||
+ fs->mask.tcpflags) {
+ MPASS(tp->ipsecidx_shift == -1);
+ MPASS(tp->roce_shift == -1);
+ MPASS(tp->synonly_shift == -1);
+ MPASS(tp->tcpflags_shift == -1);
+ return (EINVAL);
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
+#ifdef notyet
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
+#endif
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_FRAGMENTATION;
+ }
}
+#undef SFF
/* A hashfilter must conform to the hardware filter mask. */
if (fmask != tp->filter_mask)
@@ -1195,11 +1397,19 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
return (ENOMEM);
bzero(req, sizeof(*req));
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
- if (no_reply == 0) {
- req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
- V_NO_REPLY(0));
- } else
- req->reply_ctrl = htobe16(V_NO_REPLY(1));
+ if (no_reply) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = sc->sge.fwq.abs_id;
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1594,7 +1804,7 @@ static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
howmany(sizeof (struct cpl_act_open_req), 16),
howmany(sizeof (struct cpl_act_open_req6), 16)
@@ -1607,10 +1817,14 @@ act_open_cpl_len16(struct adapter *sc, int isipv6)
howmany(sizeof (struct cpl_t6_act_open_req), 16),
howmany(sizeof (struct cpl_t6_act_open_req6), 16)
},
+ {
+ howmany(sizeof (struct cpl_t7_act_open_req), 16),
+ howmany(sizeof (struct cpl_t7_act_open_req6), 16)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
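The size table gains a fourth row for the T7 open request and the clamp pins T7 and anything newer to it. A sketch of the mapping, given that chip ids are consecutive starting at CHELSIO_T4:

/*
 * chip_id       idx = min(chip_id - CHELSIO_T4, 3)    CPL used
 * CHELSIO_T4    0                                     cpl_act_open_req
 * CHELSIO_T5    1                                     cpl_t5_act_open_req
 * CHELSIO_T6    2                                     cpl_t6_act_open_req
 * CHELSIO_T7+   3 (clamped)                           cpl_t7_act_open_req
 */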
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
index ba9a17dbaddf..f7c8ee24d596 100644
--- a/sys/dev/cxgbe/t4_ioctl.h
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -64,6 +64,7 @@ enum {
T4_SET_FILTER_MASK, /* set filter mask (hashfilter mode) */
T4_HOLD_CLIP_ADDR, /* add ref on an IP in the CLIP */
T4_RELEASE_CLIP_ADDR, /* remove ref from an IP in the CLIP */
+ T4_GET_SGE_CTXT, /* get SGE context for a queue */
};
struct t4_reg {
@@ -119,6 +120,10 @@ struct t4_i2c_data {
#define T4_FILTER_MAC_IDX 0x2000 /* MPS MAC address match index */
#define T4_FILTER_MPS_HIT_TYPE 0x4000 /* MPS match type */
#define T4_FILTER_IP_FRAGMENT 0x8000 /* IP fragment */
+#define T4_FILTER_IPSECIDX 0x10000
+#define T4_FILTER_ROCE 0x20000
+#define T4_FILTER_SYNONLY 0x40000
+#define T4_FILTER_TCPFLAGS 0x80000
/*
* T4_FILTER_VNIC's real meaning depends on the ingress config.
*/
@@ -199,6 +204,10 @@ struct t4_filter_tuple {
uint32_t vlan_vld:1; /* VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN tag valid, value in "vnic" */
uint32_t pfvf_vld:1; /* VNIC id (PF/VF) valid, value in "vnic" */
+ uint32_t roce:1;
+ uint32_t synonly:1;
+ uint32_t tcpflags:6;
+ uint32_t ipsecidx:12;
};
struct t4_filter_specification {
@@ -322,6 +331,7 @@ struct t4_sched_queue {
};
#define T4_SGE_CONTEXT_SIZE 24
+#define T7_SGE_CONTEXT_SIZE 28
enum {
SGE_CONTEXT_EGRESS,
SGE_CONTEXT_INGRESS,
@@ -335,6 +345,12 @@ struct t4_sge_context {
uint32_t data[T4_SGE_CONTEXT_SIZE / 4];
};
+struct t4_sge_ctxt {
+ uint32_t mem_id;
+ uint32_t cid;
+ uint32_t data[T7_SGE_CONTEXT_SIZE / 4];
+};
+
struct t4_mem_range {
uint32_t addr;
uint32_t len;
@@ -444,4 +460,5 @@ struct t4_clip_addr {
#define CHELSIO_T4_SET_FILTER_MASK _IOW('f', T4_SET_FILTER_MASK, uint32_t)
#define CHELSIO_T4_HOLD_CLIP_ADDR _IOW('f', T4_HOLD_CLIP_ADDR, struct t4_clip_addr)
#define CHELSIO_T4_RELEASE_CLIP_ADDR _IOW('f', T4_RELEASE_CLIP_ADDR, struct t4_clip_addr)
+#define CHELSIO_T4_GET_SGE_CTXT _IOWR('f', T4_GET_SGE_CTXT, struct t4_sge_ctxt)
#endif
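A hedged userland sketch of the new ioctl. The device node name, and the assumption that mem_id/cid are inputs selecting the context type and queue id with the raw context returned in data[], are illustrative guesses rather than documented interface details:

#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>

#include "t4_ioctl.h"	/* assumed to be on the include path */

int
main(void)
{
	struct t4_sge_ctxt ctxt = { 0 };
	int fd, i;

	fd = open("/dev/t6nex0", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		err(1, "open");
	ctxt.mem_id = SGE_CONTEXT_EGRESS;	/* context type to fetch */
	ctxt.cid = 0;				/* queue/context id */
	if (ioctl(fd, CHELSIO_T4_GET_SGE_CTXT, &ctxt) < 0)
		err(1, "ioctl");
	for (i = 0; i < T7_SGE_CONTEXT_SIZE / 4; i++)
		printf("%08x ", ctxt.data[i]);
	printf("\n");
	return (0);
}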
diff --git a/sys/dev/cxgbe/t4_iov.c b/sys/dev/cxgbe/t4_iov.c
index bfd1613e9795..452ebaaf0172 100644
--- a/sys/dev/cxgbe/t4_iov.c
+++ b/sys/dev/cxgbe/t4_iov.c
@@ -119,6 +119,28 @@ struct {
{0x6085, "Chelsio T6240-SO 85"},
{0x6086, "Chelsio T6225-SO-CR 86"},
{0x6087, "Chelsio T6225-CR 87"},
+}, t7iov_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7000, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7001, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7002, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7003, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7004, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7005, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7006, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7007, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7008, "Chelsio T71400"}, /* 1 x 400G, nomem */
+ {0x7009, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x700a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x700b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x700c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x700d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x700e, "Chelsio S7450-OCP3"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x700f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7010, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7011, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ {0x7080, "Custom T7"},
};
static inline uint32_t
@@ -191,6 +213,26 @@ t6iov_probe(device_t dev)
}
static int
+chiov_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7iov_pciids); i++) {
+ if (d == t7iov_pciids[i].device) {
+ device_set_desc(dev, t7iov_pciids[i].desc);
+ device_quiet(dev);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
+static int
t4iov_attach(device_t dev)
{
struct t4iov_softc *sc;
@@ -460,6 +502,28 @@ static driver_t t6iov_driver = {
sizeof(struct t4iov_softc)
};
+static device_method_t chiov_methods[] = {
+ DEVMETHOD(device_probe, chiov_probe),
+ DEVMETHOD(device_attach, t4iov_attach),
+ DEVMETHOD(device_detach, t4iov_detach),
+
+#ifdef PCI_IOV
+ DEVMETHOD(pci_iov_init, t4iov_iov_init),
+ DEVMETHOD(pci_iov_uninit, t4iov_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, t4iov_add_vf),
+#endif
+
+ DEVMETHOD(t4_attach_child, t4iov_attach_child),
+ DEVMETHOD(t4_detach_child, t4iov_detach_child),
+
+ DEVMETHOD_END
+};
+
+static driver_t chiov_driver = {
+ "chiov",
+ chiov_methods,
+ sizeof(struct t4iov_softc)
+};
DRIVER_MODULE(t4iov, pci, t4iov_driver, 0, 0);
MODULE_VERSION(t4iov, 1);
@@ -468,3 +532,6 @@ MODULE_VERSION(t5iov, 1);
DRIVER_MODULE(t6iov, pci, t6iov_driver, 0, 0);
MODULE_VERSION(t6iov, 1);
+
+DRIVER_MODULE(chiov, pci, chiov_driver, 0, 0);
+MODULE_VERSION(chiov, 1);
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
index b1307bf2ace5..5f9c26a0f720 100644
--- a/sys/dev/cxgbe/t4_l2t.c
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -119,7 +119,7 @@ find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac
first_free = e;
} else if (e->state == L2T_STATE_SWITCHING &&
memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
- e->vlan == vlan && e->lport == port)
+ e->vlan == vlan && e->hw_port == port)
return (e); /* Found existing entry that matches. */
}
@@ -156,7 +156,7 @@ mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
- req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
+ req->params = htons(V_L2T_W_PORT(e->hw_port) | V_L2T_W_NOREPLY(!reply));
req->l2t_idx = htons(idx);
req->vlan = htons(e->vlan);
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
@@ -227,7 +227,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
e = &d->l2tab[i];
if (e->state != L2T_STATE_TLS)
continue;
- if (e->vlan == vlan && e->lport == port &&
+ if (e->vlan == vlan && e->hw_port == port &&
e->wrq == (struct sge_wrq *)txq &&
memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
@@ -263,7 +263,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
/* Initialize the entry. */
e->state = L2T_STATE_TLS;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
e->iqid = sc->sge.fwq.abs_id;
e->wrq = (struct sge_wrq *)txq;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
@@ -303,7 +303,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e->iqid = sc->sge.fwq.abs_id;
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
atomic_store_rel_int(&e->refcnt, 1);
atomic_subtract_int(&d->nfree, 1);
@@ -313,7 +313,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e = NULL;
} else {
MPASS(e->vlan == vlan);
- MPASS(e->lport == port);
+ MPASS(e->hw_port == port);
atomic_add_int(&e->refcnt, 1);
}
}
@@ -488,7 +488,7 @@ sysctl_l2t(SYSCTL_HANDLER_ARGS)
" %u %2u %c %5u %s",
e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
e->dmac[3], e->dmac[4], e->dmac[5],
- e->vlan & 0xfff, vlan_prio(e), e->lport,
+ e->vlan & 0xfff, vlan_prio(e), e->hw_port,
l2e_state(e), atomic_load_acq_int(&e->refcnt),
e->ifp ? if_name(e->ifp) : "-");
skip:
diff --git a/sys/dev/cxgbe/t4_l2t.h b/sys/dev/cxgbe/t4_l2t.h
index 13e085bb7467..989d2d5ec8f3 100644
--- a/sys/dev/cxgbe/t4_l2t.h
+++ b/sys/dev/cxgbe/t4_l2t.h
@@ -71,7 +71,7 @@ struct l2t_entry {
volatile int refcnt; /* entry reference count */
uint16_t hash; /* hash bucket the entry is on */
uint8_t ipv6; /* entry is for an IPv6 address */
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
uint8_t dmac[ETHER_ADDR_LEN]; /* next hop's MAC address */
};
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 9e91250cb61c..22d2f504c257 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -241,6 +240,45 @@ static driver_t vcc_driver = {
sizeof(struct vi_info)
};
+/* T7+ bus driver interface */
+static int ch_probe(device_t);
+static device_method_t ch_methods[] = {
+ DEVMETHOD(device_probe, ch_probe),
+ DEVMETHOD(device_attach, t4_attach),
+ DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
+
+ DEVMETHOD(bus_child_location, t4_child_location),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
+
+ DEVMETHOD(t4_is_main_ready, t4_ready),
+ DEVMETHOD(t4_read_port_device, t4_read_port_device),
+
+ DEVMETHOD_END
+};
+static driver_t ch_driver = {
+ "chnex",
+ ch_methods,
+ sizeof(struct adapter)
+};
+
+
+/* T7+ port (che) interface */
+static driver_t che_driver = {
+ "che",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
+/* T7+ VI (vche) interface */
+static driver_t vche_driver = {
+ "vche",
+ vcxgbe_methods,
+ sizeof(struct vi_info)
+};
+
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
@@ -519,6 +557,9 @@ static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
"Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
+static const char *
+t4_fec_bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2\6auto\7module";
+
/*
* Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
* issues to the firmware. If the firmware doesn't support FORCE_FEC then the
@@ -570,6 +611,10 @@ static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
&t4_switchcaps_allowed, 0, "Default switch capabilities");
+static int t4_nvmecaps_allowed = 0;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nvmecaps_allowed, CTLFLAG_RDTUN,
+ &t4_nvmecaps_allowed, 0, "Default NVMe capabilities");
+
#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
@@ -716,6 +761,14 @@ SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
0, "Attempt to combine TCB field updates with TLS record work requests.");
+
+static int t4_tls_short_records = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, short_records, CTLFLAG_RDTUN,
+ &t4_tls_short_records, 0, "Use cipher-only mode for short records.");
+
+static int t4_tls_partial_ghash = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, partial_ghash, CTLFLAG_RDTUN,
+ &t4_tls_partial_ghash, 0, "Use partial GHASH for AES-GCM records.");
#endif
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
@@ -809,17 +862,20 @@ static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
+static int sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
-static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_ibq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
@@ -831,6 +887,7 @@ static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
+static int sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
@@ -855,7 +912,7 @@ static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
-static int get_sge_context(struct adapter *, struct t4_sge_context *);
+static int get_sge_context(struct adapter *, int, uint32_t, int, uint32_t *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
@@ -960,6 +1017,29 @@ struct {
{0x6485, "Custom T6240-SO"},
{0x6486, "Custom T6225-SO-CR"},
{0x6487, "Custom T6225-CR"},
+}, t7_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7400, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7401, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7402, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7403, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7404, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7405, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7406, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7407, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7408, "Chelsio S71400"}, /* 1 x 400G, nomem */
+ {0x7409, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x740a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x740b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x740c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x740d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x740e, "Chelsio S7450-OCP3"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x740f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7410, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7411, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ /* Custom */
+ {0x7480, "Custom T7"},
};
#ifdef TCP_OFFLOAD
@@ -1042,6 +1122,31 @@ t6_probe(device_t dev)
return (ENXIO);
}
+static int
+ch_probe(device_t dev)
+{
+ int i;
+ uint16_t v = pci_get_vendor(dev);
+ uint16_t d = pci_get_device(dev);
+ uint8_t f = pci_get_function(dev);
+
+ if (v != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ /* Attach only to PF0 of the FPGA */
+ if (d == 0xd000 && f != 0)
+ return (ENXIO);
+
+ for (i = 0; i < nitems(t7_pciids); i++) {
+ if (d == t7_pciids[i].device) {
+ device_set_desc(dev, t7_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
static void
t5_attribute_workaround(device_t dev)
{
@@ -1091,6 +1196,13 @@ static const struct devnames devnames[] = {
.pf03_drv_name = "t6iov",
.vf_nexus_name = "t6vf",
.vf_ifnet_name = "ccv"
+ }, {
+ .nexus_name = "chnex",
+ .ifnet_name = "che",
+ .vi_ifnet_name = "vche",
+ .pf03_drv_name = "chiov",
+ .vf_nexus_name = "chvf",
+ .vf_ifnet_name = "chev"
}
};
@@ -1100,12 +1212,13 @@ t4_init_devnames(struct adapter *sc)
int id;
id = chip_id(sc);
- if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
- sc->names = &devnames[id - CHELSIO_T4];
- else {
+ if (id < CHELSIO_T4) {
device_printf(sc->dev, "chip id %d is not supported.\n", id);
sc->names = NULL;
- }
+ } else if (id - CHELSIO_T4 < nitems(devnames))
+ sc->names = &devnames[id - CHELSIO_T4];
+ else
+ sc->names = &devnames[nitems(devnames) - 1];
}
static int
@@ -1277,6 +1390,7 @@ t4_attach(device_t dev)
goto done; /* error message displayed already */
memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+ memset(sc->port_map, 0xff, sizeof(sc->port_map));
/* Prepare the adapter for operation. */
buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
@@ -1309,7 +1423,7 @@ t4_attach(device_t dev)
* will work even in "recovery mode".
*/
setup_memwin(sc);
- if (t4_init_devlog_params(sc, 0) == 0)
+ if (t4_init_devlog_ncores_params(sc, 0) == 0)
fixup_devlog_params(sc);
make_dev_args_init(&mda);
mda.mda_devsw = &t4_cdevsw;
@@ -1407,14 +1521,16 @@ t4_attach(device_t dev)
}
if (is_bt(pi->port_type))
- setbit(&sc->bt_map, pi->tx_chan);
+ setbit(&sc->bt_map, pi->hw_port);
else
- MPASS(!isset(&sc->bt_map, pi->tx_chan));
+ MPASS(!isset(&sc->bt_map, pi->hw_port));
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
device_get_nameunit(dev), i);
mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
- sc->chan_map[pi->tx_chan] = i;
+ for (j = 0; j < sc->params.tp.lb_nchan; j++)
+ sc->chan_map[pi->tx_chan + j] = i;
+ sc->port_map[pi->hw_port] = i;
/*
* The MPS counter for FCS errors doesn't work correctly on the
@@ -1424,10 +1540,8 @@ t4_attach(device_t dev)
*/
if (is_t6(sc))
pi->fcs_reg = -1;
- else {
- pi->fcs_reg = t4_port_reg(sc, pi->tx_chan,
- A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
- }
+ else
+ pi->fcs_reg = A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L;
pi->fcs_base = 0;
/* All VIs on this port share this media. */
@@ -1467,6 +1581,7 @@ t4_attach(device_t dev)
sc->intr_count = iaq.nirq;
s = &sc->sge;
+ s->nctrlq = max(sc->params.nports, sc->params.ncores);
s->nrxq = nports * iaq.nrxq;
s->ntxq = nports * iaq.ntxq;
if (num_vis > 1) {
@@ -1521,7 +1636,7 @@ t4_attach(device_t dev)
MPASS(s->niq <= s->iqmap_sz);
MPASS(s->neq <= s->eqmap_sz);
- s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
+ s->ctrlq = malloc(s->nctrlq * sizeof(struct sge_wrq), M_CXGBE,
M_ZERO | M_WAITOK);
s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
M_ZERO | M_WAITOK);
@@ -1548,6 +1663,7 @@ t4_attach(device_t dev)
if (sc->vres.key.size != 0)
sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
+ t4_init_tpt(sc);
/*
* Second pass over the ports. This time we know the number of rx and
@@ -1849,6 +1965,7 @@ t4_detach_common(device_t dev)
#endif
if (sc->key_map)
vmem_destroy(sc->key_map);
+ t4_free_tpt(sc);
#ifdef INET6
t4_destroy_clip_table(sc);
#endif
@@ -2156,6 +2273,7 @@ struct adapter_pre_reset_state {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -2187,6 +2305,7 @@ save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
o->nbmcaps = sc->nbmcaps;
o->linkcaps = sc->linkcaps;
o->switchcaps = sc->switchcaps;
+ o->nvmecaps = sc->nvmecaps;
o->niccaps = sc->niccaps;
o->toecaps = sc->toecaps;
o->rdmacaps = sc->rdmacaps;
@@ -2225,6 +2344,7 @@ compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
COMPARE_CAPS(nbm);
COMPARE_CAPS(link);
COMPARE_CAPS(switch);
+ COMPARE_CAPS(nvme);
COMPARE_CAPS(nic);
COMPARE_CAPS(toe);
COMPARE_CAPS(rdma);
@@ -2417,11 +2537,7 @@ restart_lld(struct adapter *sc)
}
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ?
- A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL,
- V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -3407,7 +3523,7 @@ cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
if (is_t6(vi->pi->adapter))
error = t6_tls_tag_alloc(ifp, params, pt);
else
- error = EOPNOTSUPP;
+ error = t7_tls_tag_alloc(ifp, params, pt);
break;
}
#endif
@@ -3534,6 +3650,8 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_CR2_QSFP:
case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_SFP56:
+ case FW_PORT_TYPE_QSFP56:
/* Pluggable transceiver */
switch (pi->mod_type) {
case FW_PORT_MOD_TYPE_LR:
@@ -3551,6 +3669,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_LR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_LR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_LR4);
}
break;
case FW_PORT_MOD_TYPE_SR:
@@ -3567,6 +3687,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_SR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_SR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_SR4);
}
break;
case FW_PORT_MOD_TYPE_ER:
@@ -3588,6 +3710,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_CR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_CR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_CR4_PAM4);
}
break;
case FW_PORT_MOD_TYPE_LRM:
@@ -3597,6 +3721,8 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_MOD_TYPE_DR:
if (speed == FW_PORT_CAP32_SPEED_100G)
return (IFM_100G_DR);
+ if (speed == FW_PORT_CAP32_SPEED_200G)
+ return (IFM_200G_DR4);
break;
case FW_PORT_MOD_TYPE_NA:
MPASS(0); /* Not pluggable? */
@@ -3684,7 +3810,7 @@ alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
("%s: VI %s doesn't have a MAC func", __func__,
device_get_nameunit(vi->dev)));
func = vi_mac_funcs[index];
- rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
+ rc = t4_alloc_vi_func(sc, sc->mbox, pi->hw_port, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
if (rc < 0) {
CH_ERR(vi, "failed to allocate virtual interface %d"
@@ -3954,7 +4080,7 @@ setup_memwin(struct adapter *sc)
const struct memwin_init *mw_init;
struct memwin *mw;
int i;
- uint32_t bar0;
+ uint32_t bar0, reg;
if (is_t4(sc)) {
/*
@@ -3982,9 +4108,10 @@ setup_memwin(struct adapter *sc)
mw->mw_aperture = mw_init->aperture;
mw->mw_curpos = 0;
}
- t4_write_reg(sc,
- PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
- (mw->mw_base + bar0) | V_BIR(0) |
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_T7_PCIE_MEM_ACCESS_BASE_WIN, i) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i);
+ t4_write_reg(sc, reg, (mw->mw_base + bar0) | V_BIR(0) |
V_WINDOW(ilog2(mw->mw_aperture) - 10));
rw_wlock(&mw->mw_lock);
position_memwin(sc, i, mw->mw_curpos);
@@ -3992,7 +4119,7 @@ setup_memwin(struct adapter *sc)
}
/* flush */
- t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
+ t4_read_reg(sc, reg);
}
/*
@@ -4005,8 +4132,7 @@ static void
position_memwin(struct adapter *sc, int idx, uint32_t addr)
{
struct memwin *mw;
- uint32_t pf;
- uint32_t reg;
+ uint32_t pf, reg, val;
MPASS(idx >= 0 && idx < NUM_MEMWIN);
mw = &sc->memwin[idx];
@@ -4019,8 +4145,14 @@ position_memwin(struct adapter *sc, int idx, uint32_t addr)
pf = V_PFNUM(sc->pf);
mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
}
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
- t4_write_reg(sc, reg, mw->mw_curpos | pf);
+ if (chip_id(sc) > CHELSIO_T6) {
+ reg = PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, idx);
+ val = (mw->mw_curpos >> X_T7_MEMOFST_SHIFT) | pf;
+ } else {
+ reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
+ val = mw->mw_curpos | pf;
+ }
+ t4_write_reg(sc, reg, val);
t4_read_reg(sc, reg); /* flush */
}
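On T7 the window position is written right-shifted by X_T7_MEMOFST_SHIFT, i.e. the offset register holds coarser-than-byte units, while older chips store the byte offset directly. A minimal sketch of the two encodings, with an assumed shift value:

#include <stdint.h>

#define X_T7_MEMOFST_SHIFT	4	/* assumed value, illustration only */

/* Illustration: how the two branches encode the same window position. */
static uint32_t
memwin_offset_val(uint32_t curpos, uint32_t pf, int is_t7)
{
	if (is_t7)
		return ((curpos >> X_T7_MEMOFST_SHIFT) | pf);	/* scaled units */
	return (curpos | pf);	/* T4-T6: byte offset OR'd with the PF field */
}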
@@ -4453,8 +4585,27 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
iaq->nrxq_vi = t4_nrxq_vi;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
if (is_offload(sc) || is_ethoffload(sc)) {
- iaq->nofldtxq = t4_nofldtxq;
- iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ if (sc->params.tid_qid_sel_mask == 0) {
+ iaq->nofldtxq = t4_nofldtxq;
+ iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ } else {
+ iaq->nofldtxq = roundup(t4_nofldtxq, sc->params.ncores);
+ iaq->nofldtxq_vi = roundup(t4_nofldtxq_vi,
+ sc->params.ncores);
+ if (iaq->nofldtxq != t4_nofldtxq)
+ device_printf(sc->dev,
+ "nofldtxq updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq, iaq->nofldtxq,
+ sc->params.ncores);
+ if (iaq->num_vis > 1 &&
+ iaq->nofldtxq_vi != t4_nofldtxq_vi)
+ device_printf(sc->dev,
+ "nofldtxq_vi updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq_vi, iaq->nofldtxq_vi,
+ sc->params.ncores);
+ }
}
#endif
#ifdef TCP_OFFLOAD
@@ -4555,6 +4706,10 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
if (iaq->nofldrxq > 0) {
iaq->nofldrxq = 1;
- iaq->nofldtxq = 1;
+ if (sc->params.tid_qid_sel_mask == 0)
+ iaq->nofldtxq = 1;
+ else
+ iaq->nofldtxq = sc->params.ncores;
}
iaq->nnmtxq = 0;
iaq->nnmrxq = 0;
@@ -4567,9 +4722,10 @@ done:
MPASS(iaq->nirq > 0);
MPASS(iaq->nrxq > 0);
MPASS(iaq->ntxq > 0);
- if (itype == INTR_MSI) {
+ if (itype == INTR_MSI)
MPASS(powerof2(iaq->nirq));
- }
+ if (sc->params.tid_qid_sel_mask != 0)
+ MPASS(iaq->nofldtxq % sc->params.ncores == 0);
}
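The roundup() used above is the standard <sys/param.h> macro that rounds up to the next multiple of its second argument, which is what makes the MPASS at the end hold. A small worked example (the queue counts are placeholders):

/* roundup() as defined in <sys/param.h>. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

/*
 * With 4 firmware cores and a hypothetical tunable of 10 offload txqs,
 * the driver bumps the count to 12 so each core gets the same share.
 */
_Static_assert(roundup(10, 4) == 12, "10 rounds up to 12");
_Static_assert(roundup(12, 4) == 12, "a multiple is left unchanged");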
static int
@@ -4711,6 +4867,22 @@ struct fw_info {
.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
.intfver_fcoe = FW_INTFVER(T6, FCOE),
},
+ }, {
+ .chip = CHELSIO_T7,
+ .kld_name = "t7fw_cfg",
+ .fw_mod_name = "t7fw",
+ .fw_h = {
+ .chip = FW_HDR_CHIP_T7,
+ .fw_ver = htobe32(FW_VERSION(T7)),
+ .intfver_nic = FW_INTFVER(T7, NIC),
+ .intfver_vnic = FW_INTFVER(T7, VNIC),
+ .intfver_ofld = FW_INTFVER(T7, OFLD),
+ .intfver_ri = FW_INTFVER(T7, RI),
+ .intfver_iscsipdu = FW_INTFVER(T7, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T7, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T7, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T7, FCOE),
+ },
}
};
@@ -5032,7 +5204,7 @@ done:
static int
copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
- uint32_t mtype, uint32_t moff)
+ uint32_t mtype, uint32_t moff, u_int maxlen)
{
struct fw_info *fw_info;
const struct firmware *dcfg, *rcfg = NULL;
@@ -5084,10 +5256,10 @@ copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
cflen = rcfg->datasize & ~3;
}
- if (cflen > FLASH_CFG_MAX_SIZE) {
+ if (cflen > maxlen) {
device_printf(sc->dev,
"config file too long (%d, max allowed is %d).\n",
- cflen, FLASH_CFG_MAX_SIZE);
+ cflen, maxlen);
rc = EINVAL;
goto done;
}
@@ -5112,6 +5284,7 @@ struct caps_allowed {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -5139,6 +5312,8 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
int rc;
struct fw_caps_config_cmd caps;
uint32_t mtype, moff, finicsum, cfcsum, param, val;
+ unsigned int maxlen = 0;
+ const int cfg_addr = t4_flash_cfg_addr(sc, &maxlen);
rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
if (rc != 0) {
@@ -5155,7 +5330,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
mtype = FW_MEMTYPE_FLASH;
- moff = t4_flash_cfg_addr(sc);
+ moff = cfg_addr;
caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
@@ -5179,7 +5354,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
FW_LEN16(caps));
- rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
+ rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff, maxlen);
if (rc != 0) {
device_printf(sc->dev,
"failed to upload config file to card: %d.\n", rc);
@@ -5213,6 +5388,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
LIMIT_CAPS(nbm);
LIMIT_CAPS(link);
LIMIT_CAPS(switch);
+ LIMIT_CAPS(nvme);
LIMIT_CAPS(nic);
LIMIT_CAPS(toe);
LIMIT_CAPS(rdma);
@@ -5278,6 +5454,7 @@ partition_resources(struct adapter *sc)
COPY_CAPS(nbm);
COPY_CAPS(link);
COPY_CAPS(switch);
+ COPY_CAPS(nvme);
COPY_CAPS(nic);
COPY_CAPS(toe);
COPY_CAPS(rdma);
@@ -5354,7 +5531,7 @@ get_params__pre_init(struct adapter *sc)
sc->params.vpd.cclk = val[1];
/* Read device log parameters. */
- rc = -t4_init_devlog_params(sc, 1);
+ rc = -t4_init_devlog_ncores_params(sc, 1);
if (rc == 0)
fixup_devlog_params(sc);
else {
@@ -5508,6 +5685,14 @@ get_params__post_init(struct adapter *sc)
}
}
+ if (sc->params.ncores > 1) {
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ param[0] = FW_PARAM_DEV(TID_QID_SEL_MASK);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ sc->params.tid_qid_sel_mask = rc == 0 ? val[0] : 0;
+ }
+
/*
* The parameters that follow may not be available on all firmwares. We
* query them individually rather than in a compound query because old
@@ -5533,6 +5718,14 @@ get_params__post_init(struct adapter *sc)
else
sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */
+ param[0] = FW_PARAM_DEV(TX_TPCHMAP);
+ val[0] = 0;
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ if (rc == 0)
+ sc->params.tx_tp_ch_map = val[0];
+ else
+ sc->params.tx_tp_ch_map = UINT32_MAX; /* Not a legal value. */
+
/*
* Determine whether the firmware supports the filter2 work request.
*/
@@ -5604,6 +5797,7 @@ get_params__post_init(struct adapter *sc)
READ_CAPS(nbmcaps);
READ_CAPS(linkcaps);
READ_CAPS(switchcaps);
+ READ_CAPS(nvmecaps);
READ_CAPS(niccaps);
READ_CAPS(toecaps);
READ_CAPS(rdmacaps);
@@ -5946,9 +6140,13 @@ set_params__post_init(struct adapter *sc)
#ifdef KERN_TLS
if (is_ktls(sc)) {
sc->tlst.inline_keys = t4_tls_inline_keys;
- sc->tlst.combo_wrs = t4_tls_combo_wrs;
- if (t4_kern_tls != 0 && is_t6(sc))
+ if (t4_kern_tls != 0 && is_t6(sc)) {
+ sc->tlst.combo_wrs = t4_tls_combo_wrs;
t6_config_kern_tls(sc, true);
+ } else {
+ sc->tlst.short_records = t4_tls_short_records;
+ sc->tlst.partial_ghash = t4_tls_partial_ghash;
+ }
}
#endif
return (0);
@@ -6220,7 +6418,7 @@ apply_link_config(struct port_info *pi)
MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
#endif
if (!(sc->flags & IS_VF)) {
- rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+ rc = -t4_link_l1cfg(sc, sc->mbox, pi->hw_port, lc);
if (rc != 0) {
device_printf(pi->dev, "l1cfg failed: %d\n", rc);
return (rc);
@@ -6581,9 +6779,7 @@ cxgbe_init_synchronized(struct vi_info *vi)
*/
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -7443,7 +7639,7 @@ cxgbe_refresh_stats(struct vi_info *vi)
pi = vi->pi;
sc = vi->adapter;
tnl_cong_drops = 0;
- t4_get_port_stats(sc, pi->port_id, &pi->stats);
+ t4_get_port_stats(sc, pi->hw_port, &pi->stats);
chan_map = pi->rx_e_chan_map;
while (chan_map) {
i = ffs(chan_map) - 1;
@@ -7481,6 +7677,150 @@ vi_tick(void *arg)
callout_schedule(&vi->tick, hz);
}
+/* CIM inbound queues */
+static const char *t4_ibq[CIM_NUM_IBQ] = {
+ "ibq_tp0", "ibq_tp1", "ibq_ulp", "ibq_sge0", "ibq_sge1", "ibq_ncsi"
+};
+static const char *t7_ibq[CIM_NUM_IBQ_T7] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ "ibq_sge1", "ibq_ncsi", NULL, "ibq_ipc1", "ibq_ipc2", "ibq_ipc3",
+ "ibq_ipc4", "ibq_ipc5", "ibq_ipc6", "ibq_ipc7"
+};
+static const char *t7_ibq_sec[] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ NULL, NULL, NULL, "ibq_ipc0"
+};
+
+/* CIM outbound queues */
+static const char *t4_obq[CIM_NUM_OBQ_T5] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", "obq_sge_rx_q1" /* These two are T5/T6 only */
+};
+static const char *t7_obq[CIM_NUM_OBQ_T7] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc1", "obq_ipc2", "obq_ipc3",
+ "obq_ipc4", "obq_ipc5", "obq_ipc6", "obq_ipc7"
+};
+static const char *t7_obq_sec[] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", NULL,
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc0"
+};
+
+static void
+cim_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *c0)
+{
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children1;
+ int i, j, qcount;
+ char s[16];
+ const char **qname;
+
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "cim",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM block");
+ c0 = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_U8(ctx, c0, OID_AUTO, "ncores", CTLFLAG_RD, NULL,
+ sc->params.ncores, "# of active CIM cores");
+
+ for (i = 0; i < sc->params.ncores; i++) {
+ snprintf(s, sizeof(s), "%u", i);
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, s,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM core");
+ children1 = SYSCTL_CHILDREN(oid);
+
+ /*
+ * CTLFLAG_SKIP because the misc.devlog sysctl already displays
+ * the log for all cores. Use this sysctl to get the log for a
+ * particular core only.
+ */
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "devlog",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
+ sc, i, sysctl_devlog, "A", "firmware's device log");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "loadavg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_loadavg, "A",
+ "microprocessor load averages (select firmwares only)");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "qcfg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ chip_id(sc) > CHELSIO_T6 ? sysctl_cim_qcfg_t7 : sysctl_cim_qcfg,
+ "A", "Queue configuration");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_la, "A", "Logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "ma_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "pif_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
+
+ /* IBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = &t4_ibq[0];
+ qcount = nitems(t4_ibq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = &t7_ibq[0];
+ qcount = nitems(t7_ibq);
+ } else {
+ qname = &t7_ibq_sec[0];
+ qcount = nitems(t7_ibq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_ibq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_ibq, "A", NULL);
+ }
+
+ /* OBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ qname = t4_obq;
+ qcount = CIM_NUM_OBQ;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = t4_obq;
+ qcount = nitems(t4_obq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = t7_obq;
+ qcount = nitems(t7_obq);
+ } else {
+ qname = t7_obq_sec;
+ qcount = nitems(t7_obq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_obq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_obq, "A", NULL);
+ }
+ }
+}
+
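Each per-queue sysctl created above packs the CIM core index and the queue index into the single sysctl arg2 as (i << 16) | j; the handlers below unpack it the same way, with the queue id in the low 16 bits and the core id above them. A tiny sketch of the round trip:

/* Round trip of the (core, queue) encoding used by cim_sysctls(). */
static inline int
cim_arg2(int coreid, int qid)
{
	return ((coreid << 16) | qid);	/* queue in the low 16 bits */
}

static inline void
cim_arg2_decode(int arg2, int *coreid, int *qid)
{
	*qid = arg2 & 0xffff;	/* matches sysctl_cim_ibq()/sysctl_cim_obq() */
	*coreid = arg2 >> 16;
}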
/*
* Should match fw_caps_config_<foo> enums in t4fw_interface.h
*/
@@ -7490,17 +7830,18 @@ static char *caps_decoder[] = {
"\20\001INGRESS\002EGRESS", /* 2: switch */
"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
"\006HASHFILTER\007ETHOFLD",
- "\20\001TOE", /* 4: TOE */
- "\20\001RDDP\002RDMAC", /* 5: RDMA */
+ "\20\001TOE\002SENDPATH", /* 4: TOE */
+ "\20\001RDDP\002RDMAC\003ROCEv2", /* 5: RDMA */
"\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
"\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
"\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
"\007T10DIF"
"\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
"\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */
- "\004TLS_HW",
+ "\004TLS_HW,\005TOE_IPSEC",
"\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
"\004PO_INITIATOR\005PO_TARGET",
+ "\20\001NVMe_TCP", /* 9: NVMe */
};
void
@@ -7605,6 +7946,7 @@ t4_sysctls(struct adapter *sc)
SYSCTL_CAP(nbmcaps, 0, "NBM");
SYSCTL_CAP(linkcaps, 1, "link");
SYSCTL_CAP(switchcaps, 2, "switch");
+ SYSCTL_CAP(nvmecaps, 9, "NVMe");
SYSCTL_CAP(niccaps, 3, "NIC");
SYSCTL_CAP(toecaps, 4, "TCP offload");
SYSCTL_CAP(rdmacaps, 5, "RDMA");
@@ -7623,11 +7965,6 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_loadavg, "A",
- "microprocessor load averages (debug firmwares only)");
-
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
"I", "core Vdd (in mV)");
@@ -7659,81 +7996,7 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
sysctl_cctrl, "A", "congestion control");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_la, "A", "CIM logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
-
- if (chip_id(sc) > CHELSIO_T4) {
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 6 (SGE0-RX)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 7 (SGE1-RX)");
- }
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_qcfg, "A", "CIM queue configuration");
+ cim_sysctls(sc, ctx, children);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7748,8 +8011,8 @@ t4_sysctls(struct adapter *sc)
sysctl_tid_stats, "A", "tid stats");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_devlog, "A", "firmware's device log");
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, -1,
+ sysctl_devlog, "A", "firmware's device log (all cores)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7783,7 +8046,8 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
+ chip_id(sc) >= CHELSIO_T7 ? sysctl_mps_tcam_t7 :
+ (chip_id(sc) >= CHELSIO_T6 ? sysctl_mps_tcam_t6 : sysctl_mps_tcam),
"A", "MPS TCAM entries");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
@@ -7855,6 +8119,14 @@ t4_sysctls(struct adapter *sc)
CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
"combine TCB field updates with TLS record work "
"requests.");
+ else {
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "short_records",
+ CTLFLAG_RW, &sc->tlst.short_records, 0,
+ "Use cipher-only mode for short records.");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "partial_ghash",
+ CTLFLAG_RW, &sc->tlst.partial_ghash, 0,
+ "Use partial GHASH for AES-GCM records.");
+ }
}
#endif
@@ -8230,86 +8502,112 @@ cxgbe_sysctls(struct port_info *pi)
&pi->tx_parse_error, 0,
"# of tx packets with invalid length or # of segments");
-#define T4_REGSTAT(name, stat, desc) \
- SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
- CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
- t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
- sysctl_handle_t4_reg64, "QU", desc)
-
-/* We get these from port_stats and they may be stale by up to 1s */
-#define T4_PORTSTAT(name, desc) \
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
- &pi->stats.name, desc)
-
- T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
- T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
- T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
- T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
- T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
- T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
- T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
- T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
- T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
- T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
- T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
- T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
-
- T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
- T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
+#define T4_LBSTAT(name, stat, desc) do { \
+ if (sc->params.tp.lb_mode) { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, \
+ A_MPS_PORT_STAT_##stat##_L, \
+ sysctl_handle_t4_portstat64, "QU", desc); \
+ } else { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
+ sysctl_handle_t4_reg64, "QU", desc); \
+ } \
+} while (0)
+
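The do { ... } while (0) wrapper is the usual trick for making a multi-statement macro behave as a single statement, so T4_LBSTAT() stays safe in unbraced if/else contexts. A minimal illustration (stmt_a/stmt_b are placeholders):

void stmt_a(void);
void stmt_b(void);

#define TWO_STMTS_BAD()		stmt_a(); stmt_b()
#define TWO_STMTS_GOOD()	do { stmt_a(); stmt_b(); } while (0)

void
example(int cond)
{
	if (cond)
		TWO_STMTS_GOOD();	/* both calls guarded by the if */
	else
		stmt_a();	/* TWO_STMTS_BAD() here would not even compile */
}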
+ T4_LBSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
+ T4_LBSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
+ T4_LBSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
+ T4_LBSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
+ T4_LBSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
+ T4_LBSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
+ T4_LBSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
+ T4_LBSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
+ T4_LBSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
+ T4_LBSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
+ T4_LBSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
+ T4_LBSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
+
+ T4_LBSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
+ T4_LBSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
if (is_t6(sc)) {
- T4_PORTSTAT(rx_fcs_err,
+ /* Read from port_stats and may be stale by up to 1s */
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rx_fcs_err",
+ CTLFLAG_RD, &pi->stats.rx_fcs_err,
"# of frames received with bad FCS since last link up");
} else {
- T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
+ T4_LBSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
"# of frames received with bad FCS");
}
- T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
- T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
- T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
- T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
- T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
- T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
- T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
- T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
- T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
- T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
- T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
- T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
- T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
-
- T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
- T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
- T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
- T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
- T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
- T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
- T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
- T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
+ T4_LBSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
+ T4_LBSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
+ T4_LBSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
+ T4_LBSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
+ T4_LBSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
+ T4_LBSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
+ T4_LBSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
+ T4_LBSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
+ T4_LBSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
+ T4_LBSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
+ T4_LBSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
+ T4_LBSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
+ T4_LBSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
+#undef T4_LBSTAT
+
+#define T4_REGSTAT(name, stat, desc) do { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ A_MPS_STAT_##stat##_L, sysctl_handle_t4_reg64, "QU", desc); \
+} while (0)
+ if (pi->mps_bg_map & 1) {
+ T4_REGSTAT(rx_ovflow0, RX_BG_0_MAC_DROP_FRAME,
+ "# drops due to buffer-group 0 overflows");
+ T4_REGSTAT(rx_trunc0, RX_BG_0_MAC_TRUNC_FRAME,
+ "# of buffer-group 0 truncated packets");
+ }
+ if (pi->mps_bg_map & 2) {
+ T4_REGSTAT(rx_ovflow1, RX_BG_1_MAC_DROP_FRAME,
+ "# drops due to buffer-group 1 overflows");
+ T4_REGSTAT(rx_trunc1, RX_BG_1_MAC_TRUNC_FRAME,
+ "# of buffer-group 1 truncated packets");
+ }
+ if (pi->mps_bg_map & 4) {
+ T4_REGSTAT(rx_ovflow2, RX_BG_2_MAC_DROP_FRAME,
+ "# drops due to buffer-group 2 overflows");
+ T4_REGSTAT(rx_trunc2, RX_BG_2_MAC_TRUNC_FRAME,
+ "# of buffer-group 2 truncated packets");
+ }
+ if (pi->mps_bg_map & 8) {
+ T4_REGSTAT(rx_ovflow3, RX_BG_3_MAC_DROP_FRAME,
+ "# drops due to buffer-group 3 overflows");
+ T4_REGSTAT(rx_trunc3, RX_BG_3_MAC_TRUNC_FRAME,
+ "# of buffer-group 3 truncated packets");
+ }
#undef T4_REGSTAT
-#undef T4_PORTSTAT
}
static int
@@ -8452,14 +8750,14 @@ sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
vi->flags |= TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
if (!(sc->flags & IS_VF))
npkt--;
} else {
vi->flags &= ~TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
}
for_each_txq(vi, i, txq) {
@@ -8669,13 +8967,12 @@ sysctl_link_fec(SYSCTL_HANDLER_ARGS)
struct link_config *lc = &pi->link_cfg;
int rc;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
if (lc->link_ok)
- sbuf_printf(sb, "%b", lc->fec, bits);
+ sbuf_printf(sb, "%b", lc->fec, t4_fec_bits);
else
sbuf_printf(sb, "no link");
rc = sbuf_finish(sb);
@@ -8695,14 +8992,12 @@ sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
if (req->newptr == NULL) {
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
- "\5RSVD3\6auto\7module";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
- sbuf_printf(sb, "%b", lc->requested_fec, bits);
+ sbuf_printf(sb, "%b", lc->requested_fec, t4_fec_bits);
rc = sbuf_finish(sb);
sbuf_delete(sb);
} else {
@@ -8771,7 +9066,6 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
int rc;
int8_t fec;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
@@ -8805,7 +9099,7 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
if (fec == 0)
fec = FEC_NONE;
PORT_UNLOCK(pi);
- sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
+ sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, t4_fec_bits);
}
rc = sbuf_finish(sb);
done:
@@ -8913,6 +9207,31 @@ sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int rc, i, reg = arg2;
+ uint64_t val;
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ val = 0;
+ for (i = 0; i < sc->params.tp.lb_nchan; i++) {
+ val += t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan + i, reg));
+ }
+ rc = 0;
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = sysctl_handle_64(oidp, &val, 0, req);
+ return (rc);
+}
+
+static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9012,15 +9331,20 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc;
uint32_t param, val;
+ uint8_t coreid = (uint8_t)arg2;
+
+ KASSERT(coreid < sc->params.ncores,
+ ("%s: bad coreid %u\n", __func__, coreid));
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
return (rc);
- if (hw_all_ok(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD) |
+ V_FW_PARAMS_PARAM_Y(coreid);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
}
end_synchronized_op(sc, 0);
@@ -9086,50 +9410,30 @@ done:
return (rc);
}
-static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
- "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
- "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
- "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
-};
-
static int
-sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
+sysctl_cim_ibq(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n, qid = arg2;
+ int rc, i, n, qid, coreid;
uint32_t *buf, *p;
- char *qtype;
- u_int cim_num_obq = sc->chip_params->cim_num_obq;
- KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
- ("%s: bad qid %d\n", __func__, qid));
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
- if (qid < CIM_NUM_IBQ) {
- /* inbound queue */
- qtype = "IBQ";
- n = 4 * CIM_IBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_ibq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- } else {
- /* outbound queue */
- qtype = "OBQ";
- qid -= CIM_NUM_IBQ;
- n = 4 * cim_num_obq * CIM_OBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_obq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- }
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_ibq,
+ ("%s: bad ibq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ n = 4 * CIM_IBQ_SIZE;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_ibq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
if (rc < 0) {
rc = -rc;
goto done;
@@ -9141,12 +9445,58 @@ sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
rc = ENOMEM;
goto done;
}
-
- sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
for (i = 0, p = buf; i < n; i += 16, p += 4)
sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
p[2], p[3]);
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+done:
+ free(buf, M_CXGBE);
+ return (rc);
+}
+
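
The arg2 word decoded above (and by sysctl_cim_obq below) packs the queue id into the low 16 bits and the CIM core into the upper bits. A registration sketch, with the node name and the coreid/qid variables as illustrative values:

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ibq_tp0",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
	    (coreid << 16) | qid, sysctl_cim_ibq, "A", "CIM IBQ (TP0)");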
+static int
+sysctl_cim_obq(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i, n, qid, coreid;
+ uint32_t *buf, *p;
+
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
+
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_obq,
+ ("%s: bad obq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
+ n = 6 * CIM_OBQ_SIZE * 4;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_obq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
+ if (rc < 0) {
+ rc = -rc;
+ goto done;
+ }
+ n = rc * sizeof(uint32_t); /* rc has # of words actually read */
+ rc = sysctl_wire_old_buffer(req, 0);
+ if (rc != 0)
+ goto done;
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL) {
+ rc = ENOMEM;
+ goto done;
+ }
+ for (i = 0, p = buf; i < n; i += 16, p += 4)
+ sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
+ p[2], p[3]);
rc = sbuf_finish(sb);
sbuf_delete(sb);
done:
@@ -9217,7 +9567,7 @@ sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
}
static int
-sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_cim_la(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
uint32_t cfg, *buf;
int rc;
@@ -9232,9 +9582,10 @@ sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else {
- rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ rc = -t4_cim_read_core(sc, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &cfg);
if (rc == 0)
- rc = -t4_cim_read_la(sc, buf, NULL);
+ rc = -t4_cim_read_la_core(sc, coreid, buf, NULL);
}
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
@@ -9251,6 +9602,7 @@ static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
+ int coreid = arg2;
struct sbuf *sb;
int rc;
@@ -9258,7 +9610,7 @@ sysctl_cim_la(SYSCTL_HANDLER_ARGS)
if (sb == NULL)
return (ENOMEM);
- rc = sbuf_cim_la(sc, sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, coreid, sb, M_WAITOK);
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9295,7 +9647,7 @@ dump_cimla(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_cim_la(sc, &sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, 0, &sb, M_WAITOK);
if (rc == 0) {
rc = sbuf_finish(&sb);
if (rc == 0) {
@@ -9419,6 +9771,13 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
+ static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
+ "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
+ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
+ };
+
+ MPASS(chip_id(sc) < CHELSIO_T7);
cim_num_obq = sc->chip_params->cim_num_obq;
if (is_t4(sc)) {
@@ -9471,6 +9830,104 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ u_int coreid = arg2;
+ struct sbuf *sb;
+ int rc, i;
+ u_int addr;
+ uint16_t base[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t size[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t thres[CIM_NUM_IBQ_T7];
+ uint32_t obq_wr[2 * CIM_NUM_OBQ_T7], *wr = obq_wr;
+ uint32_t stat[4 * (CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7)], *p = stat;
+ static const char * const qname_ibq_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "SGE1", "NC-SI",
+ "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5", "IPC6", "IPC7",
+ };
+ static const char * const qname_obq_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", "SGE0-RX",
+ "RSVD", "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5",
+ "IPC6", "IPC7"
+ };
+ static const char * const qname_ibq_sec_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "RSVD", "RSVD",
+ "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD",
+ };
+ static const char * const qname_obq_sec_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "RSVD", "SGE0-RX",
+ "RSVD", "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD",
+ "RSVD", "RSVD",
+ };
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_IBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_IBQ_T7, stat);
+ if (rc != 0)
+ goto unlock;
+
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_OBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_OBQ_T7,
+ &stat[4 * CIM_NUM_IBQ_T7]);
+ if (rc != 0)
+ goto unlock;
+
+ addr = A_T7_UP_OBQ_0_SHADOW_REALADDR;
+ for (i = 0; i < CIM_NUM_OBQ_T7 * 2; i++, addr += 8) {
+ rc = -t4_cim_read_core(sc, 1, coreid, addr, 1,
+ &obq_wr[i]);
+ if (rc != 0)
+ goto unlock;
+ }
+ t4_read_cimq_cfg_core(sc, coreid, base, size, thres);
+ }
+unlock:
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ return (rc);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb,
+ " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
+
+ for (i = 0; i < CIM_NUM_IBQ_T7; i++, p += 4) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
+ coreid == 0 ? qname_ibq_t7[i] : qname_ibq_sec_t7[i],
+ base[i], size[i], thres[i], G_IBQRDADDR(p[0]) & 0xfff,
+ G_IBQWRADDR(p[1]) & 0xfff, G_QUESOPCNT(p[3]),
+ G_QUEEOPCNT(p[3]), G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ for ( ; i < CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7; i++, p += 4, wr += 2) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u",
+ coreid == 0 ? qname_obq_t7[i - CIM_NUM_IBQ_T7] :
+ qname_obq_sec_t7[i - CIM_NUM_IBQ_T7],
+ base[i], size[i], G_QUERDADDR(p[0]) & 0xfff,
+ wr[0] << 1, G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
+ G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (rc);
+}
+
+static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9612,18 +10069,25 @@ static const char * const devlog_facility_strings[] = {
};
static int
-sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_devlog(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
int i, j, rc, nentries, first = 0;
struct devlog_params *dparams = &sc->params.devlog;
struct fw_devlog_e *buf, *e;
+ uint32_t addr, size;
uint64_t ftstamp = UINT64_MAX;
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
if (dparams->addr == 0)
return (ENXIO);
+ size = dparams->size / sc->params.ncores;
+ addr = dparams->addr + coreid * size;
+
MPASS(flags == M_WAITOK || flags == M_NOWAIT);
- buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
+ buf = malloc(size, M_CXGBE, M_ZERO | flags);
if (buf == NULL)
return (ENOMEM);
@@ -9631,13 +10095,12 @@ sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else
- rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
- dparams->size);
+ rc = read_via_memwin(sc, 1, addr, (void *)buf, size);
mtx_unlock(&sc->reg_lock);
if (rc != 0)
goto done;
- nentries = dparams->size / sizeof(struct fw_devlog_e);
+ nentries = size / sizeof(struct fw_devlog_e);
for (i = 0; i < nentries; i++) {
e = &buf[i];
@@ -9689,14 +10152,24 @@ static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int rc;
+ int rc, i, coreid = arg2;
struct sbuf *sb;
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
-
- rc = sbuf_devlog(sc, sb, M_WAITOK);
+ if (coreid == -1) {
+ /* -1 means all cores */
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+ if (sc->params.ncores > 1)
+ sbuf_printf(sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, sb, M_WAITOK);
+ }
+ } else {
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ rc = sbuf_devlog(sc, coreid, sb, M_WAITOK);
+ }
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9706,7 +10179,7 @@ sysctl_devlog(SYSCTL_HANDLER_ARGS)
static void
dump_devlog(struct adapter *sc)
{
- int rc;
+ int rc, i;
struct sbuf sb;
if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
@@ -9714,13 +10187,15 @@ dump_devlog(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_devlog(sc, &sb, M_WAITOK);
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+ if (sc->params.ncores > 1)
+ sbuf_printf(&sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, &sb, M_WAITOK);
+ }
if (rc == 0) {
- rc = sbuf_finish(&sb);
- if (rc == 0) {
- log(LOG_DEBUG, "%s: device log follows.\n%s",
- device_get_nameunit(sc->dev), sbuf_data(&sb));
- }
+ sbuf_finish(&sb);
+ log(LOG_DEBUG, "%s: device log follows.\n%s",
+ device_get_nameunit(sc->dev), sbuf_data(&sb));
}
sbuf_delete(&sb);
}
@@ -9909,16 +10384,16 @@ sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
}
struct mem_desc {
- u_int base;
- u_int limit;
+ uint64_t base;
+ uint64_t limit;
u_int idx;
};
static int
mem_desc_cmp(const void *a, const void *b)
{
- const u_int v1 = ((const struct mem_desc *)a)->base;
- const u_int v2 = ((const struct mem_desc *)b)->base;
+ const uint64_t v1 = ((const struct mem_desc *)a)->base;
+ const uint64_t v2 = ((const struct mem_desc *)b)->base;
if (v1 < v2)
return (-1);
@@ -9929,10 +10404,9 @@ mem_desc_cmp(const void *a, const void *b)
}
static void
-mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
- unsigned int to)
+mem_region_show(struct sbuf *sb, const char *name, uint64_t from, uint64_t to)
{
- unsigned int size;
+ uintmax_t size;
if (from == to)
return;
@@ -9941,8 +10415,12 @@ mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
if (size == 0)
return;
- /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
- sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
+ if (from > UINT32_MAX || to > UINT32_MAX)
+ sbuf_printf(sb, "%-18s 0x%012jx-0x%012jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
+ else
+ sbuf_printf(sb, "%-18s 0x%08jx-0x%08jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
}
static int
@@ -9950,7 +10428,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n;
+ int rc, i, n, nchan;
uint32_t lo, hi, used, free, alloc;
static const char *memory[] = {
"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
@@ -9961,12 +10439,14 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
"RQUDP region:", "PBL region:", "TXPBL region:",
- "TLSKey region:", "DBVFIFO region:", "ULPRX state:",
- "ULPTX state:", "On-chip queues:",
+ "TLSKey region:", "RRQ region:", "NVMe STAG region:",
+ "NVMe RQ region:", "NVMe RXPBL region:", "NVMe TPT region:",
+ "NVMe TXPBL region:", "DBVFIFO region:", "ULPRX state:",
+ "ULPTX state:", "RoCE RRQ region:", "On-chip queues:",
};
struct mem_desc avail[4];
struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ struct mem_desc *md;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
@@ -9992,36 +10472,91 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
if (lo & F_EDRAM0_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
- avail[i].base = G_EDRAM0_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM0_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM0_SIZE(hi) << 20);
+ }
avail[i].idx = 0;
i++;
}
if (lo & F_EDRAM1_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
- avail[i].base = G_EDRAM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM1_SIZE(hi) << 20);
+ }
avail[i].idx = 1;
i++;
}
if (lo & F_EXT_MEM_ENABLE) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
- avail[i].base = G_EXT_MEM_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
- avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T6:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM_SIZE(hi) << 20);
+ avail[i].idx = 2;
+ break;
+ case CHELSIO_T5:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T5 */
+ break;
+ default:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_T7_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T7+ */
+ break;
+ }
i++;
}
- if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
+ if (lo & F_EXT_MEM1_ENABLE && !(lo & F_MC_SPLIT)) {
+ /* Only T5 and T7+ have 2 MCs. */
+ MPASS(is_t5(sc) || chip_id(sc) >= CHELSIO_T7);
+
hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 4;
i++;
}
- if (is_t6(sc) && lo & F_HMA_MUX) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (lo & F_HMA_MUX) {
+ /* Only T6+ have HMA. */
+ MPASS(chip_id(sc) >= CHELSIO_T6);
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ hi = t4_read_reg(sc, A_MA_HOST_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_HMATARGETBASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_HMA_SIZE(hi) << 20);
+ } else {
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+ avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 5;
i++;
}
@@ -10030,6 +10565,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
goto done;
qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
+ md = &mem[0];
(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
@@ -10065,22 +10601,52 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
}
md++;
-#define ulp_region(reg) \
- md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
- (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
+#define ulp_region(reg) do {\
+ const u_int shift = chip_id(sc) >= CHELSIO_T7 ? 4 : 0; \
+ md->base = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT) << shift; \
+ md->limit = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) << shift; \
+ md->limit += (1 << shift) - 1; \
+ md++; \
+ } while (0)
+
+#define hide_ulp_region() do { \
+ md->base = 0; \
+ md->idx = nitems(region); \
+ md++; \
+ } while (0)
ulp_region(RX_ISCSI);
ulp_region(RX_TDDP);
ulp_region(TX_TPT);
ulp_region(RX_STAG);
ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
+ if (chip_id(sc) < CHELSIO_T7)
+ ulp_region(RX_RQUDP);
+ else
+ hide_ulp_region();
ulp_region(RX_PBL);
ulp_region(TX_PBL);
- if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
+ if (chip_id(sc) >= CHELSIO_T6)
ulp_region(RX_TLS_KEY);
+ else
+ hide_ulp_region();
+ if (chip_id(sc) >= CHELSIO_T7) {
+ ulp_region(RX_RRQ);
+ ulp_region(RX_NVME_TCP_STAG);
+ ulp_region(RX_NVME_TCP_RQ);
+ ulp_region(RX_NVME_TCP_PBL);
+ ulp_region(TX_NVME_TCP_TPT);
+ ulp_region(TX_NVME_TCP_PBL);
+ } else {
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
}
#undef ulp_region
+#undef hide_ulp_region
md->base = 0;
if (is_t4(sc))
@@ -10111,6 +10677,15 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
md->limit = 0;
md++;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ t4_tp_pio_read(sc, &lo, 1, A_TP_ROCE_RRQ_BASE, false);
+ md->base = lo;
+ } else {
+ md->base = 0;
+ md->idx = nitems(region);
+ }
+ md++;
+
md->base = sc->vres.ocq.start;
if (sc->vres.ocq.size)
md->limit = md->base + sc->vres.ocq.size - 1;
@@ -10143,31 +10718,41 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
mem[i].limit);
}
- sbuf_printf(sb, "\n");
lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP RAM:", lo, hi);
+ if (hi != lo - 1) {
+ sbuf_printf(sb, "\n");
+ mem_region_show(sb, "uP RAM:", lo, hi);
+ }
lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP Extmem2:", lo, hi);
+ if (hi != lo - 1)
+ mem_region_show(sb, "uP Extmem2:", lo, hi);
lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
- for (i = 0, free = 0; i < 2; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMRXNUMCHN(lo);
+ else
+ nchan = lo & F_PMRXNUMCHN ? 2 : 1;
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
G_PMRXMAXPAGE(lo), free,
- t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
- (lo & F_PMRXNUMCHN) ? 2 : 1);
+ t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, nchan);
lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
- for (i = 0, free = 0; i < 4; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMTXNUMCHN(lo);
+ else
+ nchan = 1 << G_PMTXNUMCHN(lo);
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
G_PMTXMAXPAGE(lo), free,
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
+ hi >= (1 << 20) ? 'M' : 'K', nchan);
sbuf_printf(sb, "%u p-structs (%u free)\n",
t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));
@@ -10184,7 +10769,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
i, used, alloc);
}
@@ -10200,7 +10785,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb,
"\nLoopback %d using %u pages out of %u allocated",
i, used, alloc);
@@ -10329,7 +10914,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc, i;
- MPASS(chip_id(sc) > CHELSIO_T5);
+ MPASS(chip_id(sc) == CHELSIO_T6);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
@@ -10338,7 +10923,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
" IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
" Replication"
- " P0 P1 P2 P3 ML\n");
+ " P0 P1 P2 P3 ML");
rc = 0;
for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
@@ -10503,6 +11088,206 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i;
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
+ " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
+ " Replication"
+ " P0 P1 P2 P3 ML");
+
+ rc = 0;
+ for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
+ uint8_t dip_hit, vlan_vld, lookup_type, port_num;
+ uint16_t ivlan;
+ uint64_t tcamx, tcamy, val, mask;
+ uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
+ uint8_t addr[ETHER_ADDR_LEN];
+
+ /* Read tcamy */
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
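+ /* Rev 0 splits the TCAM into two 256-entry banks; later revisions use three 512-entry banks. */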
+ if (chip_rev(sc) == 0) {
+ if (i < 256)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 256) | V_T7_CTLTCAMSEL(1);
+ } else {
+ if (i < 512)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else if (i < 1024)
+ ctl |= V_CTLTCAMINDEX(i - 512) | V_T7_CTLTCAMSEL(1);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 1024) | V_T7_CTLTCAMSEL(2);
+ }
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamy = G_DMACH(val) << 32;
+ tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ lookup_type = G_DATALKPTYPE(data2);
+ port_num = G_DATAPORTNUM(data2);
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI */
+ vniy = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ dip_hit = data2 & F_DATADIPHIT;
+ vlan_vld = 0;
+ } else {
+ vniy = 0;
+ dip_hit = 0;
+ vlan_vld = data2 & F_DATAVIDH2;
+ ivlan = G_VIDL(val);
+ }
+
+ ctl |= V_CTLXYBITSEL(1);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamx = G_DMACH(val) << 32;
+ tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI mask */
+ vnix = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ } else
+ vnix = 0;
+
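+ /* A bit set in both tcamx and tcamy marks an unused entry; skip it. */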
+ if (tcamx & tcamy)
+ continue;
+ tcamxy2valmask(tcamx, tcamy, addr, &mask);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ if (chip_rev(sc) == 0) {
+ cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+ cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ } else {
+ t4_write_reg(sc, A_MPS_CLS_SRAM_H,
+ V_SRAMWRN(0) | V_SRAMINDEX(i));
+ cls_lo = t4_read_reg(sc, A_MPS_CLS_SRAM_L);
+ cls_hi = t4_read_reg(sc, A_MPS_CLS_SRAM_H);
+ }
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx %06x %06x - - %3c"
+ " I %4x %3c %#x%4u%4d", i, addr[0],
+ addr[1], addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
+ port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ } else {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx - - ", i, addr[0], addr[1],
+ addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask);
+
+ if (vlan_vld)
+ sbuf_printf(sb, "%4u Y ", ivlan);
+ else
+ sbuf_printf(sb, " - N ");
+
+ sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
+ lookup_type ? 'I' : 'O', port_num,
+ cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ }
+
+ if (cls_lo & F_T6_REPLICATE) {
+ struct fw_ldst_cmd ldst_cmd;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.rplc.fid_idx =
+ htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+ V_FW_LDST_CMD_IDX(i));
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+ "t6mps");
+ if (rc)
+ break;
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+ end_synchronized_op(sc, 0);
+ if (rc != 0)
+ break;
+ else {
+ sbuf_printf(sb, " %08x %08x %08x %08x"
+ " %08x %08x %08x %08x",
+ be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
+ be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
+ be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
+ be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
+ be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
+ be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
+ be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
+ be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
+ }
+ } else
+ sbuf_printf(sb, "%72s", "");
+
+ sbuf_printf(sb, "%4u%3u%3u%3u %#x",
+ G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
+ G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
+ (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
+ }
+
+ if (rc)
+ (void) sbuf_finish(sb);
+ else
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+
+ return (rc);
+}
+
+static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -10543,6 +11328,7 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
int rc, i;
uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
+ uint32_t stats[T7_PM_RX_CACHE_NSTATS];
static const char *tx_stats[MAX_PM_NSTATS] = {
"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
"Tx FIFO wait", NULL, "Tx latency"
@@ -10559,12 +11345,14 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
else {
t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
+ if (chip_id(sc) >= CHELSIO_T7)
+ t4_pmrx_cache_get_stats(sc, stats);
}
mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
- sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -10599,6 +11387,61 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
rx_cyc[i]);
}
+ if (chip_id(sc) >= CHELSIO_T7) {
+ i = 0;
+ sbuf_printf(sb, "\n\nPM RX Cache Stats\n");
+ sbuf_printf(sb, "%-40s %u\n", "ReqWrite", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadNoInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Split Request",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Split (Read Invalidate)", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Feedback Read Split (Read NoInvalidate)",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Hit", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Feedback Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Full Free",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "FB Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss FL Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss LRU Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Miss LRU-Multiple Evict", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Hit Increasing Islands", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Island Read split", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Overflow Eviction",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u", "Read Overflow Eviction",
+ stats[i++]);
+ }
+
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -11609,15 +12452,17 @@ sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
#endif
static int
-get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
+get_sge_context(struct adapter *sc, int mem_id, uint32_t cid, int len,
+ uint32_t *data)
{
int rc;
- if (cntxt->cid > M_CTXTQID)
+ if (len < sc->chip_params->sge_ctxt_size)
+ return (ENOBUFS);
+ if (cid > M_CTXTQID)
return (EINVAL);
-
- if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
- cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
+ if (mem_id != CTXT_EGRESS && mem_id != CTXT_INGRESS &&
+ mem_id != CTXT_FLM && mem_id != CTXT_CNM)
return (EINVAL);
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
@@ -11630,8 +12475,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
}
if (sc->flags & FW_OK) {
- rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
- &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd(sc, sc->mbox, cid, mem_id, data);
if (rc == 0)
goto done;
}
@@ -11640,7 +12484,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
* Read via firmware failed or wasn't even attempted. Read directly via
* the backdoor.
*/
- rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd_bd(sc, cid, mem_id, data);
done:
end_synchronized_op(sc, 0);
return (rc);
@@ -12048,10 +12892,11 @@ clear_stats(struct adapter *sc, u_int port_id)
mtx_lock(&sc->reg_lock);
if (!hw_off_limits(sc)) {
/* MAC stats */
- t4_clr_port_stats(sc, pi->tx_chan);
+ t4_clr_port_stats(sc, pi->hw_port);
if (is_t6(sc)) {
if (pi->fcs_reg != -1)
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
else
pi->stats.rx_fcs_err = 0;
}
@@ -12114,12 +12959,21 @@ clear_stats(struct adapter *sc, u_int port_id)
txq->kern_tls_full = 0;
txq->kern_tls_octets = 0;
txq->kern_tls_waste = 0;
- txq->kern_tls_options = 0;
txq->kern_tls_header = 0;
- txq->kern_tls_fin = 0;
txq->kern_tls_fin_short = 0;
txq->kern_tls_cbc = 0;
txq->kern_tls_gcm = 0;
+ if (is_t6(sc)) {
+ txq->kern_tls_options = 0;
+ txq->kern_tls_fin = 0;
+ } else {
+ txq->kern_tls_ghash_received = 0;
+ txq->kern_tls_ghash_requested = 0;
+ txq->kern_tls_lso = 0;
+ txq->kern_tls_partial_ghash = 0;
+ txq->kern_tls_splitmode = 0;
+ txq->kern_tls_trailer = 0;
+ }
mp_ring_reset_stats(txq->r);
}
@@ -12264,14 +13118,12 @@ t4_os_link_changed(struct port_info *pi)
if (is_t6(sc)) {
if (lc->link_ok) {
if (lc->speed > 25000 ||
- (lc->speed == 25000 && lc->fec == FEC_RS)) {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
- } else {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
- }
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ (lc->speed == 25000 && lc->fec == FEC_RS))
+ pi->fcs_reg = A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS;
+ else
+ pi->fcs_reg = A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS;
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
pi->stats.rx_fcs_err = 0;
} else {
pi->fcs_reg = -1;
@@ -12404,9 +13256,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_DEL_FILTER:
rc = del_filter(sc, (struct t4_filter *)data);
break;
- case CHELSIO_T4_GET_SGE_CONTEXT:
- rc = get_sge_context(sc, (struct t4_sge_context *)data);
+ case CHELSIO_T4_GET_SGE_CONTEXT: {
+ struct t4_sge_context *ctxt = (struct t4_sge_context *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
break;
+ }
case CHELSIO_T4_LOAD_FW:
rc = load_fw(sc, (struct t4_data *)data);
break;
@@ -12452,6 +13308,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_RELEASE_CLIP_ADDR:
rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
break;
+ case CHELSIO_T4_GET_SGE_CTXT: {
+ struct t4_sge_ctxt *ctxt = (struct t4_sge_ctxt *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
+ break;
+ }
default:
rc = ENOTTY;
}
@@ -12898,7 +13761,9 @@ t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
{
uint32_t base, j, off, pf, reg, save, win_pos;
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, 2) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
save = t4_read_reg(sc, reg);
base = sc->memwin[2].mw_base;
@@ -12910,6 +13775,8 @@ t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
win_pos = addr & ~0x7f; /* start must be 128B aligned */
}
off = addr - win_pos;
+ if (chip_id(sc) > CHELSIO_T6)
+ win_pos >>= X_T7_MEMOFST_SHIFT;
t4_write_reg(sc, reg, win_pos | pf);
t4_read_reg(sc, reg);
@@ -13274,6 +14141,7 @@ mod_event(module_t mod, int cmd, void *arg)
#endif
#ifdef KERN_TLS
t6_ktls_modload();
+ t7_ktls_modload();
#endif
t4_tracer_modload();
tweak_tunables();
@@ -13337,6 +14205,7 @@ mod_event(module_t mod, int cmd, void *arg)
vxlan_stop_evtag);
t4_tracer_modunload();
#ifdef KERN_TLS
+ t7_ktls_modunload();
t6_ktls_modunload();
#endif
#ifdef INET6
@@ -13383,6 +14252,14 @@ MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
+DRIVER_MODULE(chnex, pci, ch_driver, mod_event, 0);
+MODULE_VERSION(chnex, 1);
+MODULE_DEPEND(chnex, crypto, 1, 1, 1);
+MODULE_DEPEND(chnex, firmware, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(chnex, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
MODULE_VERSION(cxgbe, 1);
@@ -13392,6 +14269,9 @@ MODULE_VERSION(cxl, 1);
DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
MODULE_VERSION(cc, 1);
+DRIVER_MODULE(che, chnex, che_driver, 0, 0);
+MODULE_VERSION(che, 1);
+
DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
MODULE_VERSION(vcxgbe, 1);
@@ -13400,3 +14280,6 @@ MODULE_VERSION(vcxl, 1);
DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
MODULE_VERSION(vcc, 1);
+
+DRIVER_MODULE(vche, che, vche_driver, 0, 0);
+MODULE_VERSION(vche, 1);
diff --git a/sys/dev/cxgbe/t4_mp_ring.c b/sys/dev/cxgbe/t4_mp_ring.c
index 531fd356728e..916c363a0c2a 100644
--- a/sys/dev/cxgbe/t4_mp_ring.c
+++ b/sys/dev/cxgbe/t4_mp_ring.c
@@ -305,7 +305,6 @@ failed:
}
void
-
mp_ring_free(struct mp_ring *r)
{
int i;
@@ -472,6 +471,86 @@ mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
return (0);
}
+/*
+ * Enqueue n items without ever draining the ring. Intended to be
+ * called only from the drain callback, to enqueue new items while the
+ * ring is being drained.
+ *
+ * Returns an errno.
+ */
+int
+mp_ring_enqueue_only(struct mp_ring *r, void **items, int n)
+{
+ union ring_state os, ns;
+ uint16_t pidx_start, pidx_stop;
+ int i;
+
+ MPASS(items != NULL);
+ MPASS(n > 0);
+
+ /*
+ * Reserve room for the new items. Our reservation, if successful, is
+ * from 'pidx_start' to 'pidx_stop'.
+ */
+ os.state = atomic_load_64(&r->state);
+
+ /* Should only be used from the drain callback. */
+ MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
+ os.flags == TAKING_OVER);
+
+ for (;;) {
+ if (__predict_false(space_available(r, os) < n)) {
+ /* Not enough room in the ring. */
+ counter_u64_add(r->dropped, n);
+ return (ENOBUFS);
+ }
+
+ /* There is room in the ring. */
+
+ ns.state = os.state;
+ ns.pidx_head = increment_idx(r, os.pidx_head, n);
+ critical_enter();
+ if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
+ break;
+ critical_exit();
+ cpu_spinwait();
+ }
+
+ pidx_start = os.pidx_head;
+ pidx_stop = ns.pidx_head;
+
+ /*
+ * Wait for other producers who got in ahead of us to enqueue their
+ * items, one producer at a time. It is our turn when the ring's
+ * pidx_tail reaches the beginning of our reservation (pidx_start).
+ */
+ while (ns.pidx_tail != pidx_start) {
+ cpu_spinwait();
+ ns.state = atomic_load_64(&r->state);
+ }
+
+ /* Now it is our turn to fill up the area we reserved earlier. */
+ i = pidx_start;
+ do {
+ r->items[i] = *items++;
+ if (__predict_false(++i == r->size))
+ i = 0;
+ } while (i != pidx_stop);
+
+ /*
+ * Update the ring's pidx_tail. The release style atomic guarantees
+ * that the items are visible to any thread that sees the updated pidx.
+ */
+ os.state = atomic_load_64(&r->state);
+ do {
+ ns.state = os.state;
+ ns.pidx_tail = pidx_stop;
+ } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
+ critical_exit();
+
+ counter_u64_add(r->not_consumer, 1);
+ return (0);
+}
+
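
A sketch of the intended call pattern; requeue_one is illustrative and not part of this patch. Because the ring is already BUSY when the drain callback runs, mp_ring_enqueue_only() reserves and fills slots without recursing into another drain:

	static int
	requeue_one(struct mp_ring *r, void *item)
	{
		void *items[1];

		items[0] = item;
		return (mp_ring_enqueue_only(r, items, 1));	/* 0 or ENOBUFS */
	}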
void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
diff --git a/sys/dev/cxgbe/t4_mp_ring.h b/sys/dev/cxgbe/t4_mp_ring.h
index 949174b9056d..07b15906cd43 100644
--- a/sys/dev/cxgbe/t4_mp_ring.h
+++ b/sys/dev/cxgbe/t4_mp_ring.h
@@ -62,6 +62,7 @@ int mp_ring_alloc(struct mp_ring **, int, void *, ring_drain_t,
ring_can_drain_t, struct malloc_type *, struct mtx *, int);
void mp_ring_free(struct mp_ring *);
int mp_ring_enqueue(struct mp_ring *, void **, int, int);
+int mp_ring_enqueue_only(struct mp_ring *, void **, int);
void mp_ring_check_drainage(struct mp_ring *, int);
void mp_ring_reset_stats(struct mp_ring *);
bool mp_ring_is_idle(struct mp_ring *);
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index e53fb5054316..0135bec6e2c1 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -232,7 +232,7 @@ alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
@@ -276,7 +276,7 @@ free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, cong_map;
__be32 v;
struct adapter *sc = vi->adapter;
struct port_info *pi = vi->pi;
@@ -284,7 +284,6 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
struct netmap_adapter *na = NA(vi->ifp);
struct fw_iq_cmd c;
const int cong_drop = nm_cong_drop;
- const int cong_map = pi->rx_e_chan_map;
MPASS(na != NULL);
MPASS(nm_rxq->iq_desc != NULL);
@@ -314,13 +313,17 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
c.iqsize = htobe16(vi->qsize_rxq);
c.iqaddr = htobe64(nm_rxq->iq_ba);
if (cong_drop != -1) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF |
F_FW_IQ_CMD_FL0CONGEN);
@@ -421,15 +424,19 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
- c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC);
- else
+ if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) {
+ const int core = sc->params.ncores > 1 ?
+ nm_txq->nid % sc->params.ncores : 0;
+
+ c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core));
+ } else
c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
diff --git a/sys/dev/cxgbe/t4_sched.c b/sys/dev/cxgbe/t4_sched.c
index 2186c8aa2ac0..65c2720d692c 100644
--- a/sys/dev/cxgbe/t4_sched.c
+++ b/sys/dev/cxgbe/t4_sched.c
@@ -272,7 +272,7 @@ update_tx_sched(void *context, int pending)
}
rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
FW_SCHED_PARAMS_LEVEL_CL_RL, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, j, 0, tc->maxrate, 0,
+ tc->ratemode, pi->hw_port, j, 0, tc->maxrate, 0,
tc->pktsize, tc->burstsize, 1);
end_synchronized_op(sc, 0);
@@ -291,7 +291,7 @@ update_tx_sched(void *context, int pending)
"params: mode %d, rateunit %d, ratemode %d, "
"channel %d, minrate %d, maxrate %d, pktsize %d, "
"burstsize %d\n", j, rc, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, 0, tc->maxrate,
+ tc->ratemode, pi->hw_port, 0, tc->maxrate,
tc->pktsize, tc->burstsize);
}
}
@@ -839,7 +839,7 @@ failed:
cst->tx_total = cst->tx_credits;
cst->plen = 0;
cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
/*
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 86454bc4fe10..2f9cb1a4ebb5 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -259,17 +258,20 @@ static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
struct sge_ofld_rxq *);
#endif
-static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ctrl_eq_alloc(struct adapter *, struct sge_eq *, int);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
-static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#endif
static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
struct sysctl_oid *);
static void free_eq(struct adapter *, struct sge_eq *);
static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
struct sysctl_oid *, struct sge_eq *);
-static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
+static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
struct sysctl_ctx_list *, struct sysctl_oid *);
@@ -348,6 +350,7 @@ cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
+cpl_handler_t fw6_pld_handlers[NUM_CPL_FW6_COOKIES];
void
t4_register_an_handler(an_handler_t h)
@@ -477,6 +480,21 @@ fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (fw4_ack_handlers[cookie](iq, rss, m));
}
+static int
+fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ uint64_t cookie;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+ cookie = be64toh(cpl->data[1]) & CPL_FW6_COOKIE_MASK;
+
+ return (fw6_pld_handlers[cookie](iq, rss, m));
+}
+
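
A registration sketch; my_fw6_pld_handler and MY_FW6_COOKIE are illustrative names. A consumer claims one of the CPL_FW6_PLD cookie slots, and replies whose data[1] carries that cookie are dispatched to its handler:

	t4_register_shared_cpl_handler(CPL_FW6_PLD, my_fw6_pld_handler,
	    MY_FW6_COOKIE);		/* must be < NUM_CPL_FW6_COOKIES */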
static void
t4_init_shared_cpl_handlers(void)
{
@@ -486,6 +504,7 @@ t4_init_shared_cpl_handlers(void)
t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
+ t4_register_cpl_handler(CPL_FW6_PLD, fw6_pld_handler);
}
void
@@ -494,8 +513,12 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
uintptr_t *loc;
MPASS(opcode < nitems(t4_cpl_handler));
- MPASS(cookie > CPL_COOKIE_RESERVED);
- MPASS(cookie < NUM_CPL_COOKIES);
+ if (opcode == CPL_FW6_PLD) {
+ MPASS(cookie < NUM_CPL_FW6_COOKIES);
+ } else {
+ MPASS(cookie > CPL_COOKIE_RESERVED);
+ MPASS(cookie < NUM_CPL_COOKIES);
+ }
MPASS(t4_cpl_handler[opcode] != NULL);
switch (opcode) {
@@ -514,6 +537,9 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
case CPL_FW4_ACK:
loc = (uintptr_t *)&fw4_ack_handlers[cookie];
break;
+ case CPL_FW6_PLD:
+ loc = (uintptr_t *)&fw6_pld_handlers[cookie];
+ break;
default:
MPASS(0);
return;
@@ -1064,9 +1090,9 @@ t4_setup_adapter_queues(struct adapter *sc)
*/
/*
- * Control queues, one per port.
+ * Control queues. At least one per port and per internal core.
*/
- for_each_port(sc, i) {
+ for (i = 0; i < sc->sge.nctrlq; i++) {
rc = alloc_ctrlq(sc, i);
if (rc != 0)
return (rc);
@@ -1087,7 +1113,7 @@ t4_teardown_adapter_queues(struct adapter *sc)
if (sc->sge.ctrlq != NULL) {
MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */
- for_each_port(sc, i)
+ for (i = 0; i < sc->sge.nctrlq; i++)
free_ctrlq(sc, i);
}
free_fwq(sc);
@@ -2701,9 +2727,14 @@ restart:
#endif
#ifdef KERN_TLS
if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) {
+ struct vi_info *vi = if_getsoftc(mst->ifp);
+
cflags |= MC_TLS;
set_mbuf_cflags(m0, cflags);
- rc = t6_ktls_parse_pkt(m0);
+ if (is_t6(vi->pi->adapter))
+ rc = t6_ktls_parse_pkt(m0);
+ else
+ rc = t7_ktls_parse_pkt(m0);
if (rc != 0)
goto fail;
return (EINPROGRESS);
@@ -3273,7 +3304,10 @@ skip_coalescing:
#ifdef KERN_TLS
} else if (mbuf_cflags(m0) & MC_TLS) {
ETHER_BPF_MTAP(ifp, m0);
- n = t6_ktls_write_wr(txq, wr, m0, avail);
+ if (is_t6(sc))
+ n = t6_ktls_write_wr(txq, wr, m0, avail);
+ else
+ n = t7_ktls_write_wr(txq, wr, m0, avail);
#endif
} else {
ETHER_BPF_MTAP(ifp, m0);
@@ -3414,6 +3448,7 @@ init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
eq->type = eqtype;
eq->port_id = port_id;
eq->tx_chan = sc->port[port_id]->tx_chan;
+ eq->hw_port = sc->port[port_id]->hw_port;
eq->iq = iq;
eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
strlcpy(eq->lockname, name, sizeof(eq->lockname));
@@ -3577,7 +3612,7 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
@@ -3585,7 +3620,13 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
c.iqaddr = htobe64(iq->ba);
c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype));
if (iq->cong_drop != -1) {
- cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0;
+ if (iq->qtype == IQ_ETH) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
+ } else
+ cong_map = 0;
c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
}
@@ -3842,7 +3883,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
struct sysctl_oid *oid;
struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
- MPASS(idx < sc->params.nports);
+ MPASS(idx < sc->sge.nctrlq);
if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
@@ -3854,8 +3895,8 @@ alloc_ctrlq(struct adapter *sc, int idx)
snprintf(name, sizeof(name), "%s ctrlq%d",
device_get_nameunit(sc->dev), idx);
- init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx,
- &sc->sge.fwq, name);
+ init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE,
+ idx % sc->params.nports, &sc->sge.fwq, name);
rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
if (rc != 0) {
CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
@@ -3870,7 +3911,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
MPASS(ctrlq->nwr_pending == 0);
MPASS(ctrlq->ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
+ rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq, idx);
if (rc != 0) {
CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
return (rc);
@@ -3938,14 +3979,19 @@ t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop,
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
V_FW_PARAMS_PARAM_YZ(cntxt_id);
- val = V_CONMCTXT_CNGTPMODE(cong_mode);
- if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
- cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
- for (i = 0, ch_map = 0; i < 4; i++) {
- if (cong_map & (1 << i))
- ch_map |= 1 << (i << cng_ch_bits_log);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ val = V_T7_DMAQ_CONM_CTXT_CNGTPMODE(cong_mode) |
+ V_T7_DMAQ_CONM_CTXT_CH_VEC(cong_map);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(cong_mode);
+ if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
+ cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
+ for (i = 0, ch_map = 0; i < 4; i++) {
+ if (cong_map & (1 << i))
+ ch_map |= 1 << (i << cng_ch_bits_log);
+ }
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
- val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
if (rc != 0) {
@@ -4253,24 +4299,26 @@ qsize_to_fthresh(int qsize)
}
static int
-ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
+ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ctrl_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.tid_qid_sel_mask != 0 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
V_FW_EQ_CTRL_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
+ V_FW_EQ_CTRL_CMD_COREGROUP(core) |
F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4282,8 +4330,8 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
- eq->tx_chan, rc);
+ CH_ERR(sc, "failed to create hw ctrlq for port %d: %d\n",
+ eq->port_id, rc);
return (rc);
}
@@ -4299,24 +4347,26 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
}
static int
-eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_eth_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.ncores > 1 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core) |
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(eq->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4344,23 +4394,44 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+/*
+ * ncores number of uP cores.
+ * nq number of queues for this VI
+ * idx queue index
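+ *
+ * e.g. ncores = 4, nq = 8: idx 0..7 map to cores 0, 0, 1, 1, 2, 2, 3, 3.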
+ */
+static inline int
+qidx_to_core(int ncores, int nq, int idx)
+{
+ MPASS(nq % ncores == 0);
+ MPASS(idx >= 0 && idx < nq);
+
+ return (idx * ncores / nq);
+}
+
static int
-ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq,
+ int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ofld_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ if (sc->params.tid_qid_sel_mask != 0)
+ core = qidx_to_core(sc->params.ncores, vi->nofldtxq, idx);
+ else
+ core = 0;
+
bzero(&c, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
V_FW_EQ_OFLD_CMD_VFN(0));
c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
+ V_FW_EQ_OFLD_CMD_COREGROUP(core) |
F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_OFLD_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4449,7 +4520,7 @@ add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
}
static int
-alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
int rc;
@@ -4464,16 +4535,16 @@ alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
switch (eq->type) {
case EQ_CTRL:
- rc = ctrl_eq_alloc(sc, eq);
+ rc = ctrl_eq_alloc(sc, eq, idx);
break;
case EQ_ETH:
- rc = eth_eq_alloc(sc, vi, eq);
+ rc = eth_eq_alloc(sc, vi, eq, idx);
break;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
case EQ_OFLD:
- rc = ofld_eq_alloc(sc, vi, eq);
+ rc = ofld_eq_alloc(sc, vi, eq, idx);
break;
#endif
@@ -4653,7 +4724,7 @@ failed:
if (!(eq->flags & EQ_HW_ALLOCATED)) {
MPASS(eq->flags & EQ_SW_ALLOCATED);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
return (rc);
@@ -4678,10 +4749,10 @@ failed:
if (vi->flags & TX_USES_VM_WR)
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
else
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
txq->tc_idx = -1;
@@ -4788,18 +4859,46 @@ add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
CTLFLAG_RD, &txq->kern_tls_waste,
"# of octets DMAd but not transmitted in NIC TLS records");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
- CTLFLAG_RD, &txq->kern_tls_options,
- "# of NIC TLS options-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
CTLFLAG_RD, &txq->kern_tls_header,
"# of NIC TLS header-only packets transmitted");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
- CTLFLAG_RD, &txq->kern_tls_fin,
- "# of NIC TLS FIN-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
CTLFLAG_RD, &txq->kern_tls_fin_short,
"# of NIC TLS padded FIN packets on short TLS records");
+ if (is_t6(sc)) {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_options", CTLFLAG_RD,
+ &txq->kern_tls_options,
+ "# of NIC TLS options-only packets transmitted");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin,
+ "# of NIC TLS FIN-only packets transmitted");
+ } else {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_received", CTLFLAG_RD,
+ &txq->kern_tls_ghash_received,
+ "# of NIC TLS GHASHes received");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_requested", CTLFLAG_RD,
+ &txq->kern_tls_ghash_requested,
+ "# of NIC TLS GHASHes requested");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_lso", CTLFLAG_RD,
+ &txq->kern_tls_lso,
+ "# of NIC TLS records transmitted using LSO");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_partial_ghash", CTLFLAG_RD,
+ &txq->kern_tls_partial_ghash,
+ "# of NIC TLS records encrypted using a partial GHASH");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_splitmode", CTLFLAG_RD,
+ &txq->kern_tls_splitmode,
+ "# of NIC TLS records using SplitMode");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_trailer", CTLFLAG_RD,
+ &txq->kern_tls_trailer,
+ "# of NIC TLS trailer-only packets transmitted");
+ }
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
CTLFLAG_RD, &txq->kern_tls_cbc,
"# of NIC TLS sessions using AES-CBC");
@@ -4869,7 +4968,7 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
MPASS(eq->flags & EQ_SW_ALLOCATED);
MPASS(ofld_txq->wrq.nwr_pending == 0);
MPASS(ofld_txq->wrq.ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
rc);
@@ -5418,7 +5517,8 @@ write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
m0->m_pkthdr.l5hlen) |
V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
- tnl_lso->r1 = 0;
+ tnl_lso->ipsecen_to_rocev2 = 0;
+ tnl_lso->roce_eth = 0;
/* Inner headers. */
ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
@@ -6583,10 +6683,11 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
V_FW_WR_FLOWID(cst->etid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+	/* Firmware expects the hw port and will translate it to a channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(cst->iqid);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
diff --git a/sys/dev/cxgbe/t4_tpt.c b/sys/dev/cxgbe/t4_tpt.c
new file mode 100644
index 000000000000..d18eabb026f1
--- /dev/null
+++ b/sys/dev/cxgbe/t4_tpt.c
@@ -0,0 +1,193 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Chelsio Communications, Inc.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "common/common.h"
+
+/*
+ * Support routines to manage TPT entries used for both RDMA and NVMe
+ * offloads. This includes allocating STAG indices and managing the
+ * PBL pool.
+ */
+
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+
+/* PBL and STAG Memory Managers. */
+
+#define MIN_PBL_SHIFT 5 /* 32B == min PBL size (4 entries) */
+
+uint32_t
+t4_pblpool_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t addr;
+
+ if (vmem_xalloc(sc->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
+ 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
+ &addr) != 0)
+ return (0);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%lx size %d", __func__, addr, size);
+#endif
+ return (addr);
+}
+
+void
+t4_pblpool_free(struct adapter *sc, uint32_t addr, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%x size %d", __func__, addr, size);
+#endif
+ vmem_xfree(sc->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
+}
+
+uint32_t
+t4_stag_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t stag_idx;
+
+ if (vmem_alloc(sc->stag_arena, size, M_FIRSTFIT | M_NOWAIT,
+ &stag_idx) != 0)
+ return (T4_STAG_UNSET);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%lx size %d", __func__, stag_idx, size);
+#endif
+ return (stag_idx);
+}
+
+void
+t4_stag_free(struct adapter *sc, uint32_t stag_idx, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%x size %d", __func__, stag_idx, size);
+#endif
+ vmem_free(sc->stag_arena, stag_idx, size);
+}
+
+void
+t4_init_tpt(struct adapter *sc)
+{
+ if (sc->vres.pbl.size != 0)
+ sc->pbl_arena = vmem_create("PBL_MEM_POOL", sc->vres.pbl.start,
+ sc->vres.pbl.size, 1, 0, M_FIRSTFIT | M_WAITOK);
+ if (sc->vres.stag.size != 0)
+ sc->stag_arena = vmem_create("STAG", 1,
+ sc->vres.stag.size >> 5, 1, 0, M_FIRSTFIT | M_WAITOK);
+}
+
+void
+t4_free_tpt(struct adapter *sc)
+{
+ if (sc->pbl_arena != NULL)
+ vmem_destroy(sc->pbl_arena);
+ if (sc->stag_arena != NULL)
+ vmem_destroy(sc->stag_arena);
+}
+
+/*
+ * TPT support routines. TPT entries are stored in the STAG adapter
+ * memory region and are written to via ULP_TX_MEM_WRITE commands in
+ * FW_ULPTX_WR work requests.
+ */
+
+void
+t4_write_mem_dma_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, vm_paddr_t data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_sgl *sgl;
+
+ MPASS(wr_len == T4_WRITE_MEM_DMA_LEN);
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ V_T5_ULP_MEMIO_ORDER(1) |
+ V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(len >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(len >> 5));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(ulpmc + 1);
+ sgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1));
+ sgl->len0 = htobe32(len);
+ sgl->addr0 = htobe64(data);
+}
+
+void
+t4_write_mem_inline_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, void *data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_idata *ulpsc;
+
+ MPASS(len > 0 && len <= T4_MAX_INLINE_SIZE);
+ MPASS(wr_len == T4_WRITE_MEM_INLINE_LEN(len));
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ F_T5_ULP_MEMIO_IMM);
+
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ ulpsc->len = htobe32(roundup(len, T4_ULPTX_MIN_IO));
+
+ if (data != NULL)
+ memcpy(ulpsc + 1, data, len);
+}
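The new pool API above reports failure through sentinel values rather than error codes (0 for an exhausted PBL arena, T4_STAG_UNSET for a failed STAG allocation). A minimal usage sketch, assuming a consumer inside the driver where struct adapter and these helpers are in scope; the 8-byte PBL entry size and the function and parameter names are illustrative assumptions, not part of this commit:

	/*
	 * Hypothetical caller (illustration only): allocate a PBL range
	 * and an STAG index together, unwinding on failure.
	 */
	static int
	example_alloc_tpt_resources(struct adapter *sc, int npages,
	    uint32_t *pbl_addr, uint32_t *stag)
	{
		/* Assume 8-byte PBL entries; 0 signals arena exhaustion. */
		*pbl_addr = t4_pblpool_alloc(sc, npages * 8);
		if (*pbl_addr == 0)
			return (ENOMEM);

		/* T4_STAG_UNSET is the failure sentinel for STAGs. */
		*stag = t4_stag_alloc(sc, 1);
		if (*stag == T4_STAG_UNSET) {
			t4_pblpool_free(sc, *pbl_addr, npages * 8);
			return (ENOMEM);
		}
		return (0);
	}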
diff --git a/sys/dev/cxgbe/t4_tracer.c b/sys/dev/cxgbe/t4_tracer.c
index 80689a543e83..4f8d28626bc9 100644
--- a/sys/dev/cxgbe/t4_tracer.c
+++ b/sys/dev/cxgbe/t4_tracer.c
@@ -123,9 +123,8 @@ static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{
- if (strncmp(name, "t4nex", 5) != 0 &&
- strncmp(name, "t5nex", 5) != 0 &&
- strncmp(name, "t6nex", 5) != 0)
+ if (strncmp(name, "t4nex", 5) != 0 && strncmp(name, "t5nex", 5) != 0 &&
+ strncmp(name, "t6nex", 5) != 0 && strncmp(name, "chnex", 5) != 0)
return (0);
if (name[5] < '0' || name[5] > '9')
return (0);
diff --git a/sys/dev/cxgbe/t4_vf.c b/sys/dev/cxgbe/t4_vf.c
index b7b08e226a57..89dae02e9332 100644
--- a/sys/dev/cxgbe/t4_vf.c
+++ b/sys/dev/cxgbe/t4_vf.c
@@ -125,6 +125,28 @@ struct {
{0x6885, "Chelsio T6240-SO 85 VF"},
{0x6886, "Chelsio T6225-SO-CR 86 VF"},
{0x6887, "Chelsio T6225-CR 87 VF"},
+}, t7vf_pciids[] = {
+ {0xd800, "Chelsio T7 FPGA VF"}, /* T7 PE12K FPGA */
+ {0x7800, "Chelsio T72200-DBG VF"}, /* 2 x 200G, debug */
+ {0x7801, "Chelsio T7250 VF"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7802, "Chelsio S7250 VF"}, /* 2 x 10/25/50G, nomem */
+ {0x7803, "Chelsio T7450 VF"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7804, "Chelsio S7450 VF"}, /* 4 x 10/25/50G, nomem */
+ {0x7805, "Chelsio T72200 VF"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7806, "Chelsio S72200 VF"}, /* 2 x 40/100/200G, nomem */
+ {0x7807, "Chelsio T72200-FH VF"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7808, "Chelsio T71400 VF"}, /* 1 x 400G, nomem */
+ {0x7809, "Chelsio S7210-BT VF"}, /* 2 x 10GBASE-T, nomem */
+ {0x780a, "Chelsio T7450-RC VF"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x780b, "Chelsio T72200-RC VF"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x780c, "Chelsio T72200-FH-RC VF"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x780d, "Chelsio S72200-OCP3 VF"}, /* 2 x 40/100/200G OCP3 */
+ {0x780e, "Chelsio S7450-OCP3 VF"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x780f, "Chelsio S7410-BT-OCP3 VF"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7810, "Chelsio S7210-BT-A VF"}, /* 2 x 10GBASE-T */
+ {0x7811, "Chelsio T7_MAYRA_7 VF"}, /* Motherboard */
+
+ {0x7880, "Custom T7 VF"},
};
static d_ioctl_t t4vf_ioctl;
@@ -183,6 +205,22 @@ t6vf_probe(device_t dev)
return (ENXIO);
}
+static int
+chvf_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7vf_pciids); i++) {
+ if (d == t7vf_pciids[i].device) {
+ device_set_desc(dev, t7vf_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
#define FW_PARAM_DEV(param) \
(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
@@ -956,6 +994,20 @@ static driver_t t6vf_driver = {
sizeof(struct adapter)
};
+static device_method_t chvf_methods[] = {
+ DEVMETHOD(device_probe, chvf_probe),
+ DEVMETHOD(device_attach, t4vf_attach),
+ DEVMETHOD(device_detach, t4_detach_common),
+
+ DEVMETHOD_END
+};
+
+static driver_t chvf_driver = {
+ "chvf",
+ chvf_methods,
+ sizeof(struct adapter)
+};
+
static driver_t cxgbev_driver = {
"cxgbev",
cxgbe_methods,
@@ -974,6 +1026,12 @@ static driver_t ccv_driver = {
sizeof(struct port_info)
};
+static driver_t chev_driver = {
+ "chev",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
DRIVER_MODULE(t4vf, pci, t4vf_driver, 0, 0);
MODULE_VERSION(t4vf, 1);
MODULE_DEPEND(t4vf, t4nex, 1, 1, 1);
@@ -986,6 +1044,10 @@ DRIVER_MODULE(t6vf, pci, t6vf_driver, 0, 0);
MODULE_VERSION(t6vf, 1);
MODULE_DEPEND(t6vf, t6nex, 1, 1, 1);
+DRIVER_MODULE(chvf, pci, chvf_driver, 0, 0);
+MODULE_VERSION(chvf, 1);
+MODULE_DEPEND(chvf, chnex, 1, 1, 1);
+
DRIVER_MODULE(cxgbev, t4vf, cxgbev_driver, 0, 0);
MODULE_VERSION(cxgbev, 1);
@@ -994,3 +1056,6 @@ MODULE_VERSION(cxlv, 1);
DRIVER_MODULE(ccv, t6vf, ccv_driver, 0, 0);
MODULE_VERSION(ccv, 1);
+
+DRIVER_MODULE(chev, chvf, chev_driver, 0, 0);
+MODULE_VERSION(chev, 1);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 99e4c222996d..c236ee060bc2 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -89,6 +89,12 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
INP_WLOCK(inp);
toep->tid = tid;
insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
+ if (sc->params.tid_qid_sel_mask != 0) {
+ update_tid_qid_sel(toep->vi, &toep->params, tid);
+ toep->ofld_txq = &sc->sge.ofld_txq[toep->params.txq_idx];
+ toep->ctrlq = &sc->sge.ctrlq[toep->params.ctrlq_idx];
+ }
+
if (inp->inp_flags & INP_DROPPED) {
/* socket closed by the kernel before hw told us it connected */
@@ -205,7 +211,7 @@ static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
sizeof (struct cpl_act_open_req),
sizeof (struct cpl_act_open_req6)
@@ -218,10 +224,14 @@ act_open_cpl_size(struct adapter *sc, int isipv6)
sizeof (struct cpl_t6_act_open_req),
sizeof (struct cpl_t6_act_open_req6)
},
+ {
+ sizeof (struct cpl_t7_act_open_req),
+ sizeof (struct cpl_t7_act_open_req6)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
@@ -255,6 +265,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct offload_settings settings;
struct epoch_tracker et;
uint16_t vid = 0xfff, pcp = 0;
+ uint64_t ntuple;
INP_WLOCK_ASSERT(inp);
KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
@@ -308,10 +319,12 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
V_TID_COOKIE(CPL_COOKIE_TOM);
+ ntuple = select_ntuple(vi, toep->l2te);
if (isipv6) {
struct cpl_act_open_req6 *cpl = wrtod(wr);
struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req6 *cpl7 = (void *)cpl;
if ((inp->inp_vflag & INP_IPV6) == 0)
DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
@@ -323,18 +336,23 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
+ break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
break;
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
@@ -356,23 +374,28 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct cpl_act_open_req *cpl = wrtod(wr);
struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req *cpl7 = (void *)cpl;
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
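For reference, the widened sz_table above is indexed by chip generation, clamped so that T7 and anything newer share the last row. A standalone sketch of just that mapping; the numeric CHELSIO_* values here are placeholders, not the driver's:

	#include <stdio.h>

	enum { CHELSIO_T4 = 4, CHELSIO_T5, CHELSIO_T6, CHELSIO_T7 };

	static int
	imin(int a, int b)
	{
		return (a < b ? a : b);
	}

	int
	main(void)
	{
		/* T4 -> row 0, T5 -> 1, T6 -> 2, T7 and newer clamp to 3. */
		for (int chip = CHELSIO_T4; chip <= CHELSIO_T7 + 1; chip++)
			printf("chip %d -> sz_table row %d\n", chip,
			    imin(chip - CHELSIO_T4, 3));
		return (0);
	}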
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 7a6b1cbdd736..84e31efa8b58 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -127,8 +127,9 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
paramidx = 0;
FLOWC_PARAM(PFNVFN, pfvf);
- FLOWC_PARAM(CH, pi->tx_chan);
- FLOWC_PARAM(PORT, pi->tx_chan);
+	/* Firmware expects the hw port and will translate it to a channel itself. */
+ FLOWC_PARAM(CH, pi->hw_port);
+ FLOWC_PARAM(PORT, pi->hw_port);
FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id);
FLOWC_PARAM(SNDBUF, toep->params.sndbuf);
if (tp) {
@@ -148,6 +149,8 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
KASSERT(paramidx == nparams, ("nparams mismatch"));
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
txsd->tx_credits = howmany(flowclen, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
@@ -215,6 +218,8 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
else
flowc->mnemval[0].val = htobe32(tc_idx);
+ KASSERT(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, flowclen16));
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
@@ -491,6 +496,9 @@ t4_close_conn(struct adapter *sc, struct toepcb *toep)
#define MIN_TX_CREDITS(iso) \
(MIN_OFLD_TX_CREDITS + ((iso) ? MIN_ISO_TX_CREDITS : 0))
+_Static_assert(MAX_OFLD_TX_CREDITS <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
+
/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits, int iso)
@@ -612,6 +620,48 @@ write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
__func__, nsegs, start, stop));
}
+bool
+t4_push_raw_wr(struct adapter *sc, struct toepcb *toep, struct mbuf *m)
+{
+#ifdef INVARIANTS
+ struct inpcb *inp = toep->inp;
+#endif
+ struct wrqe *wr;
+ struct ofld_tx_sdesc *txsd;
+ u_int credits, plen;
+
+ INP_WLOCK_ASSERT(inp);
+ MPASS(mbuf_raw_wr(m));
+ plen = m->m_pkthdr.len;
+ credits = howmany(plen, 16);
+ if (credits > toep->tx_credits)
+ return (false);
+
+ wr = alloc_wrqe(roundup2(plen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL)
+ return (false);
+
+ m_copydata(m, 0, plen, wrtod(wr));
+ m_freem(m);
+
+ toep->tx_credits -= credits;
+ if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
+ toep->flags |= TPF_TX_SUSPENDED;
+
+ KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(credits <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, credits));
+ txsd = &toep->txsd[toep->txsd_pidx];
+ txsd->plen = 0;
+ txsd->tx_credits = credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
+ return (true);
+}
+
/*
* Max number of SGL entries an offload tx work request can have. This is 41
* (1 + 40) for a full 512B work request.
@@ -644,6 +694,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
+ struct mbufq *pduq = &toep->ulp_pduq;
int tx_credits, shove, compl, sowwakeup;
struct ofld_tx_sdesc *txsd;
bool nomap_mbuf_seen;
@@ -688,6 +739,19 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
max_imm = max_imm_payload(tx_credits, 0);
max_nsegs = max_dsgl_nsegs(tx_credits, 0);
+ if (__predict_false((sndptr = mbufq_first(pduq)) != NULL)) {
+ if (!t4_push_raw_wr(sc, toep, sndptr)) {
+ toep->flags |= TPF_TX_SUSPENDED;
+ return;
+ }
+
+ m = mbufq_dequeue(pduq);
+ MPASS(m == sndptr);
+
+ txsd = &toep->txsd[toep->txsd_pidx];
+ continue;
+ }
+
SOCKBUF_LOCK(sb);
sowwakeup = drop;
if (drop) {
@@ -705,6 +769,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
if ((m->m_flags & M_NOTREADY) != 0)
break;
+ if (plen + m->m_len > MAX_OFLD_TX_SDESC_PLEN)
+ break;
if (m->m_flags & M_EXTPG) {
#ifdef KERN_TLS
if (m->m_epg_tls != NULL) {
@@ -870,6 +936,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(plen <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, plen));
txsd->plen = plen;
txsd->tx_credits = credits;
txsd++;
@@ -1211,6 +1279,8 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(plen <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, plen));
txsd->plen = plen;
txsd->tx_credits = credits;
txsd++;
@@ -1240,6 +1310,35 @@ t4_push_data(struct adapter *sc, struct toepcb *toep, int drop)
t4_push_frames(sc, toep, drop);
}
+void
+t4_raw_wr_tx(struct adapter *sc, struct toepcb *toep, struct mbuf *m)
+{
+#ifdef INVARIANTS
+ struct inpcb *inp = toep->inp;
+#endif
+
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * If there are other raw WRs enqueued, enqueue to preserve
+ * FIFO ordering.
+ */
+ if (!mbufq_empty(&toep->ulp_pduq)) {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ return;
+ }
+
+ /*
+ * Cannot call t4_push_data here as that will lock so_snd and
+ * some callers of this run in rx handlers with so_rcv locked.
+ * Instead, just try to transmit this WR.
+ */
+ if (!t4_push_raw_wr(sc, toep, m)) {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ toep->flags |= TPF_TX_SUSPENDED;
+ }
+}
+
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
@@ -1941,35 +2040,55 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
void
-t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
+write_set_tcb_field(struct adapter *sc, void *dst, struct toepcb *toep,
uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
- struct wrqe *wr;
- struct cpl_set_tcb_field *req;
- struct ofld_tx_sdesc *txsd;
+ struct cpl_set_tcb_field *req = dst;
MPASS((cookie & ~M_COOKIE) == 0);
if (reply) {
MPASS(cookie != CPL_COOKIE_RESERVED);
}
- wr = alloc_wrqe(sizeof(*req), wrq);
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
+ if (reply == 0) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = toep->ofld_rxq->iq.abs_id;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
+ req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = htobe64(mask);
+ req->val = htobe64(val);
+}
+
+void
+t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
+ uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
+{
+ struct wrqe *wr;
+ struct ofld_tx_sdesc *txsd;
+ const u_int len = sizeof(struct cpl_set_tcb_field);
+
+ wr = alloc_wrqe(len, wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
}
- req = wrtod(wr);
+ write_set_tcb_field(sc, wrtod(wr), toep, word, mask, val, reply,
+ cookie);
- INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
- req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
- if (reply == 0)
- req->reply_ctrl |= htobe16(F_NO_REPLY);
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
- req->mask = htobe64(mask);
- req->val = htobe64(val);
if (wrq->eq.type == EQ_OFLD) {
txsd = &toep->txsd[toep->txsd_pidx];
- txsd->tx_credits = howmany(sizeof(*req), 16);
+ _Static_assert(howmany(len, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
+ txsd->tx_credits = howmany(len, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits &&
toep->txsd_avail > 0,
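The KASSERTs and _Static_asserts added above guard the new 6-bit tx_credits field: firmware charges work requests in 16-byte credit units, so a single descriptor tops out at 63 credits, i.e. a 1008-byte WR. A small standalone check of that arithmetic, with howmany() redefined locally:

	#include <stdio.h>

	#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
	#define MAX_OFLD_TX_SDESC_CREDITS	((1u << 6) - 1)

	int
	main(void)
	{
		unsigned lens[] = { 16, 512, 1008, 1024 };

		for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
			unsigned credits = howmany(lens[i], 16);

			printf("wr_len %4u -> %2u credits%s\n", lens[i],
			    credits, credits > MAX_OFLD_TX_SDESC_CREDITS ?
			    " (would overflow the 6-bit field)" : "");
		}
		return (0);
	}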
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 2fee8fa91dac..35fb1061d867 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -1655,7 +1655,10 @@ t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
INIT_ULPTX_WR(ulpmc, len, 0, 0);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1785,7 +1788,7 @@ t4_write_page_pods_for_rcvbuf(struct adapter *sc, struct sge_wrq *wrq, int tid,
return (0);
}
-static struct mbuf *
+struct mbuf *
alloc_raw_wr_mbuf(int len)
{
struct mbuf *m;
@@ -1842,7 +1845,10 @@ t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep,
ulpmc = mtod(m, struct ulp_mem_io *);
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1922,7 +1928,10 @@ t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -2013,7 +2022,10 @@ t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
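Each of the page-pod writers above grows the same T7 branch: the ULP_TX memory-write length is carried in 32-byte units (chunk >> 5 equals chunk / 32), and T7 merely relocates the field (V_T7_ULP_MEMIO_DATA_LEN). A sketch of the repeated branch factored into a helper; this helper does not exist in the driver and returns the field before the htobe32() conversion the callers apply:

	/* Hypothetical helper (illustration only). */
	static uint32_t
	example_memio_dlen(struct adapter *sc, uint32_t chunk)
	{
		/* chunk is a multiple of 32; the field is in 32B units. */
		if (chip_id(sc) >= CHELSIO_T7)
			return (V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
		return (V_ULP_MEMIO_DATA_LEN(chunk >> 5));
	}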
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 06c495dcafc3..b879f6883f25 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -508,10 +508,11 @@ send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
V_FW_WR_FLOWID(synqe->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+	/* Firmware expects the hw port and will translate it to a channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
@@ -1507,6 +1508,8 @@ found:
init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx,
&synqe->params);
+ if (sc->params.tid_qid_sel_mask != 0)
+ update_tid_qid_sel(vi, &synqe->params, tid);
/*
* If all goes well t4_syncache_respond will get called during
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index 27c16b9988ae..bbcc1c88c3db 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -61,11 +61,21 @@
static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
- uint64_t val)
+ uint64_t val, int reply, int cookie)
{
struct adapter *sc = td_adapter(toep->td);
+ struct mbuf *m;
+
+ m = alloc_raw_wr_mbuf(sizeof(struct cpl_set_tcb_field));
+ if (m == NULL) {
+ /* XXX */
+ panic("%s: out of memory", __func__);
+ }
- t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
+ write_set_tcb_field(sc, mtod(m, void *), toep, word, mask, val, reply,
+ cookie);
+
+ t4_raw_wr_tx(sc, toep, m);
}
/* TLS and DTLS common routines */
@@ -88,10 +98,9 @@ tls_tx_key(struct toepcb *toep)
static void
t4_set_rx_quiesce(struct toepcb *toep)
{
- struct adapter *sc = td_adapter(toep->td);
- t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS,
- V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
+ t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1),
+ V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
}
/* Clear TF_RX_QUIESCE to re-enable receive. */
@@ -99,7 +108,7 @@ static void
t4_clear_rx_quiesce(struct toepcb *toep)
{
- t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
+ t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0, 0, 0);
}
/* TLS/DTLS content type for CPL SFO */
@@ -145,16 +154,15 @@ get_tp_plen_max(struct ktls_session *tls)
return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
}
-/* Send request to get the key-id */
+/* Send request to save the key in on-card memory. */
static int
tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
int direction)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
struct adapter *sc = td_adapter(toep->td);
- struct ofld_tx_sdesc *txsd;
int keyid;
- struct wrqe *wr;
+ struct mbuf *m;
struct tls_key_req *kwr;
struct tls_keyctx *kctx;
@@ -173,12 +181,12 @@ tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
return (ENOSPC);
}
- wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq);
- if (wr == NULL) {
+ m = alloc_raw_wr_mbuf(TLS_KEY_WR_SZ);
+ if (m == NULL) {
t4_free_tls_keyid(sc, keyid);
return (ENOMEM);
}
- kwr = wrtod(wr);
+ kwr = mtod(m, struct tls_key_req *);
memset(kwr, 0, TLS_KEY_WR_SZ);
t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid,
@@ -190,15 +198,7 @@ tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
tls_ofld->rx_key_addr = keyid;
t4_tls_key_ctx(tls, direction, kctx);
- txsd = &toep->txsd[toep->txsd_pidx];
- txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16);
- txsd->plen = 0;
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
+ t4_raw_wr_tx(sc, toep, m);
return (0);
}
@@ -207,7 +207,7 @@ int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
struct adapter *sc = td_adapter(toep->td);
- int error, explicit_iv_size, mac_first;
+ int error, iv_size, mac_first;
if (!can_tls_offload(sc))
return (EINVAL);
@@ -228,6 +228,21 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
}
}
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE) {
+ return (EPROTONOSUPPORT);
+ }
+
+ /* TLS 1.3 is only supported on T7+. */
+ if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) {
+ if (is_t6(sc)) {
+ return (EPROTONOSUPPORT);
+ }
+ }
+
+ /* Sanity check values in *tls. */
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
/* XXX: Explicitly ignore any provided IV. */
@@ -247,13 +262,10 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EPROTONOSUPPORT);
}
- explicit_iv_size = AES_BLOCK_LEN;
+ iv_size = AES_BLOCK_LEN;
mac_first = 1;
break;
case CRYPTO_AES_NIST_GCM_16:
- if (tls->params.iv_len != SALT_SIZE) {
- return (EINVAL);
- }
switch (tls->params.cipher_key_len) {
case 128 / 8:
case 192 / 8:
@@ -262,20 +274,19 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EINVAL);
}
- explicit_iv_size = 8;
+
+ /*
+ * The IV size for TLS 1.2 is the explicit IV in the
+ * record header. For TLS 1.3 it is the size of the
+ * sequence number.
+ */
+ iv_size = 8;
mac_first = 0;
break;
default:
return (EPROTONOSUPPORT);
}
- /* Only TLS 1.1 and TLS 1.2 are currently supported. */
- if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
- tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
- tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
- return (EPROTONOSUPPORT);
- }
-
/* Bail if we already have a key. */
if (direction == KTLS_TX) {
if (toep->tls.tx_key_addr != -1)
@@ -289,6 +300,7 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
if (error)
return (error);
+ toep->tls.tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
if (direction == KTLS_TX) {
toep->tls.scmd0.seqno_numivs =
(V_SCMD_SEQ_NO_CTRL(3) |
@@ -298,14 +310,14 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
- V_SCMD_IV_SIZE(explicit_iv_size / 2));
+ V_SCMD_IV_SIZE(iv_size / 2));
toep->tls.scmd0.ivgen_hdrlen =
(V_SCMD_IV_GEN_CTRL(1) |
V_SCMD_KEY_CTX_INLINE(0) |
V_SCMD_TLS_FRAG_ENABLE(1));
- toep->tls.iv_len = explicit_iv_size;
+ toep->tls.iv_len = iv_size;
toep->tls.frag_size = tls->params.max_frame_len;
toep->tls.fcplenmax = get_tp_plen_max(tls);
toep->tls.expn_per_ulp = tls->params.tls_hlen +
@@ -352,7 +364,8 @@ tls_uninit_toep(struct toepcb *toep)
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
- unsigned int plen, unsigned int expn, uint8_t credits, int shove)
+ unsigned int plen, unsigned int expn, uint8_t credits, int shove,
+ int num_ivs)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
unsigned int len = plen + expn;
@@ -365,7 +378,7 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
txwr->plen = htobe32(len);
txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
- txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
+ txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(num_ivs) |
V_FW_TLSTX_DATA_WR_EXP(expn) |
V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
V_FW_TLSTX_DATA_WR_IVDSGL(0) |
@@ -381,20 +394,20 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
- struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
+ struct tls_hdr *tls_hdr, unsigned int plen, uint8_t rec_type,
+ uint64_t seqno)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
int data_type, seglen;
seglen = plen;
- data_type = tls_content_type(tls_hdr->type);
+ data_type = tls_content_type(rec_type);
cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
cpl->pld_len = htobe32(plen);
if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
- cpl->type_protover = htobe32(
- V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
+ cpl->type_protover = htobe32(V_CPL_TX_TLS_SFO_TYPE(rec_type));
cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
V_SCMD_NUM_IVS(1));
cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
@@ -494,9 +507,11 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
+ struct mbufq *pduq = &toep->ulp_pduq;
int tls_size, tx_credits, shove, sowwakeup;
struct ofld_tx_sdesc *txsd;
char *buf;
+ bool tls13;
INP_WLOCK_ASSERT(inp);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
@@ -532,10 +547,23 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
return;
}
+ tls13 = toep->tls.tls13;
txsd = &toep->txsd[toep->txsd_pidx];
for (;;) {
tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
+ if (__predict_false((m = mbufq_first(pduq)) != NULL)) {
+ if (!t4_push_raw_wr(sc, toep, m)) {
+ toep->flags |= TPF_TX_SUSPENDED;
+ return;
+ }
+
+ (void)mbufq_dequeue(pduq);
+
+ txsd = &toep->txsd[toep->txsd_pidx];
+ continue;
+ }
+
SOCKBUF_LOCK(sb);
sowwakeup = drop;
if (drop) {
@@ -586,9 +614,11 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
sizeof(struct cpl_tx_tls_sfo) +
sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);
- /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
- MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
- wr_len += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
+ MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
+ wr_len += AES_BLOCK_LEN;
+ }
/* Account for SGL in work request length. */
nsegs = count_ext_pgs_segs(m);
@@ -658,8 +688,10 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
expn_size = m->m_epg_hdrlen +
m->m_epg_trllen;
tls_size = m->m_len - expn_size;
- write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
- write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);
+ write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove,
+ tls13 ? 0 : 1);
+ write_tlstx_cpl(cpl, toep, thdr, tls_size,
+ tls13 ? m->m_epg_record_type : thdr->type, m->m_epg_seqno);
idata = (struct ulptx_idata *)(cpl + 1);
idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
@@ -670,10 +702,12 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);
- /* Copy IV. */
buf = (char *)(memrd + 1);
- memcpy(buf, thdr + 1, toep->tls.iv_len);
- buf += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Copy IV. */
+ memcpy(buf, thdr + 1, toep->tls.iv_len);
+ buf += AES_BLOCK_LEN;
+ }
write_ktlstx_sgl(buf, m, nsegs);
@@ -694,6 +728,8 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(m->m_len <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, m->m_len));
txsd->plen = m->m_len;
txsd->tx_credits = credits;
txsd++;
@@ -793,8 +829,8 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct sockbuf *sb;
struct mbuf *tls_data;
struct tls_get_record *tgr;
- struct mbuf *control;
- int pdu_length, trailer_len;
+ struct mbuf *control, *n;
+ int pdu_length, resid, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
int len;
#endif
@@ -842,7 +878,9 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
/*
* The payload of this CPL is the TLS header followed by
- * additional fields.
+ * additional fields. For TLS 1.3 the type field holds the
+ * inner record type and the length field has been updated to
+ * strip the inner record type, padding, and MAC.
*/
KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
("%s: payload too small", __func__));
@@ -854,7 +892,14 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: sequence mismatch", __func__));
}
- /* Report decryption errors as EBADMSG. */
+ /*
+ * Report decryption errors as EBADMSG.
+ *
+ * XXX: To support rekeying for TLS 1.3 this will eventually
+ * have to be updated to recrypt the data with the old key and
+ * then decrypt with the new key. Punt for now as KTLS
+ * doesn't yet support rekeying.
+ */
if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
__func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
@@ -872,6 +917,33 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
+ /* For TLS 1.3 trim the header and trailer. */
+ if (toep->tls.tls13) {
+ KASSERT(tls_data != NULL, ("%s: TLS 1.3 record without data",
+ __func__));
+ MPASS(tls_data->m_pkthdr.len == pdu_length);
+ m_adj(tls_data, sizeof(struct tls_record_layer));
+ if (tls_data->m_pkthdr.len > be16toh(tls_hdr_pkt->length))
+ tls_data->m_pkthdr.len = be16toh(tls_hdr_pkt->length);
+ resid = tls_data->m_pkthdr.len;
+ if (resid == 0) {
+ m_freem(tls_data);
+ tls_data = NULL;
+ } else {
+ for (n = tls_data;; n = n->m_next) {
+ if (n->m_len < resid) {
+ resid -= n->m_len;
+ continue;
+ }
+
+ n->m_len = resid;
+ m_freem(n->m_next);
+ n->m_next = NULL;
+ break;
+ }
+ }
+ }
+
/* Handle data received after the socket is closed. */
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
@@ -1076,33 +1148,60 @@ out:
}
/*
- * Send a work request setting multiple TCB fields to enable
- * ULP_MODE_TLS.
+ * Send a work request setting one or more TCB fields to partially or
+ * fully enable ULP_MODE_TLS.
+ *
+ * - If resid == 0, the socket buffer ends at a record boundary
+ * (either empty or contains one or more complete records). Switch
+ * to ULP_MODE_TLS (if not already) and enable TLS decryption.
+ *
+ * - If resid != 0, the socket buffer contains a partial record. In
+ * this case, switch to ULP_MODE_TLS partially and configure the TCB
+ * to pass along the remaining resid bytes undecrypted. Once they
+ * arrive, this is called again with resid == 0 and enables TLS
+ * decryption.
*/
static void
-tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
+tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno,
+ size_t resid)
{
- struct wrqe *wr;
+ struct mbuf *m;
struct work_request_hdr *wrh;
struct ulp_txpkt *ulpmc;
int fields, key_offset, len;
- KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
- ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));
+ /*
+ * If we are already in ULP_MODE_TLS, then we should now be at
+ * a record boundary and ready to finish enabling TLS RX.
+ */
+ KASSERT(resid == 0 || ulp_mode(toep) == ULP_MODE_NONE,
+ ("%s: tid %d needs %zu more data but already ULP_MODE_TLS",
+ __func__, toep->tid, resid));
fields = 0;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* 2 writes for the overlay region */
+ fields += 2;
+ }
- /* 2 writes for the overlay region */
- fields += 2;
+ if (resid == 0) {
+ /* W_TCB_TLS_SEQ */
+ fields++;
- /* W_TCB_TLS_SEQ */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ } else {
+ /* W_TCB_PDU_LEN */
+ fields++;
- /* W_TCB_ULP_RAW */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ }
- /* W_TCB_ULP_TYPE */
- fields ++;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* W_TCB_ULP_TYPE */
+ fields ++;
+ }
/* W_TCB_T_FLAGS */
fields++;
@@ -1111,59 +1210,94 @@ tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
KASSERT(len <= SGE_MAX_WR_LEN,
("%s: WR with %d TCB field updates too large", __func__, fields));
- wr = alloc_wrqe(len, toep->ctrlq);
- if (wr == NULL) {
+ m = alloc_raw_wr_mbuf(len);
+ if (m == NULL) {
/* XXX */
panic("%s: out of memory", __func__);
}
- wrh = wrtod(wr);
- INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */
+ wrh = mtod(m, struct work_request_hdr *);
+ INIT_ULPTX_WRH(wrh, len, 1, toep->tid); /* atomic */
ulpmc = (struct ulp_txpkt *)(wrh + 1);
- /*
- * Clear the TLS overlay region: 1023:832.
- *
- * Words 26/27 are always set to zero. Words 28/29
- * contain seqno and are set when enabling TLS
- * decryption. Word 30 is zero and Word 31 contains
- * the keyid.
- */
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
- 0xffffffffffffffff, 0);
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /*
+ * Clear the TLS overlay region: 1023:832.
+ *
+ * Words 26/27 are always set to zero. Words 28/29
+ * contain seqno and are set when enabling TLS
+ * decryption. Word 30 is zero and Word 31 contains
+ * the keyid.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
+ 0xffffffffffffffff, 0);
- /*
- * RX key tags are an index into the key portion of MA
- * memory stored as an offset from the base address in
- * units of 64 bytes.
- */
- key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
- 0xffffffffffffffff,
- (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
-
- CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
- toep->tid, seqno);
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TLS_SEQ,
- V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_RAW,
- V_TCB_ULP_RAW(M_TCB_ULP_RAW),
- V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
- V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
-
- toep->flags &= ~TPF_TLS_STARTING;
- toep->flags |= TPF_TLS_RECEIVE;
-
- /* Set the ULP mode to ULP_MODE_TLS. */
- toep->params.ulp_mode = ULP_MODE_TLS;
- ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE,
- V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ /*
+ * RX key tags are an index into the key portion of MA
+ * memory stored as an offset from the base address in
+ * units of 64 bytes.
+ */
+ key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
+ 0xffffffffffffffff,
+ (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
+ }
+
+ if (resid == 0) {
+ /*
+ * The socket buffer is empty or only contains
+ * complete TLS records: Set the sequence number and
+ * enable TLS decryption.
+ */
+ CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
+ toep->tid, seqno);
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_RX_TLS_SEQ, V_TCB_RX_TLS_SEQ(M_TCB_RX_TLS_SEQ),
+ V_TCB_RX_TLS_SEQ(seqno));
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
+ V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
+
+ toep->flags &= ~TPF_TLS_STARTING;
+ toep->flags |= TPF_TLS_RECEIVE;
+ } else {
+ /*
+		 * The socket buffer ends with a partial record that has
+		 * a full header and needs at least 6 bytes.
+		 *
+		 * Set the PDU length. This treats the 'resid' bytes as
+		 * a TLS PDU, so the first 5 bytes are a fake header and
+		 * the remainder is counted as the PDU length.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_PDU_LEN, V_TCB_PDU_LEN(M_TCB_PDU_LEN),
+ V_TCB_PDU_LEN(resid - sizeof(struct tls_hdr)));
+ CTR3(KTR_CXGBE, "%s: tid %d setting PDU_LEN to %zu",
+ __func__, toep->tid, resid - sizeof(struct tls_hdr));
+
+ /* Clear all bits in ULP_RAW except for ENABLE. */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
+
+ /* Wait for 'resid' bytes to be delivered as CPL_RX_DATA. */
+ toep->tls.rx_resid = resid;
+ }
+
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* Set the ULP mode to ULP_MODE_TLS. */
+ toep->params.ulp_mode = ULP_MODE_TLS;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_TYPE, V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
+ V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ }
/* Clear TF_RX_QUIESCE. */
ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
V_TF_RX_QUIESCE(1), 0);
- t4_wrq_tx(sc, wr);
+ t4_raw_wr_tx(sc, toep, m);
}
/*
@@ -1190,7 +1324,8 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
* size of a TLS record, re-enable receive and pause again once
* we get more data to try again.
*/
- if (!have_header || resid != 0) {
+ if (!have_header || (resid != 0 && (resid < sizeof(struct tls_hdr) ||
+ is_t6(sc)))) {
CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
toep->tid);
toep->flags &= ~TPF_TLS_RX_QUIESCED;
@@ -1198,7 +1333,7 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
return;
}
- tls_update_tcb(sc, toep, seqno);
+ tls_update_tcb(sc, toep, seqno, resid);
}
void
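The reworked gate in tls_check_rx_sockbuf() plus the resid parameter to tls_update_tcb() implement a three-way decision: wait for more data, enable TLS RX at a record boundary, or (on T7) program PDU_LEN for a trailing partial record. A standalone restatement of that decision, assuming a 5-byte TLS header; is_t6 is passed as a plain flag here instead of the driver's adapter test:

	#include <stdbool.h>
	#include <stddef.h>

	#define TLS_HDR_LEN	5	/* assumed sizeof(struct tls_hdr) */

	enum tls_rx_action { WAIT_FOR_DATA, ENABLE_NOW, ENABLE_PARTIAL };

	static enum tls_rx_action
	tls_rx_decide(bool have_header, size_t resid, bool is_t6)
	{
		/* T6 and short residues must wait for a record boundary. */
		if (!have_header ||
		    (resid != 0 && (resid < TLS_HDR_LEN || is_t6)))
			return (WAIT_FOR_DATA);
		/* resid == 0 is a boundary; otherwise partial-record setup. */
		return (resid == 0 ? ENABLE_NOW : ENABLE_PARTIAL);
	}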
diff --git a/sys/dev/cxgbe/tom/t4_tls.h b/sys/dev/cxgbe/tom/t4_tls.h
index 753a30890fdc..6faf946e9e3c 100644
--- a/sys/dev/cxgbe/tom/t4_tls.h
+++ b/sys/dev/cxgbe/tom/t4_tls.h
@@ -74,6 +74,7 @@ struct tls_ofld_info {
unsigned short adjusted_plen;
unsigned short expn_per_ulp;
unsigned short pdus_per_ulp;
+ bool tls13;
struct tls_scmd scmd0;
u_int iv_len;
unsigned int tx_key_info_size;
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 9b09facd05a7..53a945f8b4cc 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -182,7 +182,7 @@ init_toepcb(struct vi_info *vi, struct toepcb *toep)
}
toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
- toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
+ toep->ctrlq = &sc->sge.ctrlq[cp->ctrlq_idx];
tls_init_toep(toep);
MPASS(ulp_mode(toep) != ULP_MODE_TCPDDP);
@@ -494,8 +494,15 @@ send_get_tcb(struct adapter *sc, u_int tid)
bzero(cpl, sizeof(*cpl));
INIT_TP_WR(cpl, tid);
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
- cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
- V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
+ if (chip_id(sc) >= CHELSIO_T7) {
+ cpl->reply_ctrl =
+ htobe16(V_T7_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ cpl->reply_ctrl =
+ htobe16(V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
cpl->cookie = 0xff;
commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);
@@ -882,6 +889,8 @@ send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
flowc->mnemval[0].val = htobe32(toep->params.emss);
txsd = &toep->txsd[toep->txsd_pidx];
+ _Static_assert(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
@@ -1219,7 +1228,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
- ntuple |= (uint64_t)e->lport << tp->port_shift;
+ ntuple |= (uint64_t)e->hw_port << tp->port_shift;
if (tp->protocol_shift >= 0)
ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
@@ -1230,10 +1239,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
tp->vnic_shift;
}
- if (is_t4(sc))
- return (htobe32((uint32_t)ntuple));
- else
- return (htobe64(V_FILTER_TUPLE(ntuple)));
+ return (ntuple);
}
/*
@@ -1324,6 +1330,9 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
*/
cp->mtu_idx = find_best_mtu_idx(sc, inc, s);
+ /* Control queue. */
+ cp->ctrlq_idx = vi->pi->port_id;
+
/* Tx queue for this connection. */
if (s->txq == QUEUE_RANDOM)
q_idx = arc4random();
@@ -1436,6 +1445,32 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
cp->emss = 0;
}
+void
+update_tid_qid_sel(struct vi_info *vi, struct conn_params *cp, int tid)
+{
+ struct adapter *sc = vi->adapter;
+ const int mask = sc->params.tid_qid_sel_mask;
+ struct sge_ofld_txq *ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ uint32_t ngroup;
+ int g, nqpg;
+
+ cp->ctrlq_idx = ofld_txq_group(tid, mask);
+ CTR(KTR_CXGBE, "tid %u is on core %u", tid, cp->ctrlq_idx);
+ if ((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask))
+ return;
+
+ ngroup = 1 << bitcount32(mask);
+ MPASS(vi->nofldtxq % ngroup == 0);
+ g = ofld_txq_group(tid, mask);
+ nqpg = vi->nofldtxq / ngroup;
+ cp->txq_idx = vi->first_ofld_txq + g * nqpg + arc4random() % nqpg;
+#ifdef INVARIANTS
+ MPASS(cp->txq_idx < vi->first_ofld_txq + vi->nofldtxq);
+ ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ MPASS((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask));
+#endif
+}
+
int
negative_advice(int status)
{
@@ -2231,6 +2266,98 @@ t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
return (0);
}
+/*
+ * Request/response structure used to find out the adapter offloading
+ * a socket.
+ */
+struct find_offload_adapter_data {
+ struct socket *so;
+ struct adapter *sc; /* result */
+};
+
+static void
+find_offload_adapter_cb(struct adapter *sc, void *arg)
+{
+ struct find_offload_adapter_data *fa = arg;
+ struct socket *so = fa->so;
+ struct tom_data *td = sc->tom_softc;
+ struct tcpcb *tp;
+ struct inpcb *inp;
+
+	/* Non-TCP sockets were filtered out earlier. */
+ MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
+
+ if (fa->sc != NULL)
+ return; /* Found already. */
+
+ if (td == NULL)
+ return; /* TOE not enabled on this adapter. */
+
+ inp = sotoinpcb(so);
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) == 0) {
+ tp = intotcpcb(inp);
+ if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
+ fa->sc = sc; /* Found. */
+ }
+ INP_WUNLOCK(inp);
+}
+
+struct adapter *
+find_offload_adapter(struct socket *so)
+{
+ struct find_offload_adapter_data fa;
+
+ fa.sc = NULL;
+ fa.so = so;
+ t4_iterate(find_offload_adapter_cb, &fa);
+ return (fa.sc);
+}
+
+void
+send_txdataplen_max_flowc_wr(struct adapter *sc, struct toepcb *toep,
+ int maxlen)
+{
+ struct wrqe *wr;
+ struct fw_flowc_wr *flowc;
+ const u_int nparams = 1;
+ u_int flowclen;
+ struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
+
+ CTR(KTR_CXGBE, "%s: tid %u maxlen=%d", __func__, toep->tid, maxlen);
+
+ flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
+
+ wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL) {
+ /* XXX */
+ panic("%s: allocation failure.", __func__);
+ }
+ flowc = wrtod(wr);
+ memset(flowc, 0, wr->wr_len);
+
+ flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
+ V_FW_FLOWC_WR_NPARAMS(nparams));
+ flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
+ V_FW_WR_FLOWID(toep->tid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[0].val = htobe32(maxlen);
+
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
+ txsd->tx_credits = howmany(flowclen, 16);
+ txsd->plen = 0;
+ KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
+ ("%s: not enough credits (%d)", __func__, toep->tx_credits));
+ toep->tx_credits -= txsd->tx_credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
+}
+
static int
t4_tom_mod_load(void)
{
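update_tid_qid_sel() above keeps a connection's txq and ctrlq in the queue group selected by its tid. A standalone walk-through of the grouping math, assuming tid_qid_sel_mask = 0x3, eight offload txqs, and ofld_txq_group() reduced to (tid & mask) to match the cntxt_id comparison above:

	#include <stdio.h>

	#define ofld_txq_group(tid, mask)	((tid) & (mask))	/* assumed */

	int
	main(void)
	{
		const int mask = 0x3, nofldtxq = 8, first_ofld_txq = 0;
		const int ngroup = 1 << __builtin_popcount(mask);
		const int nqpg = nofldtxq / ngroup;	/* queues per group */

		for (int tid = 100; tid < 104; tid++) {
			int g = ofld_txq_group(tid, mask);

			printf("tid %d -> group %d -> txq in [%d, %d]\n",
			    tid, g, first_ofld_txq + g * nqpg,
			    first_ofld_txq + g * nqpg + nqpg - 1);
		}
		return (0);
	}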
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 6295a3484b9f..c8c2d432b8f1 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -113,6 +113,7 @@ struct conn_params {
int8_t mtu_idx;
int8_t ulp_mode;
int8_t tx_align;
+ int8_t ctrlq_idx; /* ctrlq = &sc->sge.ctrlq[ctrlq_idx] */
int16_t txq_idx; /* ofld_txq = &sc->sge.ofld_txq[txq_idx] */
int16_t rxq_idx; /* ofld_rxq = &sc->sge.ofld_rxq[rxq_idx] */
int16_t l2t_idx;
@@ -122,10 +123,13 @@ struct conn_params {
};
struct ofld_tx_sdesc {
- uint32_t plen; /* payload length */
- uint8_t tx_credits; /* firmware tx credits (unit is 16B) */
+ uint32_t plen : 26; /* payload length */
+ uint32_t tx_credits : 6; /* firmware tx credits (unit is 16B) */
};
+#define MAX_OFLD_TX_SDESC_PLEN ((1u << 26) - 1)
+#define MAX_OFLD_TX_SDESC_CREDITS ((1u << 6) - 1)
+
struct ppod_region {
u_int pr_start;
u_int pr_len;
@@ -474,11 +478,14 @@ int select_rcv_wscale(void);
void init_conn_params(struct vi_info *, struct offload_settings *,
struct in_conninfo *, struct socket *, const struct tcp_options *, int16_t,
struct conn_params *cp);
+void update_tid_qid_sel(struct vi_info *, struct conn_params *, int);
__be64 calc_options0(struct vi_info *, struct conn_params *);
__be32 calc_options2(struct vi_info *, struct conn_params *);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
int negative_advice(int);
int add_tid_to_history(struct adapter *, u_int);
+struct adapter *find_offload_adapter(struct socket *);
+void send_txdataplen_max_flowc_wr(struct adapter *, struct toepcb *, int);
void t4_pcb_detach(struct toedev *, struct tcpcb *);
/* t4_connect.c */
@@ -526,6 +533,10 @@ int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, struct toepcb *,
uint16_t, uint64_t, uint64_t, int, int);
void t4_push_pdus(struct adapter *, struct toepcb *, int);
+bool t4_push_raw_wr(struct adapter *, struct toepcb *, struct mbuf *);
+void t4_raw_wr_tx(struct adapter *, struct toepcb *, struct mbuf *);
+void write_set_tcb_field(struct adapter *, void *, struct toepcb *, uint16_t,
+ uint64_t, uint64_t, int, int);
/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
@@ -551,6 +562,7 @@ int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_enable_ddp_rcv(struct socket *, struct toepcb *);
void t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
+struct mbuf *alloc_raw_wr_mbuf(int);
void ddp_assert_empty(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
@@ -574,4 +586,10 @@ int tls_tx_key(struct toepcb *);
void tls_uninit_toep(struct toepcb *);
int tls_alloc_ktls(struct toepcb *, struct ktls_session *, int);
+/* t4_tpt.c */
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+int t4_pblpool_create(struct adapter *);
+void t4_pblpool_destroy(struct adapter *);
+
#endif
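The repacked ofld_tx_sdesc above fits both fields into one 32-bit word: 26 bits bound plen to roughly 64 MB and 6 bits bound tx_credits to 63 (a 1008-byte work request), which is exactly what the new MAX_OFLD_TX_SDESC_* limits and assertions enforce. A standalone check of the packing:

	#include <stdint.h>
	#include <stdio.h>

	struct ofld_tx_sdesc {
		uint32_t plen : 26;		/* payload length */
		uint32_t tx_credits : 6;	/* 16-byte credit units */
	};

	int
	main(void)
	{
		printf("sizeof(struct ofld_tx_sdesc) = %zu\n",
		    sizeof(struct ofld_tx_sdesc));
		printf("max plen = %u bytes\n", (1u << 26) - 1);
		printf("max credits = %u (WR of %u bytes)\n",
		    (1u << 6) - 1, ((1u << 6) - 1) * 16);
		return (0);
	}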
diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.c b/sys/dev/cxgbe/tom/t4_tom_l2t.c
index 3fd0d5ca41d4..e245c2b6fd5b 100644
--- a/sys/dev/cxgbe/tom/t4_tom_l2t.c
+++ b/sys/dev/cxgbe/tom/t4_tom_l2t.c
@@ -403,7 +403,7 @@ t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
l2_store(sa, e);
e->ifp = ifp;
e->hash = hash;
- e->lport = pi->lport;
+ e->hw_port = pi->hw_port;
e->wrq = &sc->sge.ctrlq[pi->port_id];
e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
atomic_store_rel_int(&e->refcnt, 1);
diff --git a/sys/dev/cyapa/cyapa.c b/sys/dev/cyapa/cyapa.c
index 50fa4faa560a..ed755f992949 100644
--- a/sys/dev/cyapa/cyapa.c
+++ b/sys/dev/cyapa/cyapa.c
@@ -761,42 +761,60 @@ again:
/*
* Generate report
*/
- c0 = 0;
- if (delta_x < 0)
- c0 |= 0x10;
- if (delta_y < 0)
- c0 |= 0x20;
- c0 |= 0x08;
- if (but & CYAPA_FNGR_LEFT)
- c0 |= 0x01;
- if (but & CYAPA_FNGR_MIDDLE)
- c0 |= 0x04;
- if (but & CYAPA_FNGR_RIGHT)
- c0 |= 0x02;
-
- fifo_write_char(sc, &sc->rfifo, c0);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
- switch(sc->zenabled) {
- case 1:
- /* Z axis all 8 bits */
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
- break;
- case 2:
- /*
- * Z axis low 4 bits + 4th button and 5th button
- * (high 2 bits must be left 0). Auto-scale
- * delta_z to fit to avoid a wrong-direction
- * overflow (don't try to retain the remainder).
- */
- while (delta_z > 7 || delta_z < -8)
- delta_z >>= 1;
- c0 = (uint8_t)delta_z & 0x0F;
+ if (sc->mode.level == 1) {
+ c0 = MOUSE_SYS_SYNC;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= MOUSE_SYS_BUTTON1UP;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= MOUSE_SYS_BUTTON2UP;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= MOUSE_SYS_BUTTON3UP;
fifo_write_char(sc, &sc->rfifo, c0);
- break;
- default:
- /* basic PS/2 */
- break;
+ fifo_write_char(sc, &sc->rfifo, delta_x >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_y >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_x - (delta_x >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_y - (delta_y >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_z >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_z - (delta_z >> 1));
+ fifo_write_char(sc, &sc->rfifo, MOUSE_SYS_EXTBUTTONS);
+ } else {
+ c0 = 0;
+ if (delta_x < 0)
+ c0 |= 0x10;
+ if (delta_y < 0)
+ c0 |= 0x20;
+ c0 |= 0x08;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= 0x01;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= 0x04;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= 0x02;
+
+ fifo_write_char(sc, &sc->rfifo, c0);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
+ switch(sc->zenabled) {
+ case 1:
+ /* Z axis all 8 bits */
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
+ break;
+ case 2:
+ /*
+ * Z axis low 4 bits + 4th button and 5th button
+ * (high 2 bits must be left 0). Auto-scale
+ * delta_z to fit to avoid a wrong-direction
+ * overflow (don't try to retain the remainder).
+ */
+ while (delta_z > 7 || delta_z < -8)
+ delta_z >>= 1;
+ c0 = (uint8_t)delta_z & 0x0F;
+ fifo_write_char(sc, &sc->rfifo, c0);
+ break;
+ default:
+ /* basic PS/2 */
+ break;
+ }
}
cyapa_notify(sc);
}
@@ -1205,6 +1223,11 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
((mousemode_t *)data)->packetsize =
MOUSE_PS2_PACKETSIZE;
break;
+ case 1:
+ ((mousemode_t *)data)->protocol = MOUSE_PROTO_SYSMOUSE;
+ ((mousemode_t *)data)->packetsize =
+ MOUSE_SYS_PACKETSIZE;
+ break;
case 2:
((mousemode_t *)data)->protocol = MOUSE_PROTO_PS2;
((mousemode_t *)data)->packetsize =
@@ -1223,7 +1246,7 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
error = EINVAL;
break;
}
- sc->mode.level = *(int *)data ? 2 : 0;
+ sc->mode.level = *(int *)data;
sc->zenabled = sc->mode.level ? 1 : 0;
break;
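The new level-1 branch in cyapa emits an 8-byte sysmouse packet: a status byte, the x and y deltas each split across two bytes, two z bytes, and an extended-buttons byte. Splitting a delta as (d >> 1) followed by (d - (d >> 1)) makes the two halves sum back to the original value by construction, including for odd and negative deltas. A tiny self-check of that identity:

    #include <assert.h>

    /* The two bytes the driver writes always reassemble to the delta. */
    static void
    check_split(int d)
    {
            int hi = d >> 1;        /* first byte of the pair */
            int lo = d - (d >> 1);  /* second byte of the pair */

            assert(hi + lo == d);
    }

    int
    main(void)
    {
            for (int d = -256; d <= 256; d++)
                    check_split(d);
            return (0);
    }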
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index c34897e3b31a..634f48171c3e 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1707,10 +1707,9 @@ s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val && !hw->mac.forced_speed_duplex)
+ if (ret_val)
return ret_val;
- }
- if (!hw->mac.autoneg || (ret_val && hw->mac.forced_speed_duplex)) {
+ } else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index f0ef6051fab1..247cf9d7fed3 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -407,6 +407,7 @@ static int em_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
int);
static void em_if_queues_free(if_ctx_t);
+static uint64_t em_if_get_vf_counter(if_ctx_t, ift_counter);
static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
static void em_if_init(if_ctx_t);
static void em_if_stop(if_ctx_t);
@@ -440,6 +441,7 @@ static int igb_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static void em_if_multi_set(if_ctx_t);
static void em_if_update_admin_status(if_ctx_t);
static void em_if_debug(if_ctx_t);
+static void em_update_vf_stats_counters(struct e1000_softc *);
static void em_update_stats_counters(struct e1000_softc *);
static void em_add_hw_stats(struct e1000_softc *);
static int em_if_set_promisc(if_ctx_t, int);
@@ -1377,6 +1379,11 @@ em_if_attach_post(if_ctx_t ctx)
em_reset(ctx);
/* Initialize statistics */
+ if (sc->vf_ifp)
+ sc->ustats.vf_stats = (struct e1000_vf_stats){};
+ else
+ sc->ustats.stats = (struct e1000_hw_stats){};
+
em_update_stats_counters(sc);
hw->mac.get_link_status = 1;
em_if_update_admin_status(ctx);
@@ -2000,18 +2007,7 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
(sc->hw.phy.media_type == e1000_media_type_internal_serdes)) {
if (sc->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
- switch (sc->link_speed) {
- case 10:
- ifmr->ifm_active |= IFM_10_FL;
- break;
- case 100:
- ifmr->ifm_active |= IFM_100_FX;
- break;
- case 1000:
- default:
- ifmr->ifm_active |= fiber_type | IFM_FDX;
- break;
- }
+ ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (sc->link_speed) {
case 10:
@@ -2024,12 +2020,11 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
ifmr->ifm_active |= IFM_1000_T;
break;
}
+ if (sc->link_duplex == FULL_DUPLEX)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
}
-
- if (sc->link_duplex == FULL_DUPLEX)
- ifmr->ifm_active |= IFM_FDX;
- else
- ifmr->ifm_active |= IFM_HDX;
}
/*********************************************************************
@@ -2063,26 +2058,6 @@ em_if_media_change(if_ctx_t ctx)
sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
- }
- break;
- case IFM_10_T:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
- }
- break;
- case IFM_100_FX:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
@@ -2090,7 +2065,7 @@ em_if_media_change(if_ctx_t ctx)
else
sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
break;
- case IFM_10_FL:
+ case IFM_10_T:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
@@ -4700,122 +4675,176 @@ em_disable_aspm(struct e1000_softc *sc)
static void
em_update_stats_counters(struct e1000_softc *sc)
{
- u64 prev_xoffrxc = sc->stats.xoffrxc;
+ struct e1000_hw_stats *stats;
+ u64 prev_xoffrxc;
+
+ if (sc->vf_ifp) {
+ em_update_vf_stats_counters(sc);
+ return;
+ }
+
+ stats = &sc->ustats.stats;
+ prev_xoffrxc = stats->xoffrxc;
if(sc->hw.phy.media_type == e1000_media_type_copper ||
(E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
- sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
- sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
- }
- sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
- sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
- sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
- sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
-
- sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
- sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
- sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
- sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
- sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
- sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
- sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
- sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
+ stats->symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(&sc->hw, E1000_SEC);
+ }
+ stats->crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(&sc->hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(&sc->hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(&sc->hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
+ stats->xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
/*
** For watchdog management we need to know if we have been
** paused during the last interval, so capture that here.
*/
- if (sc->stats.xoffrxc != prev_xoffrxc)
+ if (stats->xoffrxc != prev_xoffrxc)
sc->shared->isc_pause_frames = 1;
- sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
- sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
- sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
- sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
- sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
- sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
- sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
- sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
- sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
- sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
- sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
- sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
+ stats->xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
/* For the 64-bit byte counters the low dword must be read first. */
/* Both registers clear on the read of the high dword */
- sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
+ stats->gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
((u64)E1000_READ_REG(&sc->hw, E1000_GORCH) << 32);
- sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
+ stats->gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
((u64)E1000_READ_REG(&sc->hw, E1000_GOTCH) << 32);
- sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
- sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
- sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
- sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
- sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
-
- sc->stats.mgprc += E1000_READ_REG(&sc->hw, E1000_MGTPRC);
- sc->stats.mgpdc += E1000_READ_REG(&sc->hw, E1000_MGTPDC);
- sc->stats.mgptc += E1000_READ_REG(&sc->hw, E1000_MGTPTC);
-
- sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
- sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
-
- sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
- sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
- sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
- sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
- sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
- sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
- sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
- sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
- sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
- sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
+ stats->rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(&sc->hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
+
+ stats->mgprc += E1000_READ_REG(&sc->hw, E1000_MGTPRC);
+ stats->mgpdc += E1000_READ_REG(&sc->hw, E1000_MGTPDC);
+ stats->mgptc += E1000_READ_REG(&sc->hw, E1000_MGTPTC);
+
+ stats->tor += E1000_READ_REG(&sc->hw, E1000_TORH);
+ stats->tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
+
+ stats->tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
+ stats->ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
/* Interrupt Counts */
- sc->stats.iac += E1000_READ_REG(&sc->hw, E1000_IAC);
- sc->stats.icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
- sc->stats.icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
- sc->stats.ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
- sc->stats.ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
- sc->stats.ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
- sc->stats.ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
- sc->stats.icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
- sc->stats.icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
+ stats->iac += E1000_READ_REG(&sc->hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
if (sc->hw.mac.type >= e1000_82543) {
- sc->stats.algnerrc +=
+ stats->algnerrc +=
E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
- sc->stats.rxerrc +=
+ stats->rxerrc +=
E1000_READ_REG(&sc->hw, E1000_RXERRC);
- sc->stats.tncrs +=
+ stats->tncrs +=
E1000_READ_REG(&sc->hw, E1000_TNCRS);
- sc->stats.cexterr +=
+ stats->cexterr +=
E1000_READ_REG(&sc->hw, E1000_CEXTERR);
- sc->stats.tsctc +=
+ stats->tsctc +=
E1000_READ_REG(&sc->hw, E1000_TSCTC);
- sc->stats.tsctfc +=
+ stats->tsctfc +=
E1000_READ_REG(&sc->hw, E1000_TSCTFC);
}
}
+static void
+em_update_vf_stats_counters(struct e1000_softc *sc)
+{
+ struct e1000_vf_stats *stats;
+
+ if (sc->link_speed == 0)
+ return;
+
+ stats = &sc->ustats.vf_stats;
+
+ UPDATE_VF_REG(E1000_VFGPRC,
+ stats->last_gprc, stats->gprc);
+ UPDATE_VF_REG(E1000_VFGORC,
+ stats->last_gorc, stats->gorc);
+ UPDATE_VF_REG(E1000_VFGPTC,
+ stats->last_gptc, stats->gptc);
+ UPDATE_VF_REG(E1000_VFGOTC,
+ stats->last_gotc, stats->gotc);
+ UPDATE_VF_REG(E1000_VFMPRC,
+ stats->last_mprc, stats->mprc);
+}
+
+static uint64_t
+em_if_get_vf_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct e1000_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IERRORS:
+ return (sc->dropped_pkts);
+ case IFCOUNTER_OERRORS:
+ return (if_get_counter_default(ifp, cnt) +
+ sc->watchdog_events);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
static uint64_t
em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct e1000_softc *sc = iflib_get_softc(ctx);
+ struct e1000_hw_stats *stats;
if_t ifp = iflib_get_ifp(ctx);
+ if (sc->vf_ifp)
+ return (em_if_get_vf_counter(ctx, cnt));
+
+ stats = &sc->ustats.stats;
+
switch (cnt) {
case IFCOUNTER_COLLISIONS:
- return (sc->stats.colc);
+ return (stats->colc);
case IFCOUNTER_IERRORS:
- return (sc->dropped_pkts + sc->stats.rxerrc +
- sc->stats.crcerrs + sc->stats.algnerrc +
- sc->stats.ruc + sc->stats.roc +
- sc->stats.mpc + sc->stats.cexterr);
+ return (sc->dropped_pkts + stats->rxerrc +
+ stats->crcerrs + stats->algnerrc +
+ stats->ruc + stats->roc +
+ stats->mpc + stats->cexterr);
case IFCOUNTER_OERRORS:
- return (sc->stats.ecol + sc->stats.latecol +
- sc->watchdog_events);
+ return (if_get_counter_default(ifp, cnt) +
+ stats->ecol + stats->latecol + sc->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -4916,7 +4945,7 @@ em_add_hw_stats(struct e1000_softc *sc)
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct e1000_hw_stats *stats = &sc->stats;
+ struct e1000_hw_stats *stats;
struct sysctl_oid *stat_node, *queue_node, *int_node;
struct sysctl_oid_list *stat_list, *queue_list, *int_list;
@@ -5007,6 +5036,33 @@ em_add_hw_stats(struct e1000_softc *sc)
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
+ /*
+ ** The VF adapter has a very limited set of stats
+ ** since it's not managing the metal, so to speak.
+ */
+ if (sc->vf_ifp) {
+ struct e1000_vf_stats *vfstats = &sc->ustats.vf_stats;
+
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+ CTLFLAG_RD, &vfstats->gprc,
+ "Good Packets Received");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ CTLFLAG_RD, &vfstats->gptc,
+ "Good Packets Transmitted");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ CTLFLAG_RD, &vfstats->gorc,
+ "Good Octets Received");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &vfstats->gotc,
+ "Good Octets Transmitted");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+ CTLFLAG_RD, &vfstats->mprc,
+ "Multicast Packets Received");
+ return;
+ }
+
+ stats = &sc->ustats.stats;
+
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
CTLFLAG_RD, &stats->ecol,
"Excessive collisions");
@@ -5023,147 +5079,147 @@ em_add_hw_stats(struct e1000_softc *sc)
CTLFLAG_RD, &stats->colc,
"Collision Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
- CTLFLAG_RD, &sc->stats.symerrs,
+ CTLFLAG_RD, &stats->symerrs,
"Symbol Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
- CTLFLAG_RD, &sc->stats.sec,
+ CTLFLAG_RD, &stats->sec,
"Sequence Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
- CTLFLAG_RD, &sc->stats.dc,
+ CTLFLAG_RD, &stats->dc,
"Defer Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
- CTLFLAG_RD, &sc->stats.mpc,
+ CTLFLAG_RD, &stats->mpc,
"Missed Packets");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
- CTLFLAG_RD, &sc->stats.rlec,
+ CTLFLAG_RD, &stats->rlec,
"Receive Length Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
- CTLFLAG_RD, &sc->stats.rnbc,
+ CTLFLAG_RD, &stats->rnbc,
"Receive No Buffers");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
- CTLFLAG_RD, &sc->stats.ruc,
+ CTLFLAG_RD, &stats->ruc,
"Receive Undersize");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
- CTLFLAG_RD, &sc->stats.rfc,
+ CTLFLAG_RD, &stats->rfc,
"Fragmented Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
- CTLFLAG_RD, &sc->stats.roc,
+ CTLFLAG_RD, &stats->roc,
"Oversized Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
- CTLFLAG_RD, &sc->stats.rjc,
+ CTLFLAG_RD, &stats->rjc,
"Recevied Jabber");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
- CTLFLAG_RD, &sc->stats.rxerrc,
+ CTLFLAG_RD, &stats->rxerrc,
"Receive Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
- CTLFLAG_RD, &sc->stats.crcerrs,
+ CTLFLAG_RD, &stats->crcerrs,
"CRC errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
- CTLFLAG_RD, &sc->stats.algnerrc,
+ CTLFLAG_RD, &stats->algnerrc,
"Alignment Errors");
/* On 82575 these are collision counts */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
- CTLFLAG_RD, &sc->stats.cexterr,
+ CTLFLAG_RD, &stats->cexterr,
"Collision/Carrier extension errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
- CTLFLAG_RD, &sc->stats.xonrxc,
+ CTLFLAG_RD, &stats->xonrxc,
"XON Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
- CTLFLAG_RD, &sc->stats.xontxc,
+ CTLFLAG_RD, &stats->xontxc,
"XON Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
- CTLFLAG_RD, &sc->stats.xoffrxc,
+ CTLFLAG_RD, &stats->xoffrxc,
"XOFF Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
- CTLFLAG_RD, &sc->stats.xofftxc,
+ CTLFLAG_RD, &stats->xofftxc,
"XOFF Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
- CTLFLAG_RD, &sc->stats.fcruc,
+ CTLFLAG_RD, &stats->fcruc,
"Unsupported Flow Control Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
- CTLFLAG_RD, &sc->stats.mgprc,
+ CTLFLAG_RD, &stats->mgprc,
"Management Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
- CTLFLAG_RD, &sc->stats.mgpdc,
+ CTLFLAG_RD, &stats->mgpdc,
"Management Packets Dropped");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
- CTLFLAG_RD, &sc->stats.mgptc,
+ CTLFLAG_RD, &stats->mgptc,
"Management Packets Transmitted");
/* Packet Reception Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
- CTLFLAG_RD, &sc->stats.tpr,
+ CTLFLAG_RD, &stats->tpr,
"Total Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
- CTLFLAG_RD, &sc->stats.gprc,
+ CTLFLAG_RD, &stats->gprc,
"Good Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
- CTLFLAG_RD, &sc->stats.bprc,
+ CTLFLAG_RD, &stats->bprc,
"Broadcast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
- CTLFLAG_RD, &sc->stats.mprc,
+ CTLFLAG_RD, &stats->mprc,
"Multicast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
- CTLFLAG_RD, &sc->stats.prc64,
+ CTLFLAG_RD, &stats->prc64,
"64 byte frames received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
- CTLFLAG_RD, &sc->stats.prc127,
+ CTLFLAG_RD, &stats->prc127,
"65-127 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
- CTLFLAG_RD, &sc->stats.prc255,
+ CTLFLAG_RD, &stats->prc255,
"128-255 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
- CTLFLAG_RD, &sc->stats.prc511,
+ CTLFLAG_RD, &stats->prc511,
"256-511 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
- CTLFLAG_RD, &sc->stats.prc1023,
+ CTLFLAG_RD, &stats->prc1023,
"512-1023 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
- CTLFLAG_RD, &sc->stats.prc1522,
+ CTLFLAG_RD, &stats->prc1522,
"1023-1522 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
- CTLFLAG_RD, &sc->stats.gorc,
+ CTLFLAG_RD, &stats->gorc,
"Good Octets Received");
/* Packet Transmission Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &sc->stats.gotc,
+ CTLFLAG_RD, &stats->gotc,
"Good Octets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
- CTLFLAG_RD, &sc->stats.tpt,
+ CTLFLAG_RD, &stats->tpt,
"Total Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
- CTLFLAG_RD, &sc->stats.gptc,
+ CTLFLAG_RD, &stats->gptc,
"Good Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
- CTLFLAG_RD, &sc->stats.bptc,
+ CTLFLAG_RD, &stats->bptc,
"Broadcast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
- CTLFLAG_RD, &sc->stats.mptc,
+ CTLFLAG_RD, &stats->mptc,
"Multicast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
- CTLFLAG_RD, &sc->stats.ptc64,
+ CTLFLAG_RD, &stats->ptc64,
"64 byte frames transmitted ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
- CTLFLAG_RD, &sc->stats.ptc127,
+ CTLFLAG_RD, &stats->ptc127,
"65-127 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
- CTLFLAG_RD, &sc->stats.ptc255,
+ CTLFLAG_RD, &stats->ptc255,
"128-255 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
- CTLFLAG_RD, &sc->stats.ptc511,
+ CTLFLAG_RD, &stats->ptc511,
"256-511 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
- CTLFLAG_RD, &sc->stats.ptc1023,
+ CTLFLAG_RD, &stats->ptc1023,
"512-1023 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
- CTLFLAG_RD, &sc->stats.ptc1522,
+ CTLFLAG_RD, &stats->ptc1522,
"1024-1522 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
- CTLFLAG_RD, &sc->stats.tsctc,
+ CTLFLAG_RD, &stats->tsctc,
"TSO Contexts Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
- CTLFLAG_RD, &sc->stats.tsctfc,
+ CTLFLAG_RD, &stats->tsctfc,
"TSO Contexts Failed");
/* Interrupt Stats */
@@ -5172,39 +5228,39 @@ em_add_hw_stats(struct e1000_softc *sc)
int_list = SYSCTL_CHILDREN(int_node);
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
- CTLFLAG_RD, &sc->stats.iac,
+ CTLFLAG_RD, &stats->iac,
"Interrupt Assertion Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
- CTLFLAG_RD, &sc->stats.icrxptc,
+ CTLFLAG_RD, &stats->icrxptc,
"Interrupt Cause Rx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
- CTLFLAG_RD, &sc->stats.icrxatc,
+ CTLFLAG_RD, &stats->icrxatc,
"Interrupt Cause Rx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
- CTLFLAG_RD, &sc->stats.ictxptc,
+ CTLFLAG_RD, &stats->ictxptc,
"Interrupt Cause Tx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
- CTLFLAG_RD, &sc->stats.ictxatc,
+ CTLFLAG_RD, &stats->ictxatc,
"Interrupt Cause Tx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
- CTLFLAG_RD, &sc->stats.ictxqec,
+ CTLFLAG_RD, &stats->ictxqec,
"Interrupt Cause Tx Queue Empty Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
- CTLFLAG_RD, &sc->stats.ictxqmtc,
+ CTLFLAG_RD, &stats->ictxqmtc,
"Interrupt Cause Tx Queue Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
- CTLFLAG_RD, &sc->stats.icrxdmtc,
+ CTLFLAG_RD, &stats->icrxdmtc,
"Interrupt Cause Rx Desc Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
- CTLFLAG_RD, &sc->stats.icrxoc,
+ CTLFLAG_RD, &stats->icrxoc,
"Interrupt Cause Receiver Overrun Count");
}
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 52bfed0f9a42..582e8d9c6327 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -370,6 +370,19 @@
#define EM_NVM_MSIX_N_MASK (0x7 << EM_NVM_MSIX_N_SHIFT)
#define EM_NVM_MSIX_N_SHIFT 7
+/*
+ * VFs use a 32-bit counter that rolls over.
+ */
+#define UPDATE_VF_REG(reg, last, cur) \
+do { \
+ u32 new = E1000_READ_REG(&sc->hw, reg); \
+ if (new < last) \
+ cur += 0x100000000LL; \
+ last = new; \
+ cur &= 0xFFFFFFFF00000000LL; \
+ cur |= new; \
+} while (0)
+
struct e1000_softc;
struct em_int_delay_info {
@@ -546,7 +559,11 @@ struct e1000_softc {
unsigned long rx_overruns;
unsigned long watchdog_events;
- struct e1000_hw_stats stats;
+ union {
+ struct e1000_hw_stats stats; /* !sc->vf_ifp */
+ struct e1000_vf_stats vf_stats; /* sc->vf_ifp */
+ } ustats;
+
u16 vf_ifp;
};
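UPDATE_VF_REG above widens a rolling 32-bit hardware counter into a monotonic 64-bit software total: when the fresh reading is smaller than the previous one the counter must have wrapped, so a carry of 2^32 is added; the low 32 bits are then replaced with the fresh reading. The same logic as a standalone function (a sketch; the real macro reads the register through E1000_READ_REG and captures 'sc' from the caller's scope):

    #include <stdint.h>

    /*
     * Fold a fresh 32-bit reading into the running 64-bit total,
     * using 'last' to detect rollover.  Mirrors UPDATE_VF_REG.
     */
    static void
    update_vf_counter(uint32_t new, uint32_t *last, uint64_t *cur)
    {
            if (new < *last)                /* wrapped since last poll */
                    *cur += 0x100000000ULL;
            *last = new;
            *cur &= 0xFFFFFFFF00000000ULL;  /* keep accumulated high word */
            *cur |= new;                    /* splice in fresh low word */
    }

The scheme stays correct only if the counter is polled at least once per wrap interval. The new ustats union works because a given softc is strictly either a VF (vf_stats) or a PF (stats), never both.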
diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c
index 3a5d6ec23282..53002f9d73ce 100644
--- a/sys/dev/enetc/if_enetc.c
+++ b/sys/dev/enetc/if_enetc.c
@@ -848,7 +848,7 @@ enetc_hash_vid(uint16_t vid)
bool bit;
int i;
- for (i = 0;i < 6;i++) {
+ for (i = 0; i < 6; i++) {
bit = vid & BIT(i);
bit ^= !!(vid & BIT(i + 6));
hash |= bit << i;
@@ -1020,7 +1020,7 @@ enetc_msix_intr_assign(if_ctx_t ctx, int msix)
ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
}
vector = 0;
- for (i = 0;i < sc->tx_num_queues; i++, vector++) {
+ for (i = 0; i < sc->tx_num_queues; i++, vector++) {
tx_queue = &sc->tx_queues[i];
snprintf(irq_name, sizeof(irq_name), "txq%d", i);
iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
@@ -1130,7 +1130,7 @@ enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
}
/* Now add remaining descriptors. */
- for (;i < ipi->ipi_nsegs; i++) {
+ for (; i < ipi->ipi_nsegs; i++) {
desc = &queue->ring[pidx];
bzero(desc, sizeof(*desc));
desc->addr = segs[i].ds_addr;
@@ -1343,7 +1343,8 @@ enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_IERRORS:
return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
case IFCOUNTER_OERRORS:
- return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
+ return (if_get_counter_default(ifp, cnt) +
+ ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
default:
return (if_get_counter_default(ifp, cnt));
}
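enetc_hash_vid(), whose loop is reformatted above, folds a 12-bit VLAN ID into a 6-bit hash by XORing bit i with bit i+6. The same fold as a standalone helper (a sketch; BIT() is the usual single-bit macro):

    #include <stdint.h>

    #define BIT(n)  (1u << (n))

    /* hash bit i = vid bit i XOR vid bit (i + 6), for i in 0..5 */
    static uint8_t
    hash_vid(uint16_t vid)
    {
            uint8_t hash = 0;

            for (int i = 0; i < 6; i++) {
                    uint8_t bit = !!(vid & BIT(i)) ^ !!(vid & BIT(i + 6));
                    hash |= bit << i;
            }
            return (hash);
    }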
diff --git a/sys/dev/fdt/fdt_common.c b/sys/dev/fdt/fdt_common.c
index 1fea4c6f1392..f43551c6310e 100644
--- a/sys/dev/fdt/fdt_common.c
+++ b/sys/dev/fdt/fdt_common.c
@@ -62,8 +62,6 @@
SYSCTL_NODE(_hw, OID_AUTO, fdt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Flattened Device Tree");
-struct fdt_ic_list fdt_ic_list_head = SLIST_HEAD_INITIALIZER(fdt_ic_list_head);
-
static int
fdt_get_range_by_busaddr(phandle_t node, u_long addr, u_long *base,
u_long *size)
diff --git a/sys/dev/fdt/fdt_common.h b/sys/dev/fdt/fdt_common.h
index ece54290a6ad..f597233f9771 100644
--- a/sys/dev/fdt/fdt_common.h
+++ b/sys/dev/fdt/fdt_common.h
@@ -59,13 +59,6 @@ struct fdt_fixup_entry {
extern struct fdt_fixup_entry fdt_fixup_table[];
#endif
-extern SLIST_HEAD(fdt_ic_list, fdt_ic) fdt_ic_list_head;
-struct fdt_ic {
- SLIST_ENTRY(fdt_ic) fdt_ics;
- ihandle_t iph;
- device_t dev;
-};
-
#if defined(FDT_DTB_STATIC)
extern u_char fdt_static_dtb;
#endif
diff --git a/sys/dev/fdt/fdt_slicer.c b/sys/dev/fdt/fdt_slicer.c
index 3ba4eddf8b61..50112db5cfae 100644
--- a/sys/dev/fdt/fdt_slicer.c
+++ b/sys/dev/fdt/fdt_slicer.c
@@ -45,7 +45,7 @@
static int fill_slices(device_t dev, const char *provider,
struct flash_slice *slices, int *slices_num);
-static void fdt_slicer_init(void);
+static void fdt_slicer_init(void *);
static int
fill_slices_from_node(phandle_t node, struct flash_slice *slices, int *count)
@@ -138,7 +138,7 @@ fill_slices(device_t dev, const char *provider __unused,
}
static void
-fdt_slicer_init(void)
+fdt_slicer_init(void *dummy __unused)
{
flash_register_slicer(fill_slices, FLASH_SLICES_TYPE_NAND, false);
@@ -147,7 +147,7 @@ fdt_slicer_init(void)
}
static void
-fdt_slicer_cleanup(void)
+fdt_slicer_cleanup(void *dummy __unused)
{
flash_register_slicer(NULL, FLASH_SLICES_TYPE_NAND, true);
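The fdt_slicer change gives the init and cleanup routines the void-pointer argument that SYSINIT/SYSUNINIT handlers receive, rather than a mismatched function type. A sketch of the registration pattern, assuming the usual SYSINIT usage (names hypothetical):

    #include <sys/param.h>
    #include <sys/kernel.h>

    /* SYSINIT passes its 'udata' argument to the handler; unused here. */
    static void
    example_init(void *dummy __unused)
    {
            /* one-time setup */
    }

    static void
    example_cleanup(void *dummy __unused)
    {
            /* teardown */
    }

    SYSINIT(example, SI_SUB_DRIVERS, SI_ORDER_ANY, example_init, NULL);
    SYSUNINIT(example, SI_SUB_DRIVERS, SI_ORDER_ANY, example_cleanup, NULL);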
diff --git a/sys/dev/ftgpio/ftgpio.c b/sys/dev/ftgpio/ftgpio.c
index 7acfdd5b900e..68787b54bb16 100644
--- a/sys/dev/ftgpio/ftgpio.c
+++ b/sys/dev/ftgpio/ftgpio.c
@@ -398,12 +398,13 @@ ftgpio_attach(device_t dev)
FTGPIO_VERBOSE_PRINTF(sc->dev, "groups GPIO1..GPIO6 enabled\n");
GPIO_UNLOCK(sc);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
GPIO_LOCK_DESTROY(sc);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
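ftgpio is one of several controllers in this diff (bytgpio, chvgpio and dwgpio follow) converted from the removed gpiobus_attach_bus() helper to the explicit two-step form: add the gpiobus child first, then attach all children once the controller is fully set up. The shape of the converted attach tail, reduced to a hypothetical driver:

    /* Sketch only; 'mydrv' stands in for the converted drivers. */
    static int
    mydrv_attach(device_t dev)
    {
            struct mydrv_softc *sc = device_get_softc(dev);

            /* ... controller-specific initialization ... */

            sc->busdev = gpiobus_add_bus(dev);   /* add the gpiobus child */
            if (sc->busdev == NULL) {
                    /* undo driver-local setup here */
                    return (ENXIO);
            }

            bus_attach_children(dev);            /* attach children now */
            return (0);
    }

The split keeps child attachment in the controller's own attach routine, after all driver state is initialized.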
diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c
index 94f4e5771266..0d2455cab399 100644
--- a/sys/dev/gpio/acpi_gpiobus.c
+++ b/sys/dev/gpio/acpi_gpiobus.c
@@ -37,6 +37,7 @@
#include <dev/gpio/gpiobusvar.h>
#include <dev/gpio/acpi_gpiobusvar.h>
#include <dev/gpio/gpiobus_internal.h>
+#include <sys/sbuf.h>
#include "gpiobus_if.h"
@@ -52,12 +53,11 @@ struct acpi_gpiobus_ctx {
struct acpi_gpiobus_ivar
{
- struct gpiobus_ivar gpiobus; /* Must come first */
- ACPI_HANDLE dev_handle; /* ACPI handle for bus */
- uint32_t flags;
+ struct gpiobus_ivar gpiobus;
+ ACPI_HANDLE handle;
};
-static uint32_t
+uint32_t
acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *gpio_res)
{
uint32_t flags = 0;
@@ -150,70 +150,24 @@ acpi_gpiobus_enumerate_res(ACPI_RESOURCE *res, void *context)
return (AE_OK);
}
-static struct acpi_gpiobus_ivar *
-acpi_gpiobus_setup_devinfo(device_t bus, device_t child,
- ACPI_RESOURCE_GPIO *gpio_res)
-{
- struct acpi_gpiobus_ivar *devi;
-
- devi = malloc(sizeof(*devi), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (devi == NULL)
- return (NULL);
- resource_list_init(&devi->gpiobus.rl);
-
- devi->flags = acpi_gpiobus_convflags(gpio_res);
- if (acpi_quirks & ACPI_Q_AEI_NOPULL)
- devi->flags &= ~GPIO_PIN_PULLUP;
-
- devi->gpiobus.npins = 1;
- if (gpiobus_alloc_ivars(&devi->gpiobus) != 0) {
- free(devi, M_DEVBUF);
- return (NULL);
- }
-
- for (int i = 0; i < devi->gpiobus.npins; i++)
- devi->gpiobus.pins[i] = gpio_res->PinTable[i];
-
- return (devi);
-}
-
static ACPI_STATUS
acpi_gpiobus_enumerate_aei(ACPI_RESOURCE *res, void *context)
{
ACPI_RESOURCE_GPIO *gpio_res = &res->Data.Gpio;
- struct acpi_gpiobus_ctx *ctx = context;
- device_t bus = ctx->sc->sc_busdev;
- device_t child;
- struct acpi_gpiobus_ivar *devi;
+ uint32_t *npins = context, *pins = npins + 1;
- /* Check that we have a GpioInt object. */
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
return (AE_OK);
if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
return (AE_OK);
- /* Add a child. */
- child = device_add_child_ordered(bus, 0, "gpio_aei", DEVICE_UNIT_ANY);
- if (child == NULL)
- return (AE_OK);
- devi = acpi_gpiobus_setup_devinfo(bus, child, gpio_res);
- if (devi == NULL) {
- device_delete_child(bus, child);
- return (AE_OK);
- }
- device_set_ivars(child, devi);
-
- for (int i = 0; i < devi->gpiobus.npins; i++) {
- if (GPIOBUS_PIN_SETFLAGS(bus, child, 0, devi->flags &
- ~GPIO_INTR_MASK)) {
- device_delete_child(bus, child);
- return (AE_OK);
- }
- }
-
- /* Pass ACPI information to children. */
- devi->dev_handle = ctx->dev_handle;
-
+ for (int i = 0; i < gpio_res->PinTableLength; i++)
+ pins[(*npins)++] = gpio_res->PinTable[i];
return (AE_OK);
}
@@ -296,6 +250,63 @@ err:
return (AE_BAD_PARAMETER);
}
+static void
+acpi_gpiobus_attach_aei(struct acpi_gpiobus_softc *sc, ACPI_HANDLE handle)
+{
+ struct acpi_gpiobus_ivar *devi;
+ ACPI_HANDLE aei_handle;
+ device_t child;
+ uint32_t *pins;
+ ACPI_STATUS status;
+ int err;
+
+ status = AcpiGetHandle(handle, "_AEI", &aei_handle);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* pins[0] specifies the length of the array. */
+ pins = mallocarray(sc->super_sc.sc_npins + 1,
+ sizeof(uint32_t), M_DEVBUF, M_WAITOK);
+ pins[0] = 0;
+
+ status = AcpiWalkResources(handle, "_AEI",
+ acpi_gpiobus_enumerate_aei, pins);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to enumerate AEI resources\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ child = BUS_ADD_CHILD(sc->super_sc.sc_busdev, 0, "gpio_aei",
+ DEVICE_UNIT_ANY);
+ if (child == NULL) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to add gpio_aei child\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ devi = device_get_ivars(child);
+ devi->gpiobus.npins = pins[0];
+ devi->handle = aei_handle;
+
+ err = gpiobus_alloc_ivars(&devi->gpiobus);
+ if (err != 0) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to allocate gpio_aei ivars\n");
+ device_delete_child(sc->super_sc.sc_busdev, child);
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ for (int i = 0; i < pins[0]; i++)
+ devi->gpiobus.pins[i] = pins[i + 1];
+ free(pins, M_DEVBUF);
+
+ bus_attach_children(sc->super_sc.sc_busdev);
+}
+
static int
acpi_gpiobus_probe(device_t dev)
{
@@ -353,13 +364,8 @@ acpi_gpiobus_attach(device_t dev)
if (ACPI_FAILURE(status))
device_printf(dev, "Failed to enumerate GPIO resources\n");
- /* Look for AEI children */
- status = AcpiWalkResources(handle, "_AEI", acpi_gpiobus_enumerate_aei,
- &ctx);
-
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- device_printf(dev, "Failed to enumerate AEI resources\n");
-
+ /* Look for AEI child */
+ acpi_gpiobus_attach_aei(sc, handle);
return (0);
}
@@ -383,16 +389,14 @@ acpi_gpiobus_detach(device_t dev)
}
static int
-acpi_gpiobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result)
{
struct acpi_gpiobus_ivar *devi = device_get_ivars(child);
switch (which) {
case ACPI_GPIOBUS_IVAR_HANDLE:
- *result = (uintptr_t)devi->dev_handle;
- break;
- case ACPI_GPIOBUS_IVAR_FLAGS:
- *result = (uintptr_t)devi->flags;
+ *result = (uintptr_t)devi->handle;
break;
default:
return (gpiobus_read_ivar(dev, child, which, result));
@@ -401,6 +405,28 @@ acpi_gpiobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *resul
return (0);
}
+static device_t
+acpi_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct acpi_gpiobus_ivar)));
+}
+
+static int
+acpi_gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
+{
+ struct acpi_gpiobus_ivar *devi;
+ int err;
+
+ err = gpiobus_child_location(bus, child, sb);
+ if (err != 0)
+ return (err);
+
+ devi = device_get_ivars(child);
+ sbuf_printf(sb, " handle=%s", acpi_name(devi->handle));
+ return (0);
+}
+
static device_method_t acpi_gpiobus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_gpiobus_probe),
@@ -409,6 +435,8 @@ static device_method_t acpi_gpiobus_methods[] = {
/* Bus interface */
DEVMETHOD(bus_read_ivar, acpi_gpiobus_read_ivar),
+ DEVMETHOD(bus_add_child, acpi_gpiobus_add_child),
+ DEVMETHOD(bus_child_location, acpi_gpiobus_child_location),
DEVMETHOD_END
};
diff --git a/sys/dev/gpio/acpi_gpiobusvar.h b/sys/dev/gpio/acpi_gpiobusvar.h
index f8d502eab9d1..288e8bd0f2af 100644
--- a/sys/dev/gpio/acpi_gpiobusvar.h
+++ b/sys/dev/gpio/acpi_gpiobusvar.h
@@ -33,16 +33,16 @@
#include <contrib/dev/acpica/include/acpi.h>
enum acpi_gpiobus_ivars {
- ACPI_GPIOBUS_IVAR_HANDLE = 10600,
- ACPI_GPIOBUS_IVAR_FLAGS,
+ ACPI_GPIOBUS_IVAR_HANDLE = 10600
};
#define ACPI_GPIOBUS_ACCESSOR(var, ivar, type) \
__BUS_ACCESSOR(acpi_gpiobus, var, ACPI_GPIOBUS, ivar, type)
ACPI_GPIOBUS_ACCESSOR(handle, HANDLE, ACPI_HANDLE)
-ACPI_GPIOBUS_ACCESSOR(flags, FLAGS, uint32_t)
#undef ACPI_GPIOBUS_ACCESSOR
+uint32_t acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *);
+
#endif /* __ACPI_GPIOBUS_H__ */
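With the FLAGS ivar gone (consumers now call acpi_gpiobus_convflags() directly), the header keeps a single accessor. For reference, ACPI_GPIOBUS_ACCESSOR(handle, HANDLE, ACPI_HANDLE) expands via __BUS_ACCESSOR to a typed wrapper around BUS_READ_IVAR, roughly:

    /* Approximate expansion; the real macro lives in sys/bus.h. */
    static __inline ACPI_HANDLE
    acpi_gpiobus_get_handle(device_t dev)
    {
            uintptr_t v;

            BUS_READ_IVAR(device_get_parent(dev), dev,
                ACPI_GPIOBUS_IVAR_HANDLE, &v);
            return ((ACPI_HANDLE)v);
    }

This is the accessor gpioaei.c calls in its attach routine below.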
diff --git a/sys/dev/gpio/bytgpio.c b/sys/dev/gpio/bytgpio.c
index f7b2a73ec6cb..5d685c155a03 100644
--- a/sys/dev/gpio/bytgpio.c
+++ b/sys/dev/gpio/bytgpio.c
@@ -608,7 +608,7 @@ bytgpio_attach(device_t dev)
sc->sc_pad_funcs[pin] = val & BYTGPIO_PCONF0_FUNC_MASK;
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
BYTGPIO_LOCK_DESTROY(sc);
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -616,6 +616,7 @@ bytgpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
error:
diff --git a/sys/dev/gpio/chvgpio.c b/sys/dev/gpio/chvgpio.c
index 199ad4d6f373..3273aad9242b 100644
--- a/sys/dev/gpio/chvgpio.c
+++ b/sys/dev/gpio/chvgpio.c
@@ -441,7 +441,7 @@ chvgpio_attach(device_t dev)
bus_write_4(sc->sc_mem_res, CHVGPIO_INTERRUPT_MASK, 0);
bus_write_4(sc->sc_mem_res, CHVGPIO_INTERRUPT_STATUS, 0xffff);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
CHVGPIO_LOCK_DESTROY(sc);
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -451,6 +451,7 @@ chvgpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/dwgpio/dwgpio.c b/sys/dev/gpio/dwgpio/dwgpio.c
index 5acb99ca591e..3908113d5fd4 100644
--- a/sys/dev/gpio/dwgpio/dwgpio.c
+++ b/sys/dev/gpio/dwgpio/dwgpio.c
@@ -167,12 +167,13 @@ dwgpio_attach(device_t dev)
snprintf(sc->gpio_pins[i].gp_name, GPIOMAXNAME,
"dwgpio%d.%d", device_get_unit(dev), i);
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
mtx_destroy(&sc->sc_mtx);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m
index 5501b2b5c0e7..0b6988ceba79 100644
--- a/sys/dev/gpio/gpio_if.m
+++ b/sys/dev/gpio/gpio_if.m
@@ -62,6 +62,22 @@ CODE {
return (0);
}
+
+ static int
+ gpio_default_get_pin_list(device_t dev, uint32_t *pin_list)
+ {
+ uint32_t maxpin;
+ int err;
+
+ err = GPIO_PIN_MAX(dev, &maxpin);
+ if (err != 0)
+ return (ENXIO);
+
+ for (int i = 0; i <= maxpin; i++)
+ pin_list[i] = i;
+
+ return (0);
+ }
};
HEADER {
@@ -185,3 +201,13 @@ METHOD int pin_config_32 {
uint32_t num_pins;
uint32_t *pin_flags;
} DEFAULT gpio_default_nosupport;
+
+#
+# Get the controller's pin numbers. pin_list is expected to be an array with
+# at least GPIO_PIN_MAX() + 1 elements. The default populates pin_list with
+# pin numbers 0 through GPIO_PIN_MAX() inclusive.
+#
+METHOD int get_pin_list {
+ device_t dev;
+ uint32_t *pin_list;
+} DEFAULT gpio_default_get_pin_list;
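The default implementation above assumes a controller whose pins are numbered densely from 0 to GPIO_PIN_MAX(). A controller with a sparse or remapped pin space would override the method; a hypothetical example:

    /* Hypothetical controller that exposes only even-numbered pads. */
    static int
    mygpio_get_pin_list(device_t dev, uint32_t *pin_list)
    {
            uint32_t maxpin;
            int err;

            err = GPIO_PIN_MAX(dev, &maxpin);
            if (err != 0)
                    return (ENXIO);

            for (uint32_t i = 0; i <= maxpin; i++)
                    pin_list[i] = i * 2;    /* hardware pad number */

            return (0);
    }

registered in the driver's method table with DEVMETHOD(gpio_get_pin_list, mygpio_get_pin_list).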
diff --git a/sys/dev/gpio/gpioaei.c b/sys/dev/gpio/gpioaei.c
index ecae8ccaf2fa..7b97277aaf61 100644
--- a/sys/dev/gpio/gpioaei.c
+++ b/sys/dev/gpio/gpioaei.c
@@ -45,13 +45,21 @@ enum gpio_aei_type {
ACPI_AEI_TYPE_EVT
};
-struct gpio_aei_softc {
- ACPI_HANDLE handle;
- enum gpio_aei_type type;
- int pin;
+struct gpio_aei_ctx {
+ SLIST_ENTRY(gpio_aei_ctx) next;
struct resource * intr_res;
- int intr_rid;
void * intr_cookie;
+ ACPI_HANDLE handle;
+ gpio_pin_t gpio;
+ uint32_t pin;
+ int intr_rid;
+ enum gpio_aei_type type;
+};
+
+struct gpio_aei_softc {
+ SLIST_HEAD(, gpio_aei_ctx) aei_ctx;
+ ACPI_HANDLE dev_handle;
+ device_t dev;
};
static int
@@ -65,69 +73,157 @@ gpio_aei_probe(device_t dev)
static void
gpio_aei_intr(void * arg)
{
- struct gpio_aei_softc * sc = arg;
+ struct gpio_aei_ctx * ctx = arg;
/* Ask ACPI to run the appropriate _EVT, _Exx or _Lxx method. */
- if (sc->type == ACPI_AEI_TYPE_EVT)
- acpi_SetInteger(sc->handle, NULL, sc->pin);
+ if (ctx->type == ACPI_AEI_TYPE_EVT)
+ acpi_SetInteger(ctx->handle, NULL, ctx->pin);
else
- AcpiEvaluateObject(sc->handle, NULL, NULL, NULL);
+ AcpiEvaluateObject(ctx->handle, NULL, NULL, NULL);
+}
+
+static ACPI_STATUS
+gpio_aei_enumerate(ACPI_RESOURCE * res, void * context)
+{
+ ACPI_RESOURCE_GPIO * gpio_res = &res->Data.Gpio;
+ struct gpio_aei_softc * sc = context;
+ uint32_t flags, maxpin;
+ device_t busdev;
+ int err;
+
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
+ if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ return (AE_OK);
+ if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return (AE_OK);
+
+ flags = acpi_gpiobus_convflags(gpio_res);
+ if (acpi_quirks & ACPI_Q_AEI_NOPULL)
+ flags &= ~GPIO_PIN_PULLUP;
+
+ err = GPIO_PIN_MAX(acpi_get_device(sc->dev_handle), &maxpin);
+ if (err != 0)
+ return (AE_ERROR);
+
+ busdev = GPIO_GET_BUS(acpi_get_device(sc->dev_handle));
+ for (int i = 0; i < gpio_res->PinTableLength; i++) {
+ struct gpio_aei_ctx * ctx;
+ uint32_t pin = gpio_res->PinTable[i];
+
+ if (__predict_false(pin > maxpin)) {
+ device_printf(sc->dev,
+ "Invalid pin 0x%x, max: 0x%x (bad ACPI tables?)\n",
+ pin, maxpin);
+ continue;
+ }
+
+ ctx = malloc(sizeof(struct gpio_aei_ctx), M_DEVBUF, M_WAITOK);
+ ctx->type = ACPI_AEI_TYPE_UNKNOWN;
+ if (pin <= 255) {
+ char objname[5]; /* "_EXX" or "_LXX" */
+ sprintf(objname, "_%c%02X",
+ (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin);
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, objname,
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_ELX;
+ }
+
+ if (ctx->type == ACPI_AEI_TYPE_UNKNOWN) {
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, "_EVT",
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_EVT;
+ else {
+ device_printf(sc->dev,
+ "AEI Device type is unknown for pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+ }
+
+ err = gpio_pin_get_by_bus_pinnum(busdev, pin, &ctx->gpio);
+ if (err != 0) {
+ device_printf(sc->dev, "Cannot acquire pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = gpio_pin_setflags(ctx->gpio, flags & ~GPIO_INTR_MASK);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set pin flags for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->intr_rid = 0;
+ ctx->intr_res = gpio_alloc_intr_resource(sc->dev,
+ &ctx->intr_rid, RF_ACTIVE, ctx->gpio,
+ flags & GPIO_INTR_MASK);
+ if (ctx->intr_res == NULL) {
+ device_printf(sc->dev,
+ "Cannot allocate an IRQ for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = bus_setup_intr(sc->dev, ctx->intr_res, INTR_TYPE_MISC |
+ INTR_MPSAFE | INTR_EXCL | INTR_SLEEPABLE, NULL,
+ gpio_aei_intr, ctx, &ctx->intr_cookie);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set up an IRQ for pin 0x%x\n", pin);
+
+ bus_release_resource(sc->dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->pin = pin;
+ SLIST_INSERT_HEAD(&sc->aei_ctx, ctx, next);
+ }
+
+ return (AE_OK);
}
static int
gpio_aei_attach(device_t dev)
{
struct gpio_aei_softc * sc = device_get_softc(dev);
- gpio_pin_t pin;
- uint32_t flags;
ACPI_HANDLE handle;
- int err;
+ ACPI_STATUS status;
/* This is us. */
device_set_desc(dev, "ACPI Event Information Device");
- /* Store parameters needed by gpio_aei_intr. */
handle = acpi_gpiobus_get_handle(dev);
- if (gpio_pin_get_by_child_index(dev, 0, &pin) != 0) {
- device_printf(dev, "Unable to get the input pin\n");
+ status = AcpiGetParent(handle, &sc->dev_handle);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Cannot get parent of %s\n",
+ acpi_name(handle));
return (ENXIO);
}
- sc->type = ACPI_AEI_TYPE_UNKNOWN;
- sc->pin = pin->pin;
-
- flags = acpi_gpiobus_get_flags(dev);
- if (pin->pin <= 255) {
- char objname[5]; /* "_EXX" or "_LXX" */
- sprintf(objname, "_%c%02X",
- (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin->pin);
- if (ACPI_SUCCESS(AcpiGetHandle(handle, objname, &sc->handle)))
- sc->type = ACPI_AEI_TYPE_ELX;
- }
- if (sc->type == ACPI_AEI_TYPE_UNKNOWN) {
- if (ACPI_SUCCESS(AcpiGetHandle(handle, "_EVT", &sc->handle)))
- sc->type = ACPI_AEI_TYPE_EVT;
- }
-
- if (sc->type == ACPI_AEI_TYPE_UNKNOWN) {
- device_printf(dev, "ACPI Event Information Device type is unknown");
- return (ENOTSUP);
- }
+ SLIST_INIT(&sc->aei_ctx);
+ sc->dev = dev;
- /* Set up the interrupt. */
- if ((sc->intr_res = gpio_alloc_intr_resource(dev, &sc->intr_rid,
- RF_ACTIVE, pin, flags & GPIO_INTR_MASK)) == NULL) {
- device_printf(dev, "Cannot allocate an IRQ\n");
- return (ENOTSUP);
- }
- err = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE |
- INTR_EXCL | INTR_SLEEPABLE, NULL, gpio_aei_intr, sc,
- &sc->intr_cookie);
- if (err != 0) {
- device_printf(dev, "Cannot set up IRQ\n");
- bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
- sc->intr_res);
- return (err);
+ status = AcpiWalkResources(sc->dev_handle, "_AEI",
+ gpio_aei_enumerate, sc);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Failed to enumerate AEI resources\n");
+ return (ENXIO);
}
return (0);
@@ -137,9 +233,15 @@ static int
gpio_aei_detach(device_t dev)
{
struct gpio_aei_softc * sc = device_get_softc(dev);
+ struct gpio_aei_ctx * ctx, * tctx;
+
+ SLIST_FOREACH_SAFE(ctx, &sc->aei_ctx, next, tctx) {
+ bus_teardown_intr(dev, ctx->intr_res, ctx->intr_cookie);
+ bus_release_resource(dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ }
- bus_teardown_intr(dev, sc->intr_res, sc->intr_cookie);
- bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, sc->intr_res);
return (0);
}
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index 764bcb7e6ee8..698b5e5fdd01 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -57,7 +57,6 @@ static int gpiobus_suspend(device_t);
static int gpiobus_resume(device_t);
static void gpiobus_probe_nomatch(device_t, device_t);
static int gpiobus_print_child(device_t, device_t);
-static int gpiobus_child_location(device_t, device_t, struct sbuf *);
static device_t gpiobus_add_child(device_t, u_int, const char *, int);
static void gpiobus_hinted_child(device_t, const char *, int);
@@ -320,34 +319,12 @@ gpiobus_add_bus(device_t dev)
busdev = device_add_child(dev, "gpiobus", DEVICE_UNIT_ANY);
if (busdev == NULL)
return (NULL);
- if (device_add_child(dev, "gpioc", DEVICE_UNIT_ANY) == NULL) {
- device_delete_child(dev, busdev);
- return (NULL);
- }
#ifdef FDT
ofw_gpiobus_register_provider(dev);
#endif
return (busdev);
}
-/*
- * Attach a gpiobus child.
- * Note that the controller is expected
- * to be fully initialized at this point.
- */
-device_t
-gpiobus_attach_bus(device_t dev)
-{
- device_t busdev;
-
- busdev = gpiobus_add_bus(dev);
- if (busdev == NULL)
- return (NULL);
-
- bus_attach_children(dev);
- return (busdev);
-}
-
int
gpiobus_detach_bus(device_t dev)
{
@@ -391,6 +368,37 @@ gpiobus_init_softc(device_t dev)
}
int
+gpiobus_add_gpioc(device_t dev)
+{
+ struct gpiobus_ivar *devi;
+ struct gpiobus_softc *sc;
+ device_t gpioc;
+ int err;
+
+ gpioc = BUS_ADD_CHILD(dev, 0, "gpioc", DEVICE_UNIT_ANY);
+ if (gpioc == NULL)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+ devi = device_get_ivars(gpioc);
+
+ devi->npins = sc->sc_npins;
+ err = gpiobus_alloc_ivars(devi);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ return (err);
+ }
+
+ err = GPIO_GET_PIN_LIST(sc->sc_dev, devi->pins);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ gpiobus_free_ivars(devi);
+ }
+
+ return (err);
+}
+
+int
gpiobus_alloc_ivars(struct gpiobus_ivar *devi)
{
@@ -581,6 +589,10 @@ gpiobus_attach(device_t dev)
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
+
/*
* Get parent's pins and mark them as unmapped
*/
@@ -680,7 +692,7 @@ gpiobus_print_child(device_t dev, device_t child)
return (retval);
}
-static int
+int
gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
{
struct gpiobus_ivar *devi;
@@ -692,16 +704,19 @@ gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
return (0);
}
-static device_t
-gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+device_t
+gpiobus_add_child_common(device_t dev, u_int order, const char *name, int unit,
+ size_t ivars_size)
{
device_t child;
struct gpiobus_ivar *devi;
+ KASSERT(ivars_size >= sizeof(struct gpiobus_ivar),
+ ("child ivars must include gpiobus_ivar as their first member"));
child = device_add_child_ordered(dev, order, name, unit);
if (child == NULL)
return (child);
- devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO);
+ devi = malloc(ivars_size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (devi == NULL) {
device_delete_child(dev, child);
return (NULL);
@@ -712,6 +727,13 @@ gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
}
+static device_t
+gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct gpiobus_ivar)));
+}
+
static void
gpiobus_child_deleted(device_t dev, device_t child)
{
@@ -970,7 +992,7 @@ gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags);
+ return (GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags));
}
static int
@@ -983,7 +1005,7 @@ gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps);
+ return (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps));
}
static int
@@ -996,7 +1018,7 @@ gpiobus_pin_set(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -1009,7 +1031,7 @@ gpiobus_pin_get(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -1021,7 +1043,57 @@ gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin)
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]);
+ return (GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]));
+}
+
+/*
+ * Verify that a child has all the pins it is requesting
+ * to access in its ivars.
+ */
+static bool
+gpiobus_pin_verify_32(struct gpiobus_ivar *devi, uint32_t first_pin,
+ uint32_t num_pins)
+{
+ if (first_pin + num_pins > devi->npins)
+ return (false);
+
+ /* Make sure the pins are consecutive. */
+ for (uint32_t pin = first_pin; pin < first_pin + num_pins - 1; pin++) {
+ if (devi->pins[pin] + 1 != devi->pins[pin + 1])
+ return (false);
+ }
+
+ return (true);
+}
+
+static int
+gpiobus_pin_access_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (!gpiobus_pin_verify_32(devi, first_pin, 32))
+ return (EINVAL);
+
+ return (GPIO_PIN_ACCESS_32(sc->sc_dev, devi->pins[first_pin],
+ clear_pins, change_pins, orig_pins));
+}
+
+static int
+gpiobus_pin_config_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t num_pins, uint32_t *pin_flags)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (num_pins > 32)
+ return (EINVAL);
+ if (!gpiobus_pin_verify_32(devi, first_pin, num_pins))
+ return (EINVAL);
+
+ return (GPIO_PIN_CONFIG_32(sc->sc_dev,
+ devi->pins[first_pin], num_pins, pin_flags));
}
static int
@@ -1102,6 +1174,8 @@ static device_method_t gpiobus_methods[] = {
DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get),
DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set),
DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle),
+ DEVMETHOD(gpiobus_pin_access_32,gpiobus_pin_access_32),
+ DEVMETHOD(gpiobus_pin_config_32,gpiobus_pin_config_32),
DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname),
DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname),
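gpiobus_pin_verify_32() above enforces two preconditions before forwarding to the controller: the child's ivar map must cover first_pin through first_pin + num_pins - 1, and the mapped controller pins must be consecutive, because GPIO_PIN_ACCESS_32 and GPIO_PIN_CONFIG_32 take only the first hardware pin number. For example, a child mapped to controller pins {8, 9, 10, 12} passes the check for first_pin = 0 with num_pins = 3 but fails with num_pins = 4. The check in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* The child-to-controller map must be dense over the window. */
    static bool
    pins_consecutive(const uint32_t *pins, uint32_t npins,
        uint32_t first, uint32_t count)
    {
            if (first + count > npins)
                    return (false);
            for (uint32_t p = first; p < first + count - 1; p++)
                    if (pins[p] + 1 != pins[p + 1])
                            return (false);
            return (true);
    }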
diff --git a/sys/dev/gpio/gpiobus_if.m b/sys/dev/gpio/gpiobus_if.m
index 8bf29839ef4e..890738c4e809 100644
--- a/sys/dev/gpio/gpiobus_if.m
+++ b/sys/dev/gpio/gpiobus_if.m
@@ -107,6 +107,36 @@ METHOD int pin_setflags {
};
#
+# Simultaneously read and/or change up to 32 adjacent pins.
+# If the device cannot change the pins simultaneously, returns EOPNOTSUPP.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_access_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t clear_pins;
+ uint32_t change_pins;
+ uint32_t *orig_pins;
+};
+
+#
+# Simultaneously configure up to 32 adjacent pins.
+# This is intended to change the configuration of all the pins simultaneously,
+# but unlike pin_access_32, this will not fail if the hardware can't do so.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_config_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t num_pins;
+ uint32_t *pin_flags;
+};
+
+#
# Get the pin name
#
METHOD int pin_getname {
diff --git a/sys/dev/gpio/gpiobus_internal.h b/sys/dev/gpio/gpiobus_internal.h
index de3f57663132..58f862343403 100644
--- a/sys/dev/gpio/gpiobus_internal.h
+++ b/sys/dev/gpio/gpiobus_internal.h
@@ -42,6 +42,9 @@ void gpiobus_free_ivars(struct gpiobus_ivar *);
int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *);
int gpiobus_acquire_pin(device_t, uint32_t);
void gpiobus_release_pin(device_t, uint32_t);
+int gpiobus_child_location(device_t, device_t, struct sbuf *);
+device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t);
+int gpiobus_add_gpioc(device_t);
extern driver_t gpiobus_driver;
#endif
diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h
index 7f504236a774..0528efe45525 100644
--- a/sys/dev/gpio/gpiobusvar.h
+++ b/sys/dev/gpio/gpiobusvar.h
@@ -171,7 +171,6 @@ struct resource *gpio_alloc_intr_resource(device_t consumer_dev, int *rid,
int gpio_check_flags(uint32_t, uint32_t);
device_t gpiobus_add_bus(device_t);
-device_t gpiobus_attach_bus(device_t);
int gpiobus_detach_bus(device_t);
#endif /* __GPIOBUS_H__ */
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 87fed38ebe3e..6c6f79227166 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -45,7 +45,6 @@
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "gpiobus_if.h"
#undef GPIOC_DEBUG
@@ -59,7 +58,7 @@
struct gpioc_softc {
device_t sc_dev; /* gpiocX dev */
- device_t sc_pdev; /* gpioX dev */
+ device_t sc_pdev; /* gpiobusX dev */
struct cdev *sc_ctl_dev; /* controller device */
int sc_unit;
int sc_npins;
@@ -69,6 +68,7 @@ struct gpioc_softc {
struct gpioc_pin_intr {
struct gpioc_softc *sc;
gpio_pin_t pin;
+ uint32_t intr_mode;
bool config_locked;
int intr_rid;
struct resource *intr_res;
@@ -112,8 +112,10 @@ struct gpioc_pin_event {
static MALLOC_DEFINE(M_GPIOC, "gpioc", "gpioc device data");
-static int gpioc_allocate_pin_intr(struct gpioc_pin_intr*, uint32_t);
-static int gpioc_release_pin_intr(struct gpioc_pin_intr*);
+static int gpioc_allocate_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*, uint32_t, uint32_t);
+static int gpioc_release_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*);
static int gpioc_attach_priv_pin(struct gpioc_cdevpriv*,
struct gpioc_pin_intr*);
static int gpioc_detach_priv_pin(struct gpioc_cdevpriv*,
@@ -191,27 +193,36 @@ number_of_events(struct gpioc_cdevpriv *priv)
}
static int
-gpioc_allocate_pin_intr(struct gpioc_pin_intr *intr_conf, uint32_t flags)
+gpioc_allocate_pin_intr(struct gpioc_softc *sc,
+ struct gpioc_pin_intr *intr_conf, uint32_t pin, uint32_t flags)
{
int err;
intr_conf->config_locked = true;
mtx_unlock(&intr_conf->mtx);
- intr_conf->intr_res = gpio_alloc_intr_resource(intr_conf->pin->dev,
+ MPASS(intr_conf->pin == NULL);
+ err = gpio_pin_get_by_bus_pinnum(sc->sc_pdev, pin, &intr_conf->pin);
+ if (err != 0)
+ goto error_exit;
+
+ intr_conf->intr_res = gpio_alloc_intr_resource(sc->sc_dev,
&intr_conf->intr_rid, RF_ACTIVE, intr_conf->pin, flags);
if (intr_conf->intr_res == NULL) {
err = ENXIO;
- goto error_exit;
+ goto error_pin;
}
- err = bus_setup_intr(intr_conf->pin->dev, intr_conf->intr_res,
+ err = bus_setup_intr(sc->sc_dev, intr_conf->intr_res,
INTR_TYPE_MISC | INTR_MPSAFE, NULL, gpioc_interrupt_handler,
intr_conf, &intr_conf->intr_cookie);
- if (err != 0)
- goto error_exit;
+ if (err != 0) {
+ bus_release_resource(sc->sc_dev, intr_conf->intr_res);
+ intr_conf->intr_res = NULL;
+ goto error_pin;
+ }
- intr_conf->pin->flags = flags;
+ intr_conf->intr_mode = flags;
error_exit:
mtx_lock(&intr_conf->mtx);
@@ -219,10 +230,15 @@ error_exit:
wakeup(&intr_conf->config_locked);
return (err);
+
+error_pin:
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+ goto error_exit;
}
static int
-gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
+gpioc_release_pin_intr(struct gpioc_softc *sc, struct gpioc_pin_intr *intr_conf)
{
int err;
@@ -230,8 +246,8 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
mtx_unlock(&intr_conf->mtx);
if (intr_conf->intr_cookie != NULL) {
- err = bus_teardown_intr(intr_conf->pin->dev,
- intr_conf->intr_res, intr_conf->intr_cookie);
+ err = bus_teardown_intr(sc->sc_dev, intr_conf->intr_res,
+ intr_conf->intr_cookie);
if (err != 0)
goto error_exit;
else
@@ -239,7 +255,7 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
if (intr_conf->intr_res != NULL) {
- err = bus_release_resource(intr_conf->pin->dev, SYS_RES_IRQ,
+ err = bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
intr_conf->intr_rid, intr_conf->intr_res);
if (err != 0)
goto error_exit;
@@ -249,7 +265,10 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
}
- intr_conf->pin->flags = 0;
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+
+ intr_conf->intr_mode = 0;
err = 0;
error_exit:
@@ -386,7 +405,7 @@ gpioc_get_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
struct gpioc_privs *priv_link;
uint32_t flags;
- flags = intr_conf->pin->flags;
+ flags = intr_conf->intr_mode;
if (flags == 0)
return (0);
@@ -411,7 +430,7 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
int res;
res = 0;
- if (intr_conf->pin->flags == 0 && flags == 0) {
+ if (intr_conf->intr_mode == 0 && flags == 0) {
/* No interrupt configured and none requested: Do nothing. */
return (0);
}
@@ -419,17 +438,17 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
while (intr_conf->config_locked == true)
mtx_sleep(&intr_conf->config_locked, &intr_conf->mtx, 0,
"gpicfg", 0);
- if (intr_conf->pin->flags == 0 && flags != 0) {
+ if (intr_conf->intr_mode == 0 && flags != 0) {
/*
* No interrupt is configured, but one is requested: Allocate
 * and set up the interrupt on the corresponding pin.
*/
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf, pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags == flags) {
+ } else if (intr_conf->intr_mode == flags) {
/*
* Same interrupt requested as already configured: Attach the
* cdevpriv to the corresponding pin.
@@ -437,14 +456,14 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags != 0 && flags == 0) {
+ } else if (intr_conf->intr_mode != 0 && flags == 0) {
/*
* Interrupt configured, but none requested: Teardown and
* release the pin when no other cdevpriv is attached. Otherwise
* just detach pin and cdevpriv from each other.
*/
if (gpioc_intr_reconfig_allowed(priv, intr_conf)) {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
}
if (res == 0)
res = gpioc_detach_priv_pin(priv, intr_conf);
@@ -456,9 +475,10 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
if (!gpioc_intr_reconfig_allowed(priv, intr_conf))
res = EBUSY;
else {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
if (res == 0)
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf,
+ pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
@@ -475,18 +495,16 @@ gpioc_interrupt_handler(void *arg)
{
struct gpioc_pin_intr *intr_conf;
struct gpioc_privs *privs;
- struct gpioc_softc *sc;
sbintime_t evtime;
- uint32_t pin_state;
+ bool pin_state;
intr_conf = arg;
- sc = intr_conf->sc;
/* Capture time and pin state first. */
evtime = sbinuptime();
- if (intr_conf->pin->flags & GPIO_INTR_EDGE_BOTH)
- GPIO_PIN_GET(sc->sc_pdev, intr_conf->pin->pin, &pin_state);
- else if (intr_conf->pin->flags & GPIO_INTR_EDGE_RISING)
+ if (intr_conf->intr_mode & GPIO_INTR_EDGE_BOTH)
+ gpio_pin_is_active(intr_conf->pin, &pin_state);
+ else if (intr_conf->intr_mode & GPIO_INTR_EDGE_RISING)
pin_state = true;
else
pin_state = false;
@@ -575,18 +593,11 @@ gpioc_attach(device_t dev)
sc->sc_pdev = device_get_parent(dev);
sc->sc_unit = device_get_unit(dev);
- err = GPIO_PIN_MAX(sc->sc_pdev, &sc->sc_npins);
- sc->sc_npins++; /* Number of pins is one more than max pin number. */
- if (err != 0)
- return (err);
+ sc->sc_npins = gpiobus_get_npins(dev);
sc->sc_pin_intr = malloc(sizeof(struct gpioc_pin_intr) * sc->sc_npins,
M_GPIOC, M_WAITOK | M_ZERO);
for (int i = 0; i < sc->sc_npins; i++) {
- sc->sc_pin_intr[i].pin = malloc(sizeof(struct gpiobus_pin),
- M_GPIOC, M_WAITOK | M_ZERO);
sc->sc_pin_intr[i].sc = sc;
- sc->sc_pin_intr[i].pin->pin = i;
- sc->sc_pin_intr[i].pin->dev = sc->sc_pdev;
mtx_init(&sc->sc_pin_intr[i].mtx, "gpioc pin", NULL, MTX_DEF);
SLIST_INIT(&sc->sc_pin_intr[i].privs);
}
@@ -610,20 +621,16 @@ static int
gpioc_detach(device_t dev)
{
struct gpioc_softc *sc = device_get_softc(dev);
- int err;
if (sc->sc_ctl_dev)
destroy_dev(sc->sc_ctl_dev);
for (int i = 0; i < sc->sc_npins; i++) {
mtx_destroy(&sc->sc_pin_intr[i].mtx);
- free(sc->sc_pin_intr[i].pin, M_GPIOC);
+ MPASS(sc->sc_pin_intr[i].pin == NULL);
}
free(sc->sc_pin_intr, M_GPIOC);
- if ((err = bus_generic_detach(dev)) != 0)
- return (err);
-
return (0);
}
@@ -655,7 +662,7 @@ gpioc_cdevpriv_dtor(void *data)
KASSERT(consistency == 1,
("inconsistent links between pin config and cdevpriv"));
if (gpioc_intr_reconfig_allowed(priv, pin_link->pin)) {
- gpioc_release_pin_intr(pin_link->pin);
+ gpioc_release_pin_intr(priv->sc, pin_link->pin);
}
mtx_unlock(&pin_link->pin->mtx);
SLIST_REMOVE(&priv->pins, pin_link, gpioc_pins, next);
@@ -697,7 +704,7 @@ gpioc_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
* npins isn't a horrible fifo size for that either.
*/
priv->numevents = priv->sc->sc_npins * 2;
- priv->events = malloc(priv->numevents * sizeof(struct gpio_event_detail),
+ priv->events = malloc(priv->numevents * sizeof(struct gpioc_pin_event),
M_GPIOC, M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
@@ -778,7 +785,6 @@ static int
gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct thread *td)
{
- device_t bus;
int max_pin, res;
struct gpioc_softc *sc = cdev->si_drv1;
struct gpioc_cdevpriv *priv;
@@ -787,32 +793,35 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct gpio_access_32 *a32;
struct gpio_config_32 *c32;
struct gpio_event_config *evcfg;
+ struct gpioc_pin_event *tmp;
uint32_t caps, intrflags;
- bus = GPIO_GET_BUS(sc->sc_pdev);
- if (bus == NULL)
- return (EINVAL);
switch (cmd) {
case GPIOMAXPIN:
- max_pin = -1;
- res = GPIO_PIN_MAX(sc->sc_pdev, &max_pin);
+ res = 0;
+ max_pin = sc->sc_npins - 1;
bcopy(&max_pin, arg, sizeof(max_pin));
break;
case GPIOGETCONFIG:
bcopy(arg, &pin, sizeof(pin));
dprintf("get config pin %d\n", pin.gp_pin);
- res = GPIO_PIN_GETFLAGS(sc->sc_pdev, pin.gp_pin,
+ res = GPIOBUS_PIN_GETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
&pin.gp_flags);
/* Fail early */
- if (res)
+ if (res != 0)
break;
res = devfs_get_cdevpriv((void **)&priv);
- if (res)
+ if (res != 0)
break;
pin.gp_flags |= gpioc_get_intr_config(sc, priv,
pin.gp_pin);
- GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps);
- GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ &pin.gp_caps);
+ if (res != 0)
+ break;
+ res = GPIOBUS_PIN_GETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name);
+ if (res != 0)
+ break;
bcopy(&pin, arg, sizeof(pin));
break;
case GPIOSETCONFIG:
@@ -821,7 +830,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- res = GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &caps);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev,
+ pin.gp_pin, &caps);
if (res != 0)
break;
res = gpio_check_flags(caps, pin.gp_flags);
@@ -847,8 +857,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
}
if (res != 0)
break;
- res = GPIO_PIN_SETFLAGS(sc->sc_pdev, pin.gp_pin,
- (pin.gp_flags & ~GPIO_INTR_MASK));
+ res = GPIOBUS_PIN_SETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ pin.gp_flags & ~GPIO_INTR_MASK);
if (res != 0)
break;
res = gpioc_set_intr_config(sc, priv, pin.gp_pin,
@@ -856,67 +866,78 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
break;
case GPIOGET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_GET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_GET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
&req.gp_value);
- dprintf("read pin %d -> %d\n",
+ if (res != 0)
+ break;
+ dprintf("read pin %d -> %d\n",
req.gp_pin, req.gp_value);
bcopy(&req, arg, sizeof(req));
break;
case GPIOSET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_SET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_SET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
req.gp_value);
- dprintf("write pin %d -> %d\n",
+ dprintf("write pin %d -> %d\n",
req.gp_pin, req.gp_value);
break;
case GPIOTOGGLE:
bcopy(arg, &req, sizeof(req));
- dprintf("toggle pin %d\n",
+ dprintf("toggle pin %d\n",
req.gp_pin);
- res = GPIO_PIN_TOGGLE(sc->sc_pdev, req.gp_pin);
+ res = GPIOBUS_PIN_TOGGLE(sc->sc_pdev, sc->sc_dev, req.gp_pin);
break;
case GPIOSETNAME:
bcopy(arg, &pin, sizeof(pin));
dprintf("set name on pin %d\n", pin.gp_pin);
- res = GPIOBUS_PIN_SETNAME(bus, pin.gp_pin,
+ res = GPIOBUS_PIN_SETNAME(sc->sc_pdev, pin.gp_pin,
pin.gp_name);
break;
case GPIOACCESS32:
a32 = (struct gpio_access_32 *)arg;
- res = GPIO_PIN_ACCESS_32(sc->sc_pdev, a32->first_pin,
- a32->clear_pins, a32->change_pins, &a32->orig_pins);
+ res = GPIOBUS_PIN_ACCESS_32(sc->sc_pdev, sc->sc_dev,
+ a32->first_pin, a32->clear_pins, a32->change_pins,
+ &a32->orig_pins);
break;
case GPIOCONFIG32:
c32 = (struct gpio_config_32 *)arg;
- res = GPIO_PIN_CONFIG_32(sc->sc_pdev, c32->first_pin,
- c32->num_pins, c32->pin_flags);
+ res = GPIOBUS_PIN_CONFIG_32(sc->sc_pdev, sc->sc_dev,
+ c32->first_pin, c32->num_pins, c32->pin_flags);
break;
case GPIOCONFIGEVENTS:
evcfg = (struct gpio_event_config *)arg;
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- /* If any pins have been configured, changes aren't allowed. */
- if (!SLIST_EMPTY(&priv->pins)) {
- res = EINVAL;
- break;
- }
if (evcfg->gp_report_type != GPIO_EVENT_REPORT_DETAIL &&
evcfg->gp_report_type != GPIO_EVENT_REPORT_SUMMARY) {
res = EINVAL;
break;
}
- priv->report_option = evcfg->gp_report_type;
/* Reallocate the events buffer if the user wants it bigger. */
- if (priv->report_option == GPIO_EVENT_REPORT_DETAIL &&
+ tmp = NULL;
+ if (evcfg->gp_report_type == GPIO_EVENT_REPORT_DETAIL &&
priv->numevents < evcfg->gp_fifo_size) {
+ tmp = malloc(evcfg->gp_fifo_size *
+ sizeof(struct gpioc_pin_event), M_GPIOC,
+ M_WAITOK | M_ZERO);
+ }
+ mtx_lock(&priv->mtx);
+ /* If any pins have been configured, changes aren't allowed. */
+ if (!SLIST_EMPTY(&priv->pins)) {
+ mtx_unlock(&priv->mtx);
+ free(tmp, M_GPIOC);
+ res = EINVAL;
+ break;
+ }
+ if (tmp != NULL) {
free(priv->events, M_GPIOC);
+ priv->events = tmp;
priv->numevents = evcfg->gp_fifo_size;
- priv->events = malloc(priv->numevents *
- sizeof(struct gpio_event_detail), M_GPIOC,
- M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
}
+ priv->report_option = evcfg->gp_report_type;
+ mtx_unlock(&priv->mtx);
break;
case FIONBIO:
/*
@@ -1050,9 +1071,6 @@ static device_method_t gpioc_methods[] = {
DEVMETHOD(device_probe, gpioc_probe),
DEVMETHOD(device_attach, gpioc_attach),
DEVMETHOD(device_detach, gpioc_detach),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD_END
};
@@ -1063,5 +1081,5 @@ driver_t gpioc_driver = {
sizeof(struct gpioc_softc)
};
-DRIVER_MODULE(gpioc, gpio, gpioc_driver, 0, 0);
+DRIVER_MODULE(gpioc, gpiobus, gpioc_driver, 0, 0);
MODULE_VERSION(gpioc, 1);
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index ba53cb733971..a36c2faef379 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -55,13 +55,13 @@
device_get_nameunit((_sc)->sc_dev), "gpioled", MTX_DEF)
#define GPIOLED_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
-struct gpioled_softc
+struct gpioled_softc
{
device_t sc_dev;
device_t sc_busdev;
struct mtx sc_mtx;
struct cdev *sc_leddev;
- int sc_invert;
+ int sc_softinvert;
};
static void gpioled_control(void *, int);
@@ -69,20 +69,17 @@ static int gpioled_probe(device_t);
static int gpioled_attach(device_t);
static int gpioled_detach(device_t);
-static void
+static void
gpioled_control(void *priv, int onoff)
{
struct gpioled_softc *sc;
sc = (struct gpioled_softc *)priv;
+ if (sc->sc_softinvert)
+ onoff = !onoff;
GPIOLED_LOCK(sc);
- if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- GPIO_PIN_OUTPUT) == 0) {
- if (sc->sc_invert)
- onoff = !onoff;
- GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
- }
+ GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
GPIOLED_UNLOCK(sc);
}
@@ -95,26 +92,101 @@ gpioled_probe(device_t dev)
}
static int
+gpioled_inv(device_t dev, uint32_t *pin_flags)
+{
+ struct gpioled_softc *sc;
+ int invert;
+ uint32_t pin_caps;
+
+ sc = device_get_softc(dev);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "invert", &invert))
+ invert = 0;
+
+ if (GPIOBUS_PIN_GETCAPS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ &pin_caps) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to get pin caps\n");
+ return (-1);
+ }
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags &= ~GPIO_PIN_INVOUT;
+ sc->sc_softinvert = 0;
+ if (invert) {
+ const char *invmode;
+
+ if (resource_string_value(device_get_name(dev),
+ device_get_unit(dev), "invmode", &invmode))
+ invmode = NULL;
+
+ if (invmode) {
+ if (!strcmp(invmode, "sw"))
+ sc->sc_softinvert = 1;
+ else if (!strcmp(invmode, "hw")) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else {
+ device_printf(sc->sc_dev, "hardware pin inversion not supported\n");
+ return (-1);
+ }
+ } else {
+ if (strcmp(invmode, "auto") != 0)
+ device_printf(sc->sc_dev, "invalid pin inversion mode\n");
+ invmode = NULL;
+ }
+ }
+ /*
+ * Auto inversion mode: use hardware support if available, else fall
+ * back to software emulation.
+ */
+ if (invmode == NULL) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else
+ sc->sc_softinvert = 1;
+ }
+ }
+ MPASS(!invert ||
+ (((*pin_flags & GPIO_PIN_INVOUT) != 0) && !sc->sc_softinvert) ||
+ (((*pin_flags & GPIO_PIN_INVOUT) == 0) && sc->sc_softinvert));
+ return (invert);
+}
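(Editor's aside, not part of the patch: the knobs read above are ordinary device.hints(5) resources. A plausible configuration, with names taken from the resource_*_value() lookups in this file and the pin/bus values purely illustrative:)

	hint.gpioled.0.at="gpiobus0"
	hint.gpioled.0.pins=1
	hint.gpioled.0.name="status"
	hint.gpioled.0.invert=1
	hint.gpioled.0.invmode="auto"   # "sw", "hw", or "auto" (the default)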
+
+static int
gpioled_attach(device_t dev)
{
struct gpioled_softc *sc;
int state;
const char *name;
+ uint32_t pin_flags;
+ int invert;
sc = device_get_softc(dev);
sc->sc_dev = dev;
sc->sc_busdev = device_get_parent(dev);
GPIOLED_LOCK_INIT(sc);
- state = 0;
-
- if (resource_string_value(device_get_name(dev),
+ if (resource_string_value(device_get_name(dev),
device_get_unit(dev), "name", &name))
name = NULL;
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "invert", &sc->sc_invert);
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "state", &state);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "state", &state))
+ state = 0;
+
+ pin_flags = GPIO_PIN_OUTPUT;
+ invert = gpioled_inv(dev, &pin_flags);
+ if (invert < 0)
+ return (ENXIO);
+ device_printf(sc->sc_dev, "state %d invert %s\n",
+ state, (invert ? (sc->sc_softinvert ? "sw" : "hw") : "no"));
+ if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ pin_flags) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to set pin flags, %#x\n", pin_flags);
+ return (ENXIO);
+ }
sc->sc_leddev = led_create_state(gpioled_control, sc, name ? name :
device_get_nameunit(dev), state);
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index fc5fb03d6824..da1bfbc268b8 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -426,6 +426,9 @@ ofw_gpiobus_attach(device_t dev)
err = gpiobus_init_softc(dev);
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
/*
@@ -451,28 +454,22 @@ ofw_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
device_t child;
struct ofw_gpiobus_devinfo *devi;
- child = device_add_child_ordered(dev, order, name, unit);
+ child = gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct ofw_gpiobus_devinfo));
if (child == NULL)
- return (child);
- devi = malloc(sizeof(struct ofw_gpiobus_devinfo), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (devi == NULL) {
- device_delete_child(dev, child);
- return (0);
- }
+ return (NULL);
/*
* NULL all the OFW-related parts of the ivars for non-OFW
* children.
*/
+ devi = device_get_ivars(child);
devi->opd_obdinfo.obd_node = -1;
devi->opd_obdinfo.obd_name = NULL;
devi->opd_obdinfo.obd_compat = NULL;
devi->opd_obdinfo.obd_type = NULL;
devi->opd_obdinfo.obd_model = NULL;
- device_set_ivars(child, devi);
-
return (child);
}
diff --git a/sys/dev/gpio/pl061.c b/sys/dev/gpio/pl061.c
index 87d4310a6396..9996b0253c7d 100644
--- a/sys/dev/gpio/pl061.c
+++ b/sys/dev/gpio/pl061.c
@@ -495,13 +495,14 @@ pl061_attach(device_t dev)
goto free_isrc;
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "couldn't attach gpio bus\n");
PL061_LOCK_DESTROY(sc);
goto free_isrc;
}
+ bus_attach_children(dev);
return (0);
free_isrc:
@@ -557,8 +558,7 @@ static device_method_t pl061_methods[] = {
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
- DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, pl061_get_bus),
diff --git a/sys/dev/gpio/qoriq_gpio.c b/sys/dev/gpio/qoriq_gpio.c
index 8b44cd256c79..d11868a23751 100644
--- a/sys/dev/gpio/qoriq_gpio.c
+++ b/sys/dev/gpio/qoriq_gpio.c
@@ -379,12 +379,13 @@ qoriq_gpio_attach(device_t dev)
OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
qoriq_gpio_detach(dev);
return (ENOMEM);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c
index 96d36c8d191d..683449fca49c 100644
--- a/sys/dev/hid/hidbus.c
+++ b/sys/dev/hid/hidbus.c
@@ -65,7 +65,7 @@ struct hidbus_ivars {
struct mtx *mtx; /* child intr mtx */
hid_intr_t *intr_handler; /* executed under mtx*/
void *intr_ctx;
- unsigned int refcnt; /* protected by mtx */
+ bool active; /* protected by mtx */
struct epoch_context epoch_ctx;
CK_STAILQ_ENTRY(hidbus_ivars) link;
};
@@ -398,7 +398,7 @@ hidbus_child_detached(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *tlc = device_get_ivars(child);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
tlc->mtx = &sc->mtx;
tlc->intr_handler = NULL;
tlc->flags &= ~HIDBUS_FLAG_CAN_POLL;
@@ -423,7 +423,7 @@ hidbus_child_deleted(device_t bus, device_t child)
struct hidbus_ivars *tlc = device_get_ivars(child);
sx_xlock(&sc->sx);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link);
sx_unlock(&sc->sx);
epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx);
@@ -572,7 +572,7 @@ hidbus_intr(void *context, void *buf, hid_size_t len)
if (!HID_IN_POLLING_MODE())
epoch_enter_preempt(INPUT_EPOCH, &et);
CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc->refcnt == 0 || tlc->intr_handler == NULL)
+ if (!tlc->active || tlc->intr_handler == NULL)
continue;
if (HID_IN_POLLING_MODE()) {
if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0)
@@ -602,21 +602,14 @@ hidbus_intr_start(device_t bus, device_t child)
MPASS(bus == device_get_parent(child));
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
- struct hidbus_ivars *tlc;
- bool refcnted = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- refcnted |= (tlc->refcnt != 0);
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- ++tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- }
- error = refcnted ? 0 : hid_intr_start(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = true;
+ mtx_unlock(ivar->mtx);
+ error = hid_intr_start(bus);
sx_unlock(&sc->sx);
return (error);
@@ -629,21 +622,17 @@ hidbus_intr_stop(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
struct hidbus_ivars *tlc;
- bool refcnted = false;
+ bool active = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- MPASS(tlc->refcnt != 0);
- --tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- refcnted |= (tlc->refcnt != 0);
- }
- error = refcnted ? 0 : hid_intr_stop(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = false;
+ mtx_unlock(ivar->mtx);
+ CK_STAILQ_FOREACH(tlc, &sc->tlcs, link)
+ active |= tlc->active;
+ error = active ? 0 : hid_intr_stop(bus);
sx_unlock(&sc->sx);
return (error);
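(Editor's note, not part of the patch: the per-child refcount collapses to a single active flag because each hidbus child starts and stops its interrupt at most once, while bus-level delivery must stay on as long as any child is active:)

	/*
	 * intr_start(child): child->active = true;  always hid_intr_start(bus).
	 * intr_stop(child):  child->active = false; hid_intr_stop(bus) only
	 *                    when no other child still has active == true.
	 */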
diff --git a/sys/dev/hid/hidquirk.h b/sys/dev/hid/hidquirk.h
index 4f8b8acbe201..f6fa9f88c6c9 100644
--- a/sys/dev/hid/hidquirk.h
+++ b/sys/dev/hid/hidquirk.h
@@ -50,6 +50,7 @@
HQ(IS_XBOX360GP), /* device is XBox 360 GamePad */ \
HQ(NOWRITE), /* device does not support writes */ \
HQ(IICHID_SAMPLING), /* IIC backend runs in sampling mode */ \
+ HQ(NO_READAHEAD), /* Disable interrupt after one report */\
\
/* Various quirks */ \
HQ(HID_IGNORE), /* device should be ignored by hid class */ \
diff --git a/sys/dev/hid/hidraw.c b/sys/dev/hid/hidraw.c
index 06f70070f61b..4855843cd265 100644
--- a/sys/dev/hid/hidraw.c
+++ b/sys/dev/hid/hidraw.c
@@ -85,6 +85,12 @@ SYSCTL_INT(_hw_hid_hidraw, OID_AUTO, debug, CTLFLAG_RWTUN,
free((buf), M_DEVBUF); \
}
+#ifdef HIDRAW_MAKE_UHID_ALIAS
+#define HIDRAW_NAME "uhid"
+#else
+#define HIDRAW_NAME "hidraw"
+#endif
+
struct hidraw_softc {
device_t sc_dev; /* base device */
@@ -183,8 +189,8 @@ hidraw_identify(driver_t *driver, device_t parent)
{
device_t child;
- if (device_find_child(parent, "hidraw", DEVICE_UNIT_ANY) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, "hidraw",
+ if (device_find_child(parent, HIDRAW_NAME, DEVICE_UNIT_ANY) == NULL) {
+ child = BUS_ADD_CHILD(parent, 0, HIDRAW_NAME,
device_get_unit(parent));
if (child != NULL)
hidbus_set_index(child, HIDRAW_INDEX);
@@ -1050,7 +1056,7 @@ static device_method_t hidraw_methods[] = {
};
static driver_t hidraw_driver = {
- "hidraw",
+ HIDRAW_NAME,
hidraw_methods,
sizeof(struct hidraw_softc)
};
diff --git a/sys/dev/hid/hkbd.c b/sys/dev/hid/hkbd.c
index 5eff7557bc42..6255c42d3b62 100644
--- a/sys/dev/hid/hkbd.c
+++ b/sys/dev/hid/hkbd.c
@@ -95,14 +95,16 @@
#ifdef HID_DEBUG
static int hkbd_debug = 0;
+#endif
static int hkbd_no_leds = 0;
static SYSCTL_NODE(_hw_hid, OID_AUTO, hkbd, CTLFLAG_RW, 0, "USB keyboard");
+#ifdef HID_DEBUG
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, debug, CTLFLAG_RWTUN,
&hkbd_debug, 0, "Debug level");
+#endif
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
&hkbd_no_leds, 0, "Disables setting of keyboard leds");
-#endif
#define INPUT_EPOCH global_epoch_preempt
@@ -1596,8 +1598,16 @@ hkbd_ioctl_locked(keyboard_t *kbd, u_long cmd, caddr_t arg)
sc->sc_state &= ~LOCK_MASK;
sc->sc_state |= *(int *)arg;
- /* set LEDs and quit */
- return (hkbd_ioctl_locked(kbd, KDSETLED, arg));
+ /*
+ * Attempt to set the keyboard LEDs; ignore the return value
+ * intentionally. Note: Some hypervisors/emulators (e.g., QEMU,
+ * Parallels, at least as of the time of writing) may fail when
+ * setting LEDs. This can prevent kbdmux from attaching the
+ * keyboard, which in turn may block the console from accessing
+ * it.
+ */
+ (void)hkbd_ioctl_locked(kbd, KDSETLED, arg);
+ return (0);
case KDSETREPEAT: /* set keyboard repeat rate (new
* interface) */
@@ -1766,10 +1776,8 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_LOCK_ASSERT();
DPRINTF("leds=0x%02x\n", leds);
-#ifdef HID_DEBUG
if (hkbd_no_leds)
return (0);
-#endif
memset(sc->sc_buffer, 0, HKBD_BUFFER_SIZE);
@@ -1820,6 +1828,7 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_UNLOCK();
error = hid_write(sc->sc_dev, buf, len);
SYSCONS_LOCK();
+ DPRINTF("error %d", error);
return (error);
}
diff --git a/sys/dev/hid/ietp.c b/sys/dev/hid/ietp.c
index 217585a7948b..a9d0295fb121 100644
--- a/sys/dev/hid/ietp.c
+++ b/sys/dev/hid/ietp.c
@@ -102,6 +102,7 @@ struct ietp_softc {
device_t dev;
struct evdev_dev *evdev;
+ bool open;
uint8_t report_id;
hid_size_t report_len;
@@ -198,17 +199,32 @@ static const struct hid_device_id ietp_iic_devs[] = {
IETP_IIC_DEV("ELAN1000"),
};
-static uint8_t const ietp_dummy_rdesc[] = {
+static uint8_t const ietp_dummy_rdesc_lo[] = {
0x05, HUP_GENERIC_DESKTOP, /* Usage Page (Generic Desktop Ctrls) */
0x09, HUG_MOUSE, /* Usage (Mouse) */
0xA1, 0x01, /* Collection (Application) */
0x09, 0x01, /* Usage (0x01) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
0x95, IETP_REPORT_LEN_LO, /* Report Count (IETP_REPORT_LEN_LO) */
0x75, 0x08, /* Report Size (8) */
0x81, 0x02, /* Input (Data,Var,Abs) */
0xC0, /* End Collection */
};
+static uint8_t const ietp_dummy_rdesc_hi[] = {
+ 0x05, HUP_GENERIC_DESKTOP, /* Usage Page (Generic Desktop Ctrls) */
+ 0x09, HUG_MOUSE, /* Usage (Mouse) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x09, 0x01, /* Usage (0x01) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x95, IETP_REPORT_LEN_HI, /* Report Count (IETP_REPORT_LEN_HI) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0xC0, /* End Collection */
+};
+
static const struct evdev_methods ietp_evdev_methods = {
.ev_open = &ietp_ev_open,
.ev_close = &ietp_ev_close,
@@ -217,13 +233,25 @@ static const struct evdev_methods ietp_evdev_methods = {
static int
ietp_ev_open(struct evdev_dev *evdev)
{
- return (hid_intr_start(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_start(sc->dev);
+ if (error == 0)
+ sc->open = true;
+ return (error);
}
static int
ietp_ev_close(struct evdev_dev *evdev)
{
- return (hid_intr_stop(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_stop(sc->dev);
+ if (error == 0)
+ sc->open = false;
+ return (error);
}
static int
@@ -275,7 +303,7 @@ ietp_attach(struct ietp_softc *sc)
evdev_set_id(sc->evdev, hw->idBus, hw->idVendor, hw->idProduct,
hw->idVersion);
evdev_set_serial(sc->evdev, hw->serial);
- evdev_set_methods(sc->evdev, sc->dev, &ietp_evdev_methods);
+ evdev_set_methods(sc->evdev, sc, &ietp_evdev_methods);
evdev_set_flag(sc->evdev, EVDEV_FLAG_MT_STCOMPAT);
evdev_set_flag(sc->evdev, EVDEV_FLAG_EXT_EPOCH); /* hidbus child */
@@ -420,28 +448,38 @@ ietp_res2dpmm(uint8_t res, bool hi_precision)
static void
ietp_iic_identify(driver_t *driver, device_t parent)
{
- void *d_ptr;
- hid_size_t d_len;
- int isize;
- uint8_t iid;
+ device_t iichid = device_get_parent(parent);
+ static const uint16_t reg = IETP_PATTERN;
+ uint16_t addr = iicbus_get_addr(iichid) << 1;
+ uint8_t resp[2];
+ uint8_t cmd[2] = { reg & 0xff, (reg >> 8) & 0xff };
+ struct iic_msg msgs[2] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, sizeof(cmd), cmd },
+ { addr, IIC_M_RD, sizeof(resp), resp },
+ };
+ struct iic_rdwr_data ird = { msgs, nitems(msgs) };
+ uint8_t pattern;
if (HIDBUS_LOOKUP_ID(parent, ietp_iic_devs) == NULL)
return;
- if (hid_get_report_descr(parent, &d_ptr, &d_len) != 0)
+
+ if (device_get_devclass(iichid) != devclass_find("iichid"))
return;
- /*
- * Some Elantech trackpads have a mangled HID report descriptor, which
- * reads as having an incorrect input size (i.e. < IETP_REPORT_LEN_LO).
- * If the input size is incorrect, load a dummy report descriptor.
- */
+ DPRINTF("Read reg 0x%04x with size %zu\n", reg, sizeof(resp));
- isize = hid_report_size_max(d_ptr, d_len, hid_input, &iid);
- if (isize >= IETP_REPORT_LEN_LO)
+ if (hid_ioctl(parent, I2CRDWR, (uintptr_t)&ird) != 0)
return;
- hid_set_report_descr(parent, ietp_dummy_rdesc,
- sizeof(ietp_dummy_rdesc));
+ DPRINTF("Response: %*D\n", (int)size(resp), resp, " ");
+
+ pattern = (resp[0] == 0xFF && resp[1] == 0xFF) ? 0 : resp[1];
+ if (pattern >= 0x02)
+ hid_set_report_descr(parent, ietp_dummy_rdesc_hi,
+ sizeof(ietp_dummy_rdesc_hi));
+ else
+ hid_set_report_descr(parent, ietp_dummy_rdesc_lo,
+ sizeof(ietp_dummy_rdesc_lo));
}
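(Editor's sketch, not part of the patch: the identify routine above amounts to a 16-bit little-endian register read through the iichid I2CRDWR pass-through. Factored out under the same assumptions, with iic_msg/iic_rdwr_data from dev/iicbus/iic.h and hid_ioctl() forwarding to the parent iichid, a hypothetical helper named read_le16_reg would look roughly like:)

	/*
	 * Read a 16-bit little-endian register from an I2C HID device via the
	 * I2CRDWR pass-through, mirroring ietp_iic_identify() above.  'addr'
	 * is the (shifted) bus address, 'reg' the device register.
	 */
	static int
	read_le16_reg(device_t parent, uint16_t addr, uint16_t reg, uint16_t *val)
	{
		uint8_t cmd[2] = { reg & 0xff, (reg >> 8) & 0xff };
		uint8_t resp[2];
		struct iic_msg msgs[2] = {
			{ addr, IIC_M_WR | IIC_M_NOSTOP, sizeof(cmd), cmd },
			{ addr, IIC_M_RD, sizeof(resp), resp },
		};
		struct iic_rdwr_data ird = { msgs, nitems(msgs) };
		int error;

		error = hid_ioctl(parent, I2CRDWR, (uintptr_t)&ird);
		if (error == 0)
			*val = resp[0] | (resp[1] << 8);
		return (error);
	}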
static int
@@ -584,11 +622,13 @@ ietp_iic_set_absolute_mode(device_t dev, bool enable)
* Some ASUS touchpads need to be powered on to enter absolute mode.
*/
require_wakeup = false;
- for (i = 0; i < nitems(special_fw); i++) {
- if (sc->ic_type == special_fw[i].ic_type &&
- sc->product_id == special_fw[i].product_id) {
- require_wakeup = true;
- break;
+ if (!sc->open) {
+ for (i = 0; i < nitems(special_fw); i++) {
+ if (sc->ic_type == special_fw[i].ic_type &&
+ sc->product_id == special_fw[i].product_id) {
+ require_wakeup = true;
+ break;
+ }
}
}
diff --git a/sys/dev/hid/u2f.c b/sys/dev/hid/u2f.c
new file mode 100644
index 000000000000..08f1a5ceedba
--- /dev/null
+++ b/sys/dev/hid/u2f.c
@@ -0,0 +1,603 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_hid.h"
+
+#include <sys/param.h>
+#ifdef COMPAT_FREEBSD32
+#include <sys/abi_compat.h>
+#endif
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+#include <sys/uio.h>
+
+#include <dev/evdev/input.h>
+
+#define HID_DEBUG_VAR u2f_debug
+#include <dev/hid/hid.h>
+#include <dev/hid/hidbus.h>
+#include <dev/hid/hidquirk.h>
+
+#include <dev/usb/usb_ioctl.h>
+
+#ifdef HID_DEBUG
+static int u2f_debug = 0;
+static SYSCTL_NODE(_hw_hid, OID_AUTO, u2f, CTLFLAG_RW, 0,
+ "FIDO/U2F authenticator");
+SYSCTL_INT(_hw_hid_u2f, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &u2f_debug, 0, "Debug level");
+#endif
+
+#define U2F_MAX_REPORT_SIZE 64
+
+/* A match on these entries will load u2f */
+static const struct hid_device_id u2f_devs[] = {
+ { HID_BUS(BUS_USB), HID_TLC(HUP_FIDO, HUF_U2FHID) },
+};
+
+struct u2f_softc {
+ device_t sc_dev; /* base device */
+ struct cdev *dev;
+
+ struct mtx sc_mtx; /* hidbus private mutex */
+ struct task sc_kqtask; /* kqueue task */
+ void *sc_rdesc;
+ hid_size_t sc_rdesc_size;
+ hid_size_t sc_isize;
+ hid_size_t sc_osize;
+ struct selinfo sc_rsel;
+ struct { /* driver state */
+ bool open:1; /* device is open */
+ bool aslp:1; /* waiting for device data in read() */
+ bool sel:1; /* waiting for device data in poll() */
+ bool data:1; /* input report is stored in sc_buf */
+ int reserved:28;
+ } sc_state;
+ int sc_fflags; /* access mode for open lifetime */
+
+ uint8_t sc_buf[U2F_MAX_REPORT_SIZE];
+};
+
+static d_open_t u2f_open;
+static d_read_t u2f_read;
+static d_write_t u2f_write;
+static d_ioctl_t u2f_ioctl;
+static d_poll_t u2f_poll;
+static d_kqfilter_t u2f_kqfilter;
+
+static d_priv_dtor_t u2f_dtor;
+
+static struct cdevsw u2f_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = u2f_open,
+ .d_read = u2f_read,
+ .d_write = u2f_write,
+ .d_ioctl = u2f_ioctl,
+ .d_poll = u2f_poll,
+ .d_kqfilter = u2f_kqfilter,
+ .d_name = "u2f",
+};
+
+static hid_intr_t u2f_intr;
+
+static device_probe_t u2f_probe;
+static device_attach_t u2f_attach;
+static device_detach_t u2f_detach;
+
+static void u2f_kqtask(void *context, int pending);
+static int u2f_kqread(struct knote *, long);
+static void u2f_kqdetach(struct knote *);
+static void u2f_notify(struct u2f_softc *);
+
+static struct filterops u2f_filterops_read = {
+ .f_isfd = 1,
+ .f_detach = u2f_kqdetach,
+ .f_event = u2f_kqread,
+};
+
+static int
+u2f_probe(device_t dev)
+{
+ int error;
+
+ error = HIDBUS_LOOKUP_DRIVER_INFO(dev, u2f_devs);
+ if (error != 0)
+ return (error);
+
+ hidbus_set_desc(dev, "Authenticator");
+
+ return (BUS_PROBE_GENERIC);
+}
+
+static int
+u2f_attach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+ struct hid_device_info *hw = __DECONST(struct hid_device_info *,
+ hid_get_device_info(dev));
+ struct make_dev_args mda;
+ int error;
+
+ sc->sc_dev = dev;
+
+ error = hid_get_report_descr(dev, &sc->sc_rdesc, &sc->sc_rdesc_size);
+ if (error != 0)
+ return (ENXIO);
+ sc->sc_isize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_input, NULL);
+ if (sc->sc_isize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Input report size too large. Truncate.\n");
+ sc->sc_isize = U2F_MAX_REPORT_SIZE;
+ }
+ sc->sc_osize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_output, NULL);
+ if (sc->sc_osize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Output report size too large. Truncate.\n");
+ sc->sc_osize = U2F_MAX_REPORT_SIZE;
+ }
+
+ mtx_init(&sc->sc_mtx, "u2f lock", NULL, MTX_DEF);
+ knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
+ TASK_INIT(&sc->sc_kqtask, 0, u2f_kqtask, sc);
+
+ make_dev_args_init(&mda);
+ mda.mda_flags = MAKEDEV_WAITOK;
+ mda.mda_devsw = &u2f_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_U2F;
+ mda.mda_mode = 0660;
+ mda.mda_si_drv1 = sc;
+
+ error = make_dev_s(&mda, &sc->dev, "u2f/%d", device_get_unit(dev));
+ if (error) {
+ device_printf(dev, "Can not create character device\n");
+ u2f_detach(dev);
+ return (error);
+ }
+#ifndef U2F_DROP_UHID_ALIAS
+ (void)make_dev_alias(sc->dev, "uhid%d", device_get_unit(dev));
+#endif
+
+ hid_add_dynamic_quirk(hw, HQ_NO_READAHEAD);
+
+ hidbus_set_lock(dev, &sc->sc_mtx);
+ hidbus_set_intr(dev, u2f_intr, sc);
+
+ return (0);
+}
+
+static int
+u2f_detach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+
+ DPRINTF("sc=%p\n", sc);
+
+ if (sc->dev != NULL) {
+ mtx_lock(&sc->sc_mtx);
+ sc->dev->si_drv1 = NULL;
+ /* Wake everyone */
+ u2f_notify(sc);
+ mtx_unlock(&sc->sc_mtx);
+ destroy_dev(sc->dev);
+ }
+
+ taskqueue_drain(taskqueue_thread, &sc->sc_kqtask);
+ hid_intr_stop(sc->sc_dev);
+
+ knlist_clear(&sc->sc_rsel.si_note, 0);
+ knlist_destroy(&sc->sc_rsel.si_note);
+ seldrain(&sc->sc_rsel);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static void
+u2f_intr(void *context, void *buf, hid_size_t len)
+{
+ struct u2f_softc *sc = context;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ DPRINTFN(5, "len=%d\n", len);
+ DPRINTFN(5, "data = %*D\n", len, buf, " ");
+
+ if (sc->sc_state.data)
+ return;
+
+ if (len > sc->sc_isize)
+ len = sc->sc_isize;
+
+ bcopy(buf, sc->sc_buf, len);
+
+ /* Make sure we don't process old data */
+ if (len < sc->sc_isize)
+ bzero(sc->sc_buf + len, sc->sc_isize - len);
+
+ sc->sc_state.data = true;
+
+ u2f_notify(sc);
+}
+
+static int
+u2f_open(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ DPRINTF("sc=%p\n", sc);
+
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.open) {
+ mtx_unlock(&sc->sc_mtx);
+ return (EBUSY);
+ }
+ sc->sc_state.open = true;
+ mtx_unlock(&sc->sc_mtx);
+
+ error = devfs_set_cdevpriv(sc, u2f_dtor);
+ if (error != 0) {
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+ return (error);
+ }
+
+ /* Set up interrupt pipe. */
+ sc->sc_state.data = false;
+ sc->sc_fflags = flag;
+
+ return (0);
+}
+
+
+static void
+u2f_dtor(void *data)
+{
+ struct u2f_softc *sc = data;
+
+#ifdef NOT_YET
+ /* Disable interrupts. */
+ hid_intr_stop(sc->sc_dev);
+#endif
+
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+}
+
+static int
+u2f_read(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ size_t length = 0;
+ int error;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+
+ mtx_lock(&sc->sc_mtx);
+ if (dev->si_drv1 == NULL) {
+ error = EIO;
+ goto exit;
+ }
+
+ while (!sc->sc_state.data) {
+ if (flag & O_NONBLOCK) {
+ error = EWOULDBLOCK;
+ goto exit;
+ }
+ sc->sc_state.aslp = true;
+ DPRINTFN(5, "sleep on %p\n", &sc->sc_buf);
+ error = mtx_sleep(&sc->sc_buf, &sc->sc_mtx, PZERO | PCATCH,
+ "u2frd", 0);
+ DPRINTFN(5, "woke, error=%d\n", error);
+ if (dev->si_drv1 == NULL)
+ error = EIO;
+ if (error) {
+ sc->sc_state.aslp = false;
+ goto exit;
+ }
+ }
+
+ if (sc->sc_state.data && uio->uio_resid > 0) {
+ length = min(uio->uio_resid, sc->sc_isize);
+ memcpy(buf, sc->sc_buf, length);
+ sc->sc_state.data = false;
+ }
+exit:
+ mtx_unlock(&sc->sc_mtx);
+ if (length != 0) {
+ /* Copy the data to the user process. */
+ DPRINTFN(5, "got %lu chars\n", (u_long)length);
+ error = uiomove(buf, length, uio);
+ }
+
+ return (error);
+}
+
+static int
+u2f_write(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (uio->uio_resid != sc->sc_osize)
+ return (EINVAL);
+ error = uiomove(buf, uio->uio_resid, uio);
+ if (error == 0)
+ error = hid_write(sc->sc_dev, buf, sc->sc_osize);
+
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+static void
+update_ugd32(const struct usb_gen_descriptor *ugd,
+ struct usb_gen_descriptor32 *ugd32)
+{
+ /* Don't update hgd_data pointer */
+ CP(*ugd, *ugd32, ugd_lang_id);
+ CP(*ugd, *ugd32, ugd_maxlen);
+ CP(*ugd, *ugd32, ugd_actlen);
+ CP(*ugd, *ugd32, ugd_offset);
+ CP(*ugd, *ugd32, ugd_config_index);
+ CP(*ugd, *ugd32, ugd_string_index);
+ CP(*ugd, *ugd32, ugd_iface_index);
+ CP(*ugd, *ugd32, ugd_altif_index);
+ CP(*ugd, *ugd32, ugd_endpt_index);
+ CP(*ugd, *ugd32, ugd_report_type);
+ /* Don't update reserved */
+}
+#endif
+
+static int
+u2f_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
+{
+#ifdef COMPAT_FREEBSD32
+ struct usb_gen_descriptor local_ugd;
+ struct usb_gen_descriptor32 *ugd32 = NULL;
+#endif
+ struct u2f_softc *sc = dev->si_drv1;
+ struct usb_gen_descriptor *ugd = (struct usb_gen_descriptor *)addr;
+ uint32_t size;
+
+ DPRINTFN(2, "cmd=%lx\n", cmd);
+
+ if (sc == NULL)
+ return (EIO);
+
+#ifdef COMPAT_FREEBSD32
+ switch (cmd) {
+ case USB_GET_REPORT_DESC32:
+ cmd = _IOC_NEWTYPE(cmd, struct usb_gen_descriptor);
+ ugd32 = (struct usb_gen_descriptor32 *)addr;
+ ugd = &local_ugd;
+ PTRIN_CP(*ugd32, *ugd, ugd_data);
+ CP(*ugd32, *ugd, ugd_lang_id);
+ CP(*ugd32, *ugd, ugd_maxlen);
+ CP(*ugd32, *ugd, ugd_actlen);
+ CP(*ugd32, *ugd, ugd_offset);
+ CP(*ugd32, *ugd, ugd_config_index);
+ CP(*ugd32, *ugd, ugd_string_index);
+ CP(*ugd32, *ugd, ugd_iface_index);
+ CP(*ugd32, *ugd, ugd_altif_index);
+ CP(*ugd32, *ugd, ugd_endpt_index);
+ CP(*ugd32, *ugd, ugd_report_type);
+ /* Don't copy reserved */
+ break;
+ }
+#endif
+
+ /* fixed-length ioctls handling */
+ switch (cmd) {
+ case FIONBIO:
+ /* All handled in the upper FS layer. */
+ return (0);
+
+ case USB_GET_REPORT_DESC:
+ size = MIN(sc->sc_rdesc_size, ugd->ugd_maxlen);
+ ugd->ugd_actlen = size;
+#ifdef COMPAT_FREEBSD32
+ if (ugd32 != NULL)
+ update_ugd32(ugd, ugd32);
+#endif
+ if (ugd->ugd_data == NULL)
+ return (0); /* descriptor length only */
+
+ return (copyout(sc->sc_rdesc, ugd->ugd_data, size));
+
+ case USB_GET_DEVICEINFO:
+ return (hid_ioctl(
+ sc->sc_dev, USB_GET_DEVICEINFO, (uintptr_t)addr));
+ }
+
+ return (EINVAL);
+}
+
+static int
+u2f_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int revents = 0;
+ bool start_intr = false;
+
+ if (sc == NULL)
+ return (POLLHUP);
+
+ if (events & (POLLOUT | POLLWRNORM) && (sc->sc_fflags & FWRITE))
+ revents |= events & (POLLOUT | POLLWRNORM);
+ if (events & (POLLIN | POLLRDNORM) && (sc->sc_fflags & FREAD)) {
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.data)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else {
+ sc->sc_state.sel = true;
+ start_intr = true;
+ selrecord(td, &sc->sc_rsel);
+ }
+ mtx_unlock(&sc->sc_mtx);
+ if (start_intr)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (revents);
+}
+
+static int
+u2f_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ if (sc->sc_fflags & FREAD) {
+ kn->kn_fop = &u2f_filterops_read;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return (EINVAL);
+ }
+ kn->kn_hook = sc;
+
+ knlist_add(&sc->sc_rsel.si_note, kn, 0);
+ return (0);
+}
+
+static void
+u2f_kqtask(void *context, int pending)
+{
+ struct u2f_softc *sc = context;
+
+ hid_intr_start(sc->sc_dev);
+}
+
+static int
+u2f_kqread(struct knote *kn, long hint)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+ int ret;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->dev->si_drv1 == NULL) {
+ kn->kn_flags |= EV_EOF;
+ ret = 1;
+ } else {
+ ret = sc->sc_state.data ? 1 : 0;
+ if (!sc->sc_state.data)
+ taskqueue_enqueue(taskqueue_thread, &sc->sc_kqtask);
+ }
+
+ return (ret);
+}
+
+static void
+u2f_kqdetach(struct knote *kn)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+
+ knlist_remove(&sc->sc_rsel.si_note, kn, 0);
+}
+
+static void
+u2f_notify(struct u2f_softc *sc)
+{
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->sc_state.aslp) {
+ sc->sc_state.aslp = false;
+ DPRINTFN(5, "waking %p\n", &sc->sc_buf);
+ wakeup(&sc->sc_buf);
+ }
+ if (sc->sc_state.sel) {
+ sc->sc_state.sel = false;
+ selwakeuppri(&sc->sc_rsel, PZERO);
+ }
+ KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
+}
+
+static device_method_t u2f_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, u2f_probe),
+ DEVMETHOD(device_attach, u2f_attach),
+ DEVMETHOD(device_detach, u2f_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t u2f_driver = {
+#ifdef U2F_DROP_UHID_ALIAS
+ "uf2",
+#else
+ "uhid",
+#endif
+ u2f_methods,
+ sizeof(struct u2f_softc)
+};
+
+DRIVER_MODULE(u2f, hidbus, u2f_driver, NULL, NULL);
+MODULE_DEPEND(u2f, hidbus, 1, 1, 1);
+MODULE_DEPEND(u2f, hid, 1, 1, 1);
+MODULE_VERSION(u2f, 1);
+HID_PNP_INFO(u2f_devs);
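(Editor's illustration, not part of the patch: the driver exposes /dev/u2f/N, with a uhidN alias unless U2F_DROP_UHID_ALIAS is defined. Reads block until the authenticator posts an input report; writes must match the output report size, assumed here to be the full 64-byte U2F_MAX_REPORT_SIZE. A hedged userland sketch:)

	#include <err.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		uint8_t report[64];
		ssize_t n;
		int fd;

		if ((fd = open("/dev/u2f/0", O_RDWR)) < 0)
			err(1, "open");
		memset(report, 0, sizeof(report));
		/* ... build a U2FHID frame here ... */
		if (write(fd, report, sizeof(report)) != sizeof(report))
			err(1, "write");
		/* Blocks until u2f_intr() stores a report (see u2f_read()). */
		if ((n = read(fd, report, sizeof(report))) < 0)
			err(1, "read");
		printf("got %zd bytes\n", n);
		return (0);
	}

(The node is created mode 0660 and group-owned via GID_U2F, so the caller typically needs membership in the corresponding group.)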
diff --git a/sys/dev/hpt27xx/hptintf.h b/sys/dev/hpt27xx/hptintf.h
index 558b479ec2ee..eb8105ec5666 100644
--- a/sys/dev/hpt27xx/hptintf.h
+++ b/sys/dev/hpt27xx/hptintf.h
@@ -155,8 +155,8 @@ typedef HPT_U32 DEVICEID;
#define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */
#define ARRAY_FLAG_VERIFYING 0x00000100 /* is being verified */
#define ARRAY_FLAG_INITIALIZING 0x00000200 /* is being initialized */
-#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* tranform in progress */
-#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need tranform */
+#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* transform in progress */
+#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need transform */
#define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* the array's initialization hasn't finished*/
#define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant (raid6) */
#define ARRAY_FLAG_RAID15PLUS 0x80000000 /* display this RAID 1 as RAID 1.5 */
@@ -2018,7 +2018,7 @@ DEVICEID hpt_create_transform_v2(DEVICEID idArray, PCREATE_ARRAY_PARAMS_V3 destI
#endif
/* hpt_step_transform
- * move a block in a tranform progress.
+ * move a block in a transform progress.
* This function is called by mid-layer, not GUI (which uses set_array_state instead).
* Version compatibility: v2.0.0.0 or later
* Parameters:
diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c
index 5c4718bf582f..f3d58f285b39 100644
--- a/sys/dev/hptmv/entry.c
+++ b/sys/dev/hptmv/entry.c
@@ -430,7 +430,7 @@ static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plug
if(pVDev->pParent)
{
int iMember;
- for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++)
+ for (iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++)
if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev)
pVDev->pParent->u.array.pMember[iMember] = NULL;
pVDev->pParent = NULL;
@@ -984,7 +984,7 @@ fRegisterVdevice(IAL_ADAPTER_T *pAdapter)
PVBus pVBus;
int i,j;
- for(i=0;i<MV_SATA_CHANNELS_NUM;i++) {
+ for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) {
pPhysical = &(pAdapter->VDevices[i]);
pLogical = pPhysical;
while (pLogical->pParent) pLogical = pLogical->pParent;
@@ -1027,8 +1027,7 @@ GetSpareDisk(_VBUS_ARG PVDevice pArray)
PVDevice pVDevice, pFind = NULL;
int i;
- for(i=0;i<MV_SATA_CHANNELS_NUM;i++)
- {
+ for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) {
pVDevice = &pAdapter->VDevices[i];
if(!pVDevice)
continue;
@@ -1356,7 +1355,7 @@ unregister:
goto unregister;
}
- for (i=0; i<MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) {
+ for (i = 0; i < MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) {
FreeCommand(_VBUS_P &(pAdapter->pCommandBlocks[i]));
}
@@ -1370,7 +1369,7 @@ unregister:
memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM);
pAdapter->pbus_dmamap_list = 0;
- for (i=0; i < MAX_QUEUE_COMM; i++) {
+ for (i = 0; i < MAX_QUEUE_COMM; i++) {
PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]);
pmap->pAdapter = pAdapter;
dmamap_put(pmap);
@@ -1398,7 +1397,7 @@ unregister:
pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL);
{
PUCHAR PRDTable = pAdapter->prdTableAlignedAddr;
- for (i=0; i<PRD_TABLES_FOR_VBUS; i++)
+ for (i = 0; i < PRD_TABLES_FOR_VBUS; i++)
{
/* KdPrint(("i=%d,pAdapter->pFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */
FreePRDTable(pAdapter, PRDTable);
@@ -1447,7 +1446,7 @@ unregister:
}
#ifdef SUPPORT_ARRAY
- for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) {
+ for (i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) {
pVDev = ArrayTables(i);
mArFreeArrayTable(pVDev);
}
@@ -1467,7 +1466,7 @@ unregister:
_vbus_p->nInstances = 1;
fRegisterVdevice(pAdapter);
- for (channel=0;channel<MV_SATA_CHANNELS_NUM;channel++) {
+ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) {
pVDev = _vbus_p->pVDevice[channel];
if (pVDev && pVDev->vf_online)
fCheckBootable(pVDev);
@@ -1567,7 +1566,7 @@ fResetActiveCommands(PVBus _vbus_p)
{
MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter;
MV_U8 channel;
- for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) {
+ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) {
if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands)
MvSataResetChannel(pMvSataAdapter,channel);
}
@@ -1590,7 +1589,7 @@ check_cmds:
dataxfer_poll();
xor_poll();
#endif
- for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) {
+ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) {
pMvSataChannel = pMvSataAdapter->sataChannel[channel];
if (pMvSataChannel && pMvSataChannel->outstandingCommands)
{
@@ -1716,7 +1715,7 @@ fDeviceSendCommand(_VBUS_ARG PCommand pCmd)
MV_BOOLEAN is48bit;
MV_U8 channel;
- int i=0;
+ int i = 0;
DECLARE_BUFFER(FPSCAT_GATH, tmpSg);
@@ -2141,7 +2140,7 @@ FlushAdapter(IAL_ADAPTER_T *pAdapter)
hpt_printk(("flush all devices\n"));
/* flush all devices */
- for (i=0; i<MAX_VDEVICE_PER_VBUS; i++) {
+ for (i = 0; i < MAX_VDEVICE_PER_VBUS; i++) {
PVDevice pVDev = pAdapter->VBus.pVDevice[i];
if(pVDev) fFlushVDev(pVDev);
}
@@ -2174,7 +2173,7 @@ Check_Idle_Call(IAL_ADAPTER_T *pAdapter)
{
int i;
PVDevice pArray;
- for(i = 0; i < MAX_ARRAY_PER_VBUS; i++){
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
if ((pArray=ArrayTables(i))->u.array.dArStamp==0)
continue;
else if (pArray->u.array.rf_auto_rebuild) {
@@ -2378,7 +2377,7 @@ hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb)
static void hpt_worker_thread(void)
{
- for(;;) {
+ for (;;) {
mtx_lock(&DpcQueue_Lock);
while (DpcQueue_First!=DpcQueue_Last) {
ST_HPT_DPC p;
@@ -2418,7 +2417,7 @@ static void hpt_worker_thread(void)
mtx_lock(&pAdapter->lock);
_vbus_p = &pAdapter->VBus;
- for (i=0;i<MAX_ARRAY_PER_VBUS;i++)
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++)
{
if ((pArray=ArrayTables(i))->u.array.dArStamp==0)
continue;
@@ -2472,7 +2471,7 @@ launch_worker_thread(void)
int i;
PVDevice pVDev;
- for(i = 0; i < MAX_ARRAY_PER_VBUS; i++)
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++)
if ((pVDev=ArrayTables(i))->u.array.dArStamp==0)
continue;
else{
diff --git a/sys/dev/hptmv/gui_lib.c b/sys/dev/hptmv/gui_lib.c
index d78fdcca69d2..f11044db733a 100644
--- a/sys/dev/hptmv/gui_lib.c
+++ b/sys/dev/hptmv/gui_lib.c
@@ -86,8 +86,7 @@ check_VDevice_valid(PVDevice p)
while(pAdapter != NULL)
{
_vbus_p = &pAdapter->VBus;
- for (i=0;i<MAX_ARRAY_PER_VBUS;i++)
- {
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
pVDevice=ArrayTables(i);
if ((pVDevice->u.array.dArStamp != 0) && (pVDevice == p))
return 0;
@@ -244,9 +243,9 @@ static void get_array_info(PVDevice pVDevice, PHPT_ARRAY_INFO pArrayInfo)
if(pVDevice->u.array.pMember[i] != NULL)
pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]);
- for(i=pArrayInfo->nDisk; i<MAX_ARRAY_MEMBERS; i++)
+ for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS; i++)
pArrayInfo->Members[i] = INVALID_DEVICEID;
- }
+}
static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo)
{
@@ -266,7 +265,7 @@ static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo)
if(pVDevice->u.array.pMember[i] != NULL)
pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]);
- for(i=pArrayInfo->nDisk; i<MAX_ARRAY_MEMBERS_V2; i++)
+ for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS_V2; i++)
pArrayInfo->Members[i] = INVALID_DEVICEID;
}
#endif
@@ -461,8 +460,7 @@ found:
pInfo->IoPort = 0;
pInfo->ControlPort = 0;
- for (i=0; i<2 ;i++)
- {
+ for (i = 0; i < 2; i++) {
pInfo->Devices[i] = (DEVICEID)INVALID_DEVICEID;
}
diff --git a/sys/dev/hptmv/hptproc.c b/sys/dev/hptmv/hptproc.c
index 38fe61ee7e04..328750d9034c 100644
--- a/sys/dev/hptmv/hptproc.c
+++ b/sys/dev/hptmv/hptproc.c
@@ -107,7 +107,7 @@ hpt_set_asc_info(IAL_ADAPTER_T *pAdapter, char *buffer,int length)
return -EINVAL;
}
- for (i=0;i<MV_SATA_CHANNELS_NUM;i++)
+ for (i = 0; i < MV_SATA_CHANNELS_NUM; i++)
if(i == ichan)
goto rebuild;
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 9b85c989dc96..a6a6ae68996c 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -210,7 +210,7 @@ static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
static bool pmc_can_allocate_row(int ri, enum pmc_mode mode);
static bool pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
int cpu);
-static int pmc_can_attach(struct pmc *pm, struct proc *p);
+static bool pmc_can_attach(struct pmc *pm, struct proc *p);
static void pmc_capture_user_callchain(int cpu, int soft,
struct trapframe *tf);
static void pmc_cleanup(void);
@@ -1029,19 +1029,19 @@ pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
* Check if PMC 'pm' may be attached to target process 't'.
*/
-static int
+static bool
pmc_can_attach(struct pmc *pm, struct proc *t)
{
struct proc *o; /* pmc owner */
struct ucred *oc, *tc; /* owner, target credentials */
- int decline_attach, i;
+ bool decline_attach;
/*
* A PMC's owner can always attach that PMC to itself.
*/
if ((o = pm->pm_owner->po_owner) == t)
- return 0;
+ return (true);
PROC_LOCK(o);
oc = o->p_ucred;
@@ -1066,18 +1066,17 @@ pmc_can_attach(struct pmc *pm, struct proc *t)
* Every one of the target's group ids, must be in the owner's
* group list.
*/
- for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
+ for (int i = 0; !decline_attach && i < tc->cr_ngroups; i++)
decline_attach = !groupmember(tc->cr_groups[i], oc);
-
- /* check the read and saved gids too */
- if (decline_attach == 0)
- decline_attach = !groupmember(tc->cr_rgid, oc) ||
+ if (!decline_attach)
+ decline_attach = !groupmember(tc->cr_gid, oc) ||
+ !groupmember(tc->cr_rgid, oc) ||
!groupmember(tc->cr_svgid, oc);
crfree(tc);
crfree(oc);
- return !decline_attach;
+ return (!decline_attach);
}
/*
@@ -1412,7 +1411,7 @@ pmc_process_exec(struct thread *td, struct pmckern_procexec *pk)
*/
for (ri = 0; ri < md->pmd_npmc; ri++) {
if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
- if (pmc_can_attach(pm, td->td_proc) != 0) {
+ if (pmc_can_attach(pm, td->td_proc)) {
pmc_detach_one_process(td->td_proc, pm,
PMC_FLAG_NONE);
}
diff --git a/sys/dev/hwt/hwt_ioctl.c b/sys/dev/hwt/hwt_ioctl.c
index 592db4931bb4..184c7e72f986 100644
--- a/sys/dev/hwt/hwt_ioctl.c
+++ b/sys/dev/hwt/hwt_ioctl.c
@@ -112,12 +112,11 @@ hwt_priv_check(struct proc *o, struct proc *t)
error = EPERM;
goto done;
}
-
- /* Check the read and saved GIDs too. */
- if (!groupmember(tc->cr_rgid, oc) ||
+ if (!groupmember(tc->cr_gid, oc) ||
+ !groupmember(tc->cr_rgid, oc) ||
!groupmember(tc->cr_svgid, oc)) {
- error = EPERM;
- goto done;
+ error = EPERM;
+ goto done;
}
done:
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index c480900596f4..a623f810c101 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -198,7 +198,7 @@ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
* ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise AND on two "source" bitmaps of the same size
@@ -237,7 +237,7 @@ ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
* ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise OR on two "source" bitmaps of the same size
diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c
index ad4ea4c8e7a1..b895f661bc46 100644
--- a/sys/dev/ice/ice_common.c
+++ b/sys/dev/ice/ice_common.c
@@ -213,6 +213,15 @@ int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E830_L_QSFP:
case ICE_DEV_ID_E830C_SFP:
case ICE_DEV_ID_E830_L_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h
index 3f91e9dfbcaf..74712c61ae8e 100644
--- a/sys/dev/ice/ice_devids.h
+++ b/sys/dev/ice/ice_devids.h
@@ -62,6 +62,24 @@
#define ICE_DEV_ID_E830C_SFP 0x12DA
/* Intel(R) Ethernet Controller E830-L for SFP */
#define ICE_DEV_ID_E830_L_SFP 0x12DE
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
index 2a51a7394424..46965f4124bc 100644
--- a/sys/dev/ice/ice_drv_info.h
+++ b/sys/dev/ice/ice_drv_info.h
@@ -218,6 +218,45 @@ static const pci_vendor_info_t ice_vendor_info_array[] = {
"Intel(R) Ethernet Network Adapter E830-XXV-2"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_SFP,
"Intel(R) Ethernet Connection E830-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_BACKPLANE,
+ "Intel(R) Ethernet Connection E835-CC for backplane"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E835-C-Q2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E835-C-Q2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E835-CC-Q1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E835-CC-Q1 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ "Intel(R) Ethernet Connection E835-CC for QSFP56"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-4 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ "Intel(R) Ethernet Connection E835-CC for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835C_BACKPLANE,
+ "Intel(R) Ethernet Connection E835-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835C_QSFP,
+ "Intel(R) Ethernet Connection E835-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835C_SFP,
+ "Intel(R) Ethernet Connection E835-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835_L_BACKPLANE,
+ "Intel(R) Ethernet Connection E835-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835_L_QSFP,
+ "Intel(R) Ethernet Connection E835-L for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835_L_SFP,
+ "Intel(R) Ethernet Connection E835-L for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE,
"Intel(R) Ethernet Connection E825-C for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP,
diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c
index 0025a65d73fc..16a9ab6823bf 100644
--- a/sys/dev/ice/ice_fw_logging.c
+++ b/sys/dev/ice/ice_fw_logging.c
@@ -48,7 +48,7 @@ SDT_PROVIDER_DEFINE(ice_fwlog);
/*
* SDT DTrace probe fired when a firmware log message is received over the
- * AdminQ. It passes the buffer of the firwmare log message along with its
+ * AdminQ. It passes the buffer of the firmware log message along with its
* length in bytes to the DTrace framework.
*/
SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int");
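
With the probe defined above, the message stream should be observable from userland via dtrace(1) as ice_fwlog:::message, where arg0 is the message buffer and arg1 its length in bytes; e.g. dtrace -n 'ice_fwlog:::message { printf("fw log message, %d bytes", arg1); }' (invocation illustrative, not part of the patch).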
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 693e0ca5efc6..eedacdab0216 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -630,7 +630,7 @@ enum ice_rxdid {
ICE_RXDID_LAST = 63,
};
-/* Recceive Flex descriptor Dword Index */
+/* Receive Flex descriptor Dword Index */
enum ice_flex_word {
ICE_RX_FLEX_DWORD_0 = 0,
ICE_RX_FLEX_DWORD_1,
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index 442111e5ffaf..8b6349f686eb 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -7818,7 +7818,8 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_OPACKETS:
return (es->tx_unicast + es->tx_multicast + es->tx_broadcast);
case IFCOUNTER_OERRORS:
- return (es->tx_errors);
+ return (if_get_counter_default(vsi->sc->ifp, counter) +
+ es->tx_errors);
case IFCOUNTER_COLLISIONS:
return (0);
case IFCOUNTER_IBYTES:
@@ -7832,7 +7833,8 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_IQDROPS:
return (es->rx_discards);
case IFCOUNTER_OQDROPS:
- return (hs->tx_dropped_link_down);
+ return (if_get_counter_default(vsi->sc->ifp, counter) +
+ hs->tx_dropped_link_down);
case IFCOUNTER_NOPROTO:
return (es->rx_unknown_protocol);
default:
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index 308b2bda2790..640bdf8fed7b 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -313,7 +313,7 @@ enum ice_dyn_idx_t {
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
-/* By convenction ITR0 is used for RX, and ITR1 is used for TX */
+/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index 300d61bfb5d9..b90c25e6c427 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -143,7 +143,7 @@ enum ice_prot_id {
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
- ICE_PROT_META_ID = 255, /* when offset == metaddata */
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/sys/dev/ichsmb/ichsmb_pci.c b/sys/dev/ichsmb/ichsmb_pci.c
index 728bb942d503..e4d87fe1fed2 100644
--- a/sys/dev/ichsmb/ichsmb_pci.c
+++ b/sys/dev/ichsmb/ichsmb_pci.c
@@ -107,6 +107,7 @@
#define ID_COMETLAKE2 0x06a3
#define ID_TIGERLAKE 0xa0a3
#define ID_TIGERLAKE2 0x43a3
+#define ID_ELKHARTLAKE 0x4b23
#define ID_GEMINILAKE 0x31d4
#define ID_CEDARFORK 0x18df
#define ID_ICELAKE 0x34a3
@@ -206,6 +207,8 @@ static const struct pci_device_table ichsmb_devices[] = {
PCI_DESCR("Intel Tiger Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_TIGERLAKE2),
PCI_DESCR("Intel Tiger Lake SMBus controller") },
+ { PCI_DEV(PCI_VENDOR_INTEL, ID_ELKHARTLAKE),
+ PCI_DESCR("Intel Elkhart Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_GEMINILAKE),
PCI_DESCR("Intel Gemini Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_CEDARFORK),
diff --git a/sys/dev/ichwd/i6300esbwd.c b/sys/dev/ichwd/i6300esbwd.c
new file mode 100644
index 000000000000..03d504a350aa
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * Reference: Intel 6300ESB Controller Hub Datasheet Section 16
+ */
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <sys/watchdog.h>
+
+#include <dev/pci/pcireg.h>
+
+#include <dev/ichwd/ichwd.h>
+#include <dev/ichwd/i6300esbwd.h>
+
+#include <x86/pci_cfgreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+struct i6300esbwd_softc {
+ device_t dev;
+ int res_id;
+ struct resource *res;
+ eventhandler_tag ev_tag;
+ bool locked;
+};
+
+static const struct i6300esbwd_pci_id {
+ uint16_t id;
+ const char *name;
+} i6300esbwd_pci_devices[] = {
+ { DEVICEID_6300ESB_2, "6300ESB Watchdog Timer" },
+};
+
+static uint16_t __unused
+i6300esbwd_cfg_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_CONFIG_REG, 2));
+}
+
+static void
+i6300esbwd_cfg_write(struct i6300esbwd_softc *sc, uint16_t val)
+{
+ pci_write_config(sc->dev, WDT_CONFIG_REG, val, 2);
+}
+
+static uint8_t
+i6300esbwd_lock_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_LOCK_REG, 1));
+}
+
+static void
+i6300esbwd_lock_write(struct i6300esbwd_softc *sc, uint8_t val)
+{
+ pci_write_config(sc->dev, WDT_LOCK_REG, val, 1);
+}
+
+/*
+ * According to Intel 6300ESB I/O Controller Hub Datasheet 16.5.2,
+ * the resource should be unlocked before modifying any registers.
+ * The way to unlock is by writing 0x80 then 0x86 to the reload register.
+ */
+static void
+i6300esbwd_unlock_res(struct i6300esbwd_softc *sc)
+{
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_1_VAL);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_2_VAL);
+}
+
+static int
+i6300esbwd_sysctl_locked(SYSCTL_HANDLER_ARGS)
+{
+ struct i6300esbwd_softc *sc = (struct i6300esbwd_softc *)arg1;
+ int error;
+ int result;
+
+ result = sc->locked;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1 && !sc->locked) {
+ i6300esbwd_lock_write(sc, i6300esbwd_lock_read(sc) | WDT_LOCK);
+ sc->locked = true;
+ }
+
+ return (0);
+}
+
+static void
+i6300esbwd_event(void *arg, unsigned int cmd, int *error)
+{
+ struct i6300esbwd_softc *sc = arg;
+ uint32_t timeout;
+ uint16_t regval;
+
+ cmd &= WD_INTERVAL;
+ if (cmd != 0 &&
+ (cmd < WD_TO_1MS || (cmd - WD_TO_1MS) >= WDT_PRELOAD_BIT)) {
+ *error = EINVAL;
+ return;
+ }
+ timeout = (cmd != 0) ? 1 << (cmd - WD_TO_1MS) : 0; /* avoid a negative shift */
+
+ /* reset the timer in case a timeout is about to occur */
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!cmd) {
+ /*
+ * when the lock is enabled, we are unable to overwrite LOCK
+ * register
+ */
+ if (sc->locked)
+ *error = EPERM;
+ else
+ i6300esbwd_lock_write(sc,
+ i6300esbwd_lock_read(sc) & ~WDT_ENABLE);
+ return;
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_1_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_2_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!sc->locked) {
+ i6300esbwd_lock_write(sc, WDT_ENABLE);
+ regval = i6300esbwd_lock_read(sc);
+ sc->locked = regval & WDT_LOCK;
+ }
+}
+
+static int
+i6300esbwd_probe(device_t dev)
+{
+ const struct i6300esbwd_pci_id *pci_id;
+ uint16_t pci_dev_id;
+ int err = ENXIO;
+
+ if (pci_get_vendor(dev) != VENDORID_INTEL)
+ goto end;
+
+ pci_dev_id = pci_get_device(dev);
+ for (pci_id = i6300esbwd_pci_devices;
+ pci_id < i6300esbwd_pci_devices + nitems(i6300esbwd_pci_devices);
+ ++pci_id) {
+ if (pci_id->id == pci_dev_id) {
+ device_set_desc(dev, pci_id->name);
+ err = BUS_PROBE_DEFAULT;
+ break;
+ }
+ }
+
+end:
+ return (err);
+}
+
+static int
+i6300esbwd_attach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+ uint16_t regval;
+
+ sc->dev = dev;
+ sc->res_id = PCIR_BAR(0);
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->res_id,
+ RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev, "unable to map memory region\n");
+ return (ENXIO);
+ }
+
+ i6300esbwd_cfg_write(sc, WDT_INT_TYPE_DISABLED_VAL);
+ regval = i6300esbwd_lock_read(sc);
+ if (regval & WDT_LOCK)
+ sc->locked = true;
+ else {
+ sc->locked = false;
+ i6300esbwd_lock_write(sc, WDT_TOUT_CNF_WT_MODE);
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD | WDT_TIMEOUT);
+
+ sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, i6300esbwd_event, sc,
+ 0);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "locked",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ i6300esbwd_sysctl_locked, "I",
+ "Lock the timer so that we cannot disable it");
+
+ return (0);
+}
+
+static int
+i6300esbwd_detach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+
+ if (sc->ev_tag)
+ EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag);
+
+ if (sc->res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->res_id, sc->res);
+
+ return (0);
+}
+
+static device_method_t i6300esbwd_methods[] = {
+ DEVMETHOD(device_probe, i6300esbwd_probe),
+ DEVMETHOD(device_attach, i6300esbwd_attach),
+ DEVMETHOD(device_detach, i6300esbwd_detach),
+ DEVMETHOD(device_shutdown, i6300esbwd_detach),
+ DEVMETHOD_END
+};
+
+static driver_t i6300esbwd_driver = {
+ "i6300esbwd",
+ i6300esbwd_methods,
+ sizeof(struct i6300esbwd_softc),
+};
+
+DRIVER_MODULE(i6300esbwd, pci, i6300esbwd_driver, NULL, NULL);
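
Once attached, the lock can be engaged from userland through the "locked" sysctl registered above; given the device sysctl tree this should surface as something like dev.i6300esbwd.0.locked, settable to 1 via sysctl(8) (node path inferred from the code, not verified). Since WDT_LOCK is write-once in hardware, the handler deliberately offers no way to clear it.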
diff --git a/sys/dev/ichwd/i6300esbwd.h b/sys/dev/ichwd/i6300esbwd.h
new file mode 100644
index 000000000000..39ed5d5a84f6
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _I6300ESBWD_H_
+#define _I6300ESBWD_H_
+
+#define WDT_CONFIG_REG 0x60
+#define WDT_LOCK_REG 0x68
+
+#define WDT_PRELOAD_1_REG 0x00
+#define WDT_PRELOAD_2_REG 0x04
+#define WDT_INTR_REG 0x08
+#define WDT_RELOAD_REG 0x0C
+
+/* For config register */
+#define WDT_OUTPUT_EN (0x1 << 5)
+#define WDT_PRE_SEL (0x1 << 2)
+#define WDT_INT_TYPE_BITS (0x3)
+#define WDT_INT_TYPE_IRQ_VAL (0x0)
+#define WDT_INT_TYPE_RES_VAL (0x1)
+#define WDT_INT_TYPE_SMI_VAL (0x2)
+#define WDT_INT_TYPE_DISABLED_VAL (0x3)
+
+/* For lock register */
+#define WDT_TOUT_CNF_WT_MODE (0x0 << 2)
+#define WDT_TOUT_CNF_FR_MODE (0x1 << 2)
+#define WDT_ENABLE (0x02)
+#define WDT_LOCK (0x01)
+
+/* For preload 1/2 registers */
+#define WDT_PRELOAD_BIT 20
+#define WDT_PRELOAD_BITS ((0x1 << WDT_PRELOAD_BIT) - 1)
+
+/* For interrupt register */
+#define WDT_INTR_ACT (0x01 << 0)
+
+/* For reload register */
+#define WDT_TIMEOUT (0x01 << 9)
+#define WDT_RELOAD (0x01 << 8)
+#define WDT_UNLOCK_SEQ_1_VAL 0x80
+#define WDT_UNLOCK_SEQ_2_VAL 0x86
+
+#endif /* _I6300ESBWD_H_ */
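
For reference, watchdog(4) encodes the requested interval as log2 of nanoseconds, so the preload value the driver writes works out to the interval in roughly millisecond ticks (2^20 ns ≈ 1 ms). A sketch of the conversion, assuming WD_TO_1MS from <sys/watchdog.h> and the default prescaler:

	#include <sys/watchdog.h>

	/* Sketch: interval encoding (log2 ns) -> 6300ESB preload ticks. */
	static uint32_t
	wd_cmd_to_preload(u_int cmd)
	{
		cmd &= WD_INTERVAL;			/* log2(nanoseconds) */
		if (cmd < WD_TO_1MS)
			return (0);			/* below ~1 ms resolution */
		return (1U << (cmd - WD_TO_1MS));	/* ~1 ms units */
	}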
diff --git a/sys/dev/ichwd/ichwd.c b/sys/dev/ichwd/ichwd.c
index cade2cc4fb45..5481553cc175 100644
--- a/sys/dev/ichwd/ichwd.c
+++ b/sys/dev/ichwd/ichwd.c
@@ -90,7 +90,7 @@ static struct ichwd_device ichwd_devices[] = {
{ DEVICEID_82801E, "Intel 82801E watchdog timer", 5, 1 },
{ DEVICEID_82801EB, "Intel 82801EB watchdog timer", 5, 1 },
{ DEVICEID_82801EBR, "Intel 82801EB/ER watchdog timer", 5, 1 },
- { DEVICEID_6300ESB, "Intel 6300ESB watchdog timer", 5, 1 },
+ { DEVICEID_6300ESB_1, "Intel 6300ESB watchdog timer", 5, 1 },
{ DEVICEID_82801FBR, "Intel 82801FB/FR watchdog timer", 6, 2 },
{ DEVICEID_ICH6M, "Intel ICH6M watchdog timer", 6, 2 },
{ DEVICEID_ICH6W, "Intel ICH6W watchdog timer", 6, 2 },
diff --git a/sys/dev/ichwd/ichwd.h b/sys/dev/ichwd/ichwd.h
index 90fda08b74c1..72d0ca1cd6aa 100644
--- a/sys/dev/ichwd/ichwd.h
+++ b/sys/dev/ichwd/ichwd.h
@@ -151,7 +151,8 @@ struct ichwd_softc {
#define DEVICEID_82801E 0x2450
#define DEVICEID_82801EB 0x24dc
#define DEVICEID_82801EBR 0x24d0
-#define DEVICEID_6300ESB 0x25a1
+#define DEVICEID_6300ESB_1 0x25a1
+#define DEVICEID_6300ESB_2 0x25ab
#define DEVICEID_82801FBR 0x2640
#define DEVICEID_ICH6M 0x2641
#define DEVICEID_ICH6W 0x2642
diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c
index a1ae35c7aa43..f199a128c783 100644
--- a/sys/dev/igc/if_igc.c
+++ b/sys/dev/igc/if_igc.c
@@ -2599,8 +2599,8 @@ igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
sc->stats.ruc + sc->stats.roc +
sc->stats.mpc + sc->stats.htdpmc);
case IFCOUNTER_OERRORS:
- return (sc->stats.ecol + sc->stats.latecol +
- sc->watchdog_events);
+ return (if_get_counter_default(ifp, cnt) +
+ sc->stats.ecol + sc->stats.latecol + sc->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
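
The ice(4) and igc(4) counter hunks share one idiom: fold the stack-maintained default counter in instead of replacing it, so driver-tracked errors add to (rather than hide) whatever the generic layer already counts. A sketch of the shape, with foo_softc and its stats field standing in for the real driver:

	static uint64_t
	foo_if_get_counter(if_ctx_t ctx, ift_counter cnt)
	{
		struct foo_softc *sc = iflib_get_softc(ctx);
		if_t ifp = iflib_get_ifp(ctx);

		switch (cnt) {
		case IFCOUNTER_OERRORS:
			/* driver-kept errors on top of the defaults */
			return (if_get_counter_default(ifp, cnt) +
			    sc->stats.tx_errors);
		default:
			return (if_get_counter_default(ifp, cnt));
		}
	}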
diff --git a/sys/dev/iicbus/gpio/pcf8574.c b/sys/dev/iicbus/gpio/pcf8574.c
index ab6e2bc07d1f..bf60dec67557 100644
--- a/sys/dev/iicbus/gpio/pcf8574.c
+++ b/sys/dev/iicbus/gpio/pcf8574.c
@@ -142,12 +142,13 @@ pcf8574_attach(device_t dev)
(void)pcf8574_write(sc, 0xff);
sx_init(&sc->lock, "pcf8574");
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Could not create busdev child\n");
sx_destroy(&sc->lock);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -158,9 +159,7 @@ pcf8574_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
sx_destroy(&sc->lock);
return (0);
}
diff --git a/sys/dev/iicbus/gpio/tca64xx.c b/sys/dev/iicbus/gpio/tca64xx.c
index cd011ae9be75..ab8fedd3f8fd 100644
--- a/sys/dev/iicbus/gpio/tca64xx.c
+++ b/sys/dev/iicbus/gpio/tca64xx.c
@@ -262,7 +262,7 @@ tca64xx_attach(device_t dev)
mtx_init(&sc->mtx, "tca64xx gpio", "gpio", MTX_DEF);
OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Could not create busdev child\n");
return (ENXIO);
@@ -281,6 +281,7 @@ tca64xx_attach(device_t dev)
}
#endif
+ bus_attach_children(dev);
return (0);
}
@@ -291,9 +292,7 @@ tca64xx_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
mtx_destroy(&sc->mtx);
return (0);
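
Both expander drivers here move from the one-shot gpiobus_attach_bus() to the split gpiobus_add_bus() plus bus_attach_children() sequence, and detach now goes through gpiobus_detach_bus() on the provider device itself rather than on the cached busdev child. The resulting shape, sketched with error handling trimmed:

	static int
	foo_gpio_attach(device_t dev)
	{
		struct foo_softc *sc = device_get_softc(dev);

		sc->busdev = gpiobus_add_bus(dev);	/* add the child bus */
		if (sc->busdev == NULL)
			return (ENXIO);
		bus_attach_children(dev);		/* then probe/attach it */
		return (0);
	}

	static int
	foo_gpio_detach(device_t dev)
	{
		gpiobus_detach_bus(dev);	/* takes the provider, not busdev */
		return (0);
	}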
diff --git a/sys/dev/iicbus/iicbb.c b/sys/dev/iicbus/iicbb.c
index c344bda930b0..5f6423135f46 100644
--- a/sys/dev/iicbus/iicbb.c
+++ b/sys/dev/iicbus/iicbb.c
@@ -331,7 +331,7 @@ iicbb_getack(device_t dev)
{
struct iicbb_softc *sc = device_get_softc(dev);
int noack, err;
- int t;
+ int t = 0;
/* Release SDA so that the slave can drive it. */
err = iicbb_clockin(dev, 1);
@@ -341,12 +341,13 @@ iicbb_getack(device_t dev)
}
/* Sample SDA until ACK (low) or udelay runs out. */
- for (t = 0; t < sc->udelay; t++) {
+ do {
noack = I2C_GETSDA(dev);
if (!noack)
break;
DELAY(1);
- }
+ t++;
+ } while (t < sc->udelay);
DELAY(sc->udelay - t);
iicbb_clockout(dev);
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index 3f1d7a0cefba..5ca3f1b84e48 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -540,7 +540,7 @@ iichid_sampling_task(void *context, int pending)
error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual);
if (error == 0) {
if (actual > 0) {
- sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual);
+ sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual - 2);
sc->missing_samples = 0;
if (sc->dup_size != actual ||
memcmp(sc->dup_buf, sc->intr_buf, actual) != 0) {
@@ -607,7 +607,7 @@ iichid_intr(void *context)
if (sc->power_on && sc->open) {
if (actual != 0)
sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2,
- actual);
+ actual - 2);
else
DPRINTF(sc, "no data received\n");
}
@@ -816,12 +816,13 @@ iichid_intr_setup(device_t dev, device_t child __unused, hid_intr_t intr,
sc = device_get_softc(dev);
/*
- * Do not rely just on wMaxInputLength, as some devices (which?)
- * may set it to a wrong length. Also find the longest input report
- * in report descriptor, and add two for the length field.
+ * Start with wMaxInputLength to follow the HID-over-I2C spec. Then,
+ * if a semi-HID device like ietp(4) has requested a different input
+ * buffer size via report descriptor overloading, use the longest
+ * input report in the descriptor instead.
*/
- rdesc->rdsize = 2 +
- MAX(rdesc->isize, le16toh(sc->desc.wMaxInputLength));
+ rdesc->rdsize = rdesc->rdsize == 0 ?
+ le16toh(sc->desc.wMaxInputLength) - 2 : rdesc->isize;
/* Write and get/set_report sizes are limited by I2C-HID protocol. */
rdesc->grsize = rdesc->srsize = IICHID_SIZE_MAX;
rdesc->wrsize = IICHID_SIZE_MAX;
@@ -831,7 +832,7 @@ iichid_intr_setup(device_t dev, device_t child __unused, hid_intr_t intr,
sc->intr_handler = intr;
sc->intr_ctx = context;
- sc->intr_bufsize = rdesc->rdsize;
+ sc->intr_bufsize = rdesc->rdsize + 2;
sc->intr_buf = realloc(sc->intr_buf, sc->intr_bufsize,
M_DEVBUF, M_WAITOK | M_ZERO);
#ifdef IICHID_SAMPLING
@@ -861,7 +862,8 @@ iichid_intr_start(device_t dev, device_t child __unused)
sc = device_get_softc(dev);
DPRINTF(sc, "iichid device open\n");
- iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
+ if (!sc->open)
+ iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
return (0);
}
@@ -1092,7 +1094,8 @@ iichid_probe(device_t dev)
}
if (le16toh(sc->desc.wHIDDescLength) != 30 ||
- le16toh(sc->desc.bcdVersion) != 0x100) {
+ le16toh(sc->desc.bcdVersion) != 0x100 ||
+ le16toh(sc->desc.wMaxInputLength) < 2) {
DPRINTF(sc, "HID descriptor is broken\n");
return (ENXIO);
}
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
index 6856b0551dde..668ccf056463 100644
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -114,8 +114,8 @@ iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
* domain, and must collectively be assigned to use either IOMMU or
* bounce mapping.
*/
-device_t
-iommu_get_requester(device_t dev, uint16_t *rid)
+int
+iommu_get_requester(device_t dev, device_t *requesterp, uint16_t *rid)
{
devclass_t pci_class;
device_t l, pci, pcib, pcip, pcibp, requester;
@@ -129,7 +129,8 @@ iommu_get_requester(device_t dev, uint16_t *rid)
pci = device_get_parent(dev);
if (pci == NULL || device_get_devclass(pci) != pci_class) {
*rid = 0; /* XXXKIB: Could be ACPI HID */
- return (requester);
+ *requesterp = NULL;
+ return (ENOTTY);
}
*rid = pci_get_rid(dev);
@@ -141,16 +142,39 @@ iommu_get_requester(device_t dev, uint16_t *rid)
*/
for (;;) {
pci = device_get_parent(l);
- KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
- "for %s", device_get_name(dev), device_get_name(l)));
- KASSERT(device_get_devclass(pci) == pci_class,
- ("iommu_get_requester(%s): non-pci parent %s for %s",
- device_get_name(dev), device_get_name(pci),
- device_get_name(l)));
+ if (pci == NULL) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): NULL parent for %s\n",
+ device_get_name(dev), device_get_name(l));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
+ if (device_get_devclass(pci) != pci_class) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): non-pci parent %s for %s\n",
+ device_get_name(dev), device_get_name(pci),
+ device_get_name(l));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
pcib = device_get_parent(pci);
- KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
- "for %s", device_get_name(dev), device_get_name(pci)));
+ if (pcib == NULL) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): NULL bridge for %s\n",
+ device_get_name(dev), device_get_name(pci));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
/*
* The parent of our "bridge" isn't another PCI bus,
@@ -229,7 +253,8 @@ iommu_get_requester(device_t dev, uint16_t *rid)
}
}
}
- return (requester);
+ *requesterp = requester;
+ return (0);
}
struct iommu_ctx *
@@ -237,10 +262,13 @@ iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr)
{
device_t requester;
struct iommu_ctx *ctx;
+ int error;
bool disabled;
uint16_t rid;
- requester = iommu_get_requester(dev, &rid);
+ error = iommu_get_requester(dev, &requester, &rid);
+ if (error != 0)
+ return (NULL);
/*
* If the user requested the IOMMU disabled for the device, we
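
With iommu_get_requester() now reporting failure instead of asserting, callers follow the pattern in the hunk above: check the return value and skip IOMMU handling for the device when no requester can be determined, e.g.

	device_t requester;
	uint16_t rid;

	if (iommu_get_requester(dev, &requester, &rid) != 0)
		return (NULL);	/* no usable requester; leave device unmapped */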
diff --git a/sys/dev/iommu/iommu.h b/sys/dev/iommu/iommu.h
index b1858f0df9f7..55044042c5d2 100644
--- a/sys/dev/iommu/iommu.h
+++ b/sys/dev/iommu/iommu.h
@@ -170,7 +170,7 @@ void iommu_domain_unload(struct iommu_domain *domain,
void iommu_unit_pre_instantiate_ctx(struct iommu_unit *iommu);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
device_t dev, bool rmrr);
-device_t iommu_get_requester(device_t dev, uint16_t *rid);
+int iommu_get_requester(device_t dev, device_t *requester, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);
diff --git a/sys/dev/iommu/iommu_gas.c b/sys/dev/iommu/iommu_gas.c
index ffa8dc096adc..80e37341b3dc 100644
--- a/sys/dev/iommu/iommu_gas.c
+++ b/sys/dev/iommu/iommu_gas.c
@@ -77,7 +77,7 @@ static int iommu_check_free;
#endif
static void
-intel_gas_init(void)
+intel_gas_init(void *dummy __unused)
{
iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
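
The added dummy argument presumably aligns the function with the SYSINIT handler type, which takes a void * the handler may ignore; the canonical shape is:

	/* SYSINIT callbacks have type void (*)(void *). */
	static void
	foo_init(void *dummy __unused)
	{
		/* one-time initialization */
	}
	SYSINIT(foo_init, SI_SUB_DRIVERS, SI_ORDER_FIRST, foo_init, NULL);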
diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c
index 01d713cdae18..9db562669487 100644
--- a/sys/dev/ipw/if_ipw.c
+++ b/sys/dev/ipw/if_ipw.c
@@ -283,6 +283,8 @@ ipw_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i supported */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val >> 8;
@@ -1557,6 +1559,7 @@ ipw_tx_start(struct ipw_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
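
The same two-part conversion repeats in the iwi(4), iwm(4), iwn(4) and iwx(4) hunks below: advertise that the driver (or firmware) assigns 802.11 sequence numbers, then stamp them in the TX path before crypto encapsulation, skipping A-MPDU frames, which the aggregation path numbers itself. The recurring shape:

	/* attach: net80211 must not assign sequence numbers itself */
	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;

	/* TX path: assign for non-A-MPDU frames (TID -1, as in these hunks) */
	if ((m->m_flags & M_AMPDU_MPDU) == 0)
		ieee80211_output_seqno_assign(ni, -1, m);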
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 450fae662dd8..d4d4f328fb43 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1316,7 +1316,7 @@ irdma_cm_timer_tick(struct timer_list *t)
struct irdma_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
struct list_head *list_node;
- struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_cm_core *cm_core = timer_container_of(cm_core, t, tcp_timer);
struct irdma_sc_vsi *vsi;
u32 settimer = 0;
unsigned long timetosend;
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 5fc37022981f..038f1980082b 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -876,7 +876,7 @@ irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
static void
irdma_terminate_timeout(struct timer_list *t)
{
- struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
struct irdma_sc_qp *qp = &iwqp->sc_qp;
irdma_terminate_done(qp, 1);
@@ -1528,7 +1528,7 @@ static void
irdma_hw_stats_timeout(struct timer_list *t)
{
struct irdma_vsi_pestat *pf_devstat =
- from_timer(pf_devstat, t, stats_timer);
+ timer_container_of(pf_devstat, t, stats_timer);
struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
diff --git a/sys/dev/isci/scil/intel_sata.h b/sys/dev/isci/scil/intel_sata.h
index 4cf4adf03e07..fdad5be9b083 100644
--- a/sys/dev/isci/scil/intel_sata.h
+++ b/sys/dev/isci/scil/intel_sata.h
@@ -61,7 +61,7 @@
*
* @brief This file defines all of the SATA releated constants, enumerations,
* and types. Please note that this file does not necessarily contain
- * an exhaustive list of all contants and commands.
+ * an exhaustive list of all constants and commands.
*/
/**
diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c
index 3a410a5cbf2c..26b8037186a6 100644
--- a/sys/dev/iwi/if_iwi.c
+++ b/sys/dev/iwi/if_iwi.c
@@ -371,6 +371,8 @@ iwi_attach(device_t dev)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val & 0xff;
@@ -1834,6 +1836,8 @@ iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
} else
staid = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c
index 1e9090310ece..6840c6a4d00a 100644
--- a/sys/dev/iwm/if_iwm.c
+++ b/sys/dev/iwm/if_iwm.c
@@ -3773,6 +3773,10 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
+ /* Offloaded sequence number assignment; non-AMPDU case */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX && do software encryption. */
@@ -6142,7 +6146,8 @@ iwm_attach(device_t dev)
// IEEE80211_C_BGSCAN /* capable of bg scanning */
;
/* Advertise full-offload scanning */
- ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
sc->sc_phyctxt[i].id = i;
sc->sc_phyctxt[i].color = 0;
diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c
index b7c452a4f074..a949103f20d4 100644
--- a/sys/dev/iwn/if_iwn.c
+++ b/sys/dev/iwn/if_iwn.c
@@ -584,6 +584,11 @@ iwn_attach(device_t dev)
| IEEE80211_C_PMGT /* Station-side power mgmt */
;
+ /* Driver / firmware assigned sequence numbers */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't originate null data frames in net80211 */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+
/* Read MAC address, channels, etc from EEPROM. */
if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
device_printf(dev, "could not read EEPROM, error %d\n",
@@ -4577,6 +4582,9 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
* XXX TODO: Group addressed frames aren't aggregated and must
* go to the normal non-aggregation queue, and have a NONQOS TID
* assigned from net80211.
+ *
+ * TODO: same with NULL QOS frames, which we shouldn't be sending
+ * anyway ourselves (and should stub out / warn / etc.)
*/
ac = M_WME_GETAC(m);
@@ -4589,6 +4597,10 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
ac = *(int *)tap->txa_private;
}
+ /* Only assign if not A-MPDU; the A-MPDU TX path will do its own */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX. */
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
index d60ef1874a6c..04ed09f04604 100644
--- a/sys/dev/iwx/if_iwx.c
+++ b/sys/dev/iwx/if_iwx.c
@@ -4805,6 +4805,8 @@ iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
static void
iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
{
+ IWX_ASSERT_LOCKED(sc);
+
if (ring->queued < iwx_lomark) {
sc->qfullmsk &= ~(1 << ring->qid);
if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
@@ -4890,11 +4892,19 @@ iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
+ IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
+ "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n",
+ __func__,
+ le32toh(mbn->mac_id),
+ le32toh(mbn->consec_missed_beacons_since_last_rx),
+ le32toh(mbn->consec_missed_beacons),
+ le32toh(mbn->num_expected_beacons),
+ le32toh(mbn->num_recvd_beacons));
+
missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
if (missed > vap->iv_bmissthreshold) {
ieee80211_beacon_miss(ic);
}
-
}
static int
@@ -5491,6 +5501,9 @@ iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
/* for non-data, use the lowest supported rate */
ridx = min_ridx;
*flags |= IWX_TX_FLAGS_CMD_RATE;
+ } else if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ /* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
+ ridx = iwx_min_basic_rate(ic);
} else if (ni->ni_flags & IEEE80211_NODE_HT) {
ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
& ~IEEE80211_RATE_MCS];
@@ -5622,6 +5635,8 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
struct mbuf *m1;
size_t txcmd_size;
+ IWX_ASSERT_LOCKED(sc);
+
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
@@ -5673,6 +5688,11 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
if (rinfo == NULL)
return EINVAL;
+ /* Offloaded sequence number assignment; non-AMPDU case */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ /* Radiotap */
if (ieee80211_radiotap_active_vap(vap)) {
struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
@@ -5685,6 +5705,7 @@ iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
ieee80211_radiotap_tx(vap, m);
}
+ /* Encrypt: CCMP via the direct HW path; TKIP/WEP via the indirect OpenBSD-style path for now */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_get_txkey(ni, m);
if (k == NULL) {
@@ -7302,97 +7323,107 @@ iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
return iwx_rs_init_v3(sc, in);
}
-static void
-iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
+
+/**
+ * @brief Turn the given TX rate control notification into an ieee80211_node_txrate
+ *
+ * This populates the given txrate node with the TX rate control notification.
+ *
+ * @param sc driver softc
+ * @param notif firmware notification
+ * @param ni ieee80211_node update
+ * @returns true if updated, false if not
+ */
+static bool
+iwx_rs_update_node_txrate(struct iwx_softc *sc,
+ const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
{
struct ieee80211com *ic = &sc->sc_ic;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- struct ieee80211_node *ni = (void *)vap->iv_bss;
+ /* XXX TODO: create an inline function in if_iwxreg.h? */
+ static const int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
+ static const int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };
- struct ieee80211_rateset *rs = &ni->ni_rates;
uint32_t rate_n_flags;
- uint8_t plcp, rval;
- int i, cmd_ver, rate_n_flags_ver2 = 0;
-
- if (notif->sta_id != IWX_STATION_ID ||
- (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
- return;
+ uint32_t type;
+ /* Extract the rate and command version */
rate_n_flags = le32toh(notif->rate);
+ if (sc->sc_rate_n_flags_version != 2) {
+ net80211_ic_printf(ic,
+ "%s: unsupported rate_n_flags version (%d)\n",
+ __func__,
+ sc->sc_rate_n_flags_version);
+ return (false);
+ }
+
if (sc->sc_debug & IWX_DEBUG_TXRATE)
print_ratenflags(__func__, __LINE__,
rate_n_flags, sc->sc_rate_n_flags_version);
- cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
- IWX_TLC_MNG_UPDATE_NOTIF);
- if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
- rate_n_flags_ver2 = 1;
-
- if (rate_n_flags_ver2) {
- uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
- if (mod_type == IWX_RATE_MCS_HT_MSK) {
-
- ieee80211_node_set_txrate_dot11rate(ni,
- IWX_RATE_HT_MCS_INDEX(rate_n_flags) |
- IEEE80211_RATE_MCS);
- IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
- "%s:%d new MCS: %d rate_n_flags: %x\n",
- __func__, __LINE__,
- ieee80211_node_get_txrate_dot11rate(ni) & ~IEEE80211_RATE_MCS,
- rate_n_flags);
- return;
- }
- } else {
- if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
- ieee80211_node_set_txrate_dot11rate(ni,
- rate_n_flags & (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
- IWX_RATE_HT_MCS_NSS_MSK_V1));
-
- IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
- "%s:%d new MCS idx: %d rate_n_flags: %x\n",
- __func__, __LINE__,
- ieee80211_node_get_txrate_dot11rate(ni), rate_n_flags);
- return;
- }
+ type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
+ switch (type) {
+ case IWX_RATE_MCS_CCK_MSK:
+ ieee80211_node_set_txrate_dot11rate(ni,
+ cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
+ return (true);
+ case IWX_RATE_MCS_LEGACY_OFDM_MSK:
+ ieee80211_node_set_txrate_dot11rate(ni,
+ ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
+ return (true);
+ case IWX_RATE_MCS_HT_MSK:
+ /*
+ * TODO: the current API doesn't include channel width
+ * and other flags, so we can't accurately store them yet!
+ *
+ * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
+ * >> IWX_RATE_MCS_CHAN_WIDTH_POS)
+ * LDPC: (flags & (1 << 16))
+ */
+ ieee80211_node_set_txrate_ht_mcsrate(ni,
+ IWX_RATE_HT_MCS_INDEX(rate_n_flags));
+ return (true);
+ case IWX_RATE_MCS_VHT_MSK:
+ /* TODO: same comment on channel width, etc above */
+ ieee80211_node_set_txrate_vht_rate(ni,
+ IWX_RATE_VHT_MCS_CODE(rate_n_flags),
+ IWX_RATE_VHT_MCS_NSS(rate_n_flags));
+ return (true);
+ default:
+ net80211_ic_printf(ic,
+ "%s: unsupported chosen rate type in "
+ "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
+ type >> IWX_RATE_MCS_MOD_TYPE_POS);
+ return (false);
}
- if (rate_n_flags_ver2) {
- const struct ieee80211_rateset *rs;
- uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
- if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
- rs = &ieee80211_std_rateset_11a;
- else
- rs = &ieee80211_std_rateset_11b;
- if (ridx < rs->rs_nrates)
- rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
- else
- rval = 0;
- } else {
- plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
+ /* Default: if we get here, we didn't successfully update anything */
+ return (false);
+}
- rval = 0;
- for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
- if (iwx_rates[i].plcp == plcp) {
- rval = iwx_rates[i].rate;
- break;
- }
- }
- }
+/**
+ * @brief Process a firmware rate control update and update net80211.
+ *
+ * Since firmware is doing rate control, this just needs to update
+ * the txrate in the ieee80211_node entry.
+ */
+static void
+iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ /* XXX TODO: get a node ref! */
+ struct ieee80211_node *ni = (void *)vap->iv_bss;
- if (rval) {
- uint8_t rv;
- for (i = 0; i < rs->rs_nrates; i++) {
- rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
- if (rv == rval) {
- ieee80211_node_set_txrate_dot11rate(ni, i);
- break;
- }
- }
- IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
- "%s:%d new rate %d\n", __func__, __LINE__,
- ieee80211_node_get_txrate_dot11rate(ni));
- }
+ /*
+ * For now the iwx driver only supports a single vdev with a single
+ * node; it doesn't yet support ibss/hostap/multiple vdevs.
+ */
+ if (notif->sta_id != IWX_STATION_ID ||
+ (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
+ return;
+
+ iwx_rs_update_node_txrate(sc, notif, ni);
}
static int
@@ -8520,6 +8551,8 @@ iwx_start(struct iwx_softc *sc)
struct ieee80211_node *ni;
struct mbuf *m;
+ IWX_ASSERT_LOCKED(sc);
+
while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (iwx_tx(sc, m, ni) != 0) {
@@ -8979,10 +9012,10 @@ iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
break;
case IWX_MISSED_BEACONS_NOTIFICATION:
+ IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
+ "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
+ __func__);
iwx_rx_bmiss(sc, pkt, data);
- DPRINTF(("%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
- __func__));
- ieee80211_beacon_miss(ic);
break;
case IWX_MFUART_LOAD_NOTIFICATION:
@@ -10467,6 +10500,10 @@ iwx_attach(device_t dev)
IEEE80211_C_BGSCAN /* capable of bg scanning */
;
ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't send null data frames; let firmware do it */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
ic->ic_txstream = 2;
ic->ic_rxstream = 2;
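
A note on the new lookup tables in iwx_rs_update_node_txrate(): the dot11rate values are in 500 kb/s units, so for example a v2 rate_n_flags word whose modulation type is CCK and whose legacy rate index is 3 resolves to cck_idx_to_rate[3] == 22, i.e. 11 Mb/s. The HT and VHT branches instead hand net80211 an MCS (and, for VHT, an NSS) once the masks from if_iwxreg.h have been applied.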
diff --git a/sys/dev/iwx/if_iwxreg.h b/sys/dev/iwx/if_iwxreg.h
index 6755b93fa0ba..f3d1f078b48e 100644
--- a/sys/dev/iwx/if_iwxreg.h
+++ b/sys/dev/iwx/if_iwxreg.h
@@ -5176,6 +5176,10 @@ enum {
#define IWX_RATE_HT_MCS_INDEX(r) ((((r) & IWX_RATE_MCS_NSS_MSK) >> 1) | \
((r) & IWX_RATE_HT_MCS_CODE_MSK))
+#define IWX_RATE_VHT_MCS_CODE(r) ((r) & IWX_RATE_HT_MCS_CODE_MSK)
+#define IWX_RATE_VHT_MCS_NSS(r) \
+ ((((r) & IWX_RATE_MCS_NSS_MSK) >> IWX_RATE_MCS_NSS_POS) + 1)
+
/* Bits 7-5: reserved */
/*
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 959afa79e7da..1d36fd11f368 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -45,7 +45,7 @@
/************************************************************************
* Driver version
************************************************************************/
-static const char ixgbe_driver_version[] = "4.0.1-k";
+static const char ixgbe_driver_version[] = "5.0.1-k";
/************************************************************************
* PCI Device ID Table
@@ -144,6 +144,16 @@ static const pci_vendor_info_t ixgbe_vendor_info_array[] =
"Intel(R) X540-T2 (Bypass)"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
"Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
+ "Intel(R) E610 (Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
+ "Intel(R) E610 (SFP)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
+ "Intel(R) E610 (2.5 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
+ "Intel(R) E610 (10 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
+ "Intel(R) E610 (SGMII)"),
/* required last entry */
PVID_END
};
@@ -174,6 +184,7 @@ static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
+static const char *ixgbe_link_speed_to_str(u32 link_speed);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
@@ -181,6 +192,8 @@ static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);
+static int ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
+
/************************************************************************
* Function prototypes
************************************************************************/
@@ -228,6 +241,13 @@ static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);
+static void ixgbe_add_debug_sysctls(struct ixgbe_softc *sc);
+static void ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc);
+static int ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
+static u8 ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc,
+ struct sbuf *sbuf, u8 cluster_id);
+static int ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
+
/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
@@ -249,10 +269,17 @@ static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS);
+
/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
+static void ixgbe_handle_fw_event(void *);
+
+static int ixgbe_enable_lse(struct ixgbe_softc *sc);
+static int ixgbe_disable_lse(struct ixgbe_softc *sc);
/************************************************************************
* FreeBSD Device Interface Entry Points
@@ -315,6 +342,7 @@ static device_method_t ixgbe_if_methods[] = {
DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
+ DEVMETHOD(ifdi_priv_ioctl, ixgbe_if_priv_ioctl),
#ifdef PCI_IOV
DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
@@ -621,6 +649,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
table_size = 512;
break;
default:
@@ -902,6 +931,32 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
} /* ixgbe_initialize_transmit_units */
+static int
+ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ device_printf(sc->dev,
+ "The driver for the device stopped because the NVM "
+ "image is newer than expected. You must install the "
+ "most recent version of the network driver.\n");
+ return (EOPNOTSUPP);
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
+ device_printf(sc->dev,
+ "The driver for the device detected a newer version of "
+ "the NVM image than expected. Please install the most "
+ "recent version of the network driver.\n");
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
+ device_printf(sc->dev,
+ "The driver for the device detected an older version "
+ "of the NVM image than expected. "
+ "Please update the NVM image.\n");
+ }
+ return (0);
+}
+
/************************************************************************
* ixgbe_register
************************************************************************/
@@ -970,6 +1025,11 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_init_aci(hw);
+
+ sc->do_debug_dump = false;
+
if (hw->mac.ops.fw_recovery_mode &&
hw->mac.ops.fw_recovery_mode(hw)) {
device_printf(dev,
@@ -1058,6 +1118,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
break;
}
+ /* Check the FW API version */
+ if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
+ error = EIO;
+ goto err_pci;
+ }
+
/* Most of the iflib initialization... */
iflib_set_mac(ctx, hw->mac.addr);
@@ -1111,6 +1177,9 @@ err_pci:
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
ixgbe_free_pci_resources(ctx);
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_shutdown_aci(hw);
+
return (error);
} /* ixgbe_if_attach_pre */
@@ -1296,8 +1365,6 @@ ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
return (0);
case IFCOUNTER_IQDROPS:
return (sc->iqdrops);
- case IFCOUNTER_OQDROPS:
- return (0);
case IFCOUNTER_IERRORS:
return (sc->ierrors);
default:
@@ -1343,6 +1410,248 @@ ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
}
/************************************************************************
+ * ixgbe_if_priv_ioctl - Ioctl handler for driver
+ *
+ * Handler for custom, driver-specific ioctls
+ *
+ * return 0 on success, positive on failure
+ ************************************************************************/
+static int
+ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
+{
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ifdrv *ifd;
+ device_t dev = sc->dev;
+
+ /* Make sure the command type is valid */
+ switch (command) {
+ case SIOCSDRVSPEC:
+ case SIOCGDRVSPEC:
+ /* Accepted commands */
+ break;
+ case SIOCGPRIVATE_0:
+ /*
+ * Although we do not support this ioctl command, it's expected
+ * that iflib will forward it to the IFDI_PRIV_IOCTL handler.
+ * Do not print a message in this case.
+ */
+ return (ENOTSUP);
+ default:
+ /*
+ * If we get a different command for this function, it's
+ * definitely unexpected, so log a message indicating what
+ * command we got for debugging purposes.
+ */
+ device_printf(dev,
+ "%s: unexpected ioctl command %08lx\n",
+ __func__, command);
+ return (EINVAL);
+ }
+
+ ifd = (struct ifdrv *)data;
+
+ switch (ifd->ifd_cmd) {
+ case IXGBE_NVM_ACCESS:
+ IOCTL_DEBUGOUT("ioctl: NVM ACCESS");
+ return (ixgbe_nvm_access_ioctl(sc, ifd));
+ case IXGBE_DEBUG_DUMP:
+ IOCTL_DEBUGOUT("ioctl: DEBUG DUMP");
+ return (ixgbe_debug_dump_ioctl(sc, ifd));
+ default:
+ IOCTL_DEBUGOUT1(
+ "ioctl: UNKNOWN SIOC(S|G)DRVSPEC (0x%X) command\n",
+ (int)ifd->ifd_cmd);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/************************************************************************
+ * ixgbe_nvm_access_ioctl
+ *
+ * Handles an NVM access ioctl request
+ ************************************************************************/
+static int
+ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
+{
+ struct ixgbe_nvm_access_data *data;
+ struct ixgbe_nvm_access_cmd *cmd;
+ struct ixgbe_hw *hw = &sc->hw;
+ size_t ifd_len = ifd->ifd_len;
+ size_t malloc_len;
+ device_t dev = sc->dev;
+ u8 *nvm_buffer;
+ s32 error = 0;
+
+ /*
+ * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
+ * a privilege check. Subsequently, iflib passes the ioctl to the driver
+ * without verifying privileges. To prevent non-privileged threads from
+ * accessing this interface, perform a privilege check at this point.
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ifd_len < sizeof(*cmd)) {
+ device_printf(dev,
+ "%s: ifdrv length is too small. Got %zu, "
+ "but expected %zu\n",
+ __func__, ifd_len, sizeof(*cmd));
+ return (EINVAL);
+ }
+
+ if (ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: No ifd data buffer.\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd));
+
+ nvm_buffer = (u8 *)malloc(malloc_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!nvm_buffer)
+ return (ENOMEM);
+
+ /* Copy the NVM access command and data in from user space */
+ error = copyin(ifd->ifd_data, nvm_buffer, ifd_len);
+ if (error) {
+ device_printf(dev, "%s: Failed to copy data in, error: %d\n",
+ __func__, error);
+ goto cleanup_free_nvm_buffer;
+ }
+
+ /*
+ * The NVM command structure is immediately followed by data which
+ * varies in size based on the command.
+ */
+ cmd = (struct ixgbe_nvm_access_cmd *)nvm_buffer;
+ data = (struct ixgbe_nvm_access_data *)
+ (nvm_buffer + sizeof(struct ixgbe_nvm_access_cmd));
+
+ /* Handle the NVM access request */
+ error = ixgbe_handle_nvm_access(hw, cmd, data);
+ if (error) {
+ device_printf(dev, "%s: NVM access request failed, error %d\n",
+ __func__, error);
+ }
+
+ /* Copy the possibly modified contents of the handled request out */
+ error = copyout(nvm_buffer, ifd->ifd_data, ifd_len);
+ if (error) {
+ device_printf(dev, "%s: Copying response back to "
+ "user space failed, error %d\n",
+ __func__, error);
+ goto cleanup_free_nvm_buffer;
+ }
+
+cleanup_free_nvm_buffer:
+ free(nvm_buffer, M_IXGBE);
+ return (error);
+}
+
+/************************************************************************
+ * ixgbe_debug_dump_ioctl
+ *
+ * Makes a debug dump of internal FW/HW data.
+ ************************************************************************/
+static int
+ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
+{
+ struct ixgbe_debug_dump_cmd *dd_cmd;
+ struct ixgbe_hw *hw = &sc->hw;
+ size_t ifd_len = ifd->ifd_len;
+ device_t dev = sc->dev;
+ s32 error = 0;
+
+ if (!(sc->feat_en & IXGBE_FEATURE_DBG_DUMP))
+ return (ENODEV);
+
+ /* Data returned from ACI command */
+ u16 ret_buf_size = 0;
+ u16 ret_next_cluster = 0;
+ u16 ret_next_table = 0;
+ u32 ret_next_index = 0;
+
+ /*
+ * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
+ * a privilege check. Subsequently, iflib passes the ioctl to the driver
+ * without verifying privileges. To prevent non-privileged threads from
+ * accessing this interface, perform a privilege check at this point.
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ifd_len < sizeof(*dd_cmd)) {
+ device_printf(dev,
+ "%s: ifdrv length is too small. Got %zu, "
+ "but expected %zu\n",
+ __func__, ifd_len, sizeof(*dd_cmd));
+ return (EINVAL);
+ }
+
+ if (ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: No ifd data buffer.\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ dd_cmd = (struct ixgbe_debug_dump_cmd *)malloc(ifd_len, M_IXGBE,
+ M_NOWAIT | M_ZERO);
+ if (!dd_cmd) {
+ error = ENOMEM;
+ goto out;
+ }
+ /* copy data from userspace */
+ error = copyin(ifd->ifd_data, dd_cmd, ifd_len);
+ if (error) {
+ device_printf(dev, "%s: Failed to copy data in, error: %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ /* ACI command requires buf_size arg to be greater than 0 */
+ if (dd_cmd->data_size == 0) {
+ device_printf(dev, "%s: data_size must be greater than 0\n",
+ __func__);
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Zero the data buffer memory space */
+ memset(dd_cmd->data, 0, ifd_len - sizeof(*dd_cmd));
+
+ error = ixgbe_aci_get_internal_data(hw, dd_cmd->cluster_id,
+ dd_cmd->table_id, dd_cmd->offset, dd_cmd->data, dd_cmd->data_size,
+ &ret_buf_size, &ret_next_cluster, &ret_next_table, &ret_next_index);
+ if (error) {
+ device_printf(dev,
+ "%s: Failed to get internal FW/HW data, error: %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ dd_cmd->cluster_id = ret_next_cluster;
+ dd_cmd->table_id = ret_next_table;
+ dd_cmd->offset = ret_next_index;
+ dd_cmd->data_size = ret_buf_size;
+
+ error = copyout(dd_cmd, ifd->ifd_data, ifd->ifd_len);
+ if (error) {
+ device_printf(dev,
+ "%s: Failed to copy data out, error: %d\n",
+ __func__, error);
+ }
+
+out:
+ free(dd_cmd, M_IXGBE);
+
+ return (error);
+}
+
+/************************************************************************
* ixgbe_add_media_types
************************************************************************/
static void
@@ -1358,6 +1667,10 @@ ixgbe_add_media_types(if_ctx_t ctx)
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
@@ -1459,6 +1772,7 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
}
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
return (true);
return (false);
@@ -1525,6 +1839,15 @@ ixgbe_config_link(if_ctx_t ctx)
IXGBE_LINK_SPEED_5GB_FULL);
}
+ if (hw->mac.type == ixgbe_mac_E610) {
+ hw->phy.ops.init(hw);
+ err = ixgbe_enable_lse(sc);
+ if (err)
+ device_printf(sc->dev,
+ "Failed to enable Link Status Event, "
+ "error: %d", err);
+ }
+
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
sc->link_up);
@@ -2158,14 +2481,15 @@ get_parent_info:
ixgbe_set_pci_config_data_generic(hw, link);
display:
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
+ (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+ ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
"Unknown"));
if (bus_info_valid) {
@@ -2372,14 +2696,17 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
layer = sc->phy_layer;
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
- layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
+ if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
@@ -2390,15 +2717,6 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
break;
}
- if (hw->mac.type == ixgbe_mac_X550)
- switch (sc->link_speed) {
- case IXGBE_LINK_SPEED_5GB_FULL:
- ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_2_5GB_FULL:
- ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
- break;
- }
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
switch (sc->link_speed) {
@@ -2676,6 +2994,11 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
}
+ if (eicr & IXGBE_EICR_FW_EVENT) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
+ sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
+ }
+
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
(eicr & IXGBE_EICR_FLOW_DIR)) {
@@ -2734,11 +3057,16 @@ ixgbe_msix_link(void *arg)
/* Check for VF message */
if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
- (eicr & IXGBE_EICR_MAILBOX))
+ (eicr & IXGBE_EICR_MAILBOX)) {
sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
+ }
}
- if (ixgbe_is_sfp(hw)) {
+ /*
+ * On E610, the firmware handles PHY configuration, so
+ * there is no need to perform any SFP-specific tasks.
+ */
+ if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
/* Pluggable optics-related interrupt */
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
@@ -2812,6 +3140,264 @@ ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
} /* ixgbe_sysctl_interrupt_rate_handler */
/************************************************************************
+ * ixgbe_debug_dump_print_cluster
+ ************************************************************************/
+static u8
+ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc, struct sbuf *sbuf,
+ u8 cluster_id)
+{
+ u16 data_buf_size = IXGBE_ACI_MAX_BUFFER_SIZE;
+ device_t dev = sc->dev;
+ struct ixgbe_hw *hw = &sc->hw;
+ const u8 reserved_buf[8] = {};
+ int max_aci_calls = 1000;
+ int error, counter = 0;
+ u8 *data_buf;
+
+ /* Input parameters / loop variables */
+ u16 table_id = 0;
+ u32 offset = 0;
+
+ /* Data returned from ACI command */
+ u16 ret_buf_size = 0;
+ u16 ret_next_cluster = 0;
+ u16 ret_next_table = 0;
+ u32 ret_next_index = 0;
+
+ data_buf = (u8 *)malloc(data_buf_size, M_IXGBE, M_NOWAIT | M_ZERO);
+ if (!data_buf)
+ return (0);
+
+ DEBUGOUT2("%s: dumping cluster id (relative) %d\n",
+ __func__, cluster_id);
+
+ do {
+ DEBUGOUT3("table_id 0x%04x offset 0x%08x buf_size %d\n",
+ table_id, offset, data_buf_size);
+
+ error = ixgbe_aci_get_internal_data(hw, cluster_id, table_id,
+ offset, data_buf, data_buf_size, &ret_buf_size,
+ &ret_next_cluster, &ret_next_table, &ret_next_index);
+ if (error) {
+ device_printf(dev,
+ "%s: Failed to get internal FW/HW data, error: %d, "
+ "last aci status: %d\n",
+ __func__, error, hw->aci.last_status);
+ break;
+ }
+
+ DEBUGOUT3("ret_table_id 0x%04x ret_offset 0x%08x "
+ "ret_buf_size %d\n",
+ ret_next_table, ret_next_index, ret_buf_size);
+
+ /* Print cluster id */
+ u32 print_cluster_id = (u32)cluster_id;
+ sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id));
+ /* Print table id */
+ u32 print_table_id = (u32)table_id;
+ sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id));
+ /* Print table length */
+ u32 print_table_length = (u32)ret_buf_size;
+ sbuf_bcat(sbuf, &print_table_length,
+ sizeof(print_table_length));
+ /* Print current offset */
+ u32 print_curr_offset = offset;
+ sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset));
+ /* Print reserved bytes */
+ sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf));
+ /* Print data */
+ sbuf_bcat(sbuf, data_buf, ret_buf_size);
+
+ /* Prepare for the next loop spin */
+ memset(data_buf, 0, data_buf_size);
+
+ bool last_index = (ret_next_index == 0xffffffff);
+ bool last_table = ((ret_next_table == 0xff ||
+ ret_next_table == 0xffff) &&
+ last_index);
+
+ if (last_table) {
+ /* End of the cluster */
+ DEBUGOUT1("End of the cluster ID %d\n", cluster_id);
+ break;
+ } else if (last_index) {
+ /* End of the table */
+ table_id = ret_next_table;
+ offset = 0;
+ } else {
+ /* More data left in the table */
+ offset = ret_next_index;
+ }
+ } while (++counter < max_aci_calls);
+
+ if (counter >= max_aci_calls)
+ device_printf(dev, "Exceeded nr of ACI calls for cluster %d\n",
+ cluster_id);
+
+ free(data_buf, M_IXGBE);
+
+ return (++cluster_id);
+} /* ixgbe_debug_dump_print_cluster */
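
Each loop iteration above appends one self-describing record to the sbuf;
laid out as a struct (illustrative only, the driver emits the fields
individually), a record looks like:

/* Illustrative layout of one debug-dump record as emitted above. */
struct dbg_dump_record_hdr {
	uint32_t cluster_id;	/* cluster the chunk came from */
	uint32_t table_id;	/* table within the cluster */
	uint32_t length;	/* bytes of data that follow (ret_buf_size) */
	uint32_t offset;	/* offset of the chunk within the table */
	uint8_t  reserved[8];	/* always zero */
	/* 'length' bytes of raw table data follow the 24-byte header */
};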
+
+/************************************************************************
+ * ixgbe_sysctl_debug_dump_set_clusters
+ *
+ * Sets the cluster to dump from FW when Debug Dump requested.
+ ************************************************************************/
+static int
+ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS)
+{
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ u32 clusters = sc->debug_dump_cluster_mask;
+ device_t dev = sc->dev;
+ int error;
+
+ error = sysctl_handle_32(oidp, &clusters, 0, req);
+ if ((error) || !req->newptr)
+ return (error);
+
+ if (clusters & ~(IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK)) {
+ device_printf(dev,
+ "%s: Unrecognized parameter: %u\n",
+ __func__, clusters);
+ sc->debug_dump_cluster_mask =
+ IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID;
+ return (EINVAL);
+ }
+
+ sc->debug_dump_cluster_mask = clusters;
+
+ return (0);
+} /* ixgbe_sysctl_debug_dump_set_clusters */
+
+/************************************************************************
+ * ixgbe_sysctl_dump_debug_dump
+ ************************************************************************/
+static int
+ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS)
+{
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ device_t dev = sc->dev;
+ struct sbuf *sbuf;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (!sc->do_debug_dump) {
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ error = SYSCTL_OUT(req, 0, 0);
+ return (error);
+ }
+
+ char input_buf[2] = "";
+ error = sysctl_handle_string(oidp, input_buf,
+ sizeof(input_buf), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (input_buf[0] == '1') {
+ if (sc->debug_dump_cluster_mask ==
+ IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID) {
+ device_printf(dev,
+ "Debug Dump failed because an invalid "
+ "cluster was specified.\n");
+ return (EINVAL);
+ }
+
+ sc->do_debug_dump = true;
+ return (0);
+ }
+
+ return (EINVAL);
+ }
+
+ /* Caller just wants the upper bound for size */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ size_t est_output_len = IXGBE_DBG_DUMP_BASE_SIZE;
+ if (sc->debug_dump_cluster_mask & 0x2)
+ est_output_len += IXGBE_DBG_DUMP_BASE_SIZE;
+ error = SYSCTL_OUT(req, 0, est_output_len);
+ return (error);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);
+
+ DEBUGOUT("FW Debug Dump running...\n");
+
+ if (sc->debug_dump_cluster_mask) {
+ for (u8 id = 0; id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX; id++) {
+ if (sc->debug_dump_cluster_mask & BIT(id)) {
+ DEBUGOUT1("Dumping cluster ID %u...\n", id);
+ ixgbe_debug_dump_print_cluster(sc, sbuf, id);
+ }
+ }
+ } else {
+ u8 next_cluster_id = 0;
+ do {
+ DEBUGOUT1("Dumping cluster ID %u...\n",
+ next_cluster_id);
+ next_cluster_id = ixgbe_debug_dump_print_cluster(sc,
+ sbuf, next_cluster_id);
+ } while (next_cluster_id != 0 &&
+ next_cluster_id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX);
+ }
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ sc->do_debug_dump = false;
+
+ return (error);
+} /* ixgbe_sysctl_dump_debug_dump */
+
+/************************************************************************
+ * ixgbe_add_debug_dump_sysctls
+ ************************************************************************/
+static void
+ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc)
+{
+ struct sysctl_oid_list *debug_list, *dump_list;
+ struct sysctl_oid *dump_node;
+ struct sysctl_ctx_list *ctx;
+ device_t dev = sc->dev;
+
+ ctx = device_get_sysctl_ctx(dev);
+ debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);
+
+ dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump",
+ CTLFLAG_RD, NULL, "Internal FW/HW Dump");
+ dump_list = SYSCTL_CHILDREN(dump_node);
+
+ SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
+ CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
+ ixgbe_sysctl_debug_dump_set_clusters, "SU",
+ IXGBE_SYSCTL_DESC_DEBUG_DUMP_SET_CLUSTER);
+
+ SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump",
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
+ ixgbe_sysctl_dump_debug_dump, "",
+ IXGBE_SYSCTL_DESC_DUMP_DEBUG_DUMP);
+} /* ixgbe_add_debug_dump_sysctls */
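
For reference, a typical operator sequence for the two sysctls registered
above (the dev.ix.0 prefix assumes unit 0 of an ix device):

# sysctl dev.ix.0.debug.dump.clusters=1        # bit 0x1 selects the Link cluster
# sysctl dev.ix.0.debug.dump.dump=1            # arm the dump
# sysctl -b dev.ix.0.debug.dump.dump > dump.bin  # fetch the binary records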
+
+static void
+ixgbe_add_debug_sysctls(struct ixgbe_softc *sc)
+{
+ struct sysctl_oid_list *ctx_list;
+ struct sysctl_ctx_list *ctx;
+ device_t dev = sc->dev;
+
+ ctx = device_get_sysctl_ctx(dev);
+ ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug",
+ CTLFLAG_RD, NULL, "Debug Sysctls");
+
+ if (sc->feat_en & IXGBE_FEATURE_DBG_DUMP)
+ ixgbe_add_debug_dump_sysctls(sc);
+} /* ixgbe_add_debug_sysctls */
+
+/************************************************************************
* ixgbe_add_device_sysctls
************************************************************************/
static void
@@ -2921,6 +3507,8 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
}
+
+ ixgbe_add_debug_sysctls(sc);
} /* ixgbe_add_device_sysctls */
/************************************************************************
@@ -2985,7 +3573,13 @@ ixgbe_if_detach(if_ctx_t ctx)
callout_drain(&sc->fw_mode_timer);
+ if (sc->hw.mac.type == ixgbe_mac_E610) {
+ ixgbe_disable_lse(sc);
+ ixgbe_shutdown_aci(&sc->hw);
+ }
+
ixgbe_free_pci_resources(ctx);
+
free(sc->mta, M_IXGBE);
return (0);
@@ -3404,6 +3998,7 @@ ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -3826,6 +4421,96 @@ ixgbe_handle_phy(void *context)
} /* ixgbe_handle_phy */
/************************************************************************
+ * ixgbe_enable_lse - enable link status events
+ *
+ * Sets mask and enables link status events
+ ************************************************************************/
+s32
+ixgbe_enable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
+
+ error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = mask;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_enable_lse */
+
+/************************************************************************
+ * ixgbe_disable_lse - disable link status events
+ ************************************************************************/
+s32
+ixgbe_disable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = 0;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_disable_lse */
+
+/************************************************************************
+ * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
+ ************************************************************************/
+static void
+ixgbe_handle_fw_event(void *context)
+{
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_aci_event event;
+ bool pending = false;
+ s32 error;
+
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!event.msg_buf) {
+ device_printf(sc->dev, "Cannot allocate buffer for "
+ "event message\n");
+ return;
+ }
+
+ do {
+ error = ixgbe_aci_get_event(hw, &event, &pending);
+ if (error) {
+ device_printf(sc->dev, "Error getting event from "
+ "FW:%d\n", error);
+ break;
+ }
+
+ switch (le16toh(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
+ break;
+
+ case ixgbe_aci_opc_temp_tca_event:
+ if (hw->adapter_stopped == FALSE)
+ ixgbe_if_stop(ctx);
+ device_printf(sc->dev,
+ "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(sc->dev, "System shutdown required!\n");
+ break;
+
+ default:
+ device_printf(sc->dev,
+ "Unknown FW event captured, opcode=0x%04X\n",
+ le16toh(event.desc.opcode));
+ break;
+ }
+ } while (pending);
+
+ free(event.msg_buf, M_IXGBE);
+} /* ixgbe_handle_fw_event */
+
+/************************************************************************
* ixgbe_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
@@ -3858,6 +4543,33 @@ ixgbe_if_stop(if_ctx_t ctx)
} /* ixgbe_if_stop */
/************************************************************************
+ * ixgbe_link_speed_to_str - Convert link speed to string
+ *
+ * Helper function to convert link speed constants to human-readable
+ * string representations in conventional Gbps or Mbps.
+ ************************************************************************/
+static const char *
+ixgbe_link_speed_to_str(u32 link_speed)
+{
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return "10 Gbps";
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ return "5 Gbps";
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ return "2.5 Gbps";
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return "1 Gbps";
+ case IXGBE_LINK_SPEED_100_FULL:
+ return "100 Mbps";
+ case IXGBE_LINK_SPEED_10_FULL:
+ return "10 Mbps";
+ default:
+ return "Unknown";
+ }
+} /* ixgbe_link_speed_to_str */
+
+/************************************************************************
* ixgbe_update_link_status - Update OS on link state
*
* Note: Only updates the OS on the cached link state.
@@ -3873,9 +4585,9 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
if (sc->link_up) {
if (sc->link_active == false) {
if (bootverbose)
- device_printf(dev, "Link is up %d Gbps %s \n",
- ((sc->link_speed == 128) ? 10 : 1),
- "Full Duplex");
+ device_printf(dev,
+ "Link is up %s Full Duplex\n",
+ ixgbe_link_speed_to_str(sc->link_speed));
sc->link_active = true;
/* Update any Flow Control changes */
ixgbe_fc_enable(&sc->hw);
@@ -3899,6 +4611,8 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
}
/* Handle task requests from msix_link() */
+ if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
+ ixgbe_handle_fw_event(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
ixgbe_handle_mod(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
@@ -3986,6 +4700,9 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
break;
+ case ixgbe_mac_E610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ break;
default:
break;
}
@@ -4008,6 +4725,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
+ mask &= ~IXGBE_EIMS_FW_EVENT;
if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
mask &= ~IXGBE_EIMS_MAILBOX;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
@@ -4026,7 +4744,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
} /* ixgbe_if_enable_intr */
/************************************************************************
- * ixgbe_disable_intr
+ * ixgbe_if_disable_intr
************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
@@ -4176,8 +4894,9 @@ ixgbe_intr(void *arg)
/* External PHY interrupt */
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
- (eicr & IXGBE_EICR_GPI_SDP0_X540))
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
+ }
return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
@@ -4219,7 +4938,7 @@ ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
int error, fc;
sc = (struct ixgbe_softc *)arg1;
- fc = sc->hw.fc.current_mode;
+ fc = sc->hw.fc.requested_mode;
error = sysctl_handle_int(oidp, &fc, 0, req);
if ((error) || (req->newptr == NULL))
@@ -4248,12 +4967,10 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- sc->hw.fc.requested_mode = fc;
if (sc->num_rx_queues > 1)
ixgbe_disable_rx_drop(sc);
break;
case ixgbe_fc_none:
- sc->hw.fc.requested_mode = ixgbe_fc_none;
if (sc->num_rx_queues > 1)
ixgbe_enable_rx_drop(sc);
break;
@@ -4261,6 +4978,8 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
return (EINVAL);
}
+ sc->hw.fc.requested_mode = fc;
+
/* Don't autoneg if forcing a value */
sc->hw.fc.disable_fc_autoneg = true;
ixgbe_fc_enable(&sc->hw);
@@ -4978,6 +5697,10 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
break;
+ case ixgbe_mac_E610:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
+ sc->feat_cap |= IXGBE_FEATURE_DBG_DUMP;
+ break;
default:
break;
}
@@ -4998,6 +5721,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
/* Recovery mode */
if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
+ /* FW Debug Dump */
+ if (sc->feat_cap & IXGBE_FEATURE_DBG_DUMP)
+ sc->feat_en |= IXGBE_FEATURE_DBG_DUMP;
/* Enabled via global sysctl... */
/* Flow Director */
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index 54b2c8c1dd68..8a1c1aae041d 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -68,6 +68,8 @@ static const pci_vendor_info_t ixv_vendor_info_array[] =
"Intel(R) X552 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
"Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF,
+ "Intel(R) E610 Virtual Function"),
/* required last entry */
PVID_END
};
@@ -1020,6 +1022,9 @@ ixv_identify_hardware(if_ctx_t ctx)
case IXGBE_DEV_ID_X550EM_A_VF:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ break;
default:
device_printf(dev, "unknown mac type\n");
hw->mac.type = ixgbe_mac_unknown;
@@ -1955,6 +1960,7 @@ ixv_init_device_features(struct ixgbe_softc *sc)
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
sc->feat_cap |= IXGBE_FEATURE_RSS;
break;
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 341d4ebfcebc..624b71acabea 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -46,6 +46,7 @@
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
+#include <sys/priv.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -86,6 +87,7 @@
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"
#include "ixgbe_features.h"
+#include "ixgbe_e610.h"
/* Tunables */
@@ -195,6 +197,15 @@
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+/* All BASE-T Physical layers */
+#define IXGBE_PHYSICAL_LAYERS_BASE_T_ALL \
+ (IXGBE_PHYSICAL_LAYER_10GBASE_T |\
+ IXGBE_PHYSICAL_LAYER_5000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_2500BASE_T |\
+ IXGBE_PHYSICAL_LAYER_1000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |\
+ IXGBE_PHYSICAL_LAYER_10BASE_T)
+
#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
@@ -464,6 +475,21 @@ struct ixgbe_softc {
/* Feature capable/enabled flags. See ixgbe_features.h */
u32 feat_cap;
u32 feat_en;
+ u16 lse_mask;
+
+ struct sysctl_oid *debug_sysctls;
+ u32 debug_dump_cluster_mask;
+ bool do_debug_dump;
+};
+
+struct ixgbe_debug_dump_cmd {
+ u32 offset; /* offset to read/write from table, in bytes */
+ u8 cluster_id; /* also used to get next cluster id */
+ u16 table_id;
+ u16 data_size; /* size of data field, in bytes */
+ u16 reserved1;
+ u32 reserved2;
+ u8 data[];
};
/* Precision Time Sync (IEEE 1588) defines */
@@ -488,6 +514,43 @@ struct ixgbe_softc {
#define IXGBE_PHY_CURRENT_TEMP 0xC820
#define IXGBE_PHY_OVERTEMP_STATUS 0xC830
+/*
+ * The ioctl command number used by an NVM update tool to access the driver
+ * for NVM access commands.
+ */
+#define IXGBE_NVM_ACCESS \
+ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
+
+/*
+ * The ioctl command number used by a userspace tool for accessing the driver
+ * for getting debug dump data from the firmware.
+ */
+#define IXGBE_DEBUG_DUMP \
+ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6)
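
Worked out, the nibble-packing above evaluates as 'E' (0x45) -> 0x450 +
'1' (0x31) = 0x481 -> 0x4810 + 'K' (0x4B) = 0x485B -> 0x485B0 + 'G' (0x47) =
0x485F7 -> 0x485F70, so IXGBE_NVM_ACCESS is 0x485F75 and IXGBE_DEBUG_DUMP is
0x485F76.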
+
+/* Debug Dump related definitions */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID 0xFFFFFF
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_BASE 50
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX 1
+
+#define IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK 0x3
+#define IXGBE_DBG_DUMP_BASE_SIZE (2 * 1024 * 1024)
+
+#define IXGBE_SYSCTL_DESC_DEBUG_DUMP_SET_CLUSTER \
+"\nSelect clusters to dump with \"dump\" sysctl" \
+"\nFlags:" \
+"\n\t 0x1 - Link" \
+"\n\t 0x2 - Full CSR Space, excluding RCW registers" \
+"\n\t" \
+"\nUse \"sysctl -x\" to view flags properly."
+
+#define IXGBE_SYSCTL_DESC_DUMP_DEBUG_DUMP \
+"\nWrite 1 to output a FW debug dump containing the clusters " \
+"specified by the \"clusters\" sysctl" \
+"\nThe \"-b\" flag must be used in order to dump this data " \
+"as binary data because" \
+"\nthis data is opaque and not a string."
+
/* Sysctl help messages; displayed with sysctl -d */
#define IXGBE_SYSCTL_DESC_ADV_SPEED \
"\nControl advertised link speed using these flags:\n" \
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 4c50f10ed92e..f11f52a646e4 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -112,11 +112,15 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_X550EM_a:
status = ixgbe_init_ops_X550EM_a(hw);
break;
+ case ixgbe_mac_E610:
+ status = ixgbe_init_ops_E610(hw);
+ break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
status = ixgbe_init_ops_vf(hw);
break;
default:
@@ -240,6 +244,18 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X550EM_a_vf;
hw->mvals = ixgbe_mvals_X550EM_a;
break;
+ case IXGBE_DEV_ID_E610_BACKPLANE:
+ case IXGBE_DEV_ID_E610_SFP:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
+ case IXGBE_DEV_ID_E610_SGMII:
+ hw->mac.type = ixgbe_mac_E610;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index b81510dacb95..2b4cec8d110e 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -48,6 +48,7 @@ extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index df7ab90e72ab..bff022585a03 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -178,6 +178,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_SFP_N:
case IXGBE_DEV_ID_X550EM_A_QSFP:
case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -210,6 +211,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -616,7 +619,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
@@ -1037,6 +1041,9 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
case IXGBE_PCI_LINK_SPEED_8000:
hw->bus.speed = ixgbe_bus_speed_8000;
break;
+ case IXGBE_PCI_LINK_SPEED_16000:
+ hw->bus.speed = ixgbe_bus_speed_16000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
@@ -1059,7 +1066,9 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_bus_info_generic");
/* Get the negotiated link width and speed from PCI config space */
- link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+ link_status = IXGBE_READ_PCIE_WORD(hw, hw->mac.type == ixgbe_mac_E610 ?
+ IXGBE_PCI_LINK_STATUS_E610 :
+ IXGBE_PCI_LINK_STATUS);
ixgbe_set_pci_config_data_generic(hw, link_status);
@@ -1878,7 +1887,6 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_eeprom_semaphore");
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -3363,7 +3371,6 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
-
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
@@ -3692,6 +3699,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_E610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return msix_count;
}
@@ -4139,7 +4150,6 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
-
/**
* ixgbe_toggle_txdctl_generic - Toggle VF's queues
* @hw: pointer to hardware structure
@@ -4323,7 +4333,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_E610) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -5494,6 +5505,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* version of eeprom section */
if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
word = NVM_VER_INVALID;
@@ -5512,6 +5524,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* intel phy firmware version */
if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
word = NVM_VER_INVALID;
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..18c4612446e0
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -0,0 +1,5533 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+/**
+ * ixgbe_init_aci - initialization routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACI lock.
+ */
+void ixgbe_init_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_init_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Destroy the ACI lock.
+ */
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_destroy_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
+ * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
+ * * - IXGBE_ERR_PARAM - buf_size is too big or the buf and buf_size
+ * arguments are inconsistent.
+ * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command timed out.
+ * * - IXGBE_ERR_ACI_ERROR - invalid state of the HICR register, a bad
+ * opcode was returned in the response, or the Admin Command failed
+ * with an error reported by the FW.
+ */
+static s32
+ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u32 hicr = 0, tmp_buf_size = 0, i = 0;
+ u32 *raw_desc = (u32 *)desc;
+ s32 status = IXGBE_SUCCESS;
+ bool valid_buf = false;
+ u32 *tmp_buf = NULL;
+ u16 opcode = 0;
+
+ do {
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if (!(hicr & PF_HICR_EN)) {
+ status = IXGBE_ERR_ACI_DISABLED;
+ break;
+ }
+ if (hicr & PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ status = IXGBE_ERR_ACI_BUSY;
+ break;
+ }
+ opcode = desc->opcode;
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+
+ if (buf)
+ desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
+
+ /* Check if buf and buf_size are proper params */
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && buf_size == 0) ||
+ (buf == NULL && buf_size)) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf == true) {
+ if (buf_size % 4 == 0)
+ tmp_buf_size = buf_size;
+ else
+ tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
+
+ tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
+ if (!tmp_buf)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ /* tmp_buf is first filled with 0xFF and then the content
+ * of buf is copied into it. This approach lets us use the
+ * exact buf_size while preventing reads past the end of
+ * buf when buf_size is not a multiple of 4 (e.g. a
+ * 10-byte buf is staged in a 12-byte tmp_buf).
+ */
+ memset(tmp_buf, 0xFF, tmp_buf_size);
+ memcpy(tmp_buf, buf, buf_size);
+
+ if (tmp_buf_size > IXGBE_ACI_LG_BUF)
+ desc->flags |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
+
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ IXGBE_WRITE_REG(hw, PF_HIBA(i),
+ IXGBE_LE32_TO_CPU(tmp_buf[i]));
+ }
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, PF_HIDA(i),
+ IXGBE_LE32_TO_CPU(raw_desc[i]));
+
+ /* SW has to set the PF_HICR.C bit and clear the
+ * PF_HICR.SV and PF_HICR.EV bits
+ */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, PF_HICR, hicr);
+
+ /* Wait for sync Admin Command response */
+ for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+
+ /* Wait for async Admin Command response */
+ if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
+ i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+ }
+
+ /* Read sync Admin Command response */
+ if ((hicr & PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & PF_HICR_C) {
+ status = IXGBE_ERR_ACI_TIMEOUT;
+ break;
+ } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* For every command other than 0x0014, treat an opcode
+ * mismatch as an error. The response to the 0x0014 command,
+ * read from HIDA_2, is an event descriptor which is expected
+ * to carry a different opcode than the command.
+ */
+ if (desc->opcode != opcode &&
+ opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ if (desc->retval != IXGBE_ACI_RC_OK) {
+ hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* Write the response values back to buf */
+ if (valid_buf && (desc->flags &
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
+ tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
+ }
+ memcpy(buf, tmp_buf, buf_size);
+ }
+ } while (0);
+
+ if (tmp_buf)
+ ixgbe_free(hw, tmp_buf);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
+ s32 status;
+ u16 opcode;
+ u8 idx = 0;
+
+ opcode = IXGBE_LE16_TO_CPU(desc->opcode);
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
+ if (!buf_cpy)
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+ memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+ }
+
+ do {
+ ixgbe_acquire_lock(&hw->aci.lock);
+ status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ ixgbe_release_lock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
+ (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+ msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
+
+ if (buf_cpy)
+ ixgbe_free(hw, buf_cpy);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events,
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask;
+ u32 fwsts;
+
+ ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+
+ /* Check state of Event Pending (EP) bit */
+ fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_acquire_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (status)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending) {
+ *pending = ixgbe_aci_check_event_pending(hw);
+ }
+
+aci_get_event_exit:
+ ixgbe_release_lock(&hw->aci.lock);
+
+ return status;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = IXGBE_CPU_TO_LE16(opcode);
+ desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_driver_ver - send the driver version to firmware
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ *
+ * Send the driver version to the firmware
+ * using the ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * Returns IXGBE_ERR_PARAM if dv is NULL.
+ */
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
+{
+ struct ixgbe_aci_cmd_driver_ver *cmd;
+ struct ixgbe_aci_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u8 sdp_number,
+ u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
+ cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * ixgbe_aci_req_res to utilize the ACI.
+ * If some other driver has previously acquired the resource and
+ * performed any necessary updates, IXGBE_ERR_ACI_NO_WORK is returned;
+ * in that case the caller does not obtain the resource and has no
+ * further work to do. If needed, the function polls until the current
+ * lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout = 0;
+ s32 status;
+
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ goto ixgbe_acquire_res_exit;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (status && retry_timeout && res_timeout) {
+ msec_delay(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+
+ixgbe_acquire_res_exit:
+ return status;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ s32 status;
+
+ status = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
+ (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
+ msec_delay(1);
+ status = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
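
To illustrate the intended pairing of the two helpers above, a sketch of a
guarded access; the resource id and access-type names here are assumed
placeholders rather than confirmed E610 enum values:

/* Sketch only: IXGBE_NVM_RES_ID and IXGBE_RES_READ are assumed names. */
static s32
example_guarded_nvm_read(struct ixgbe_hw *hw)
{
	s32 status;

	status = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, IXGBE_RES_READ,
	    1000 /* ms */);
	if (status == IXGBE_ERR_ACI_NO_WORK)
		return (IXGBE_SUCCESS);	/* another driver already did the work */
	if (status)
		return (status);	/* current owner never released it */

	/* ... perform the reads protected by the resource here ... */

	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
	return (IXGBE_SUCCESS);
}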
+
+/**
+ * ixgbe_parse_common_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
+ u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
+ u32 number = IXGBE_LE32_TO_CPU(elem->number);
+ u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
+ bool found = true;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->sec_rev_disabled =
+ (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
+ true : false;
+ caps->update_disabled =
+ (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
+ true : false;
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ caps->netlist_auth =
+ (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
+ true : false;
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
+ IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
+ caps->orom_recovery_update = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ DEBUGOUT2("%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ixgbe_hweight8 - count set bits among the 8 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 8 lowest bits in the provided value.
+ */
+static u8 ixgbe_hweight8(u32 w)
+{
+ u8 hweight = 0, i;
+
+ for (i = 0; i < 8; i++)
+ if (w & (1 << i))
+ hweight++;
+
+ return hweight;
+}
+
+/**
+ * ixgbe_hweight32 - count set bits among the 32 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 32 lowest bits in the
+ * provided value.
+ */
+static u8 ixgbe_hweight32(u32 w)
+{
+ u32 bit_mask = 0x1, i;
+ u8 bit_cnt = 0;
+
+ for (i = 0; i < 32; i++) {
+ if (w & bit_mask)
+ bit_cnt++;
+
+ bit_mask <<= 1;
+ }
+
+ return bit_cnt;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_funcs = ixgbe_hweight32(number);
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vfs_exposed = number;
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vsi_allocd_to_host = number;
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_flow_director_fltr = number;
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ break;
+ }
+ }
+
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define IXGBE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ ixgbe_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
+ */
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ s32 status;
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+
+ cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!status)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!status)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
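+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver): a
+ * typical init-time caller retrieves the capabilities once and then works
+ * with the cached copies in the hw struct. DEBUGOUT1 is assumed from
+ * ixgbe_osdep.h; the fields used below are filled in by the parsers above.
+ */
+#if 0
+static s32 example_report_caps(struct ixgbe_hw *hw)
+{
+ s32 status = ixgbe_get_caps(hw);
+
+ if (status)
+ return status;
+
+ DEBUGOUT1("PFs on the device: %u\n", hw->dev_caps.num_funcs);
+ DEBUGOUT1("VFs allocated to this PF: %u\n",
+ hw->func_caps.num_allocd_vfs);
+ return IXGBE_SUCCESS;
+}
+#endif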
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = (u8)hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
+ status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+
+ if (status == IXGBE_SUCCESS &&
+ report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
+ hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration.
+ *
+ * Return: true if PHY capabilities match PHY configuration.
+ */
+bool
+ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
+ IXGBE_ACI_PHY_EN_MOD_QUAL);
+ cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
+ ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure.
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * In MFP mode, one or more of the Set PHY config parameters may be
+ * ignored, as the PF may not have the privilege to set them.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK)
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+
+ if (!status)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
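+
+/*
+ * Illustrative flow (hypothetical helper, not part of the driver): the
+ * usual way to change PHY settings is to read the current capabilities,
+ * copy them into a config structure, adjust the desired bits and write the
+ * result back. IXGBE_ACI_PHY_ENA_LINK is assumed to be defined alongside
+ * the other IXGBE_ACI_PHY_ENA_* flags; everything else is defined above.
+ */
+#if 0
+static s32 example_enable_link_an(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg;
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (status)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Request link with automatic link updates */
+ cfg.caps |= IXGBE_ACI_PHY_ENA_LINK | IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ /* Skip the write if nothing would change */
+ if (ixgbe_phy_caps_equals_cfg(&pcaps, &cfg))
+ return IXGBE_SUCCESS;
+
+ return ixgbe_aci_set_phy_cfg(hw, &cfg);
+}
+#endif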
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * If more than one media type is indicated, ixgbe_media_type_unknown is
+ * returned. First, phy_type_low is checked, then phy_type_high.
+ * If none is identified, ixgbe_media_type_unknown is returned.
+ *
+ * Return: the media type determined from the phy type, as an enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it.
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li = &hw->link.link_info;
+
+ status = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (status == IXGBE_SUCCESS)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ ixgbe_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is set to true if the link is up and to false if it
+ * is down. Its value is invalid if the returned status is non-zero. As a
+ * result of this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ if (hw->link.get_link_info) {
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ return status;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return status;
+}
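+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver):
+ * polling the link state through the wrapper above. DEBUGOUT1 is assumed
+ * from ixgbe_osdep.h.
+ */
+#if 0
+static void example_poll_link(struct ixgbe_hw *hw)
+{
+ bool link_up = false;
+
+ if (ixgbe_get_link_status(hw, &link_up) == IXGBE_SUCCESS)
+ DEBUGOUT1("link is %s\n", link_up ? "up" : "down");
+}
+#endif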
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * If a link status structure is provided, it is filled with a copy of
+ * the current status.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+ /* flag cleared so calling functions don't call AQ again */
+ hw->link.get_link_info = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable link status events, false to disable them
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ s32 rc;
+
+ rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (rc)
+ return rc;
+
+ /* Enable link status event generation by firmware */
+ rc = ixgbe_aci_get_link_info(hw, activate, NULL);
+ if (rc)
+ return rc;
+
+ return IXGBE_SUCCESS;
+}
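+
+/*
+ * Illustrative sketch: because a set mask bit deactivates the
+ * corresponding event, a mask of 0 subscribes to every link status event.
+ * The wrapper itself is hypothetical.
+ */
+#if 0
+static s32 example_enable_all_lse(struct ixgbe_hw *hw)
+{
+ /* mask = 0: no event deactivated, reporting enabled */
+ return ixgbe_configure_lse(hw, true, 0);
+}
+#endif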
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_netlist_node - find a node handle
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in the
+ * netlist. When found, IXGBE_SUCCESS is returned; otherwise
+ * IXGBE_ERR_NOT_SUPPORTED. If @node_handle is provided, it is set to the
+ * found node handle.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ s32 status;
+ u8 idx;
+
+ for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ixgbe_aci_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ return IXGBE_ERR_NOT_SUPPORTED;
+}
+
+/**
+ * ixgbe_aci_read_i2c - read I2C register value
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type, bits [3:0] - data size
+ * to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ *
+ * Read the value of the I2C pin register using ACI command (0x06E2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 data_size;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return IXGBE_ERR_PARAM;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status) {
+ struct ixgbe_aci_cmd_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_write_i2c - write a value to I2C register
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
+ * to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ *
+ * Write a value to the I2C pin register using ACI command (0x06E3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 i, data_size;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ /* Writes are limited to 4 bytes of data */
+ if (data_size > 4)
+ return IXGBE_ERR_PARAM;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ for (i = 0; i < data_size; i++) {
+ cmd->i2c_data[i] = *data;
+ data++;
+ }
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
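+
+/*
+ * Illustrative sketch of the params byte packing documented above: a
+ * two-byte read with every other field left at zero. Only the
+ * IXGBE_ACI_I2C_DATA_SIZE_S/_M macros are taken from this driver; the
+ * 0x50 bus address is a placeholder.
+ */
+#if 0
+static s32 example_i2c_read_two_bytes(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr ta, u8 *buf)
+{
+ /* bits [3:0] of params carry the data size: 2 bytes */
+ u8 params = (2 << IXGBE_ACI_I2C_DATA_SIZE_S) &
+ IXGBE_ACI_I2C_DATA_SIZE_M;
+
+ return ixgbe_aci_read_i2c(hw, ta, 0x50, IXGBE_CPU_TO_LE16(0), params,
+ buf);
+}
+#endif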
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_gpio - set GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO to set
+ * @value: SW-provided IO value to set (in the LSB)
+ *
+ * Set the GPIO pin state that is a part of the topology
+ * using ACI command (0x06EC).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_gpio - get GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO to read
+ * @value: IO value read
+ *
+ * Get the value of a GPIO signal which is part of the topology
+ * using ACI command (0x06ED).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ *value = !!cmd->gpio_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset; lower 8 bits for the address, upper 8 bits zero padding.
+ * @page: QSFP page
+ * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: 0 for read, 1 for write.
+ *
+ * Read/write SFF EEPROM using ACI command (0x06EE).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write)
+{
+ struct ixgbe_aci_cmd_sff_eeprom *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || (mem_addr & 0xff00))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
+ IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
+ ((page_bank_ctrl <<
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
+ cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
+ cmd->module_page = page;
+ if (write)
+ cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ return status;
+}
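+
+/*
+ * Illustrative sketch (hypothetical helper): reading the first 16
+ * identification bytes of an SFP module EEPROM at the conventional 0xA0
+ * address, page 0. Bit [8] of lport marks the logical port number as
+ * valid, per the parameter description above.
+ */
+#if 0
+static s32 example_read_sfp_id(struct ixgbe_hw *hw, u8 *buf)
+{
+ u16 lport = 0x100 | (u16)hw->bus.func; /* bit [8] = lport valid */
+
+ return ixgbe_aci_sff_eeprom(hw, lport, 0xA0, 0, 0, 0, buf, 16, false);
+}
+#endif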
+
+/**
+ * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ *
+ * Program Topology Device NVM using ACI command (0x06F2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params)
+{
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.prog_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
+
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ * @start_address: byte offset in the topology device NVM
+ * @data: pointer to data buffer
+ * @data_size: number of bytes to be read from the topology device NVM
+ *
+ * Read the Topology Device NVM using ACI command (0x06F3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size)
+{
+ struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || data_size == 0 ||
+ data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
+ return IXGBE_ERR_PARAM;
+
+ cmd = &desc.params.read_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
+
+ desc.datalen = IXGBE_CPU_TO_LE16(data_size);
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+ cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ memcpy(data, cmd->data_read, data_size);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return IXGBE_SUCCESS;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tells if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
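+
+/*
+ * Illustrative sketch of the ownership pattern used throughout this file:
+ * every NVM read is bracketed by ixgbe_acquire_nvm()/ixgbe_release_nvm().
+ * The helper below is hypothetical.
+ */
+#if 0
+static s32 example_read_sr_locked(struct ixgbe_hw *hw, u32 offset,
+ u16 length, void *data)
+{
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ /* One Shadow RAM read, flagged as the last command in the series */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT, offset,
+ length, data, true, true);
+
+ ixgbe_release_nvm(hw);
+ return status;
+}
+#endif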
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+ __le16 len;
+
+ /* Read the module's size from the Shadow RAM: pass 0 to read from the
+ * SR itself, with the byte offset 2 * module_typeid + 2 locating the
+ * size word; last_command and read_shadow_ram are both set to true.
+ */
+ status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_read_nvm_cfg - read an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature ID
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: pointer to count of elements read by FW
+ *
+ * Read one or more feature/field IDs and their data using ACI command
+ * (0x0704).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
+
+ cmd->cmd_flags = cmd_flags;
+ cmd->id = IXGBE_CPU_TO_LE16(field_id);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+ if (!status && elem_count)
+ *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_write_nvm_cfg - write an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer containing the data to be written
+ * @buf_size: buffer size
+ * @elem_count: count of elements to be written
+ *
+ * Write one or more feature/field IDs and their data using ACI command
+ * (0x0705).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ cmd->count = IXGBE_CPU_TO_LE16(elem_count);
+ cmd->cmd_flags = cmd_flags;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!status &&
+ IXGBE_LE16_TO_CPU(cmd->checksum) !=
+ IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid Shadow RAM checksum");
+ status = IXGBE_ERR_NVM_CHECKSUM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = LO_BYTE(cmd_flags);
+ cmd->offset_high = HI_BYTE(cmd_flags);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
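+
+/*
+ * Worked example of the bank arithmetic above (values are illustrative):
+ * with nvm_ptr = 0x10000, nvm_size = 0x20000 and the 2nd bank active, the
+ * active NVM bank starts at 0x10000 + 0x20000 = 0x30000 and the inactive
+ * bank at 0x10000; with the 1st bank active the two offsets swap.
+ */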
+
+/**
+ * ixgbe_read_flash_module - Read data from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ s32 status;
+ u32 start;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read a word from the requested main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the requested NVM module. This includes the
+ * CSS header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
+ * NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return IXGBE_SUCCESS;
+}
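+
+/*
+ * Worked example of the conversion above (illustrative value): a CSS
+ * header length of 0x150 DWORDs is 0x2A0 words; adding
+ * IXGBE_NVM_AUTH_HEADER_LEN to that gives the word offset at which the
+ * Shadow RAM copy begins within the NVM bank.
+ */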
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ s32 status;
+
+ status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
+ hdr_len = ROUND_UP(hdr_len, 32);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: structure to store NVM and OROM minsrev values
+ *
+ * Read the Minimum Security Revision TLV and extract
+ * the revision values from the flash image
+ * into a readable structure for processing.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+ u16 valid;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
+ 0, sizeof(data), &data,
+ true, false);
+
+ ixgbe_release_nvm(hw);
+
+ if (status)
+ return status;
+
+ valid = IXGBE_LE16_TO_CPU(data.validity);
+
+ /* Extract NVM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
+
+ minsrevs->nvm = minsrev_h << 16 | minsrev_l;
+ minsrevs->nvm_valid = true;
+ }
+
+ /* Extract the OROM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
+
+ minsrevs->orom = minsrev_h << 16 | minsrev_l;
+ minsrevs->orom_valid = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: minimum security revision information
+ *
+ * Update the NVM or Option ROM minimum security revision fields in the PFA
+ * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
+ * fields to determine what update is being requested. If the valid bit is not
+ * set for that module, then the associated minsrev will be left as is.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+
+ if (!minsrevs->nvm_valid && !minsrevs->orom_valid)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ /* Get current data */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, true, false);
+ if (status)
+ goto exit_release_res;
+
+ if (minsrevs->nvm_valid) {
+ data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
+ data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
+ }
+
+ if (minsrevs->orom_valid) {
+ data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
+ data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
+ }
+
+ /* Update flash data */
+ status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, false,
+ IXGBE_ACI_NVM_SPECIAL_UPDATE);
+ if (status)
+ goto exit_release_res;
+
+ /* Dump the Shadow RAM to the flash */
+ status = ixgbe_nvm_write_activate(hw, 0, NULL);
+
+exit_release_res:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
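+
+/*
+ * Illustrative sketch (hypothetical helper): raising only the NVM minimum
+ * security revision. orom_valid is left false, so the Option ROM minsrev
+ * is preserved as described above.
+ */
+#if 0
+static s32 example_bump_nvm_minsrev(struct ixgbe_hw *hw, u32 new_srev)
+{
+ struct ixgbe_minsrev_info minsrevs = { 0 };
+
+ minsrevs.nvm = new_srev;
+ minsrevs.nvm_valid = true;
+
+ return ixgbe_update_nvm_minsrevs(hw, &minsrevs);
+}
+#endif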
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the requested NVM
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (status)
+ return status;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ s32 status;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank,
+ E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (status)
+ return status;
+
+ nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (status)
+ return status;
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (status)
+ return status;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ /* Note: a failed security revision read is not treated as fatal */
+ status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ s32 status;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (status)
+ return status;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return IXGBE_ERR_NVM;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (status)
+ return status;
+
+ /* Sanity check that we have at least enough words to store the
+ * netlist ID block.
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return IXGBE_ERR_NVM;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (status)
+ return status;
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
+ sizeof(*id_blk));
+ if (!id_blk)
+ return IXGBE_ERR_NO_SPACE;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
+ goto exit_error;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of the SHA hash */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+exit_error:
+ ixgbe_free(hw, id_blk);
+
+ return status;
+}
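+
+/*
+ * Illustrative example (editorial, not part of this change): each 32-bit
+ * netlist field above is assembled from two consecutive LE16 words, high
+ * word first; e.g. a major-version pair of 0x0001 (high) and 0x0002 (low)
+ * combines to netlist->major == 0x00010002.
+ */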
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return IXGBE_SUCCESS;
+}
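+
+/*
+ * Worked example (editorial, not part of this change), assuming
+ * IXGBE_SR_NVM_PTR_4KB_UNITS is the top bit (0x8000) of the word:
+ *
+ * value == 0x8003 -> 4KB units: (0x8003 & 0x7FFF) * 4 * 1024 = 12288 bytes
+ * value == 0x0003 -> word units: 0x0003 * 2 = 6 bytes
+ */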
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return IXGBE_SUCCESS;
+}
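+
+/*
+ * Worked example (editorial, not part of this change): an area size word
+ * of 0x0002 describes 0x0002 * 4 * 1024 = 8192 bytes, i.e. two 4KB sectors.
+ */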
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == IXGBE_ERR_ACI_ERROR &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ status = IXGBE_SUCCESS;
+ max_size = offset;
+ } else if (!status) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
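+
+/*
+ * Illustrative trace (editorial, not part of this change): with the 16MB
+ * search space above and 8MB of readable flash, the first probe at 8MB
+ * fails with EINVAL (max_size = 8MB), the next at 4MB succeeds
+ * (min_size = 4MB), and the window keeps halving until
+ * max_size - min_size == 1, leaving flash_size equal to the first
+ * unreadable offset, 8MB. The loop needs about log2(16MB) = 24 probes.
+ */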
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status) {
+ return status;
+ }
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
+ IXGBE_SR_CTRL_WORD_VALID) {
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+ if (status) {
+ return status;
+ }
+
+ return IXGBE_SUCCESS;
+}
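+
+/*
+ * Editorial sketch (not part of this change): the cached bank/pointer/size
+ * values let later reads compute a flat offset for the active copy of a
+ * module, assuming the second bank immediately follows the first:
+ *
+ * offset = (banks->nvm_bank == IXGBE_1ST_FLASH_BANK) ?
+ *     banks->nvm_ptr : banks->nvm_ptr + banks->nvm_size;
+ */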
+
+/**
+ * ixgbe_init_nvm - initializes NVM settings
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ s32 status;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
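+ /* e.g. sr_size == 3 yields BIT(3) * IXGBE_SR_WORDS_IN_1KB words; with
+ * 512 words per 1KB that is 4096 words, i.e. 8KB of Shadow RAM.
+ */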
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ flash->blank_nvm_mode = false;
+ } else {
+ /* Blank programming mode */
+ flash->blank_nvm_mode = true;
+ return IXGBE_ERR_NVM_BLANK_MODE;
+ }
+
+ status = ixgbe_discover_flash_size(hw);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_determine_active_flash_banks(hw);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (status) {
+ return status;
+ }
+
+ /* Read the netlist version information; a failure here is non-fatal
+ * and its status is not propagated.
+ */
+ status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
+{
+ u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
+ IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
+ s32 status;
+ u8 values;
+
+ status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
+ if (status)
+ return status;
+ if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return IXGBE_ERR_ACI_ERROR;
+
+ return IXGBE_SUCCESS;
+}
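+
+/*
+ * Editorial note on the checks above: the operation is reported as failed
+ * when neither the host nor the BMC clean completed, or when an agent
+ * that reports a completed clean does not also report success. For
+ * example, a values byte with IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE
+ * set but IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS clear yields
+ * IXGBE_ERR_ACI_ERROR.
+ */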
+
+/**
+ * ixgbe_sanitize_nvm - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flag to the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm_sanitization *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm_sanitization;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
+
+ *data = IXGBE_LE16_TO_CPU(data_local);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the Shadow RAM. NVM ownership must
+ * be acquired before calling this function and released by the caller.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2, i;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+ *words = bytes / 2;
+
+ for (i = 0; i < *words; i++)
+ data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ s32 status;
+
+ *length = 0;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u))) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = MIN_T(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (status)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return status;
+}
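+
+/*
+ * Illustrative trace (editorial, not part of this change): a 0x300 byte
+ * read at offset 0xF00 is split on the 4KB sector boundary into two admin
+ * commands:
+ *
+ * 1st: offset 0xF00, read_size = 4096 - 0xF00 = 0x100, last_cmd == false
+ * 2nd: offset 0x1000, read_size = 0x200, last_cmd == true
+ */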
+
+/**
+ * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ *
+ * Check if all the parameters are valid
+ * before performing any Shadow RAM read/write operations.
+ *
+ * Return: the exit code of the operation.
+ * * IXGBE_SUCCESS - success.
+ * * IXGBE_ERR_PARAM - NVM error: the offset is beyond the SR limit,
+ *   more words were requested than the set limit allows, or
+ *   the access would spread over two sectors.
+ */
+static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
+ u16 words)
+{
+ if ((offset + words) > hw->eeprom.word_size) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
+ /* We can access only up to 4KB (one sector),
+ * in one Admin Command write
+ */
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
+ (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
+ /* A single access cannot spread over two sectors */
+ return IXGBE_ERR_PARAM;
+ }
+
+ return IXGBE_SUCCESS;
+}
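+
+/*
+ * Worked example (editorial), assuming IXGBE_SR_SECTOR_SIZE_IN_WORDS ==
+ * 2048 (4KB): offset == 2047 with words == 2 fails the last check because
+ * (2047 + 1) / 2048 == 1 while 2047 / 2048 == 0, i.e. the access would
+ * straddle a sector boundary; offset == 2046 with words == 2 stays within
+ * one sector and passes.
+ */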
+
+/**
+ * ixgbe_write_sr_word_aci - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM changes to NVM, the update
+ * checksum function should be called.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
+{
+ __le16 data_local = IXGBE_CPU_TO_LE16(*data);
+ s32 status;
+
+ status = ixgbe_check_sr_access_params(hw, offset, 1);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD, &data_local,
+ false, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes 16 bit words (a data buffer) to the Shadow RAM using the admin
+ * command. NVM ownership must be acquired before calling this function and
+ * released by the caller. To commit the Shadow RAM changes to NVM, the
+ * update checksum function should be called.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
+ const u16 *data)
+{
+ __le16 *data_local;
+ s32 status;
+ void *vmem;
+ u32 i;
+
+ vmem = ixgbe_calloc(hw, words, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data_local = (__le16 *)vmem;
+
+ for (i = 0; i < words; i++)
+ data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ status = ixgbe_check_sr_access_params(hw, offset, words);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD * words,
+ data_local, false, 0);
+
+ ixgbe_free(hw, vmem);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write - write to alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written at 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written at 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure using ACI command (0x0900).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_read - read from alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure using ACI command (0x0902).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
+ * is read.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (status == IXGBE_SUCCESS) {
+ *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write_done - check if writing to alternate structure
+ * is done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed)
+{
+ struct ixgbe_aci_cmd_done_alt_write *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status)
+ *reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
+ IXGBE_ACI_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
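+
+#if 0
+/* Editorial usage sketch, not part of this change: write two dwords to the
+ * alternate structure, commit them, and honor a requested reset. The
+ * function name, addresses (0x10, 0x14), values, and bios_mode of 0 are
+ * placeholders.
+ */
+static void ixgbe_alt_write_example(struct ixgbe_hw *hw)
+{
+ bool reset_needed = false;
+
+ if (!ixgbe_aci_alternate_write(hw, 0x10, 0x1, 0x14, 0x2) &&
+ !ixgbe_aci_alternate_write_done(hw, 0, &reset_needed) &&
+ reset_needed)
+ hw->mac.ops.reset_hw(hw);
+}
+#endif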
+
+/**
+ * ixgbe_aci_alternate_clear - clear alternate structure
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_clear_port_alt_write);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_get_internal_data - get internal FW/HW data
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table ID within cluster
+ * @start: index of line in the block to read
+ * @buf: dump buffer
+ * @buf_size: dump buffer size
+ * @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
+ * @ret_next_table: next block to read (returned by FW)
+ * @ret_next_index: next index to read (returned by FW)
+ *
+ * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index)
+{
+ struct ixgbe_aci_cmd_debug_dump_internals *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.debug_dump;
+
+ if (buf_size == 0 || !buf)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_debug_dump_internals);
+
+ cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
+ cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
+ cmd->idx = IXGBE_CPU_TO_LE32(start);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (!status) {
+ if (ret_buf_size)
+ *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
+ if (ret_next_cluster)
+ *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
+ if (ret_next_table)
+ *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
+ if (ret_next_index)
+ *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access structure is a request to read or write a valid
+ * register offset. First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ *
+ * Return: 0 if the register access is valid, out of range error code otherwise.
+ */
+static s32
+ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
+{
+ u16 i;
+
+ switch (cmd->offset) {
+ case GL_HICR:
+ case GL_HICR_EN: /* Note, this register is read only */
+ case GL_FWSTS:
+ case GL_MNG_FWSM:
+ case GLNVM_GENS:
+ case GLNVM_FLA:
+ case GL_FWRESETCNT:
+ return 0;
+ default:
+ break;
+ }
+
+ for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIDA(i))
+ return 0;
+
+ for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIBA(i))
+ return 0;
+
+ /* All other register offsets are not valid */
+ return IXGBE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ixgbe_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ *
+ * Return: 0 if the register read is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Always initialize the output data, even on failure */
+ memset(&data->regval, 0, cmd->data_size);
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
+
+ /* Read the register and store the contents in the data field */
+ data->regval = IXGBE_READ_REG(hw, cmd->offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ *
+ * Return: 0 if the register write is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ /* Reject requests to write to read-only registers */
+ switch (cmd->offset) {
+ case GL_HICR_EN:
+ return IXGBE_ERR_OUT_OF_RANGE;
+ default:
+ break;
+ }
+
+ DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
+ cmd->offset, data->regval);
+
+ /* Write the data field to the specified register */
+ IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ *
+ * Return: 0 if the nvm access request is valid and successful,
+ * error code otherwise.
+ */
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ switch (cmd->command) {
+ case IXGBE_NVM_CMD_READ:
+ return ixgbe_nvm_access_read(hw, cmd, data);
+ case IXGBE_NVM_CMD_WRITE:
+ return ixgbe_nvm_access_write(hw, cmd, data);
+ default:
+ return IXGBE_ERR_PARAM;
+ }
+}
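+
+#if 0
+/* Editorial usage sketch, not part of this change: read the GL_FWSTS
+ * register through the NVM access interface. The example function name is
+ * hypothetical and the initializers assume the field layout used above.
+ */
+static void ixgbe_nvm_access_example(struct ixgbe_hw *hw)
+{
+ struct ixgbe_nvm_access_cmd cmd = {
+ .command = IXGBE_NVM_CMD_READ,
+ .offset = GL_FWSTS,
+ .data_size = sizeof(u32),
+ };
+ struct ixgbe_nvm_access_data data;
+
+ if (!ixgbe_handle_nvm_access(hw, &cmd, &data))
+ DEBUGOUT1("GL_FWSTS = %08x\n", data.regval);
+}
+#endif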
+
+/**
+ * ixgbe_aci_set_health_status_config - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF using ACI command (0xFF20). The supported event types are: PF-specific,
+ * all PFs, and global.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
+{
+ struct ixgbe_aci_cmd_set_health_status_config *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_health_status_config;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_set_health_status_config);
+
+ cmd->event_source = event_source;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for E610.
+ * Does not touch the hardware.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_E610;
+ mac->ops.start_hw = ixgbe_start_hw_E610;
+ mac->ops.get_media_type = ixgbe_get_media_type_E610;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_E610;
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.setup_link = ixgbe_setup_link_E610;
+ mac->ops.check_link = ixgbe_check_link_E610;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
+ mac->ops.setup_fc = ixgbe_setup_fc_E610;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
+ mac->ops.disable_rx = ixgbe_disable_rx_E610;
+ mac->ops.setup_eee = ixgbe_setup_eee_E610;
+ mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
+ mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
+ mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
+ mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
+ mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_E610;
+ phy->ops.identify = ixgbe_identify_phy_E610;
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ phy->eee_speeds_advertised = phy->eee_speeds_supported;
+
+ /* Additional ops overrides for e610 to go here */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
+ eeprom->ops.read = ixgbe_read_ee_aci_E610;
+ eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
+ eeprom->ops.write = ixgbe_write_ee_aci_E610;
+ eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
+ eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_E610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_reset_hw_E610");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ status = hw->phy.ops.init(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Gets the firmware version and, if that succeeds, starts the hardware
+ * using the generic start_hw function followed by the gen2 start_hw
+ * function, then performs revision-specific operations, if any.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ ret_val = hw->mac.ops.get_fw_version(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_media_type_E610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY
+ * capabilities and then uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_mask = 0;
+ s32 rc;
+ u8 i;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc) {
+ return ixgbe_media_type_unknown;
+ }
+
+ /* If there is no link but PHY (dongle) is available SW should use
+ * Get PHY Caps admin command instead of Get Link Status, find most
+ * significant bit that is set in PHY types reported by the command
+ * and use it to discover media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc) {
+ return ixgbe_media_type_unknown;
+ }
+
+ /* Check if there is some bit set in phy_type_high */
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_high & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = phy_mask;
+ hw->link.link_info.phy_type_low = 0;
+ break;
+ }
+ phy_mask = 0;
+ }
+
+ /* If nothing found in phy_type_high search in phy_type_low */
+ if (phy_mask == 0) {
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_low & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = 0;
+ hw->link.link_info.phy_type_low = phy_mask;
+ break;
+ }
+ }
+ }
+
+ }
+
+ /* Based on link status or search above try to discover media type */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
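+
+/*
+ * Illustrative example (editorial, not part of this change): if the
+ * command reports phy_type_high == 0 and phy_type_low == 0x180 (bits 7
+ * and 8 set), the descending scan stops at bit 8 and phy_type_low is
+ * reduced to the single most significant type, 0x100.
+ */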
+
+/**
+ * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ *
+ * Return: the bitmap of supported physical layer types, or
+ * IXGBE_PHYSICAL_LAYER_UNKNOWN on error.
+ */
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type;
+ s32 rc;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
+ if (phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_setup_link_E610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_E610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ s32 rc;
+ u32 i;
+
+ if (!speed || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command. */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && *link_up == false) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msec_delay(100);
+ hw->link.get_link_info = true;
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW. */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_link_capabilities_E610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return IXGBE_ERR_PARAM;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps = NULL;
+ s32 status = IXGBE_SUCCESS;
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ switch (req_mode) {
+ case ixgbe_fc_auto:
+ {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = IXGBE_ERR_OUT_OF_MEM;
+ goto out;
+ }
+
+ /* Query the value of FC that both the NIC and the attached
+ * media can do. */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
+ if (status)
+ goto out;
+
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+
+ break;
+ }
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* clear the old pause settings */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+out:
+ if (pcaps)
+ ixgbe_free(hw, pcaps);
+ return status;
+}
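+
+/*
+ * Worked example (editorial, not part of this change): with req_mode ==
+ * ixgbe_fc_rx_pause, only IXGBE_ACI_PHY_EN_RX_LINK_PAUSE ends up in
+ * pause_mask, so both pause bits are cleared from cfg->caps and only the
+ * RX bit is restored, leaving TX pause disabled.
+ */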
+
+/**
+ * ixgbe_setup_fc_E610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+ s32 status;
+
+ /* Get the current PHY config */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (status)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (status)
+ return status;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (status)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_fc_autoneg_E610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Get current link status.
+ * Current FC mode will be stored in the hw context. */
+ status = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (status) {
+ goto out;
+ }
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @minor: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Send driver version number to Firmware using ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * IXGBE_SUCCESS - OK
+ * IXGBE_ERR_PARAM - incorrect parameters were given
+ * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
+ * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
+ * IXGBE_ERR_OUT_OF_MEM - ran out of memory
+ */
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+ u8 sub, u16 len, const char *driver_ver)
+{
+ size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
+ struct ixgbe_driver_ver dv;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
+
+ if (!len || !driver_ver)
+ return IXGBE_ERR_PARAM;
+
+ dv.major_ver = maj;
+ dv.minor_ver = minor;
+ dv.build_ver = build;
+ dv.subbuild_ver = sub;
+
+ memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
+ memcpy(dv.driver_string, driver_ver, limited_len);
+
+ return ixgbe_aci_send_driver_ver(hw, &dv);
+}
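+
+#if 0
+/* Editorial usage sketch, not part of this change: report a hypothetical
+ * 1.0.0 driver version string to the firmware.
+ */
+static void ixgbe_drv_ver_example(struct ixgbe_hw *hw)
+{
+ static const char drv_str[] = "1.0.0-k";
+
+ ixgbe_set_fw_drv_ver_E610(hw, 1, 0, 0, 0, sizeof(drv_str), drv_str);
+}
+#endif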
+
+/**
+ * ixgbe_disable_rx_E610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable RX DMA unit on E610 with use of ACI command (0x000C).
+ */
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ DEBUGFUNC("ixgbe_disable_rx_E610");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ u32 pfdtxgswc;
+ s32 status;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ status = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_setup_eee_E610 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enables/disables EEE based on the enable_eee flag.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ u16 eee_cap = 0;
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (enable_eee) {
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
+ if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
+ }
+
+ /* Set EEE capability for particular PHY types */
+ phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM recovery mode by
+ * reading the value of the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM Rollback mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in Rollback mode, otherwise false.
+ */
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in TSAM, otherwise false.
+ */
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
+
+ return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
+}
+
+/**
+ * ixgbe_init_phy_ops_E610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->ops.identify_sfp = ixgbe_identify_module_E610;
+ phy->ops.read_reg = NULL; /* PHY reg access is not required */
+ phy->ops.write_reg = NULL;
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ phy->ops.setup_link = ixgbe_setup_phy_link_E610;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
+ phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
+ phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
+ phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
+ else
+ phy->ops.set_phy_power = NULL;
+ phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
+ phy->ops.handle_lasi = NULL; /* no implementation for E610 */
+ phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
+
+ /* TODO: Set function pointers based on device ID */
+
+ /* Identify the PHY */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* TODO: Set function pointers based on PHY type */
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_E610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 rc;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+
+ if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in case of E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_module_E610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ s32 rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ goto err;
+
+ media_available =
+ (hw->link.link_info.link_info &
+ IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by ixgbe_update_link_info() */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ rc = IXGBE_SUCCESS;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ rc = IXGBE_ERR_SFP_NOT_PRESENT;
+ }
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ s32 rc;
+
+ rc = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (rc) {
+ goto err;
+ }
+
+ /* If media is not available get default config */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (rc) {
+ goto err;
+ }
+
+ sup_phy_type_low = pcaps.phy_type_low;
+ sup_phy_type_high = pcaps.phy_type_high;
+
+ /* Get Active configuration to avoid unintended changes */
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (rc) {
+ goto err;
+ }
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ /* Set default PHY types for a given speed */
+ pcfg.phy_type_low = 0;
+ pcfg.phy_type_high = 0;
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types */
+ pcfg.phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_high &= sup_phy_type_high;
+
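+ /* Apply the new configuration only when it differs from the active
+ * one; presumably this avoids needlessly restarting link negotiation.
+ */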
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ }
+
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ *
+ * Determines PHY FW version based on response to Get PHY Capabilities
+ * admin command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 status;
+
+ if (!firmware_version)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (status)
+ return status;
+
+ /* TODO: determine which bytes of the 8-byte phy_fw_ver
+ * field should be written to the 2-byte firmware_version
+ * output argument. */
+ memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation from SFP module's SFF-8472 data over I2C.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ sff8472_data, 1, false);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ eeprom_data, 1, false);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ &eeprom_data, 1, true);
+}
+
+/**
+ * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ *
+ * Get the link status and check whether a PHY temperature alarm was detected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ resp = &desc.params.get_link_status;
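+ /* LSE_NOP presumably reads link status without changing link status
+ * event (LSE) reporting, by analogy with the ice driver's AQ flag.
+ */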
+ resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "PHY Temperature Alarm detected");
+ status = IXGBE_ERR_OVERTEMP;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_set_phy_power_E610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Turns the PHY power on or off by reading its capabilities and
+ * setting the appropriate configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on) {
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ } else {
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+ }
+
+ /* PHY is already in requested power mode */
+ if (phy_caps.caps == phy_cfg.caps)
+ return IXGBE_SUCCESS;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_enter_lplu_E610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU, so avoid resetting
+ * the X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
+ GLNVM_GENS_SR_SIZE_S;
+
+ /* sr_size is the log2 of the Shadow RAM size in KB; convert to words */
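+ /* e.g. sr_size == 3 gives BIT(3) == 8 KB == 4096 words */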
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ DEBUGOUT2("EEPROM params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_E610 - Read EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Calculate SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of
+ * the VPD area are customer specific and unknown, so this function skips the
+ * maximum possible size of the VPD area (1kB).
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the negative error code on error, or the 16-bit checksum
+ */
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ bool nvm_acquired = false;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 checksum = 0;
+ u16 vpd_module;
+ void *vmem;
+ s32 status;
+ u16 *data;
+ u16 i;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data = (u16 *)vmem;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+ nvm_acquired = true;
+
+ /* read pointer to VPD area */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* read pointer to PCIe Alt Auto-load module */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->eeprom.word_size; i++) {
+ /* Read SR page */
+ if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
+
+ status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
+ if (status != IXGBE_SUCCESS)
+ goto ixgbe_calc_sr_checksum_exit;
+ }
+
+ /* Skip Checksum word */
+ if (i == E610_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i >= (u32)vpd_module &&
+ i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
+ continue;
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i >= (u32)pcie_alt_module &&
+ i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
+ continue;
+
+ checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
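+ /* The stored checksum word is chosen so that the 16-bit sum of all
+ * words, including the checksum word itself, equals
+ * IXGBE_SR_SW_CHECKSUM_BASE (0xBABA).
+ */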
+ checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ixgbe_calc_sr_checksum_exit:
+ if (nvm_acquired)
+ ixgbe_release_nvm(hw);
+ ixgbe_free(hw, vmem);
+
+ if (!status)
+ return (s32)checksum;
+ else
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to Shadow RAM, software sends the admin command
+ * to recalculate and update EEPROM checksum and instructs the hardware
+ * to update the flash.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_recalculate_checksum(hw);
+ if (status)
+ return status;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
+ NULL);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_validate_checksum(hw);
+ if (status)
+ return status;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!status)
+ *checksum_val = tmp_checksum;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ /* Read TLV length */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return IXGBE_SUCCESS;
+ }
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return IXGBE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ixgbe_read_pba_string_E610 - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ s32 status;
+ u16 i;
+
+ status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ E610_SR_PBA_BLOCK_PTR);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
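+ /* Each NVM word holds two ASCII characters, high byte first */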
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
diff --git a/sys/dev/ixgbe/ixgbe_e610.h b/sys/dev/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..94e600139499
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_init_aci(struct ixgbe_hw *hw);
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv);
+s32 ixgbe_aci_set_pf_context(struct ixgbe_hw *hw, u8 pf_id);
+
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+s32 ixgbe_get_caps(struct ixgbe_hw *hw);
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+bool ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle);
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value);
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value);
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write);
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params);
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size);
+
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count);
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, struct ixgbe_netlist_info *netlist);
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw);
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data);
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words, const u16 *data);
+
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1);
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1);
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index);
+
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data);
+
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source);
+
+/* E610 operations */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 sub, u16 len, const char *driver_ver);
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee);
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_features.h b/sys/dev/ixgbe/ixgbe_features.h
index 0cef334a185f..bee9040319d8 100644
--- a/sys/dev/ixgbe/ixgbe_features.h
+++ b/sys/dev/ixgbe/ixgbe_features.h
@@ -57,6 +57,7 @@
#define IXGBE_FEATURE_LEGACY_IRQ (u32)(1 << 12)
#define IXGBE_FEATURE_NEEDS_CTXD (u32)(1 << 13)
#define IXGBE_FEATURE_RECOVERY_MODE (u32)(1 << 15)
+#define IXGBE_FEATURE_DBG_DUMP (u32)(1 << 16)
/* Check for OS support. Undefine features if not included in the OS */
#ifndef PCI_IOV
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 892924712c38..9bd9ce63b786 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -114,3 +114,29 @@ ixgbe_link_speed_to_baudrate(ixgbe_link_speed speed)
return baudrate;
}
+
+void
+ixgbe_init_lock(struct ixgbe_lock *lock)
+{
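+ /* MTX_DUPOK: presumably more than one lock of this type may be held
+ * at once (e.g. by multiple adapter instances), which WITNESS would
+ * otherwise report.
+ */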
+ mtx_init(&lock->mutex, "mutex",
+ "ixgbe ACI lock", MTX_DEF | MTX_DUPOK);
+}
+
+void
+ixgbe_acquire_lock(struct ixgbe_lock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+void
+ixgbe_release_lock(struct ixgbe_lock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+void
+ixgbe_destroy_lock(struct ixgbe_lock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+}
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index cf7c578fd684..8cf1d13736ce 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -133,7 +133,9 @@ enum {
/* XXX these need to be revisited */
#define IXGBE_CPU_TO_LE16 htole16
#define IXGBE_CPU_TO_LE32 htole32
+#define IXGBE_LE16_TO_CPU le16toh
#define IXGBE_LE32_TO_CPU le32toh
+#define IXGBE_LE64_TO_CPU le64toh
#define IXGBE_LE32_TO_CPUS(x) *(x) = le32dec(x)
#define IXGBE_CPU_TO_BE16 htobe16
#define IXGBE_CPU_TO_BE32 htobe32
@@ -146,6 +148,7 @@ typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
+typedef int64_t s64;
#ifndef __bool_true_false_are_defined
typedef boolean_t bool;
#endif
@@ -195,6 +198,11 @@ struct ixgbe_osdep
bus_space_handle_t mem_bus_space_handle;
};
+struct ixgbe_lock
+{
+ struct mtx mutex;
+};
+
/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
@@ -222,4 +230,27 @@ extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32);
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \
ixgbe_write_reg_array(a, reg, offset, val)
+void ixgbe_init_lock(struct ixgbe_lock *);
+void ixgbe_destroy_lock(struct ixgbe_lock *);
+void ixgbe_acquire_lock(struct ixgbe_lock *);
+void ixgbe_release_lock(struct ixgbe_lock *);
+
+static inline void *
+ixgbe_calloc(struct ixgbe_hw __unused *hw, size_t count, size_t size)
+{
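+ /* Note: unlike mallocarray(9), count * size is not checked for
+ * overflow; callers pass small, driver-controlled sizes.
+ */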
+ return (malloc(count * size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void *
+ixgbe_malloc(struct ixgbe_hw __unused *hw, size_t size)
+{
+ return (malloc(size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void
+ixgbe_free(struct ixgbe_hw __unused *hw, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
+
#endif /* _IXGBE_OSDEP_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 91b46da72c75..0bbe7806d41d 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -74,6 +74,7 @@
*/
#include "ixgbe_osdep.h"
+#include "ixgbe_type_e610.h"
/* Override this by setting IOMEM in your ixgbe_osdep.h header */
#define IOMEM
@@ -150,12 +151,19 @@
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x0001
#define IXGBE_CAT(r, m) IXGBE_##r##m
@@ -1969,6 +1977,7 @@ enum {
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -2004,6 +2013,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2025,6 +2035,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -2047,6 +2058,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2454,6 +2466,7 @@ enum {
#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2571,6 +2584,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2581,6 +2595,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_LINK_SPEED_16000 0x4
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -3743,6 +3758,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_X550EM_a_vf,
+ ixgbe_mac_E610,
+ ixgbe_mac_E610_vf,
ixgbe_num_macs
};
@@ -3822,7 +3839,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui
};
/* Flow Control Settings */
@@ -3831,6 +3850,7 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
+ ixgbe_fc_auto,
ixgbe_fc_default
};
@@ -3863,6 +3883,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_16000 = 16000,
ixgbe_bus_speed_reserved
};
@@ -4007,6 +4028,7 @@ struct ixgbe_eeprom_operations {
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
s32 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*read_pba_string)(struct ixgbe_hw *, u8 *, u32);
};
struct ixgbe_mac_operations {
@@ -4118,6 +4140,10 @@ struct ixgbe_mac_operations {
void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ bool (*get_fw_tsam_mode)(struct ixgbe_hw *hw);
+ s32 (*get_fw_version)(struct ixgbe_hw *hw);
+ s32 (*get_nvm_version)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
};
struct ixgbe_phy_operations {
@@ -4162,6 +4188,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -4233,6 +4262,9 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
#include "ixgbe_mbx.h"
@@ -4261,6 +4293,22 @@ struct ixgbe_hw {
bool wol_enabled;
bool need_crosstalk_fix;
u32 fw_rst_cnt;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct ixgbe_fwlog_cfg fwlog_cfg;
+ bool fwlog_support_ena;
+ struct ixgbe_fwlog_ring fwlog_ring;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4312,6 +4360,24 @@ struct ixgbe_hw {
#define IXGBE_ERR_MBX_NOMSG -42
#define IXGBE_ERR_TIMEOUT -43
+#define IXGBE_ERR_NOT_SUPPORTED -45
+#define IXGBE_ERR_OUT_OF_RANGE -46
+
+#define IXGBE_ERR_NVM -50
+#define IXGBE_ERR_NVM_CHECKSUM -51
+#define IXGBE_ERR_BUF_TOO_SHORT -52
+#define IXGBE_ERR_NVM_BLANK_MODE -53
+#define IXGBE_ERR_INVAL_SIZE -54
+#define IXGBE_ERR_DOES_NOT_EXIST -55
+
+#define IXGBE_ERR_ACI_ERROR -100
+#define IXGBE_ERR_ACI_DISABLED -101
+#define IXGBE_ERR_ACI_TIMEOUT -102
+#define IXGBE_ERR_ACI_BUSY -103
+#define IXGBE_ERR_ACI_NO_WORK -104
+#define IXGBE_ERR_ACI_NO_EVENTS -105
+#define IXGBE_ERR_FW_API_VER -106
+
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
@@ -4540,5 +4606,6 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_REQUEST_TASK_FDIR 0x08
#define IXGBE_REQUEST_TASK_PHY 0x10
#define IXGBE_REQUEST_TASK_LSC 0x20
+#define IXGBE_REQUEST_TASK_FWEVENT 0x40
#endif /* _IXGBE_TYPE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type_e610.h b/sys/dev/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..e300030c3ba4
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,2278 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+
+/* Generic defines */
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* !BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* !BIT_ULL */
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif /* !BITS_PER_BYTE */
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif /* !DIVIDE_AND_ROUND_UP */
+
+#ifndef ROUND_UP
+/**
+ * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
+ * @a: value to round up
+ * @b: arbitrary multiple
+ *
+ * Round up to the next multiple of the arbitrary b.
+ */
+#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+#endif /* !ROUND_UP */
+
+#define MAKEMASK(mask, shift) ((mask) << (shift))
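+/* e.g. MAKEMASK(0x7, 5) == 0xE0, a 3-bit field at bit offset 5 */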
+
+#define BYTES_PER_WORD 2
+#define BYTES_PER_DWORD 4
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG 64
+#endif /* !BITS_PER_LONG */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif /* !BITS_PER_LONG_LONG */
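+/* GENMASK(h, l) builds a contiguous bit mask from bit l through bit h
+ * inclusive, e.g. GENMASK(7, 4) == 0xF0.
+ */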
+#undef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+/* Data type manipulation macros. */
+#define HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+#define HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define LO_WORD(x) ((u16)((x) & 0xFFFF))
+#define HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define LO_BYTE(x) ((u8)((x) & 0xFF))
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+/**
+ * ixgbe_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ixgbe_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
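+/* e.g. for a hypothetical struct ending in a flexible array member "elems",
+ * ixgbe_struct_size(resp, elems, 4) is the struct size plus four elements.
+ */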
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define E610_SR_VPD_SIZE_WORDS 512
+#define E610_SR_PCIE_ALT_SIZE_WORDS 512
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_DEV_STARTER_VER 0x18
+#define E610_NVM_VER_LO_SHIFT 0
+#define E610_NVM_VER_LO_MASK (0xff << E610_NVM_VER_LO_SHIFT)
+#define E610_NVM_VER_HI_SHIFT 12
+#define E610_NVM_VER_HI_MASK (0xf << E610_NVM_VER_HI_SHIFT)
+#define E610_SR_NVM_MAP_VER 0x29
+#define E610_SR_NVM_EETRACK_LO 0x2D
+#define E610_SR_NVM_EETRACK_HI 0x2E
+#define E610_SR_VPD_PTR 0x2F
+#define E610_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define E610_SR_PFA_PTR 0x40
+#define E610_SR_1ST_NVM_BANK_PTR 0x42
+#define E610_SR_NVM_BANK_SIZE 0x43
+#define E610_SR_1ST_OROM_BANK_PTR 0x44
+#define E610_SR_OROM_BANK_SIZE 0x45
+#define E610_SR_NETLIST_BANK_PTR 0x46
+#define E610_SR_NETLIST_BANK_SIZE 0x47
+#define E610_SR_POINTER_TYPE_BIT BIT(15)
+#define E610_SR_POINTER_MASK 0x7fff
+#define E610_SR_HALF_4KB_SECTOR_UNITS 2048
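+/* A Shadow RAM pointer with bit 15 set is expressed in 4KB sectors
+ * (2048-word units) rather than in words.
+ */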
+#define E610_GET_PFA_POINTER_IN_WORDS(offset) \
+ ((((offset) & E610_SR_POINTER_TYPE_BIT) == E610_SR_POINTER_TYPE_BIT) ? \
+ (((offset) & E610_SR_POINTER_MASK) * E610_SR_HALF_4KB_SECTOR_UNITS) : \
+ ((offset) & E610_SR_POINTER_MASK))
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_CTRL_WORD 0x00
+#define E610_SR_PBA_BLOCK_PTR 0x16
+
+/* The Orom version topology */
+#define IXGBE_OROM_VER_PATCH_SHIFT 0
+#define IXGBE_OROM_VER_PATCH_MASK (0xff << IXGBE_OROM_VER_PATCH_SHIFT)
+#define IXGBE_OROM_VER_BUILD_SHIFT 8
+#define IXGBE_OROM_VER_BUILD_MASK (0xffff << IXGBE_OROM_VER_BUILD_SHIFT)
+#define IXGBE_OROM_VER_SHIFT 24
+#define IXGBE_OROM_VER_MASK (0xff << IXGBE_OROM_VER_SHIFT)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_S 0x06
+#define IXGBE_SR_CTRL_WORD_1_M (0x03 << IXGBE_SR_CTRL_WORD_1_S)
+#define IXGBE_SR_CTRL_WORD_VALID 0x1
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* These macros extract the individual parts of the NVM version from the
+ * NVM Image Revision word: the major version, minor version and image ID.
+ */
+#define E610_NVM_MAJOR_VER(x) (((x) & 0xF000) >> 12)
+#define E610_NVM_MINOR_VER(x) ((x) & 0x00FF)
+
+/* Shadow RAM related */
+#define IXGBE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define IXGBE_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define IXGBE_SR_SW_CHECKSUM_BASE 0xBABA
+
+/* Netlist */
+#define IXGBE_MAX_NETLIST_SIZE 10
+
+/* General registers */
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S 0
+#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S 8
+#define GL_FWSTS_FWROWD_M BIT(8)
+#define GL_FWSTS_FWRI_S 9
+#define GL_FWSTS_FWRI_M BIT(9)
+#define GL_FWSTS_FWS1B_S 16
+#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16)
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Recovery mode values of Firmware Status 1 Byte (FWS1B) bitfield */
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER_LEGACY 0x0B
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR_LEGACY 0x0C
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER 0x30
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR 0x31
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_TRANSITION 0x32
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_NVM 0x33
+
+/* Firmware Status (GL_MNG_FWSM) */
+#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S 0
+#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
+#define GL_MNG_FWSM_RSV1_S 11
+#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S 15
+#define GL_MNG_FWSM_RSV2_M BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S 17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17)
+#define GL_MNG_FWSM_RSV3_S 18
+#define GL_MNG_FWSM_RSV3_M BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S 19
+#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S 25
+#define GL_MNG_FWSM_RSV4_M BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S 26
+#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S 30
+#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30)
+
+/* FW mode indications */
+#define GL_MNG_FWSM_FW_MODES_DEBUG_M BIT(0)
+#define GL_MNG_FWSM_FW_MODES_RECOVERY_M BIT(1)
+#define GL_MNG_FWSM_FW_MODES_ROLLBACK_M BIT(2)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S 0
+#define GLNVM_GENS_NVM_PRES_M BIT(0)
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S 8
+#define GLNVM_GENS_BANK1VAL_M BIT(8)
+#define GLNVM_GENS_ALT_PRST_S 23
+#define GLNVM_GENS_ALT_PRST_M BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S 25
+#define GLNVM_GENS_FL_AUTO_RD_M BIT(25)
+
+/* Flash Access Register */
+#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Bit Bang registers */
+#define RDASB_MSGCTL 0x000B6820
+#define RDASB_MSGCTL_HDR_DWS_S 0
+#define RDASB_MSGCTL_EXP_RDW_S 8
+#define RDASB_MSGCTL_CMDV_M BIT(31)
+#define RDASB_RSPCTL 0x000B6824
+#define RDASB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define RDASB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define RDASB_WHDR0 0x000B68F4
+#define RDASB_WHDR1 0x000B68F8
+#define RDASB_WHDR2 0x000B68FC
+#define RDASB_WHDR3 0x000B6900
+#define RDASB_WHDR4 0x000B6904
+#define RDASB_RHDR0 0x000B6AFC
+#define RDASB_RHDR0_RESPONSE_S 27
+#define RDASB_RHDR0_RESPONSE_M MAKEMASK(0x7, 27)
+#define RDASB_RDATA0 0x000B6B00
+#define RDASB_RDATA1 0x000B6B04
+
+/* SPI Registers */
+#define SPISB_MSGCTL 0x000B7020
+#define SPISB_MSGCTL_HDR_DWS_S 0
+#define SPISB_MSGCTL_EXP_RDW_S 8
+#define SPISB_MSGCTL_MSG_MODE_S 26
+#define SPISB_MSGCTL_TOKEN_MODE_S 28
+#define SPISB_MSGCTL_BARCLR_S 30
+#define SPISB_MSGCTL_CMDV_S 31
+#define SPISB_MSGCTL_CMDV_M BIT(31)
+#define SPISB_RSPCTL 0x000B7024
+#define SPISB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define SPISB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define SPISB_WHDR0 0x000B70F4
+#define SPISB_WHDR0_DEST_SEL_S 12
+#define SPISB_WHDR0_OPCODE_SEL_S 16
+#define SPISB_WHDR0_TAG_S 24
+#define SPISB_WHDR1 0x000B70F8
+#define SPISB_WHDR2 0x000B70FC
+#define SPISB_RDATA 0x000B7300
+#define SPISB_WDATA 0x000B7100
+
+/* Firmware Reset Count register */
+#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S 0
+#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
+
+/* Admin Command Interface (ACI) registers */
+#define PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define PF_HICR 0x00082048
+
+#define PF_HIDA_MAX_INDEX 15
+#define PF_HIBA_MAX_INDEX 1023
+
+#define PF_HICR_EN BIT(0)
+#define PF_HICR_C BIT(1)
+#define PF_HICR_SV BIT(2)
+#define PF_HICR_EV BIT(3)
+
+#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
+#define GL_HIDA_2(_i) (0x00082020 + ((_i) * 4))
+#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
+#define GL_HICR 0x00082040
+
+#define GL_HIDA_MAX_INDEX 15
+#define GL_HIBA_MAX_INDEX 1023
+
+#define GL_HICR_C BIT(1)
+#define GL_HICR_SV BIT(2)
+#define GL_HICR_EV BIT(3)
+
+#define GL_HICR_EN 0x00082044
+
+#define GL_HICR_EN_CHECK BIT(0)
+
+/* Admin Command Interface (ACI) defines */
+/* Defines that help manage the driver vs FW API checks. */
+#define IXGBE_FW_API_VER_BRANCH 0x00
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_DESC_COOKIE_L_DWORD_OFFSET 3
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+/* [ms] timeout while waiting for a sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout while waiting for an async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout while waiting for a resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Timestamp spacing for Tools ACI: queue is active if spacing is within the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_LO 0
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_HI 200
+
+/* Timestamp spacing for Tools ACI: queue is expired if spacing is outside the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_LO -5
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_HI 205
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IXGBE_ACI_FLAG_DD_S 0
+#define IXGBE_ACI_FLAG_CMP_S 1
+#define IXGBE_ACI_FLAG_ERR_S 2
+#define IXGBE_ACI_FLAG_VFE_S 3
+#define IXGBE_ACI_FLAG_LB_S 9
+#define IXGBE_ACI_FLAG_RD_S 10
+#define IXGBE_ACI_FLAG_VFC_S 11
+#define IXGBE_ACI_FLAG_BUF_S 12
+#define IXGBE_ACI_FLAG_SI_S 13
+#define IXGBE_ACI_FLAG_EI_S 14
+#define IXGBE_ACI_FLAG_FE_S 15
+
+#define IXGBE_ACI_FLAG_DD BIT(IXGBE_ACI_FLAG_DD_S) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(IXGBE_ACI_FLAG_CMP_S) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(IXGBE_ACI_FLAG_ERR_S) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(IXGBE_ACI_FLAG_VFE_S) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(IXGBE_ACI_FLAG_LB_S) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(IXGBE_ACI_FLAG_RD_S) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(IXGBE_ACI_FLAG_VFC_S) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(IXGBE_ACI_FLAG_BUF_S) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(IXGBE_ACI_FLAG_SI_S) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(IXGBE_ACI_FLAG_EI_S) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(IXGBE_ACI_FLAG_FE_S) /* 0x8000 */
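+/* For example, a command that passes a data buffer to FW presumably sets
+ * both BUF and RD, matching the analogous ice Admin Queue flag usage.
+ */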
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or allocation failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+ ixgbe_aci_opc_nvm_sanitization = 0x070C,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+
+ /* FW Logging Commands */
+ ixgbe_aci_opc_fw_logs_config = 0xFF30,
+ ixgbe_aci_opc_fw_logs_register = 0xFF31,
+ ixgbe_aci_opc_fw_logs_query = 0xFF32,
+ ixgbe_aci_opc_fw_logs_event = 0xFF33,
+ ixgbe_aci_opc_fw_logs_get = 0xFF34,
+ ixgbe_aci_opc_fw_logs_clear = 0xFF35
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IXGBE_CHECK_STRUCT_LEN(n, X) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used to generate a compilation error if a variable-length
+ * structure is not exactly the correct length assuming a single element of
+ * the variable-length object as the last element of the structure. It gives
+ * a divide by zero error if the structure is not of the correct size,
+ * otherwise it creates an enum that is never used.
+ */
+#define IXGBE_CHECK_VAR_LEN_STRUCT_LEN(n, X, T) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / \
+ (((sizeof(struct X) + sizeof(T)) == (n)) ? 1 : 0) }
+
+/* This macro is used to ensure that parameter structures (i.e. structures
+ * in the params union member of struct ixgbe_aci_desc) are 16 bytes in length.
+ *
+ * NOT intended to be used to check the size of an indirect command/response
+ * additional data buffer (e.g. struct foo) which should just happen to be 16
+ * bytes (instead, use IXGBE_CHECK_STRUCT_LEN(16, foo) for that).
+ */
+#define IXGBE_CHECK_PARAM_LEN(X) IXGBE_CHECK_STRUCT_LEN(16, X)
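+
+/* Worked example of the checks above: for a struct foo (hypothetical) that
+ * is 8 bytes, IXGBE_CHECK_STRUCT_LEN(8, foo) expands to
+ *
+ *	enum ixgbe_static_assert_enum_foo
+ *	{ ixgbe_static_assert_foo = 8 / ((sizeof(struct foo) == 8) ? 1 : 0) };
+ *
+ * which compiles cleanly while the size matches and fails with a
+ * division-by-zero diagnostic as soon as it does not.
+ */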
+
+struct ixgbe_aci_cmd_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_generic);
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_ver);
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+struct ixgbe_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[IXGBE_DRV_VER_STR_LEN_E610];
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_driver_ver);
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_exp_err);
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+#define IXGBE_CHANGE_LOCK_TIMEOUT 1000
+#define IXGBE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+#define IXGBE_ACI_RES_ID_NVM 1
+#define IXGBE_ACI_RES_ID_SDP 2
+#define IXGBE_ACI_RES_ID_CHNG_LOCK 3
+#define IXGBE_ACI_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define IXGBE_ACI_RES_ACCESS_READ 1
+#define IXGBE_ACI_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for IXGBE_ACI_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_req_res);
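+
+/* Illustrative sketch (not the driver's helper): filling the parameters to
+ * request the NVM resource for reading.  Fields are little-endian on the
+ * wire, hence htole16(); the function name is local to this example.
+ */
+static inline void
+ixgbe_aci_fill_req_res_example(struct ixgbe_aci_cmd_req_res *cmd)
+{
+	cmd->res_id = htole16(IXGBE_ACI_RES_ID_NVM);
+	cmd->access_type = htole16(IXGBE_ACI_RES_ACCESS_READ);
+	/* On success the FW writes the allowed hold time into 'timeout';
+	 * the caller must release the resource (0x0009) before it expires.
+	 */
+}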
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE 0x0090
+#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID 0x0096
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(32, ixgbe_aci_cmd_list_caps_elem);
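+
+/* Illustrative sketch (not the driver's parser): walking the response
+ * buffer of 0x000A/0x000B, which carries 'count' packed 32-byte elements.
+ * le16toh()/le32toh() are the standard FreeBSD byte-order helpers; the
+ * function name is local to this example.
+ */
+static inline u32
+ixgbe_aci_find_cap_number_example(const struct ixgbe_aci_cmd_list_caps_elem *elem,
+    u32 count, u16 cap)
+{
+	u32 i;
+
+	for (i = 0; i < count; i++) {
+		if (le16toh(elem[i].cap) == cap)
+			return (le32toh(elem[i].number));
+	}
+	return (0);
+}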
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_disable_rxen);
+
+/* Get FW Event (indirect 0x0014) */
+struct ixgbe_aci_cmd_get_fw_event {
+ __le16 fw_buf_status;
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_OBTAINED BIT(0)
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_PENDING BIT(1)
+ u8 rsvd[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_fw_event);
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_S 1
+#define IXGBE_ACI_REPORT_MODE_M (7 << IXGBE_ACI_REPORT_MODE_S)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_phy_caps);
+
+/* PHY type defines (extended).
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 18
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+IXGBE_CHECK_STRUCT_LEN(560, ixgbe_aci_cmd_get_phy_caps_data);
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_restart_an);
+
+#pragma pack(1)
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_LSE_M 0x3
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ u8 reserved2[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_status);
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_S 2
+#define IXGBE_ACI_LINK_TX_M (0x03 << IXGBE_ACI_LINK_TX_S)
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+#define IXGBE_ACI_LINK_LB_PHY_IDX_S 3
+#define IXGBE_ACI_LINK_LB_PHY_IDX_M (0x7 << IXGBE_ACI_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_S 3
+#define IXGBE_ACI_CFG_PACING_M (0xF << IXGBE_ACI_CFG_PACING_S)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M 0x3F
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M 0x7FF
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+};
+#pragma pack()
+
+IXGBE_CHECK_STRUCT_LEN(56, ixgbe_aci_cmd_get_link_status_data);
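+
+/* Illustrative sketch: translating a link_speed bit into Mb/s for the
+ * speeds this family reports.  The mapping follows the bit names above;
+ * the helper name is local to this example.
+ */
+static inline u32
+ixgbe_aci_link_speed_to_mbps_example(u16 speed)
+{
+	switch (speed) {
+	case IXGBE_ACI_LINK_SPEED_10MB:
+		return (10);
+	case IXGBE_ACI_LINK_SPEED_100MB:
+		return (100);
+	case IXGBE_ACI_LINK_SPEED_1000MB:
+		return (1000);
+	case IXGBE_ACI_LINK_SPEED_2500MB:
+		return (2500);
+	case IXGBE_ACI_LINK_SPEED_5GB:
+		return (5000);
+	case IXGBE_ACI_LINK_SPEED_10GB:
+		return (10000);
+	default:
+		return (0);	/* unknown or unreported */
+	}
+}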
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_event_mask);
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_S 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M (0xF << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M \
+ (0xF << IXGBE_ACI_LINK_TOPO_NODE_CTX_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_link_topo_params);
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+#define IXGBE_ACI_LINK_TOPO_HANDLE_S 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_M (0x3FF << IXGBE_ACI_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S 6
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_M \
+ (0x7 << IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_link_topo_addr);
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo);
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ixgbe_aci_cmd_i2c {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define IXGBE_ACI_I2C_DATA_SIZE_S 0
+#define IXGBE_ACI_I2C_DATA_SIZE_M (0xF << IXGBE_ACI_I2C_DATA_SIZE_S)
+#define IXGBE_ACI_I2C_ADDR_TYPE_M BIT(4)
+#define IXGBE_ACI_I2C_ADDR_TYPE_7BIT 0
+#define IXGBE_ACI_I2C_ADDR_TYPE_10BIT IXGBE_ACI_I2C_ADDR_TYPE_M
+#define IXGBE_ACI_I2C_DATA_OFFSET_S 5
+#define IXGBE_ACI_I2C_DATA_OFFSET_M (0x3 << IXGBE_ACI_I2C_DATA_OFFSET_S)
+#define IXGBE_ACI_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_I2C_ADDR_7BIT_MASK 0x7F
+#define IXGBE_ACI_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_i2c);
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ixgbe_aci_cmd_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_i2c_resp);
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ixgbe_aci_cmd_mdio {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define IXGBE_ACI_MDIO_DEV_S 0
+#define IXGBE_ACI_MDIO_DEV_M (0x1F << IXGBE_ACI_MDIO_DEV_S)
+#define IXGBE_ACI_MDIO_CLAUSE_22 BIT(5)
+#define IXGBE_ACI_MDIO_CLAUSE_45 BIT(6)
+ u8 mdio_bus_address;
+#define IXGBE_ACI_MDIO_BUS_ADDR_S 0
+#define IXGBE_ACI_MDIO_BUS_ADDR_M (0x1F << IXGBE_ACI_MDIO_BUS_ADDR_S)
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_mdio);
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ixgbe_aci_cmd_gpio_by_func {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define IXGBE_ACI_GPIO_FUNC_S 0
+#define IXGBE_ACI_GPIO_FUNC_M (0x1F << IXGBE_ACI_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define IXGBE_ACI_GPIO_ON BIT(0)
+#define IXGBE_ACI_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio_by_func);
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_port_id_led);
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ixgbe_aci_cmd_gpio {
+ __le16 gpio_ctrl_handle;
+#define IXGBE_ACI_GPIO_HANDLE_S 0
+#define IXGBE_ACI_GPIO_HANDLE_M (0x3FF << IXGBE_ACI_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio);
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M 0x7F
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M 0x3FF
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_S 11
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_M (0x3 << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S)
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_sff_eeprom);
+
+/* Program Topology Device NVM (direct, 0x06F2) */
+struct ixgbe_aci_cmd_prog_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_prog_topo_dev_nvm);
+
+/* Read Topology Device NVM (direct, 0x06F3) */
+struct ixgbe_aci_cmd_read_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le32 start_address;
+#define IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
+ u8 data_read[IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_topo_dev_nvm);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate, offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define IXGBE_ACI_NVM_PRESERVATION_M (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_NO_PRESERVATION (0 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_FACTORY_DEFAULT (2 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM module type IDs, and the offsets and read lengths needed for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define IXGBE_ACI_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_START_POINT 0
+#define IXGBE_ACI_NVM_EMP_SR_PTR_OFFSET 0x90
+#define IXGBE_ACI_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_S 15
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define IXGBE_ACI_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define IXGBE_ACI_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define IXGBE_ACI_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define IXGBE_ACI_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+#define IXGBE_ACI_NVM_MINSREV_MOD_ID 0x130
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm);
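+
+/* Illustrative sketch: the 24-bit flash offset in struct ixgbe_aci_cmd_nvm
+ * is split across offset_low (bits 15:0) and offset_high (bits 23:16).
+ * The helper name is local to this example.
+ */
+static inline void
+ixgbe_aci_nvm_set_offset_example(struct ixgbe_aci_cmd_nvm *cmd, u32 offset)
+{
+	/* The caller must ensure offset <= IXGBE_ACI_NVM_MAX_OFFSET. */
+	cmd->offset_low = htole16(offset & 0xFFFF);
+	cmd->offset_high = (offset >> 16) & 0xFF;
+}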
+
+/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
+ * type field is excluded from the section when reading from and writing to
+ * a module using the module_typeid field with these ACI commands.
+ */
+struct ixgbe_aci_cmd_nvm_minsrev {
+ __le16 length;
+ __le16 validity;
+#define IXGBE_ACI_NVM_MINSREV_NVM_VALID BIT(0)
+#define IXGBE_ACI_NVM_MINSREV_OROM_VALID BIT(1)
+ __le16 nvm_minsrev_l;
+ __le16 nvm_minsrev_h;
+ __le16 orom_minsrev_l;
+ __le16 orom_minsrev_h;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_nvm_minsrev);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ixgbe_aci_cmd_nvm_cfg {
+ u8 cmd_flags;
+#define IXGBE_ACI_ANVM_MULTIPLE_ELEMS BIT(0)
+#define IXGBE_ACI_ANVM_IMMEDIATE_FIELD BIT(1)
+#define IXGBE_ACI_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_cfg);
+
+struct ixgbe_aci_cmd_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_nvm_cfg_data);
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_checksum);
+
+/* Used for NVM Sanitization command - 0x070C */
+struct ixgbe_aci_cmd_nvm_sanitization {
+ u8 cmd_flags;
+#define IXGBE_ACI_SANITIZE_REQ_READ 0
+#define IXGBE_ACI_SANITIZE_REQ_OPERATE BIT(0)
+
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define IXGBE_ACI_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_sanitization);
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ixgbe_aci_cmd_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_direct);
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ixgbe_aci_cmd_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_indirect);
+
+/* Done Alternate Write (direct 0x0904) */
+struct ixgbe_aci_cmd_done_alt_write {
+ u8 flags;
+#define IXGBE_ACI_CMD_UEFI_BIOS_MODE BIT(0)
+#define IXGBE_ACI_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_done_alt_write);
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ixgbe_aci_cmd_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_port_alt_write);
+
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ixgbe_aci_cmd_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 synce_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_get_cgu_abilities);
+
+#define IXGBE_ACI_NODE_HANDLE_VALID BIT(10)
+#define IXGBE_ACI_NODE_HANDLE MAKEMASK(0x3FF, 0)
+#define IXGBE_ACI_DRIVING_CLK_NUM_SHIFT 10
+#define IXGBE_ACI_DRIVING_CLK_NUM MAKEMASK(0x3F, IXGBE_ACI_DRIVING_CLK_NUM_SHIFT)
+
+/* Set CGU input config (direct 0x0C62) */
+struct ixgbe_aci_cmd_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_input_config);
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ixgbe_aci_cmd_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_input_config);
+
+/* Set CGU output config (direct 0x0C64) */
+struct ixgbe_aci_cmd_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_DPLL_SRC_SEL MAKEMASK(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_output_config);
+
+/* Get CGU output config (direct 0x0C65) */
+struct ixgbe_aci_cmd_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_output_config);
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ixgbe_aci_cmd_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ __le16 dpll_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT 8
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT 13
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_dpll_status);
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ixgbe_aci_cmd_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_CLK_REF_SEL MAKEMASK(0x1F, 0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_MODE MAKEMASK(0x7, 5)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_dpll_config);
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ixgbe_aci_cmd_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_ref_prio);
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ixgbe_aci_cmd_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_ref_prio);
+
+/* Get CGU info (direct 0x0C6A) */
+struct ixgbe_aci_cmd_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_info);
+
+struct ixgbe_aci_cmd_temp_tca_event {
+ u8 event_desc;
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_SHIFT 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_NVM 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_EVENT_STATE 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_ALL 2
+
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_SHIFT 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_CLEARED 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_CLEARED 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_RAISED 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_RAISED 3
+
+ u8 reserved;
+ __le16 temperature;
+ __le16 thermal_sensor_max_value;
+ __le16 thermal_sensor_min_value;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_temp_tca_event);
+
+/* Debug Dump Internal Data (indirect 0xFF08) */
+struct ixgbe_aci_cmd_debug_dump_internals {
+ __le16 cluster_id; /* Expresses next cluster ID in response */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_LINK 0
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 1
+ __le16 table_id; /* Used only for non-memory clusters */
+ __le32 idx; /* In table entries for tables, in bytes for memory */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_debug_dump_internals);
+
+/* Set Health Status (direct 0xFF20) */
+struct ixgbe_aci_cmd_set_health_status_config {
+ u8 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
+#define IXGBE_ACI_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
+#define IXGBE_ACI_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_health_status_config);
+
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_TYPE 0x102
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_QUAL 0x103
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_COMM 0x104
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
+#define IXGBE_ACI_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
+#define IXGBE_ACI_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST 0x113
+#define IXGBE_ACI_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DNL_INIT 0x117
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121
+#define IXGBE_ACI_HEALTH_STATUS_INFO_RECOVERY 0x500
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_AUTH 0x502
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_AUTH 0x503
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DDP_AUTH 0x504
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define IXGBE_ACI_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ixgbe_aci_cmd_get_supported_health_status_codes {
+ __le16 health_code_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_supported_health_status_codes);
+
+/* Get Health Status (indirect 0xFF22) */
+struct ixgbe_aci_cmd_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_health_status);
+
+/* Get Health Status event buffer entry, (0xFF22)
+ * repeated per reported health status
+ */
+struct ixgbe_aci_cmd_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_PF (0x1)
+#define IXGBE_ACI_HEALTH_STATUS_PORT (0x2)
+#define IXGBE_ACI_HEALTH_STATUS_GLOBAL (0x3)
+ __le32 internal_data1;
+#define IXGBE_ACI_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
+ __le32 internal_data2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_health_status_elem);
+
+/* Clear Health Status (direct 0xFF23) */
+struct ixgbe_aci_cmd_clear_health_status {
+ __le32 reserved[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_health_status);
+
+enum ixgbe_aci_fw_logging_mod {
+ IXGBE_ACI_FW_LOG_ID_GENERAL = 0,
+ IXGBE_ACI_FW_LOG_ID_CTRL = 1,
+ IXGBE_ACI_FW_LOG_ID_LINK = 2,
+ IXGBE_ACI_FW_LOG_ID_LINK_TOPO = 3,
+ IXGBE_ACI_FW_LOG_ID_DNL = 4,
+ IXGBE_ACI_FW_LOG_ID_I2C = 5,
+ IXGBE_ACI_FW_LOG_ID_SDP = 6,
+ IXGBE_ACI_FW_LOG_ID_MDIO = 7,
+ IXGBE_ACI_FW_LOG_ID_ADMINQ = 8,
+ IXGBE_ACI_FW_LOG_ID_HDMA = 9,
+ IXGBE_ACI_FW_LOG_ID_LLDP = 10,
+ IXGBE_ACI_FW_LOG_ID_DCBX = 11,
+ IXGBE_ACI_FW_LOG_ID_DCB = 12,
+ IXGBE_ACI_FW_LOG_ID_XLR = 13,
+ IXGBE_ACI_FW_LOG_ID_NVM = 14,
+ IXGBE_ACI_FW_LOG_ID_AUTH = 15,
+ IXGBE_ACI_FW_LOG_ID_VPD = 16,
+ IXGBE_ACI_FW_LOG_ID_IOSF = 17,
+ IXGBE_ACI_FW_LOG_ID_PARSER = 18,
+ IXGBE_ACI_FW_LOG_ID_SW = 19,
+ IXGBE_ACI_FW_LOG_ID_SCHEDULER = 20,
+ IXGBE_ACI_FW_LOG_ID_TXQ = 21,
+ IXGBE_ACI_FW_LOG_ID_ACL = 22,
+ IXGBE_ACI_FW_LOG_ID_POST = 23,
+ IXGBE_ACI_FW_LOG_ID_WATCHDOG = 24,
+ IXGBE_ACI_FW_LOG_ID_TASK_DISPATCH = 25,
+ IXGBE_ACI_FW_LOG_ID_MNG = 26,
+ IXGBE_ACI_FW_LOG_ID_SYNCE = 27,
+ IXGBE_ACI_FW_LOG_ID_HEALTH = 28,
+ IXGBE_ACI_FW_LOG_ID_TSDRV = 29,
+ IXGBE_ACI_FW_LOG_ID_PFREG = 30,
+ IXGBE_ACI_FW_LOG_ID_MDLVER = 31,
+ IXGBE_ACI_FW_LOG_ID_MAX = 32,
+};
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if log level is set to IXGBE_FWLOG_LEVEL_VERBOSE, then all
+ * other log levels are included (except IXGBE_FWLOG_LEVEL_NONE)
+ */
+enum ixgbe_fwlog_level {
+ IXGBE_FWLOG_LEVEL_NONE = 0,
+ IXGBE_FWLOG_LEVEL_ERROR = 1,
+ IXGBE_FWLOG_LEVEL_WARNING = 2,
+ IXGBE_FWLOG_LEVEL_NORMAL = 3,
+ IXGBE_FWLOG_LEVEL_VERBOSE = 4,
+ IXGBE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
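+
+/* Illustrative sketch of the semantics described above: an event at level
+ * 'ev' is emitted when the configured level is at least as verbose, and
+ * IXGBE_FWLOG_LEVEL_NONE disables logging entirely.  The helper name is
+ * local to this example.
+ */
+static inline bool
+ixgbe_fwlog_level_enabled_example(enum ixgbe_fwlog_level cfg,
+    enum ixgbe_fwlog_level ev)
+{
+	return (cfg != IXGBE_FWLOG_LEVEL_NONE &&
+	    ev != IXGBE_FWLOG_LEVEL_NONE && ev <= cfg);
+}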
+
+struct ixgbe_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ixgbe_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ixgbe_fwlog_module_entry module_entries[IXGBE_ACI_FW_LOG_ID_MAX];
+#define IXGBE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define IXGBE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ixgbe_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define IXGBE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ixgbe_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define IXGBE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+ /* options used to configure firmware logging */
+ u16 options;
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u8 log_resolution;
+};
+
+struct ixgbe_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ixgbe_fwlog_ring {
+ struct ixgbe_fwlog_data *rings;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define IXGBE_FWLOG_RING_SIZE_DFLT 256
+#define IXGBE_FWLOG_RING_SIZE_MAX 512
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ * Get FW Log (indirect 0xFF34)
+ * Clear FW Log (indirect 0xFF35)
+ */
+struct ixgbe_aci_cmd_fw_log {
+ u8 cmd_flags;
+#define IXGBE_ACI_FW_LOG_CONF_UART_EN BIT(0)
+#define IXGBE_ACI_FW_LOG_CONF_AQ_EN BIT(1)
+#define IXGBE_ACI_FW_LOG_QUERY_REGISTERED BIT(2)
+#define IXGBE_ACI_FW_LOG_CONF_SET_VALID BIT(3)
+#define IXGBE_ACI_FW_LOG_AQ_REGISTER BIT(0)
+#define IXGBE_ACI_FW_LOG_AQ_QUERY BIT(2)
+#define IXGBE_ACI_FW_LOG_PERSISTENT BIT(0)
+ u8 rsp_flag;
+#define IXGBE_ACI_FW_LOG_MORE_DATA BIT(1)
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define IXGBE_ACI_FW_LOG_MIN_RESOLUTION (1)
+#define IXGBE_ACI_FW_LOG_MAX_RESOLUTION (128)
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_fw_log);
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ixgbe_aci_cmd_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_fw_log_cfg_resp);
+
+/**
+ * struct ixgbe_aci_desc - Admin Command Interface (ACI) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the Admin Command Interface
+ * (ACI). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are delivered over the ACI using the same
+ * descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_generic generic;
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_fw_event get_fw_event;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_i2c read_write_i2c;
+ struct ixgbe_aci_cmd_read_i2c_resp read_i2c_resp;
+ struct ixgbe_aci_cmd_mdio read_write_mdio;
+ struct ixgbe_aci_cmd_mdio read_mdio;
+ struct ixgbe_aci_cmd_mdio write_mdio;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
+ struct ixgbe_aci_cmd_gpio_by_func read_write_gpio_by_func;
+ struct ixgbe_aci_cmd_gpio read_write_gpio;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm prog_topo_dev_nvm;
+ struct ixgbe_aci_cmd_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_cfg nvm_cfg;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_read_write_alt_direct read_write_alt_direct;
+ struct ixgbe_aci_cmd_read_write_alt_indirect read_write_alt_indirect;
+ struct ixgbe_aci_cmd_done_alt_write done_alt_write;
+ struct ixgbe_aci_cmd_clear_port_alt_write clear_port_alt_write;
+ struct ixgbe_aci_cmd_debug_dump_internals debug_dump;
+ struct ixgbe_aci_cmd_set_health_status_config
+ set_health_status_config;
+ struct ixgbe_aci_cmd_get_supported_health_status_codes
+ get_supported_health_status_codes;
+ struct ixgbe_aci_cmd_get_health_status get_health_status;
+ struct ixgbe_aci_cmd_clear_health_status clear_health_status;
+ struct ixgbe_aci_cmd_fw_log fw_log;
+ struct ixgbe_aci_cmd_nvm_sanitization nvm_sanitization;
+ } params;
+};
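+
+/* Illustrative sketch (not the driver's helper): preparing a direct
+ * Get Version (0x0001) descriptor.  Direct commands carry all parameters
+ * inside the 16-byte params union, so no buffer flags are needed; use of
+ * the SI flag here is assumed from the flag table earlier in this file,
+ * and the function name is local to this example.
+ */
+static inline void
+ixgbe_aci_fill_get_ver_desc_example(struct ixgbe_aci_desc *desc)
+{
+	memset(desc, 0, sizeof(*desc));
+	desc->opcode = htole16(ixgbe_aci_opc_get_ver);
+	desc->flags = htole16(IXGBE_ACI_FLAG_SI);
+}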
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to the IXGBE_PHY_TYPE_LOW_* and IXGBE_PHY_TYPE_HIGH_* defines */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u8 topo_media_conflict;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] #defines
+ * in struct ixgbe_aci_cmd_get_phy_caps_data.
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_common_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M 0xF
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M 0xF0
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M \
+ MAKEMASK(0xFF, IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ /* Support for OROM update in Recovery Mode. */
+ bool orom_recovery_update;
+ bool next_cluster_id_support;
+};
+
+#pragma pack(1)
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+#pragma pack()
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+struct ixgbe_aci_info {
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+ struct ixgbe_lock lock; /* admin command interface lock */
+};
+
+/* Minimum Security Revision information */
+struct ixgbe_minsrev_info {
+ u32 nvm;
+ u32 orom;
+ u8 nvm_valid : 1;
+ u8 orom_valid : 1;
+};
+
+/* Enumeration of which flash bank to read from, either the active bank or
+ * the inactive bank. Used to abstract the 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#define IXGBE_NVM_CMD_READ 0x0000000B
+#define IXGBE_NVM_CMD_WRITE 0x0000000C
+
+/* NVM Access command */
+struct ixgbe_nvm_access_cmd {
+ u32 command; /* NVM command: READ or WRITE */
+ u32 offset; /* Offset to read/write, in bytes */
+ u32 data_size; /* Size of data field, in bytes */
+};
+
+/* NVM Access data */
+struct ixgbe_nvm_access_data {
+ u32 regval; /* Storage for register value */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
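As a reading aid for the E610 header above, here is a minimal sketch of preparing a direct (no external buffer) ACI command; the helper name is hypothetical and only the descriptor layout comes from the header:

/*
 * Hypothetical sketch, not driver API: set up a direct ACI command.
 * The caller picks the opcode; the cookie fields carry opaque state
 * that firmware echoes back, and le16toh(desc->retval) holds the
 * result after completion.
 */
static void
ixgbe_aci_prep_direct(struct ixgbe_aci_desc *desc, uint16_t opcode)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = htole16(opcode);
	desc->datalen = 0;	/* direct command: 16-byte params only */
}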
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index cac3c6b5e5e7..4e48f7f33c9d 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -656,7 +656,8 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550_vf) {
+ if (hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_E610_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 60e66aeaf579..bfaf6cd69e58 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1151,13 +1151,20 @@ ixl_if_enable_intr(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
ixl_enable_intr0(hw);
/* Enable queue interrupts */
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- /* TODO: Queue index parameter is probably wrong */
- ixl_enable_queue(hw, que->rxr.me);
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ ixl_enable_queue(hw, rx_que->rxr.me);
+ } else {
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to 0x0 so that the
+ * queues can trigger interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x0);
+ }
}
/*
@@ -1175,11 +1182,13 @@ ixl_if_disable_intr(if_ctx_t ctx)
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
- ixl_disable_queue(hw, rx_que->msix - 1);
+ ixl_disable_queue(hw, rx_que->rxr.me);
} else {
- // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
- // stops queues from triggering interrupts
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to End of List (0x7FF)
+ * to stop queues from triggering interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, IXL_QUEUE_EOL);
}
}
@@ -1471,17 +1480,33 @@ ixl_if_multi_set(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
+ enum i40e_status_code status;
int mcnt;
+ if_t ifp = iflib_get_ifp(ctx);
IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
/* Delete filters for removed multicast addresses */
ixl_del_multi(vsi, false);
- mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
+ mcnt = min(if_llmaddr_count(ifp), MAX_MULTICAST_ADDR);
if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
- i40e_aq_set_vsi_multicast_promiscuous(hw,
+ /* Check if promisc mode is already enabled, if yes return */
+ if (vsi->flags & IXL_FLAGS_MC_PROMISC)
+ return;
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, TRUE, NULL);
+ if (status != I40E_SUCCESS) {
+ if_printf(ifp, "Failed to enable multicast promiscuous "
+ "mode, status: %s\n", i40e_stat_str(hw, status));
+ } else {
+ if_printf(ifp, "Enabled multicast promiscuous mode\n");
+
+ /* Set the flag to track promiscuous mode */
+ vsi->flags |= IXL_FLAGS_MC_PROMISC;
+ }
+ /* Delete all existing MC filters */
ixl_del_multi(vsi, true);
return;
}
@@ -1684,6 +1709,13 @@ ixl_if_promisc_set(if_ctx_t ctx, int flags)
return (err);
err = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, multi, NULL);
+
+ /* Update the multicast promiscuous flag based on the new state */
+ if (multi)
+ vsi->flags |= IXL_FLAGS_MC_PROMISC;
+ else
+ vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
+
return (err);
}
@@ -1776,7 +1808,7 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_OPACKETS:
return (vsi->opackets);
case IFCOUNTER_OERRORS:
- return (vsi->oerrors);
+ return (if_get_counter_default(ifp, cnt) + vsi->oerrors);
case IFCOUNTER_COLLISIONS:
/* Collisions are by standard impossible in 40G/10G Ethernet */
return (0);
@@ -1791,7 +1823,7 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_IQDROPS:
return (vsi->iqdrops);
case IFCOUNTER_OQDROPS:
- return (vsi->oqdrops);
+ return (if_get_counter_default(ifp, cnt) + vsi->oqdrops);
case IFCOUNTER_NOPROTO:
return (vsi->noproto);
default:
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 95379448b570..ab0f38307d90 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -202,6 +202,7 @@
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define IXL_FLAGS_USES_MSIX (1 << 2)
#define IXL_FLAGS_IS_VF (1 << 3)
+#define IXL_FLAGS_MC_PROMISC (1 << 4)
#define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0)
#define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0)
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
index 1752efc02fff..b62619ced5cb 100644
--- a/sys/dev/ixl/ixl_pf_main.c
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -593,24 +593,29 @@ ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
* Routines for multicast and vlan filter management.
*
*********************************************************************/
+
+/**
+ * ixl_add_multi - Add multicast filters to the hardware
+ * @vsi: The VSI structure
+ *
+ * If the number of multicast filters in the IFP exceeds 127 entries,
+ * multicast promiscuous mode is enabled and the filters are removed
+ * from the hardware
+ */
void
ixl_add_multi(struct ixl_vsi *vsi)
{
if_t ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
int mcnt = 0;
struct ixl_add_maddr_arg cb_arg;
IOCTL_DEBUGOUT("ixl_add_multi: begin");
- mcnt = if_llmaddr_count(ifp);
- if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, TRUE, NULL);
- /* delete all existing MC filters */
- ixl_del_multi(vsi, true);
- return;
- }
+ /*
+ * There is no need to check if the number of multicast addresses
+ * exceeds the MAX_MULTICAST_ADDR threshold and set promiscuous mode
+ * here, as all callers already handle this case.
+ */
cb_arg.vsi = vsi;
LIST_INIT(&cb_arg.to_add);
@@ -633,30 +638,103 @@ ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
return (0);
}
+/**
+ * ixl_dis_multi_promisc - Disable multicast promiscuous mode
+ * @vsi: The VSI structure
+ * @vsi_mcnt: Number of multicast filters in the VSI
+ *
+ * Disable multicast promiscuous mode based on the number of entries in
+ * the IFP and the VSI, then re-add the multicast filters.
+ */
+static void
+ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int ifp_mcnt = 0;
+ enum i40e_status_code status;
+
+ /*
+ * Check if multicast promiscuous mode was actually enabled.
+ * If promiscuous mode was not enabled, don't attempt to disable it.
+ * Also, don't disable if IFF_PROMISC or IFF_ALLMULTI is set.
+ */
+ if (!(vsi->flags & IXL_FLAGS_MC_PROMISC) ||
+ (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)))
+ return;
+
+ ifp_mcnt = if_llmaddr_count(ifp);
+ /*
+ * Equal lists or an empty ifp list mean the list has not changed; in
+ * that case avoid disabling multicast promiscuous mode, as it was not
+ * previously enabled. The case where multicast promiscuous mode has
+ * been enabled is vsi_mcnt == 0 && ifp_mcnt > 0.
+ */
+ if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 ||
+ ifp_mcnt >= MAX_MULTICAST_ADDR)
+ return;
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ FALSE, NULL);
+ if (status != I40E_SUCCESS) {
+ if_printf(ifp, "Failed to disable multicast promiscuous "
+ "mode, status: %s\n", i40e_stat_str(hw, status));
+
+ return;
+ }
+
+ /* Clear the flag since promiscuous mode is now disabled */
+ vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
+ if_printf(ifp, "Disabled multicast promiscuous mode\n");
+
+ ixl_add_multi(vsi);
+}
+
+/**
+ * ixl_del_multi - Delete multicast filters from the hardware
+ * @vsi: The VSI structure
+ * @all: Bool to determine if all the multicast filters should be removed
+ *
+ * If the number of multicast filters in the IFP drops to 127 entries,
+ * multicast promiscuous mode is disabled and the filters are reapplied
+ * to the hardware.
+ */
void
ixl_del_multi(struct ixl_vsi *vsi, bool all)
{
- struct ixl_ftl_head to_del;
+ int to_del_cnt = 0, vsi_mcnt = 0;
if_t ifp = vsi->ifp;
struct ixl_mac_filter *f, *fn;
- int mcnt = 0;
+ struct ixl_ftl_head to_del;
IOCTL_DEBUGOUT("ixl_del_multi: begin");
LIST_INIT(&to_del);
/* Search for removed multicast addresses */
LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
- if ((f->flags & IXL_FILTER_MC) == 0 ||
- (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
+ if ((f->flags & IXL_FILTER_MC) == 0)
+ continue;
+
+ /* Count all the multicast filters in the VSI for comparison */
+ vsi_mcnt++;
+
+ if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0)
continue;
LIST_REMOVE(f, ftle);
LIST_INSERT_HEAD(&to_del, f, ftle);
- mcnt++;
+ to_del_cnt++;
}
- if (mcnt > 0)
- ixl_del_hw_filters(vsi, &to_del, mcnt);
+ if (to_del_cnt > 0) {
+ ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
+ return;
+ }
+
+ ixl_dis_multi_promisc(vsi, vsi_mcnt);
+
+ IOCTL_DEBUGOUT("ixl_del_multi: end");
}
void
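Condensed, the ixl changes above implement a small state machine around MAX_MULTICAST_ADDR, tracked by the new IXL_FLAGS_MC_PROMISC flag. A simplified sketch of the decision logic (not the literal driver code; the AQ calls are elided to comments):

/*
 * Sketch of the multicast-promiscuous transitions implemented by
 * ixl_if_multi_set() and ixl_dis_multi_promisc() above.
 */
static void
ixl_mc_promisc_update(struct ixl_vsi *vsi, int ifp_mcnt, int vsi_mcnt)
{
	if (ifp_mcnt >= MAX_MULTICAST_ADDR) {
		if (!(vsi->flags & IXL_FLAGS_MC_PROMISC)) {
			/* i40e_aq_set_vsi_multicast_promiscuous(..., TRUE, ...) */
			vsi->flags |= IXL_FLAGS_MC_PROMISC;
		}
	} else if ((vsi->flags & IXL_FLAGS_MC_PROMISC) &&
	    ifp_mcnt != vsi_mcnt && ifp_mcnt != 0) {
		/* i40e_aq_set_vsi_multicast_promiscuous(..., FALSE, ...) */
		vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
		/* then re-add the individual filters */
	}
}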
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index 79a3213c6802..2e4f3967ace4 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -263,6 +263,8 @@ malo_attach(uint16_t devid, struct malo_softc *sc)
;
IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr);
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Transmit requires space in the packet for a special format transmit
* record and optional padding between this record and the payload.
@@ -1040,6 +1042,8 @@ malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
struct ieee80211_key *k;
diff --git a/sys/dev/mii/mv88e151x.c b/sys/dev/mii/mv88e151x.c
index 618ad81471c9..fb03b2a7a917 100644
--- a/sys/dev/mii/mv88e151x.c
+++ b/sys/dev/mii/mv88e151x.c
@@ -97,7 +97,7 @@ mv88e151x_attach(device_t dev)
{
const struct mii_attach_args *ma;
struct mii_softc *sc;
- uint32_t cop_cap, cop_extcap;
+ uint32_t cop_cap = 0, cop_extcap = 0;
sc = device_get_softc(dev);
ma = device_get_ivars(dev);
@@ -224,10 +224,12 @@ mv88e151x_fiber_status(struct mii_softc *phy)
else if (reg & MV88E151X_STATUS_LINK &&
reg & MV88E151X_STATUS_SYNC &&
(reg & MV88E151X_STATUS_ENERGY) == 0) {
- if ((reg & MV88E151X_STATUS_SPEED_MASK) ==
+ if (((reg & MV88E151X_STATUS_SPEED_MASK) >>
+ MV88E151X_STATUS_SPEED_SHIFT) ==
MV88E151X_STATUS_SPEED_1000)
mii->mii_media_active |= IFM_1000_SX;
- else if ((reg & MV88E151X_STATUS_SPEED_MASK) ==
+ else if (((reg & MV88E151X_STATUS_SPEED_MASK) >>
+ MV88E151X_STATUS_SPEED_SHIFT) ==
MV88E151X_STATUS_SPEED_100)
mii->mii_media_active |= IFM_100_FX;
else
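The mv88e151x change above fixes the classic mask-without-shift bug: a multi-bit field compared against a small constant never matches unless the field starts at bit 0. A self-contained illustration with made-up register values:

#include <stdbool.h>
#include <stdint.h>

#define SPEED_MASK	0xc000	/* field in bits 15:14 (illustrative) */
#define SPEED_SHIFT	14
#define SPEED_1000	2	/* field value, relative to bit 0 */

static bool
is_gigabit(uint16_t reg)
{
	/* Wrong: (reg & SPEED_MASK) yields 0x8000, never 2. */
	/* Right: shift the field down to bit 0 before comparing. */
	return (((reg & SPEED_MASK) >> SPEED_SHIFT) == SPEED_1000);
}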
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
index fb9ca94278db..d1f454a5ec41 100644
--- a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
@@ -1134,6 +1134,11 @@ static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
setup_fte_no_frags(spec);
setup_fte_reg_a_with_tag(spec, sa_entry->kspi);
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1169,6 +1174,10 @@ static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
if(attrs->reqid) {
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
setup_fte_no_frags(spec);
setup_fte_reg_c0(spec, attrs->reqid);
rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
@@ -1181,6 +1190,11 @@ static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
memset(spec, 0, sizeof(*spec));
}
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
if (attrs->family == AF_INET)
setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
else
@@ -1322,6 +1336,11 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_mod_header;
}
+ if (attrs->vid != VLAN_NONE)
+ setup_fte_vid(spec, attrs->vid);
+ else
+ setup_fte_no_vid(spec);
+
flow_act.flags |= FLOW_ACT_NO_APPEND;
dest[dstn].ft = tx->ft.sa;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
index 978e5f25ceaf..cc0bc1f3fcd2 100644
--- a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
@@ -120,7 +120,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
switch (attrs->dir) {
case IPSEC_DIR_OUTBOUND:
- if (attrs->replay_esn.replay_window != 0)
+ if (attrs->replay_esn.trigger)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
else
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_MODE);
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
index 6e24395b5577..c45f02cdaf42 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
@@ -1783,8 +1783,8 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
el->refcount++;
if (el->installed)
return (0);
- }
- el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
+ } else
+ el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_add_vxlan_rule_from_db(priv, el);
diff --git a/sys/dev/mpi3mr/mpi3mr.c b/sys/dev/mpi3mr/mpi3mr.c
index 99edd3542619..bcf8f46ddf5d 100644
--- a/sys/dev/mpi3mr/mpi3mr.c
+++ b/sys/dev/mpi3mr/mpi3mr.c
@@ -2799,10 +2799,11 @@ retry_init:
U32 fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
MPI3_SYSIF_FAULT_CODE_MASK;
- if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER)
+ if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER) {
mpi3mr_dprint(sc, MPI3MR_INFO,
"controller faulted due to insufficient power, try by connecting it in a different slot\n");
goto err;
+ }
U32 host_diagnostic;
timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
@@ -4486,7 +4487,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
Mpi3SuccessReplyDescriptor_t *success_desc;
Mpi3DefaultReply_t *def_reply = NULL;
struct mpi3mr_drvr_cmd *cmdptr = NULL;
- Mpi3SCSIIOReply_t *scsi_reply;
+ Mpi3SCSIIOReply_t *scsi_reply = NULL;
U8 *sense_buf = NULL;
*reply_dma = 0;
@@ -4589,7 +4590,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
}
}
out:
- if (sense_buf != NULL)
+ if (scsi_reply != NULL && sense_buf != NULL)
mpi3mr_repost_sense_buf(sc,
scsi_reply->SenseDataBufferAddress);
return;
@@ -6161,7 +6162,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
{
int retval = -1;
U8 unlock_retry_count = 0;
- U32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
+ U32 host_diagnostic = 0, ioc_status, ioc_config, scratch_pad0;
U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
diff --git a/sys/dev/mpi3mr/mpi3mr_cam.c b/sys/dev/mpi3mr/mpi3mr_cam.c
index 77e25339a1a9..a5120e2788db 100644
--- a/sys/dev/mpi3mr/mpi3mr_cam.c
+++ b/sys/dev/mpi3mr/mpi3mr_cam.c
@@ -1856,10 +1856,11 @@ int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
"Poll reply queue once\n", target_outstanding, target->per_id);
mpi3mr_poll_pend_io_completions(sc);
target_outstanding = mpi3mr_atomic_read(&target->outstanding);
- if (target_outstanding)
+ if (target_outstanding) {
target_outstanding = mpi3mr_atomic_read(&target->outstanding);
mpi3mr_dprint(sc, MPI3MR_ERROR, "[%2d] outstanding IOs present on target: %d "
- "despite poll\n", target_outstanding, target->per_id);
+ "despite poll\n", target_outstanding, target->per_id);
+ }
}
if (target->exposed_to_os && !sc->reset_in_progress) {
diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c
index d1c572e40669..262d6b58b705 100644
--- a/sys/dev/mpr/mpr.c
+++ b/sys/dev/mpr/mpr.c
@@ -1729,6 +1729,7 @@ mpr_get_tunables(struct mpr_softc *sc)
sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
sc->use_phynum = 1;
+ sc->encl_min_slots = 0;
sc->max_reqframes = MPR_REQ_FRAMES;
sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
sc->max_replyframes = MPR_REPLY_FRAMES;
@@ -1748,6 +1749,7 @@ mpr_get_tunables(struct mpr_softc *sc)
TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
+ TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots);
TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
@@ -1797,6 +1799,10 @@ mpr_get_tunables(struct mpr_softc *sc)
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.encl_min_slots",
+ device_get_unit(sc->mpr_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots);
+
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
@@ -1951,6 +1957,10 @@ mpr_setup_sysctl(struct mpr_softc *sc)
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
&sc->prp_page_alloc_fail, "PRP page allocation failures");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0,
+ "force enclosure minimum slots");
}
static struct mpr_debug_string {
diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c
index f9a9ac1c53d0..38aa4dfc7ef2 100644
--- a/sys/dev/mpr/mpr_mapping.c
+++ b/sys/dev/mpr/mpr_mapping.c
@@ -2785,6 +2785,8 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
* DPM, if it's being used.
*/
if (enc_idx != MPR_ENCTABLE_BAD_IDX) {
+ u16 new_num_slots;
+
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->init_complete &&
!et_entry->missing_count) {
@@ -2796,6 +2798,17 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->start_slot = le16toh(event_data->StartSlot);
+ new_num_slots = le16toh(event_data->NumSlots);
+ if (new_num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d num_slots %d, overriding with %d.\n",
+ __func__, enc_idx, new_num_slots, sc->encl_min_slots);
+ new_num_slots = sc->encl_min_slots;
+ }
+ if (et_entry->num_slots != new_num_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d old num_slots %d, new %d.\n",
+ __func__, enc_idx, et_entry->num_slots, new_num_slots);
+ et_entry->num_slots = new_num_slots;
+ }
saved_phy_bits = et_entry->phy_bits;
et_entry->phy_bits |= le32toh(event_data->PhyBits);
if (saved_phy_bits != et_entry->phy_bits)
@@ -2858,6 +2871,11 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->start_index = MPR_MAPTABLE_BAD_IDX;
et_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
et_entry->num_slots = le16toh(event_data->NumSlots);
+ if (et_entry->num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure %d num_slots is %d, overriding with %d.\n",
+ __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots);
+ et_entry->num_slots = sc->encl_min_slots;
+ }
et_entry->start_slot = le16toh(event_data->StartSlot);
et_entry->phy_bits = le32toh(event_data->PhyBits);
}
diff --git a/sys/dev/mpr/mprvar.h b/sys/dev/mpr/mprvar.h
index 0f1743f4266e..93f3fbffe079 100644
--- a/sys/dev/mpr/mprvar.h
+++ b/sys/dev/mpr/mprvar.h
@@ -366,6 +366,7 @@ struct mpr_softc {
int spinup_wait_time;
int use_phynum;
int dump_reqs_alltypes;
+ int encl_min_slots;
uint64_t chain_alloc_fail;
uint64_t prp_page_alloc_fail;
struct sysctl_ctx_list sysctl_ctx;
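With the tunable plumbed through as above, the slot-count override can be seeded at boot or changed at runtime; an illustrative use (the value 24 is arbitrary):

# /boot/loader.conf: apply to every mpr(4) adapter, or a single unit
hw.mpr.encl_min_slots="24"
dev.mpr.0.encl_min_slots="24"

# or at runtime, since the new sysctl is CTLFLAG_RW:
sysctl dev.mpr.0.encl_min_slots=24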
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index d69c8ea5fded..fa0f817ed67b 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -858,7 +858,7 @@ mps_detach_sas(struct mps_softc *sc)
if (sassc->devq != NULL)
cam_simq_free(sassc->devq);
- for(i=0; i< sassc->maxtargets ;i++) {
+ for (i = 0; i < sassc->maxtargets; i++) {
targ = &sassc->targets[i];
SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
free(lun, M_MPT2);
@@ -3396,7 +3396,7 @@ mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
* the allocated LUNs for each target and then the target buffer
* itself.
*/
- for (i=0; i< maxtargets; i++) {
+ for (i = 0; i < maxtargets; i++) {
targ = &sassc->targets[i];
SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
free(lun, M_MPT2);
diff --git a/sys/dev/mpt/mpt_raid.c b/sys/dev/mpt/mpt_raid.c
index 5ff08ffcf2b3..2b868f6ef070 100644
--- a/sys/dev/mpt/mpt_raid.c
+++ b/sys/dev/mpt/mpt_raid.c
@@ -830,7 +830,7 @@ mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
}
ioc_vol = mpt->ioc_page2->RaidVolume;
ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
- for (;ioc_vol != ioc_last_vol; ioc_vol++) {
+ for (; ioc_vol != ioc_last_vol; ioc_vol++) {
if (ioc_vol->VolumeID == tgt) {
return (1);
}
@@ -1406,7 +1406,7 @@ mpt_refresh_raid_data(struct mpt_softc *mpt)
ioc_vol = mpt->ioc_page2->RaidVolume;
ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
- for (;ioc_vol != ioc_last_vol; ioc_vol++) {
+ for (; ioc_vol != ioc_last_vol; ioc_vol++) {
struct mpt_raid_volume *mpt_vol;
mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 2570cbce525b..9f3d34f4f50d 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -433,6 +433,8 @@ mwl_attach(uint16_t devid, struct mwl_softc *sc)
| IEEE80211_HTC_SMPS /* SMPS available */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Mark h/w crypto support.
* XXX no way to query h/w support.
@@ -1797,7 +1799,7 @@ mwl_updateslot(struct ieee80211com *ic)
return;
/*
- * Calculate the ERP flags. The firwmare will use
+ * Calculate the ERP flags. The firmware will use
* this to carry out the appropriate measures.
*/
prot = 0;
@@ -3087,6 +3089,8 @@ mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
const struct ieee80211_cipher *cip;
struct ieee80211_key *k;
@@ -4017,7 +4021,7 @@ mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
- if (ni->ni_chw != IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw != NET80211_STA_RX_BW_40)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
}
return pi;
diff --git a/sys/dev/nctgpio/nctgpio.c b/sys/dev/nctgpio/nctgpio.c
index 75ea1fbdba17..ddc2ceef7dfb 100644
--- a/sys/dev/nctgpio/nctgpio.c
+++ b/sys/dev/nctgpio/nctgpio.c
@@ -1258,13 +1258,14 @@ nct_attach(device_t dev)
GPIO_UNLOCK(sc);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "failed to attach to gpiobus\n");
GPIO_LOCK_DESTROY(sc);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
index bf14bfdb73ea..9c06f7fec530 100644
--- a/sys/dev/netmap/if_ptnet.c
+++ b/sys/dev/netmap/if_ptnet.c
@@ -27,8 +27,9 @@
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>
+#include "opt_inet.h"
+#include "opt_inet6.h"
-#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -75,9 +76,6 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c
index 4625c2616562..265181ef7ad0 100644
--- a/sys/dev/nfe/if_nfe.c
+++ b/sys/dev/nfe/if_nfe.c
@@ -2078,7 +2078,7 @@ nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
BUS_DMASYNC_POSTREAD);
- for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
+ for (prog = 0; ; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
if (count <= 0)
break;
count--;
@@ -2192,7 +2192,7 @@ nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
BUS_DMASYNC_POSTREAD);
- for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
+ for (prog = 0; ; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
vtag = 0) {
if (count <= 0)
break;
diff --git a/sys/dev/null/null.c b/sys/dev/null/null.c
index 7ffc618e63ee..8525eb9543c3 100644
--- a/sys/dev/null/null.c
+++ b/sys/dev/null/null.c
@@ -4,6 +4,7 @@
* Copyright (c) 2000 Mark R. V. Murray & Jeroen C. van Gelderen
* Copyright (c) 2001-2004 Mark R. V. Murray
* Copyright (c) 2014 Eitan Adler
+ * Copyright (c) 2025 Pietro Cerutti
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +40,7 @@
#include <sys/disk.h>
#include <sys/bus.h>
#include <sys/filio.h>
+#include <sys/event.h>
#include <machine/bus.h>
#include <machine/vmparam.h>
@@ -53,12 +55,26 @@ static d_write_t null_write;
static d_ioctl_t null_ioctl;
static d_ioctl_t zero_ioctl;
static d_read_t zero_read;
+static d_kqfilter_t kqfilter;
+static int one_ev(struct knote *kn, long hint);
+static int zero_ev(struct knote *kn, long hint);
+
+static const struct filterops one_fop = {
+ .f_isfd = 1,
+ .f_event = one_ev
+};
+
+static const struct filterops zero_fop = {
+ .f_isfd = 1,
+ .f_event = zero_ev
+};
static struct cdevsw full_cdevsw = {
.d_version = D_VERSION,
.d_read = zero_read,
.d_write = full_write,
.d_ioctl = zero_ioctl,
+ .d_kqfilter = kqfilter,
.d_name = "full",
};
@@ -67,6 +83,7 @@ static struct cdevsw null_cdevsw = {
.d_read = (d_read_t *)nullop,
.d_write = null_write,
.d_ioctl = null_ioctl,
+ .d_kqfilter = kqfilter,
.d_name = "null",
};
@@ -75,6 +92,7 @@ static struct cdevsw zero_cdevsw = {
.d_read = zero_read,
.d_write = null_write,
.d_ioctl = zero_ioctl,
+ .d_kqfilter = kqfilter,
.d_name = "zero",
.d_flags = D_MMAP_ANON,
};
@@ -197,5 +215,35 @@ null_modevent(module_t mod __unused, int type, void *data __unused)
return (0);
}
+static int
+one_ev(struct knote *kn, long hint)
+{
+ return (1);
+}
+
+static int
+zero_ev(struct knote *kn, long hint)
+{
+ return (0);
+}
+
+static int
+kqfilter(struct cdev *dev, struct knote *kn)
+{
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ kn->kn_fop = dev->si_devsw == &null_cdevsw ? &zero_fop : &one_fop;
+ return (0);
+ case EVFILT_WRITE:
+ kn->kn_fop = dev->si_devsw == &full_cdevsw ? &zero_fop : &one_fop;
+ return (0);
+ default:
+ return (EOPNOTSUPP);
+ }
+}
+
DEV_MODULE(null, null_modevent, NULL);
MODULE_VERSION(null, 1);
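A userspace sketch of the behaviour the new filters give: with the table above, EVFILT_READ is always ready on /dev/zero and /dev/full and never ready on /dev/null, while EVFILT_WRITE is never ready only on /dev/full:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev, ret;
	struct timespec ts = { 0, 0 };	/* poll, don't block */
	int fd = open("/dev/zero", O_RDONLY);
	int kq = kqueue();

	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);
	/* Prints 1: /dev/zero is always readable. The same probe on
	 * /dev/null prints 0, since its filter always returns 0. */
	printf("%d\n", kevent(kq, NULL, 0, &ret, 1, &ts));
	close(kq);
	close(fd);
	return (0);
}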
diff --git a/sys/dev/nvme/nvme.c b/sys/dev/nvme/nvme.c
index 84f365024f13..d119f9877aaa 100644
--- a/sys/dev/nvme/nvme.c
+++ b/sys/dev/nvme/nvme.c
@@ -51,7 +51,7 @@ int32_t nvme_retry_count;
MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");
static void
-nvme_init(void)
+nvme_init(void *dummy __unused)
{
uint32_t i;
@@ -62,7 +62,7 @@ nvme_init(void)
SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);
static void
-nvme_uninit(void)
+nvme_uninit(void *dummy __unused)
{
}
@@ -295,7 +295,6 @@ nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{
-
consumer->id = INVALID_CONSUMER_ID;
}
diff --git a/sys/dev/nvme/nvme.h b/sys/dev/nvme/nvme.h
index 17c5cdb4db87..f4ea08f129c0 100644
--- a/sys/dev/nvme/nvme.h
+++ b/sys/dev/nvme/nvme.h
@@ -1507,9 +1507,7 @@ struct nvme_namespace_data {
uint8_t eui64[8];
/** lba format support */
- uint32_t lbaf[16];
-
- uint8_t reserved7[192];
+ uint32_t lbaf[64];
uint8_t vendor_specific[3712];
} __packed __aligned(4);
@@ -2155,8 +2153,6 @@ static inline
void nvme_namespace_data_swapbytes(struct nvme_namespace_data *s __unused)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
- int i;
-
s->nsze = le64toh(s->nsze);
s->ncap = le64toh(s->ncap);
s->nuse = le64toh(s->nuse);
@@ -2175,7 +2171,7 @@ void nvme_namespace_data_swapbytes(struct nvme_namespace_data *s __unused)
s->anagrpid = le32toh(s->anagrpid);
s->nvmsetid = le16toh(s->nvmsetid);
s->endgid = le16toh(s->endgid);
- for (i = 0; i < 16; i++)
+ for (unsigned i = 0; i < nitems(s->lbaf); i++)
s->lbaf[i] = le32toh(s->lbaf[i]);
#endif
}
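The widened array tracks NVMe 2.0, which allows up to 64 LBA formats; the 192 reserved bytes removed above are exactly the 48 missing entries, so the structure size is unchanged. As a reminder of how an entry is consumed, a sketch of decoding the active format's block size (field layout per the NVMe spec; the helper is illustrative, not a driver API, and the extended format-index bits in FLBAS are ignored):

/* lbaf entry: MS in bits 15:0, LBADS (log2 block size) in 23:16, RP in 25:24 */
static uint32_t
nvme_ns_block_size_sketch(const struct nvme_namespace_data *nsd)
{
	uint8_t fmt = nsd->flbas & 0x0f;	/* low bits index lbaf[] */
	uint8_t lbads = (nsd->lbaf[fmt] >> 16) & 0xff;

	return (1u << lbads);
}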
diff --git a/sys/dev/nvme/nvme_ahci.c b/sys/dev/nvme/nvme_ahci.c
index 888207a454f7..b06661226d34 100644
--- a/sys/dev/nvme/nvme_ahci.c
+++ b/sys/dev/nvme/nvme_ahci.c
@@ -124,6 +124,5 @@ bad:
static int
nvme_ahci_detach(device_t dev)
{
-
return (nvme_detach(dev));
}
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index fd7f00ced14b..3a1894bf754d 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -41,6 +41,9 @@
#include <sys/endian.h>
#include <sys/stdarg.h>
#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
#include "nvme_private.h"
#include "nvme_linux.h"
@@ -597,7 +600,6 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
static bool
is_log_page_id_valid(uint8_t page_id)
{
-
switch (page_id) {
case NVME_LOG_ERROR:
case NVME_LOG_HEALTH_INFORMATION:
@@ -653,7 +655,6 @@ static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
uint8_t state)
{
-
if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
@@ -781,7 +782,6 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{
-
ctrlr->int_coal_time = 0;
TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
&ctrlr->int_coal_time);
@@ -1268,6 +1268,34 @@ nvme_ctrlr_shared_handler(void *arg)
nvme_mmio_write_4(ctrlr, intmc, 1);
}
+#define NVME_MAX_PAGES (int)(1024 / sizeof(vm_page_t))
+
+static int
+nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read,
+ vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ vm_prot_t prot = VM_PROT_READ;
+ int err;
+
+ if (is_read)
+ prot |= VM_PROT_WRITE; /* Device will write to host memory */
+ err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map,
+ addr, len, prot, upages, max_pages, npagesp);
+ if (err != 0)
+ return (err);
+ *req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
+ (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK);
+ (*req)->payload_valid = true;
+ return (0);
+}
+
+static void
+nvme_user_ioctl_free(vm_page_t *pages, int npage)
+{
+ vm_page_unhold_pages(pages, npage);
+}
+
static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
@@ -1290,30 +1318,28 @@ nvme_pt_done(void *arg, const struct nvme_completion *cpl)
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
- struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
+ struct nvme_pt_command *pt, uint32_t nsid, int is_user,
int is_admin_cmd)
{
- struct nvme_request *req;
- struct mtx *mtx;
- struct buf *buf = NULL;
- int ret = 0;
+ struct nvme_request *req;
+ struct mtx *mtx;
+ int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
if (pt->len > 0) {
if (pt->len > ctrlr->max_xfer_size) {
- nvme_printf(ctrlr, "pt->len (%d) "
- "exceeds max_xfer_size (%d)\n", pt->len,
- ctrlr->max_xfer_size);
- return EIO;
+ nvme_printf(ctrlr,
+ "len (%d) exceeds max_xfer_size (%d)\n",
+ pt->len, ctrlr->max_xfer_size);
+ return (EIO);
}
- if (is_user_buffer) {
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
- if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
- M_WAITOK, nvme_pt_done, pt);
+ if (is_user) {
+ ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len,
+ pt->is_read, upages, nitems(upages), &npages, &req,
+ nvme_pt_done, pt);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(pt->buf, pt->len,
M_WAITOK, nvme_pt_done, pt);
@@ -1347,11 +1373,8 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1377,8 +1400,9 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
{
struct nvme_request *req;
struct mtx *mtx;
- struct buf *buf = NULL;
int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
/*
* We don't support metadata.
@@ -1389,28 +1413,16 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
if (npc->data_len > 0 && npc->addr != 0) {
if (npc->data_len > ctrlr->max_xfer_size) {
nvme_printf(ctrlr,
- "npc->data_len (%d) exceeds max_xfer_size (%d)\n",
+ "data_len (%d) exceeds max_xfer_size (%d)\n",
npc->data_len, ctrlr->max_xfer_size);
return (EIO);
}
- /*
- * We only support data out or data in commands, but not both at
- * once. However, there's some comands with lower bit cleared
- * that are really read commands, so we should filter & 3 == 0,
- * but don't.
- */
- if ((npc->opcode & 0x3) == 3)
- return (EINVAL);
if (is_user) {
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
- if (vmapbuf(buf, (void *)(uintptr_t)npc->addr,
- npc->data_len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data,
- npc->data_len, M_WAITOK, nvme_npc_done, npc);
+ ret = nvme_user_ioctl_req(npc->addr, npc->data_len,
+ npc->opcode & 0x1, upages, nitems(upages), &npages,
+ &req, nvme_npc_done, npc);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(
(void *)(uintptr_t)npc->addr, npc->data_len,
@@ -1420,8 +1432,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
req->cmd.opc = npc->opcode;
req->cmd.fuse = npc->flags;
- req->cmd.rsvd2 = htole16(npc->cdw2);
- req->cmd.rsvd3 = htole16(npc->cdw3);
+ req->cmd.rsvd2 = htole32(npc->cdw2);
+ req->cmd.rsvd3 = htole32(npc->cdw3);
req->cmd.cdw10 = htole32(npc->cdw10);
req->cmd.cdw11 = htole32(npc->cdw11);
req->cmd.cdw12 = htole32(npc->cdw12);
@@ -1445,11 +1457,8 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1776,7 +1785,6 @@ void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
struct nvme_request *req)
{
-
nvme_qpair_submit_request(&ctrlr->adminq, req);
}
@@ -1793,14 +1801,12 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{
-
return (ctrlr->dev);
}
const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{
-
return (&ctrlr->cdata);
}
@@ -1853,7 +1859,6 @@ nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
int
nvme_ctrlr_resume(struct nvme_controller *ctrlr)
{
-
/*
* Can't touch failed controllers, so nothing to do to resume.
*/
diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c
index 993a7718356d..5a44ed425acb 100644
--- a/sys/dev/nvme/nvme_ctrlr_cmd.c
+++ b/sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -281,7 +281,6 @@ nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
struct nvme_error_information_entry *payload, uint32_t num_entries,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
/* Controller's error log page entries is 0-based. */
@@ -302,7 +301,6 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
uint32_t nsid, struct nvme_health_information_page *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
@@ -311,7 +309,6 @@ void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
cb_arg);
diff --git a/sys/dev/nvme/nvme_ns.c b/sys/dev/nvme/nvme_ns.c
index 3f29382fe42f..e84d2066930e 100644
--- a/sys/dev/nvme/nvme_ns.c
+++ b/sys/dev/nvme/nvme_ns.c
@@ -129,7 +129,6 @@ static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
-
return (0);
}
@@ -231,7 +230,6 @@ nvme_ns_get_model_number(struct nvme_namespace *ns)
const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{
-
return (&ns->data);
}
@@ -631,7 +629,6 @@ nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
void
nvme_ns_destruct(struct nvme_namespace *ns)
{
-
if (ns->cdev != NULL) {
if (ns->cdev->si_drv2 != NULL)
destroy_dev(ns->cdev->si_drv2);
diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c
index 29b49b7df403..c07a68d2f0dc 100644
--- a/sys/dev/nvme/nvme_pci.c
+++ b/sys/dev/nvme/nvme_pci.c
@@ -151,7 +151,6 @@ nvme_pci_probe (device_t device)
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
-
ctrlr->resource_id = PCIR_BAR(0);
ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 36f00fedc48e..52e9fcbbebcd 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -459,18 +459,17 @@ int nvme_detach(device_t dev);
* vast majority of these without waiting for a tick plus scheduling delays. Since
* these are on startup, this drastically reduces startup time.
*/
-static __inline
-void
+static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
int timeout = ticks + 10 * hz;
- sbintime_t delta_t = SBT_1US;
+ sbintime_t delta = SBT_1US;
while (!atomic_load_acq_int(&status->done)) {
if (timeout - ticks < 0)
panic("NVME polled command failed to complete within 10s.");
- pause_sbt("nvme", delta_t, 0, C_PREL(1));
- delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ pause_sbt("nvme", delta, 0, C_PREL(1));
+ delta = min(SBT_1MS, delta + delta / 2);
}
}
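The rewritten backoff is arithmetically the same schedule as before (floor(3d/2) equals d + floor(d/2)), just renamed and expressed without the multiply. A standalone sketch of the resulting poll cadence, using nanoseconds to stand in for sbintime_t:

#include <stdio.h>

int
main(void)
{
	long delta = 1000, total = 0;	/* 1 us start, as SBT_1US */

	while (delta < 1000000) {	/* SBT_1MS cap */
		printf("sleep %ld ns\n", delta);
		total += delta;
		delta += delta / 2;	/* grow 1.5x per miss */
	}
	printf("cap reached after ~%ld us asleep; 1 ms polls follow\n",
	    total / 1000);
	return (0);
}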
diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
index bd8626e32209..4f2c44da3b4f 100644
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -793,7 +793,6 @@ nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{
-
nvme_qpair_destroy(qpair);
}
@@ -1202,7 +1201,6 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
-
mtx_lock(&qpair->lock);
_nvme_qpair_submit_request(qpair, req);
mtx_unlock(&qpair->lock);
@@ -1226,7 +1224,6 @@ nvme_qpair_enable(struct nvme_qpair *qpair)
void
nvme_qpair_reset(struct nvme_qpair *qpair)
{
-
qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
/*
diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c
index 4974bb718222..7693aa6d54d3 100644
--- a/sys/dev/nvme/nvme_sim.c
+++ b/sys/dev/nvme/nvme_sim.c
@@ -301,7 +301,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
static void
nvme_sim_poll(struct cam_sim *sim)
{
-
nvme_ctrlr_poll(sim2ctrlr(sim));
}
@@ -392,7 +391,7 @@ nvme_sim_controller_fail(void *ctrlr_arg)
struct nvme_consumer *consumer_cookie;
static void
-nvme_sim_init(void)
+nvme_sim_init(void *dummy __unused)
{
if (nvme_use_nvd)
return;
@@ -405,7 +404,7 @@ SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
nvme_sim_init, NULL);
static void
-nvme_sim_uninit(void)
+nvme_sim_uninit(void *dummy __unused)
{
if (nvme_use_nvd)
return;
diff --git a/sys/dev/nvme/nvme_sysctl.c b/sys/dev/nvme/nvme_sysctl.c
index a5a44721f9f9..50d19e730a16 100644
--- a/sys/dev/nvme/nvme_sysctl.c
+++ b/sys/dev/nvme/nvme_sysctl.c
@@ -153,7 +153,6 @@ nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{
-
/*
* Reset the values. Due to sanity checks in
* nvme_qpair_process_completions, we reset the number of interrupt
diff --git a/sys/dev/nvme/nvme_util.c b/sys/dev/nvme/nvme_util.c
index 0a07653a7378..cb0ba729ac96 100644
--- a/sys/dev/nvme/nvme_util.c
+++ b/sys/dev/nvme/nvme_util.c
@@ -208,31 +208,33 @@ nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb)
if (s == NULL)
sbuf_printf(sb, "%s (%02x)", type, opc);
else
- sbuf_printf(sb, "%s", s);
+ sbuf_printf(sb, "%s (%02x)", s, opc);
}
void
nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
{
const char *s, *type;
- uint16_t status;
+ uint16_t status, sc, sct;
status = le16toh(cpl->status);
- switch (NVME_STATUS_GET_SCT(status)) {
+ sc = NVME_STATUS_GET_SC(status);
+ sct = NVME_STATUS_GET_SCT(status);
+ switch (sct) {
case NVME_SCT_GENERIC:
- s = generic_status[NVME_STATUS_GET_SC(status)];
+ s = generic_status[sc];
type = "GENERIC";
break;
case NVME_SCT_COMMAND_SPECIFIC:
- s = command_specific_status[NVME_STATUS_GET_SC(status)];
+ s = command_specific_status[sc];
type = "COMMAND SPECIFIC";
break;
case NVME_SCT_MEDIA_ERROR:
- s = media_error_status[NVME_STATUS_GET_SC(status)];
+ s = media_error_status[sc];
type = "MEDIA ERROR";
break;
case NVME_SCT_PATH_RELATED:
- s = path_related_status[NVME_STATUS_GET_SC(status)];
+ s = path_related_status[sc];
type = "PATH RELATED";
break;
case NVME_SCT_VENDOR_SPECIFIC:
@@ -246,12 +248,11 @@ nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
}
if (type == NULL)
- sbuf_printf(sb, "RESERVED (%02x/%02x)",
- NVME_STATUS_GET_SCT(status), NVME_STATUS_GET_SC(status));
+ sbuf_printf(sb, "RESERVED (%02x/%02x)", sct, sc);
else if (s == NULL)
- sbuf_printf(sb, "%s (%02x)", type, NVME_STATUS_GET_SC(status));
+ sbuf_printf(sb, "%s (%02x/%02x)", type, sct, sc);
else
- sbuf_printf(sb, "%s", s);
+ sbuf_printf(sb, "%s (%02x/%02x)", s, sct, sc);
}
void
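With the change above, every branch now emits the SCT/SC pair, so logs disambiguate identical names across status code types. A kernel-context sketch of the output (status layout: bit 0 phase, bits 8:1 SC, bits 11:9 SCT; the rendered name assumes the usual generic_status table entry for SC 0x01):

struct nvme_completion cpl = { 0 };
struct sbuf *sb = sbuf_new_auto();

cpl.status = htole16((NVME_SCT_GENERIC << 9) | (0x01 << 1));
nvme_sc_sbuf(&cpl, sb);
sbuf_finish(sb);
printf("%s\n", sbuf_data(sb));	/* e.g. "INVALID OPCODE (00/01)" */
sbuf_delete(sb);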
diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c
index 6ad5229f6043..e50d7ff48d2b 100644
--- a/sys/dev/nvmf/nvmf_tcp.c
+++ b/sys/dev/nvmf/nvmf_tcp.c
@@ -970,7 +970,7 @@ nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
}
/*
- * XXX: The spec does not specify how to handle R2T tranfers
+ * XXX: The spec does not specify how to handle R2T transfers
* out of range of the original command.
*/
data_len = le32toh(r2t->r2tl);
diff --git a/sys/dev/ocs_fc/ocs_mgmt.c b/sys/dev/ocs_fc/ocs_mgmt.c
index 726b499f28ba..5b7f6557c017 100644
--- a/sys/dev/ocs_fc/ocs_mgmt.c
+++ b/sys/dev/ocs_fc/ocs_mgmt.c
@@ -226,7 +226,7 @@ ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf)
ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
access = 0;
if (mgmt_table[i].get_handler) {
access |= MGMT_MODE_RD;
@@ -305,7 +305,7 @@ ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
char *unqualified_name = name + strlen(qualifier) + 1;
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
if (mgmt_table[i].get_handler) {
mgmt_table[i].get_handler(ocs, name, textbuf);
@@ -387,7 +387,7 @@ ocs_mgmt_set(ocs_t *ocs, char *name, char *value)
char *unqualified_name = name + strlen(qualifier) +1;
/* See if it's a value I can set */
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
if (mgmt_table[i].set_handler) {
return mgmt_table[i].set_handler(ocs, name, value);
@@ -469,7 +469,7 @@ ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in,
char *unqualified_name = action + strlen(qualifier) +1;
/* See if it's an action I can perform */
- for (i=0;i<ARRAY_SIZE(mgmt_table); i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
if (mgmt_table[i].action_handler) {
return mgmt_table[i].action_handler(ocs, action, arg_in, arg_in_length,
@@ -527,7 +527,7 @@ ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf)
ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (mgmt_table[i].get_handler) {
mgmt_table[i].get_handler(ocs, mgmt_table[i].name, textbuf);
} else if (mgmt_table[i].action_handler) {
@@ -1212,7 +1212,7 @@ get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1;
int bytes_added;
- for (i=0; i < bytes_read; i++) {
+ for (i = 0; i < bytes_read; i++) {
bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s);
++s;
d += bytes_added;
@@ -2040,7 +2040,7 @@ get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO);
bytes_left = BUFFER_SIZE;
- for (i=0; i<result.list->num_descriptors; i++) {
+ for (i = 0; i < result.list->num_descriptors; i++) {
sprintf(result_line, "0x%02x:%s\n", result.list->descriptors[i].profile_id,
result.list->descriptors[i].profile_description);
if (strlen(result_line) < bytes_left) {
diff --git a/sys/dev/otus/if_otus.c b/sys/dev/otus/if_otus.c
index 5919e75a59cf..f6c4a0118b68 100644
--- a/sys/dev/otus/if_otus.c
+++ b/sys/dev/otus/if_otus.c
@@ -728,6 +728,12 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_C_SWAMSDUTX | /* Do software A-MSDU TX */
IEEE80211_C_WPA; /* WPA/RSN. */
+ /*
+ * Although A-MPDU RX is fine, A-MPDU TX apparently has some
+ * hardware bugs. Looking at Linux carl9170, it has a work-around
+ * that forces all frames into the AC_BE queue regardless of
+ * the actual QoS queue.
+ */
ic->ic_htcaps =
IEEE80211_HTC_HT |
#if 0
@@ -737,6 +743,8 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
otus_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2232,6 +2240,9 @@ otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m,
int hasqos, xferlen, type, ismcast;
wh = mtod(m, struct ieee80211_frame *);
+
+ ieee80211_output_seqno_assign(ni, -1, m);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/p2sb/lewisburg_gpio.c b/sys/dev/p2sb/lewisburg_gpio.c
index b45d7767602c..3be777ab9524 100644
--- a/sys/dev/p2sb/lewisburg_gpio.c
+++ b/sys/dev/p2sb/lewisburg_gpio.c
@@ -217,10 +217,11 @@ lbggpio_attach(device_t dev)
}
/* support gpio */
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
return (ENXIO);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 9e43a4c1909f..cde98cb62cef 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -240,6 +240,7 @@ struct pci_quirk {
#define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
#define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
#define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */
+#define PCI_QUIRK_DISABLE_FLR 8 /* Function-Level Reset (FLR) not working. */
int arg1;
int arg2;
};
@@ -319,6 +320,13 @@ static const struct pci_quirk pci_quirks[] = {
* expected place.
*/
{ 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 },
+
+ /*
+ * With some MediaTek mt76 WiFi FLR does not work despite advertised.
+ */
+ { 0x061614c3, PCI_QUIRK_DISABLE_FLR, 0, 0 }, /* mt76 7922 */
+
+ /* end of table */
{ 0 }
};
@@ -6740,6 +6748,8 @@ pcie_flr(device_t dev, u_int max_delay, bool force)
if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
return (false);
+ if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_FLR))
+ return (false);
/*
* Disable busmastering to prevent generation of new
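For reference, the quirk key packs the PCI device ID in the upper 16 bits and the vendor ID in the lower (0x0616/0x14c3 above is MediaTek's MT7922), and the new guard in pcie_flr() is the usual table scan. A simplified sketch of the lookup (the real pci_has_quirk() lives elsewhere in this file):

static bool
pci_has_quirk_sketch(uint32_t devid, int quirk)
{
	const struct pci_quirk *q;

	for (q = &pci_quirks[0]; q->devid != 0; q++) {
		if (q->devid == devid && q->type == quirk)
			return (true);
	}
	return (false);
}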
diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c
index f68b5b7e71ff..9768030995e7 100644
--- a/sys/dev/pci/pci_user.c
+++ b/sys/dev/pci/pci_user.c
@@ -79,6 +79,9 @@ struct pci_conf32 {
u_int8_t pc_revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_int32_t pd_unit; /* device unit number */
+ int pd_numa_domain; /* device NUMA domain */
+ u_int32_t pc_reported_len;/* length of PCI data reported */
+ char pc_spare[64]; /* space for future fields */
};
struct pci_match_conf32 {
@@ -502,11 +505,58 @@ pci_conf_match_freebsd6_32(struct pci_match_conf_freebsd6_32 *matches, int num_m
#endif /* COMPAT_FREEBSD32 */
#endif /* !PRE7_COMPAT */
+#ifdef COMPAT_FREEBSD14
+struct pci_conf_freebsd14 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_long pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14 _IOWR('p', 5, struct pci_conf_io)
+
+#ifdef COMPAT_FREEBSD32
+struct pci_conf_freebsd14_32 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_int32_t pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14_32 \
+ _IOC_NEWTYPE(PCIOCGETCONF_FREEBSD14, struct pci_conf_io32)
+#endif /* COMPAT_FREEBSD32 */
+#endif /* COMPAT_FREEBSD14 */
+
union pci_conf_union {
struct pci_conf pc;
#ifdef COMPAT_FREEBSD32
struct pci_conf32 pc32;
#endif
+#ifdef COMPAT_FREEBSD14
+ struct pci_conf_freebsd14 pc14;
+#ifdef COMPAT_FREEBSD32
+ struct pci_conf_freebsd14_32 pc14_32;
+#endif
+#endif
#ifdef PRE7_COMPAT
struct pci_conf_freebsd6 pco;
#ifdef COMPAT_FREEBSD32
@@ -522,10 +572,16 @@ pci_conf_match(u_long cmd, struct pci_match_conf *matches, int num_matches,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (pci_conf_match_native(
(struct pci_match_conf *)matches, num_matches, match_buf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (pci_conf_match32((struct pci_match_conf32 *)matches,
num_matches, match_buf));
#endif
@@ -645,9 +701,15 @@ pci_match_conf_size(u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (sizeof(struct pci_match_conf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (sizeof(struct pci_match_conf32));
#endif
#ifdef PRE7_COMPAT
@@ -675,6 +737,14 @@ pci_conf_size(u_long cmd)
case PCIOCGETCONF32:
return (sizeof(struct pci_conf32));
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ return (sizeof(struct pci_conf_freebsd14));
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+ return (sizeof(struct pci_conf_freebsd14_32));
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
return (sizeof(struct pci_conf_freebsd6));
@@ -698,6 +768,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#endif
@@ -706,6 +779,9 @@ pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6_32:
#endif
@@ -739,6 +815,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#endif
@@ -751,6 +830,9 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6_32:
#endif
@@ -781,8 +863,17 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
pcup->pc = *pcp;
return;
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ memcpy(&pcup->pc14, pcp, sizeof(pcup->pc14));
+ return;
+#endif
+
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
pcup->pc32.pc_sel = pcp->pc_sel;
pcup->pc32.pc_hdr = pcp->pc_hdr;
pcup->pc32.pc_subvendor = pcp->pc_subvendor;
@@ -796,8 +887,13 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
strlcpy(pcup->pc32.pd_name, pcp->pd_name,
sizeof(pcup->pc32.pd_name));
pcup->pc32.pd_unit = (uint32_t)pcp->pd_unit;
+ if (cmd == PCIOCGETCONF32) {
+ pcup->pc32.pd_numa_domain = pcp->pd_numa_domain;
+ pcup->pc32.pc_reported_len =
+ (uint32_t)offsetof(struct pci_conf32, pc_spare);
+ }
return;
-#endif
+#endif /* COMPAT_FREEBSD32 */
#ifdef PRE7_COMPAT
#ifdef COMPAT_FREEBSD32
@@ -1024,7 +1120,7 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
struct pci_map *pm;
struct pci_bar_mmap *pbm;
size_t confsz, iolen;
- int error, ionum, i, num_patterns;
+ int domain, error, ionum, i, num_patterns;
union pci_conf_union pcu;
#ifdef PRE7_COMPAT
struct pci_io iodata;
@@ -1044,6 +1140,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
@@ -1069,6 +1171,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
@@ -1201,6 +1309,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
dinfo->conf.pd_unit = 0;
}
+ if (dinfo->cfg.dev != NULL &&
+ bus_get_domain(dinfo->cfg.dev, &domain) == 0)
+ dinfo->conf.pd_numa_domain = domain;
+ else
+ dinfo->conf.pd_numa_domain = 0;
+
if (pattern_buf == NULL ||
pci_conf_match(cmd, pattern_buf, num_patterns,
&dinfo->conf) == 0) {
@@ -1217,6 +1331,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
break;
}
+ dinfo->conf.pc_reported_len =
+ offsetof(struct pci_conf, pc_spare);
+
pci_conf_for_copyout(&dinfo->conf, &pcu, cmd);
error = copyout(&pcu,
(caddr_t)cio->matches +
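The new pc_reported_len field gives userland a forward-compatible way to confirm
which fields the kernel actually populated before reading them. A minimal
consumer sketch, assuming the struct and ioctl names from sys/sys/pciio.h as
extended above (hypothetical, not part of this change; generation and overflow
handling trimmed):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <err.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct pci_conf matches[32];
	struct pci_conf_io pc;
	int fd, i;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0)
		err(1, "open");
	memset(&pc, 0, sizeof(pc));
	pc.match_buf_len = sizeof(matches);
	pc.matches = matches;
	if (ioctl(fd, PCIOCGETCONF, &pc) < 0)
		err(1, "ioctl");
	for (i = 0; i < (int)pc.num_matches; i++) {
		/* Read pd_numa_domain only if the kernel reported it. */
		if (matches[i].pc_reported_len >=
		    offsetof(struct pci_conf, pd_numa_domain) +
		    sizeof(matches[i].pd_numa_domain))
			printf("%s%lu: NUMA domain %d\n", matches[i].pd_name,
			    matches[i].pd_unit, matches[i].pd_numa_domain);
	}
	return (0);
}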
diff --git a/sys/dev/ppc/ppc.c b/sys/dev/ppc/ppc.c
index 9870379e2eba..de75f4747709 100644
--- a/sys/dev/ppc/ppc.c
+++ b/sys/dev/ppc/ppc.c
@@ -1389,7 +1389,7 @@ ppc_exec_microseq(device_t dev, struct ppb_microseq **p_msq)
/* let's suppose the next instr. is the same */
prefetch:
- for (;mi->opcode == MS_OP_RASSERT; INCR_PC)
+ for (; mi->opcode == MS_OP_RASSERT; INCR_PC)
w_reg(mi->arg[0].i, ppc, (char)mi->arg[1].i);
if (mi->opcode == MS_OP_DELAY) {
diff --git a/sys/dev/psci/smccc_trng.c b/sys/dev/psci/smccc_trng.c
index ab98837d3841..8a2e5508ef48 100644
--- a/sys/dev/psci/smccc_trng.c
+++ b/sys/dev/psci/smccc_trng.c
@@ -58,7 +58,7 @@ static device_attach_t trng_attach;
static unsigned trng_read(void *, unsigned);
-static struct random_source random_trng = {
+static const struct random_source random_trng = {
.rs_ident = "Arm SMCCC TRNG",
.rs_source = RANDOM_PURE_ARM_TRNG,
.rs_read = trng_read,
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index e911a407cca9..436af76001da 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -64,6 +64,7 @@ static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
static puc_config_f puc_config_siig;
static puc_config_f puc_config_sunix;
+static puc_config_f puc_config_systembase;
static puc_config_f puc_config_timedia;
static puc_config_f puc_config_titan;
@@ -1705,6 +1706,23 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_4S, 0x10, 0, 8,
.config_function = puc_config_icbook
},
+
+ /*
+ * Systembase cards using SB16C1050 UARTs:
+ */
+ { 0x14a1, 0x0008, 0x14a1, 0x0008,
+ "Systembase SB16C1058",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_8S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+ { 0x14a1, 0x0004, 0x14a1, 0x0004,
+ "Systembase SB16C1054",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+
{ 0xffff, 0, 0xffff, 0, NULL, 0 }
};
@@ -2294,3 +2312,28 @@ puc_config_titan(struct puc_softc *sc __unused, enum puc_cfg_cmd cmd,
}
return (ENXIO);
}
+
+static int
+puc_config_systembase(struct puc_softc *sc __unused,
+ enum puc_cfg_cmd cmd, int port, intptr_t *res)
+{
+ struct puc_bar *bar;
+
+ switch (cmd) {
+ case PUC_CFG_SETUP:
+ bar = puc_get_bar(sc, 0x14);
+ if (bar == NULL)
+ return (ENXIO);
+
+ /*
+ * The Systembase SB16C1058 (and probably other devices
+ * based on the SB16C1050 UART core) requires poking a
+ * register in the *other* RID to turn on interrupts.
+ */
+ bus_write_1(bar->b_res, /* OPT_IMRREG0 */ 0xc, 0xff);
+ return (0);
+ default:
+ break;
+ }
+ return (ENXIO);
+}
diff --git a/sys/dev/qat/include/common/adf_accel_devices.h b/sys/dev/qat/include/common/adf_accel_devices.h
index c09aee8ea4bd..eeffc6a9132c 100644
--- a/sys/dev/qat/include/common/adf_accel_devices.h
+++ b/sys/dev/qat/include/common/adf_accel_devices.h
@@ -39,12 +39,16 @@
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
+ id == ADF_402XX_PCI_DEVICE_ID ||
+ id == ADF_402XXIOV_PCI_DEVICE_ID ||
id == ADF_4XXXIOV_PCI_DEVICE_ID ||
id == ADF_401XXIOV_PCI_DEVICE_ID);
}
diff --git a/sys/dev/qat/qat_api/include/icp_sal_versions.h b/sys/dev/qat/qat_api/include/icp_sal_versions.h
index 03bcef4fcbbb..0eb227ade09c 100644
--- a/sys/dev/qat/qat_api/include/icp_sal_versions.h
+++ b/sys/dev/qat/qat_api/include/icp_sal_versions.h
@@ -26,7 +26,7 @@
/* Part name and number of the accelerator device */
#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3
-#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 15
+#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 16
#define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0
/**
diff --git a/sys/dev/qat/qat_common/adf_gen4_timer.c b/sys/dev/qat/qat_common/adf_gen4_timer.c
index 96b65cdff181..2c74d09418e5 100644
--- a/sys/dev/qat/qat_common/adf_gen4_timer.c
+++ b/sys/dev/qat/qat_common/adf_gen4_timer.c
@@ -57,7 +57,7 @@ end:
static void
timer_handler(struct timer_list *tl)
{
- struct adf_int_timer *int_timer = from_timer(int_timer, tl, timer);
+ struct adf_int_timer *int_timer = timer_container_of(int_timer, tl, timer);
struct adf_accel_dev *accel_dev = int_timer->accel_dev;
struct adf_hb_timer_data *hb_timer_data = NULL;
u64 timeout_val = adf_get_next_timeout(int_timer->timeout_val);
diff --git a/sys/dev/qat/qat_common/qat_uclo.c b/sys/dev/qat/qat_common/qat_uclo.c
index 54e8e8eb7421..b17020286d24 100644
--- a/sys/dev/qat/qat_common/qat_uclo.c
+++ b/sys/dev/qat/qat_common/qat_uclo.c
@@ -892,6 +892,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C4XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
index d730efd5952b..49e1e1859e78 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
@@ -536,8 +536,8 @@ adf_exit_accel_units(struct adf_accel_dev *accel_dev)
}
static const char *
-get_obj_name(struct adf_accel_dev *accel_dev,
- enum adf_accel_unit_services service)
+get_obj_name_4xxx(struct adf_accel_dev *accel_dev,
+ enum adf_accel_unit_services service)
{
switch (service) {
case ADF_ACCEL_ASYM:
@@ -553,6 +553,24 @@ get_obj_name(struct adf_accel_dev *accel_dev,
}
}
+static const char *
+get_obj_name_402xx(struct adf_accel_dev *accel_dev,
+ enum adf_accel_unit_services service)
+{
+ switch (service) {
+ case ADF_ACCEL_ASYM:
+ return ADF_402XX_ASYM_OBJ;
+ case ADF_ACCEL_CRYPTO:
+ return ADF_402XX_SYM_OBJ;
+ case ADF_ACCEL_COMPRESSION:
+ return ADF_402XX_DC_OBJ;
+ case ADF_ACCEL_ADMIN:
+ return ADF_402XX_ADMIN_OBJ;
+ default:
+ return NULL;
+ }
+}
+
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
@@ -982,8 +1000,23 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
- hw_data->fw_name = ADF_4XXX_FW;
- hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ switch (id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_402XX_FW;
+ hw_data->fw_mmp_name = ADF_402XX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
+ break;
+ case ADF_401XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
+ break;
+
+ default:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
+ }
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_4xxx_send_admin_init;
@@ -1002,7 +1035,13 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->get_objs_num = get_objs_num;
- hw_data->get_obj_name = get_obj_name;
+ switch (id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->get_obj_name = get_obj_name_402xx;
+ break;
+ default:
+ hw_data->get_obj_name = get_obj_name_4xxx;
+ }
hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
hw_data->get_service_type = adf_4xxx_get_service_type;
hw_data->set_msix_rttable = set_msix_default_rttable;
@@ -1022,15 +1061,6 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->query_storage_cap = 1;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- switch (id) {
- case ADF_401XX_PCI_DEVICE_ID:
- hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
- break;
- case ADF_4XXX_PCI_DEVICE_ID:
- default:
- hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
- }
-
adf_gen4_init_hw_csr_info(&hw_data->csr_info);
adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
index c35ebbcadcd7..fa7249dca596 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
@@ -87,6 +87,12 @@
#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
+#define ADF_402XX_FW "qat_402xx_fw"
+#define ADF_402XX_MMP "qat_402xx_mmp_fw"
+#define ADF_402XX_DC_OBJ "qat_402xx_dc.bin"
+#define ADF_402XX_SYM_OBJ "qat_402xx_sym.bin"
+#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
/* Only 3 types of images can be loaded including the admin image */
#define ADF_4XXX_MAX_OBJ 3
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
index cb534dd03b86..f9ad39fa45f0 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
@@ -22,12 +22,14 @@ static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");
PCI_VENDOR_ID_INTEL, device_id \
}
-static const struct pci_device_id adf_pci_tbl[] =
- { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
- ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
- {
- 0,
- } };
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_402XX_PCI_DEVICE_ID),
+ {
+ 0,
+ }
+};
static int
adf_probe(device_t dev)
@@ -135,6 +137,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
switch (pci_get_device(accel_pci_dev->pci_dev)) {
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
adf_clean_hw_data_4xxx(accel_dev->hw_device);
break;
default:
diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
index 2bbccb4d6b17..dbe40835ccbf 100644
--- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
@@ -22,12 +22,14 @@ static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf");
PCI_VENDOR_ID_INTEL, device_id \
}
-static const struct pci_device_id adf_pci_tbl[] =
- { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
- ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
- {
- 0,
- } };
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_402XXIOV_PCI_DEVICE_ID),
+ {
+ 0,
+ }
+};
static int
adf_probe(device_t dev)
@@ -76,6 +78,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
switch (pci_get_device(accel_pci_dev->pci_dev)) {
case ADF_4XXXIOV_PCI_DEVICE_ID:
case ADF_401XXIOV_PCI_DEVICE_ID:
+ case ADF_402XXIOV_PCI_DEVICE_ID:
adf_clean_hw_data_4xxxiov(accel_dev->hw_device);
break;
default:
diff --git a/sys/dev/qcom_rnd/qcom_rnd.c b/sys/dev/qcom_rnd/qcom_rnd.c
index fdd0b553523e..a5ece7e00f28 100644
--- a/sys/dev/qcom_rnd/qcom_rnd.c
+++ b/sys/dev/qcom_rnd/qcom_rnd.c
@@ -63,7 +63,7 @@ static int qcom_rnd_detach(device_t);
static int qcom_rnd_harvest(struct qcom_rnd_softc *, void *, size_t *);
static unsigned qcom_rnd_read(void *, unsigned);
-static struct random_source random_qcom_rnd = {
+static const struct random_source random_qcom_rnd = {
.rs_ident = "Qualcomm Entropy Adapter",
.rs_source = RANDOM_PURE_QUALCOMM,
.rs_read = qcom_rnd_read,
diff --git a/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c b/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
index 2d390cd449af..50f54b896748 100644
--- a/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
+++ b/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
@@ -346,13 +346,14 @@ qcom_tlmm_ipq4018_attach(device_t dev)
fdt_pinctrl_register(dev, NULL);
fdt_pinctrl_configure_by_name(dev, "default");
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "%s: failed to attach bus\n", __func__);
qcom_tlmm_ipq4018_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/qlnx/qlnxe/ecore_dev.c b/sys/dev/qlnx/qlnxe/ecore_dev.c
index 6187ecdbc446..389a95a4164c 100644
--- a/sys/dev/qlnx/qlnxe/ecore_dev.c
+++ b/sys/dev/qlnx/qlnxe/ecore_dev.c
@@ -5268,7 +5268,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
+ "Read default link: Speed %u Mb/sec, Adv. Speeds 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%u usec]\n",
link->speed.forced_speed, link->speed.advertised_speeds,
link->speed.autoneg, link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
@@ -6860,7 +6860,7 @@ int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
+ "Configured MAX bandwidth to be %u Mb/sec\n",
p_link->speed);
return rc;
@@ -6918,7 +6918,7 @@ int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configured MIN bandwidth to be %d Mb/sec\n",
+ "Configured MIN bandwidth to be %u Mb/sec\n",
p_link->min_pf_rate);
return rc;
diff --git a/sys/dev/qlnx/qlnxe/ecore_mcp.c b/sys/dev/qlnx/qlnxe/ecore_mcp.c
index ab14b1eb5186..6d1e5fe24d06 100644
--- a/sys/dev/qlnx/qlnxe/ecore_mcp.c
+++ b/sys/dev/qlnx/qlnxe/ecore_mcp.c
@@ -1638,7 +1638,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
+ "Configuring Link: Speed %u Mb/sec, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode);
else
diff --git a/sys/dev/qlnx/qlnxe/qlnx_def.h b/sys/dev/qlnx/qlnxe/qlnx_def.h
index 4342bba89587..796845f3f8c6 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_def.h
+++ b/sys/dev/qlnx/qlnxe/qlnx_def.h
@@ -696,22 +696,6 @@ extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
* Some OS specific stuff
*/
-#if (defined IFM_100G_SR4)
-#define QLNX_IFM_100G_SR4 IFM_100G_SR4
-#define QLNX_IFM_100G_LR4 IFM_100G_LR4
-#define QLNX_IFM_100G_CR4 IFM_100G_CR4
-#else
-#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
-#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
-#endif /* #if (defined IFM_100G_SR4) */
-
-#if (defined IFM_25G_SR)
-#define QLNX_IFM_25G_SR IFM_25G_SR
-#define QLNX_IFM_25G_CR IFM_25G_CR
-#else
-#define QLNX_IFM_25G_SR IFM_UNKNOWN
-#define QLNX_IFM_25G_CR IFM_UNKNOWN
-#endif /* #if (defined IFM_25G_SR) */
#define QLNX_INC_IERRORS(ifp) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp) if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.c b/sys/dev/qlnx/qlnxe/qlnx_os.c
index 4ad190374f87..9963f472c615 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_os.c
+++ b/sys/dev/qlnx/qlnxe/qlnx_os.c
@@ -2375,18 +2375,15 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
} else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
- ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
- ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_SR), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_CR), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_LR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_SR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_CR4), 0, NULL);
}
ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
@@ -2724,7 +2721,9 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
- QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
+ case SIOCGIFXMEDIA:
+ QL_DPRINT4(ha,
+ "SIOCSIFMEDIA/SIOCGIFMEDIA/SIOCGIFXMEDIA (0x%lx)\n", cmd);
ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
break;
@@ -3808,11 +3807,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
case MEDIA_MODULE_FIBER:
case MEDIA_UNSPECIFIED:
if (if_link->speed == (100 * 1000))
- ifm_type = QLNX_IFM_100G_SR4;
+ ifm_type = IFM_100G_SR4;
else if (if_link->speed == (40 * 1000))
ifm_type = IFM_40G_SR4;
else if (if_link->speed == (25 * 1000))
- ifm_type = QLNX_IFM_25G_SR;
+ ifm_type = IFM_25G_SR;
else if (if_link->speed == (10 * 1000))
ifm_type = (IFM_10G_LR | IFM_10G_SR);
else if (if_link->speed == (1 * 1000))
@@ -3822,11 +3821,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
case MEDIA_DA_TWINAX:
if (if_link->speed == (100 * 1000))
- ifm_type = QLNX_IFM_100G_CR4;
+ ifm_type = IFM_100G_CR4;
else if (if_link->speed == (40 * 1000))
ifm_type = IFM_40G_CR4;
else if (if_link->speed == (25 * 1000))
- ifm_type = QLNX_IFM_25G_CR;
+ ifm_type = IFM_25G_CR;
else if (if_link->speed == (10 * 1000))
ifm_type = IFM_10G_TWINAX;
diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c
index 09b01ea55be9..7feb324eb21d 100644
--- a/sys/dev/ral/rt2560.c
+++ b/sys/dev/ral/rt2560.c
@@ -281,6 +281,8 @@ rt2560_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1516,6 +1518,8 @@ rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c
index 38cd99d899ed..c9c86d4f089a 100644
--- a/sys/dev/ral/rt2661.c
+++ b/sys/dev/ral/rt2661.c
@@ -282,6 +282,8 @@ rt2661_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1284,7 +1286,7 @@ rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
rate = ni->ni_txparms->mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
-
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/ral/rt2860.c b/sys/dev/ral/rt2860.c
index 1449df683a93..76fe4652839d 100644
--- a/sys/dev/ral/rt2860.c
+++ b/sys/dev/ral/rt2860.c
@@ -323,6 +323,8 @@ rt2860_attach(device_t dev, int id)
| IEEE80211_C_WME /* 802.11e */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2860_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1471,6 +1473,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
diff --git a/sys/dev/random/armv8rng.c b/sys/dev/random/armv8rng.c
index 61698bfff820..524d80317681 100644
--- a/sys/dev/random/armv8rng.c
+++ b/sys/dev/random/armv8rng.c
@@ -44,7 +44,7 @@
static u_int random_rndr_read(void *, u_int);
static bool has_rndr;
-static struct random_source random_armv8_rndr = {
+static const struct random_source random_armv8_rndr = {
.rs_ident = "Armv8 rndr RNG",
.rs_source = RANDOM_PURE_ARMV8,
.rs_read = random_rndr_read,
diff --git a/sys/dev/random/darn.c b/sys/dev/random/darn.c
index a66754e095fb..9bb4991df82f 100644
--- a/sys/dev/random/darn.c
+++ b/sys/dev/random/darn.c
@@ -56,7 +56,7 @@
static u_int random_darn_read(void *, u_int);
-static struct random_source random_darn = {
+static const struct random_source random_darn = {
.rs_ident = "PowerISA DARN random number generator",
.rs_source = RANDOM_PURE_DARN,
.rs_read = random_darn_read
diff --git a/sys/dev/random/fenestrasX/fx_pool.c b/sys/dev/random/fenestrasX/fx_pool.c
index d2e6f0db71ee..f4ad1e295d54 100644
--- a/sys/dev/random/fenestrasX/fx_pool.c
+++ b/sys/dev/random/fenestrasX/fx_pool.c
@@ -164,6 +164,9 @@ static const struct fxrng_ent_char {
[RANDOM_CALLOUT] = {
.entc_cls = &fxrng_lo_push,
},
+ [RANDOM_RANDOMDEV] = {
+ .entc_cls = &fxrng_lo_push,
+ },
[RANDOM_PURE_OCTEON] = {
.entc_cls = &fxrng_hi_push, /* Could be made pull. */
},
diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c
index 05474d977276..fa1e4831f1b9 100644
--- a/sys/dev/random/ivy.c
+++ b/sys/dev/random/ivy.c
@@ -51,7 +51,7 @@
static bool has_rdrand, has_rdseed;
static u_int random_ivy_read(void *, u_int);
-static struct random_source random_ivy = {
+static const struct random_source random_ivy = {
.rs_ident = "Intel Secure Key RNG",
.rs_source = RANDOM_PURE_RDRAND,
.rs_read = random_ivy_read
diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c
index f76071290b8f..56f144169dae 100644
--- a/sys/dev/random/nehemiah.c
+++ b/sys/dev/random/nehemiah.c
@@ -44,7 +44,7 @@
static u_int random_nehemiah_read(void *, u_int);
-static struct random_source random_nehemiah = {
+static const struct random_source random_nehemiah = {
.rs_ident = "VIA Nehemiah Padlock RNG",
.rs_source = RANDOM_PURE_NEHEMIAH,
.rs_read = random_nehemiah_read
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index c7762967c4fb..2d7af254c52c 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -103,14 +103,16 @@ static const char *random_source_descr[ENTROPYSOURCE];
volatile int random_kthread_control;
-/* Allow the sysadmin to select the broad category of
- * entropy types to harvest.
+/*
+ * Allow the sysadmin to select the broad category of entropy types to harvest.
+ *
+ * Updates are synchronized by the harvest mutex.
*/
__read_frequently u_int hc_source_mask;
struct random_sources {
CK_LIST_ENTRY(random_sources) rrs_entries;
- struct random_source *rrs_source;
+ const struct random_source *rrs_source;
};
static CK_LIST_HEAD(sources_head, random_sources) source_list =
@@ -278,8 +280,15 @@ random_sources_feed(void)
epoch_enter_preempt(rs_epoch, &et);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
for (i = 0; i < npools; i++) {
+ if (rrs->rrs_source->rs_read == NULL) {
+ /* Source pushes entropy asynchronously. */
+ continue;
+ }
n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
- KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy)));
+ KASSERT((n <= sizeof(entropy)),
+ ("%s: rs_read returned too much data (%u > %zu)",
+ __func__, n, sizeof(entropy)));
+
/*
* Sometimes the HW entropy source doesn't have anything
* ready for us. This isn't necessarily untrustworthy.
@@ -334,7 +343,17 @@ copy_event(uint32_t dst[static HARVESTSIZE + 1],
{
memset(dst, 0, sizeof(uint32_t) * (HARVESTSIZE + 1));
memcpy(dst, event->he_entropy, event->he_size);
- dst[HARVESTSIZE] = event->he_somecounter;
+ if (event->he_source <= RANDOM_ENVIRONMENTAL_END) {
+ /*
+ * For pure entropy sources the timestamp counter is generally
+ * quite deterministic since samples are taken at regular
+ * intervals, so it does not contribute much to the entropy. To
+ * make health tests more effective, exclude it from the sample,
+ * since it might otherwise defeat the health tests in a
+ * scenario where the source is stuck.
+ */
+ dst[HARVESTSIZE] = event->he_somecounter;
+ }
}
static void
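The reasoning above can be checked with a toy program: once a deterministic
counter is mixed into each sample, a stuck source still yields pairwise-distinct
samples, so a repetition-count test can never fire. A minimal illustration
(plain userland C, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint8_t stuck = 0xAA;		/* failed source: same byte forever */
	uint32_t prev[2] = { 0 }, cur[2];
	unsigned reps = 1, max_reps = 1;

	for (uint32_t t = 0; t < 1000; t++) {
		cur[0] = stuck;
		cur[1] = t;		/* timestamp-like counter */
		if (memcmp(cur, prev, sizeof(cur)) == 0)
			reps++;
		else
			reps = 1;
		if (reps > max_reps)
			max_reps = reps;
		memcpy(prev, cur, sizeof(cur));
	}
	/* Prints 1: the counter hides the stuck source completely. */
	printf("max repetitions seen: %u\n", max_reps);
	return (0);
}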
@@ -464,11 +483,12 @@ SYSCTL_BOOL(_kern_random, OID_AUTO, nist_healthtest_enabled,
"Enable NIST SP 800-90B health tests for noise sources");
static void
-random_healthtest_init(enum random_entropy_source source)
+random_healthtest_init(enum random_entropy_source source, int min_entropy)
{
struct health_test_softc *ht;
ht = &healthtest[source];
+ memset(ht, 0, sizeof(*ht));
KASSERT(ht->ht_state == INIT,
("%s: health test state is %d for source %d",
__func__, ht->ht_state, source));
@@ -485,20 +505,62 @@ random_healthtest_init(enum random_entropy_source source)
}
/*
- * Set cutoff values for the two tests, assuming that each sample has
- * min-entropy of 1 bit and allowing for an error rate of 1 in 2^{34}.
- * With a sample rate of RANDOM_KTHREAD_HZ, we expect to see an false
- * positive once in ~54.5 years.
+ * Set cutoff values for the two tests, given a min-entropy estimate for
+ * the source and allowing for an error rate of 1 in 2^{34}. With a
+ * min-entropy estimate of 1 bit and a sample rate of RANDOM_KTHREAD_HZ,
+ * we expect to see a false positive once in ~54.5 years.
*
* The RCT limit comes from the formula in section 4.4.1.
*
- * The APT cutoff is calculated using the formula in section 4.4.2
- * footnote 10 with the window size changed from 512 to 511, since the
- * test as written counts the number of samples equal to the first
- * sample in the window, and thus tests W-1 samples.
+ * The APT cutoffs are calculated using the formula in section 4.4.2
+ * footnote 10 with the number of Bernoulli trials changed from W to
+ * W-1, since the test as written counts the number of samples equal to
+ * the first sample in the window, and thus tests W-1 samples. We
+ * provide cutoffs for estimates up to sizeof(uint32_t)*HARVESTSIZE*8
+ * bits.
*/
- ht->ht_rct_limit = 35;
- ht->ht_apt_cutoff = 330;
+ const int apt_cutoffs[] = {
+ [1] = 329,
+ [2] = 195,
+ [3] = 118,
+ [4] = 73,
+ [5] = 48,
+ [6] = 33,
+ [7] = 23,
+ [8] = 17,
+ [9] = 13,
+ [10] = 11,
+ [11] = 9,
+ [12] = 8,
+ [13] = 7,
+ [14] = 6,
+ [15] = 5,
+ [16] = 5,
+ [17 ... 19] = 4,
+ [20 ... 25] = 3,
+ [26 ... 42] = 2,
+ [43 ... 64] = 1,
+ };
+ const int error_rate = 34;
+
+ if (min_entropy == 0) {
+ /*
+ * For environmental sources, the main source of entropy is the
+ * associated timecounter value. Since these sources can be
+ * influenced by unprivileged users, we conservatively use a
+ * min-entropy estimate of 1 bit per sample. For "pure"
+ * sources, we assume 8 bits per sample, as such sources provide
+ * a variable amount of data per read and in particular might
+ * only provide a single byte at a time.
+ */
+ min_entropy = source >= RANDOM_PURE_START ? 8 : 1;
+ } else if (min_entropy < 0 || min_entropy >= nitems(apt_cutoffs)) {
+ panic("invalid min_entropy %d for %s", min_entropy,
+ random_source_descr[source]);
+ }
+
+ ht->ht_rct_limit = 1 + howmany(error_rate, min_entropy);
+ ht->ht_apt_cutoff = apt_cutoffs[min_entropy];
}
static int
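The new RCT limit follows SP 800-90B section 4.4.1: cutoff = 1 + ceil(alpha/H),
where alpha = 34 gives a false-positive rate of 1 in 2^34 and H is the
per-sample min-entropy estimate. A quick standalone check of the arithmetic,
with howmany() spelled out the way sys/param.h defines it:

#include <stdio.h>

/* Same rounding-up division as the kernel's howmany() macro. */
#define howmany(x, y) (((x) + ((y) - 1)) / (y))

int
main(void)
{
	const int error_rate = 34;	/* false-positive rate of 1 in 2^34 */
	const int h[] = { 1, 4, 8 };	/* example min-entropy estimates, bits */

	for (int i = 0; i < 3; i++)
		printf("H=%d -> RCT limit %d\n", h[i],
		    1 + howmany(error_rate, h[i]));
	/* Prints 35, 10, and 6; H=1 reproduces the old fixed limit of 35. */
	return (0);
}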
@@ -533,9 +595,9 @@ random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
_RANDOM_HARVEST_ETHER_OFF | _RANDOM_HARVEST_UMA_OFF;
int error;
- u_int value, orig_value;
+ u_int value;
- orig_value = value = hc_source_mask;
+ value = atomic_load_int(&hc_source_mask);
error = sysctl_handle_int(oidp, &value, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
@@ -546,12 +608,14 @@ random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
/*
* Disallow userspace modification of pure entropy sources.
*/
+ RANDOM_HARVEST_LOCK();
hc_source_mask = (value & ~user_immutable_mask) |
- (orig_value & user_immutable_mask);
+ (hc_source_mask & user_immutable_mask);
+ RANDOM_HARVEST_UNLOCK();
return (0);
}
SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask,
- CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
random_check_uint_harvestmask, "IU",
"Entropy harvesting mask");
@@ -563,9 +627,16 @@ random_print_harvestmask(SYSCTL_HANDLER_ARGS)
error = sysctl_wire_old_buffer(req, 0);
if (error == 0) {
+ u_int mask;
+
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
- for (i = ENTROPYSOURCE - 1; i >= 0; i--)
- sbuf_cat(&sbuf, (hc_source_mask & (1 << i)) ? "1" : "0");
+ mask = atomic_load_int(&hc_source_mask);
+ for (i = ENTROPYSOURCE - 1; i >= 0; i--) {
+ bool present;
+
+ present = (mask & (1u << i)) != 0;
+ sbuf_cat(&sbuf, present ? "1" : "0");
+ }
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
}
@@ -619,16 +690,21 @@ random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS)
first = true;
error = sysctl_wire_old_buffer(req, 0);
if (error == 0) {
+ u_int mask;
+
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ mask = atomic_load_int(&hc_source_mask);
for (i = ENTROPYSOURCE - 1; i >= 0; i--) {
- if (i >= RANDOM_PURE_START &&
- (hc_source_mask & (1 << i)) == 0)
+ bool present;
+
+ present = (mask & (1u << i)) != 0;
+ if (i >= RANDOM_PURE_START && !present)
continue;
if (!first)
sbuf_cat(&sbuf, ",");
- sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "[" : "");
+ sbuf_cat(&sbuf, !present ? "[" : "");
sbuf_cat(&sbuf, random_source_descr[i]);
- sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "]" : "");
+ sbuf_cat(&sbuf, !present ? "]" : "");
first = false;
}
error = sbuf_finish(&sbuf);
@@ -652,8 +728,8 @@ random_harvestq_init(void *unused __unused)
RANDOM_HARVEST_INIT_LOCK();
harvest_context.hc_active_buf = 0;
- for (int i = 0; i < ENTROPYSOURCE; i++)
- random_healthtest_init(i);
+ for (int i = RANDOM_START; i <= RANDOM_ENVIRONMENTAL_END; i++)
+ random_healthtest_init(i, 0);
}
SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL);
@@ -835,21 +911,7 @@ random_harvest_direct_(const void *entropy, u_int size, enum random_entropy_sour
}
void
-random_harvest_register_source(enum random_entropy_source source)
-{
-
- hc_source_mask |= (1 << source);
-}
-
-void
-random_harvest_deregister_source(enum random_entropy_source source)
-{
-
- hc_source_mask &= ~(1 << source);
-}
-
-void
-random_source_register(struct random_source *rsource)
+random_source_register(const struct random_source *rsource)
{
struct random_sources *rrs;
@@ -858,25 +920,25 @@ random_source_register(struct random_source *rsource)
rrs = malloc(sizeof(*rrs), M_ENTROPY, M_WAITOK);
rrs->rrs_source = rsource;
- random_harvest_register_source(rsource->rs_source);
-
printf("random: registering fast source %s\n", rsource->rs_ident);
+ random_healthtest_init(rsource->rs_source, rsource->rs_min_entropy);
+
RANDOM_HARVEST_LOCK();
+ hc_source_mask |= (1 << rsource->rs_source);
CK_LIST_INSERT_HEAD(&source_list, rrs, rrs_entries);
RANDOM_HARVEST_UNLOCK();
}
void
-random_source_deregister(struct random_source *rsource)
+random_source_deregister(const struct random_source *rsource)
{
struct random_sources *rrs = NULL;
KASSERT(rsource != NULL, ("invalid input to %s", __func__));
- random_harvest_deregister_source(rsource->rs_source);
-
RANDOM_HARVEST_LOCK();
+ hc_source_mask &= ~(1 << rsource->rs_source);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries)
if (rrs->rrs_source == rsource) {
CK_LIST_REMOVE(rrs, rrs_entries);
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index e1c9ac7b680d..a6ca66c7d92e 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -52,7 +52,9 @@ random_check_uint_##name(SYSCTL_HANDLER_ARGS) \
}
#endif /* SYSCTL_DECL */
+#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ENTROPY);
+#endif
extern bool random_bypass_before_seeding;
extern bool read_random_bypassed_before_seeding;
@@ -101,10 +103,11 @@ struct random_source {
const char *rs_ident;
enum random_entropy_source rs_source;
random_source_read_t *rs_read;
+ int rs_min_entropy;
};
-void random_source_register(struct random_source *);
-void random_source_deregister(struct random_source *);
+void random_source_register(const struct random_source *);
+void random_source_deregister(const struct random_source *);
#endif /* _KERNEL */
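With rs_min_entropy in place, a driver registering a pure source can advertise
its own per-sample min-entropy estimate; leaving the field 0 selects the
defaults chosen in random_healthtest_init(). A hypothetical registration
sketch (the ident string and read callback are placeholders, and
RANDOM_PURE_RDRAND stands in for a real per-driver source ID from
sys/random.h):

/* Kernel-side sketch; assumes dev/random/randomdev.h as patched above. */
static u_int example_rng_read(void *buf, u_int len);

static const struct random_source random_example = {
	.rs_ident = "Example HW RNG",
	.rs_source = RANDOM_PURE_RDRAND,
	.rs_read = example_rng_read,
	.rs_min_entropy = 8,	/* claim 8 bits of min-entropy per sample */
};

static void
example_rng_attach(void)
{
	random_source_register(&random_example);
}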
diff --git a/sys/dev/rccgpio/rccgpio.c b/sys/dev/rccgpio/rccgpio.c
index b2b775b879ad..dafd0b511fa9 100644
--- a/sys/dev/rccgpio/rccgpio.c
+++ b/sys/dev/rccgpio/rccgpio.c
@@ -308,7 +308,7 @@ rcc_gpio_attach(device_t dev)
RCC_WRITE(sc, RCC_GPIO_GP_LVL, sc->sc_output);
/* Attach the gpiobus. */
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_io_rid,
sc->sc_io_res);
@@ -316,6 +316,7 @@ rcc_gpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c
index 091ab2db72ec..d56c975a43d2 100644
--- a/sys/dev/re/if_re.c
+++ b/sys/dev/re/if_re.c
@@ -353,6 +353,8 @@ static driver_t re_driver = {
DRIVER_MODULE(re, pci, re_driver, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, 0, 0);
+MODULE_PNP_INFO("U16:vendor;U16:device;U32:#;D:#", pci, re, re_devs,
+ nitems(re_devs) - 1);
#define EE_SET(x) \
CSR_WRITE_1(sc, RL_EECMD, \
@@ -3558,6 +3560,7 @@ re_ioctl(if_t ifp, u_long command, caddr_t data)
static void
re_watchdog(struct rl_softc *sc)
{
+ struct epoch_tracker et;
if_t ifp;
RL_LOCK_ASSERT(sc);
@@ -3578,7 +3581,9 @@ re_watchdog(struct rl_softc *sc)
if_printf(ifp, "watchdog timeout\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ NET_EPOCH_ENTER(et);
re_rxeof(sc, NULL);
+ NET_EPOCH_EXIT(et);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
re_init_locked(sc);
if (!if_sendq_empty(ifp))
diff --git a/sys/dev/rtwn/if_rtwn.c b/sys/dev/rtwn/if_rtwn.c
index 7a547e13cafa..c5889937fb08 100644
--- a/sys/dev/rtwn/if_rtwn.c
+++ b/sys/dev/rtwn/if_rtwn.c
@@ -268,6 +268,14 @@ rtwn_attach(struct rtwn_softc *sc)
ic->ic_flags_ext |= IEEE80211_FEXT_WATCHDOG;
#endif
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
+#ifdef RTWN_WITHOUT_UCODE
+ /* Don't originate NULL data frames - let firmware do this */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+#endif
+
/* Adjust capabilities. */
rtwn_adj_devcaps(sc);
diff --git a/sys/dev/rtwn/if_rtwn_tx.c b/sys/dev/rtwn/if_rtwn_tx.c
index 2c9c246dfbb4..fa7f35f2de83 100644
--- a/sys/dev/rtwn/if_rtwn_tx.c
+++ b/sys/dev/rtwn/if_rtwn_tx.c
@@ -183,6 +183,10 @@ rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
}
}
+ /* Allocate a seqno only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
cipher = IEEE80211_CIPHER_NONE;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
@@ -229,6 +233,10 @@ rtwn_tx_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
uint8_t type;
u_int cipher;
+ /* Allocate a seqno only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
cipher = IEEE80211_CIPHER_NONE;
if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
diff --git a/sys/dev/rtwn/rtl8192c/r92c_tx.c b/sys/dev/rtwn/rtl8192c/r92c_tx.c
index 6b013de0c536..ba2f60bd9295 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_tx.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_tx.c
@@ -452,11 +452,10 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid] % IEEE80211_SEQ_RANGE;
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdseq = htole16(seqno);
@@ -511,7 +510,7 @@ r92c_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
rtwn_r92c_tx_setup_hwseq(sc, txd);
} else {
/* Set sequence number. */
- txd->txdseq |= htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE);
+ txd->txdseq |= htole16(M_SEQNO_GET(m));
}
}
diff --git a/sys/dev/rtwn/rtl8812a/r12a_tx.c b/sys/dev/rtwn/rtl8812a/r12a_tx.c
index acb238316559..6a7af0a9b674 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_tx.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_tx.c
@@ -101,12 +101,12 @@ r12a_tx_set_vht_bw(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
prim_chan = r12a_get_primary_channel(sc, ni->ni_chan);
- if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_80)) {
+ if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_80)) {
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
R12A_TXDW5_DATA_BW80));
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
prim_chan));
- } else if (ieee80211_vht_check_tx_bw(ni, IEEE80211_STA_RX_BW_40)) {
+ } else if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_40)) {
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
R12A_TXDW5_DATA_BW40));
txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
@@ -433,12 +433,9 @@ r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid];
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
-
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, seqno));
}
@@ -493,8 +490,7 @@ r12a_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
} else {
/* Set sequence number. */
- txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ,
- M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE));
+ txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, M_SEQNO_GET(m)));
}
}
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
index f000d9ce9db3..88dcf45dd08a 100644
--- a/sys/dev/smartpqi/smartpqi_event.c
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -115,7 +115,7 @@ pqisrc_ack_all_events(void *arg1)
pending_event = &softs->pending_events[0];
- for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+ for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (pending_event->pending == true) {
pending_event->pending = false;
pqisrc_acknowledge_event(softs, pending_event);
@@ -417,7 +417,7 @@ pqisrc_report_event_config(pqisrc_softstate_t *softs)
softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
PQI_MAX_EVENT_DESCRIPTORS) ;
- for (i=0; i < softs->event_config.num_event_descriptors ;i++){
+ for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
softs->event_config.descriptors[i].event_type =
event_config_p->descriptors[i].event_type;
}
@@ -477,7 +477,7 @@ pqisrc_set_event_config(pqisrc_softstate_t *softs)
event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;
- for (i=0; i < softs->event_config.num_event_descriptors ; i++){
+ for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
event_config_p->descriptors[i].event_type =
softs->event_config.descriptors[i].event_type;
if( pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
index 2e80b01b5436..f05c951cd4f9 100644
--- a/sys/dev/smartpqi/smartpqi_queue.c
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -700,7 +700,7 @@ pqisrc_create_op_obq(pqisrc_softstate_t *softs,
} else {
int i = 0;
DBG_WARN("Error Status Descriptors\n");
- for(i = 0; i < 4;i++)
+ for (i = 0; i < 4; i++)
DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
}
@@ -743,7 +743,7 @@ pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
} else {
int i = 0;
DBG_WARN("Error Status Decsriptors\n");
- for(i = 0; i < 4;i++)
+ for (i = 0; i < 4; i++)
DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
}
diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c
index 1e486b01b168..5dbb5c4f4453 100644
--- a/sys/dev/sound/pci/hda/hdaa.c
+++ b/sys/dev/sound/pci/hda/hdaa.c
@@ -532,9 +532,11 @@ static void
hdaa_presence_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
- struct hdaa_audio_as *as;
+ struct hdaa_audio_as *as, *asp;
+ char buf[32];
uint32_t res;
- int connected, old;
+ int connected, old, i;
+ bool active;
if (w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
@@ -552,13 +554,6 @@ hdaa_presence_handler(struct hdaa_widget *w)
if (connected == old)
return;
w->wclass.pin.connected = connected;
- HDA_BOOTVERBOSE(
- if (connected || old != 2) {
- device_printf(devinfo->dev,
- "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
- w->nid, res, !connected ? "dis" : "");
- }
- );
as = &devinfo->as[w->bindas];
if (as->hpredir >= 0 && as->pins[15] == w->nid)
@@ -567,6 +562,38 @@ hdaa_presence_handler(struct hdaa_widget *w)
hdaa_autorecsrc_handler(as, w);
if (old != 2)
hdaa_channels_handler(as);
+
+ if (connected || old != 2) {
+ HDA_BOOTVERBOSE(
+ device_printf(devinfo->dev,
+ "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
+ w->nid, res, !connected ? "dis" : "");
+ );
+ if (as->hpredir >= 0)
+ return;
+ for (i = 0, active = false; i < devinfo->num_devs; i++) {
+ if (device_get_unit(devinfo->devs[i].dev) == snd_unit) {
+ active = true;
+ break;
+ }
+ }
+ /* Proceed only if we are currently using this codec. */
+ if (!active)
+ return;
+ for (i = 0; i < devinfo->ascnt; i++) {
+ asp = &devinfo->as[i];
+ if (!asp->enable)
+ continue;
+ if ((connected && asp->index == as->index) ||
+ (!connected && asp->dir == as->dir)) {
+ snprintf(buf, sizeof(buf), "cdev=dsp%d",
+ device_get_unit(asp->pdevinfo->dev));
+ devctl_notify("SND", "CONN",
+ asp->dir == HDAA_CTL_IN ? "IN" : "OUT", buf);
+ break;
+ }
+ }
+ }
}
/*
@@ -6194,15 +6221,15 @@ hdaa_configure(device_t dev)
);
hdaa_patch_direct(devinfo);
HDA_BOOTHVERBOSE(
- device_printf(dev, "Pin sense init...\n");
- );
- hdaa_sense_init(devinfo);
- HDA_BOOTHVERBOSE(
device_printf(dev, "Creating PCM devices...\n");
);
hdaa_unlock(devinfo);
hdaa_create_pcms(devinfo);
hdaa_lock(devinfo);
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Pin sense init...\n");
+ );
+ hdaa_sense_init(devinfo);
HDA_BOOTVERBOSE(
if (devinfo->quirks != 0) {
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 8967cb49125c..91bb244578c7 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -362,8 +362,10 @@ hdac_pin_patch(struct hdaa_widget *w)
patch_str = "as=3 seq=15 color=Black loc=Left";
break;
}
- } else if (id == HDA_CODEC_ALC295 &&
- subid == FRAMEWORK_LAPTOP_0005_SUBVENDOR) {
+ } else if ((id == HDA_CODEC_ALC295 &&
+ subid == FRAMEWORK_LAPTOP_0005_SUBVENDOR) ||
+ (id == HDA_CODEC_ALC285 &&
+ subid == FRAMEWORK_LAPTOP_000D_SUBVENDOR)) {
switch (nid) {
case 20:
/*
diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c
index 900578b73de4..80028063bb0d 100644
--- a/sys/dev/sound/pci/hda/hdac.c
+++ b/sys/dev/sound/pci/hda/hdac.c
@@ -133,6 +133,7 @@ static const struct {
{ HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 },
{ HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 },
{ HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 },
+ { HDA_INTEL_ELLK2, "Intel Elkhart Lake", 0, 0 },
{ HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 },
{ HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 },
{ HDA_INTEL_SCH, "Intel SCH", 0, 0 },
@@ -1773,17 +1774,17 @@ hdac_detach(device_t dev)
struct hdac_softc *sc = device_get_softc(dev);
int i, error;
+ callout_drain(&sc->poll_callout);
+ hdac_irq_free(sc);
+ taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
+
error = bus_generic_detach(dev);
if (error != 0)
return (error);
hdac_lock(sc);
- callout_stop(&sc->poll_callout);
hdac_reset(sc, false);
hdac_unlock(sc);
- callout_drain(&sc->poll_callout);
- taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
- hdac_irq_free(sc);
for (i = 0; i < sc->num_ss; i++)
hdac_dma_free(sc, &sc->streams[i].bdl);
@@ -2206,4 +2207,4 @@ static driver_t hdac_driver = {
sizeof(struct hdac_softc),
};
-DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);
+DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY);
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 223434a214b1..c11e6b2d6810 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -66,6 +66,7 @@
#define HDA_INTEL_PCH HDA_MODEL_CONSTRUCT(INTEL, 0x3b56)
#define HDA_INTEL_PCH2 HDA_MODEL_CONSTRUCT(INTEL, 0x3b57)
#define HDA_INTEL_ELLK HDA_MODEL_CONSTRUCT(INTEL, 0x4b55)
+#define HDA_INTEL_ELLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x4b58)
#define HDA_INTEL_JLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x4dc8)
#define HDA_INTEL_BXTNP HDA_MODEL_CONSTRUCT(INTEL, 0x5a98)
#define HDA_INTEL_MACBOOKPRO92 HDA_MODEL_CONSTRUCT(INTEL, 0x7270)
@@ -535,6 +536,7 @@
#define FRAMEWORK_LAPTOP_0003_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0003)
#define FRAMEWORK_LAPTOP_0005_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0005)
#define FRAMEWORK_LAPTOP_0006_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0006)
+#define FRAMEWORK_LAPTOP_000D_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x000d)
/* All codecs you can eat... */
#define HDA_CODEC_CONSTRUCT(vendor, id) \
diff --git a/sys/dev/sound/pcm/channel.h b/sys/dev/sound/pcm/channel.h
index fab182b22774..9ad21d219001 100644
--- a/sys/dev/sound/pcm/channel.h
+++ b/sys/dev/sound/pcm/channel.h
@@ -408,7 +408,7 @@ enum {
#define CHN_F_RESET (CHN_F_BUSY | CHN_F_DEAD | \
CHN_F_VIRTUAL | CHN_F_HAS_VCHAN | \
- CHN_F_VCHAN_DYNAMIC | \
+ CHN_F_VCHAN_DYNAMIC | CHN_F_NBIO | \
CHN_F_PASSTHROUGH | CHN_F_EXCLUSIVE)
#define CHN_F_MMAP_INVALID (CHN_F_DEAD | CHN_F_RUNNING)
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index aa6ad4a59778..fe5576baf017 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -299,7 +299,7 @@ dsp_close(void *data)
CHN_LOCK(rdch);
chn_abort(rdch); /* won't sleep */
rdch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP |
- CHN_F_DEAD | CHN_F_EXCLUSIVE);
+ CHN_F_DEAD | CHN_F_EXCLUSIVE | CHN_F_NBIO);
chn_reset(rdch, 0, 0);
chn_release(rdch);
if (rdch->flags & CHN_F_VIRTUAL) {
@@ -323,7 +323,7 @@ dsp_close(void *data)
CHN_LOCK(wrch);
chn_flush(wrch); /* may sleep */
wrch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP |
- CHN_F_DEAD | CHN_F_EXCLUSIVE);
+ CHN_F_DEAD | CHN_F_EXCLUSIVE | CHN_F_NBIO);
chn_reset(wrch, 0, 0);
chn_release(wrch);
if (wrch->flags & CHN_F_VIRTUAL) {
@@ -671,6 +671,43 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch,
return (0);
}
+#ifdef COMPAT_FREEBSD32
+typedef struct _snd_chan_param32 {
+ uint32_t play_rate;
+ uint32_t rec_rate;
+ uint32_t play_format;
+ uint32_t rec_format;
+} snd_chan_param32;
+#define AIOGFMT32 _IOC_NEWTYPE(AIOGFMT, snd_chan_param32)
+#define AIOSFMT32 _IOC_NEWTYPE(AIOSFMT, snd_chan_param32)
+
+typedef struct _snd_capabilities32 {
+ uint32_t rate_min, rate_max;
+ uint32_t formats;
+ uint32_t bufsize;
+ uint32_t mixers;
+ uint32_t inputs;
+ uint16_t left, right;
+} snd_capabilities32;
+#define AIOGCAP32 _IOC_NEWTYPE(AIOGCAP, snd_capabilities32)
+
+typedef struct audio_errinfo32
+{
+ int32_t play_underruns;
+ int32_t rec_overruns;
+ uint32_t play_ptradjust;
+ uint32_t rec_ptradjust;
+ int32_t play_errorcount;
+ int32_t rec_errorcount;
+ int32_t play_lasterror;
+ int32_t rec_lasterror;
+ int32_t play_errorparm;
+ int32_t rec_errorparm;
+ int32_t filler[16];
+} audio_errinfo32;
+#define SNDCTL_DSP_GETERROR32 _IOC_NEWTYPE(SNDCTL_DSP_GETERROR, audio_errinfo32)
+#endif
+
static int
dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct thread *td)
@@ -829,9 +866,25 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
case AIOSFMT:
case AIOGFMT:
+#ifdef COMPAT_FREEBSD32
+ case AIOSFMT32:
+ case AIOGFMT32:
+#endif
{
snd_chan_param *p = (snd_chan_param *)arg;
+#ifdef COMPAT_FREEBSD32
+ snd_chan_param32 *p32 = (snd_chan_param32 *)arg;
+ snd_chan_param param;
+
+ if (cmd == AIOSFMT32) {
+ p = &param;
+ p->play_rate = p32->play_rate;
+ p->rec_rate = p32->rec_rate;
+ p->play_format = p32->play_format;
+ p->rec_format = p32->rec_format;
+ }
+#endif
if (cmd == AIOSFMT &&
((p->play_format != 0 && p->play_rate == 0) ||
(p->rec_format != 0 && p->rec_rate == 0))) {
@@ -872,15 +925,41 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
p->rec_format = 0;
}
PCM_RELEASE_QUICK(d);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == AIOSFMT32 || cmd == AIOGFMT32) {
+ p32->play_rate = p->play_rate;
+ p32->rec_rate = p->rec_rate;
+ p32->play_format = p->play_format;
+ p32->rec_format = p->rec_format;
+ }
+#endif
}
break;
case AIOGCAP: /* get capabilities */
+#ifdef COMPAT_FREEBSD32
+ case AIOGCAP32:
+#endif
{
snd_capabilities *p = (snd_capabilities *)arg;
struct pcmchan_caps *pcaps = NULL, *rcaps = NULL;
struct cdev *pdev;
-
+#ifdef COMPAT_FREEBSD32
+ snd_capabilities32 *p32 = (snd_capabilities32 *)arg;
+ snd_capabilities capabilities;
+
+ if (cmd == AIOGCAP32) {
+ p = &capabilities;
+ p->rate_min = p32->rate_min;
+ p->rate_max = p32->rate_max;
+ p->formats = p32->formats;
+ p->bufsize = p32->bufsize;
+ p->mixers = p32->mixers;
+ p->inputs = p32->inputs;
+ p->left = p32->left;
+ p->right = p32->right;
+ }
+#endif
PCM_LOCK(d);
if (rdch) {
CHN_LOCK(rdch);
@@ -913,6 +992,18 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (rdch)
CHN_UNLOCK(rdch);
PCM_UNLOCK(d);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == AIOGCAP32) {
+ p32->rate_min = p->rate_min;
+ p32->rate_max = p->rate_max;
+ p32->formats = p->formats;
+ p32->bufsize = p->bufsize;
+ p32->mixers = p->mixers;
+ p32->inputs = p->inputs;
+ p32->left = p->left;
+ p32->right = p->right;
+ }
+#endif
}
break;
@@ -1635,6 +1726,9 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
break;
case SNDCTL_DSP_GETERROR:
+#ifdef COMPAT_FREEBSD32
+ case SNDCTL_DSP_GETERROR32:
+#endif
/*
* OSSv4 docs: "All errors and counters will automatically be
* cleared to zeroes after the call so each call will return only
@@ -1644,6 +1738,14 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
*/
{
audio_errinfo *ei = (audio_errinfo *)arg;
+#ifdef COMPAT_FREEBSD32
+ audio_errinfo errinfo;
+ audio_errinfo32 *ei32 = (audio_errinfo32 *)arg;
+
+ if (cmd == SNDCTL_DSP_GETERROR32) {
+ ei = &errinfo;
+ }
+#endif
bzero((void *)ei, sizeof(*ei));
@@ -1659,6 +1761,21 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
rdch->xruns = 0;
CHN_UNLOCK(rdch);
}
+#ifdef COMPAT_FREEBSD32
+ if (cmd == SNDCTL_DSP_GETERROR32) {
+ bzero((void *)ei32, sizeof(*ei32));
+ ei32->play_underruns = ei->play_underruns;
+ ei32->rec_overruns = ei->rec_overruns;
+ ei32->play_ptradjust = ei->play_ptradjust;
+ ei32->rec_ptradjust = ei->rec_ptradjust;
+ ei32->play_errorcount = ei->play_errorcount;
+ ei32->rec_errorcount = ei->rec_errorcount;
+ ei32->play_lasterror = ei->play_lasterror;
+ ei32->rec_lasterror = ei->rec_lasterror;
+ ei32->play_errorparm = ei->play_errorparm;
+ ei32->rec_errorparm = ei->rec_errorparm;
+ }
+#endif
}
break;
diff --git a/sys/dev/sym/sym_hipd.c b/sys/dev/sym/sym_hipd.c
index fa65d544e17d..b4e5c1075fb4 100644
--- a/sys/dev/sym/sym_hipd.c
+++ b/sys/dev/sym/sym_hipd.c
@@ -3266,7 +3266,7 @@ static void sym_init (hcb_p np, int reason)
* Reinitialize usrwide.
* Prepare sync negotiation according to actual SCSI bus mode.
*/
- for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
+ for (i = 0; i < SYM_CONF_MAX_TARGET; i++) {
tcb_p tp = &np->target[i];
tp->to_reset = 0;
@@ -3715,7 +3715,7 @@ static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
}
printf ("%s: regdump:", sym_name(np));
- for (i=0; i<24;i++)
+ for (i = 0; i < 24; i++)
printf (" %02x", (unsigned)INB_OFF(i));
printf (".\n");
@@ -5527,8 +5527,8 @@ static int sym_show_msg (u_char * msg)
u_char i;
printf ("%x",*msg);
if (*msg==M_EXTENDED) {
- for (i=1;i<8;i++) {
- if (i-1>msg[1]) break;
+ for (i = 1; i < 8; i++) {
+ if (i - 1 > msg[1]) break;
printf ("-%x",msg[i]);
}
return (i+1);
@@ -6744,10 +6744,10 @@ restart_test:
/*
* Wait 'til done (with timeout)
*/
- for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
+ for (i = 0; i < SYM_SNOOP_TIMEOUT; i++)
if (INB(nc_istat) & (INTF|SIP|DIP))
break;
- if (i>=SYM_SNOOP_TIMEOUT) {
+ if (i >= SYM_SNOOP_TIMEOUT) {
printf ("CACHE TEST FAILED: timeout.\n");
return (0x20);
}
diff --git a/sys/dev/thunderbolt/hcm.c b/sys/dev/thunderbolt/hcm.c
new file mode 100644
index 000000000000..b8f703fc3b52
--- /dev/null
+++ b/sys/dev/thunderbolt/hcm.c
@@ -0,0 +1,223 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Host Configuration Manager (HCM) for USB4 and later TB3 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/hcm_var.h>
+
+static void hcm_cfg_task(void *, int);
+
+int
+hcm_attach(struct nhi_softc *nsc)
+{
+ struct hcm_softc *hcm;
+
+ tb_debug(nsc, DBG_HCM|DBG_EXTRA, "hcm_attach called\n");
+
+ hcm = malloc(sizeof(struct hcm_softc), M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (hcm == NULL) {
+ tb_debug(nsc, DBG_HCM, "Cannot allocate hcm object\n");
+ return (ENOMEM);
+ }
+
+ hcm->dev = nsc->dev;
+ hcm->nsc = nsc;
+ nsc->hcm = hcm;
+
+ hcm->taskqueue = taskqueue_create("hcm_event", M_NOWAIT,
+ taskqueue_thread_enqueue, &hcm->taskqueue);
+ if (hcm->taskqueue == NULL)
+ return (ENOMEM);
+ taskqueue_start_threads(&hcm->taskqueue, 1, PI_DISK, "tbhcm%d_tq",
+ device_get_unit(nsc->dev));
+ TASK_INIT(&hcm->cfg_task, 0, hcm_cfg_task, hcm);
+
+ return (0);
+}
+
+int
+hcm_detach(struct nhi_softc *nsc)
+{
+ struct hcm_softc *hcm;
+
+ hcm = nsc->hcm;
+ if (hcm->taskqueue)
+ taskqueue_free(hcm->taskqueue);
+ free(hcm, M_THUNDERBOLT);
+
+ return (0);
+}
+
+int
+hcm_router_discover(struct hcm_softc *hcm)
+{
+
+ taskqueue_enqueue(hcm->taskqueue, &hcm->cfg_task);
+
+ return (0);
+}
+
+static void
+hcm_cfg_task(void *arg, int pending)
+{
+ struct hcm_softc *hcm;
+ struct router_softc *rsc;
+ struct router_cfg_cap cap;
+ struct tb_cfg_router *cfg;
+ struct tb_cfg_adapter *adp;
+ struct tb_cfg_cap_lane *lane;
+ uint32_t *buf;
+ uint8_t *u;
+ u_int error, i, offset;
+
+ hcm = (struct hcm_softc *)arg;
+
+ tb_debug(hcm, DBG_HCM|DBG_EXTRA, "hcm_cfg_task called\n");
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL) {
+ tb_debug(hcm, DBG_HCM, "Cannot alloc memory for discovery\n");
+ return;
+ }
+
+ rsc = hcm->nsc->root_rsc;
+ error = tb_config_router_read(rsc, 0, 5, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return;
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+
+ cap.space = TB_CFG_CS_ROUTER;
+ cap.adap = 0;
+ cap.next_cap = GET_ROUTER_CS_NEXT_CAP(cfg);
+ while (cap.next_cap != 0) {
+ error = tb_config_next_cap(rsc, &cap);
+ if (error != 0)
+ break;
+
+ if ((cap.cap_id == TB_CFG_CAP_VSEC) && (cap.vsc_len == 0)) {
+ tb_debug(hcm, DBG_HCM, "Router Cap= %d, vsec= %d, "
+ "len= %d, next_cap= %d\n", cap.cap_id,
+ cap.vsc_id, cap.vsec_len, cap.next_cap);
+ } else if (cap.cap_id == TB_CFG_CAP_VSC) {
+ tb_debug(hcm, DBG_HCM, "Router cap= %d, vsc= %d, "
+ "len= %d, next_cap= %d\n", cap.cap_id,
+ cap.vsc_id, cap.vsc_len, cap.next_cap);
+ } else
+ tb_debug(hcm, DBG_HCM, "Router cap= %d, "
+ "next_cap= %d\n", cap.cap_id, cap.next_cap);
+ if (cap.next_cap > TB_CFG_CAP_OFFSET_MAX)
+ cap.next_cap = 0;
+ }
+
+ u = (uint8_t *)buf;
+ error = tb_config_get_lc_uuid(rsc, u);
+ if (error == 0) {
+ tb_debug(hcm, DBG_HCM, "Router LC UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7], u[8],
+ u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
+ } else
+ tb_printf(hcm, "Error finding LC registers: %d\n", error);
+
+ for (i = 1; i <= rsc->max_adap; i++) {
+ error = tb_config_adapter_read(rsc, i, 0, 8, buf);
+ if (error != 0) {
+ tb_debug(hcm, DBG_HCM, "Adapter %d: no adapter\n", i);
+ continue;
+ }
+ adp = (struct tb_cfg_adapter *)buf;
+ tb_debug(hcm, DBG_HCM, "Adapter %d: %s, max_counters= 0x%08x,"
+ " adapter_num= %d\n", i,
+ tb_get_string(GET_ADP_CS_TYPE(adp), tb_adapter_type),
+ GET_ADP_CS_MAX_COUNTERS(adp), GET_ADP_CS_ADP_NUM(adp));
+
+ if (GET_ADP_CS_TYPE(adp) != ADP_CS2_LANE)
+ continue;
+
+ error = tb_config_find_adapter_cap(rsc, i, TB_CFG_CAP_LANE,
+ &offset);
+ if (error)
+ continue;
+
+ error = tb_config_adapter_read(rsc, i, offset, 3, buf);
+ if (error)
+ continue;
+
+ lane = (struct tb_cfg_cap_lane *)buf;
+ tb_debug(hcm, DBG_HCM, "Lane Adapter State= %s %s\n",
+ tb_get_string((lane->current_lws & CAP_LANE_STATE_MASK),
+ tb_adapter_state), (lane->targ_lwp & CAP_LANE_DISABLE) ?
+ "disabled" : "enabled");
+
+ if ((lane->current_lws & CAP_LANE_STATE_MASK) ==
+ CAP_LANE_STATE_CL0) {
+ tb_route_t newr;
+
+ newr.hi = rsc->route.hi;
+ newr.lo = rsc->route.lo | (i << rsc->depth * 8);
+
+ tb_printf(hcm, "want to add router at 0x%08x%08x\n",
+ newr.hi, newr.lo);
+ error = tb_router_attach(rsc, newr);
+ tb_printf(rsc, "tb_router_attach returned %d\n", error);
+ }
+ }
+
+ free(buf, M_THUNDERBOLT);
+}
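+
+/*
+ * Note on the route math above: a route string packs one adapter number
+ * per hop, 8 bits per level of topology depth, which is why the child
+ * router's route is rsc->route.lo | (i << rsc->depth * 8).  A minimal
+ * standalone sketch of the encoding (illustrative only, not driver code):
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   int
+ *   main(void)
+ *   {
+ *           uint64_t route = 0;                    // root router is 0x0
+ *           unsigned depth = 0;
+ *
+ *           route |= (uint64_t)3 << (depth++ * 8); // adapter 3 at depth 0
+ *           route |= (uint64_t)5 << (depth++ * 8); // adapter 5 at depth 1
+ *           printf("route = 0x%016jx\n", (uintmax_t)route);
+ *           return (0);
+ *   }
+ */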
diff --git a/sys/dev/thunderbolt/hcm_var.h b/sys/dev/thunderbolt/hcm_var.h
new file mode 100644
index 000000000000..a11c8e9b6a92
--- /dev/null
+++ b/sys/dev/thunderbolt/hcm_var.h
@@ -0,0 +1,47 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _HCM_VAR_H
+#define _HCM_VAR_H
+
+struct hcm_softc {
+ u_int debug;
+ device_t dev;
+ struct nhi_softc *nsc;
+
+ struct task cfg_task;
+ struct taskqueue *taskqueue;
+};
+
+int hcm_attach(struct nhi_softc *);
+int hcm_detach(struct nhi_softc *);
+int hcm_router_discover(struct hcm_softc *);
+
+#endif /* _HCM_VAR_H */
diff --git a/sys/dev/thunderbolt/nhi.c b/sys/dev/thunderbolt/nhi.c
new file mode 100644
index 000000000000..205e69c16253
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi.c
@@ -0,0 +1,1170 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe interface for Thunderbolt Native Host Interface (nhi) */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/hcm_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_dev.h>
+#include "tb_if.h"
+
+static int nhi_alloc_ring(struct nhi_softc *, int, int, int,
+ struct nhi_ring_pair **);
+static void nhi_free_ring(struct nhi_ring_pair *);
+static void nhi_free_rings(struct nhi_softc *);
+static int nhi_configure_ring(struct nhi_softc *, struct nhi_ring_pair *);
+static int nhi_activate_ring(struct nhi_ring_pair *);
+static int nhi_deactivate_ring(struct nhi_ring_pair *);
+static int nhi_alloc_ring0(struct nhi_softc *);
+static void nhi_free_ring0(struct nhi_softc *);
+static void nhi_fill_rx_ring(struct nhi_softc *, struct nhi_ring_pair *);
+static int nhi_init(struct nhi_softc *);
+static void nhi_post_init(void *);
+static int nhi_tx_enqueue(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+static int nhi_setup_sysctl(struct nhi_softc *);
+
+SYSCTL_NODE(_hw, OID_AUTO, nhi, CTLFLAG_RD, 0, "NHI Driver Parameters");
+
+MALLOC_DEFINE(M_NHI, "nhi", "nhi driver memory");
+
+#ifndef NHI_DEBUG_LEVEL
+#define NHI_DEBUG_LEVEL 0
+#endif
+
+/* 0 = default, 1 = force-on, 2 = force-off */
+#ifndef NHI_FORCE_HCM
+#define NHI_FORCE_HCM 0
+#endif
+
+void
+nhi_get_tunables(struct nhi_softc *sc)
+{
+ devclass_t dc;
+ device_t ufp;
+ char tmpstr[80], oid[80];
+ u_int val;
+
+ /* Set local defaults */
+ sc->debug = NHI_DEBUG_LEVEL;
+ sc->max_ring_count = NHI_DEFAULT_NUM_RINGS;
+ sc->force_hcm = NHI_FORCE_HCM;
+
+ /* Inherit setting from the upstream thunderbolt switch node */
+ val = TB_GET_DEBUG(sc->dev, &sc->debug);
+ if (val != 0) {
+ dc = devclass_find("tbolt");
+ if (dc != NULL) {
+ ufp = devclass_get_device(dc, device_get_unit(sc->dev));
+ if (ufp != NULL)
+ TB_GET_DEBUG(ufp, &sc->debug);
+ } else {
+ if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid,
+ 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+ }
+ }
+
+ /*
+ * Grab global variables. Allow nhi debug flags to override
+ * thunderbolt debug flags, if present.
+ */
+ bzero(oid, 80);
+ if (TUNABLE_STR_FETCH("hw.nhi.debug_level", oid, 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+ if (TUNABLE_INT_FETCH("hw.nhi.max_rings", &val) != 0) {
+ val = min(val, NHI_MAX_NUM_RINGS);
+ sc->max_ring_count = max(val, 1);
+ }
+ if (TUNABLE_INT_FETCH("hw.nhi.force_hcm", &val) != 0)
+ sc->force_hcm = val;
+
+ /* Grab instance variables */
+ bzero(oid, 80);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.debug_level",
+ device_get_unit(sc->dev));
+ if (TUNABLE_STR_FETCH(tmpstr, oid, 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.max_rings",
+ device_get_unit(sc->dev));
+ if (TUNABLE_INT_FETCH(tmpstr, &val) != 0) {
+ val = min(val, NHI_MAX_NUM_RINGS);
+ sc->max_ring_count = max(val, 1);
+ }
+ snprintf(tmpstr, sizeof(tmpstr), "dev, nhi.%d.force_hcm",
+ device_get_unit(sc->dev));
+ if (TUNABLE_INT_FETCH(tmpstr, &val) != 0)
+ sc->force_hcm = val;
+
+ return;
+}
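+
+/*
+ * Example loader.conf(5) lines for the tunables fetched above.  The names
+ * come straight from this function; the debug_level value is only a
+ * placeholder since the accepted flag strings are defined by
+ * tb_parse_debug():
+ *
+ *   hw.nhi.max_rings="8"
+ *   hw.nhi.force_hcm="1"
+ *   dev.nhi.0.debug_level="<flags accepted by tb_parse_debug>"
+ */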
+
+static void
+nhi_configure_caps(struct nhi_softc *sc)
+{
+
+ if (NHI_IS_USB4(sc) || (sc->force_hcm == NHI_FORCE_HCM_ON))
+ sc->caps |= NHI_CAP_HCM;
+ if (sc->force_hcm == NHI_FORCE_HCM_OFF)
+ sc->caps &= ~NHI_CAP_HCM;
+}
+
+struct nhi_cmd_frame *
+nhi_alloc_tx_frame(struct nhi_ring_pair *r)
+{
+ struct nhi_cmd_frame *cmd;
+
+ mtx_lock(&r->mtx);
+ cmd = nhi_alloc_tx_frame_locked(r);
+ mtx_unlock(&r->mtx);
+
+ return (cmd);
+}
+
+void
+nhi_free_tx_frame(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ mtx_lock(&r->mtx);
+ nhi_free_tx_frame_locked(r, cmd);
+ mtx_unlock(&r->mtx);
+}
+
+/*
+ * Push a command and data dword through the mailbox to the firmware.
+ * Response is either good, error, or timeout. Commands that return data
+ * do so by reading OUTMAILDATA.
+ */
+int
+nhi_inmail_cmd(struct nhi_softc *sc, uint32_t cmd, uint32_t data)
+{
+ uint32_t val;
+ u_int error, timeout;
+
+ mtx_lock(&sc->nhi_mtx);
+ /*
+ * XXX Should a defer/reschedule happen here, or is it not worth
+ * worrying about?
+ */
+ if (sc->hwflags & NHI_MBOX_BUSY) {
+ mtx_unlock(&sc->nhi_mtx);
+ tb_debug(sc, DBG_MBOX, "Driver busy with mailbox\n");
+ return (EBUSY);
+ }
+ sc->hwflags |= NHI_MBOX_BUSY;
+
+ val = nhi_read_reg(sc, TBT_INMAILCMD);
+ tb_debug(sc, DBG_MBOX|DBG_FULL, "Reading INMAILCMD= 0x%08x\n", val);
+ if (val & INMAILCMD_ERROR)
+ tb_debug(sc, DBG_MBOX, "Error already set in INMAILCMD\n");
+ if (val & INMAILCMD_OPREQ) {
+ mtx_unlock(&sc->nhi_mtx);
+ tb_debug(sc, DBG_MBOX,
+ "INMAILCMD request already in progress\n");
+ return (EBUSY);
+ }
+
+ nhi_write_reg(sc, TBT_INMAILDATA, data);
+ nhi_write_reg(sc, TBT_INMAILCMD, cmd | INMAILCMD_OPREQ);
+
+ /* Poll at 1s intervals */
+ timeout = NHI_MAILBOX_TIMEOUT;
+ while (timeout--) {
+ DELAY(1000000);
+ val = nhi_read_reg(sc, TBT_INMAILCMD);
+ tb_debug(sc, DBG_MBOX|DBG_EXTRA,
+ "Polling INMAILCMD= 0x%08x\n", val);
+ if ((val & INMAILCMD_OPREQ) == 0)
+ break;
+ }
+ sc->hwflags &= ~NHI_MBOX_BUSY;
+ mtx_unlock(&sc->nhi_mtx);
+
+ error = 0;
+ if (val & INMAILCMD_OPREQ) {
+ tb_printf(sc, "Timeout waiting for mailbox\n");
+ error = ETIMEDOUT;
+ }
+ if (val & INMAILCMD_ERROR) {
+ tb_printf(sc, "Firmware reports error in mailbox\n");
+ error = EINVAL;
+ }
+
+ return (error);
+}
+
+/*
+ * Pull command status and data from the firmware mailbox.
+ */
+int
+nhi_outmail_cmd(struct nhi_softc *sc, uint32_t *val)
+{
+
+ if (val == NULL)
+ return (EINVAL);
+ *val = nhi_read_reg(sc, TBT_OUTMAILCMD);
+ return (0);
+}
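+
+/*
+ * Example of pairing the two mailbox calls (illustrative sketch only;
+ * EXAMPLE_CMD is a placeholder, not a real firmware opcode):
+ *
+ *   uint32_t result;
+ *
+ *   if (nhi_inmail_cmd(sc, EXAMPLE_CMD, 0) == 0 &&
+ *       nhi_outmail_cmd(sc, &result) == 0)
+ *           tb_debug(sc, DBG_MBOX, "OUTMAILCMD= 0x%08x\n", result);
+ */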
+
+int
+nhi_attach(struct nhi_softc *sc)
+{
+ uint32_t val;
+ int error = 0;
+
+ if ((error = nhi_setup_sysctl(sc)) != 0)
+ return (error);
+
+ mtx_init(&sc->nhi_mtx, "nhimtx", "NHI Control Mutex", MTX_DEF);
+
+ nhi_configure_caps(sc);
+
+ /*
+ * Get the number of TX/RX paths. This sizes some of the register
+ * arrays during allocation and initialization. USB4 spec says that
+ * the max is 21. Alpine Ridge appears to default to 12.
+ */
+ val = GET_HOST_CAPS_PATHS(nhi_read_reg(sc, NHI_HOST_CAPS));
+ tb_debug(sc, DBG_INIT|DBG_NOISY, "Total Paths= %d\n", val);
+ if ((val == 0) || (val > 21) || ((NHI_IS_AR(sc) && val != 12))) {
+ tb_printf(sc, "WARN: unexpected number of paths: %d\n", val);
+ /* return (ENXIO); */
+ }
+ sc->path_count = val;
+
+ SLIST_INIT(&sc->ring_list);
+
+ error = nhi_pci_configure_interrupts(sc);
+ if (error == 0)
+ error = nhi_alloc_ring0(sc);
+ if (error == 0) {
+ nhi_configure_ring(sc, sc->ring0);
+ nhi_activate_ring(sc->ring0);
+ nhi_fill_rx_ring(sc, sc->ring0);
+ }
+
+ if (error == 0)
+ error = tbdev_add_interface(sc);
+
+ if ((error == 0) && (NHI_USE_ICM(sc)))
+ tb_printf(sc, "WARN: device uses an internal connection manager\n");
+ if ((error == 0) && (NHI_USE_HCM(sc)))
+ error = hcm_attach(sc);
+
+ if (error == 0)
+ error = nhi_init(sc);
+
+ return (error);
+}
+
+int
+nhi_detach(struct nhi_softc *sc)
+{
+
+ if (NHI_USE_HCM(sc))
+ hcm_detach(sc);
+
+ if (sc->root_rsc != NULL)
+ tb_router_detach(sc->root_rsc);
+
+ tbdev_remove_interface(sc);
+
+ nhi_pci_disable_interrupts(sc);
+
+ nhi_free_ring0(sc);
+
+ /* XXX Should the rings be marked as !VALID in the descriptors? */
+ nhi_free_rings(sc);
+
+ mtx_destroy(&sc->nhi_mtx);
+
+ return (0);
+}
+
+static void
+nhi_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ bus_addr_t *addr;
+
+ addr = arg;
+ if (error == 0 && nsegs == 1) {
+ *addr = segs[0].ds_addr;
+ } else
+ *addr = 0;
+}
+
+static int
+nhi_alloc_ring(struct nhi_softc *sc, int ringnum, int tx_depth, int rx_depth,
+ struct nhi_ring_pair **rp)
+{
+ bus_dma_template_t t;
+ bus_addr_t ring_busaddr;
+ struct nhi_ring_pair *r;
+ int ring_size, error;
+ u_int rxring_len, txring_len;
+ char *ring;
+
+ if (ringnum >= sc->max_ring_count) {
+ tb_debug(sc, DBG_INIT, "Tried to allocate ring number %d\n",
+ ringnum);
+ return (EINVAL);
+ }
+
+ /* Allocate the ring structure and the RX ring tracker together. */
+ rxring_len = rx_depth * sizeof(void *);
+ txring_len = tx_depth * sizeof(void *);
+ r = malloc(sizeof(struct nhi_ring_pair) + rxring_len + txring_len,
+ M_NHI, M_NOWAIT|M_ZERO);
+ if (r == NULL) {
+ tb_printf(sc, "ERROR: Cannot allocate ring memory\n");
+ return (ENOMEM);
+ }
+
+ r->sc = sc;
+ TAILQ_INIT(&r->tx_head);
+ TAILQ_INIT(&r->rx_head);
+ r->ring_num = ringnum;
+ r->tx_ring_depth = tx_depth;
+ r->tx_ring_mask = tx_depth - 1;
+ r->rx_ring_depth = rx_depth;
+ r->rx_ring_mask = rx_depth - 1;
+ r->rx_pici_reg = NHI_RX_RING_PICI + ringnum * 16;
+ r->tx_pici_reg = NHI_TX_RING_PICI + ringnum * 16;
+ r->rx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r + sizeof (*r));
+ r->tx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r->rx_cmd_ring +
+ rxring_len);
+
+ snprintf(r->name, NHI_RING_NAMELEN, "nhiring%d", ringnum);
+ mtx_init(&r->mtx, r->name, "NHI Ring Lock", MTX_DEF);
+ tb_debug(sc, DBG_INIT | DBG_FULL, "Allocated ring context at %p, "
+ "mutex %p\n", r, &r->mtx);
+
+ /* Allocate the RX and TX buffer descriptor rings */
+ ring_size = sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+ ring_size += sizeof(struct nhi_rx_buffer_desc) * r->rx_ring_depth;
+ tb_debug(sc, DBG_INIT | DBG_FULL, "Ring %d ring_size= %d\n",
+ ringnum, ring_size);
+
+ bus_dma_template_init(&t, sc->parent_dmat);
+ t.alignment = 4;
+ t.maxsize = t.maxsegsize = ring_size;
+ t.nsegments = 1;
+ if ((error = bus_dma_template_tag(&t, &r->ring_dmat)) != 0) {
+ tb_printf(sc, "Cannot allocate ring %d DMA tag: %d\n",
+ ringnum, error);
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(r->ring_dmat, (void **)&ring, BUS_DMA_NOWAIT,
+ &r->ring_map)) {
+ tb_printf(sc, "Cannot allocate ring memory\n");
+ return (ENOMEM);
+ }
+ bzero(ring, ring_size);
+ bus_dmamap_load(r->ring_dmat, r->ring_map, ring, ring_size,
+ nhi_memaddr_cb, &ring_busaddr, 0);
+
+ r->ring = ring;
+
+ r->tx_ring = (union nhi_ring_desc *)(ring);
+ r->tx_ring_busaddr = ring_busaddr;
+ ring += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+ ring_busaddr += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+
+ r->rx_ring = (union nhi_ring_desc *)(ring);
+ r->rx_ring_busaddr = ring_busaddr;
+
+ tb_debug(sc, DBG_INIT | DBG_EXTRA, "Ring %d: RX %p [0x%jx] "
+ "TX %p [0x%jx]\n", ringnum, r->tx_ring, r->tx_ring_busaddr,
+ r->rx_ring, r->rx_ring_busaddr);
+
+ *rp = r;
+ return (0);
+}
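+
+/*
+ * The single malloc above carves three objects out of one buffer: the
+ * nhi_ring_pair itself, then rx_depth RX command pointers, then tx_depth
+ * TX command pointers.  Sketch of the resulting layout (illustrative
+ * only):
+ *
+ *   r                                       struct nhi_ring_pair
+ *   (uint8_t *)r + sizeof(*r)               rx_cmd_ring[rx_depth]
+ *   (uint8_t *)r + sizeof(*r) + rxring_len  tx_cmd_ring[tx_depth]
+ *
+ * One allocation keeps the trackers contiguous with the ring state and
+ * lets a single free(r, M_NHI) release everything.
+ */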
+
+static void
+nhi_free_ring(struct nhi_ring_pair *r)
+{
+
+ tb_debug(r->sc, DBG_INIT, "Freeing ring %d resources\n", r->ring_num);
+ nhi_deactivate_ring(r);
+
+ if (r->tx_ring_busaddr != 0) {
+ bus_dmamap_unload(r->ring_dmat, r->ring_map);
+ r->tx_ring_busaddr = 0;
+ }
+ if (r->ring != NULL) {
+ bus_dmamem_free(r->ring_dmat, r->ring, r->ring_map);
+ r->ring = NULL;
+ }
+ if (r->ring_dmat != NULL) {
+ bus_dma_tag_destroy(r->ring_dmat);
+ r->ring_dmat = NULL;
+ }
+ mtx_destroy(&r->mtx);
+}
+
+static void
+nhi_free_rings(struct nhi_softc *sc)
+{
+ struct nhi_ring_pair *r;
+
+ while ((r = SLIST_FIRST(&sc->ring_list)) != NULL) {
+ nhi_free_ring(r);
+ mtx_lock(&sc->nhi_mtx);
+ SLIST_REMOVE_HEAD(&sc->ring_list, ring_link);
+ mtx_unlock(&sc->nhi_mtx);
+ free(r, M_NHI);
+ }
+
+ return;
+}
+
+static int
+nhi_configure_ring(struct nhi_softc *sc, struct nhi_ring_pair *ring)
+{
+ bus_addr_t busaddr;
+ uint32_t val;
+ int idx;
+
+ idx = ring->ring_num * 16;
+
+ /* Program the TX ring address and size */
+ busaddr = ring->tx_ring_busaddr;
+ nhi_write_reg(sc, NHI_TX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
+ nhi_write_reg(sc, NHI_TX_RING_ADDR_HI + idx, busaddr >> 32);
+ nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, ring->tx_ring_depth);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_TIMESTAMP + idx, 0x0);
+ tb_debug(sc, DBG_INIT, "TX Ring %d TX_RING_SIZE= 0x%x\n",
+ ring->ring_num, ring->tx_ring_depth);
+
+ /* Program the RX ring address and size */
+ busaddr = ring->rx_ring_busaddr;
+ val = (ring->rx_buffer_size << 16) | ring->rx_ring_depth;
+ nhi_write_reg(sc, NHI_RX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
+ nhi_write_reg(sc, NHI_RX_RING_ADDR_HI + idx, busaddr >> 32);
+ nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, val);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE1 + idx, 0xffffffff);
+ tb_debug(sc, DBG_INIT, "RX Ring %d RX_RING_SIZE= 0x%x\n",
+ ring->ring_num, val);
+
+ return (0);
+}
+
+static int
+nhi_activate_ring(struct nhi_ring_pair *ring)
+{
+ struct nhi_softc *sc = ring->sc;
+ int idx;
+
+ nhi_pci_enable_interrupt(ring);
+
+ idx = ring->ring_num * 32;
+ tb_debug(sc, DBG_INIT, "Activating ring %d at idx %d\n",
+ ring->ring_num, idx);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx,
+ TX_TABLE_RAW | TX_TABLE_VALID);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx,
+ RX_TABLE_RAW | RX_TABLE_VALID);
+
+ return (0);
+}
+
+static int
+nhi_deactivate_ring(struct nhi_ring_pair *r)
+{
+ struct nhi_softc *sc = r->sc;
+ int idx;
+
+ idx = r->ring_num * 32;
+ tb_debug(sc, DBG_INIT, "Deactiving ring %d at idx %d\n",
+ r->ring_num, idx);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx, 0);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx, 0);
+
+ idx = r->ring_num * 16;
+ tb_debug(sc, DBG_INIT, "Setting ring %d sizes to 0\n", r->ring_num);
+ nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, 0);
+ nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, 0);
+
+ return (0);
+}
+
+static int
+nhi_alloc_ring0(struct nhi_softc *sc)
+{
+ bus_addr_t frames_busaddr;
+ bus_dma_template_t t;
+ struct nhi_intr_tracker *trkr;
+ struct nhi_ring_pair *r;
+ struct nhi_cmd_frame *cmd;
+ char *frames;
+ int error, size, i;
+
+ if ((error = nhi_alloc_ring(sc, 0, NHI_RING0_TX_DEPTH,
+ NHI_RING0_RX_DEPTH, &r)) != 0) {
+ tb_printf(sc, "Error allocating control ring\n");
+ return (error);
+ }
+
+ r->rx_buffer_size = NHI_RING0_FRAME_SIZE; /* Control packets are small */
+
+ /* Allocate the RX and TX buffers that are used for Ring0 comms */
+ size = r->tx_ring_depth * NHI_RING0_FRAME_SIZE;
+ size += r->rx_ring_depth * NHI_RING0_FRAME_SIZE;
+
+ bus_dma_template_init(&t, sc->parent_dmat);
+ t.maxsize = t.maxsegsize = size;
+ t.nsegments = 1;
+ if (bus_dma_template_tag(&t, &sc->ring0_dmat)) {
+ tb_printf(sc, "Error allocating control ring buffer tag\n");
+ return (ENOMEM);
+ }
+
+ if (bus_dmamem_alloc(sc->ring0_dmat, (void **)&frames, BUS_DMA_NOWAIT,
+ &sc->ring0_map) != 0) {
+ tb_printf(sc, "Error allocating control ring memory\n");
+ return (ENOMEM);
+ }
+ bzero(frames, size);
+ bus_dmamap_load(sc->ring0_dmat, sc->ring0_map, frames, size,
+ nhi_memaddr_cb, &frames_busaddr, 0);
+ sc->ring0_frames_busaddr = frames_busaddr;
+ sc->ring0_frames = frames;
+
+ /* Allocate the driver command trackers */
+ sc->ring0_cmds = malloc(sizeof(struct nhi_cmd_frame) *
+ (r->tx_ring_depth + r->rx_ring_depth), M_NHI, M_NOWAIT | M_ZERO);
+ if (sc->ring0_cmds == NULL)
+ return (ENOMEM);
+
+ /* Initialize the RX frames so they can be used */
+ mtx_lock(&r->mtx);
+ for (i = 0; i < r->rx_ring_depth; i++) {
+ cmd = &sc->ring0_cmds[i];
+ cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
+ cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
+ cmd->flags = CMD_MAPPED;
+ cmd->idx = i;
+ TAILQ_INSERT_TAIL(&r->rx_head, cmd, cm_link);
+ }
+
+ /* Initialize the TX frames */
+ for ( ; i < r->tx_ring_depth + r->rx_ring_depth - 1; i++) {
+ cmd = &sc->ring0_cmds[i];
+ cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
+ cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
+ cmd->flags = CMD_MAPPED;
+ cmd->idx = i;
+ nhi_free_tx_frame_locked(r, cmd);
+ }
+ mtx_unlock(&r->mtx);
+
+ /* Do a 1:1 mapping of rings to interrupt vectors. */
+ /* XXX Should be abstracted */
+ trkr = &sc->intr_trackers[0];
+ trkr->ring = r;
+ r->tracker = trkr;
+
+ /* XXX Should be an array */
+ sc->ring0 = r;
+ SLIST_INSERT_HEAD(&sc->ring_list, r, ring_link);
+
+ return (0);
+}
+
+static void
+nhi_free_ring0(struct nhi_softc *sc)
+{
+ if (sc->ring0_cmds != NULL) {
+ free(sc->ring0_cmds, M_NHI);
+ sc->ring0_cmds = NULL;
+ }
+
+ if (sc->ring0_frames_busaddr != 0) {
+ bus_dmamap_unload(sc->ring0_dmat, sc->ring0_map);
+ sc->ring0_frames_busaddr = 0;
+ }
+
+ if (sc->ring0_frames != NULL) {
+ bus_dmamem_free(sc->ring0_dmat, sc->ring0_frames,
+ sc->ring0_map);
+ sc->ring0_frames = NULL;
+ }
+
+ if (sc->ring0_dmat != NULL)
+ bus_dma_tag_destroy(sc->ring0_dmat);
+
+ return;
+}
+
+static void
+nhi_fill_rx_ring(struct nhi_softc *sc, struct nhi_ring_pair *rp)
+{
+ struct nhi_cmd_frame *cmd;
+ struct nhi_rx_buffer_desc *desc;
+ u_int ci;
+
+ /* Assume that we never grow or shrink the ring population */
+ rp->rx_ci = ci = 0;
+ rp->rx_pi = 0;
+
+ do {
+ cmd = TAILQ_FIRST(&rp->rx_head);
+ if (cmd == NULL)
+ break;
+ TAILQ_REMOVE(&rp->rx_head, cmd, cm_link);
+ desc = &rp->rx_ring[ci].rx;
+ if ((cmd->flags & CMD_MAPPED) == 0)
+ panic("Need rx buffer mapping code");
+
+ desc->addr_lo = cmd->data_busaddr & 0xffffffff;
+ desc->addr_hi = (cmd->data_busaddr >> 32) & 0xffffffff;
+ desc->offset = 0;
+ desc->flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
+ rp->rx_ci = ci;
+ rp->rx_cmd_ring[ci] = cmd;
+ tb_debug(sc, DBG_RXQ | DBG_FULL,
+ "Updating ring%d ci= %d cmd= %p, busaddr= 0x%jx\n",
+ rp->ring_num, ci, cmd, cmd->data_busaddr);
+
+ ci = (rp->rx_ci + 1) & rp->rx_ring_mask;
+ } while (ci != rp->rx_pi);
+
+ /* Update the CI in one shot */
+ tb_debug(sc, DBG_RXQ, "Writing RX CI= %d\n", rp->rx_ci);
+ nhi_write_reg(sc, rp->rx_pici_reg, rp->rx_ci);
+
+ return;
+}
+
+static int
+nhi_init(struct nhi_softc *sc)
+{
+ tb_route_t root_route = {0x0, 0x0};
+ uint32_t val;
+ int error;
+
+ tb_debug(sc, DBG_INIT, "Initializing NHI\n");
+
+ /* Set interrupt Auto-ACK */
+ val = nhi_read_reg(sc, NHI_DMA_MISC);
+ tb_debug(sc, DBG_INIT|DBG_FULL, "Read NHI_DMA_MISC= 0x%08x\n", val);
+ val |= DMA_MISC_INT_AUTOCLEAR;
+ tb_debug(sc, DBG_INIT, "Setting interrupt auto-ACK, 0x%08x\n", val);
+ nhi_write_reg(sc, NHI_DMA_MISC, val);
+
+ if (NHI_IS_AR(sc) || NHI_IS_TR(sc) || NHI_IS_ICL(sc))
+ tb_printf(sc, "WARN: device uses an internal connection manager\n");
+
+ /*
+ * Populate the controller (local) UUID, necessary for cross-domain
+ * communications.
+ if (NHI_IS_ICL(sc))
+ nhi_pci_get_uuid(sc);
+ */
+
+ /*
+ * Attach the router to the root thunderbolt bridge now that the DMA
+ * channel is configured and ready.
+ * The root router always has a route of 0x0...0, so set it statically
+ * here.
+ */
+ if ((error = tb_router_attach_root(sc, root_route)) != 0)
+ tb_printf(sc, "tb_router_attach_root() error."
+ " The driver should be loaded at boot\n");
+
+ if (error == 0) {
+ sc->ich.ich_func = nhi_post_init;
+ sc->ich.ich_arg = sc;
+ error = config_intrhook_establish(&sc->ich);
+ if (error)
+ tb_printf(sc, "Failed to establish config hook\n");
+ }
+
+ return (error);
+}
+
+static void
+nhi_post_init(void *arg)
+{
+ struct nhi_softc *sc;
+ uint8_t *u;
+ int error;
+
+ sc = (struct nhi_softc *)arg;
+ tb_debug(sc, DBG_INIT | DBG_EXTRA, "nhi_post_init\n");
+
+ bzero(sc->lc_uuid, 16);
+ error = tb_config_get_lc_uuid(sc->root_rsc, sc->lc_uuid);
+ if (error == 0) {
+ u = sc->lc_uuid;
+ tb_printf(sc, "Root Router LC UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
+ u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
+ } else
+ tb_printf(sc, "Error finding LC registers: %d\n", error);
+
+ u = sc->uuid;
+ tb_printf(sc, "Root Router UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
+ u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
+
+ config_intrhook_disestablish(&sc->ich);
+}
+
+static int
+nhi_tx_enqueue(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_tx_buffer_desc *desc;
+ uint16_t pi;
+
+ sc = r->sc;
+
+ /* A length of 0 means 4096. Can't have longer lengths */
+ if (cmd->req_len > TX_BUFFER_DESC_LEN_MASK + 1) {
+ tb_debug(sc, DBG_TXQ, "Error: TX frame too big\n");
+ return (EINVAL);
+ }
+ cmd->req_len &= TX_BUFFER_DESC_LEN_MASK;
+
+ mtx_lock(&r->mtx);
+ desc = &r->tx_ring[r->tx_pi].tx;
+ pi = (r->tx_pi + 1) & r->tx_ring_mask;
+ if (pi == r->tx_ci) {
+ mtx_unlock(&r->mtx);
+ return (EBUSY);
+ }
+ r->tx_cmd_ring[r->tx_pi] = cmd;
+ r->tx_pi = pi;
+
+ desc->addr_lo = htole32(cmd->data_busaddr & 0xffffffff);
+ desc->addr_hi = htole32(cmd->data_busaddr >> 32);
+ desc->eof_len = htole16((cmd->pdf << TX_BUFFER_DESC_EOF_SHIFT) |
+ cmd->req_len);
+ desc->flags_sof = cmd->pdf | TX_BUFFER_DESC_IE | TX_BUFFER_DESC_RS;
+ desc->offset = 0;
+ desc->payload_time = 0;
+
+ tb_debug(sc, DBG_TXQ, "enqueue TXdescIdx= %d cmdidx= %d len= %d, "
+ "busaddr= 0x%jx\n", r->tx_pi, cmd->idx, cmd->req_len,
+ cmd->data_busaddr);
+
+ nhi_write_reg(sc, r->tx_pici_reg, pi << TX_RING_PI_SHIFT | r->tx_ci);
+ mtx_unlock(&r->mtx);
+ return (0);
+}
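+
+/*
+ * The PI/CI pair shares one 32-bit PICI register: PI in the high 16 bits,
+ * CI in the low 16 (see TX_RING_PI_SHIFT and TX_RING_CI_MASK).  Standalone
+ * sketch of the packing and the power-of-two index wrap used above
+ * (illustrative only, not driver code):
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   int
+ *   main(void)
+ *   {
+ *           uint16_t depth = 256, mask = depth - 1;
+ *           uint16_t pi = 255, ci = 10;
+ *           uint32_t pici;
+ *
+ *           pi = (pi + 1) & mask;           // wraps from 255 back to 0
+ *           pici = (uint32_t)pi << 16 | ci;
+ *           printf("pi= %u ci= %u pici= 0x%08x\n", pi, ci, pici);
+ *           return (0);
+ *   }
+ */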
+
+/*
+ * No scheduling happens for now. Ring0 scheduling is done in the TB
+ * layer.
+ */
+int
+nhi_tx_schedule(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ int error;
+
+ error = nhi_tx_enqueue(r, cmd);
+ if (error == EBUSY)
+ nhi_write_reg(r->sc, r->tx_pici_reg,
+ r->tx_pi << TX_RING_PI_SHIFT | r->tx_ci);
+ return (error);
+}
+
+int
+nhi_tx_synchronous(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ int error, count;
+
+ if ((error = nhi_tx_schedule(r, cmd)) != 0)
+ return (error);
+
+ if (cmd->flags & CMD_POLLED) {
+ error = 0;
+ count = cmd->timeout * 100;
+
+ /* Enter the loop at least once */
+ while ((count-- > 0) && (cmd->flags & CMD_REQ_COMPLETE) == 0) {
+ DELAY(10000);
+ rmb();
+ nhi_intr(r->tracker);
+ }
+ } else {
+ error = msleep(cmd, &r->mtx, PCATCH, "nhi_tx", cmd->timeout);
+ if ((error == 0) && (cmd->flags & CMD_REQ_COMPLETE) != 0)
+ error = EWOULDBLOCK;
+ }
+
+ if ((cmd->flags & CMD_REQ_COMPLETE) == 0)
+ error = ETIMEDOUT;
+
+ tb_debug(r->sc, DBG_TXQ|DBG_FULL, "tx_synchronous done waiting, "
+ "err= %d, TX_COMPLETE= %d\n", error,
+ !!(cmd->flags & CMD_REQ_COMPLETE));
+
+ if (error == ERESTART) {
+ tb_printf(r->sc, "TX command interrupted\n");
+ } else if ((error == EWOULDBLOCK) || (error == ETIMEDOUT)) {
+ tb_printf(r->sc, "TX command timed out\n");
+ } else if (error != 0) {
+ tb_printf(r->sc, "TX command failed error= %d\n", error);
+ }
+
+ return (error);
+}
+
+static int
+nhi_tx_complete(struct nhi_ring_pair *r, struct nhi_tx_buffer_desc *desc,
+ struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_pdf_dispatch *txpdf;
+ u_int sof;
+
+ sc = r->sc;
+ sof = desc->flags_sof & TX_BUFFER_DESC_SOF_MASK;
+ tb_debug(sc, DBG_TXQ, "Recovered TX pdf= %s cmdidx= %d flags= 0x%x\n",
+ tb_get_string(sof, nhi_frame_pdf), cmd->idx, desc->flags_sof);
+
+ if ((desc->flags_sof & TX_BUFFER_DESC_DONE) == 0)
+ tb_debug(sc, DBG_TXQ,
+ "warning, TX descriptor DONE flag not set\n");
+
+ /* XXX Atomics */
+ cmd->flags |= CMD_REQ_COMPLETE;
+
+ txpdf = &r->tracker->txpdf[sof];
+ if (txpdf->cb != NULL) {
+ tb_debug(sc, DBG_INTR|DBG_TXQ, "Calling PDF TX callback\n");
+ txpdf->cb(txpdf->context, (union nhi_ring_desc *)desc, cmd);
+ return (0);
+ }
+
+ tb_debug(sc, DBG_TXQ, "Unhandled TX complete %s\n",
+ tb_get_string(sof, nhi_frame_pdf));
+ nhi_free_tx_frame(r, cmd);
+
+ return (0);
+}
+
+static int
+nhi_rx_complete(struct nhi_ring_pair *r, struct nhi_rx_post_desc *desc,
+ struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_pdf_dispatch *rxpdf;
+ u_int eof, len;
+
+ sc = r->sc;
+ eof = desc->eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
+ len = desc->eof_len & RX_BUFFER_DESC_LEN_MASK;
+ tb_debug(sc, DBG_INTR|DBG_RXQ,
+ "Recovered RX pdf= %s len= %d cmdidx= %d, busaddr= 0x%jx\n",
+ tb_get_string(eof, nhi_frame_pdf), len, cmd->idx,
+ cmd->data_busaddr);
+
+ rxpdf = &r->tracker->rxpdf[eof];
+ if (rxpdf->cb != NULL) {
+ tb_debug(sc, DBG_INTR|DBG_RXQ, "Calling PDF RX callback\n");
+ rxpdf->cb(rxpdf->context, (union nhi_ring_desc *)desc, cmd);
+ return (0);
+ }
+
+ tb_debug(sc, DBG_INTR, "Unhandled RX frame %s\n",
+ tb_get_string(eof, nhi_frame_pdf));
+
+ return (0);
+}
+
+int
+nhi_register_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
+ struct nhi_dispatch *rx)
+{
+ struct nhi_intr_tracker *trkr;
+ struct nhi_pdf_dispatch *slot;
+
+ KASSERT(rp != NULL, ("ring_pair is null\n"));
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_register_pdf called\n");
+
+ trkr = rp->tracker;
+ if (trkr == NULL) {
+ tb_debug(rp->sc, DBG_INTR, "Invalid tracker\n");
+ return (EINVAL);
+ }
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering TX interrupts\n");
+ if (tx != NULL) {
+ while (tx->cb != NULL) {
+ if ((tx->pdf < 0) || (tx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->txpdf[tx->pdf];
+ if (slot->cb != NULL) {
+ tb_debug(rp->sc, DBG_INTR,
+ "Attempted to register busy callback\n");
+ return (EBUSY);
+ }
+ slot->cb = tx->cb;
+ slot->context = tx->context;
+ tb_debug(rp->sc, DBG_INTR,
+ "Registered TX callback for PDF %d\n", tx->pdf);
+ tx++;
+ }
+ }
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering RX interrupts\n");
+ if (rx != NULL) {
+ while (rx->cb != NULL) {
+ if ((rx->pdf < 0) || (rx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->rxpdf[rx->pdf];
+ if (slot->cb != NULL) {
+ tb_debug(rp->sc, DBG_INTR,
+ "Attempted to register busy callback\n");
+ return (EBUSY);
+ }
+ slot->cb = rx->cb;
+ slot->context = rx->context;
+ tb_debug(rp->sc, DBG_INTR,
+ "Registered RX callback for PDF %d\n", rx->pdf);
+ rx++;
+ }
+ }
+
+ return (0);
+}
+
+int
+nhi_deregister_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
+ struct nhi_dispatch *rx)
+{
+ struct nhi_intr_tracker *trkr;
+ struct nhi_pdf_dispatch *slot;
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_register_pdf called\n");
+
+ trkr = rp->tracker;
+
+ if (tx != NULL) {
+ while (tx->cb != NULL) {
+ if ((tx->pdf < 0) || (tx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->txpdf[tx->pdf];
+ slot->cb = NULL;
+ slot->context = NULL;
+ tx++;
+ }
+ }
+
+ if (rx != NULL) {
+ while (rx->cb != NULL) {
+ if ((rx->pdf < 0) || (rx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->rxpdf[rx->pdf];
+ slot->cb = NULL;
+ slot->context = NULL;
+ rx++;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * The CI and PI indexes are not read from the hardware. We track them in
+ * software, so we know where in the ring to start a scan on an interrupt.
+ * All we have to do is check for the appropriate Done bit in the next
+ * descriptor, and we know if we have reached the last descriptor that the
+ * hardware touched. This technique saves at least 2 MEMIO reads per
+ * interrupt.
+ */
+void
+nhi_intr(void *data)
+{
+ union nhi_ring_desc *rxd;
+ struct nhi_cmd_frame *cmd;
+ struct nhi_intr_tracker *trkr = data;
+ struct nhi_softc *sc;
+ struct nhi_ring_pair *r;
+ struct nhi_tx_buffer_desc *txd;
+ uint32_t val, old_ci;
+ u_int count;
+
+ sc = trkr->sc;
+
+ tb_debug(sc, DBG_INTR|DBG_FULL, "Interrupt @ vector %d\n",
+ trkr->vector);
+ if ((r = trkr->ring) == NULL)
+ return;
+
+ /*
+ * Process TX completions from the adapter. Only go through
+ * the ring once to prevent unbounded looping.
+ */
+ count = r->tx_ring_depth;
+ while (count-- > 0) {
+ txd = &r->tx_ring[r->tx_ci].tx;
+ if ((txd->flags_sof & TX_BUFFER_DESC_DONE) == 0)
+ break;
+ cmd = r->tx_cmd_ring[r->tx_ci];
+ tb_debug(sc, DBG_INTR|DBG_TXQ|DBG_FULL,
+ "Found tx cmdidx= %d cmd= %p\n", r->tx_ci, cmd);
+
+ /* Pass the completion up the stack */
+ nhi_tx_complete(r, txd, cmd);
+
+ /*
+ * Advance to the next item in the ring via the cached
+ * copy of the CI. Clear the flags so we can detect
+ * a new done condition the next time the ring wraps
+ * around. Anything higher up the stack that needs this
+ * field should have already copied it.
+ *
+ * XXX is a memory barrier needed?
+ */
+ txd->flags_sof = 0;
+ r->tx_ci = (r->tx_ci + 1) & r->tx_ring_mask;
+ }
+
+ /* Process RX packets from the adapter */
+ count = r->rx_ring_depth;
+ old_ci = r->rx_ci;
+
+ while (count-- > 0) {
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "Checking RX descriptor at %d\n", r->rx_pi);
+
+ /* Look up RX descriptor and cmd */
+ rxd = &r->rx_ring[r->rx_pi];
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "rx desc len= 0x%04x flags= 0x%04x\n", rxd->rxpost.eof_len,
+ rxd->rxpost.flags_sof);
+ if ((rxd->rxpost.flags_sof & RX_BUFFER_DESC_DONE) == 0)
+ break;
+ cmd = r->rx_cmd_ring[r->rx_pi];
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "Found rx cmdidx= %d cmd= %p\n", r->rx_pi, cmd);
+
+ /*
+ * Pass the RX frame up the stack. RX frames are re-used
+ * in-place, so their contents must be copied before this
+ * function returns.
+ *
+ * XXX Rings other than Ring0 might want to have a different
+ * re-use and re-populate policy
+ */
+ nhi_rx_complete(r, &rxd->rxpost, cmd);
+
+ /*
+ * Advance the CI and move forward to the next item in the
+ * ring via our cached copy of the PI. Clear out the
+ * length field so we can detect a new RX frame when the
+ * ring wraps around. Reset the flags of the descriptor.
+ */
+ rxd->rxpost.eof_len = 0;
+ rxd->rx.flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
+ r->rx_ci = (r->rx_ci + 1) & r->rx_ring_mask;
+ r->rx_pi = (r->rx_pi + 1) & r->rx_ring_mask;
+ }
+
+ /*
+ * Tell the firmware about the new RX CI
+ *
+ * XXX There's a chance this will overwrite an update to the PI.
+ * Is that OK? We keep our own copy of the PI and never read it from
+ * hardware. However, will overwriting it result in a missed
+ * interrupt?
+ */
+ if (r->rx_ci != old_ci) {
+ val = r->rx_pi << RX_RING_PI_SHIFT | r->rx_ci;
+ tb_debug(sc, DBG_INTR | DBG_RXQ,
+ "Writing new RX PICI= 0x%08x\n", val);
+ nhi_write_reg(sc, r->rx_pici_reg, val);
+ }
+}
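+
+/*
+ * Shape of the Done-bit scan used in nhi_intr(), reduced to its core
+ * (illustrative pseudo-code, not driver code): the hardware index
+ * registers are never read; the handler walks forward from its cached
+ * index until it reaches a descriptor the hardware has not completed.
+ *
+ *   ci = cached_ci;
+ *   for (count = depth; count > 0; count--) {
+ *           if ((ring[ci].flags & DESC_DONE) == 0)
+ *                   break;                  // hardware stopped here
+ *           complete(&ring[ci]);
+ *           ring[ci].flags = 0;             // re-arm for the next wrap
+ *           ci = (ci + 1) & mask;
+ *   }
+ *   cached_ci = ci;
+ */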
+
+static int
+nhi_setup_sysctl(struct nhi_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = NULL;
+ struct sysctl_oid *tree = NULL;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ if (ctx != NULL)
+ tree = device_get_sysctl_tree(sc->dev);
+
+ /*
+ * Not being able to create sysctls is going to hamper other
+ * parts of the driver.
+ */
+ if (tree == NULL) {
+ tb_printf(sc, "Error: cannot create sysctl nodes\n");
+ return (EINVAL);
+ }
+ sc->sysctl_tree = tree;
+ sc->sysctl_ctx = ctx;
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
+ OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
+ SYSCTL_ADD_U16(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "max_rings", CTLFLAG_RD, &sc->max_ring_count, 0,
+ "Max number of rings available");
+ SYSCTL_ADD_U8(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "force_hcm", CTLFLAG_RD, &sc->force_hcm, 0,
+ "Force on/off the function of the host connection manager");
+
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/nhi_pci.c b/sys/dev/thunderbolt/nhi_pci.c
new file mode 100644
index 000000000000..7dacff523cef
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_pci.c
@@ -0,0 +1,529 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe interface for Thunderbolt Native Host Interface */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include "tb_if.h"
+
+static int nhi_pci_probe(device_t);
+static int nhi_pci_attach(device_t);
+static int nhi_pci_detach(device_t);
+static int nhi_pci_suspend(device_t);
+static int nhi_pci_resume(device_t);
+static void nhi_pci_free(struct nhi_softc *);
+static int nhi_pci_allocate_interrupts(struct nhi_softc *);
+static void nhi_pci_free_interrupts(struct nhi_softc *);
+static int nhi_pci_icl_poweron(struct nhi_softc *);
+
+static device_method_t nhi_methods[] = {
+ DEVMETHOD(device_probe, nhi_pci_probe),
+ DEVMETHOD(device_attach, nhi_pci_attach),
+ DEVMETHOD(device_detach, nhi_pci_detach),
+ DEVMETHOD(device_suspend, nhi_pci_suspend),
+ DEVMETHOD(device_resume, nhi_pci_resume),
+
+ DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_generic_get_debug),
+
+ DEVMETHOD_END
+};
+
+static driver_t nhi_pci_driver = {
+ "nhi",
+ nhi_methods,
+ sizeof(struct nhi_softc)
+};
+
+struct nhi_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint32_t flags;
+ const char *desc;
+} nhi_identifiers[] = {
+ { VENDOR_INTEL, DEVICE_AR_2C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 2C)" },
+ { VENDOR_INTEL, DEVICE_AR_DP_B_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 4C Rev B)" },
+ { VENDOR_INTEL, DEVICE_AR_DP_C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 4C Rev C)" },
+ { VENDOR_INTEL, DEVICE_AR_LP_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge LP 2C)" },
+ { VENDOR_INTEL, DEVICE_ICL_NHI_0, 0xffff, 0xffff, NHI_TYPE_ICL,
+ "Thunderbolt 3 NHI Port 0 (IceLake)" },
+ { VENDOR_INTEL, DEVICE_ICL_NHI_1, 0xffff, 0xffff, NHI_TYPE_ICL,
+ "Thunderbolt 3 NHI Port 1 (IceLake)" },
+ { VENDOR_AMD, DEVICE_PINK_SARDINE_0, 0xffff, 0xffff, NHI_TYPE_USB4,
+ "USB4 NHI Port 0 (Pink Sardine)" },
+ { VENDOR_AMD, DEVICE_PINK_SARDINE_1, 0xffff, 0xffff, NHI_TYPE_USB4,
+ "USB4 NHI Port 1 (Pink Sardine)" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+DRIVER_MODULE_ORDERED(nhi, pci, nhi_pci_driver, NULL, NULL,
+ SI_ORDER_ANY);
+MODULE_PNP_INFO("U16:vendor;U16:device;V16:subvendor;V16:subdevice;U32:#;D:#",
+ pci, nhi, nhi_identifiers, nitems(nhi_identifiers) - 1);
+
+static struct nhi_ident *
+nhi_find_ident(device_t dev)
+{
+ struct nhi_ident *n;
+
+ for (n = nhi_identifiers; n->vendor != 0; n++) {
+ if (n->vendor != pci_get_vendor(dev))
+ continue;
+ if (n->device != pci_get_device(dev))
+ continue;
+ if ((n->subvendor != 0xffff) &&
+ (n->subvendor != pci_get_subvendor(dev)))
+ continue;
+ if ((n->subdevice != 0xffff) &&
+ (n->subdevice != pci_get_subdevice(dev)))
+ continue;
+ return (n);
+ }
+
+ return (NULL);
+}
+
+static int
+nhi_pci_probe(device_t dev)
+{
+ struct nhi_ident *n;
+
+ if (resource_disabled("tb", 0))
+ return (ENXIO);
+ if ((n = nhi_find_ident(dev)) != NULL) {
+ device_set_desc(dev, n->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ return (ENXIO);
+}
+
+static int
+nhi_pci_attach(device_t dev)
+{
+ devclass_t dc;
+ bus_dma_template_t t;
+ struct nhi_softc *sc;
+ struct nhi_ident *n;
+ int error = 0;
+
+ sc = device_get_softc(dev);
+ bzero(sc, sizeof(*sc));
+ sc->dev = dev;
+ n = nhi_find_ident(dev);
+ sc->hwflags = n->flags;
+ nhi_get_tunables(sc);
+
+ tb_debug(sc, DBG_INIT|DBG_FULL, "busmaster status was %s\n",
+ (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
+ ? "enabled" : "disabled");
+ pci_enable_busmaster(dev);
+
+ sc->ufp = NULL;
+ if ((TB_FIND_UFP(dev, &sc->ufp) != 0) || (sc->ufp == NULL)) {
+ dc = devclass_find("tbolt");
+ if (dc != NULL)
+ sc->ufp = devclass_get_device(dc, device_get_unit(dev));
+ }
+ if (sc->ufp == NULL)
+ tb_printf(sc, "Cannot find Upstream Facing Port\n");
+ else
+ tb_printf(sc, "Upstream Facing Port is %s\n",
+ device_get_nameunit(sc->ufp));
+
+ if (NHI_IS_ICL(sc)) {
+ if ((error = nhi_pci_icl_poweron(sc)) != 0)
+ return (error);
+ }
+
+ /* Allocate BAR0 DMA registers */
+ sc->regs_rid = PCIR_BAR(0);
+ if ((sc->regs_resource = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) {
+ tb_printf(sc, "Cannot allocate PCI registers\n");
+ return (ENXIO);
+ }
+ sc->regs_btag = rman_get_bustag(sc->regs_resource);
+ sc->regs_bhandle = rman_get_bushandle(sc->regs_resource);
+
+ /* Allocate parent DMA tag */
+ bus_dma_template_init(&t, bus_get_dma_tag(dev));
+ if (bus_dma_template_tag(&t, &sc->parent_dmat) != 0) {
+ tb_printf(sc, "Cannot allocate parent DMA tag\n");
+ nhi_pci_free(sc);
+ return (ENOMEM);
+ }
+
+ error = nhi_pci_allocate_interrupts(sc);
+ if (error == 0)
+ error = nhi_attach(sc);
+ if (error != 0)
+ nhi_pci_detach(sc->dev);
+ return (error);
+}
+
+static int
+nhi_pci_detach(device_t dev)
+{
+ struct nhi_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ nhi_detach(sc);
+ nhi_pci_free(sc);
+
+ return (0);
+}
+
+static int
+nhi_pci_suspend(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+nhi_pci_resume(device_t dev)
+{
+
+ return (0);
+}
+
+static void
+nhi_pci_free(struct nhi_softc *sc)
+{
+
+ nhi_pci_free_interrupts(sc);
+
+ if (sc->parent_dmat != NULL) {
+ bus_dma_tag_destroy(sc->parent_dmat);
+ sc->parent_dmat = NULL;
+ }
+
+ if (sc->regs_resource != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ sc->regs_rid, sc->regs_resource);
+ sc->regs_resource = NULL;
+ }
+
+ return;
+}
+
+static int
+nhi_pci_allocate_interrupts(struct nhi_softc *sc)
+{
+ int msgs, error = 0;
+
+ /* Map the Pending Bit Array and Vector Table BARs for MSI-X */
+ sc->irq_pba_rid = pci_msix_pba_bar(sc->dev);
+ sc->irq_table_rid = pci_msix_table_bar(sc->dev);
+
+ if (sc->irq_pba_rid != -1)
+ sc->irq_pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->irq_pba_rid, RF_ACTIVE);
+ if (sc->irq_table_rid != -1)
+ sc->irq_table = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->irq_table_rid, RF_ACTIVE);
+
+ msgs = pci_msix_count(sc->dev);
+ tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+ "Counted %d MSI-X messages\n", msgs);
+ msgs = min(msgs, NHI_MSIX_MAX);
+ msgs = max(msgs, 1);
+ if (msgs != 0) {
+ tb_debug(sc, DBG_INIT|DBG_INTR, "Attempting to allocate %d "
+ "MSI-X interrupts\n", msgs);
+ error = pci_alloc_msix(sc->dev, &msgs);
+ tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+ "pci_alloc_msix return msgs= %d, error= %d\n", msgs, error);
+ }
+
+ if ((error != 0) || (msgs <= 0)) {
+ tb_printf(sc, "Failed to allocate any interrupts\n");
+ msgs = 0;
+ }
+
+ sc->msix_count = msgs;
+ return (error);
+}
+
+static void
+nhi_pci_free_interrupts(struct nhi_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->msix_count; i++) {
+ bus_teardown_intr(sc->dev, sc->irqs[i], sc->intrhand[i]);
+ bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid[i],
+ sc->irqs[i]);
+ }
+
+ pci_release_msi(sc->dev);
+
+ if (sc->irq_table != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ sc->irq_table_rid, sc->irq_table);
+ sc->irq_table = NULL;
+ }
+
+ if (sc->irq_pba != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ sc->irq_pba_rid, sc->irq_pba);
+ sc->irq_pba = NULL;
+ }
+
+ if (sc->intr_trackers != NULL)
+ free(sc->intr_trackers, M_NHI);
+ return;
+}
+
+int
+nhi_pci_configure_interrupts(struct nhi_softc *sc)
+{
+ struct nhi_intr_tracker *trkr;
+ int rid, i, error = 0;
+
+ nhi_pci_disable_interrupts(sc);
+
+ sc->intr_trackers = malloc(sizeof(struct nhi_intr_tracker) *
+ sc->msix_count, M_NHI, M_ZERO | M_NOWAIT);
+ if (sc->intr_trackers == NULL) {
+ tb_debug(sc, DBG_INIT, "Cannot allocate intr trackers\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < sc->msix_count; i++) {
+ rid = i + 1;
+ trkr = &sc->intr_trackers[i];
+ trkr->sc = sc;
+ trkr->ring = NULL;
+ trkr->vector = i;
+
+ sc->irq_rid[i] = rid;
+ sc->irqs[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
+ &sc->irq_rid[i], RF_ACTIVE);
+ if (sc->irqs[i] == NULL) {
+ tb_debug(sc, DBG_INIT,
+ "Cannot allocate interrupt RID %d\n",
+ sc->irq_rid[i]);
+ break;
+ }
+ error = bus_setup_intr(sc->dev, sc->irqs[i], INTR_TYPE_BIO |
+ INTR_MPSAFE, NULL, nhi_intr, trkr, &sc->intrhand[i]);
+ if (error) {
+ tb_debug(sc, DBG_INIT,
+ "cannot setup interrupt RID %d\n", sc->irq_rid[i]);
+ break;
+ }
+ }
+
+ tb_debug(sc, DBG_INIT, "Set up %d interrupts\n", sc->msix_count);
+
+ /* Set the interrupt throttle rate to 128us */
+ for (i = 0; i < 16; i++)
+ nhi_write_reg(sc, NHI_ITR0 + i * 4, 0x1f4);
+
+ return (error);
+}
+
+#define NHI_SET_INTERRUPT(offset, mask, val) \
+do { \
+ reg = offset / 32; \
+ offset %= 32; \
+ ivr[reg] &= ~(mask << offset); \
+ ivr[reg] |= (val << offset); \
+} while (0)
+
+void
+nhi_pci_enable_interrupt(struct nhi_ring_pair *r)
+{
+ struct nhi_softc *sc = r->sc;
+ uint32_t ivr[5];
+ u_int offset, reg;
+
+ tb_debug(sc, DBG_INIT|DBG_INTR, "Enabling interrupts for ring %d\n",
+ r->ring_num);
+ /*
+ * Compute the routing between event type and MSI-X vector.
+ * 4 bits per descriptor.
+ */
+ ivr[0] = nhi_read_reg(sc, NHI_IVR0);
+ ivr[1] = nhi_read_reg(sc, NHI_IVR1);
+ ivr[2] = nhi_read_reg(sc, NHI_IVR2);
+ ivr[3] = nhi_read_reg(sc, NHI_IVR3);
+ ivr[4] = nhi_read_reg(sc, NHI_IVR4);
+
+ /* Program TX */
+ offset = (r->ring_num + IVR_TX_OFFSET) * 4;
+ NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);
+
+ /* Now program RX */
+ offset = (r->ring_num + IVR_RX_OFFSET) * 4;
+ NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);
+
+ /* Last, program Nearly Empty. This one always goes to vector 15 */
+ offset = (r->ring_num + IVR_NE_OFFSET) * 4;
+ NHI_SET_INTERRUPT(offset, 0x0f, 0x0f);
+
+ nhi_write_reg(sc, NHI_IVR0, ivr[0]);
+ nhi_write_reg(sc, NHI_IVR1, ivr[1]);
+ nhi_write_reg(sc, NHI_IVR2, ivr[2]);
+ nhi_write_reg(sc, NHI_IVR3, ivr[3]);
+ nhi_write_reg(sc, NHI_IVR4, ivr[4]);
+
+ tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+ "Wrote IVR 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ivr[0], ivr[1], ivr[2], ivr[3], ivr[4]);
+
+ /* Now do the Interrupt Mask Register, 1 bit per descriptor */
+ ivr[0] = nhi_read_reg(sc, NHI_IMR0);
+ ivr[1] = nhi_read_reg(sc, NHI_IMR1);
+
+ /* Tx */
+ offset = r->ring_num + IMR_TX_OFFSET;
+ NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+ /* Rx */
+ offset = r->ring_num + IMR_RX_OFFSET;
+ NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+ /* NE */
+ offset = r->ring_num + IMR_NE_OFFSET;
+ NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+ nhi_write_reg(sc, NHI_IMR0, ivr[0]);
+ nhi_write_reg(sc, NHI_IMR1, ivr[1]);
+ tb_debug(sc, DBG_INIT|DBG_FULL,
+ "Wrote IMR 0x%08x 0x%08x\n", ivr[0], ivr[1]);
+}
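+
+/*
+ * Standalone sketch of the 4-bit field math NHI_SET_INTERRUPT performs
+ * (illustrative only; note that the macro also modifies its offset
+ * argument in place): IVR entry N lives in 32-bit word N/8 at bit
+ * position (N%8)*4.
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   int
+ *   main(void)
+ *   {
+ *           uint32_t ivr[5] = { 0 };
+ *           unsigned entry = 13, vector = 13;
+ *           unsigned reg = (entry * 4) / 32, off = (entry * 4) % 32;
+ *
+ *           ivr[reg] &= ~(0xfU << off);
+ *           ivr[reg] |= (uint32_t)vector << off;
+ *           printf("ivr[%u]= 0x%08x\n", reg, ivr[reg]);
+ *           return (0);
+ *   }
+ */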
+
+void
+nhi_pci_disable_interrupts(struct nhi_softc *sc)
+{
+
+ tb_debug(sc, DBG_INIT, "Disabling interrupts\n");
+ nhi_write_reg(sc, NHI_IMR0, 0);
+ nhi_write_reg(sc, NHI_IMR1, 0);
+ nhi_write_reg(sc, NHI_IVR0, 0);
+ nhi_write_reg(sc, NHI_IVR1, 0);
+ nhi_write_reg(sc, NHI_IVR2, 0);
+ nhi_write_reg(sc, NHI_IVR3, 0);
+ nhi_write_reg(sc, NHI_IVR4, 0);
+
+ /* Dummy reads to clear pending bits */
+ nhi_read_reg(sc, NHI_ISR0);
+ nhi_read_reg(sc, NHI_ISR1);
+}
+
+/*
+ * Icelake controllers need to be notified of power-on
+ */
+static int
+nhi_pci_icl_poweron(struct nhi_softc *sc)
+{
+ device_t dev;
+ uint32_t val;
+ int i, error = 0;
+
+ dev = sc->dev;
+ val = pci_read_config(dev, ICL_VSCAP_9, 4);
+ tb_debug(sc, DBG_INIT, "icl_poweron val= 0x%x\n", val);
+ if (val & ICL_VSCAP9_FWREADY)
+ return (0);
+
+ val = pci_read_config(dev, ICL_VSCAP_22, 4);
+ val |= ICL_VSCAP22_FORCEPWR;
+ tb_debug(sc, DBG_INIT|DBG_FULL, "icl_poweron writing 0x%x\n", val);
+ pci_write_config(dev, ICL_VSCAP_22, val, 4);
+
+ error = ETIMEDOUT;
+ for (i = 0; i < 15; i++) {
+ DELAY(1000000);
+ val = pci_read_config(dev, ICL_VSCAP_9, 4);
+ if (val & ICL_VSCAP9_FWREADY) {
+ error = 0;
+ break;
+ }
+ }
+
+ return (error);
+}
+
+/*
+ * Icelake and Alderlake controllers store their UUID in PCI config space
+ */
+int
+nhi_pci_get_uuid(struct nhi_softc *sc)
+{
+ device_t dev;
+ uint32_t val[4];
+
+ dev = sc->dev;
+ val[0] = pci_read_config(dev, ICL_VSCAP_10, 4);
+ val[1] = pci_read_config(dev, ICL_VSCAP_11, 4);
+ val[2] = 0xffffffff;
+ val[3] = 0xffffffff;
+
+ bcopy(val, &sc->uuid, 16);
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/nhi_reg.h b/sys/dev/thunderbolt/nhi_reg.h
new file mode 100644
index 000000000000..6e71f4c9646b
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_reg.h
@@ -0,0 +1,332 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 register definitions
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _NHI_REG_H
+#define _NHI_REG_H
+
+/* Some common definitions */
+#define TBT_SEC_NONE 0x00
+#define TBT_SEC_USER 0x01
+#define TBT_SEC_SECURE 0x02
+#define TBT_SEC_DP 0x03
+
+#define GENMASK(h, l) (((~0U) >> (31 - (h))) ^ ((~0U) >> (31 - (l)) >> 1))
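+/* For reference: GENMASK(15, 0) == 0x0000ffff, GENMASK(15, 8) == 0x0000ff00 */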
+
+/* PCI Vendor and Device IDs */
+#define VENDOR_INTEL 0x8086
+#define DEVICE_AR_2C_NHI 0x1575
+#define DEVICE_AR_DP_B_NHI 0x1577
+#define DEVICE_AR_DP_C_NHI 0x15d2
+#define DEVICE_AR_LP_NHI 0x15bf
+#define DEVICE_ICL_NHI_0 0x8a17
+#define DEVICE_ICL_NHI_1 0x8a0d
+
+#define VENDOR_AMD 0x1022
+#define DEVICE_PINK_SARDINE_0 0x1668
+#define DEVICE_PINK_SARDINE_1 0x1669
+
+/*
+ * MMIO Registers
+ *
+ * Ring buffer registers
+ *
+ * 32 transmit and receive rings are available, with Ring 0 being the most
+ * important one.  The ring descriptors are 16 bytes each, and each set of
+ * TX and RX descriptors is packed together.  Only the Ring 0 addresses are
+ * defined; the others can be computed directly from them.
+ */
+#define NHI_TX_RING_ADDR_LO 0x00000
+#define NHI_TX_RING_ADDR_HI 0x00004
+#define NHI_TX_RING_PICI 0x00008
+#define TX_RING_CI_MASK GENMASK(15, 0)
+#define TX_RING_PI_SHIFT 16
+#define NHI_TX_RING_SIZE 0x0000c
+
+#define NHI_RX_RING_ADDR_LO 0x08000
+#define NHI_RX_RING_ADDR_HI 0x08004
+#define NHI_RX_RING_PICI 0x08008
+#define RX_RING_CI_MASK GENMASK(15, 0)
+#define RX_RING_PI_SHIFT 16
+#define NHI_RX_RING_SIZE 0x0800c
+#define RX_RING_BUF_SIZE_SHIFT 16
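+
+/*
+ * Illustrative only: each ring has 16 bytes of registers (four 32-bit
+ * registers), so ring N's copy of a Ring 0 register sits at a 16-byte
+ * stride from the base, e.g.
+ *
+ *	reg = NHI_TX_RING_ADDR_LO + (N * 16);
+ */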
+
+/*
+ * One 32-bit status register encodes one status bit per ring, indicating
+ * that the watermark from the control descriptor has been reached.
+ */
+#define NHI_RX_RING_STATUS 0x19400
+
+/*
+ * TX and RX Tables.  These are 32-byte control fields for each ring.
+ * Only 8 bytes are controllable by the host software; the rest is a
+ * shadow copy, maintained by the controller, of the current packet
+ * being processed.
+ */
+#define NHI_TX_RING_TABLE_BASE0 0x19800
+#define TX_TABLE_INTERVAL_MASK GENMASK(23,0) /* Isoch interval 256ns */
+#define TX_TABLE_ITE (1 << 27) /* Isoch tx enable */
+#define TX_TABLE_E2E (1 << 28) /* End-to-end flow control */
+#define TX_TABLE_NS (1 << 29) /* PCIe No Snoop */
+#define TX_TABLE_RAW (1 << 30) /* Raw (1)/frame(0) mode */
+#define TX_TABLE_VALID (1 << 31) /* Table entry is valid */
+#define NHI_TX_RING_TABLE_TIMESTAMP 0x19804
+
+#define NHI_RX_RING_TABLE_BASE0 0x29800
+#define RX_TABLE_TX_E2E_HOPID_SHIFT (1 << 12)
+#define RX_TABLE_E2E (1 << 28) /* End-to-end flow control */
+#define RX_TABLE_NS (1 << 29) /* PCIe No Snoop */
+#define RX_TABLE_RAW (1 << 30) /* Raw (1)/frame(0) mode */
+#define RX_TABLE_VALID (1 << 31) /* Table entry is valid */
+#define NHI_RX_RING_TABLE_BASE1 0x29804
+#define RX_TABLE_EOF_MASK (1 << 0)
+#define RX_TABLE_SOF_MASK (1 << 16)
+
+/*
+ * Interrupt Control/Status Registers
+ *
+ * Interrupt Status Register (ISR)
+ * Interrupt status for RX, TX, and Nearly Empty events, one bit per
+ * MSI-X vector.  Clear on read.
+ * Only 12 bits per operation are used, instead of 16; this appears to
+ * relate to the number of paths advertised in the HOST_CAPS register,
+ * which is wired to 0x0c for Alpine Ridge.
+ */
+#define NHI_ISR0 0x37800
+#define ISR0_TX_DESC_SHIFT 0
+#define ISR0_RX_DESC_SHIFT 12
+#define ISR0_RX_EMPTY_SHIFT 24
+#define NHI_ISR1 0x37804
+#define ISR1_RX_EMPTY_SHIFT 0
+
+/* Interrupt Status Clear, corresponds to ISR0/ISR1.  Write-only */
+#define NHI_ISC0 0x37808
+#define NHI_ISC1 0x3780c
+
+/* Interrupt Status Set, corresponds to ISR0/ISR1.  Write-only */
+#define NHI_ISS0 0x37810
+#define NHI_ISS1 0x37814
+
+/* Interrupt Mask, corresponds to ISR0/ISR1.  Read-write */
+#define NHI_IMR0 0x38200
+#define NHI_IMR1 0x38204
+#define IMR_TX_OFFSET 0
+#define IMR_RX_OFFSET 12
+#define IMR_NE_OFFSET 24
+
+/* Interrupt Mask Clear, corresponds to ISR0/ISR1.  Write-only */
+#define NHI_IMC0 0x38208
+#define NHI_IMC1 0x3820c
+
+/* Interrupt Mask Set, corresponds to ISR0/ISR1.  Write-only */
+#define NHI_IMS0 0x38210
+#define NHI_IMS1 0x38214
+
+/*
+ * Interrupt Throttle Rate. One 32 bit register per interrupt,
+ * 16 registers for the 16 MSI-X interrupts. Interval is in 256ns
+ * increments.
+ */
+#define NHI_ITR0 0x38c00
+#define ITR_INTERVAL_SHIFT 0
+#define ITR_COUNTER_SHIFT 16
+
+/*
+ * Interrupt Vector Allocation.
+ * There are 12 4-bit descriptors for TX, 12 4-bit descriptors for RX,
+ * and 12 4-bit descriptors for Nearly Empty. Each descriptor holds
+ * the numerical value of the MSI-X vector that will receive the
+ * corresponding interrupt.
+ * Bits 0-31 of IVR0 and 0-15 of IVR1 are for TX
+ * Bits 16-31 of IVR1 and 0-31 of IVR2 are for RX
+ * Bits 0-31 of IVR3 and 0-15 of IVR4 are for Nearly Empty
+ */
+#define NHI_IVR0 0x38c40
+#define NHI_IVR1 0x38c44
+#define NHI_IVR2 0x38c48
+#define NHI_IVR3 0x38c4c
+#define NHI_IVR4 0x38c50
+#define IVR_TX_OFFSET 0
+#define IVR_RX_OFFSET 12
+#define IVR_NE_OFFSET 24
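+
+/*
+ * Illustrative only: vector descriptor N (N = ring + IVR_{TX,RX,NE}_OFFSET)
+ * is the 4-bit field at bit (N * 4) % 32 of IVR register N / 8.  For
+ * example, the TX descriptor for ring 5 occupies bits 20-23 of NHI_IVR0.
+ */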
+
+/* Native Host Interface Control registers */
+#define NHI_HOST_CAPS 0x39640
+#define GET_HOST_CAPS_PATHS(val) ((val) & 0x3f)
+
+/*
+ * This definition comes from the Linux driver. In the USB4 spec, this
+ * register is named Host Interface Control, and the Interrupt Autoclear bit
+ * is at bit17, not bit2. The Linux driver doesn't seem to acknowledge this.
+ */
+#define NHI_DMA_MISC 0x39864
+#define DMA_MISC_INT_AUTOCLEAR (1 << 2)
+
+/* Thunderbolt firmware mailbox registers */
+#define TBT_INMAILDATA 0x39900
+
+#define TBT_INMAILCMD 0x39904
+#define INMAILCMD_CMD_MASK 0xff
+#define INMAILCMD_SAVE_CONNECTED 0x05
+#define INMAILCMD_DISCONNECT_PCIE 0x06
+#define INMAILCMD_DRIVER_UNLOAD_DISCONNECT 0x07
+#define INMAILCMD_DISCONNECT_PORTA 0x10
+#define INMAILCMD_DISCONNECT_PORTB 0x11
+#define INMAILCMD_SETMODE_CERT_TB_1ST_DEPTH 0x20
+#define INMAILCMD_SETMODE_ANY_TB_1ST_DEPTH 0x21
+#define INMAILCMD_SETMODE_CERT_TB_ANY_DEPTH 0x22
+#define INMAILCMD_SETMODE_ANY_TB_ANY_DEPTH 0x23
+#define INMAILCMD_CIO_RESET 0xf0
+#define INMAILCMD_ERROR (1 << 30)
+#define INMAILCMD_OPREQ (1 << 31)
+
+#define TBT_OUTMAILCMD 0x3990c
+#define OUTMAILCMD_STATUS_BUSY (1 << 12)
+#define OUTMAILCMD_OPMODE_MASK 0xf00
+#define OUTMAILCMD_OPMODE_SAFE 0x000
+#define OUTMAILCMD_OPMODE_AUTH 0x100
+#define OUTMAILCMD_OPMODE_ENDPOINT 0x200
+#define OUTMAILCMD_OPMODE_CM_FULL 0x300
+
+#define TBT_FW_STATUS 0x39944
+#define FWSTATUS_ENABLE (1 << 0)
+#define FWSTATUS_INVERT (1 << 1)
+#define FWSTATUS_START (1 << 2)
+#define FWSTATUS_CIO_RESET (1 << 30)
+#define FWSTATUS_CM_READY (1 << 31)
+
+/*
+ * Link Controller (LC) registers. These are in the Vendor Specific
+ * Extended Capability registers in PCICFG.
+ */
+#define AR_LC_MBOX_OUT 0x4c
+#define ICL_LC_MBOX_OUT 0xf0
+#define LC_MBOXOUT_VALID (1 << 0)
+#define LC_MBOXOUT_CMD_SHIFT 1
+#define LC_MBOXOUT_CMD_MASK (0x7f << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_GO2SX (0x02 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_GO2SX_NOWAKE (0x03 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_SXEXIT_TBT (0x04 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_SXEXIT_NOTBT (0x05 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_OS_UP (0x06 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_DATA_SHIFT 8
+#define SET_LC_MBOXOUT_DATA(val) ((val) << LC_MBOXOUT_DATA_SHIFT)
+
+#define AR_LC_MBOX_IN 0x48
+#define ICL_LC_MBOX_IN 0xec
+#define LC_MBOXIN_DONE (1 << 0)
+#define LC_MBOXIN_CMD_SHIFT 1
+#define LC_MBOXIN_CMD_MASK (0x7f << LC_MBOXIN_CMD_SHIFT)
+#define LC_MBOXIN_DATA_SHIFT 8
+#define GET_LC_MBOXIN_DATA(val) ((val) >> LC_MBOXIN_DATA_SHIFT)
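+
+/*
+ * Illustrative only: a GO2SX request carrying 'data' would be composed as
+ *
+ *	val = LC_MBOXOUT_VALID | LC_MBOXOUT_CMD_GO2SX |
+ *	    SET_LC_MBOXOUT_DATA(data);
+ *
+ * and written to AR_LC_MBOX_OUT (or ICL_LC_MBOX_OUT) in config space.
+ */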
+
+/* Other Vendor Specific registers */
+#define AR_VSCAP_1C 0x1c
+#define AR_VSCAP_B0 0xb0
+
+#define ICL_VSCAP_9 0xc8
+#define ICL_VSCAP9_FWREADY (1 << 31)
+#define ICL_VSCAP_10 0xcc
+#define ICL_VSCAP_11 0xd0
+#define ICL_VSCAP_22 0xfc
+#define ICL_VSCAP22_FORCEPWR (1 << 1)
+
+/*
+ * Data structures
+ *
+ * Transmit buffer descriptor, 12.3.1.  Must be aligned on a 4-byte
+ * boundary.
+ */
+struct nhi_tx_buffer_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t eof_len;
+#define TX_BUFFER_DESC_LEN_MASK 0xfff
+#define TX_BUFFER_DESC_EOF_SHIFT 12
+ uint8_t flags_sof;
+#define TX_BUFFER_DESC_SOF_MASK 0xf
+#define TX_BUFFER_DESC_IDE (1 << 4) /* Isoch DMA enable */
+#define TX_BUFFER_DESC_DONE (1 << 5) /* Descriptor Done */
+#define TX_BUFFER_DESC_RS (1 << 6) /* Request Status/Done */
+#define TX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t payload_time;
+} __packed;
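+
+/*
+ * Illustrative only: a frame of 'len' bytes whose EOF PDF is 'pdf' would
+ * pack the eof_len field as
+ *
+ *	desc->eof_len = htole16((len & TX_BUFFER_DESC_LEN_MASK) |
+ *	    (pdf << TX_BUFFER_DESC_EOF_SHIFT));
+ */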
+
+/*
+ * Receive buffer descriptor, 12.4.1. 4 byte aligned. This goes into
+ * the descriptor ring, but changes into the _post form when the
+ * controller uses it.
+ */
+struct nhi_rx_buffer_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t reserved0;
+ uint8_t flags;
+#define RX_BUFFER_DESC_RS (1 << 6) /* Request Status/Done */
+#define RX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t reserved1;
+} __packed;
+
+/*
+ * Receive buffer descriptor, after the controller fills it in
+ */
+struct nhi_rx_post_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t eof_len;
+#define RX_BUFFER_DESC_LEN_MASK 0xfff
+#define RX_BUFFER_DESC_EOF_SHIFT 12
+ uint8_t flags_sof;
+#define RX_BUFFER_DESC_SOF_MASK 0xf
+#define RX_BUFFER_DESC_CRC_ERROR (1 << 4) /* CRC error (frame mode) */
+#define RX_BUFFER_DESC_DONE (1 << 5) /* Descriptor Done */
+#define RX_BUFFER_DESC_OVERRUN (1 << 6) /* Buffer overrun */
+#define RX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t payload_time;
+} __packed;
+
+union nhi_ring_desc {
+ struct nhi_tx_buffer_desc tx;
+ struct nhi_rx_buffer_desc rx;
+ struct nhi_rx_post_desc rxpost;
+ uint32_t dword[4];
+};
+
+/* Protocol Defined Field (PDF) */
+#define PDF_READ 0x01
+#define PDF_WRITE 0x02
+#define PDF_NOTIFY 0x03
+#define PDF_NOTIFY_ACK 0x04
+#define PDF_HOTPLUG 0x05
+#define PDF_XDOMAIN_REQ 0x06
+#define PDF_XDOMAIN_RESP 0x07
+/* Thunderbolt-only */
+#define PDF_CM_EVENT 0x0a
+#define PDF_CM_REQ 0x0b
+#define PDF_CM_RESP 0x0c
+
+#endif /* _NHI_REG_H */
diff --git a/sys/dev/thunderbolt/nhi_var.h b/sys/dev/thunderbolt/nhi_var.h
new file mode 100644
index 000000000000..2b9e878af47d
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_var.h
@@ -0,0 +1,277 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 / Native Host Interface driver variables
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NHI_VAR
+#define _NHI_VAR
+
+MALLOC_DECLARE(M_NHI);
+
+#define NHI_MSIX_MAX 32
+#define NHI_RING0_TX_DEPTH 16
+#define NHI_RING0_RX_DEPTH 16
+#define NHI_DEFAULT_NUM_RINGS 1
+#define NHI_MAX_NUM_RINGS 32 /* XXX 2? */
+#define NHI_RING0_FRAME_SIZE 256
+#define NHI_MAILBOX_TIMEOUT 15
+
+#define NHI_CMD_TIMEOUT 3 /* 3 seconds */
+
+struct nhi_softc;
+struct nhi_ring_pair;
+struct nhi_intr_tracker;
+struct nhi_cmd_frame;
+struct hcm_softc;
+struct router_softc;
+
+struct nhi_cmd_frame {
+ TAILQ_ENTRY(nhi_cmd_frame) cm_link;
+ uint32_t *data;
+ bus_addr_t data_busaddr;
+ u_int req_len;
+ uint16_t flags;
+#define CMD_MAPPED (1 << 0)
+#define CMD_POLLED (1 << 1)
+#define CMD_REQ_COMPLETE (1 << 2)
+#define CMD_RESP_COMPLETE (1 << 3)
+#define CMD_RESP_OVERRUN (1 << 4)
+ uint16_t retries;
+ uint16_t pdf;
+ uint16_t idx;
+
+ void *context;
+ u_int timeout;
+
+ uint32_t *resp_buffer;
+ u_int resp_len;
+};
+
+#define NHI_RING_NAMELEN 16
+struct nhi_ring_pair {
+ struct nhi_softc *sc;
+
+ union nhi_ring_desc *tx_ring;
+ union nhi_ring_desc *rx_ring;
+
+ uint16_t tx_pi;
+ uint16_t tx_ci;
+ uint16_t rx_pi;
+ uint16_t rx_ci;
+
+ uint16_t rx_pici_reg;
+ uint16_t tx_pici_reg;
+
+ struct nhi_cmd_frame **rx_cmd_ring;
+ struct nhi_cmd_frame **tx_cmd_ring;
+
+ struct mtx mtx;
+ char name[NHI_RING_NAMELEN];
+ struct nhi_intr_tracker *tracker;
+ SLIST_ENTRY(nhi_ring_pair) ring_link;
+
+ TAILQ_HEAD(, nhi_cmd_frame) tx_head;
+ TAILQ_HEAD(, nhi_cmd_frame) rx_head;
+
+ uint16_t tx_ring_depth;
+ uint16_t tx_ring_mask;
+ uint16_t rx_ring_depth;
+ uint16_t rx_ring_mask;
+ uint16_t rx_buffer_size;
+ u_char ring_num;
+
+ bus_dma_tag_t ring_dmat;
+ bus_dmamap_t ring_map;
+ void *ring;
+ bus_addr_t tx_ring_busaddr;
+ bus_addr_t rx_ring_busaddr;
+
+ bus_dma_tag_t frames_dmat;
+ bus_dmamap_t frames_map;
+ void *frames;
+ bus_addr_t tx_frames_busaddr;
+ bus_addr_t rx_frames_busaddr;
+};
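+
+/*
+ * The *_ring_mask fields assume power-of-two ring depths, so that the
+ * producer/consumer indices wrap with a mask, e.g. (illustrative):
+ *
+ *	r->tx_pi = (r->tx_pi + 1) & r->tx_ring_mask;
+ */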
+
+/* PDF-indexed array of dispatch routines for interrupts */
+typedef void (nhi_ring_cb_t)(void *, union nhi_ring_desc *,
+ struct nhi_cmd_frame *);
+struct nhi_pdf_dispatch {
+ nhi_ring_cb_t *cb;
+ void *context;
+};
+
+struct nhi_intr_tracker {
+ struct nhi_softc *sc;
+ struct nhi_ring_pair *ring;
+ struct nhi_pdf_dispatch txpdf[16];
+ struct nhi_pdf_dispatch rxpdf[16];
+ u_int vector;
+};
+
+struct nhi_softc {
+ device_t dev;
+ device_t ufp;
+ u_int debug;
+ u_int hwflags;
+#define NHI_TYPE_UNKNOWN 0x00
+#define NHI_TYPE_AR 0x01 /* Alpine Ridge */
+#define NHI_TYPE_TR 0x02 /* Titan Ridge */
+#define NHI_TYPE_ICL 0x03 /* IceLake */
+#define NHI_TYPE_MR 0x04 /* Maple Ridge */
+#define NHI_TYPE_ADL 0x05 /* AlderLake */
+#define NHI_TYPE_USB4 0x0f
+#define NHI_TYPE_MASK 0x0f
+#define NHI_MBOX_BUSY 0x10
+ u_int caps;
+#define NHI_CAP_ICM 0x01
+#define NHI_CAP_HCM 0x02
+#define NHI_USE_ICM(sc) ((sc)->caps & NHI_CAP_ICM)
+#define NHI_USE_HCM(sc) ((sc)->caps & NHI_CAP_HCM)
+ struct hcm_softc *hcm;
+ struct router_softc *root_rsc;
+
+ struct nhi_ring_pair *ring0;
+ struct nhi_intr_tracker *intr_trackers;
+
+ uint16_t path_count;
+ uint16_t max_ring_count;
+
+ struct mtx nhi_mtx;
+ SLIST_HEAD(, nhi_ring_pair) ring_list;
+
+ int msix_count;
+ struct resource *irqs[NHI_MSIX_MAX];
+ void *intrhand[NHI_MSIX_MAX];
+ int irq_rid[NHI_MSIX_MAX];
+ struct resource *irq_pba;
+ int irq_pba_rid;
+ struct resource *irq_table;
+ int irq_table_rid;
+
+ struct resource *regs_resource;
+ bus_space_handle_t regs_bhandle;
+ bus_space_tag_t regs_btag;
+ int regs_rid;
+
+ bus_dma_tag_t parent_dmat;
+
+ bus_dma_tag_t ring0_dmat;
+ bus_dmamap_t ring0_map;
+ void *ring0_frames;
+ bus_addr_t ring0_frames_busaddr;
+ struct nhi_cmd_frame *ring0_cmds;
+
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+
+ struct intr_config_hook ich;
+
+ uint8_t force_hcm;
+#define NHI_FORCE_HCM_DEFAULT 0x00
+#define NHI_FORCE_HCM_ON 0x01
+#define NHI_FORCE_HCM_OFF 0x02
+
+ uint8_t uuid[16];
+ uint8_t lc_uuid[16];
+};
+
+struct nhi_dispatch {
+ uint8_t pdf;
+ nhi_ring_cb_t *cb;
+ void *context;
+};
+
+#define NHI_IS_AR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_AR)
+#define NHI_IS_TR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_TR)
+#define NHI_IS_ICL(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_ICL)
+#define NHI_IS_USB4(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_USB4)
+
+int nhi_pci_configure_interrupts(struct nhi_softc *sc);
+void nhi_pci_enable_interrupt(struct nhi_ring_pair *r);
+void nhi_pci_disable_interrupts(struct nhi_softc *sc);
+int nhi_pci_get_uuid(struct nhi_softc *sc);
+int nhi_read_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t *val);
+int nhi_write_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t val);
+
+void nhi_get_tunables(struct nhi_softc *);
+int nhi_attach(struct nhi_softc *);
+int nhi_detach(struct nhi_softc *);
+
+struct nhi_cmd_frame *nhi_alloc_tx_frame(struct nhi_ring_pair *);
+void nhi_free_tx_frame(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+
+int nhi_inmail_cmd(struct nhi_softc *, uint32_t, uint32_t);
+int nhi_outmail_cmd(struct nhi_softc *, uint32_t *);
+
+int nhi_tx_schedule(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+int nhi_tx_synchronous(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+void nhi_intr(void *);
+
+int nhi_register_pdf(struct nhi_ring_pair *, struct nhi_dispatch *,
+ struct nhi_dispatch *);
+int nhi_deregister_pdf(struct nhi_ring_pair *, struct nhi_dispatch *,
+ struct nhi_dispatch *);
+
+/* Low level read/write MMIO registers */
+static __inline uint32_t
+nhi_read_reg(struct nhi_softc *sc, u_int offset)
+{
+ return (le32toh(bus_space_read_4(sc->regs_btag, sc->regs_bhandle,
+ offset)));
+}
+
+static __inline void
+nhi_write_reg(struct nhi_softc *sc, u_int offset, uint32_t val)
+{
+ bus_space_write_4(sc->regs_btag, sc->regs_bhandle, offset,
+ htole32(val));
+}
+
+static __inline struct nhi_cmd_frame *
+nhi_alloc_tx_frame_locked(struct nhi_ring_pair *r)
+{
+ struct nhi_cmd_frame *cmd;
+
+ if ((cmd = TAILQ_FIRST(&r->tx_head)) != NULL)
+ TAILQ_REMOVE(&r->tx_head, cmd, cm_link);
+ return (cmd);
+}
+
+static __inline void
+nhi_free_tx_frame_locked(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ /* Clear all flags except for MAPPED */
+ cmd->flags &= CMD_MAPPED;
+ cmd->resp_buffer = NULL;
+ TAILQ_INSERT_TAIL(&r->tx_head, cmd, cm_link);
+}
+
+#endif /* _NHI_VAR */
diff --git a/sys/dev/thunderbolt/nhi_wmi.c b/sys/dev/thunderbolt/nhi_wmi.c
new file mode 100644
index 000000000000..3feba3bcd8d1
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_wmi.c
@@ -0,0 +1,198 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/sbuf.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include "acpi_wmi_if.h"
+
+ACPI_MODULE_NAME("THUNDERBOLT-NHI-WMI")
+
+#define ACPI_INTEL_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+struct nhi_wmi_softc {
+ device_t dev;
+ device_t wmi_dev;
+ u_int state;
+ char *guid;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+};
+
+ACPI_SERIAL_DECL(nhi_wmi, "Thunderbolt NHI WMI device");
+
+static void nhi_wmi_identify(driver_t *driver, device_t parent);
+static int nhi_wmi_probe(device_t dev);
+static int nhi_wmi_attach(device_t dev);
+static int nhi_wmi_detach(device_t dev);
+static int nhi_wmi_sysctl(SYSCTL_HANDLER_ARGS);
+static int nhi_wmi_evaluate_method(struct nhi_wmi_softc *sc,
+ int method, uint32_t arg0, uint32_t *retval);
+
+static device_method_t nhi_wmi_methods[] = {
+ DEVMETHOD(device_identify, nhi_wmi_identify),
+ DEVMETHOD(device_probe, nhi_wmi_probe),
+ DEVMETHOD(device_attach, nhi_wmi_attach),
+ DEVMETHOD(device_detach, nhi_wmi_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t nhi_wmi_driver = {
+ "nhi_wmi",
+ nhi_wmi_methods,
+ sizeof(struct nhi_wmi_softc)
+};
+
+DRIVER_MODULE(nhi_wmi, acpi_wmi, nhi_wmi_driver,
+ NULL, NULL);
+MODULE_DEPEND(nhi_wmi, acpi_wmi, 1, 1, 1);
+MODULE_DEPEND(nhi_wmi, acpi, 1, 1, 1);
+
+static void
+nhi_wmi_identify(driver_t *driver, device_t parent)
+{
+
+ if (acpi_disabled("nhi_wmi") != 0)
+ return;
+
+ if (device_find_child(parent, "nhi_wmi", -1) != NULL)
+ return;
+
+ if (ACPI_WMI_PROVIDES_GUID_STRING(parent,
+ ACPI_INTEL_THUNDERBOLT_GUID) == 0)
+ return;
+
+ if (BUS_ADD_CHILD(parent, 0, "nhi_wmi", -1) == NULL)
+ device_printf(parent, "failed to add nhi_wmi\n");
+}
+
+static int
+nhi_wmi_probe(device_t dev)
+{
+
+ if (ACPI_WMI_PROVIDES_GUID_STRING(device_get_parent(dev),
+ ACPI_INTEL_THUNDERBOLT_GUID) == 0)
+ return (EINVAL);
+ device_set_desc(dev, "Thunderbolt WMI Endpoint");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+nhi_wmi_attach(device_t dev)
+{
+ struct nhi_wmi_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->wmi_dev = device_get_parent(dev);
+
+ sc->sysctl_ctx = device_get_sysctl_ctx(dev);
+ sc->sysctl_tree = device_get_sysctl_tree(dev);
+ sc->state = 0;
+ sc->guid = ACPI_INTEL_THUNDERBOLT_GUID;
+
+ SYSCTL_ADD_STRING(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ OID_AUTO, "GUID", CTLFLAG_RD, sc->guid, 0, "WMI GUID");
+ SYSCTL_ADD_PROC(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ OID_AUTO, "force_power", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ sc, 0, nhi_wmi_sysctl, "I", "Force controller power on");
+
+ return (0);
+}
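+
+/*
+ * Illustrative usage from userland (the unit number will vary):
+ *
+ *	sysctl dev.nhi_wmi.0.force_power=1
+ */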
+
+static int
+nhi_wmi_detach(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+nhi_wmi_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct nhi_wmi_softc *sc;
+ int error, arg;
+
+ sc = (struct nhi_wmi_softc *)arg1;
+ arg = !!sc->state;
+ error = sysctl_handle_int(oidp, &arg, 0, req);
+ if (!error && req->newptr != NULL) {
+ ACPI_SERIAL_BEGIN(nhi_wmi);
+ error = nhi_wmi_evaluate_method(sc, 1, arg, NULL);
+ ACPI_SERIAL_END(nhi_wmi);
+ if (error == 0)
+ sc->state = arg;
+ }
+ return (error);
+}
+
+static int
+nhi_wmi_evaluate_method(struct nhi_wmi_softc *sc, int method, uint32_t arg0,
+ uint32_t *retval)
+{
+ ACPI_OBJECT *obj;
+ ACPI_BUFFER in, out;
+ uint32_t val, params[1];
+
+ params[0] = arg0;
+ in.Pointer = &params;
+ in.Length = sizeof(params);
+ out.Pointer = NULL;
+ out.Length = ACPI_ALLOCATE_BUFFER;
+
+ if (ACPI_FAILURE(ACPI_WMI_EVALUATE_CALL(sc->wmi_dev,
+ ACPI_INTEL_THUNDERBOLT_GUID, 0, method, &in, &out))) {
+ AcpiOsFree(out.Pointer);
+ return (EINVAL);
+ }
+
+ obj = out.Pointer;
+ if (obj != NULL && obj->Type == ACPI_TYPE_INTEGER)
+ val = (uint32_t)obj->Integer.Value;
+ else
+ val = 0;
+
+ AcpiOsFree(out.Pointer);
+ if (retval)
+ *retval = val;
+
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/router.c b/sys/dev/thunderbolt/router.c
new file mode 100644
index 000000000000..a3b418d77fac
--- /dev/null
+++ b/sys/dev/thunderbolt/router.c
@@ -0,0 +1,939 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Config space access for switches, ports, and devices in TB3 and USB4 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+static int router_alloc_cmd(struct router_softc *, struct router_command **);
+static void router_free_cmd(struct router_softc *, struct router_command *);
+static int _tb_router_attach(struct router_softc *);
+static void router_prepare_read(struct router_softc *, struct router_command *,
+ int);
+static int _tb_config_read(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *, void *, struct router_command **);
+static int router_schedule(struct router_softc *, struct router_command *);
+static int router_schedule_locked(struct router_softc *,
+ struct router_command *);
+static nhi_ring_cb_t router_complete_intr;
+static nhi_ring_cb_t router_response_intr;
+static nhi_ring_cb_t router_notify_intr;
+
+#define CFG_DEFAULT_RETRIES 3
+#define CFG_DEFAULT_TIMEOUT	2	/* seconds */
+
+static int
+router_lookup_device(struct router_softc *sc, tb_route_t route,
+ struct router_softc **dev)
+{
+ struct router_softc *cursor;
+ uint64_t search_rt, remainder_rt, this_rt;
+ uint8_t hop;
+
+ KASSERT(dev != NULL, ("dev cannot be NULL\n"));
+
+ cursor = tb_config_get_root(sc);
+ remainder_rt = search_rt = route.lo | ((uint64_t)route.hi << 32);
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "%s: Searching for router 0x%016jx\n", __func__, search_rt);
+
+ while (cursor != NULL) {
+ this_rt = TB_ROUTE(cursor);
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Comparing cursor route 0x%016jx\n", this_rt);
+ if (this_rt == search_rt)
+ break;
+
+ /* Prepare to go to the next hop node in the route */
+ hop = remainder_rt & 0xff;
+ remainder_rt >>= 8;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "hop= 0x%02x, remainder= 0x%016jx\n", hop, remainder_rt);
+
+ /*
+ * An adapter index of 0x0 is only for the host interface
+ * adapter on the root route. The only time that
+ * it's valid for searches is when you're looking for the
+ * root route, and that case has already been handled.
+ */
+ if (hop == 0) {
+ tb_debug(sc, DBG_ROUTER,
+ "End of route chain, route not found\n");
+ return (ENOENT);
+ }
+
+ if (hop > cursor->max_adap) {
+ tb_debug(sc, DBG_ROUTER,
+ "Route hop out of range for parent\n");
+ return (EINVAL);
+ }
+
+ if (cursor->adapters == NULL) {
+ tb_debug(sc, DBG_ROUTER,
+ "Error, router not fully initialized\n");
+ return (EINVAL);
+ }
+
+ cursor = cursor->adapters[hop];
+ }
+
+ if (cursor == NULL)
+ return (ENOENT);
+
+ *dev = cursor;
+ return (0);
+}
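+
+/*
+ * Route strings encode one adapter (hop) number per byte, starting at the
+ * least significant byte.  For example, route 0x0000000000000302 names the
+ * router reached through adapter 2 of the root router, then adapter 3 of
+ * the depth-1 router below it.
+ */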
+
+static int
+router_insert(struct router_softc *sc, struct router_softc *parent)
+{
+ uint64_t this_rt;
+ uint8_t this_hop;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_insert called\n");
+
+ if (parent == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Parent cannot be NULL in insert\n");
+ return (EINVAL);
+ }
+
+ this_rt = TB_ROUTE(sc);
+	if ((parent->depth + 1 != sc->depth) ||
+	    ((this_rt >> ((sc->depth - 1) * 8)) > 0xffULL)) {
+ tb_debug(sc, DBG_ROUTER, "Added route 0x%08x%08x is not a "
+ "direct child of the parent route 0x%08x%08x\n",
+ sc->route.hi, sc->route.lo, parent->route.hi,
+ parent->route.lo);
+ return (EINVAL);
+ }
+
+	this_hop = (uint8_t)(this_rt >> ((sc->depth - 1) * 8));
+
+ tb_debug(sc, DBG_ROUTER, "Inserting route 0x%08x%08x with last hop "
+ "of 0x%02x and depth of %d\n", sc->route.hi, sc->route.lo,
+ this_hop, sc->depth);
+
+ if (this_hop > parent->max_adap) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Inserted route is out of range of the parent\n");
+ return (EINVAL);
+ }
+
+ if (parent->adapters[this_hop] != NULL) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Inserted route already exists\n");
+ return (EEXIST);
+ }
+
+ parent->adapters[this_hop] = sc;
+
+ tb_debug(sc, DBG_ROUTER, "Added router 0x%08x%08x to parent "
+ "0x%08x%08x\n", sc->route.hi, sc->route.lo, parent->route.hi,
+ parent->route.lo);
+ return (0);
+}
+
+static int
+router_register_interrupts(struct router_softc *sc)
+{
+ struct nhi_dispatch tx[] = { { PDF_READ, router_complete_intr, sc },
+ { PDF_WRITE, router_complete_intr, sc },
+ { 0, NULL, NULL } };
+ struct nhi_dispatch rx[] = { { PDF_READ, router_response_intr, sc },
+ { PDF_WRITE, router_response_intr, sc },
+ { PDF_NOTIFY, router_notify_intr, sc },
+ { 0, NULL, NULL } };
+
+ return (nhi_register_pdf(sc->ring0, tx, rx));
+}
+
+int
+tb_router_attach(struct router_softc *parent, tb_route_t route)
+{
+	struct router_softc *sc;
+	int error;
+
+ tb_debug(parent, DBG_ROUTER|DBG_EXTRA, "tb_router_attach called\n");
+
+ sc = malloc(sizeof(*sc), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (sc == NULL) {
+		tb_debug(parent, DBG_ROUTER, "Cannot allocate router\n");
+ return (ENOMEM);
+ }
+
+ sc->dev = parent->dev;
+ sc->debug = parent->debug;
+ sc->ring0 = parent->ring0;
+ sc->route = route;
+ sc->nsc = parent->nsc;
+
+ mtx_init(&sc->mtx, "tbcfg", "Thunderbolt Router Config", MTX_DEF);
+ TAILQ_INIT(&sc->cmd_queue);
+
+	error = router_insert(sc, parent);
+	if (error != 0) {
+		tb_router_detach(sc);
+		return (error);
+	}
+
+ return (_tb_router_attach(sc));
+}
+
+int
+tb_router_attach_root(struct nhi_softc *nsc, tb_route_t route)
+{
+ struct router_softc *sc;
+ int error;
+
+ tb_debug(nsc, DBG_ROUTER|DBG_EXTRA, "tb_router_attach_root called\n");
+
+ sc = malloc(sizeof(*sc), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (sc == NULL) {
+ tb_debug(nsc, DBG_ROUTER, "Cannot allocate root router\n");
+ return (ENOMEM);
+ }
+
+ sc->dev = nsc->dev;
+ sc->debug = nsc->debug;
+ sc->ring0 = nsc->ring0;
+ sc->route = route;
+ sc->nsc = nsc;
+
+ mtx_init(&sc->mtx, "tbcfg", "Thunderbolt Router Config", MTX_DEF);
+ TAILQ_INIT(&sc->cmd_queue);
+
+ /*
+ * This router is semi-virtual and represents the router that's part
+ * of the NHI DMA engine. Commands can't be issued to the topology
+ * until the NHI is initialized and this router is initialized, so
+ * there's no point in registering router interrupts earlier than this,
+ * even if other routers are found first.
+ */
+ tb_config_set_root(sc);
+ error = router_register_interrupts(sc);
+ if (error) {
+ tb_router_detach(sc);
+ return (error);
+ }
+
+ error = _tb_router_attach(sc);
+ if (error)
+ return (error);
+
+ bcopy((uint8_t *)sc->uuid, nsc->uuid, 16);
+ return (0);
+}
+
+static int
+_tb_router_attach(struct router_softc *sc)
+{
+ struct tb_cfg_router *cfg;
+ uint32_t *buf;
+ int error, up;
+
+ buf = malloc(9 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_router_read_polled(sc, 0, 9, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+ up = GET_ROUTER_CS_UPSTREAM_ADAP(cfg);
+ sc->max_adap = GET_ROUTER_CS_MAX_ADAP(cfg);
+ sc->depth = GET_ROUTER_CS_DEPTH(cfg);
+ sc->uuid[0] = cfg->uuid_lo;
+ sc->uuid[1] = cfg->uuid_hi;
+ sc->uuid[2] = 0xffffffff;
+ sc->uuid[3] = 0xffffffff;
+ tb_debug(sc, DBG_ROUTER, "Router upstream_port= %d, max_port= %d, "
+ "depth= %d\n", up, sc->max_adap, sc->depth);
+ free(buf, M_THUNDERBOLT);
+
+ /* Downstream adapters are indexed in the array allocated here. */
+ sc->max_adap = MIN(sc->max_adap, ROUTER_CS1_MAX_ADAPTERS);
+ sc->adapters = malloc((1 + sc->max_adap) * sizeof(void *),
+ M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (sc->adapters == NULL) {
+ tb_debug(sc, DBG_ROUTER,
+ "Cannot allocate downstream adapter memory\n");
+ return (ENOMEM);
+ }
+
+ tb_debug(sc, DBG_ROUTER, "Router created, route 0x%08x%08x\n",
+ sc->route.hi, sc->route.lo);
+
+ return (0);
+}
+
+int
+tb_router_detach(struct router_softc *sc)
+{
+
+	tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "tb_router_detach called\n");
+
+ if (TAILQ_FIRST(&sc->cmd_queue) != NULL)
+ return (EBUSY);
+
+ mtx_destroy(&sc->mtx);
+
+ if (sc->adapters != NULL)
+ free(sc->adapters, M_THUNDERBOLT);
+
+	free(sc, M_THUNDERBOLT);
+
+ return (0);
+}
+
+static void
+router_get_config_cb(struct router_softc *sc, struct router_command *cmd,
+ void *arg)
+{
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_get_config_cb called\n");
+
+ /*
+ * Only do the copy if the command didn't have a notify event thrown.
+ * These events serve as asynchronous exception signals, which is
+ * cumbersome.
+ */
+ if (cmd->ev == 0)
+ bcopy((uint8_t *)cmd->resp_buffer,
+ (uint8_t *)cmd->callback_arg, cmd->dwlen * 4);
+
+ mtx_lock(&sc->mtx);
+ sc->inflight_cmd = NULL;
+
+ if ((cmd->flags & RCMD_POLLED) == 0)
+ wakeup(cmd);
+ else
+ cmd->flags |= RCMD_POLL_COMPLETE;
+
+ router_schedule_locked(sc, NULL);
+ mtx_unlock(&sc->mtx);
+}
+
+int
+tb_config_read(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+ struct router_command *cmd;
+ int error, retries;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ router_get_config_cb, &cmd)) != 0)
+ return (error);
+
+ retries = cmd->retries;
+ mtx_lock(&sc->mtx);
+ while (retries-- >= 0) {
+ error = router_schedule_locked(sc, cmd);
+ if (error)
+ break;
+
+ error = msleep(cmd, &sc->mtx, 0, "tbtcfg", cmd->timeout * hz);
+ if (error != EWOULDBLOCK)
+ break;
+ sc->inflight_cmd = NULL;
+		tb_debug(sc, DBG_ROUTER,
+		    "Config command timed out, retries=%d\n", retries);
+ }
+
+ if (cmd->ev != 0)
+ error = EINVAL;
+ router_free_cmd(sc, cmd);
+ mtx_unlock(&sc->mtx);
+ return (error);
+}
+
+int
+tb_config_read_polled(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+ struct router_command *cmd;
+ int error, retries, timeout;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ router_get_config_cb, &cmd)) != 0)
+ return (error);
+
+	retries = cmd->retries;
+	cmd->flags |= RCMD_POLLED;
+
+	mtx_lock(&sc->mtx);
+	while (retries-- >= 0) {
+		error = router_schedule_locked(sc, cmd);
+		if (error)
+			break;
+		mtx_unlock(&sc->mtx);
+
+		/* Reset the poll budget for each retry */
+		timeout = cmd->timeout * 1000000;
+		while (timeout > 0) {
+			DELAY(100 * 1000);
+ if ((cmd->flags & RCMD_POLL_COMPLETE) != 0)
+ break;
+ timeout -= 100000;
+ }
+
+		mtx_lock(&sc->mtx);
+		if ((cmd->flags & RCMD_POLL_COMPLETE) != 0)
+			break;
+
+		error = ETIMEDOUT;
+		sc->inflight_cmd = NULL;
+		tb_debug(sc, DBG_ROUTER,
+		    "Config command timed out, retries=%d\n", retries);
+	}
+
+ if (cmd->ev != 0)
+ error = EINVAL;
+ router_free_cmd(sc, cmd);
+ mtx_unlock(&sc->mtx);
+ return (error);
+}
+
+int
+tb_config_read_async(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf, void *cb)
+{
+ struct router_command *cmd;
+ int error;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ cb, &cmd)) != 0)
+ return (error);
+
+ error = router_schedule(sc, cmd);
+
+ return (error);
+}
+
+static int
+_tb_config_read(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf, void *cb,
+ struct router_command **rcmd)
+{
+ struct router_command *cmd;
+ struct tb_cfg_read *msg;
+ int error;
+
+ if ((error = router_alloc_cmd(sc, &cmd)) != 0)
+ return (error);
+
+ msg = router_get_frame_data(cmd);
+ bzero(msg, sizeof(*msg));
+ msg->route.hi = sc->route.hi;
+ msg->route.lo = sc->route.lo;
+ msg->addr_attrs = TB_CONFIG_ADDR(0, space, adapter, dwlen, offset);
+ cmd->callback = cb;
+ cmd->callback_arg = buf;
+ cmd->dwlen = dwlen;
+ router_prepare_read(sc, cmd, sizeof(*msg));
+
+ if (rcmd != NULL)
+ *rcmd = cmd;
+
+ return (0);
+}
+
+int
+tb_config_write(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+
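+	/* XXX Config space writes are not yet implemented */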
+	return (0);
+}
+
+static int
+router_alloc_cmd(struct router_softc *sc, struct router_command **rcmd)
+{
+ struct router_command *cmd;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_alloc_cmd\n");
+
+ cmd = malloc(sizeof(*cmd), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (cmd == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot allocate cmd/response\n");
+ return (ENOMEM);
+ }
+
+ cmd->nhicmd = nhi_alloc_tx_frame(sc->ring0);
+ if (cmd->nhicmd == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot allocate command frame\n");
+ free(cmd, M_THUNDERBOLT);
+ return (EBUSY);
+ }
+
+ cmd->sc = sc;
+ *rcmd = cmd;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Allocated command with index %d\n",
+ cmd->nhicmd->idx);
+
+ return (0);
+}
+
+static void
+router_free_cmd(struct router_softc *sc, struct router_command *cmd)
+{
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_free_cmd\n");
+
+ if (cmd == NULL)
+ return;
+
+ if (cmd->nhicmd != NULL) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Freeing nhi command %d\n",
+ cmd->nhicmd->idx);
+ nhi_free_tx_frame(sc->ring0, cmd->nhicmd);
+ }
+ free(cmd, M_THUNDERBOLT);
+
+ return;
+}
+
+static void
+router_prepare_read(struct router_softc *sc, struct router_command *cmd,
+ int len)
+{
+ struct nhi_cmd_frame *nhicmd;
+ uint32_t *msg;
+ int msglen, i;
+
+ KASSERT(cmd != NULL, ("cmd cannot be NULL\n"));
+ KASSERT(len != 0, ("Invalid zero-length command\n"));
+	KASSERT(len % 4 == 0, ("Message must be 32-bit padded\n"));
+
+ nhicmd = cmd->nhicmd;
+ msglen = (len - 4) / 4;
+ for (i = 0; i < msglen; i++)
+ nhicmd->data[i] = htobe32(nhicmd->data[i]);
+
+ msg = (uint32_t *)nhicmd->data;
+ msg[msglen] = htobe32(tb_calc_crc(nhicmd->data, len-4));
+
+ nhicmd->pdf = PDF_READ;
+ nhicmd->req_len = len;
+
+ nhicmd->timeout = NHI_CMD_TIMEOUT;
+ nhicmd->retries = 0;
+ nhicmd->resp_buffer = (uint32_t *)cmd->resp_buffer;
+ nhicmd->resp_len = (cmd->dwlen + 3) * 4;
+ nhicmd->context = cmd;
+
+ cmd->retries = CFG_DEFAULT_RETRIES;
+ cmd->timeout = CFG_DEFAULT_TIMEOUT;
+
+ return;
+}
+
+static int
+router_schedule(struct router_softc *sc, struct router_command *cmd)
+{
+ int error;
+
+ mtx_lock(&sc->mtx);
+ error = router_schedule_locked(sc, cmd);
+ mtx_unlock(&sc->mtx);
+
+	return (error);
+}
+
+static int
+router_schedule_locked(struct router_softc *sc, struct router_command *cmd)
+{
+ struct nhi_cmd_frame *nhicmd;
+ int error;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_schedule\n");
+
+ if (cmd != NULL)
+ TAILQ_INSERT_TAIL(&sc->cmd_queue, cmd, link);
+
+ while ((sc->inflight_cmd == NULL) &&
+ ((cmd = TAILQ_FIRST(&sc->cmd_queue)) != NULL)) {
+
+ TAILQ_REMOVE(&sc->cmd_queue, cmd, link);
+ nhicmd = cmd->nhicmd;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Scheduling command with index %d\n", nhicmd->idx);
+ sc->inflight_cmd = cmd;
+ if ((error = nhi_tx_schedule(sc->ring0, nhicmd)) != 0) {
+ tb_debug(sc, DBG_ROUTER, "nhi ring error "
+ "%d\n", error);
+ sc->inflight_cmd = NULL;
+ if (error == EBUSY) {
+ TAILQ_INSERT_HEAD(&sc->cmd_queue, cmd, link);
+ error = 0;
+ }
+ break;
+ }
+ }
+
+ return (error);
+}
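+
+/*
+ * Note that only one config command is kept in flight per router; if the
+ * TX ring returns EBUSY, the command is requeued at the head of the queue
+ * so ordering is preserved.
+ */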
+
+static void
+router_complete_intr(void *context, union nhi_ring_desc *ring,
+ struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc;
+ struct router_command *cmd;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+ KASSERT(nhicmd != NULL, ("nhicmd cannot be NULL\n"));
+
+ cmd = (struct router_command *)(nhicmd->context);
+ sc = cmd->sc;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_complete_intr called\n");
+
+ if (nhicmd->flags & CMD_RESP_COMPLETE) {
+ cmd->callback(sc, cmd, cmd->callback_arg);
+ }
+
+ return;
+}
+
+static void
+router_response_intr(void *context, union nhi_ring_desc *ring,
+    struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc, *dev;
+ struct tb_cfg_read_resp *read;
+ struct tb_cfg_write_resp *write;
+ struct router_command *cmd;
+ tb_route_t route;
+ u_int error, i, eof, len;
+ uint32_t attrs;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+
+ sc = (struct router_softc *)context;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_response_intr called\n");
+
+ eof = ring->rxpost.eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
+
+ if (eof == PDF_WRITE) {
+ write = (struct tb_cfg_write_resp *)nhicmd->data;
+ route.hi = be32toh(write->route.hi);
+ route.lo = be32toh(write->route.lo);
+ } else {
+ read = (struct tb_cfg_read_resp *)nhicmd->data;
+ route.hi = be32toh(read->route.hi);
+ route.lo = be32toh(read->route.lo);
+ attrs = be32toh(read->addr_attrs);
+ len = (attrs & TB_CFG_SIZE_MASK) >> TB_CFG_SIZE_SHIFT;
+ }
+
+ /* XXX Is this a problem? */
+ if ((route.hi & 0x80000000) == 0)
+ tb_debug(sc, DBG_ROUTER, "Invalid route\n");
+ route.hi &= ~0x80000000;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Looking up route 0x%08x%08x\n",
+ route.hi, route.lo);
+
+ error = router_lookup_device(sc, route, &dev);
+ if (error != 0 || dev == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot find device, error= %d\n",
+ error);
+ return;
+ }
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Found device %s route 0x%08x%08x, "
+ "inflight_cmd= %p\n", device_get_nameunit(dev->dev), dev->route.hi,
+ dev->route.lo, dev->inflight_cmd);
+
+ cmd = dev->inflight_cmd;
+ if (cmd == NULL) {
+ tb_debug(dev, DBG_ROUTER, "Null inflight cmd\n");
+ return;
+ }
+
+ if (eof == PDF_READ) {
+ for (i = 0; i < len; i++)
+ cmd->nhicmd->resp_buffer[i] = be32toh(read->data[i]);
+ }
+
+ cmd->nhicmd->flags |= CMD_RESP_COMPLETE;
+ if (cmd->nhicmd->flags & CMD_REQ_COMPLETE) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "TX_COMPLETE set\n");
+ cmd->callback(dev, cmd, cmd->callback_arg);
+ }
+
+ return;
+}
+
+static void
+router_notify_intr(void *context, union nhi_ring_desc *ring,
+    struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc;
+ struct router_command *cmd;
+ struct tb_cfg_notify event;
+ u_int ev, adap;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+
+ sc = (struct router_softc *)context;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_notify_intr called\n");
+
+ event.route.hi = be32toh(nhicmd->data[0]);
+ event.route.lo = be32toh(nhicmd->data[1]);
+ event.event_adap = be32toh(nhicmd->data[2]);
+
+ ev = GET_NOTIFY_EVENT(&event);
+ adap = GET_NOTIFY_ADAPTER(&event);
+
+ tb_debug(sc, DBG_ROUTER, "Event route 0x%08x%08x adap %d code %s\n",
+ event.route.hi, event.route.lo, adap,
+ tb_get_string(ev, tb_notify_event));
+
+ switch (ev) {
+ case TB_CFG_ERR_CONN:
+ case TB_CFG_ERR_LINK:
+ case TB_CFG_ERR_ADDR:
+ case TB_CFG_ERR_ADP:
+ case TB_CFG_ERR_ENUM:
+ case TB_CFG_ERR_NUA:
+ case TB_CFG_ERR_LEN:
+ case TB_CFG_ERR_HEC:
+ case TB_CFG_ERR_FC:
+ case TB_CFG_ERR_PLUG:
+ case TB_CFG_ERR_LOCK:
+ case TB_CFG_HP_ACK:
+ case TB_CFG_DP_BW:
+ if (sc->inflight_cmd != NULL) {
+ cmd = sc->inflight_cmd;
+ cmd->ev = ev;
+ cmd->callback(sc, cmd, cmd->callback_arg);
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+int
+tb_config_next_cap(struct router_softc *sc, struct router_cfg_cap *cap)
+{
+ union tb_cfg_cap *tbcap;
+ uint32_t *buf;
+ uint16_t current;
+ int error;
+
+ KASSERT(cap != NULL, ("cap cannot be NULL\n"));
+ KASSERT(cap->next_cap != 0, ("next_cap cannot be 0\n"));
+
+	buf = malloc(sizeof(*tbcap), M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+	if (buf == NULL)
+		return (ENOMEM);
+
+	current = cap->next_cap;
+	error = tb_config_read(sc, cap->space, cap->adap, current, 1, buf);
+	if (error) {
+		free(buf, M_THUNDERBOLT);
+		return (error);
+	}
+
+ tbcap = (union tb_cfg_cap *)buf;
+ cap->cap_id = tbcap->hdr.cap_id;
+ cap->next_cap = tbcap->hdr.next_cap;
+ cap->current_cap = current;
+
+ if ((cap->space != TB_CFG_CS_ROUTER) &&
+ (tbcap->hdr.cap_id != TB_CFG_CAP_VSC)) {
+ free(buf, M_THUNDERBOLT);
+ return (0);
+ }
+
+	error = tb_config_read(sc, cap->space, cap->adap, current, 2, buf);
+ if (error) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cap->vsc_id = tbcap->vsc.vsc_id;
+ cap->vsc_len = tbcap->vsc.len;
+ if (tbcap->vsc.len == 0) {
+ cap->next_cap = tbcap->vsec.vsec_next_cap;
+ cap->vsec_len = tbcap->vsec.vsec_len;
+ }
+
+ free(buf, M_THUNDERBOLT);
+ return (0);
+}
+
+int
+tb_config_find_cap(struct router_softc *sc, struct router_cfg_cap *cap)
+{
+ u_int cap_id, vsc_id;
+ int error;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "tb_config_find_cap called\n");
+
+ cap_id = cap->cap_id;
+ vsc_id = cap->vsc_id;
+
+ cap->cap_id = cap->vsc_id = 0;
+ while ((cap->cap_id != cap_id) || (cap->vsc_id != vsc_id)) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Looking for cap %d at offset %d\n", cap->cap_id,
+ cap->next_cap);
+ if ((cap->next_cap == 0) ||
+ (cap->next_cap > TB_CFG_CAP_OFFSET_MAX))
+ return (EINVAL);
+		error = tb_config_next_cap(sc, cap);
+		if (error)
+			return (error);
+ }
+
+ return (0);
+}
+
+int
+tb_config_find_router_cap(struct router_softc *sc, u_int cap, u_int vsc,
+    u_int *offset)
+{
+ struct router_cfg_cap rcap;
+ struct tb_cfg_router *cfg;
+ uint32_t *buf;
+ int error;
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_router_read(sc, 0, 5, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+ rcap.space = TB_CFG_CS_ROUTER;
+ rcap.adap = 0;
+ rcap.next_cap = GET_ROUTER_CS_NEXT_CAP(cfg);
+ rcap.cap_id = cap;
+ rcap.vsc_id = vsc;
+ error = tb_config_find_cap(sc, &rcap);
+ if (error == 0)
+ *offset = rcap.current_cap;
+
+ free(buf, M_THUNDERBOLT);
+ return (error);
+}
+
+int
+tb_config_find_router_vsc(struct router_softc *sc, u_int cap, u_int *offset)
+{
+
+ return (tb_config_find_router_cap(sc, TB_CFG_CAP_VSC, cap, offset));
+}
+
+int
+tb_config_find_router_vsec(struct router_softc *sc, u_int cap, u_int *offset)
+{
+
+ return (tb_config_find_router_cap(sc, TB_CFG_CAP_VSEC, cap, offset));
+}
+
+int
+tb_config_find_adapter_cap(struct router_softc *sc, u_int adap, u_int cap,
+    u_int *offset)
+{
+ struct router_cfg_cap rcap;
+ struct tb_cfg_adapter *cfg;
+ uint32_t *buf;
+ int error;
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_adapter_read(sc, adap, 0, 8, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_adapter *)buf;
+ rcap.space = TB_CFG_CS_ADAPTER;
+ rcap.adap = adap;
+ rcap.next_cap = GET_ADP_CS_NEXT_CAP(cfg);
+ rcap.cap_id = cap;
+ rcap.vsc_id = 0;
+ error = tb_config_find_cap(sc, &rcap);
+ if (error == 0)
+ *offset = rcap.current_cap;
+
+ free(buf, M_THUNDERBOLT);
+ return (error);
+}
+
+int
+tb_config_get_lc_uuid(struct router_softc *rsc, uint8_t *uuid)
+{
+	u_int offset;
+	int error;
+ uint32_t buf[8];
+
+ bzero(buf, sizeof(buf));
+
+ error = tb_config_find_router_vsec(rsc, TB_CFG_VSEC_LC, &offset);
+ if (error != 0) {
+ tb_debug(rsc, DBG_ROUTER, "Error finding LC registers: %d\n",
+ error);
+ return (error);
+ }
+
+ error = tb_config_router_read(rsc, offset + TB_LC_UUID, 4, buf);
+ if (error != 0) {
+ tb_debug(rsc, DBG_ROUTER, "Error fetching UUID: %d\n", error);
+ return (error);
+ }
+
+ bcopy(buf, uuid, 16);
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/router_var.h b/sys/dev/thunderbolt/router_var.h
new file mode 100644
index 000000000000..8366ede852e7
--- /dev/null
+++ b/sys/dev/thunderbolt/router_var.h
@@ -0,0 +1,242 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ROUTER_VAR_H
+#define _ROUTER_VAR_H
+
+struct router_softc;
+struct router_command;
+struct router_topo;
+
+typedef void (*router_callback_t)(struct router_softc *,
+ struct router_command *, void *);
+
+struct router_command {
+ TAILQ_ENTRY(router_command) link;
+ struct router_softc *sc;
+ struct nhi_cmd_frame *nhicmd;
+ u_int flags;
+#define RCMD_POLLED (1 << 0)
+#define RCMD_POLL_COMPLETE (1 << 1)
+ int resp_len;
+ router_callback_t callback;
+ void *callback_arg;
+ u_int dwlen;
+ u_int timeout;
+ int retries;
+ u_int ev;
+ uint8_t resp_buffer[NHI_RING0_FRAME_SIZE];
+};
+
+struct router_softc {
+ TAILQ_ENTRY(router_softc) link;
+ u_int debug;
+ tb_route_t route;
+ device_t dev;
+ struct nhi_softc *nsc;
+
+ struct mtx mtx;
+ struct nhi_ring_pair *ring0;
+ TAILQ_HEAD(,router_command) cmd_queue;
+
+ struct router_command *inflight_cmd;
+
+ uint8_t depth;
+ uint8_t max_adap;
+
+ struct router_softc **adapters;
+
+ uint32_t uuid[4];
+};
+
+struct router_cfg_cap {
+ uint16_t current_cap;
+ uint16_t next_cap;
+ uint32_t space;
+ uint8_t adap;
+ uint8_t cap_id;
+ uint8_t vsc_id;
+ uint8_t vsc_len;
+ uint16_t vsec_len;
+};
+
+int tb_router_attach(struct router_softc *, tb_route_t);
+int tb_router_attach_root(struct nhi_softc *, tb_route_t);
+int tb_router_detach(struct router_softc *);
+int tb_config_read(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_read_polled(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_read_async(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *, void *);
+int tb_config_write(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_next_cap(struct router_softc *, struct router_cfg_cap *);
+int tb_config_find_cap(struct router_softc *, struct router_cfg_cap *);
+int tb_config_find_router_cap(struct router_softc *, u_int, u_int, u_int *);
+int tb_config_find_router_vsc(struct router_softc *, u_int, u_int *);
+int tb_config_find_router_vsec(struct router_softc *, u_int, u_int *);
+int tb_config_find_adapter_cap(struct router_softc *, u_int, u_int, u_int *);
+int tb_config_get_lc_uuid(struct router_softc *, uint8_t *);
+
+#define TB_CONFIG_ADDR(seq, space, adapter, dwlen, offset) \
+    (((seq) << TB_CFG_SEQ_SHIFT) | (space) | \
+    ((adapter) << TB_CFG_ADAPTER_SHIFT) | ((dwlen) << TB_CFG_SIZE_SHIFT) | \
+    ((offset) & TB_CFG_ADDR_MASK))
+
+#define TB_ROUTE(router) \
+    (((uint64_t)(router)->route.hi << 32) | (router)->route.lo)
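+
+/*
+ * Example (illustrative): a two-dword read at dword offset 0x10 of adapter
+ * 1's config space, with sequence number 0, is addressed as
+ * TB_CONFIG_ADDR(0, TB_CFG_CS_ADAPTER, 1, 2, 0x10).
+ */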
+
+static __inline void *
+router_get_frame_data(struct router_command *cmd)
+{
+ return ((void *)cmd->nhicmd->data);
+}
+
+/*
+ * Read the Router config space for the router referred to in the softc.
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_router_read(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+static __inline int
+tb_config_router_read_polled(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_read_polled(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+/*
+ * Write the Router config space for the router referred to in the softc.
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_router_write(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+/*
+ * Read the Adapter config space for the router referred to in the softc.
+ * adap - Adapter number
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_adapter_read(struct router_softc *sc, u_int adap, u_int addr,
+ u_int dwlen, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_ADAPTER, adap, addr, dwlen, buf));
+}
+
+/*
+ * Write the Adapter config space for the router referred to in the softc.
+ * adap - Adapter number
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - holds the dwords to be written.
+ */
+static __inline int
+tb_config_adapter_write(struct router_softc *sc, u_int adap, u_int addr,
+ u_int dwlen, uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_ADAPTER, adap, addr, dwlen, buf));
+}
+
+/*
+ * Read the Path config space for the router referred to in the softc.
+ * Each path config entry is two dwords, hence the scaling below.
+ * adap - Adapter number
+ * hopid - HopID of the path
+ * num - The number of adjacent path entries
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_path_read(struct router_softc *sc, u_int adap, u_int hopid,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_PATH, adap, hopid * 2,
+ num * 2, buf));
+}
+
+/*
+ * Write the Path config space for the router referred to in the softc.
+ * Each path config entry is two dwords, hence the scaling below.
+ * adap - Adapter number
+ * hopid - HopID of the path
+ * num - The number of adjacent path entries
+ * buf - holds the dwords to be written.
+ */
+static __inline int
+tb_config_path_write(struct router_softc *sc, u_int adap, u_int hopid,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_PATH, adap, hopid * 2,
+ num * 2, buf));
+}
+
+/*
+ * Read the Counters config space for the router referred to in the softc.
+ * Counters come in sets of 3 dwords.
+ * adap - Adapter number
+ * set - The counter set index
+ * num - The number of adjacent counter sets to read
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_counters_read(struct router_softc *sc, u_int adap, u_int set,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_COUNTERS, adap, set * 3,
+ num * 3, buf));
+}
+
+static __inline void
+tb_config_set_root(struct router_softc *sc)
+{
+ sc->nsc->root_rsc = sc;
+}
+
+static __inline void *
+tb_config_get_root(struct router_softc *sc)
+{
+ return (sc->nsc->root_rsc);
+}
+
+#endif /* _ROUTER_VAR_H */
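
As a usage sketch (context assumed from the router code: a valid struct
router_softc *sc; the offsets and lengths are illustrative only), the
dword-based wrappers compose like this:

	uint32_t rtr[2], adp[2];
	int error;

	/* Router CS 0-1: the identity dwords at the top of config space */
	error = tb_config_router_read(sc, 0, 2, rtr);
	if (error == 0)
		/* Adapter 1, CS 0-1, through the same dword interface */
		error = tb_config_adapter_read(sc, 1, 0, 2, adp);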
diff --git a/sys/dev/thunderbolt/tb_acpi_pcib.c b/sys/dev/thunderbolt/tb_acpi_pcib.c
new file mode 100644
index 000000000000..947df3688535
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_acpi_pcib.c
@@ -0,0 +1,181 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+
+/* ACPI identified PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_private.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#include <machine/md_var.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_pcib.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+static int tb_acpi_pcib_probe(device_t);
+static int tb_acpi_pcib_attach(device_t);
+static int tb_acpi_pcib_detach(device_t);
+
+/* ACPI attachment for Thunderbolt bridges */
+
+static int
+tb_acpi_pcib_probe(device_t dev)
+{
+ char desc[TB_DESC_MAX], desc1[TB_DESC_MAX];
+ int val;
+
+ if (pci_get_class(dev) != PCIC_BRIDGE ||
+ pci_get_subclass(dev) != PCIS_BRIDGE_PCI ||
+ acpi_disabled("pci"))
+ return (ENXIO);
+ if (acpi_get_handle(dev) == NULL)
+ return (ENXIO);
+ if (pci_cfgregopen() == 0)
+ return (ENXIO);
+
+	/*
+	 * On success, claim a higher probe priority than the conventional
+	 * Thunderbolt pcib driver.
+	 */
+ if ((val = tb_pcib_probe_common(dev, desc)) < 0) {
+ val++;
+ snprintf(desc1, TB_DESC_MAX, "ACPI %s", desc);
+ device_set_desc_copy(dev, desc1);
+ }
+
+ return (val);
+}
+
+static int
+tb_acpi_pcib_attach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ error = tb_pcib_attach_common(dev);
+ if (error)
+ return (error);
+
+ sc = device_get_softc(dev);
+ sc->ap_handle = acpi_get_handle(dev);
+ KASSERT(sc->ap_handle != NULL, ("ACPI handle cannot be NULL\n"));
+
+ /* Execute OSUP in case the BIOS didn't */
+ if (TB_IS_ROOT(sc)) {
+ ACPI_OBJECT_LIST list;
+ ACPI_OBJECT arg;
+ ACPI_BUFFER buf;
+ ACPI_STATUS s;
+
+ tb_debug(sc, DBG_BRIDGE, "Executing OSUP\n");
+
+ list.Pointer = &arg;
+ list.Count = 1;
+ arg.Integer.Value = 0;
+ arg.Type = ACPI_TYPE_INTEGER;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ buf.Pointer = NULL;
+
+ s = AcpiEvaluateObject(sc->ap_handle, "\\_GPE.OSUP", &list,
+ &buf);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL,
+ "ACPI returned %d, buf= %p\n", s, buf.Pointer);
+ if (buf.Pointer != NULL)
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "buffer= 0x%x\n",
+ *(uint32_t *)buf.Pointer);
+
+ AcpiOsFree(buf.Pointer);
+ }
+
+ pcib_attach_common(dev);
+ acpi_pcib_fetch_prt(dev, &sc->ap_prt);
+
+ return (pcib_attach_child(dev));
+}
+
+static int
+tb_acpi_pcib_detach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ tb_debug(sc, DBG_BRIDGE|DBG_ROUTER|DBG_EXTRA, "tb_acpi_pcib_detach\n");
+
+ error = pcib_detach(dev);
+ if (error == 0)
+ AcpiOsFree(sc->ap_prt.Pointer);
+ return (error);
+}
+
+static device_method_t tb_acpi_pcib_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tb_acpi_pcib_probe),
+ DEVMETHOD(device_attach, tb_acpi_pcib_attach),
+ DEVMETHOD(device_detach, tb_acpi_pcib_detach),
+
+ /* Thunderbolt interface is inherited */
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_2(tbolt, tb_acpi_pcib_driver, tb_acpi_pcib_methods,
+ sizeof(struct tb_pcib_softc), pcib_driver, tb_pcib_driver);
+DRIVER_MODULE_ORDERED(tb_acpi_pcib, pci, tb_acpi_pcib_driver,
+ NULL, NULL, SI_ORDER_MIDDLE);
+MODULE_DEPEND(tb_acpi_pcib, acpi, 1, 1, 1);
diff --git a/sys/dev/thunderbolt/tb_debug.c b/sys/dev/thunderbolt/tb_debug.c
new file mode 100644
index 000000000000..f455ee72e9f6
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_debug.c
@@ -0,0 +1,334 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+tb_string_t nhi_outmailcmd_opmode[] = {
+ { 0x000, "Safe Mode" },
+ { 0x100, "Authentication Mode" },
+ { 0x200, "Endpoint Mode" },
+ { 0x300, "Connection Manager Fully Functional" },
+ { 0, NULL }
+};
+
+tb_string_t nhi_frame_pdf[] = {
+ { 0x01, "PDF_READ" },
+ { 0x02, "PDF_WRITE" },
+ { 0x03, "PDF_NOTIFY" },
+ { 0x04, "PDF_NOTIFY_ACK" },
+ { 0x05, "PDF_HOTPLUG" },
+ { 0x06, "PDF_XDOMAIN_REQ" },
+ { 0x07, "PDF_XDOMAIN_RESP" },
+ { 0x0a, "PDF_CM_EVENT" },
+ { 0x0b, "PDF_CM_REQ" },
+ { 0x0c, "PDF_CM_RESP" },
+ { 0, NULL }
+};
+
+tb_string_t tb_security_level[] = {
+ { TBSEC_NONE, "None" },
+ { TBSEC_USER, "User" },
+ { TBSEC_SECURE, "Secure Authorization" },
+ { TBSEC_DP, "Display Port" },
+ { TBSEC_UNKNOWN,"Unknown" },
+ { 0, NULL }
+};
+
+tb_string_t tb_mbox_connmode[] = {
+ { INMAILCMD_SETMODE_CERT_TB_1ST_DEPTH, "Certified/1st" },
+ { INMAILCMD_SETMODE_ANY_TB_1ST_DEPTH, "Any/1st" },
+ { INMAILCMD_SETMODE_CERT_TB_ANY_DEPTH, "Certified/Any" },
+ { INMAILCMD_SETMODE_ANY_TB_ANY_DEPTH, "Any/Any" },
+ { 0, NULL }
+};
+
+tb_string_t tb_device_power[] = {
+ { 0x0, "Self-powered" },
+ { 0x1, "Normal power" },
+ { 0x2, "High power" },
+ { 0x3, "Unknown power draw" },
+ { 0, NULL }
+};
+
+tb_string_t tb_notify_code[] = {
+ { 0x03, "DEVCONN" },
+ { 0x04, "DISCONN" },
+ { 0x05, "DPCONN" },
+ { 0x06, "DOMCONN" },
+ { 0x07, "DOMDISCONN" },
+ { 0x08, "DPCHANGE" },
+ { 0x09, "I2C" },
+ { 0x0a, "RTD3" },
+ { 0, NULL }
+};
+
+tb_string_t tb_adapter_type[] = {
+ { ADP_CS2_UNSUPPORTED, "Unsupported Adapter" },
+ { ADP_CS2_LANE, "Lane Adapter" },
+ { ADP_CS2_HOSTIF, "Host Interface Adapter" },
+ { ADP_CS2_PCIE_DFP, "Downstream PCIe Adapter" },
+ { ADP_CS2_PCIE_UFP, "Upstream PCIe Adapter" },
+ { ADP_CS2_DP_OUT, "DP OUT Adapter" },
+ { ADP_CS2_DP_IN, "DP IN Adapter" },
+ { ADP_CS2_USB3_DFP, "Downstream USB3 Adapter" },
+ { ADP_CS2_USB3_UFP, "Upstream USB3 Adapter" },
+ { 0, NULL }
+};
+
+tb_string_t tb_adapter_state[] = {
+ { CAP_LANE_STATE_DISABLE, "Disabled" },
+ { CAP_LANE_STATE_TRAINING, "Training" },
+ { CAP_LANE_STATE_CL0, "CL0" },
+ { CAP_LANE_STATE_TXCL0, "TX CL0s" },
+ { CAP_LANE_STATE_RXCL0, "RX CL0s" },
+ { CAP_LANE_STATE_CL1, "CL1" },
+ { CAP_LANE_STATE_CL2, "CL2" },
+ { CAP_LANE_STATE_CLD, "CLd" },
+ { 0, NULL }
+};
+
+tb_string_t tb_notify_event[] = {
+ { TB_CFG_ERR_CONN, "Connection error" },
+ { TB_CFG_ERR_LINK, "Link error" },
+ { TB_CFG_ERR_ADDR, "Addressing error" },
+ { TB_CFG_ERR_ADP, "Invalid adapter" },
+ { TB_CFG_ERR_ENUM, "Enumeration error" },
+ { TB_CFG_ERR_NUA, "Adapter not enumerated" },
+ { TB_CFG_ERR_LEN, "Invalid request length" },
+ { TB_CFG_ERR_HEC, "Invalid packet header" },
+ { TB_CFG_ERR_FC, "Flow control error" },
+ { TB_CFG_ERR_PLUG, "Hot plug error" },
+ { TB_CFG_ERR_LOCK, "Adapter locked" },
+ { TB_CFG_HP_ACK, "Hotplug acknowledgement" },
+ { TB_CFG_DP_BW, "Display port bandwidth change" },
+ { 0, NULL }
+};
+
+const char *
+tb_get_string(uintmax_t key, tb_string_t *table)
+{
+
+ if (table == NULL)
+ return ("<null>");
+
+ while (table->value != NULL) {
+ if (table->key == key)
+ return (table->value);
+ table++;
+ }
+
+ return ("<unknown>");
+}
+
+static struct tb_debug_string {
+ char *name;
+ int flag;
+} tb_debug_strings[] = {
+	{"init", DBG_INIT},
+	{"info", DBG_INFO},
+ {"rxq", DBG_RXQ},
+ {"txq", DBG_TXQ},
+ {"intr", DBG_INTR},
+ {"tb", DBG_TB},
+ {"mbox", DBG_MBOX},
+ {"bridge", DBG_BRIDGE},
+ {"cfg", DBG_CFG},
+ {"router", DBG_ROUTER},
+ {"port", DBG_PORT},
+ {"hcm", DBG_HCM},
+ {"extra", DBG_EXTRA},
+ {"noisy", DBG_NOISY},
+ {"full", DBG_FULL}
+};
+
+enum tb_debug_level_combiner {
+ COMB_NONE,
+ COMB_ADD,
+ COMB_SUB
+};
+
+int
+tb_debug_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct sbuf *sbuf;
+#if defined (THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ struct tb_debug_string *string;
+ char *buffer;
+ size_t sz;
+ u_int *debug;
+ int i, len;
+#endif
+ int error;
+
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+#if defined (THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ debug = (u_int *)arg1;
+
+ sbuf_printf(sbuf, "%#x", *debug);
+
+	sz = nitems(tb_debug_strings);
+ for (i = 0; i < sz; i++) {
+ string = &tb_debug_strings[i];
+ if (*debug & string->flag)
+ sbuf_printf(sbuf, ",%s", string->name);
+ }
+
+ error = sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ if (error || req->newptr == NULL)
+ return (error);
+
+ len = req->newlen - req->newidx;
+ if (len == 0)
+ return (0);
+
+	/* The extra zeroed byte guarantees NUL termination for the parser */
+	buffer = malloc(len + 1, M_THUNDERBOLT, M_ZERO|M_WAITOK);
+	error = SYSCTL_IN(req, buffer, len);
+	if (error == 0)
+		tb_parse_debug(debug, buffer);
+
+ free(buffer, M_THUNDERBOLT);
+#else
+ sbuf_printf(sbuf, "debugging unavailable");
+ error = sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+#endif
+
+ return (error);
+}
+
+void
+tb_parse_debug(u_int *debug, char *list)
+{
+ struct tb_debug_string *string;
+ enum tb_debug_level_combiner op;
+ char *token, *endtoken;
+ size_t sz;
+ int flags, i;
+
+ if (list == NULL || *list == '\0')
+ return;
+
+ if (*list == '+') {
+ op = COMB_ADD;
+ list++;
+ } else if (*list == '-') {
+ op = COMB_SUB;
+ list++;
+ } else
+ op = COMB_NONE;
+ if (*list == '\0')
+ return;
+
+ flags = 0;
+	sz = nitems(tb_debug_strings);
+ while ((token = strsep(&list, ":,")) != NULL) {
+
+ /* Handle integer flags */
+ flags |= strtol(token, &endtoken, 0);
+ if (token != endtoken)
+ continue;
+
+ /* Handle text flags */
+ for (i = 0; i < sz; i++) {
+ string = &tb_debug_strings[i];
+ if (strcasecmp(token, string->name) == 0) {
+ flags |= string->flag;
+ break;
+ }
+ }
+ }
+
+ switch (op) {
+ case COMB_NONE:
+ *debug = flags;
+ break;
+ case COMB_ADD:
+ *debug |= flags;
+ break;
+ case COMB_SUB:
+ *debug &= (~flags);
+ break;
+ }
+ return;
+}
+
+void
+tbdbg_dprintf(device_t dev, u_int debug, u_int val, const char *fmt, ...)
+{
+#if defined(THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ va_list ap;
+ u_int lvl, dbg;
+
+ lvl = debug & 0xc0000000;
+ dbg = debug & 0x3fffffff;
+ va_start(ap, fmt);
+ if ((lvl >= (val & 0xc0000000)) &&
+ ((dbg & (val & 0x3fffffff)) != 0)) {
+ device_printf(dev, "");
+ vprintf(fmt, ap);
+ }
+ va_end(ap);
+#endif
+}
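
A hedged sketch of the parser's combiner semantics (kernel context assumed;
note that tb_parse_debug() writes into the list via strsep(), so a mutable
buffer is required, never a string literal):

	u_int debug = 0;
	char buf[32];

	strlcpy(buf, "init,rxq", sizeof(buf));
	tb_parse_debug(&debug, buf);	/* replace: DBG_INIT|DBG_RXQ */

	strlcpy(buf, "+router", sizeof(buf));
	tb_parse_debug(&debug, buf);	/* add: DBG_ROUTER */

	strlcpy(buf, "-rxq", sizeof(buf));
	tb_parse_debug(&debug, buf);	/* subtract: clears DBG_RXQ */

Numeric tokens also work ("0x100" sets DBG_CFG), since each token is first
tried with strtol().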
diff --git a/sys/dev/thunderbolt/tb_debug.h b/sys/dev/thunderbolt/tb_debug.h
new file mode 100644
index 000000000000..4f5584420882
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_debug.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 driver debug strings
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_DEBUG_H
+#define _TB_DEBUG_H
+
+typedef struct {
+ uintmax_t key;
+ const char * value;
+} tb_string_t;
+
+const char * tb_get_string(uintmax_t, tb_string_t *);
+int tb_debug_sysctl(SYSCTL_HANDLER_ARGS);
+void tb_parse_debug(u_int *, char *);
+
+extern tb_string_t nhi_outmailcmd_opmode[];
+extern tb_string_t nhi_frame_pdf[];
+extern tb_string_t tb_security_level[];
+extern tb_string_t tb_rdy_connmode[];
+extern tb_string_t tb_mbox_connmode[];
+extern tb_string_t tb_device_power[];
+extern tb_string_t tb_notify_code[];
+extern tb_string_t tb_adapter_type[];
+extern tb_string_t tb_adapter_state[];
+extern tb_string_t tb_notify_event[];
+
+enum {
+ /* Debug subsystems */
+ DBG_NONE = 0,
+ DBG_INIT = (1 << 0),
+ DBG_INFO = (1 << 1),
+ DBG_RXQ = (1 << 2),
+ DBG_TXQ = (1 << 3),
+ DBG_INTR = (1 << 4),
+ DBG_TB = (1 << 5),
+ DBG_MBOX = (1 << 6),
+ DBG_BRIDGE = (1 << 7),
+ DBG_CFG = (1 << 8),
+ DBG_ROUTER = (1 << 9),
+ DBG_PORT = (1 << 10),
+ DBG_HCM = (1 << 11),
+ /* Debug levels */
+ DBG_EXTRA = (1 << 30),
+ DBG_NOISY = (1 << 31),
+ DBG_FULL = DBG_EXTRA | DBG_NOISY
+};
+
+/*
+ * Macros to wrap printing.
+ * Each softc type needs `dev` and `debug` fields. tbdbg_dprintf is a real
+ * function so that the compiler can check format strings at build time.
+ */
+void tbdbg_dprintf(device_t dev, u_int debug, u_int val, const char *fmt, ...) __printflike(4, 5);
+
+#if defined(THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+#define tb_debug(sc, level, fmt...) \
+ tbdbg_dprintf((sc)->dev, (sc)->debug, level, ##fmt)
+#else
+#define tb_debug(sc, level, fmt...)
+#endif
+#define tb_printf(sc, fmt...) \
+ device_printf((sc)->dev, ##fmt)
+
+#endif /* _TB_DEBUG_H */
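
A consumer sketch (the foo_softc type is hypothetical): per the comment
above, any softc exposing dev and debug fields can use the wrappers
unchanged.

	struct foo_softc {
		device_t	dev;
		u_int		debug;
	};

	static void
	foo_report(struct foo_softc *sc, int adap)
	{
		/* Compiles away entirely unless THUNDERBOLT_DEBUG is set */
		tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "scanning adapter %d\n",
		    adap);
		tb_printf(sc, "attach complete\n");
	}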
diff --git a/sys/dev/thunderbolt/tb_dev.c b/sys/dev/thunderbolt/tb_dev.c
new file mode 100644
index 000000000000..7ea545dee0c3
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_dev.c
@@ -0,0 +1,331 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Userspace control device for USB4 / TB3 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/nv.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/tb_dev.h>
+#include <dev/thunderbolt/tb_ioctl.h>
+
+struct tbdev_if;
+struct tbdev_dm;
+struct tbdev_rt;
+
+struct tbdev_if {
+ TAILQ_ENTRY(tbdev_if) dev_next;
+ char name[SPECNAMELEN];
+};
+
+struct tbdev_dm {
+ TAILQ_ENTRY(tbdev_dm) dev_next;
+ char uid[16];
+};
+
+struct tbdev_rt {
+ TAILQ_ENTRY(tbdev_rt) dev_next;
+ uint64_t route;
+};
+
+static int tbdev_static_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
+			int flag, struct thread *td);
+
+static struct cdevsw tbdev_static_devsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = tbdev_static_ioctl,
+ .d_name = "tbt"
+};
+static struct cdev *tb_dev = NULL;
+
+static TAILQ_HEAD(, tbdev_if) tbdev_head = TAILQ_HEAD_INITIALIZER(tbdev_head);
+static TAILQ_HEAD(, tbdev_dm) tbdomain_head = TAILQ_HEAD_INITIALIZER(tbdomain_head);
+static TAILQ_HEAD(, tbdev_rt) tbrouter_head = TAILQ_HEAD_INITIALIZER(tbrouter_head);
+
+static struct mtx tbdev_mtx;
+MTX_SYSINIT(tbdev_mtx, &tbdev_mtx, "TBT Device Mutex", MTX_DEF);
+
+MALLOC_DEFINE(M_THUNDERBOLT, "thunderbolt", "memory for thunderbolt");
+
+static void
+tbdev_init(void *arg)
+{
+
+ tb_dev = make_dev(&tbdev_static_devsw, 0, UID_ROOT, GID_OPERATOR,
+ 0644, TBT_DEVICE_NAME);
+ if (tb_dev == NULL)
+ printf("Cannot create Thunderbolt system device\n");
+
+ return;
+}
+
+SYSINIT(tbdev_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, tbdev_init, NULL);
+
+static void
+tbdev_uninit(void *arg)
+{
+ if (tb_dev != NULL) {
+ destroy_dev(tb_dev);
+ tb_dev = NULL;
+ }
+}
+
+SYSUNINIT(tbdev_uninit, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, tbdev_uninit, NULL);
+
+int
+tbdev_add_interface(struct nhi_softc *nhi)
+{
+ struct tbdev_if *ifce;
+
+ ifce = malloc(sizeof(struct tbdev_if), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (ifce == NULL)
+ return (ENOMEM);
+
+ strlcpy(ifce->name, device_get_nameunit(nhi->dev), SPECNAMELEN);
+ mtx_lock(&tbdev_mtx);
+ TAILQ_INSERT_TAIL(&tbdev_head, ifce, dev_next);
+ mtx_unlock(&tbdev_mtx);
+
+ return (0);
+}
+
+int
+tbdev_remove_interface(struct nhi_softc *nhi)
+{
+ struct tbdev_if *ifce = NULL, *if_back;
+ const char *name;
+
+ name = device_get_nameunit(nhi->dev);
+ mtx_lock(&tbdev_mtx);
+ TAILQ_FOREACH_SAFE(ifce, &tbdev_head, dev_next, if_back) {
+ if (strncmp(name, ifce->name, SPECNAMELEN) == 0) {
+ TAILQ_REMOVE(&tbdev_head, ifce, dev_next);
+ break;
+ }
+ }
+ mtx_unlock(&tbdev_mtx);
+
+ if (ifce != NULL)
+ free(ifce, M_THUNDERBOLT);
+
+ return (0);
+}
+
+int
+tbdev_add_domain(void *domain)
+{
+
+ return (0);
+}
+
+int
+tbdev_remove_domain(void *domain)
+{
+
+ return (0);
+}
+
+int
+tbdev_add_router(struct router_softc *rt)
+{
+
+ return (0);
+}
+
+int
+tbdev_remove_router(struct router_softc *rt)
+{
+
+ return (0);
+}
+
+static int
+tbdev_discover(caddr_t addr)
+{
+ nvlist_t *nvl = NULL;
+ struct tbt_ioc *ioc = (struct tbt_ioc *)addr;
+ struct tbdev_if *dev;
+ struct tbdev_dm *dm;
+ struct tbdev_rt *rt;
+ void *nvlpacked = NULL;
+ const char *cmd = NULL;
+ int error = 0;
+
+ if ((ioc->data == NULL) || (ioc->size == 0)) {
+ printf("data or size is 0\n");
+ return (EINVAL);
+ }
+
+ if ((ioc->len == 0) || (ioc->len > TBT_IOCMAXLEN) ||
+ (ioc->len > ioc->size)) {
+ printf("len is wrong\n");
+ return (EINVAL);
+ }
+
+ nvlpacked = malloc(ioc->len, M_THUNDERBOLT, M_NOWAIT);
+ if (nvlpacked == NULL) {
+ printf("cannot allocate nvlpacked\n");
+ return (ENOMEM);
+ }
+
+ error = copyin(ioc->data, nvlpacked, ioc->len);
+ if (error) {
+ free(nvlpacked, M_THUNDERBOLT);
+ printf("error %d from copyin\n", error);
+ return (error);
+ }
+
+ nvl = nvlist_unpack(nvlpacked, ioc->len, NV_FLAG_NO_UNIQUE);
+ if (nvl == NULL) {
+ free(nvlpacked, M_THUNDERBOLT);
+ printf("cannot unpack nvlist\n");
+ return (EINVAL);
+ }
+ free(nvlpacked, M_THUNDERBOLT);
+ nvlpacked = NULL;
+
+ if (nvlist_exists_string(nvl, TBT_DISCOVER_TYPE))
+ cmd = nvlist_get_string(nvl, TBT_DISCOVER_TYPE);
+ if (cmd == NULL) {
+ printf("cannot find type string\n");
+ error = EINVAL;
+ goto out;
+ }
+
+ mtx_lock(&tbdev_mtx);
+ if (strncmp(cmd, TBT_DISCOVER_IFACE, TBT_NAMLEN) == 0) {
+ TAILQ_FOREACH(dev, &tbdev_head, dev_next)
+ nvlist_add_string(nvl, TBT_DISCOVER_IFACE, dev->name);
+ } else if (strncmp(cmd, TBT_DISCOVER_DOMAIN, TBT_NAMLEN) == 0) {
+ TAILQ_FOREACH(dm, &tbdomain_head, dev_next)
+ nvlist_add_string(nvl, TBT_DISCOVER_DOMAIN, dm->uid);
+ } else if (strncmp(cmd, TBT_DISCOVER_ROUTER, TBT_NAMLEN) == 0) {
+ TAILQ_FOREACH(rt, &tbrouter_head, dev_next)
+ nvlist_add_number(nvl, TBT_DISCOVER_ROUTER, rt->route);
+	} else {
+		mtx_unlock(&tbdev_mtx);
+		printf("cannot find supported type\n");
+		error = EINVAL;
+		goto out;
+	}
+	mtx_unlock(&tbdev_mtx);
+
+	error = nvlist_error(nvl);
+	if (error != 0) {
+		printf("error %d state in nvlist\n", error);
+		goto out;
+	}
+
+	nvlpacked = nvlist_pack(nvl, &ioc->len);
+	if (nvlpacked == NULL) {
+		printf("cannot allocate new packed buffer\n");
+		error = ENOMEM;
+		goto out;
+	}
+	if (ioc->size < ioc->len) {
+		printf("packed buffer is too big to copyout\n");
+		error = ENOSPC;
+		goto out;
+	}
+
+ error = copyout(nvlpacked, ioc->data, ioc->len);
+ if (error)
+ printf("error %d on copyout\n", error);
+
+out:
+ if (nvlpacked != NULL)
+ free(nvlpacked, M_NVLIST);
+ if (nvl != NULL)
+ nvlist_destroy(nvl);
+
+ return (error);
+}
+
+static int
+tbdev_request(caddr_t addr)
+{
+ struct tbt_ioc *ioc = (struct tbt_ioc *)addr;
+ nvlist_t *nvl = NULL;
+ void *nvlpacked = NULL;
+ int error = 0;
+
+ if ((ioc->data == NULL) || (ioc->size == 0))
+ return (ENOMEM);
+
+	/* Request handling is not implemented yet; return an empty list */
+	nvl = nvlist_create(0);
+	if (nvl == NULL)
+		return (ENOMEM);
+	nvlpacked = nvlist_pack(nvl, &ioc->len);
+	nvlist_destroy(nvl);
+	if (nvlpacked == NULL)
+		return (ENOMEM);
+	if (ioc->size < ioc->len) {
+		free(nvlpacked, M_NVLIST);
+		return (ENOSPC);
+	}
+
+	error = copyout(nvlpacked, ioc->data, ioc->len);
+	free(nvlpacked, M_NVLIST);
+	return (error);
+}
+
+static int
+tbdev_static_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td)
+{
+ int error = 0;
+
+ switch (cmd) {
+ case TBT_DISCOVER:
+ error = tbdev_discover(addr);
+ break;
+ case TBT_REQUEST:
+ error = tbdev_request(addr);
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ return (error);
+}
diff --git a/sys/dev/thunderbolt/tb_dev.h b/sys/dev/thunderbolt/tb_dev.h
new file mode 100644
index 000000000000..c40a7fbc3d5a
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_dev.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_DEV_H
+#define _TB_DEV_H
+
+int tbdev_add_interface(struct nhi_softc *);
+int tbdev_remove_interface(struct nhi_softc *);
+int tbdev_add_domain(void *);
+int tbdev_remove_domain(void *);
+int tbdev_add_router(struct router_softc *);
+int tbdev_remove_router(struct router_softc *);
+
+#endif /* _TB_DEV_H */
diff --git a/sys/dev/virtio/mmio/virtio_mmio_if.m b/sys/dev/thunderbolt/tb_if.m
index baebbd9a0b1c..8b0918811a5d 100644
--- a/sys/dev/virtio/mmio/virtio_mmio_if.m
+++ b/sys/dev/thunderbolt/tb_if.m
@@ -1,10 +1,8 @@
#-
-# Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
-# All rights reserved.
+# SPDX-License-Identifier: BSD-2-Clause-FreeBSD
#
-# This software was developed by SRI International and the University of
-# Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
-# ("CTSRD"), as part of the DARPA CRASH research programme.
+# Copyright (c) 2022 Scott Long
+# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -27,73 +25,97 @@
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
+# $FreeBSD$
#
+#include <sys/bus.h>
#include <sys/types.h>
+#include <dev/thunderbolt/tb_reg.h>
-#
-# This is optional interface to virtio mmio backend.
-# Useful when backend is implemented not by the hardware but software, e.g.
-# by using another cpu core.
-#
-
-INTERFACE virtio_mmio;
+INTERFACE tb;
CODE {
- static int
- virtio_mmio_prewrite(device_t dev, size_t offset, int val)
+ struct nhi_softc;
+
+ int
+ tb_generic_find_ufp(device_t dev, device_t *ufp)
{
+ device_t parent;
+
+ parent = device_get_parent(dev);
+ if (parent == NULL)
+ return (EOPNOTSUPP);
- return (1);
+ return (TB_FIND_UFP(parent, ufp));
}
- static int
- virtio_mmio_note(device_t dev, size_t offset, int val)
+ int
+ tb_generic_get_debug(device_t dev, u_int *debug)
{
+ device_t parent;
- return (1);
+ parent = device_get_parent(dev);
+ if (parent == NULL)
+ return (EOPNOTSUPP);
+
+ return (TB_GET_DEBUG(parent, debug));
}
- static int
- virtio_mmio_setup_intr(device_t dev, device_t mmio_dev,
- void *handler, void *ih_user)
- {
+}
- return (1);
- }
-};
+HEADER {
+ struct nhi_softc;
+
+ struct tb_lcmbox_cmd {
+ uint32_t cmd;
+ uint32_t cmd_resp;
+ uint32_t data_in;
+ uint32_t data_out;
+ };
+
+ int tb_generic_find_ufp(device_t, device_t *);
+ int tb_generic_get_debug(device_t, u_int *);
+}
#
-# Inform backend we are going to write data at offset.
+# Read the LC Mailbox
#
-METHOD int prewrite {
+METHOD int lc_mailbox {
device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_prewrite;
+ struct tb_lcmbox_cmd *cmd;
+};
#
-# Inform backend we have data wrotten to offset.
+# Read from the PCIE2CIO port
#
-METHOD int note {
+METHOD int pcie2cio_read {
device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_note;
+ u_int space;
+ u_int port;
+ u_int index;
+ uint32_t *val;
+}
#
-# Inform backend we are going to poll virtqueue.
+# Write to the PCIE2CIO port
#
-METHOD int poll {
+METHOD int pcie2cio_write {
device_t dev;
-};
+ u_int space;
+ u_int port;
+ u_int index;
+ uint32_t val;
+}
#
-# Setup backend-specific interrupts.
+# Return the device that's the upstream facing port
#
-METHOD int setup_intr {
- device_t dev;
- device_t mmio_dev;
- void *handler;
- void *ih_user;
-} DEFAULT virtio_mmio_setup_intr;
+METHOD int find_ufp {
+ device_t dev;
+ device_t *ufp;
+} DEFAULT tb_generic_find_ufp;
+
+METHOD int get_debug {
+ device_t dev;
+ u_int *debug;
+} DEFAULT tb_generic_get_debug;
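
The DEFAULT methods give the interface its recursive shape: a call on any
device walks up the tree until an ancestor (tb_pcib in this stack) actually
implements the method. A hedged consumer sketch, assuming a child device
somewhere under the bridge:

	#include "tb_if.h"

	static void
	tb_example(device_t child)
	{
		device_t ufp;
		u_int debug;

		/* Both resolve via the generic walkers if nobody overrides */
		if (TB_GET_DEBUG(child, &debug) != 0)
			debug = 0;
		if (TB_FIND_UFP(child, &ufp) == 0 && ufp != NULL)
			device_printf(ufp, "upstream facing port\n");
	}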
diff --git a/sys/dev/thunderbolt/tb_ioctl.h b/sys/dev/thunderbolt/tb_ioctl.h
new file mode 100644
index 000000000000..60fafb091cef
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_ioctl.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_IOCTL_H
+#define _TB_IOCTL_H
+
+struct tbt_ioc {
+ void *data; /* user-supplied buffer for the nvlist */
+ size_t size; /* size of the user-supplied buffer */
+ size_t len; /* amount of data in the nvlist */
+};
+
+#define TBT_NAMLEN 16
+#define TBT_DEVICE_NAME "tbtctl"
+#define TBT_IOCMAXLEN 4096
+
+#define TBT_DISCOVER _IOWR('h', 1, struct tbt_ioc)
+#define TBT_DISCOVER_TYPE "type"
+#define TBT_DISCOVER_IFACE "iface"
+#define TBT_DISCOVER_DOMAIN "domain"
+#define TBT_DISCOVER_ROUTER "router"
+
+#define TBT_REQUEST _IOWR('h', 2, struct tbt_ioc)
+
+#endif /* _TB_IOCTL_H */
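
A hedged userland sketch of the discovery handshake (build with -lnv; the
device path follows TBT_DEVICE_NAME above). The request nvlist carries only
a type string; the driver re-packs its answer into the same buffer and
updates len:

	#include <sys/ioctl.h>
	#include <sys/nv.h>
	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	#include <dev/thunderbolt/tb_ioctl.h>

	int
	main(void)
	{
		struct tbt_ioc ioc;
		nvlist_t *nvl;
		char buf[TBT_IOCMAXLEN];
		void *packed;
		size_t len;
		int fd;

		if ((fd = open("/dev/" TBT_DEVICE_NAME, O_RDWR)) == -1)
			err(1, "open");

		nvl = nvlist_create(0);
		nvlist_add_string(nvl, TBT_DISCOVER_TYPE, TBT_DISCOVER_IFACE);
		if ((packed = nvlist_pack(nvl, &len)) == NULL)
			err(1, "nvlist_pack");
		nvlist_destroy(nvl);
		memcpy(buf, packed, len);
		free(packed);

		ioc.data = buf;
		ioc.size = sizeof(buf);
		ioc.len = len;
		if (ioctl(fd, TBT_DISCOVER, &ioc) == -1)
			err(1, "TBT_DISCOVER");

		/* NV_FLAG_NO_UNIQUE: one entry per discovered interface */
		nvl = nvlist_unpack(buf, ioc.len, NV_FLAG_NO_UNIQUE);
		if (nvl != NULL &&
		    nvlist_exists_string(nvl, TBT_DISCOVER_IFACE))
			printf("%s\n",
			    nvlist_get_string(nvl, TBT_DISCOVER_IFACE));

		close(fd);
		return (0);
	}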
diff --git a/sys/dev/thunderbolt/tb_pcib.c b/sys/dev/thunderbolt/tb_pcib.c
new file mode 100644
index 000000000000..00738984ad1c
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_pcib.c
@@ -0,0 +1,614 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+
+/* PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_private.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#include <machine/md_var.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_pcib.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include "tb_if.h"
+
+static int tb_pcib_probe(device_t);
+static int tb_pcib_attach(device_t);
+static int tb_pcib_detach(device_t);
+static int tb_pcib_lc_mailbox(device_t, struct tb_lcmbox_cmd *);
+static int tb_pcib_pcie2cio_read(device_t, u_int, u_int, u_int,
+ uint32_t *);
+static int tb_pcib_pcie2cio_write(device_t, u_int, u_int, u_int, uint32_t);
+static int tb_pcib_find_ufp(device_t, device_t *);
+static int tb_pcib_get_debug(device_t, u_int *);
+
+static int tb_pci_probe(device_t);
+static int tb_pci_attach(device_t);
+static int tb_pci_detach(device_t);
+
+struct tb_pcib_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint32_t flags; /* This follows the tb_softc flags */
+ const char *desc;
+} tb_pcib_identifiers[] = {
+ { VENDOR_INTEL, TB_DEV_AR_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge 2C)" },
+ { VENDOR_INTEL, TB_DEV_AR_LP, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge LP)" },
+ { VENDOR_INTEL, TB_DEV_AR_C_4C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge C 4C)" },
+ { VENDOR_INTEL, TB_DEV_AR_C_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge C (Alpine Ridge C 2C)" },
+ { VENDOR_INTEL, TB_DEV_ICL_0, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
+ "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
+ { VENDOR_INTEL, TB_DEV_ICL_1, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
+ "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+static struct tb_pcib_ident *
+tb_pcib_find_ident(device_t dev)
+{
+ struct tb_pcib_ident *n;
+ uint16_t v, d, sv, sd;
+
+ v = pci_get_vendor(dev);
+ d = pci_get_device(dev);
+ sv = pci_get_subvendor(dev);
+ sd = pci_get_subdevice(dev);
+
+ for (n = tb_pcib_identifiers; n->vendor != 0; n++) {
+ if ((n->vendor != v) || (n->device != d))
+ continue;
+ if (((n->subvendor != 0xffff) && (n->subvendor != sv)) ||
+ ((n->subdevice != 0xffff) && (n->subdevice != sd)))
+ continue;
+ return (n);
+ }
+
+ return (NULL);
+}
+
+static void
+tb_pcib_get_tunables(struct tb_pcib_softc *sc)
+{
+ char tmpstr[80], oid[80];
+
+ /* Set the default */
+ sc->debug = 0;
+
+ /* Grab global variables */
+ bzero(oid, 80);
+ if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid, 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+
+ /* Grab instance variables */
+ bzero(oid, 80);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.tbolt.%d.debug_level",
+ device_get_unit(sc->dev));
+ if (TUNABLE_STR_FETCH(tmpstr, oid, 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+
+ return;
+}
+
+static int
+tb_pcib_setup_sysctl(struct tb_pcib_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = NULL;
+ struct sysctl_oid *tree = NULL;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ if (ctx != NULL)
+ tree = device_get_sysctl_tree(sc->dev);
+
+ if (tree == NULL) {
+ tb_printf(sc, "Error: cannot create sysctl nodes\n");
+ return (EINVAL);
+ }
+ sc->sysctl_tree = tree;
+ sc->sysctl_ctx = ctx;
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
+ OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
+
+ return (0);
+}
+
+/*
+ * This is used for both the PCI and ACPI attachments. It must not return
+ * 0; doing so would force the ACPI attachment to fail.
+ */
+int
+tb_pcib_probe_common(device_t dev, char *desc)
+{
+ device_t ufp;
+ struct tb_pcib_ident *n;
+ char *suffix;
+
+ if ((n = tb_pcib_find_ident(dev)) != NULL) {
+ ufp = NULL;
+ if ((TB_FIND_UFP(dev, &ufp) == 0) && (ufp == dev))
+ suffix = "(Upstream port)";
+ else
+ suffix = "(Downstream port)";
+ snprintf(desc, TB_DESC_MAX, "%s %s", n->desc, suffix);
+ return (BUS_PROBE_VENDOR);
+ }
+ return (ENXIO);
+}
+
+static int
+tb_pcib_probe(device_t dev)
+{
+ char desc[TB_DESC_MAX];
+ int val;
+
+ if ((val = tb_pcib_probe_common(dev, desc)) <= 0)
+ device_set_desc_copy(dev, desc);
+
+ return (val);
+}
+
+int
+tb_pcib_attach_common(device_t dev)
+{
+ device_t ufp;
+ struct tb_pcib_ident *n;
+ struct tb_pcib_softc *sc;
+ uint32_t val;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->vsec = -1;
+
+ n = tb_pcib_find_ident(dev);
+ KASSERT(n != NULL, ("Cannot find TB ident"));
+ sc->flags = n->flags;
+
+ tb_pcib_get_tunables(sc);
+ tb_pcib_setup_sysctl(sc);
+
+ /* XXX Is this necessary for ACPI attachments? */
+ tb_debug(sc, DBG_BRIDGE, "busmaster status was %s\n",
+ (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
+ ? "enabled" : "disabled");
+ pci_enable_busmaster(dev);
+
+ /*
+ * Determine if this is an upstream or downstream facing device, and
+ * whether it's the root of the Thunderbolt topology. It's too bad
+ * that there aren't unique PCI ID's to help with this.
+ */
+ ufp = NULL;
+ if ((TB_FIND_UFP(dev, &ufp) == 0) && (ufp != NULL)) {
+ if (ufp == dev) {
+ sc->flags |= TB_FLAGS_ISUFP;
+ if (TB_FIND_UFP(device_get_parent(dev), NULL) ==
+ EOPNOTSUPP) {
+ sc->flags |= TB_FLAGS_ISROOT;
+ }
+ }
+ }
+
+ /*
+ * Find the PCI Vendor Specific Extended Capability. It's the magic
+ * wand to configuring the Thunderbolt root bridges.
+ */
+ if (TB_IS_AR(sc) || TB_IS_TR(sc)) {
+ error = pci_find_extcap(dev, PCIZ_VENDOR, &sc->vsec);
+ if (error) {
+ tb_printf(sc, "Cannot find VSEC capability: %d\n",
+ error);
+ return (ENXIO);
+ }
+ }
+
+ /*
+ * Take the AR bridge out of low-power mode.
+ * XXX AR only?
+ */
+ if ((1 || TB_IS_AR(sc)) && TB_IS_ROOT(sc)) {
+ struct tb_lcmbox_cmd cmd;
+
+ cmd.cmd = LC_MBOXOUT_CMD_SXEXIT_TBT;
+ cmd.data_in = 0;
+
+ error = TB_LC_MAILBOX(dev, &cmd);
+ tb_debug(sc, DBG_BRIDGE, "SXEXIT returned error= %d resp= 0x%x "
+ "data= 0x%x\n", error, cmd.cmd_resp, cmd.data_out);
+ }
+
+ /* The downstream facing port on AR needs some help */
+ if (TB_IS_AR(sc) && TB_IS_DFP(sc)) {
+ tb_debug(sc, DBG_BRIDGE, "Doing AR L1 fixup\n");
+ val = pci_read_config(dev, sc->vsec + AR_VSCAP_1C, 4);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "VSEC+0x1c= 0x%08x\n", val);
+ val |= (1 << 8);
+ pci_write_config(dev, sc->vsec + AR_VSCAP_1C, val, 4);
+
+ val = pci_read_config(dev, sc->vsec + AR_VSCAP_B0, 4);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "VSEC+0xb0= 0x%08x\n", val);
+ val |= (1 << 12);
+ pci_write_config(dev, sc->vsec + AR_VSCAP_B0, val, 4);
+ }
+
+ return (0);
+}
+
+static int
+tb_pcib_attach(device_t dev)
+{
+ int error;
+
+ error = tb_pcib_attach_common(dev);
+ if (error)
+ return (error);
+ return (pcib_attach(dev));
+}
+
+static int
+tb_pcib_detach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ tb_debug(sc, DBG_BRIDGE|DBG_ROUTER|DBG_EXTRA, "tb_pcib_detach\n");
+
+ /* Put the AR bridge back to sleep */
+ /* XXX disable this until power control for downstream switches works */
+ if (0 && TB_IS_ROOT(sc)) {
+ struct tb_lcmbox_cmd cmd;
+
+ cmd.cmd = LC_MBOXOUT_CMD_GO2SX;
+ cmd.data_in = 0;
+
+ error = TB_LC_MAILBOX(dev, &cmd);
+		tb_debug(sc, DBG_BRIDGE, "GO2SX returned error= %d resp= 0x%x "
+ "data= 0x%x\n", error, cmd.cmd_resp, cmd.data_out);
+ }
+
+ return (pcib_detach(dev));
+}
+
+/* Read/write the Link Controller registers in CFG space */
+static int
+tb_pcib_lc_mailbox(device_t dev, struct tb_lcmbox_cmd *cmd)
+{
+ struct tb_pcib_softc *sc;
+ uint32_t regcmd, result;
+ uint16_t m_in, m_out;
+ int vsec, i;
+
+ sc = device_get_softc(dev);
+ vsec = TB_PCIB_VSEC(dev);
+ if (vsec == -1)
+ return (EOPNOTSUPP);
+
+ if (TB_IS_AR(sc)) {
+ m_in = AR_LC_MBOX_IN;
+ m_out = AR_LC_MBOX_OUT;
+ } else if (TB_IS_ICL(sc)) {
+ m_in = ICL_LC_MBOX_IN;
+ m_out = ICL_LC_MBOX_OUT;
+ } else
+ return (EOPNOTSUPP);
+
+ /* Set the valid bit to signal we're sending a command */
+ regcmd = LC_MBOXOUT_VALID | (cmd->cmd & LC_MBOXOUT_CMD_MASK);
+ regcmd |= (cmd->data_in << LC_MBOXOUT_DATA_SHIFT);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "Writing LC cmd 0x%x\n", regcmd);
+ pci_write_config(dev, vsec + m_out, regcmd, 4);
+
+ for (i = 0; i < 10; i++) {
+ pause("nhi", 1 * hz);
+ result = pci_read_config(dev, vsec + m_in, 4);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "LC Mailbox= 0x%08x\n",
+ result);
+ if ((result & LC_MBOXIN_DONE) != 0)
+ break;
+ }
+
+ /* Clear the valid bit to signal we're done sending the command */
+ pci_write_config(dev, vsec + m_out, 0, 4);
+
+ cmd->cmd_resp = result & LC_MBOXIN_CMD_MASK;
+ cmd->data_out = result >> LC_MBOXIN_CMD_SHIFT;
+
+ if ((result & LC_MBOXIN_DONE) == 0)
+ return (ETIMEDOUT);
+
+ return (0);
+}
+
+static int
+tb_pcib_pcie2cio_wait(device_t dev, u_int timeout)
+{
+#if 0
+ uint32_t val;
+ int vsec;
+
+ vsec = TB_PCIB_VSEC(dev);
+ do {
+ pci_read_config(dev, vsec + PCIE2CIO_CMD, &val);
+ if ((val & PCIE2CIO_CMD_START) == 0) {
+ if (val & PCIE2CIO_CMD_TIMEOUT)
+ break;
+ return 0;
+ }
+
+ msleep(50);
+ } while (time_before(jiffies, end));
+
+#endif
+	return (ETIMEDOUT);
+}
+
+static int
+tb_pcib_pcie2cio_read(device_t dev, u_int space, u_int port, u_int offset,
+ uint32_t *val)
+{
+#if 0
+ uint32_t cmd;
+ int ret, vsec;
+
+ vsec = TB_PCIB_VSEC(dev);
+ if (vsec == -1)
+ return (EOPNOTSUPP);
+
+ cmd = index;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (space << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_START;
+ pci_write_config(dev, vsec + PCIE2CIO_CMD, cmd, 4);
+
+ if ((ret = pci2cio_wait_completion(dev, 5000)) != 0)
+ return (ret);
+
+ *val = pci_read_config(dev, vsec + PCIE2CIO_RDDATA, 4);
+#endif
+ return (0);
+}
+
+static int
+tb_pcib_pcie2cio_write(device_t dev, u_int space, u_int port, u_int offset,
+ uint32_t val)
+{
+#if 0
+ uint32_t cmd;
+ int ret, vsec;
+
+ vsec = TB_PCIB_VSEC(dev);
+ if (vsec == -1)
+ return (EOPNOTSUPP);
+
+ pci_write_config(dev, vsec + PCIE2CIO_WRDATA, val, 4);
+
+ cmd = index;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (space << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+ pci_write_config(dev, vsec + PCIE2CIO_CMD, cmd);
+
+#endif
+ return (tb_pcib_pcie2cio_wait(dev, 5000));
+}
+
+/*
+ * The Upstream Facing Port (UFP) in a switch is special, it's the function
+ * that responds to some of the special programming mailboxes. It can't be
+ * differentiated by PCI ID, so a heuristic approach to identifying it is
+ * required.
+ */
+static int
+tb_pcib_find_ufp(device_t dev, device_t *ufp)
+{
+ device_t upstream;
+ struct tb_pcib_softc *sc;
+	uint32_t val;
+	int error, vsec;
+
+ upstream = NULL;
+ sc = device_get_softc(dev);
+ if (sc == NULL)
+ return (EOPNOTSUPP);
+
+ if (TB_IS_UFP(sc)) {
+ upstream = dev;
+ error = 0;
+ goto out;
+ }
+
+ /*
+ * This register is supposed to be filled in on the upstream port
+ * and tells how many downstream ports there are. It doesn't seem
+ * to get filled in on AR host controllers, but is on various
+ * peripherals.
+ */
+ error = pci_find_extcap(dev, PCIZ_VENDOR, &vsec);
+ if (error == 0) {
+ val = pci_read_config(dev, vsec + 0x18, 4);
+ if ((val & 0x1f) > 0) {
+ upstream = dev;
+ goto out;
+ }
+ }
+
+ /*
+ * Since we can't trust that the VSEC register is filled in, the only
+ * other option is to see if we're at the top of the topology, which
+ * implies that we're at the upstream port of the host controller.
+ */
+ error = TB_FIND_UFP(device_get_parent(dev), ufp);
+ if (error == EOPNOTSUPP) {
+ upstream = dev;
+ error = 0;
+ goto out;
+ } else
+ return (error);
+
+out:
+ if (ufp != NULL)
+ *ufp = upstream;
+
+ return (error);
+}
+
+static int
+tb_pcib_get_debug(device_t dev, u_int *debug)
+{
+ struct tb_pcib_softc *sc;
+
+ sc = device_get_softc(dev);
+ if ((sc == NULL) || (debug == NULL))
+ return (EOPNOTSUPP);
+
+ *debug = sc->debug;
+ return (0);
+}
+
+static device_method_t tb_pcib_methods[] = {
+ DEVMETHOD(device_probe, tb_pcib_probe),
+ DEVMETHOD(device_attach, tb_pcib_attach),
+ DEVMETHOD(device_detach, tb_pcib_detach),
+
+ DEVMETHOD(tb_lc_mailbox, tb_pcib_lc_mailbox),
+ DEVMETHOD(tb_pcie2cio_read, tb_pcib_pcie2cio_read),
+ DEVMETHOD(tb_pcie2cio_write, tb_pcib_pcie2cio_write),
+
+ DEVMETHOD(tb_find_ufp, tb_pcib_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_pcib_get_debug),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(tbolt, tb_pcib_driver, tb_pcib_methods,
+ sizeof(struct tb_pcib_softc), pcib_driver);
+DRIVER_MODULE_ORDERED(tb_pcib, pci, tb_pcib_driver,
+ NULL, NULL, SI_ORDER_MIDDLE);
+MODULE_DEPEND(tb_pcib, pci, 1, 1, 1);
+MODULE_PNP_INFO("U16:vendor;U16:device;U16:subvendor;U16:subdevice;U32:#;D:#",
+ pci, tb_pcib, tb_pcib_identifiers, nitems(tb_pcib_identifiers) - 1);
+
+static int
+tb_pci_probe(device_t dev)
+{
+ struct tb_pcib_ident *n;
+
+ if ((n = tb_pcib_find_ident(device_get_parent(dev))) != NULL) {
+ switch (n->flags & TB_GEN_MASK) {
+ case TB_GEN_TB1:
+ device_set_desc(dev, "Thunderbolt 1 Link");
+ break;
+ case TB_GEN_TB2:
+ device_set_desc(dev, "Thunderbolt 2 Link");
+ break;
+ case TB_GEN_TB3:
+ device_set_desc(dev, "Thunderbolt 3 Link");
+ break;
+ case TB_GEN_USB4:
+ device_set_desc(dev, "USB4 Link");
+ break;
+ case TB_GEN_UNK:
+ /* Fallthrough */
+ default:
+ device_set_desc(dev, "Thunderbolt Link");
+ }
+ return (BUS_PROBE_VENDOR);
+ }
+ return (ENXIO);
+}
+
+static int
+tb_pci_attach(device_t dev)
+{
+
+ return (pci_attach(dev));
+}
+
+static int
+tb_pci_detach(device_t dev)
+{
+
+ return (pci_detach(dev));
+}
+
+static device_method_t tb_pci_methods[] = {
+ DEVMETHOD(device_probe, tb_pci_probe),
+ DEVMETHOD(device_attach, tb_pci_attach),
+ DEVMETHOD(device_detach, tb_pci_detach),
+
+ DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_generic_get_debug),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(pci, tb_pci_driver, tb_pci_methods, sizeof(struct pci_softc),
+ pci_driver);
+DRIVER_MODULE(tb_pci, pcib, tb_pci_driver, NULL, NULL);
+MODULE_DEPEND(tb_pci, pci, 1, 1, 1);
+MODULE_VERSION(tb_pci, 1);
diff --git a/sys/dev/thunderbolt/tb_pcib.h b/sys/dev/thunderbolt/tb_pcib.h
new file mode 100644
index 000000000000..6928e866a083
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_pcib.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt PCIe bridge/switch definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_PCIB_H
+#define _TB_PCIB_H
+
+DECLARE_CLASS(tb_pcib_driver);
+
+/*
+ * The order of the fields is very important. Class inheritance relies on
+ * implicitly knowing the location of the first 3 fields.
+ */
+struct tb_pcib_softc {
+ struct pcib_softc pcibsc;
+ ACPI_HANDLE ap_handle;
+ ACPI_BUFFER ap_prt;
+ device_t dev;
+ u_int debug;
+ int vsec;
+ int flags;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+};
+
+/* Flags for tb_softc */
+#define TB_GEN_UNK 0x00
+#define TB_GEN_TB1 0x01
+#define TB_GEN_TB2 0x02
+#define TB_GEN_TB3 0x03
+#define TB_GEN_USB4 0x04
+#define TB_GEN_MASK 0x0f
+#define TB_HWIF_UNK 0x00
+#define TB_HWIF_AR 0x10
+#define TB_HWIF_TR 0x20
+#define TB_HWIF_ICL 0x30
+#define TB_HWIF_USB4 0x40
+#define TB_HWIF_MASK 0xf0
+#define TB_FLAGS_ISROOT 0x100
+#define TB_FLAGS_ISUFP 0x200
+
+#define TB_IS_AR(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_AR)
+#define TB_IS_TR(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_TR)
+#define TB_IS_ICL(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_ICL)
+#define TB_IS_USB4(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_USB4)
+
+#define TB_IS_ROOT(sc) (((sc)->flags & TB_FLAGS_ISROOT) != 0)
+#define TB_IS_UFP(sc) (((sc)->flags & TB_FLAGS_ISUFP) != 0)
+#define TB_IS_DFP(sc) (((sc)->flags & TB_FLAGS_ISUFP) == 0)
+
+/* PCI IDs for the TB bridges */
+#define TB_DEV_AR_2C 0x1576
+#define TB_DEV_AR_LP 0x15c0
+#define TB_DEV_AR_C_4C 0x15d3
+#define TB_DEV_AR_C_2C 0x15da
+#define TB_DEV_ICL_0 0x8a1d
+#define TB_DEV_ICL_1 0x8a21
+
+#define TB_PCIB_VSEC(dev)	(((struct tb_pcib_softc *)device_get_softc(dev))->vsec)
+#define TB_DESC_MAX 80
+
+int tb_pcib_probe_common(device_t, char *);
+int tb_pcib_attach_common(device_t dev);
+
+#endif /* _TB_PCIB_H */
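
For example (flag values per the identifier table in tb_pcib.c), an Alpine
Ridge host bridge found to be both the topology root and the upstream facing
port would carry:

	sc->flags = TB_GEN_TB3 | TB_HWIF_AR | TB_FLAGS_ISROOT | TB_FLAGS_ISUFP;

	/* Now TB_IS_AR(sc), TB_IS_ROOT(sc) and TB_IS_UFP(sc) are true,
	   and TB_IS_DFP(sc) is false. */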
diff --git a/sys/dev/thunderbolt/tb_reg.h b/sys/dev/thunderbolt/tb_reg.h
new file mode 100644
index 000000000000..b065e01e6972
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_reg.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt Variables
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_REG_H
+#define _TB_REG_H
+
+#define TBSEC_NONE 0x00
+#define TBSEC_USER 0x01
+#define TBSEC_SECURE 0x02
+#define TBSEC_DP 0x03
+#define TBSEC_UNKNOWN 0xff
+
+/*
+ * SW-FW commands and responses. These are sent over Ring0 to communicate
+ * with the fabric and the TBT Connection Manager firmware.
+ */
+
+typedef struct {
+ uint32_t hi;
+ uint32_t lo;
+} __packed tb_route_t;
+
+#endif /* _TB_REG_H */
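
The route is carried as two dwords on the wire but handled as a single
64-bit value in the driver (compare the route macro in router_var.h). A
hypothetical helper showing the composition:

	static __inline uint64_t
	tb_route_to_u64(tb_route_t route)
	{
		return (((uint64_t)route.hi << 32) | route.lo);
	}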
diff --git a/sys/dev/thunderbolt/tb_var.h b/sys/dev/thunderbolt/tb_var.h
new file mode 100644
index 000000000000..4874c420300e
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_var.h
@@ -0,0 +1,54 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt driver variables and inline helpers.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_VAR_H
+#define _TB_VAR_H
+
+typedef struct {
+ int8_t link;
+ int8_t depth;
+} tb_addr_t;
+
+MALLOC_DECLARE(M_THUNDERBOLT);
+
+#define TB_VENDOR_LEN 48
+#define TB_MODEL_LEN 48
+#define TB_MAX_LINKS 4
+#define TB_MAX_DEPTH 6
+
+static __inline uint32_t
+tb_calc_crc(void *data, u_int len)
+{
+	return (~(calculate_crc32c(~0L, data, len)));
+}
+
+#endif /* _TB_VAR_H */
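
tb_calc_crc() above uses the usual CRC-32C framing convention: seed with all-ones, then complement the result. Below is a standalone sketch of the same computation, assuming the kernel's calculate_crc32c() is a standard reflected CRC-32C; crc32c_update() and tb_crc_demo() are illustrative names.

    #include <stdint.h>
    #include <stdio.h>

    /* Bitwise, reflected CRC-32C (Castagnoli), polynomial 0x82F63B78. */
    static uint32_t
    crc32c_update(uint32_t crc, const void *buf, unsigned len)
    {
            const uint8_t *p = buf;

            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
            }
            return (crc);
    }

    /* Same convention as tb_calc_crc(): seed with ~0, complement the result. */
    static uint32_t
    tb_crc_demo(const void *data, unsigned len)
    {
            return (~crc32c_update(~0U, data, len));
    }

    int
    main(void)
    {
            uint32_t pkt[3] = { 0x12345678, 0x9abcdef0, 0 };

            pkt[2] = tb_crc_demo(pkt, 2 * sizeof(uint32_t));
            printf("crc = 0x%08x\n", pkt[2]);
            return (0);
    }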
diff --git a/sys/dev/thunderbolt/tbcfg_reg.h b/sys/dev/thunderbolt/tbcfg_reg.h
new file mode 100644
index 000000000000..bb68faa543b0
--- /dev/null
+++ b/sys/dev/thunderbolt/tbcfg_reg.h
@@ -0,0 +1,363 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt3/USB4 config space register definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TBCFG_REG_H
+#define _TBCFG_REG_H
+
+/* Config space read request, 6.4.2.3 */
+struct tb_cfg_read {
+ tb_route_t route;
+ uint32_t addr_attrs;
+#define TB_CFG_ADDR_SHIFT 0
+#define TB_CFG_ADDR_MASK GENMASK(12,0)
+#define TB_CFG_SIZE_SHIFT 13
+#define TB_CFG_SIZE_MASK GENMASK(18,13)
+#define TB_CFG_ADAPTER_SHIFT 19
+#define TB_CFG_ADAPTER_MASK GENMASK(24,19)
+#define TB_CFG_CS_PATH (0x00 << 25)
+#define TB_CFG_CS_ADAPTER (0x01 << 25)
+#define TB_CFG_CS_ROUTER (0x02 << 25)
+#define TB_CFG_CS_COUNTERS (0x03 << 25)
+#define TB_CFG_SEQ_SHIFT 27
+#define TB_CFG_SEQ_MASK		GENMASK(28,27)
+ uint32_t crc;
+};
+
+/* Config space read response, 6.4.2.4 */
+struct tb_cfg_read_resp {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t data[0]; /* Up to 60 dwords */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space write request, 6.4.2.5 */
+struct tb_cfg_write {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t data[0]; /* Up to 60 dwords */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space write response, 6.4.2.6 */
+struct tb_cfg_write_resp {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t crc;
+} __packed;
+
+/* Config space event, 6.4.2.7 */
+struct tb_cfg_notify {
+ tb_route_t route;
+ uint32_t event_adap;
+#define TB_CFG_EVENT_MASK GENMASK(7,0)
+#define GET_NOTIFY_EVENT(n) ((n)->event_adap & TB_CFG_EVENT_MASK)
+#define TB_CFG_ERR_CONN 0x00
+#define TB_CFG_ERR_LINK 0x01
+#define TB_CFG_ERR_ADDR 0x02
+#define TB_CFG_ERR_ADP 0x04
+#define TB_CFG_ERR_ENUM 0x08
+#define TB_CFG_ERR_NUA 0x09
+#define TB_CFG_ERR_LEN 0x0b
+#define TB_CFG_ERR_HEC 0x0c
+#define TB_CFG_ERR_FC 0x0d
+#define TB_CFG_ERR_PLUG 0x0e
+#define TB_CFG_ERR_LOCK 0x0f
+#define TB_CFG_HP_ACK 0x07
+#define TB_CFG_DP_BW 0x20
+#define TB_CFG_EVENT_ADAPTER_SHIFT 8
+#define TB_CFG_EVENT_ADAPTER_MASK GENMASK(13,8)
+#define GET_NOTIFY_ADAPTER(n) (((n)->event_adap & \
+ TB_CFG_EVENT_ADAPTER_MASK) >> \
+ TB_CFG_EVENT_ADAPTER_SHIFT)
+#define TB_CFG_PG_NONE 0x00000000
+#define TB_CFG_PG_PLUG 0x80000000
+#define TB_CFG_PG_UNPLUG 0xc0000000
+ uint32_t crc;
+} __packed;
+
+/* Config space event acknowledgement, 6.4.2.8 */
+struct tb_cfg_notify_ack {
+ tb_route_t route;
+ uint32_t crc;
+} __packed;
+
+/* Config space hot plug event, 6.4.2.10 */
+struct tb_cfg_hotplug {
+ tb_route_t route;
+ uint32_t adapter_attrs;
+#define TB_CFG_ADPT_MASK GENMASK(5,0)
+#define TB_CFG_UPG_PLUG (0x0 << 31)
+#define TB_CFG_UPG_UNPLUG (0x1 << 31)
+ uint32_t crc;
+} __packed;
+
+/* Config space inter-domain request, 6.4.2.11 */
+struct tb_cfg_xdomain {
+ tb_route_t route;
+ uint32_t data[0];
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space inter-domain response, 6.4.2.12 */
+struct tb_cfg_xdomain_resp {
+ tb_route_t route;
+ uint32_t data[0];
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space router basic registers 8.2.1.1 */
+struct tb_cfg_router {
+ uint16_t vendor_id; /* ROUTER_CS_0 */
+ uint16_t product_id;
+ uint32_t router_cs_1; /* ROUTER_CS_1 */
+#define ROUTER_CS1_NEXT_CAP_MASK GENMASK(7,0)
+#define GET_ROUTER_CS_NEXT_CAP(r) (r->router_cs_1 & \
+ ROUTER_CS1_NEXT_CAP_MASK)
+#define ROUTER_CS1_UPSTREAM_SHIFT 8
+#define ROUTER_CS1_UPSTREAM_MASK GENMASK(13,8)
+#define GET_ROUTER_CS_UPSTREAM_ADAP(r) ((r->router_cs_1 & \
+ ROUTER_CS1_UPSTREAM_MASK) >> \
+ ROUTER_CS1_UPSTREAM_SHIFT)
+#define ROUTER_CS1_MAX_SHIFT 14
+#define ROUTER_CS1_MAX_MASK GENMASK(19,14)
+#define GET_ROUTER_CS_MAX_ADAP(r) ((r->router_cs_1 & \
+ ROUTER_CS1_MAX_MASK) >> \
+ ROUTER_CS1_MAX_SHIFT)
+#define ROUTER_CS1_MAX_ADAPTERS 64
+#define ROUTER_CS1_DEPTH_SHIFT 20
+#define ROUTER_CS1_DEPTH_MASK GENMASK(22,20)
+#define GET_ROUTER_CS_DEPTH(r) ((r->router_cs_1 & \
+ ROUTER_CS1_DEPTH_MASK) >> \
+ ROUTER_CS1_DEPTH_SHIFT)
+#define ROUTER_CS1_REVISION_SHIFT 24
+#define ROUTER_CS1_REVISION_MASK GENMASK(31,24)
+#define GET_ROUTER_CS_REVISION(r)	((r->router_cs_1 & \
+ ROUTER_CS1_REVISION_MASK) >> \
+ ROUTER_CS1_REVISION_SHIFT)
+ uint32_t topology_lo; /* ROUTER_CS_2 */
+ uint32_t topology_hi; /* ROUTER_CS_3 */
+#define CFG_TOPOLOGY_VALID (1 << 31)
+ uint8_t notification_timeout; /* ROUTER_CS_4 */
+ uint8_t cm_version;
+#define CFG_CM_USB4 0x10
+ uint8_t rsrvd1;
+ uint8_t usb4_version;
+#define CFG_USB4_V1_0 0x10
+ uint32_t flags_cs5; /* ROUTER_CS_5 */
+#define CFG_CS5_SLP (1 << 0)
+#define CFG_CS5_WOP (1 << 1)
+#define CFG_CS5_WOU (1 << 2)
+#define CFG_CS5_DP (1 << 3)
+#define CFG_CS5_C3S (1 << 23)
+#define CFG_CS5_PTO (1 << 24)
+#define CFG_CS5_UTO (1 << 25)
+#define CFG_CS5_HCO (1 << 26)
+#define CFG_CS5_CV (1 << 31)
+ uint32_t flags_cs6; /* ROUTER_CS_6 */
+#define CFG_CS6_SLPR (1 << 0)
+#define CFG_CS6_TNS (1 << 1)
+#define CFG_CS6_WAKE_PCIE (1 << 2)
+#define CFG_CS6_WAKE_USB3 (1 << 3)
+#define CFG_CS6_WAKE_DP (1 << 4)
+#define CFG_CS6_HCI (1 << 18)
+#define CFG_CS6_RR (1 << 24)
+#define CFG_CS6_CR (1 << 25)
+ uint32_t uuid_hi; /* ROUTER_CS_7 */
+ uint32_t uuid_lo; /* ROUTER_CS_8 */
+ uint32_t data[16]; /* ROUTER_CS_9-24 */
+ uint32_t metadata; /* ROUTER_CS_25 */
+ uint32_t opcode_status; /* ROUTER_CS_26 */
+/* TBD: Opcodes and status */
+#define CFG_ONS (1 << 30)
+#define CFG_OV (1 << 31)
+} __packed;
+
+#define TB_CFG_CAP_OFFSET_MAX 0xfff
+
+/* Config space router capability header 8.2.1.3/8.2.1.4 */
+struct tb_cfg_cap_hdr {
+ uint8_t next_cap;
+ uint8_t cap_id;
+} __packed;
+
+/* Config space router TMU registers 8.2.1.2 */
+struct tb_cfg_cap_tmu {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_TMU 0x03
+} __packed;
+
+struct tb_cfg_vsc_cap {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_VSC 0x05
+ uint8_t vsc_id;
+ uint8_t len;
+} __packed;
+
+struct tb_cfg_vsec_cap {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_VSEC 0x05
+ uint8_t vsec_id;
+ uint8_t len;
+ uint16_t vsec_next_cap;
+ uint16_t vsec_len;
+} __packed;
+
+union tb_cfg_cap {
+ struct tb_cfg_cap_hdr hdr;
+ struct tb_cfg_cap_tmu tmu;
+ struct tb_cfg_vsc_cap vsc;
+ struct tb_cfg_vsec_cap vsec;
+} __packed;
+
+#define TB_CFG_VSC_PLUG 0x01 /* Hot Plug and DROM */
+
+#define TB_CFG_VSEC_LC 0x06 /* Link Controller */
+#define TB_LC_DESC 0x02 /* LC Descriptor fields */
+#define TB_LC_DESC_NUM_LC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SHIFT 16
+#define TB_LC_DESC_PORT_MASK GENMASK(27, 16)
+#define TB_LC_UUID 0x03
+#define TB_LC_DP_SINK 0x10 /* Display Port config */
+#define TB_LC_PORT_ATTR 0x8d /* Port attributes */
+#define TB_LC_PORT_ATTR_BE (1 << 12) /* Bonding enabled */
+#define TB_LC_SX_CTRL 0x96 /* Sleep control */
+#define TB_LC_SX_CTRL_WOC (1 << 1)
+#define TB_LC_SX_CTRL_WOD (1 << 2)
+#define TB_LC_SX_CTRL_WOU4 (1 << 5)
+#define TB_LC_SX_CTRL_WOP (1 << 6)
+#define TB_LC_SX_CTRL_L1C (1 << 16)
+#define TB_LC_SX_CTRL_L1D (1 << 17)
+#define TB_LC_SX_CTRL_L2C (1 << 20)
+#define TB_LC_SX_CTRL_L2D (1 << 21)
+#define TB_LC_SX_CTRL_UFP (1 << 30)
+#define TB_LC_SX_CTRL_SLP (1 << 31)
+#define TB_LC_POWER 0x740
+
+/* Config space adapter basic registers 8.2.2.1 */
+struct tb_cfg_adapter {
+ uint16_t vendor_id; /* ADP CS0 */
+ uint16_t product_id;
+ uint32_t adp_cs1; /* ADP CS1 */
+#define ADP_CS1_NEXT_CAP_MASK GENMASK(7,0)
+#define GET_ADP_CS_NEXT_CAP(a) (a->adp_cs1 & \
+ ADP_CS1_NEXT_CAP_MASK)
+#define ADP_CS1_COUNTER_SHIFT 8
+#define ADP_CS1_COUNTER_MASK GENMASK(18,8)
+#define GET_ADP_CS_MAX_COUNTERS(a) ((a->adp_cs1 & \
+ ADP_CS1_COUNTER_MASK) >> \
+ ADP_CS1_COUNTER_SHIFT)
+#define CFG_COUNTER_CONFIG_FLAG (1 << 19)
+ uint32_t adp_cs2; /* ADP CS2 */
+#define ADP_CS2_TYPE_MASK GENMASK(23,0)
+#define GET_ADP_CS_TYPE(a) (a->adp_cs2 & ADP_CS2_TYPE_MASK)
+#define ADP_CS2_UNSUPPORTED 0x000000
+#define ADP_CS2_LANE 0x000001
+#define ADP_CS2_HOSTIF 0x000002
+#define ADP_CS2_PCIE_DFP 0x100101
+#define ADP_CS2_PCIE_UFP 0x100102
+#define ADP_CS2_DP_OUT 0x0e0102
+#define ADP_CS2_DP_IN 0x0e0101
+#define ADP_CS2_USB3_DFP 0x200101
+#define ADP_CS2_USB3_UFP 0x200102
+ uint32_t adp_cs3; /* ADP CS 3 */
+#define ADP_CS3_ADP_NUM_SHIFT 20
+#define ADP_CS3_ADP_NUM_MASK GENMASK(25,20)
+#define GET_ADP_CS_ADP_NUM(a) ((a->adp_cs3 & \
+ ADP_CS3_ADP_NUM_MASK) >> \
+ ADP_CS3_ADP_NUM_SHIFT)
+#define CFG_ADP_HEC_ERROR (1 << 29)
+#define CFG_ADP_FC_ERROR (1 << 30)
+#define CFG_ADP_SBC (1 << 31)
+} __packed;
+
+/* Config space lane adapter capability 8.2.2.3 */
+struct tb_cfg_cap_lane {
+ struct tb_cfg_cap_hdr hdr; /* LANE_ADP_CS_0 */
+#define TB_CFG_CAP_LANE 0x01
+ /* Supported link/width/power */
+ uint16_t supp_lwp;
+#define CAP_LANE_LINK_MASK GENMASK(3,0)
+#define CAP_LANE_LINK_GEN3 0x0004
+#define CAP_LANE_LINK_GEN2 0x0008
+#define CAP_LANE_WIDTH_MASK GENMASK(9,4)
+#define CAP_LANE_WIDTH_1X 0x0010
+#define CAP_LANE_WIDTH_2X 0x0020
+#define CAP_LANE_POWER_CL0 0x0400
+#define CAP_LANE_POWER_CL1 0x0800
+#define CAP_LANE_POWER_CL2 0x1000
+ /* Target link/width/power */
+ uint16_t targ_lwp; /* LANE_ADP_CS_1 */
+#define CAP_LANE_TARGET_GEN2 0x0008
+#define CAP_LANE_TARGET_GEN3 0x000c
+#define CAP_LANE_TARGET_SINGLE 0x0010
+#define CAP_LANE_TARGET_DUAL 0x0030
+#define CAP_LANE_DISABLE 0x4000
+#define CAP_LANE_BONDING 0x8000
+ /* Current link/width/state */
+ uint16_t current_lws;
+/* Same definitions as supp_lwp for bits 0 - 9 */
+#define CAP_LANE_STATE_SHIFT 10
+#define CAP_LANE_STATE_MASK GENMASK(13,10)
+#define CAP_LANE_STATE_DISABLE (0x0 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_TRAINING (0x1 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL0 (0x2 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_TXCL0 (0x3 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_RXCL0 (0x4 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL1 (0x5 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL2 (0x6 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CLD (0x7 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_PMS 0x4000
+ /* Logical Layer Errors */
+ uint16_t lle; /* LANE_ADP_CS_2 */
+#define CAP_LANE_LLE_MASK GENMASK(6,0)
+#define CAP_LANE_LLE_ALE 0x01
+#define CAP_LANE_LLE_OSE 0x02
+#define CAP_LANE_LLE_TE 0x04
+#define CAP_LANE_LLE_EBE 0x08
+#define CAP_LANE_LLE_DBE 0x10
+#define CAP_LANE_LLE_RDE 0x20
+#define CAP_LANE_LLE_RST 0x40
+ uint16_t lle_enable;
+} __packed;
+
+/* Config space path registers 8.2.3.1 */
+struct tb_cfg_path {
+} __packed;
+
+/* Config space counter registers 8.2.4 */
+struct tb_cfg_counters {
+} __packed;
+
+#endif /* _TBCFG_REG_H */
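
The register layouts above repeat one idiom: a GENMASK(hi, lo) field mask plus a *_SHIFT constant, wrapped in a GET_* accessor. Below is a minimal decode sketch with a local stand-in for the kernel's GENMASK(); the register value is made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's GENMASK(hi, lo) bit-range macro. */
    #define GENMASK(hi, lo)         ((~0U >> (31 - (hi))) & (~0U << (lo)))

    #define ROUTER_CS1_DEPTH_SHIFT  20
    #define ROUTER_CS1_DEPTH_MASK   GENMASK(22, 20)

    int
    main(void)
    {
            uint32_t router_cs_1 = 0x00345678;      /* hypothetical ROUTER_CS_1 */
            uint32_t depth;

            depth = (router_cs_1 & ROUTER_CS1_DEPTH_MASK) >>
                ROUTER_CS1_DEPTH_SHIFT;
            printf("router depth = %u\n", depth);   /* prints 3 */
            return (0);
    }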
diff --git a/sys/dev/tpm/tpm20.c b/sys/dev/tpm/tpm20.c
index 876dd0bcc40d..067e7ccae8f9 100644
--- a/sys/dev/tpm/tpm20.c
+++ b/sys/dev/tpm/tpm20.c
@@ -25,8 +25,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include <sys/random.h>
+#include <dev/random/randomdev.h>
#include "tpm20.h"
@@ -184,6 +184,13 @@ tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
return (ENOTTY);
}
+#ifdef TPM_HARVEST
+static const struct random_source random_tpm = {
+ .rs_ident = "TPM",
+ .rs_source = RANDOM_PURE_TPM,
+};
+#endif
+
int
tpm20_init(struct tpm_sc *sc)
{
@@ -206,7 +213,7 @@ tpm20_init(struct tpm_sc *sc)
tpm20_release(sc);
#ifdef TPM_HARVEST
- random_harvest_register_source(RANDOM_PURE_TPM);
+ random_source_register(&random_tpm);
TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
tpm20_harvest, sc);
taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
@@ -223,7 +230,7 @@ tpm20_release(struct tpm_sc *sc)
#ifdef TPM_HARVEST
if (device_is_attached(sc->dev))
taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
- random_harvest_deregister_source(RANDOM_PURE_TPM);
+ random_source_deregister(&random_tpm);
#endif
if (sc->buf != NULL)
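
The hunks above replace the old random_harvest_register_source(RANDOM_PURE_TPM) call with a static struct random_source handed to random_source_register(). A hedged sketch of the same register/deregister pairing for some other driver; the mydev names are hypothetical, and the struct fields are exactly the two used above (declarations come from dev/random/randomdev.h).

    static const struct random_source random_mydev = {
            .rs_ident = "mydev",
            .rs_source = RANDOM_PURE_TPM,   /* a real driver uses its own id */
    };

    static void
    mydev_entropy_attach(void)
    {
            random_source_register(&random_mydev);
    }

    static void
    mydev_entropy_detach(void)
    {
            /* must balance the register call, as the TPM detach path does */
            random_source_deregister(&random_mydev);
    }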
diff --git a/sys/dev/tpm/tpm_tis_core.c b/sys/dev/tpm/tpm_tis_core.c
index d8421f8156c9..4159de4daf3b 100644
--- a/sys/dev/tpm/tpm_tis_core.c
+++ b/sys/dev/tpm/tpm_tis_core.c
@@ -97,6 +97,7 @@ tpmtis_attach(device_t dev)
{
struct tpm_sc *sc;
int result;
+ int poll = 0;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -105,6 +106,12 @@ tpmtis_attach(device_t dev)
sx_init(&sc->dev_lock, "TPM driver lock");
sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
+ resource_int_value("tpm", device_get_unit(dev), "use_polling", &poll);
+ if (poll != 0) {
+		device_printf(dev, "Using polling to get TPM operation status\n");
+ goto skip_irq;
+ }
+
sc->irq_rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
RF_ACTIVE | RF_SHAREABLE);
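
The use_polling knob added above is fetched with resource_int_value(), so it is an ordinary per-unit device hint; for example, in /boot/device.hints:

    hint.tpm.0.use_polling="1"

With the hint set non-zero, the driver skips IRQ allocation entirely and polls for TPM operation status.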
diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c
index af151c8c4f06..fccd6689a6aa 100644
--- a/sys/dev/tws/tws.c
+++ b/sys/dev/tws/tws.c
@@ -311,7 +311,7 @@ attach_fail_4:
if (sc->cmd_tag)
bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
- for(i=0;i<sc->irqs;i++) {
+ for (i = 0; i < sc->irqs; i++) {
if ( sc->irq_res[i] ){
if (bus_release_resource(sc->tws_dev,
SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
@@ -369,7 +369,7 @@ tws_detach(device_t dev)
tws_teardown_intr(sc);
/* Release irq resource */
- for(i=0;i<sc->irqs;i++) {
+ for (i = 0; i < sc->irqs; i++) {
if ( sc->irq_res[i] ){
if (bus_release_resource(sc->tws_dev,
SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
@@ -402,7 +402,7 @@ tws_detach(device_t dev)
TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
}
- for ( i=0; i< tws_queue_depth; i++) {
+ for (i = 0; i < tws_queue_depth; i++) {
if (sc->reqs[i].dma_map)
bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
callout_drain(&sc->reqs[i].timeout);
@@ -432,7 +432,7 @@ tws_setup_intr(struct tws_softc *sc, int irqs)
{
int i, error;
- for(i=0;i<irqs;i++) {
+ for (i = 0; i < irqs; i++) {
if (!(sc->intr_handle[i])) {
if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
INTR_TYPE_CAM | INTR_MPSAFE,
@@ -452,7 +452,7 @@ tws_teardown_intr(struct tws_softc *sc)
{
int i;
- for(i=0;i<sc->irqs;i++) {
+ for (i = 0; i < sc->irqs; i++) {
if (sc->intr_handle[i]) {
bus_teardown_intr(sc->tws_dev,
sc->irq_res[i], sc->intr_handle[i]);
@@ -669,8 +669,7 @@ tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
bzero(cmd_buf, dma_mem_size);
TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
mtx_lock(&sc->q_lock);
- for ( i=0; i< tws_queue_depth; i++)
- {
+ for (i = 0; i < tws_queue_depth; i++) {
if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
/* log a ENOMEM failure msg here */
mtx_unlock(&sc->q_lock);
diff --git a/sys/dev/tws/tws_services.c b/sys/dev/tws/tws_services.c
index da8bbacc39f7..e5c3d45c533f 100644
--- a/sys/dev/tws/tws_services.c
+++ b/sys/dev/tws/tws_services.c
@@ -200,7 +200,7 @@ tws_init_qs(struct tws_softc *sc)
{
mtx_lock(&sc->q_lock);
- for(int i=0;i<TWS_MAX_QS;i++) {
+ for (int i = 0; i < TWS_MAX_QS; i++) {
sc->q_head[i] = NULL;
sc->q_tail[i] = NULL;
}
diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c
index 14ac213066b8..22af8ee8663c 100644
--- a/sys/dev/uart/uart_bus_pci.c
+++ b/sys/dev/uart/uart_bus_pci.c
@@ -141,6 +141,8 @@ static const struct pci_id pci_ns8250_ids[] = {
0x10, 16384000 },
{ 0x1415, 0xc120, 0xffff, 0, "Oxford Semiconductor OXPCIe952 PCIe 16950 UART",
0x10 },
+{ 0x14a1, 0x0008, 0x14a1, 0x0008, "Systembase SB16C1058",
+ 0x10, 8 * DEFAULT_RCLK, },
{ 0x14e4, 0x160a, 0xffff, 0, "Broadcom TruManage UART", 0x10,
128 * DEFAULT_RCLK, 2},
{ 0x14e4, 0x4344, 0xffff, 0, "Sony Ericsson GC89 PC Card", 0x10},
diff --git a/sys/dev/uart/uart_cpu_acpi.c b/sys/dev/uart/uart_cpu_acpi.c
index 7382c47a8db6..da77603f0093 100644
--- a/sys/dev/uart/uart_cpu_acpi.c
+++ b/sys/dev/uart/uart_cpu_acpi.c
@@ -44,23 +44,15 @@
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>
-static struct acpi_uart_compat_data *
+static struct acpi_spcr_compat_data *
uart_cpu_acpi_scan(uint8_t interface_type)
{
- struct acpi_uart_compat_data **cd, *curcd;
+ struct acpi_spcr_compat_data **cd, *curcd;
int i;
- SET_FOREACH(cd, uart_acpi_class_and_device_set) {
+ SET_FOREACH(cd, uart_acpi_spcr_class_set) {
curcd = *cd;
- for (i = 0; curcd[i].cd_hid != NULL; i++) {
- if (curcd[i].cd_port_subtype == interface_type)
- return (&curcd[i]);
- }
- }
-
- SET_FOREACH(cd, uart_acpi_class_set) {
- curcd = *cd;
- for (i = 0; curcd[i].cd_hid != NULL; i++) {
+ for (i = 0; curcd[i].cd_class != NULL; i++) {
if (curcd[i].cd_port_subtype == interface_type)
return (&curcd[i]);
}
@@ -143,7 +135,7 @@ uart_cpu_acpi_spcr(int devtype, struct uart_devinfo *di)
{
vm_paddr_t spcr_physaddr;
ACPI_TABLE_SPCR *spcr;
- struct acpi_uart_compat_data *cd;
+ struct acpi_spcr_compat_data *cd;
struct uart_class *class;
int error = ENXIO;
@@ -237,7 +229,7 @@ uart_cpu_acpi_dbg2(struct uart_devinfo *di)
ACPI_TABLE_DBG2 *dbg2;
ACPI_DBG2_DEVICE *dbg2_dev;
ACPI_GENERIC_ADDRESS *base_address;
- struct acpi_uart_compat_data *cd;
+ struct acpi_spcr_compat_data *cd;
struct uart_class *class;
int error;
bool found;
diff --git a/sys/dev/uart/uart_cpu_acpi.h b/sys/dev/uart/uart_cpu_acpi.h
index 94329e1f1349..218f643c7621 100644
--- a/sys/dev/uart/uart_cpu_acpi.h
+++ b/sys/dev/uart/uart_cpu_acpi.h
@@ -35,11 +35,18 @@
struct uart_class;
+struct acpi_spcr_compat_data {
+ struct uart_class *cd_class;
+ uint16_t cd_port_subtype;
+};
+SET_DECLARE(uart_acpi_spcr_class_set, struct acpi_spcr_compat_data);
+#define UART_ACPI_SPCR_CLASS(data) \
+ DATA_SET(uart_acpi_spcr_class_set, data)
+
struct acpi_uart_compat_data {
const char *cd_hid;
struct uart_class *cd_class;
- uint16_t cd_port_subtype;
int cd_regshft;
int cd_regiowidth;
int cd_rclk;
@@ -56,14 +63,6 @@ SET_DECLARE(uart_acpi_class_and_device_set, struct acpi_uart_compat_data);
#define UART_ACPI_CLASS_AND_DEVICE(data) \
DATA_SET(uart_acpi_class_and_device_set, data)
-/*
- * If your UART driver implements uart_class and custom device layer,
- * then use UART_ACPI_CLASS for its declaration
- */
-SET_DECLARE(uart_acpi_class_set, struct acpi_uart_compat_data);
-#define UART_ACPI_CLASS(data) \
- DATA_SET(uart_acpi_class_set, data)
-
/* Try to initialize UART device from ACPI tables */
int uart_cpu_acpi_setup(int devtype, struct uart_devinfo *di);
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index 0f19ede6d9df..c38d50e54ad8 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -492,24 +492,32 @@ UART_CLASS(uart_ns8250_class);
* XXX -- refactor out ACPI and FDT ifdefs
*/
#ifdef DEV_ACPI
+static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
+ { &uart_ns8250_class, ACPI_DBG2_16550_COMPATIBLE },
+ { &uart_ns8250_class, ACPI_DBG2_16550_SUBSET },
+ { &uart_ns8250_class, ACPI_DBG2_16550_WITH_GAS },
+ { NULL, 0 },
+};
+UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
+
static struct acpi_uart_compat_data acpi_compat_data[] = {
- {"AMD0020", &uart_ns8250_class, 0, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
- {"AMDI0020", &uart_ns8250_class, 0, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
- {"APMC0D08", &uart_ns8250_class, ACPI_DBG2_16550_COMPATIBLE, 2, 4, 0, 0, "APM compatible UART"},
- {"MRVL0001", &uart_ns8250_class, ACPI_DBG2_16550_SUBSET, 2, 0, 200000000, UART_F_BUSY_DETECT, "Marvell / Synopsys Designware UART"},
- {"SCX0006", &uart_ns8250_class, 0, 2, 0, 62500000, UART_F_BUSY_DETECT, "SynQuacer / Synopsys Designware UART"},
- {"HISI0031", &uart_ns8250_class, 0, 2, 0, 200000000, UART_F_BUSY_DETECT, "HiSilicon / Synopsys Designware UART"},
- {"INTC1006", &uart_ns8250_class, 0, 2, 0, 25000000, 0, "Intel ARM64 UART"},
- {"NXP0018", &uart_ns8250_class, 0, 0, 0, 350000000, UART_F_BUSY_DETECT, "NXP / Synopsys Designware UART"},
- {"PNP0500", &uart_ns8250_class, 0, 0, 0, 0, 0, "Standard PC COM port"},
- {"PNP0501", &uart_ns8250_class, 0, 0, 0, 0, 0, "16550A-compatible COM port"},
- {"PNP0502", &uart_ns8250_class, 0, 0, 0, 0, 0, "Multiport serial device (non-intelligent 16550)"},
- {"PNP0510", &uart_ns8250_class, 0, 0, 0, 0, 0, "Generic IRDA-compatible device"},
- {"PNP0511", &uart_ns8250_class, 0, 0, 0, 0, 0, "Generic IRDA-compatible device"},
- {"WACF004", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet PC Screen"},
- {"WACF00E", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet PC Screen 00e"},
- {"FUJ02E5", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet at FuS Lifebook T"},
- {NULL, NULL, 0, 0 , 0, 0, 0, NULL},
+ {"AMD0020", &uart_ns8250_class, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
+ {"AMDI0020", &uart_ns8250_class, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
+ {"APMC0D08", &uart_ns8250_class, 2, 4, 0, 0, "APM compatible UART"},
+ {"MRVL0001", &uart_ns8250_class, 2, 0, 200000000, UART_F_BUSY_DETECT, "Marvell / Synopsys Designware UART"},
+ {"SCX0006", &uart_ns8250_class, 2, 0, 62500000, UART_F_BUSY_DETECT, "SynQuacer / Synopsys Designware UART"},
+ {"HISI0031", &uart_ns8250_class, 2, 0, 200000000, UART_F_BUSY_DETECT, "HiSilicon / Synopsys Designware UART"},
+ {"INTC1006", &uart_ns8250_class, 2, 0, 25000000, 0, "Intel ARM64 UART"},
+ {"NXP0018", &uart_ns8250_class, 0, 0, 350000000, UART_F_BUSY_DETECT, "NXP / Synopsys Designware UART"},
+ {"PNP0500", &uart_ns8250_class, 0, 0, 0, 0, "Standard PC COM port"},
+ {"PNP0501", &uart_ns8250_class, 0, 0, 0, 0, "16550A-compatible COM port"},
+ {"PNP0502", &uart_ns8250_class, 0, 0, 0, 0, "Multiport serial device (non-intelligent 16550)"},
+ {"PNP0510", &uart_ns8250_class, 0, 0, 0, 0, "Generic IRDA-compatible device"},
+ {"PNP0511", &uart_ns8250_class, 0, 0, 0, 0, "Generic IRDA-compatible device"},
+ {"WACF004", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet PC Screen"},
+ {"WACF00E", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet PC Screen 00e"},
+ {"FUJ02E5", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet at FuS Lifebook T"},
+	{NULL, NULL, 0, 0, 0, 0, NULL},
};
UART_ACPI_CLASS_AND_DEVICE(acpi_compat_data);
#endif
diff --git a/sys/dev/uart/uart_dev_pl011.c b/sys/dev/uart/uart_dev_pl011.c
index a0d5a5b1c7e2..6afc693cd347 100644
--- a/sys/dev/uart/uart_dev_pl011.c
+++ b/sys/dev/uart/uart_dev_pl011.c
@@ -391,11 +391,19 @@ UART_FDT_CLASS_AND_DEVICE(fdt_compat_data);
#endif
#ifdef DEV_ACPI
+static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
+ { &uart_pl011_class, ACPI_DBG2_ARM_PL011 },
+ { &uart_pl011_class, ACPI_DBG2_ARM_SBSA_GENERIC },
+ { &uart_pl011_class, ACPI_DBG2_ARM_SBSA_32BIT },
+ { NULL, 0 },
+};
+UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
+
static struct acpi_uart_compat_data acpi_compat_data[] = {
- {"ARMH0011", &uart_pl011_class, ACPI_DBG2_ARM_PL011, 2, 0, 0, 0, "uart pl011"},
- {"ARMHB000", &uart_pl011_class, ACPI_DBG2_ARM_SBSA_GENERIC, 2, 0, 0, 0, "uart pl011"},
- {"ARMHB000", &uart_pl011_class, ACPI_DBG2_ARM_SBSA_32BIT, 2, 0, 0, 0, "uart pl011"},
- {NULL, NULL, 0, 0, 0, 0, 0, NULL},
+ {"ARMH0011", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {"ARMHB000", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {"ARMHB000", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {NULL, NULL, 0, 0, 0, 0, NULL},
};
UART_ACPI_CLASS_AND_DEVICE(acpi_compat_data);
#endif
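
Both converted drivers now follow the same two-table shape: SPCR/DBG2 port-subtype matching lives in the new uart_acpi_spcr_class_set, while _HID matching stays in acpi_uart_compat_data. A hedged sketch of the registration a third driver would add; uart_myuart_class is a hypothetical class declared elsewhere in that driver.

    #ifdef DEV_ACPI
    static struct acpi_spcr_compat_data myuart_spcr_compat_data[] = {
            { &uart_myuart_class, ACPI_DBG2_16550_COMPATIBLE },
            { NULL, 0 },            /* cd_class == NULL terminates the scan */
    };
    UART_ACPI_SPCR_CLASS(myuart_spcr_compat_data);
    #endif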
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index 9f0faaadeb57..b055d2d2d769 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -160,19 +160,19 @@ enum ufshci_data_direction {
UFSHCI_DATA_DIRECTION_RESERVED = 0b11,
};
-enum ufshci_overall_command_status {
- UFSHCI_OCS_SUCCESS = 0x0,
- UFSHCI_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
- UFSHCI_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
- UFSHCI_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
- UFSHCI_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
- UFSHCI_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
- UFSHCI_OCS_ABORTED = 0x06,
- UFSHCI_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
- UFSHCI_OCS_DEVICE_FATAL_ERROR = 0x08,
- UFSHCI_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
- UFSHCI_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
- UFSHCI_OCS_INVALID = 0xF,
+enum ufshci_utr_overall_command_status {
+ UFSHCI_UTR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_UTR_OCS_ABORTED = 0x06,
+ UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_UTR_OCS_INVALID = 0xF,
};
struct ufshci_utp_xfer_req_desc {
@@ -271,6 +271,18 @@ _Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32
+enum ufshci_utmr_overall_command_status {
+ UFSHCI_UTMR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03,
+ UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04,
+ UFSHCI_UTMR_OCS_ABORTED = 0x05,
+ UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06,
+ UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07,
+ UFSHCI_UTMR_OCS_INVALID = 0xF,
+};
+
/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
struct ufshci_utp_task_mgmt_req_desc {
/* dword 0 */
@@ -356,6 +368,7 @@ struct ufshci_upiu {
_Static_assert(sizeof(struct ufshci_upiu) == 512,
"ufshci_upiu must be 512 bytes");
+/* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */
struct ufshci_cmd_command_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -376,6 +389,7 @@ _Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT ==
0,
"UPIU requires 64-bit alignment");
+/* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */
struct ufshci_cmd_response_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -403,6 +417,69 @@ _Static_assert(sizeof(struct ufshci_cmd_response_upiu) %
0,
"UPIU requires 64-bit alignment");
+enum task_management_function {
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02,
+ UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04,
+ UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81,
+};
+
+/* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */
+struct ufshci_task_mgmt_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t input_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t input_param2; /* (Big-endian) */
+ /* dword 5 */
+ uint32_t input_param3; /* (Big-endian) */
+ /* dword 6-7 */
+ uint8_t reserved[8];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+enum task_management_service_response {
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09,
+};
+
+/* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */
+struct ufshci_task_mgmt_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t output_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t output_param2; /* (Big-endian) */
+ /* dword 5-7 */
+ uint8_t reserved[12];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */
enum ufshci_query_function {
UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
@@ -554,6 +631,7 @@ union ufshci_reponse_upiu {
struct ufshci_upiu_header header;
struct ufshci_cmd_response_upiu cmd_response_upiu;
struct ufshci_query_response_upiu query_response_upiu;
+ struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu;
struct ufshci_nop_in_upiu nop_in_upiu;
};
@@ -638,6 +716,42 @@ struct ufshci_device_descriptor {
_Static_assert(sizeof(struct ufshci_device_descriptor) == 89,
"bad size for ufshci_device_descriptor");
+/* Defines the bit field of dExtendedUfsFeaturesSupport. */
+enum ufshci_desc_wb_ext_ufs_feature {
+ UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2),
+ UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7),
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13),
+ UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14),
+ UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18),
+ UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19),
+};
+
+/* Defines the bit field of bWriteBoosterBufferType. */
+enum ufshci_desc_wb_buffer_type {
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00,
+ UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01,
+};
+
+/* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */
+enum ufshci_desc_user_space_config {
+ UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00,
+ UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01,
+};
+
/*
* UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor"
* ConfigurationDescriptor use big-endian byte ordering.
@@ -936,4 +1050,37 @@ enum ufshci_attributes {
UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
};
+/* bAvailableWriteBoosterBufferSize codes (UFS WriteBooster available buffer
+ * left %) */
+enum ufshci_wb_available_buffer_Size {
+ UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */
+};
+
+/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */
+enum ufshci_wb_lifetime {
+ UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */
+ UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */
+ UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */
+ UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */
+ UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */
+ UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */
+ UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */
+ UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */
+ UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */
+ UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */
+ UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */
+ UFSHCI_ATTR_WB_LIFE_EXCEEDED =
+ 0x0B, /* Exceeded estimated life (treat as WB disabled) */
+};
+
#endif /* __UFSHCI_H__ */
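
Both WriteBooster attribute enums above are 10%-step scales with special codes at the ends. A small standalone sketch decoding bWriteBoosterBufferLifeTimeEst the way the comments describe; the helper is illustrative, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    static void
    wb_life_print(uint8_t code)
    {
            if (code == 0x00)
                    printf("WB life: information not available\n");
            else if (code >= 0x01 && code <= 0x0a)
                    printf("WB life: %d%%-%d%% used\n",
                        (code - 1) * 10, code * 10);
            else if (code == 0x0b)
                    printf("WB life: exceeded, treat WB as disabled\n");
            else
                    printf("WB life: reserved code 0x%02x\n", code);
    }

    int
    main(void)
    {
            wb_life_print(0x03);    /* 20%-30% used */
            wb_life_print(0x0b);    /* exceeded */
            return (0);
    }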
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 55d8363d3287..35663b480cfa 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -12,8 +12,108 @@
#include "ufshci_private.h"
#include "ufshci_reg.h"
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
+{
+ ctrlr->is_failed = true;
+
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
+ &ctrlr->task_mgmt_req_queue));
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->transfer_req_queue.qops.get_hw_queue(
+ &ctrlr->transfer_req_queue));
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
+{
+ TSENTER();
+
+ /*
+ * If `resetting` is true, we are on the reset path.
+ * Re-enable request queues here because ufshci_ctrlr_reset_task()
+ * disables them during reset.
+ */
+ if (resetting) {
+ if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ }
+
+ if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+	/* Initialize UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize unipro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+ * QEMU UFS devices do not support unipro and power mode.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Read Controller Descriptor (Device, Geometry) */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /*
+ * If the reset is due to a timeout, it is already attached to the SIM
+ * and does not need to be attached again.
+ */
+ if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ TSEXIT();
+}
+
static int
-ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
@@ -27,6 +127,35 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, hce, hce);
}
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to disable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
/* Enable UFS host controller */
hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
@@ -36,7 +165,7 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
* unstable, so we need to read the HCE value after some time after
* initialization is complete.
*/
- pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
/* Wait for the HCE flag to change */
while (1) {
@@ -51,17 +180,103 @@ ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
return (ENXIO);
}
- pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
+ pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
+static int
+ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie, hcs;
+ int error;
+
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ return (error);
+}
+
+static void
+ufshci_ctrlr_reset_task(void *arg, int pending)
+{
+ struct ufshci_controller *ctrlr = arg;
+ int error;
+
+ /* Release resources */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (ufshci_ctrlr_fail(ctrlr));
+
+ ufshci_ctrlr_start(ctrlr, true);
+}
+
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
- uint32_t ver, cap, hcs, ie;
+ uint32_t ver, cap, ahit;
uint32_t timeout_period, retry_count;
int error;
@@ -114,58 +329,49 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
ctrlr->retry_count = retry_count;
- /* Disable all interrupts */
- ufshci_mmio_write_4(ctrlr, ie, 0);
+ ctrlr->enable_aborts = 1;
+ if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
+ ctrlr->enable_aborts = 0;
+ else
+ TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
+ &ctrlr->enable_aborts);
- /* Enable Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ /* Reset the UFSHCI controller */
+ error = ufshci_ctrlr_hw_reset(ctrlr);
if (error)
return (error);
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
+ /* Read the UECPA register to clear */
+ ufshci_mmio_read_4(ctrlr, uecpa);
- /*
- * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
- * controller has successfully received a Link Startup UIC command
- * response and the UFS device has found a physical link to the
- * controller.
- */
- hcs = ufshci_mmio_read_4(ctrlr, hcs);
- if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
- ufshci_printf(ctrlr, "UFS device not found\n");
- return (ENXIO);
- }
-
- /* Enable additional interrupts by programming the IE register. */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
- ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
- ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+	/* Disable Auto-hibernate */
+ ahit = 0;
+ ufshci_mmio_write_4(ctrlr, ahit, ahit);
/* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
+ error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
return (error);
/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
+ error = ufshci_utr_req_queue_construct(ctrlr);
if (error)
return (error);
/* TODO: Separate IO and Admin slot */
- /* max_hw_pend_io is the number of slots in the transfer_req_queue */
- ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;
+
+ /*
+ * max_hw_pend_io is the number of slots in the transfer_req_queue.
+ * Reduce num_entries by one to reserve an admin slot.
+ */
+ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
+
+ /* Create a thread for the taskqueue. */
+ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
+
+ TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
return (0);
}
@@ -179,8 +385,8 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Flush In-flight IOs */
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
@@ -198,50 +404,30 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
ctrlr->resource);
nores:
+ KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
+ ("destroying uic_cmd_lock while still owned"));
mtx_destroy(&ctrlr->uic_cmd_lock);
+
+ KASSERT(!mtx_owned(&ctrlr->sc_mtx),
+ ("destroying sc_mtx while still owned"));
mtx_destroy(&ctrlr->sc_mtx);
return;
}
-int
+void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
- uint32_t ie;
- int error;
-
- /* Backup and disable all interrupts */
- ie = ufshci_mmio_read_4(ctrlr, ie);
- ufshci_mmio_write_4(ctrlr, ie, 0);
-
- /* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
-
- /* Reset Host Controller */
- error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
- if (error)
- return (error);
-
- /* Send DME_LINKSTARTUP command to start the link startup procedure */
- error = ufshci_uic_send_dme_link_startup(ctrlr);
- if (error)
- return (error);
-
- /* Enable interrupts */
- ufshci_mmio_write_4(ctrlr, ie, ie);
-
- /* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
- if (error)
- return (error);
-
- /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
- if (error)
- return (error);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
+}
- return (0);
+int
+ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (
+ ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req,
+ /*is_admin*/ false));
}
int
@@ -276,83 +462,6 @@ ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
return (0);
}
-static void
-ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
-{
- printf("ufshci(4): ufshci_ctrlr_fail\n");
-
- ctrlr->is_failed = true;
-
- /* TODO: task_mgmt_req_queue should be handled as fail */
-
- ufshci_req_queue_fail(ctrlr,
- &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
-}
-
-static void
-ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
-{
- TSENTER();
-
- if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS target drvice */
- if (ufshci_dev_init(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize Reference Clock */
- if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize unipro */
- if (ufshci_dev_init_unipro(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /*
- * Initialize UIC Power Mode
- * QEMU UFS devices do not support unipro and power mode.
- */
- if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
- ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Initialize UFS Power Mode */
- if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* Read Controller Descriptor (Device, Geometry)*/
- if (ufshci_dev_get_descriptor(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- /* TODO: Configure Write Protect */
-
- /* TODO: Configure Background Operations */
-
- /* TODO: Configure Write Booster */
-
- if (ufshci_sim_attach(ctrlr) != 0) {
- ufshci_ctrlr_fail(ctrlr, false);
- return;
- }
-
- TSEXIT();
-}
-
void
ufshci_ctrlr_start_config_hook(void *arg)
{
@@ -360,11 +469,11 @@ ufshci_ctrlr_start_config_hook(void *arg)
TSENTER();
- if (ufshci_utm_req_queue_enable(ctrlr) == 0 &&
- ufshci_ut_req_queue_enable(ctrlr) == 0)
- ufshci_ctrlr_start(ctrlr);
+ if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
+ ufshci_utr_req_queue_enable(ctrlr) == 0)
+ ufshci_ctrlr_start(ctrlr, false);
else
- ufshci_ctrlr_fail(ctrlr, false);
+ ufshci_ctrlr_fail(ctrlr);
ufshci_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
@@ -445,9 +554,9 @@ ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
}
/* UTP Task Management Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
- ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n");
ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
- /* TODO: Implement UTMR completion */
+ ufshci_req_queue_process_completions(
+ &ctrlr->task_mgmt_req_queue);
}
/* UTP Transfer Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
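
The reset rework above turns ufshci_ctrlr_reset() into a cheap enqueue, with the disable/hw-reset/restart sequence running in ufshci_ctrlr_reset_task() on a dedicated taskqueue thread where sleeping is allowed. A minimal sketch of that deferral pattern using the stock taskqueue(9) API; the mydev names are placeholders.

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/priority.h>
    #include <sys/taskqueue.h>

    struct mydev_softc {
            struct taskqueue        *taskqueue;
            struct task             reset_task;
    };

    static void
    mydev_reset_task(void *arg, int pending)
    {
            /* Runs in the taskqueue thread; sleeping is allowed here. */
    }

    static void
    mydev_reset_init(struct mydev_softc *sc)
    {
            sc->taskqueue = taskqueue_create("mydev_taskq", M_WAITOK,
                taskqueue_thread_enqueue, &sc->taskqueue);
            taskqueue_start_threads(&sc->taskqueue, 1, PI_DISK, "mydev taskq");
            TASK_INIT(&sc->reset_task, 0, mydev_reset_task, sc);
    }

    static void
    mydev_request_reset(struct mydev_softc *sc)
    {
            /* Interrupt-safe; repeated enqueues of a pending task coalesce. */
            taskqueue_enqueue(sc->taskqueue, &sc->reset_task);
    }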
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index ddf28c58fa88..253f31a93c2e 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -8,6 +8,32 @@
#include "ufshci_private.h"
void
+ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid)
+{
+ struct ufshci_request *req;
+ struct ufshci_task_mgmt_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
+ req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
+
+ upiu = (struct ufshci_task_mgmt_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type =
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST;
+ upiu->header.lun = lun;
+ upiu->header.ext_iid_or_function = function;
+ upiu->input_param1 = lun;
+ upiu->input_param2 = task_tag;
+ upiu->input_param3 = iid;
+
+ ufshci_ctrlr_submit_task_mgmt_request(ctrlr, req);
+}
+
+void
ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn,
void *cb_arg)
{
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
index a0e32914e2aa..975468e5156f 100644
--- a/sys/dev/ufshci/ufshci_dev.c
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -60,6 +60,14 @@ ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
+ struct ufshci_unit_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
+ desc, sizeof(struct ufshci_unit_descriptor)));
+}
+
+static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
enum ufshci_flags flag_type, uint8_t *flag)
{
@@ -114,6 +122,61 @@ ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
}
static int
+ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t *value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ *value = status.cpl.response_upiu.query_response_upiu.value_64;
+
+ return (0);
+}
+
+static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
uint64_t value)
@@ -270,7 +333,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
*/
const uint32_t fast_mode = 1;
const uint32_t rx_bit_shift = 4;
- const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode;
+ uint32_t power_mode, peer_granularity;
/* Update lanes with available TX/RX lanes */
if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
@@ -295,6 +358,20 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
ctrlr->rx_lanes))
return (ENXIO);
+ if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
+ /* Before changing gears, first change the number of lanes. */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+ /* Wait for power mode changed. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+ }
+
/* Set HS-GEAR to max gear */
ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
@@ -346,6 +423,7 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
return (ENXIO);
/* Set TX/RX PWRMode */
+ power_mode = (fast_mode << rx_bit_shift) | fast_mode;
if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
return (ENXIO);
@@ -366,7 +444,8 @@ ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));
/* Test with dme_peer_get to make sure there are no errors. */
- if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL))
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_granularity))
return (ENXIO);
}
@@ -398,7 +477,7 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (error);
ver = be16toh(device->dev_desc.wSpecVersion);
- ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n",
+ ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
UFSHCIV(UFSHCI_VER_REG_VS, ver));
ufshci_printf(ctrlr, "%u enabled LUNs found\n",
@@ -426,3 +505,272 @@ ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
return (0);
}
+
+static int
+ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Enable WriteBooster */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = true;
+
+ /* Enable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_set_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Enable WriteBooster buffer flush */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = true;
+
+ return (0);
+}
+
+static int
+ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Disable WriteBooster buffer flush */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = false;
+
+ /* Disable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_clear_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Disable WriteBooster */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = false;
+
+ return (0);
+}
+
+static int
+ufshci_dev_is_write_booster_buffer_life_time_left(
+ struct ufshci_controller *ctrlr, bool *is_life_time_left)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint8_t buffer_lun;
+ uint64_t life_time;
+ uint32_t error;
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
+ if (error)
+ return (error);
+
+ *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);
+
+ return (0);
+}
+
+/*
+ * This function is not yet in use. It will be used when suspend/resume is
+ * implemented.
+ */
+static __unused int
+ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
+ bool *need_flush)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ bool is_life_time_left = false;
+ uint64_t available_buffer_size, current_buffer_size;
+ uint8_t buffer_lun;
+ uint32_t error;
+
+ *need_flush = false;
+
+ if (!dev->is_wb_enabled)
+ return (0);
+
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ return (error);
+
+ if (!is_life_time_left)
+ return (ufshci_dev_disable_write_booster(ctrlr));
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
+ &available_buffer_size);
+ if (error)
+ return (error);
+
+ switch (dev->wb_user_space_config_option) {
+ case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
+ *need_flush = (available_buffer_size <=
+ UFSHCI_ATTR_WB_AVAILABLE_10);
+ break;
+ case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
+ /*
+		 * In PRESERVE USER SPACE mode, a flush should be performed
+		 * when the current buffer size is greater than 0 and the
+		 * available buffer size has dropped below
+		 * write_booster_flush_threshold.
+ */
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
+ &current_buffer_size);
+ if (error)
+ return (error);
+
+ if (current_buffer_size == 0)
+ return (0);
+
+ *need_flush = (available_buffer_size <
+ dev->write_booster_flush_threshold);
+ break;
+ default:
+ ufshci_printf(ctrlr,
+		    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value\n");
+ return (EINVAL);
+ }
+
+ /*
+ * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
+ * wExceptionEventStatus attribute.
+ */
+
+ return (0);
+}
+
+int
+ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint32_t extended_ufs_feature_support;
+ uint32_t alloc_units;
+ struct ufshci_unit_descriptor unit_desc;
+ uint8_t lun;
+ bool is_life_time_left;
+ uint32_t mega_byte = 1024 * 1024;
+ uint32_t error = 0;
+
+ extended_ufs_feature_support = be32toh(
+ dev->dev_desc.dExtendedUfsFeaturesSupport);
+ if (!(extended_ufs_feature_support &
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
+ /* This device does not support Write Booster */
+ return (0);
+ }
+
+ if (ufshci_dev_enable_write_booster(ctrlr))
+ return (0);
+
+ /* Get WriteBooster buffer parameters */
+ dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
+ dev->wb_user_space_config_option =
+ dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;
+
+ /*
+ * Find the size of the write buffer.
+ * With LU-dedicated (00h), the WriteBooster buffer is assigned
+ * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
+ * uses a single device-wide buffer shared by multiple LUs.
+ */
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
+ alloc_units = be32toh(
+ dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
+ ufshci_printf(ctrlr,
+ "WriteBooster buffer type = Shared, alloc_units=%d\n",
+ alloc_units);
+ } else if (dev->wb_buffer_type ==
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
+ ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
+ for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
+ /* Find a dedicated buffer using a unit descriptor */
+ if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
+ &unit_desc))
+ continue;
+
+ alloc_units = be32toh(
+ unit_desc.dLUNumWriteBoosterBufferAllocUnits);
+ if (alloc_units) {
+ dev->wb_dedicated_lu = lun;
+ break;
+ }
+ }
+ } else {
+ ufshci_printf(ctrlr,
+ "Not supported WriteBooster buffer type: 0x%x\n",
+ dev->wb_buffer_type);
+ goto out;
+ }
+
+ if (alloc_units == 0) {
+ ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
+ goto out;
+ }
+
+ dev->wb_buffer_size_mb = alloc_units *
+ dev->geo_desc.bAllocationUnitSize *
+ (be32toh(dev->geo_desc.dSegmentSize)) /
+ (mega_byte / UFSHCI_SECTOR_SIZE);
+
+ /* Set to flush when 40% of the available buffer size remains */
+ dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;
+
+ /*
+	 * Check whether any WriteBooster buffer lifetime is left.
+	 * The lifetime is the percentage of life used, based on P/E
+ * cycles. If "preserve user space" is enabled, writes to normal user
+ * space also consume WB life since the area is shared.
+ */
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ goto out;
+
+ if (!is_life_time_left) {
+ ufshci_printf(ctrlr,
+		    "There is no WriteBooster buffer lifetime left.\n");
+ goto out;
+ }
+
+ ufshci_printf(ctrlr, "WriteBooster Enabled\n");
+ return (0);
+out:
+ ufshci_dev_disable_write_booster(ctrlr);
+ return (error);
+}
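
The wb_buffer_size_mb computation above converts allocation units to
megabytes: bAllocationUnitSize counts segments per allocation unit and
dSegmentSize counts 512-byte sectors per segment. A standalone sketch of
the same arithmetic, with illustrative descriptor values rather than
values read from real hardware:

/*
 * Sketch of the WriteBooster size conversion; the descriptor values
 * below are illustrative assumptions, not read from a device.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE	512u

int
main(void)
{
	uint32_t alloc_units = 256;	/* dNumSharedWriteBoosterBufferAllocUnits */
	uint32_t au_size = 1;		/* bAllocationUnitSize: segments per AU */
	uint32_t segment_size = 8192;	/* dSegmentSize: sectors per segment */
	uint32_t mega_byte = 1024 * 1024;

	/* AUs * segments/AU * sectors/segment / (sectors per megabyte) */
	uint32_t size_mb = alloc_units * au_size * segment_size /
	    (mega_byte / SECTOR_SIZE);

	printf("WriteBooster buffer: %u MB\n", size_mb);	/* 1024 MB */
	return (0);
}
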
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
index 65a69ee0b518..992026fd4f4d 100644
--- a/sys/dev/ufshci/ufshci_pci.c
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -49,11 +49,13 @@ static struct _pcsid {
uint32_t ref_clk;
uint32_t quirks;
} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
- UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE },
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
+ UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK },
{ 0x98fa8086, "Intel Lakefield UFS Host Controller",
UFSHCI_REF_CLK_19_2MHz,
UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
- UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE },
+ UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE |
+ UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY },
{ 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz },
{ 0x00000000, NULL } };
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index ac58d44102a0..ec388c06e248 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -46,6 +46,8 @@ MALLOC_DECLARE(M_UFSHCI);
#define UFSHCI_UTR_ENTRIES (32)
#define UFSHCI_UTRM_ENTRIES (8)
+#define UFSHCI_SECTOR_SIZE (512)
+
struct ufshci_controller;
struct ufshci_completion_poll_status {
@@ -66,7 +68,6 @@ struct ufshci_request {
bool is_admin;
int32_t retries;
bool payload_valid;
- bool timeout;
bool spare[2]; /* Future use */
STAILQ_ENTRY(ufshci_request) stailq;
};
@@ -80,6 +81,7 @@ enum ufshci_slot_state {
};
struct ufshci_tracker {
+ TAILQ_ENTRY(ufshci_tracker) tailq;
struct ufshci_request *req;
struct ufshci_req_queue *req_queue;
struct ufshci_hw_queue *hwq;
@@ -119,12 +121,16 @@ struct ufshci_qops {
struct ufshci_req_queue *req_queue);
int (*enable)(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
+ void (*disable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int (*reserve_slot)(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
void (*ring_doorbell)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
+ bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
+ uint8_t slot);
void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool (*process_cpl)(struct ufshci_req_queue *req_queue);
@@ -133,17 +139,31 @@ struct ufshci_qops {
#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+enum ufshci_recovery {
+ RECOVERY_NONE = 0, /* Normal operations */
+ RECOVERY_WAITING, /* waiting for the reset to complete */
+};
+
/*
* Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
* (ring buffer) modes. Fields are shared; some, such as sq_head, sq_tail,
* and cq_head, are not used in SDB but are used in MCQ.
*/
struct ufshci_hw_queue {
+ struct ufshci_controller *ctrlr;
+ struct ufshci_req_queue *req_queue;
uint32_t id;
int domain;
int cpu;
- struct ufshci_utp_xfer_req_desc *utrd;
+	struct callout timer; /* protected by recovery_lock */
+	bool timer_armed; /* protected by recovery_lock */
+	enum ufshci_recovery recovery_state; /* protected by recovery_lock */
+
+ union {
+ struct ufshci_utp_xfer_req_desc *utrd;
+ struct ufshci_utp_task_mgmt_req_desc *utmrd;
+ };
bus_dma_tag_t dma_tag_queue;
bus_dmamap_t queuemem_map;
@@ -154,6 +174,9 @@ struct ufshci_hw_queue {
uint32_t num_entries;
uint32_t num_trackers;
+ TAILQ_HEAD(, ufshci_tracker) free_tr;
+ TAILQ_HEAD(, ufshci_tracker) outstanding_tr;
+
/*
* A Request List using the single doorbell method uses a dedicated
* ufshci_tracker, one per slot.
@@ -170,7 +193,13 @@ struct ufshci_hw_queue {
int64_t num_retries;
int64_t num_failures;
+ /*
+ * Each lock may be acquired independently.
+ * When both are required, acquire them in this order to avoid
+ * deadlocks. (recovery_lock -> qlock)
+ */
struct mtx_padalign qlock;
+ struct mtx_padalign recovery_lock;
};
struct ufshci_req_queue {
@@ -209,6 +238,15 @@ struct ufshci_device {
struct ufshci_geometry_descriptor geo_desc;
uint32_t unipro_version;
+
+ /* WriteBooster */
+ bool is_wb_enabled;
+ bool is_wb_flush_enabled;
+ uint32_t wb_buffer_type;
+ uint32_t wb_buffer_size_mb;
+ uint32_t wb_user_space_config_option;
+ uint8_t wb_dedicated_lu;
+ uint32_t write_booster_flush_threshold;
};
/*
@@ -224,6 +262,10 @@ struct ufshci_controller {
2 /* Need an additional 200 ms of PA_TActivate */
#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
4 /* Need to wait 1250us after power mode change */
+#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
+ 8 /* Need to change the number of lanes before changing HS-GEAR. */
+#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
+ 16 /* QEMU does not support Task Management Request */
uint32_t ref_clk;
@@ -247,6 +289,9 @@ struct ufshci_controller {
/* Fields for tracking progress during controller initialization. */
struct intr_config_hook config_hook;
+ struct task reset_task;
+ struct taskqueue *taskqueue;
+
/* For shared legacy interrupt. */
int rid;
struct resource *res;
@@ -255,6 +300,8 @@ struct ufshci_controller {
uint32_t major_version;
uint32_t minor_version;
+ uint32_t enable_aborts;
+
uint32_t num_io_queues;
uint32_t max_hw_pend_io;
@@ -328,11 +375,13 @@ void ufshci_sim_detach(struct ufshci_controller *ctrlr);
/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
-int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
+void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
+int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req);
int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
@@ -349,8 +398,12 @@ int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
+int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
/* Controller Command */
+void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid);
void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
ufshci_cb_fn_t cb_fn, void *cb_arg);
void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
@@ -361,12 +414,14 @@ void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
/* Request Queue */
bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
-int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr);
-void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr);
-void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr);
-int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
@@ -381,13 +436,23 @@ void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
struct ufshci_req_queue *req_queue);
+void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
-void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
-void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
@@ -458,13 +523,12 @@ _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
struct ufshci_request *req;
KASSERT(how == M_WAITOK || how == M_NOWAIT,
- ("nvme_allocate_request: invalid how %d", how));
+ ("ufshci_allocate_request: invalid how %d", how));
req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
if (req != NULL) {
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
- req->timeout = true;
}
return (req);
}
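
The hwq comment above pins the lock order at recovery_lock before qlock.
A minimal sketch of that discipline; the helper is hypothetical and only
mirrors how the driver takes both locks:

/*
 * Hypothetical helper illustrating the documented lock ordering
 * (recovery_lock -> qlock); not part of the driver.
 */
static void
example_take_both_locks(struct ufshci_hw_queue *hwq)
{
	mtx_lock(&hwq->recovery_lock);	/* always acquired first */
	mtx_lock(&hwq->qlock);		/* then the queue lock */

	/* ... examine recovery_state, outstanding_tr, etc. ... */

	mtx_unlock(&hwq->qlock);	/* release in reverse order */
	mtx_unlock(&hwq->recovery_lock);
}
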
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
index 6c9b3e2c8c04..6d5768505102 100644
--- a/sys/dev/ufshci/ufshci_reg.h
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -274,7 +274,7 @@ struct ufshci_registers {
#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1)
#define UFSHCI_HCS_REG_UCRDY_SHIFT (3)
#define UFSHCI_HCS_REG_UCRDY_MASK (0x1)
-#define UFSHCI_HCS_REG_UPMCRS_SHIFT (7)
+#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8)
#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7)
#define UFSHCI_HCS_REG_UTPEC_SHIFT (12)
#define UFSHCI_HCS_REG_UTPEC_MASK (0xF)
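
UPMCRS occupies bits 10:8 of the HCS register, so the shift has to be 8.
A sketch of the extraction and of what the old shift of 7 would have
read; the standalone extractor below only assumes a plain shift-and-mask
getter in the spirit of the driver's UFSHCIV() macro:

#include <stdint.h>

#define UPMCRS_SHIFT	8
#define UPMCRS_MASK	0x7

static inline uint32_t
hcs_upmcrs(uint32_t hcs)
{
	/* Extract bits 10:8 of the Host Controller Status register. */
	return ((hcs >> UPMCRS_SHIFT) & UPMCRS_MASK);
}

/*
 * With hcs == 0x100 (UPMCRS == 0x1, PWR_LOCAL), hcs_upmcrs() returns
 * 0x1. A shift of 7 would have produced 0x2, so the UPMCRS == 0x01
 * check added in ufshci_uic_cmd.c below would fail spuriously.
 */
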
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index cc9a2ddae768..7aa164d00bec 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -19,21 +19,38 @@
static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
-static const struct ufshci_qops sdb_qops = {
+static const struct ufshci_qops sdb_utmr_qops = {
.construct = ufshci_req_sdb_construct,
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
- .ring_doorbell = ufshci_req_sdb_ring_doorbell,
- .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
+ .ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+static const struct ufshci_qops sdb_utr_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
.process_cpl = ufshci_req_sdb_process_cpl,
.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
int
-ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -44,7 +61,7 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->task_mgmt_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utmr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
/*is_task_mgmt*/ true);
@@ -53,21 +70,28 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
&ctrlr->task_mgmt_req_queue);
}
+void
+ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
int
-ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
&ctrlr->task_mgmt_req_queue));
}
int
-ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -79,7 +103,7 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->transfer_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
/*is_task_mgmt*/ false);
@@ -88,14 +112,21 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->transfer_req_queue.qops.destroy(ctrlr,
&ctrlr->transfer_req_queue);
}
+void
+ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.disable(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
int
-ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
&ctrlr->transfer_req_queue));
@@ -211,22 +242,31 @@ void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_hw_queue *hwq = tr->hwq;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
- struct ufshci_utp_xfer_req_desc *desc;
uint8_t ocs;
bool retry, error, retriable;
- mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
-
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ mtx_assert(&hwq->qlock, MA_NOTOWNED);
+	/*
+	 * Copy the response from the Request Descriptor or the UTP Command
+	 * Descriptor.
+	 */
cpl.size = tr->response_size;
- memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);
+ if (req_queue->is_task_mgmt) {
+ memcpy(&cpl.response_upiu,
+ (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
- desc = &tr->hwq->utrd[tr->slot_num];
- ocs = desc->overall_command_status;
+ ocs = hwq->utmrd[tr->slot_num].overall_command_status;
+ } else {
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
+ cpl.size);
+
+ ocs = hwq->utrd[tr->slot_num].overall_command_status;
+ }
error = ufshci_req_queue_response_is_error(req_queue, ocs,
&cpl.response_upiu);
@@ -237,9 +277,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
retry = error && retriable &&
req->retries < req_queue->ctrlr->retry_count;
if (retry)
- tr->hwq->num_retries++;
+ hwq->num_retries++;
if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
- tr->hwq->num_failures++;
+ hwq->num_failures++;
KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
KASSERT(cpl.response_upiu.header.task_tag ==
@@ -257,7 +297,7 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
req->cb_fn(req->cb_arg, &cpl, error);
}
- mtx_lock(&tr->hwq->qlock);
+ mtx_lock(&hwq->qlock);
/* Clear the UTRL Completion Notification register */
req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
@@ -276,6 +316,9 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
ufshci_free_request(req);
tr->req = NULL;
tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
}
mtx_unlock(&tr->hwq->qlock);
@@ -284,7 +327,16 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
- return (req_queue->qops.process_cpl(req_queue));
+ struct ufshci_hw_queue *hwq;
+ bool done;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_lock(&hwq->recovery_lock);
+ done = req_queue->qops.process_cpl(req_queue);
+ mtx_unlock(&hwq->recovery_lock);
+
+ return (done);
}
static void
@@ -358,7 +410,19 @@ ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
}
static void
-ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+ufshci_req_queue_fill_utmr_descriptor(
+ struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
+{
+ memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;
+
+ memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
+}
+
+static void
+ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
const uint16_t response_len, const uint16_t prdt_off,
const uint16_t prdt_entry_cnt)
@@ -378,7 +442,7 @@ ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->data_direction = data_direction;
desc->interrupt = true;
/* Set the initial value to Invalid. */
- desc->overall_command_status = UFSHCI_OCS_INVALID;
+ desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
0xffffffff);
desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
@@ -390,6 +454,225 @@ ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->prdt_length = prdt_entry_cnt;
}
+static void
+ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ /* TODO: Step 2. Logical unit reset */
+ /* TODO: Step 3. Target device reset */
+ /* TODO: Step 4. Bus reset */
+
+ /*
+	 * Step 5. All previous commands have timed out.
+	 * Recovery has failed; reset the host controller.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 5: Resetting controller due to a timeout.\n");
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ ufshci_ctrlr_reset(ctrlr);
+}
+
+static void
+ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
+ bool error)
+{
+ struct ufshci_tracker *tr = arg;
+
+ /*
+	 * We still need to check the active tracker array to cover the race
+	 * where the I/O timed out at the same time the controller was
+	 * completing it. An abort request always goes on the Task Management
+	 * Request queue, but it affects either a Task Management Request or
+	 * an I/O (UTRL) queue, so take the appropriate queue lock for the
+	 * original command's queue, since we'll need it to avoid races with
+	 * the completion code and to complete the command manually.
+ */
+ mtx_lock(&tr->hwq->qlock);
+ if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
+ mtx_unlock(&tr->hwq->qlock);
+ /*
+		 * An I/O has timed out, the controller was unable to abort it
+		 * for some reason, and we have not yet processed a completion
+		 * for it. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
+ */
+ ufshci_printf(tr->hwq->ctrlr,
+ "abort task request failed, aborting task manually\n");
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+
+ if ((status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
+ (status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
+ ufshci_printf(tr->hwq->ctrlr,
+			    "Warning: the abort task request completed "
+			    "successfully, but the original task is still "
+			    "incomplete.\n");
+ return;
+ }
+
+ /* Abort Task failed. Perform recovery steps 2-5 */
+ ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
+ } else {
+ mtx_unlock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_timeout(void *arg)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ struct ufshci_controller *ctrlr = hwq->ctrlr;
+ struct ufshci_tracker *tr;
+ sbintime_t now;
+ bool idle = true;
+ bool fast;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ /*
+	 * If the controller has failed, stop polling. This ensures that any
+ * failure processing that races with the hwq timeout will fail safely.
+ */
+ if (ctrlr->is_failed) {
+ ufshci_printf(ctrlr,
+ "Failed controller, stopping watchdog timeout.\n");
+ hwq->timer_armed = false;
+ return;
+ }
+
+ /*
+ * Shutdown condition: We set hwq->timer_armed to false in
+ * ufshci_req_sdb_destroy before calling callout_drain. When we call
+ * that, this routine might get called one last time. Exit w/o setting a
+ * timeout. None of the watchdog stuff needs to be done since we're
+ * destroying the hwq.
+ */
+ if (!hwq->timer_armed) {
+ ufshci_printf(ctrlr,
+ "Timeout fired during ufshci_utr_req_queue_destroy\n");
+ return;
+ }
+
+ switch (hwq->recovery_state) {
+ case RECOVERY_NONE:
+ /*
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
+ */
+ fast = false;
+ mtx_lock(&hwq->qlock);
+ now = getsbinuptime();
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+			 * If the first real transaction has not timed out,
+			 * then we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
+ }
+ mtx_unlock(&hwq->qlock);
+ if (idle || fast)
+ break;
+
+ /*
+ * There's a stale transaction at the start of the queue whose
+		 * deadline has passed. Poll the completions as a last-ditch
+ * effort in case an interrupt has been missed.
+ */
+ hwq->req_queue->qops.process_cpl(hwq->req_queue);
+
+ /*
+		 * Now that we've run the ISR, re-check whether any commands
+		 * have timed out, and abort them or reset the card if so.
+ */
+ mtx_lock(&hwq->qlock);
+ idle = true;
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the timeout was
+ * changed by the user, but we eventually timeout then).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout recovery is performed in five steps. If
+ * recovery fails at any step, the process continues to
+			 * the next one:
+ * Step 1. Abort task
+ * Step 2. Logical unit reset (TODO)
+ * Step 3. Target device reset (TODO)
+ * Step 4. Bus reset (TODO)
+ * Step 5. Host controller reset
+ *
+ * If the timeout occurred in the Task Management
+ * Request queue, ignore Step 1.
+ */
+ if (ctrlr->enable_aborts &&
+ !hwq->req_queue->is_task_mgmt &&
+ tr->req->cb_fn != ufshci_abort_complete) {
+ /*
+ * Step 1. Timeout expired, abort the task.
+ *
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the Task
+ * Management Request queue which will reset the
+ * task if it times out.
+ */
+ ufshci_printf(ctrlr,
+				    "Recovery step 1: Timeout occurred, aborting task %d.\n",
+ tr->req->request_upiu.header.task_tag);
+ ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
+ ufshci_abort_complete, tr,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
+ tr->req->request_upiu.header.lun,
+ tr->req->request_upiu.header.task_tag, 0);
+ } else {
+ /* Recovery Step 2-5 */
+ ufshci_req_queue_timeout_recovery(ctrlr, hwq);
+ idle = false;
+ break;
+ }
+ }
+ mtx_unlock(&hwq->qlock);
+ break;
+
+ case RECOVERY_WAITING:
+ /*
+ * These messages aren't interesting while we're suspended. We
+ * put the queues into waiting state while suspending.
+ * Suspending takes a while, so we'll see these during that time
+ * and they aren't diagnostic. At other times, they indicate a
+ * problem that's worth complaining about.
+ */
+ if (!device_is_suspended(ctrlr->dev))
+ ufshci_printf(ctrlr, "Waiting for reset to complete\n");
+ idle = false; /* We want to keep polling */
+ break;
+ }
+
+ /*
+ * Rearm the timeout.
+ */
+ if (!idle) {
+ callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
+ } else {
+ hwq->timer_armed = false;
+ }
+}
+
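
Because trackers enter outstanding_tr in submission order and commands on
a queue share a timeout, the watchdog can stop scanning at the first
unexpired tracker. A condensed sketch of that invariant;
scan_for_timeouts() and handle_timed_out() are hypothetical, the walk
mirrors ufshci_req_queue_timeout() above:

/* Hypothetical standalone fragment mirroring the scan above. */
static void
scan_for_timeouts(struct ufshci_hw_queue *hwq)
{
	struct ufshci_tracker *tr;
	sbintime_t now = getsbinuptime();

	mtx_assert(&hwq->qlock, MA_OWNED);
	TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
		/* Submission order: later entries expire even later. */
		if (now <= tr->deadline)
			break;
		handle_timed_out(tr);	/* hypothetical handler */
	}
}
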
/*
* Submit the tracker to the hardware.
*/
@@ -399,34 +682,57 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
{
struct ufshci_controller *ctrlr = req_queue->ctrlr;
struct ufshci_request *req = tr->req;
+ struct ufshci_hw_queue *hwq;
uint64_t ucd_paddr;
uint16_t request_len, response_off, response_len;
uint8_t slot_num = tr->slot_num;
+ int timeout;
- mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
-
- /* TODO: Check timeout */
-
- request_len = req->request_size;
- response_off = UFSHCI_UTP_XFER_REQ_SIZE;
- response_len = req->response_size;
-
- /* Prepare UTP Command Descriptor */
- memcpy(tr->ucd, &req->request_upiu, request_len);
- memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+ hwq = req_queue->qops.get_hw_queue(req_queue);
- /* Prepare PRDT */
- if (req->payload_valid)
- ufshci_req_queue_prepare_prdt(tr);
+ mtx_assert(&hwq->qlock, MA_OWNED);
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (req->cb_fn == ufshci_completion_poll_cb)
+ timeout = 1;
+ else
+ timeout = ctrlr->timeout_period;
+ tr->deadline = getsbinuptime() + timeout * SBT_1S;
+ if (!hwq->timer_armed) {
+ hwq->timer_armed = true;
+ /*
+		 * The watchdog wakes up every 0.5 seconds to check whether
+		 * any deadline has passed.
+ */
+ callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
+ ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
+ }
- /* Prepare UTP Transfer Request Descriptor. */
- ucd_paddr = tr->ucd_bus_addr;
- ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
- data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
- tr->prdt_entry_cnt);
+ if (req_queue->is_task_mgmt) {
+ /* Prepare UTP Task Management Request Descriptor. */
+ ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
+ req);
+ } else {
+ request_len = req->request_size;
+ response_off = UFSHCI_UTP_XFER_REQ_SIZE;
+ response_len = req->response_size;
+
+ /* Prepare UTP Command Descriptor */
+ memcpy(tr->ucd, &req->request_upiu, request_len);
+ memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+
+ /* Prepare PRDT */
+ if (req->payload_valid)
+ ufshci_req_queue_prepare_prdt(tr);
+
+ /* Prepare UTP Transfer Request Descriptor. */
+ ucd_paddr = tr->ucd_bus_addr;
+ ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
+ data_direction, ucd_paddr, response_off, response_len,
+ tr->prdt_off, tr->prdt_entry_cnt);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -465,6 +771,9 @@ _ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
tr->deadline = SBT_MAX;
tr->req = req;
+ TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);
+
ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
return (0);
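
The two tracker lists added above give each hardware queue an O(1) free
pool and an oldest-first view of in-flight commands. The lifecycle,
condensed from the code in this file (qlock held around both
transitions):

/* On submit: the oldest tracker stays at the head of outstanding_tr. */
TAILQ_REMOVE(&hwq->free_tr, tr, tailq);
TAILQ_INSERT_TAIL(&hwq->outstanding_tr, tr, tailq);

/* On completion: recycle the tracker to the front of the free pool. */
TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
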
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index b1f303afaef5..ca47aa159c5b 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -26,12 +26,6 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
tr = hwq->act_tr[i];
bus_dmamap_destroy(req_queue->dma_tag_payload,
tr->payload_dma_map);
- free(tr, M_UFSHCI);
- }
-
- if (hwq->act_tr) {
- free(hwq->act_tr, M_UFSHCI);
- hwq->act_tr = NULL;
}
if (req_queue->ucd) {
@@ -46,6 +40,8 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
bus_dma_tag_destroy(req_queue->dma_tag_ucd);
req_queue->dma_tag_ucd = NULL;
}
+
+ free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
}
static void
@@ -76,11 +72,14 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint32_t num_entries, struct ufshci_controller *ctrlr)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
- struct ufshci_tracker *tr;
size_t ucd_allocsz, payload_allocsz;
uint8_t *ucdmem;
int i, error;
+ req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
+ req_queue->num_trackers,
+ M_UFSHCI, M_ZERO | M_NOWAIT);
+
/*
* Each component must be page aligned, and individual PRP lists
* cannot cross a page boundary.
@@ -134,27 +133,14 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
goto out;
}
- hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
- req_queue->num_entries,
- M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
for (i = 0; i < req_queue->num_trackers; i++) {
- tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
- DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
bus_dmamap_create(req_queue->dma_tag_payload, 0,
- &tr->payload_dma_map);
+ &hwq->act_tr[i]->payload_dma_map);
- tr->req_queue = req_queue;
- tr->slot_num = i;
- tr->slot_state = UFSHCI_SLOT_STATE_FREE;
-
- tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
- tr->ucd_bus_addr = hwq->ucd_bus_addr[i];
+ hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];
ucdmem += sizeof(struct ufshci_utp_cmd_desc);
-
- hwq->act_tr[i] = tr;
}
return (0);
@@ -163,25 +149,19 @@ out:
return (ENOMEM);
}
-static bool
-ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
- uint8_t slot)
-{
- uint32_t utrldbr;
-
- utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- return (!(utrldbr & (1 << slot)));
-}
-
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
struct ufshci_hw_queue *hwq;
- size_t allocsz;
+ size_t desc_size, alloc_size;
uint64_t queuemem_phys;
uint8_t *queuemem;
- int error;
+ struct ufshci_tracker *tr;
+ const size_t lock_name_len = 32;
+ char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
+ char *base;
+ int i, error;
req_queue->ctrlr = ctrlr;
req_queue->is_task_mgmt = is_task_mgmt;
@@ -198,21 +178,34 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq = &req_queue->hwq[UFSHCI_SDB_Q];
hwq->num_entries = req_queue->num_entries;
hwq->num_trackers = req_queue->num_trackers;
- req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
- req_queue->num_trackers,
- M_UFSHCI, M_ZERO | M_NOWAIT);
+ hwq->ctrlr = ctrlr;
+ hwq->req_queue = req_queue;
+
+ base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
+ snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
+ UFSHCI_SDB_Q);
+ snprintf(recovery_lock_name, sizeof(recovery_lock_name),
+ "%s #%d recovery lock", base, UFSHCI_SDB_Q);
- mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);
+ mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
+ mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);
+
+ callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
+ hwq->timer_armed = false;
+ hwq->recovery_state = RECOVERY_WAITING;
/*
* Allocate physical memory for request queue (UTP Transfer Request
* Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
* Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
*/
- allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
+ desc_size = is_task_mgmt ?
+ sizeof(struct ufshci_utp_task_mgmt_req_desc) :
+ sizeof(struct ufshci_utp_xfer_req_desc);
+ alloc_size = num_entries * desc_size;
error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
- allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
+ alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
if (error != 0) {
ufshci_printf(ctrlr, "request queue tag create failed %d\n",
error);
@@ -227,7 +220,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
}
if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
- allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
+ alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
ufshci_printf(ctrlr, "failed to load request queue memory\n");
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
hwq->queuemem_map);
@@ -238,13 +231,34 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq->num_intr_handler_calls = 0;
hwq->num_retries = 0;
hwq->num_failures = 0;
- hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
hwq->req_queue_addr = queuemem_phys;
+ /* Allocate trackers */
+ hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
+ req_queue->num_entries,
+ M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ TAILQ_INIT(&hwq->free_tr);
+ TAILQ_INIT(&hwq->outstanding_tr);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
+ DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ tr->req_queue = req_queue;
+ tr->slot_num = i;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
+
+ hwq->act_tr[i] = tr;
+ }
+
if (is_task_mgmt) {
/* UTP Task Management Request (UTMR) */
uint32_t utmrlba, utmrlbau;
+ hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;
+
utmrlba = hwq->req_queue_addr & 0xffffffff;
utmrlbau = hwq->req_queue_addr >> 32;
ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
@@ -253,6 +267,8 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
/* UTP Transfer Request (UTR) */
uint32_t utrlba, utrlbau;
+ hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
+
/*
* Allocate physical memory for the command descriptor.
* UTP Transfer Request (UTR) requires memory for a separate
@@ -262,8 +278,6 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
ctrlr) != 0) {
ufshci_printf(ctrlr,
"failed to construct cmd descriptor memory\n");
- bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
- hwq->queuemem_map);
goto out;
}
@@ -284,10 +298,27 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
+
+ mtx_lock(&hwq->recovery_lock);
+ hwq->timer_armed = false;
+ mtx_unlock(&hwq->recovery_lock);
+ callout_drain(&hwq->timer);
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ free(tr, M_UFSHCI);
+ }
+
+ if (hwq->act_tr) {
+ free(hwq->act_tr, M_UFSHCI);
+ hwq->act_tr = NULL;
+ }
+
if (hwq->utrd != NULL) {
bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
@@ -300,10 +331,11 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
hwq->dma_tag_queue = NULL;
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_destroy(&hwq->recovery_lock);
if (mtx_initialized(&hwq->qlock))
mtx_destroy(&hwq->qlock);
- free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
free(req_queue->hwq, M_UFSHCI);
}
@@ -313,10 +345,36 @@ ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
return &req_queue->hwq[UFSHCI_SDB_Q];
}
+void
+ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr, *tr_temp;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
+ hwq->recovery_state = RECOVERY_WAITING;
+ TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
+ tr->deadline = SBT_MAX;
+ }
+
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+}
+
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
if (req_queue->is_task_mgmt) {
uint32_t hcs, utmrldbr, utmrlrsr;
@@ -368,6 +426,14 @@ ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
}
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+ KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq\n"));
+
+ hwq->recovery_state = RECOVERY_NONE;
+
return (0);
}
@@ -389,7 +455,18 @@ ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
}
void
-ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ /*
+ * NOP
+ * UTP Task Management does not have a Completion Notification
+ * Register.
+ */
+}
+
+void
+ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrlcnr;
@@ -399,7 +476,19 @@ ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
}
void
-ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utmrldbr = 0;
+
+ utmrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+void
+ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrldbr = 0;
@@ -408,9 +497,26 @@ ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+bool
+ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utmrldbr;
- // utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- // printf("DB=0x%08x\n", utrldbr);
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ return (!(utmrldbr & (1 << slot)));
+}
+
+bool
+ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utrldbr;
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ return (!(utrldbr & (1 << slot)));
}
bool
@@ -421,6 +527,8 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
uint8_t slot;
bool done = false;
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
hwq->num_intr_handler_calls++;
bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
@@ -435,7 +543,7 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
* is cleared.
*/
if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
- ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
+ req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
slot)) {
ufshci_req_queue_complete_tracker(tr);
done = true;
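
The is_doorbell_cleared hooks above encode the single-doorbell completion
rule: slot i is outstanding while bit i of the doorbell register is set,
so a scheduled slot whose bit has cleared has completed. A minimal
sketch; slot_completed() is hypothetical, the slot-state constant is the
driver's:

static bool
slot_completed(uint32_t doorbell, uint8_t slot,
    enum ufshci_slot_state state)
{
	/* A scheduled slot whose doorbell bit has cleared is complete. */
	return (state == UFSHCI_SLOT_STATE_SCHEDULED &&
	    (doorbell & (1u << slot)) == 0);
}
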
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
index db24561f4169..828b520614a5 100644
--- a/sys/dev/ufshci/ufshci_sim.c
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -241,7 +241,6 @@ ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_ABORT:
- /* TODO: Implement Task Management CMD*/
ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
break;
case XPT_SET_TRAN_SETTINGS:
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
index 5e5069f12e5f..56bc06b13f3c 100644
--- a/sys/dev/ufshci/ufshci_sysctl.c
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -152,6 +152,7 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
struct sysctl_ctx_list *ctrlr_ctx;
struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
struct sysctl_oid_list *ctrlr_list, *ioq_list;
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
#define QUEUE_NAME_LENGTH 16
char queue_name[QUEUE_NAME_LENGTH];
int i;
@@ -177,6 +178,25 @@ ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
&ctrlr->cap, 0, "Number of I/O queue pairs");
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled",
+ CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable");
+
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled",
+ CTLFLAG_RD, &dev->is_wb_flush_enabled, 0,
+ "WriteBooster flush enable/disable");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type",
+ CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb",
+ CTLFLAG_RD, &dev->wb_buffer_size_mb, 0,
+ "WriteBooster buffer size in MB");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "wb_user_space_config_option", CTLFLAG_RD,
+ &dev->wb_user_space_config_option, 0,
+ "WriteBooster preserve user space mode");
+
SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
0, ufshci_sysctl_timeout_period, "IU",
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
index 2c5f635dc11e..b9c867ff7065 100644
--- a/sys/dev/ufshci/ufshci_uic_cmd.c
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -14,7 +14,7 @@
int
ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
{
- uint32_t is;
+ uint32_t is, hcs;
int timeout;
/* Wait for the IS flag to change */
@@ -40,6 +40,15 @@ ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
DELAY(10);
}
+ /* Check HCS power mode change request status */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) {
+ ufshci_printf(ctrlr,
+ "Power mode change request status error: 0x%x\n",
+ UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs));
+ return (ENXIO);
+ }
+
return (0);
}
@@ -112,6 +121,7 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
{
int error;
+ uint32_t config_result_code;
mtx_lock(&ctrlr->uic_cmd_lock);
@@ -134,6 +144,13 @@ ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
if (error)
return (ENXIO);
+ config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2);
+ if (config_result_code) {
+ ufshci_printf(ctrlr,
+ "Failed to send UIC command. (config result code = 0x%x)\n",
+ config_result_code);
+ }
+
if (return_value != NULL)
*return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index 5be592512196..788b2b718062 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -156,6 +156,7 @@ struct xhci_std_temp {
static void xhci_do_poll(struct usb_bus *);
static void xhci_device_done(struct usb_xfer *, usb_error_t);
+static void xhci_get_xecp(struct xhci_softc *);
static void xhci_root_intr(struct xhci_softc *);
static void xhci_free_device_ext(struct usb_device *);
static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *,
@@ -566,6 +567,8 @@ xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32)
device_printf(self, "%d bytes context size, %d-bit DMA\n",
sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits);
+ xhci_get_xecp(sc);
+
/* enable 64Kbyte control endpoint quirk */
sc->sc_bus.control_ep_quirk = (xhcictlquirk ? 1 : 0);
@@ -654,6 +657,88 @@ xhci_uninit(struct xhci_softc *sc)
}
static void
+xhci_get_xecp(struct xhci_softc *sc)
+{
+ uint32_t hccp1;
+ uint32_t eec;
+ uint32_t eecp;
+ bool first = true;
+
+ hccp1 = XREAD4(sc, capa, XHCI_HCSPARAMS0);
+
+ if (XHCI_HCS0_XECP(hccp1) == 0) {
+ device_printf(sc->sc_bus.parent,
+ "xECP: no capabilities found\n");
+ return;
+ }
+
+ /*
+ * Parse the xECP Capabilities table and print known caps.
+ * Implemented, vendor and reserved xECP Capabilities values are
+ * documented in Table 7.2 of eXtensible Host Controller Interface for
+ * Universal Serial Bus (xHCI) Rev 1.2b 2023.
+ */
+ device_printf(sc->sc_bus.parent, "xECP capabilities <");
+
+ eec = -1;
+ for (eecp = XHCI_HCS0_XECP(hccp1) << 2;
+ eecp != 0 && XHCI_XECP_NEXT(eec) != 0;
+ eecp += XHCI_XECP_NEXT(eec) << 2) {
+ eec = XREAD4(sc, capa, eecp);
+
+ uint8_t xecpid = XHCI_XECP_ID(eec);
+
+ if ((xecpid >= 11 && xecpid <= 16) ||
+ (xecpid >= 19 && xecpid <= 191)) {
+ if (!first)
+ printf(",");
+ printf("RES(%x)", xecpid);
+ } else if (xecpid > 191) {
+ if (!first)
+ printf(",");
+ printf("VEND(%x)", xecpid);
+ } else {
+ if (!first)
+ printf(",");
+			switch (xecpid) {
+ case XHCI_ID_USB_LEGACY:
+ printf("LEGACY");
+ break;
+ case XHCI_ID_PROTOCOLS:
+ printf("PROTO");
+ break;
+ case XHCI_ID_POWER_MGMT:
+ printf("POWER");
+ break;
+ case XHCI_ID_VIRTUALIZATION:
+ printf("VIRT");
+ break;
+ case XHCI_ID_MSG_IRQ:
+ printf("MSG IRQ");
+ break;
+ case XHCI_ID_USB_LOCAL_MEM:
+ printf("LOCAL MEM");
+ break;
+ case XHCI_ID_USB_DEBUG:
+ printf("DEBUG");
+ break;
+ case XHCI_ID_EXT_MSI:
+ printf("EXT MSI");
+ break;
+ case XHCI_ID_USB3_TUN:
+ printf("TUN");
+ break;
+ }
+ }
+ first = false;
+ }
+ printf(">\n");
+}
+
+static void
xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state)
{
struct xhci_softc *sc = XHCI_BUS2SC(bus);
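
Both the initial xECP offset and each capability's next field are counted
in 32-bit dwords, which is why xhci_get_xecp() shifts them left by 2 to
form byte offsets. A condensed sketch of the walk; read_capa_dword() and
handle_capability() are hypothetical stand-ins for the MMIO read and the
ID handling:

/* Hypothetical condensed form of the xECP walk above. */
static void
walk_xecp(uint32_t first_dword_off)
{
	uint32_t off, cap;

	for (off = first_dword_off << 2; off != 0;) {
		cap = read_capa_dword(off);
		handle_capability(XHCI_XECP_ID(cap));
		if (XHCI_XECP_NEXT(cap) == 0)
			break;		/* a zero next pointer ends the list */
		off += XHCI_XECP_NEXT(cap) << 2;	/* dwords -> bytes */
	}
}
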
diff --git a/sys/dev/usb/controller/xhci_pci.c b/sys/dev/usb/controller/xhci_pci.c
index d5cfd228a429..820fb2f738a1 100644
--- a/sys/dev/usb/controller/xhci_pci.c
+++ b/sys/dev/usb/controller/xhci_pci.c
@@ -178,6 +178,8 @@ xhci_pci_match(device_t self)
return ("Intel Tiger Lake-H USB 3.2 controller");
case 0x461e8086:
return ("Intel Alder Lake-P Thunderbolt 4 USB controller");
+ case 0x4b7d8086:
+ return ("Intel Elkhart Lake USB 3.1 controller");
case 0x51ed8086:
return ("Intel Alder Lake USB 3.2 controller");
case 0x5aa88086:
diff --git a/sys/dev/usb/controller/xhcireg.h b/sys/dev/usb/controller/xhcireg.h
index 9d0b6e2f4b4b..821897155544 100644
--- a/sys/dev/usb/controller/xhcireg.h
+++ b/sys/dev/usb/controller/xhcireg.h
@@ -205,6 +205,11 @@
#define XHCI_ID_VIRTUALIZATION 0x0004
#define XHCI_ID_MSG_IRQ 0x0005
#define XHCI_ID_USB_LOCAL_MEM 0x0006
+/* values 7-9 are reserved */
+#define XHCI_ID_USB_DEBUG 0x000a
+/* values 11-16 are reserved */
+#define XHCI_ID_EXT_MSI 0x0011
+#define XHCI_ID_USB3_TUN 0x0012
/* XHCI register R/W wrappers */
#define XREAD1(sc, what, a) \
diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c
index a31081663f0c..e2b97f5accac 100644
--- a/sys/dev/usb/input/uhid.c
+++ b/sys/dev/usb/input/uhid.c
@@ -40,8 +40,6 @@
* HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*/
-#include "opt_hid.h"
-
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -928,11 +926,7 @@ static device_method_t uhid_methods[] = {
};
static driver_t uhid_driver = {
-#ifdef HIDRAW_MAKE_UHID_ALIAS
- .name = "hidraw",
-#else
.name = "uhid",
-#endif
.methods = uhid_methods,
.size = sizeof(struct uhid_softc),
};
diff --git a/sys/dev/usb/input/usbhid.c b/sys/dev/usb/input/usbhid.c
index df810012b3f8..cba3f34053e5 100644
--- a/sys/dev/usb/input/usbhid.c
+++ b/sys/dev/usb/input/usbhid.c
@@ -114,6 +114,7 @@ struct usbhid_xfer_ctx {
void *cb_ctx;
int waiters;
bool influx;
+ bool no_readahead;
};
struct usbhid_softc {
@@ -272,7 +273,7 @@ usbhid_intr_handler_cb(struct usbhid_xfer_ctx *xfer_ctx)
sc->sc_intr_handler(sc->sc_intr_ctx, xfer_ctx->buf,
xfer_ctx->req.intr.actlen);
- return (0);
+ return (xfer_ctx->no_readahead ? ECANCELED : 0);
}
static int
@@ -430,6 +431,7 @@ usbhid_intr_start(device_t dev, device_t child __unused)
.cb = usbhid_intr_handler_cb,
.cb_ctx = sc,
.buf = sc->sc_intr_buf,
+ .no_readahead = hid_test_quirk(&sc->sc_hw, HQ_NO_READAHEAD),
};
sc->sc_xfer_ctx[POLL_XFER(USBHID_INTR_IN_DT)] = (struct usbhid_xfer_ctx) {
.req.intr.maxlen =
@@ -705,6 +707,10 @@ usbhid_ioctl(device_t dev, device_t child __unused, unsigned long cmd,
if (error == 0)
ucr->ucr_actlen = UGETW(req.ctrl.wLength);
break;
+ case USB_GET_DEVICEINFO:
+ error = usbd_fill_deviceinfo(sc->sc_udev,
+ (struct usb_device_info *)data);
+ break;
default:
error = EINVAL;
}
diff --git a/sys/dev/usb/misc/cp2112.c b/sys/dev/usb/misc/cp2112.c
index d4776ca342cb..201a3ec51ce4 100644
--- a/sys/dev/usb/misc/cp2112.c
+++ b/sys/dev/usb/misc/cp2112.c
@@ -708,11 +708,12 @@ cp2112gpio_attach(device_t dev)
}
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
- device_printf(dev, "gpiobus_attach_bus failed\n");
+ device_printf(dev, "gpiobus_add_bus failed\n");
goto detach;
}
+ bus_attach_children(dev);
return (0);
detach:
diff --git a/sys/dev/usb/net/if_ipheth.c b/sys/dev/usb/net/if_ipheth.c
index f70113c53eb4..cfa800707391 100644
--- a/sys/dev/usb/net/if_ipheth.c
+++ b/sys/dev/usb/net/if_ipheth.c
@@ -55,6 +55,7 @@
#include <net/if_var.h>
#include <dev/usb/usb.h>
+#include <dev/usb/usb_cdc.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"
@@ -81,6 +82,9 @@ static uether_fn_t ipheth_start;
static uether_fn_t ipheth_setmulti;
static uether_fn_t ipheth_setpromisc;
+static ipheth_consumer_t ipheth_consume_read;
+static ipheth_consumer_t ipheth_consume_read_ncm;
+
#ifdef USB_DEBUG
static int ipheth_debug = 0;
@@ -96,7 +100,31 @@ static const struct usb_config ipheth_config[IPHETH_N_TRANSFER] = {
.direction = UE_DIR_RX,
.frames = IPHETH_RX_FRAMES_MAX,
.bufsize = (IPHETH_RX_FRAMES_MAX * MCLBYTES),
- .flags = {.short_frames_ok = 1,.short_xfer_ok = 1,.ext_buffer = 1,},
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1, .ext_buffer = 1,},
+ .callback = ipheth_bulk_read_callback,
+ .timeout = 0, /* no timeout */
+ },
+
+ [IPHETH_BULK_TX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_TX,
+ .frames = IPHETH_TX_FRAMES_MAX,
+ .bufsize = (IPHETH_TX_FRAMES_MAX * IPHETH_BUF_SIZE),
+ .flags = {.force_short_xfer = 1,},
+ .callback = ipheth_bulk_write_callback,
+ .timeout = IPHETH_TX_TIMEOUT,
+ },
+};
+
+static const struct usb_config ipheth_config_ncm[IPHETH_N_TRANSFER] = {
+ [IPHETH_BULK_RX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_RX,
+ .frames = 1,
+ .bufsize = IPHETH_RX_NCM_BUF_SIZE,
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1,},
.callback = ipheth_bulk_read_callback,
.timeout = 0, /* no timeout */
},
@@ -204,6 +232,21 @@ ipheth_get_mac_addr(struct ipheth_softc *sc)
return (0);
}
+static bool
+ipheth_enable_ncm(struct ipheth_softc *sc)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_VENDOR_INTERFACE;
+ req.bRequest = IPHETH_CMD_ENABLE_NCM;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ return (usbd_do_request(sc->sc_ue.ue_udev, NULL, &req, NULL) == 0);
+}
+
static int
ipheth_probe(device_t dev)
{
@@ -221,6 +264,7 @@ ipheth_attach(device_t dev)
struct ipheth_softc *sc = device_get_softc(dev);
struct usb_ether *ue = &sc->sc_ue;
struct usb_attach_arg *uaa = device_get_ivars(dev);
+ const struct usb_config *config;
int error;
sc->sc_iface_no = uaa->info.bIfaceIndex;
@@ -235,18 +279,29 @@ ipheth_attach(device_t dev)
device_printf(dev, "Cannot set alternate setting\n");
goto detach;
}
- error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no,
- sc->sc_xfer, ipheth_config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
- if (error) {
- device_printf(dev, "Cannot setup USB transfers\n");
- goto detach;
- }
+
ue->ue_sc = sc;
ue->ue_dev = dev;
ue->ue_udev = uaa->device;
ue->ue_mtx = &sc->sc_mtx;
ue->ue_methods = &ipheth_ue_methods;
+ if (ipheth_enable_ncm(sc)) {
+ config = ipheth_config_ncm;
+ sc->is_ncm = true;
+ sc->consume = &ipheth_consume_read_ncm;
+ } else {
+ config = ipheth_config;
+ sc->consume = &ipheth_consume_read;
+ }
+
+ error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no, sc->sc_xfer,
+ config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "Cannot setup USB transfers\n");
+ goto detach;
+ }
+
error = ipheth_get_mac_addr(sc);
if (error) {
device_printf(dev, "Cannot get MAC address\n");
@@ -389,12 +444,9 @@ ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
int actlen;
int aframes;
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
-
- DPRINTFN(1, "\n");
-
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
actlen, aframes);
@@ -471,53 +523,40 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
uint8_t x;
int actlen;
int aframes;
- int len;
-
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
-
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTF("received %u bytes in %u frames\n", actlen, aframes);
- for (x = 0; x != aframes; x++) {
- m = sc->sc_rx_buf[x];
- sc->sc_rx_buf[x] = NULL;
- len = usbd_xfer_frame_len(xfer, x);
-
- if (len < (int)(sizeof(struct ether_header) +
- IPHETH_RX_ADJ)) {
- m_freem(m);
- continue;
- }
-
- m_adj(m, IPHETH_RX_ADJ);
-
- /* queue up mbuf */
- uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
- }
+ for (x = 0; x != aframes; x++)
+ sc->consume(xfer, x);
/* FALLTHROUGH */
case USB_ST_SETUP:
-
- for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
- if (sc->sc_rx_buf[x] == NULL) {
- m = uether_newbuf();
- if (m == NULL)
- goto tr_stall;
-
- /* cancel alignment for ethernet */
- m_adj(m, ETHER_ALIGN);
-
- sc->sc_rx_buf[x] = m;
- } else {
- m = sc->sc_rx_buf[x];
+ if (!sc->is_ncm) {
+ for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
+ if (sc->sc_rx_buf[x] == NULL) {
+ m = uether_newbuf();
+ if (m == NULL)
+ goto tr_stall;
+
+ /* cancel alignment for ethernet */
+ m_adj(m, ETHER_ALIGN);
+
+ sc->sc_rx_buf[x] = m;
+ } else {
+ m = sc->sc_rx_buf[x];
+ }
+ usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
}
-
- usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
+ usbd_xfer_set_frames(xfer, x);
+ } else {
+ usbd_xfer_set_frame_len(xfer, 0,
+ IPHETH_RX_NCM_BUF_SIZE);
+ usbd_xfer_set_frames(xfer, 1);
}
- /* set number of frames and start hardware */
- usbd_xfer_set_frames(xfer, x);
+
usbd_transfer_submit(xfer);
/* flush any received frames */
uether_rxflush(&sc->sc_ue);
@@ -539,3 +578,86 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
}
+
+static void
+ipheth_consume_read(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct mbuf *m = sc->sc_rx_buf[x];
+ int len;
+
+ sc->sc_rx_buf[x] = NULL;
+ len = usbd_xfer_frame_len(xfer, x);
+
+ if (len < (int)(sizeof(struct ether_header) + IPHETH_RX_ADJ)) {
+ m_freem(m);
+ return;
+ }
+
+ m_adj(m, IPHETH_RX_ADJ);
+
+ /* queue up mbuf */
+ uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
+}
+
+static void
+ipheth_consume_read_ncm(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
+ struct ncm_data_cache ncm;
+ if_t ifp = uether_getifp(&sc->sc_ue);
+ struct mbuf *new_buf;
+ int i, actlen;
+ uint16_t dp_offset, dp_len;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ if (actlen < IPHETH_NCM_HEADER_SIZE)
+ return;
+
+ usbd_copy_out(pc, 0, &ncm.hdr, sizeof(ncm.hdr));
+
+ if (UGETDW(ncm.hdr.dwSignature) != 0x484D434E)
+ return;
+
+	/* On iOS, the DPT immediately follows the header */
+ if (UGETW(ncm.hdr.wDptIndex) != (int)(sizeof(struct usb_ncm16_hdr)))
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex), &ncm.dpt, sizeof(ncm.dpt));
+
+ if (UGETDW(ncm.dpt.dwSignature) != 0x304D434E)
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex) + sizeof(ncm.dpt), &ncm.dp,
+ sizeof(ncm.dp));
+
+ for (i = 0; i < IPHETH_NCM_DPT_DP_NUM; ++i) {
+ dp_offset = UGETW(ncm.dp[i].wFrameIndex);
+ dp_len = UGETW(ncm.dp[i].wFrameLength);
+
+		/* A zero entry terminates the table (3.3.1, USB CDC NCM spec v1.0) */
+ if (dp_offset == 0 && dp_len == 0)
+ break;
+
+ if (dp_offset < IPHETH_NCM_HEADER_SIZE || dp_offset >= actlen ||
+ actlen < (dp_len + dp_offset) ||
+ dp_len < sizeof(struct ether_header)) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ continue;
+ }
+ if (dp_len > (MCLBYTES - ETHER_ALIGN)) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+
+ new_buf = uether_newbuf();
+ if (new_buf == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+ usbd_copy_out(pc, dp_offset, new_buf->m_data, dp_len);
+ uether_rxmbuf(&sc->sc_ue, new_buf, dp_len);
+ }
+}
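
A standalone sketch of the bounds checks ipheth_consume_read_ncm() applies to each NCM16 datagram pointer entry. The signature constants are the little-endian "NCMH"/"NCM0" values tested above; the helper itself is hypothetical:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define NCM_NTH16_SIG	0x484D434EU	/* "NCMH", block header */
	#define NCM_NDP16_SIG	0x304D434EU	/* "NCM0", pointer table */

	static bool
	ncm16_dp_valid(uint16_t dp_offset, uint16_t dp_len,
	    size_t header_size, size_t actlen, size_t min_len)
	{
		if (dp_offset == 0 && dp_len == 0)
			return (false);	/* terminator entry */
		if (dp_offset < header_size || dp_offset >= actlen)
			return (false);	/* must start past the headers */
		if ((size_t)dp_offset + dp_len > actlen)
			return (false);	/* must fit in the transfer */
		if (dp_len < min_len)
			return (false);	/* shorter than an Ethernet header */
		return (true);
	}
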
diff --git a/sys/dev/usb/net/if_iphethvar.h b/sys/dev/usb/net/if_iphethvar.h
index 203bb96b6f22..d637e8f67d01 100644
--- a/sys/dev/usb/net/if_iphethvar.h
+++ b/sys/dev/usb/net/if_iphethvar.h
@@ -41,6 +41,7 @@
#define IPHETH_BUF_SIZE 1514
#define IPHETH_TX_TIMEOUT 5000 /* ms */
+#define IPHETH_RX_NCM_BUF_SIZE 65536
#define IPHETH_RX_FRAMES_MAX 1
#define IPHETH_TX_FRAMES_MAX 8
@@ -55,10 +56,20 @@
#define IPHETH_CTRL_TIMEOUT 5000 /* ms */
#define IPHETH_CMD_GET_MACADDR 0x00
+#define IPHETH_CMD_ENABLE_NCM 0x04
#define IPHETH_CMD_CARRIER_CHECK 0x45
#define IPHETH_CARRIER_ON 0x04
+#define IPHETH_NCM_DPT_DP_NUM 22
+#define IPHETH_NCM_DPT_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_dpt) + \
+ IPHETH_NCM_DPT_DP_NUM * sizeof(struct usb_ncm16_dp))
+#define IPHETH_NCM_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_hdr) + IPHETH_NCM_DPT_HEADER_SIZE)
+
+typedef void (ipheth_consumer_t)(struct usb_xfer *xfer, int idx);
+
enum {
IPHETH_BULK_TX,
IPHETH_BULK_RX,
@@ -76,6 +87,16 @@ struct ipheth_softc {
uint8_t sc_data[IPHETH_CTRL_BUF_SIZE];
uint8_t sc_iface_no;
uint8_t sc_carrier_on;
+
+ bool is_ncm;
+
+ ipheth_consumer_t *consume;
+};
+
+struct ncm_data_cache {
+ struct usb_ncm16_hdr hdr;
+ struct usb_ncm16_dpt dpt;
+ struct usb_ncm16_dp dp[IPHETH_NCM_DPT_DP_NUM];
};
#define IPHETH_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
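
Back-of-the-envelope check of the reserved header area these macros describe, assuming the standard CDC NCM16 wire layouts (12-byte NTH16 header, 8-byte NDP16 table header, 4-byte pointer entries; sizes taken from the CDC NCM 1.0 spec and assumed to match usb_cdc.h):

	/* 12 + 8 + 22 * 4 = 108: datagrams accepted by the RX path must
	 * start at or beyond this offset in the 64 KiB transfer buffer. */
	_Static_assert(12 + 8 + 22 * 4 == 108, "IPHETH_NCM_HEADER_SIZE");
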
diff --git a/sys/dev/usb/net/if_umb.c b/sys/dev/usb/net/if_umb.c
index 5703bc03dd39..b1082b117259 100644
--- a/sys/dev/usb/net/if_umb.c
+++ b/sys/dev/usb/net/if_umb.c
@@ -177,9 +177,7 @@ static void umb_ncm_setup(struct umb_softc *, struct usb_config *);
static void umb_close_bulkpipes(struct umb_softc *);
static int umb_ioctl(if_t , u_long, caddr_t);
static void umb_init(void *);
-#ifdef DEV_NETMAP
static void umb_input(if_t , struct mbuf *);
-#endif
static int umb_output(if_t , struct mbuf *,
const struct sockaddr *, struct route *);
static void umb_start(if_t );
@@ -585,9 +583,7 @@ umb_attach_task(struct usb_proc_msg *msg)
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_POINTOPOINT);
if_setioctlfn(ifp, umb_ioctl);
-#ifdef DEV_NETMAP
if_setinputfn(ifp, umb_input);
-#endif
if_setoutputfn(ifp, umb_output);
if_setstartfn(ifp, umb_start);
if_setinitfn(ifp, umb_init);
@@ -666,7 +662,7 @@ umb_ncm_setup(struct umb_softc *sc, struct usb_config * config)
struct ncm_ntb_parameters np;
usb_error_t error;
- /* Query NTB tranfers sizes */
+	/* Query NTB transfer sizes */
req.bmRequestType = UT_READ_CLASS_INTERFACE;
req.bRequest = NCM_GET_NTB_PARAMETERS;
USETW(req.wValue, 0);
diff --git a/sys/dev/usb/serial/udbc.c b/sys/dev/usb/serial/udbc.c
new file mode 100644
index 000000000000..d7ca6b25bf32
--- /dev/null
+++ b/sys/dev/usb/serial/udbc.c
@@ -0,0 +1,404 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-NetBSD
+ *
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * Copyright (c) 2016-2024 Hiroki Sato <hrs@FreeBSD.org>
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/condvar.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/priv.h>
+#include <sys/queue.h>
+#include <sys/stddef.h>
+#include <sys/stdint.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usb_ioctl.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usbdi_util.h>
+#include <dev/usb/usb_core.h>
+
+#include "usbdevs.h"
+
+#define USB_DEBUG_VAR udbc_debug
+#include <dev/usb/usb_process.h>
+#include <dev/usb/serial/usb_serial.h>
+#include <dev/usb/usb_debug.h>
+
+static SYSCTL_NODE(_hw_usb, OID_AUTO, udbc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "USB DbC Client");
+
+#ifdef USB_DEBUG
+static int udbc_debug = 0;
+SYSCTL_INT(_hw_usb_udbc, OID_AUTO, debug, CTLFLAG_RWTUN, &udbc_debug, 0,
+ "Debug level");
+#endif
+
+#define UDBC_CONFIG_INDEX 0
+
+#define UDBC_IBUFSIZE 1024
+#define UDBC_OBUFSIZE 1024
+
+enum {
+ UDBC_BULK_DT_WR,
+ UDBC_BULK_DT_RD,
+	UDBC_N_TRANSFER,	/* number of endpoints */
+};
+
+struct udbc_softc {
+ struct ucom_super_softc sc_super_ucom;
+ struct ucom_softc sc_ucom;
+
+ struct usb_device *sc_udev;
+ struct usb_xfer *sc_xfer[UDBC_N_TRANSFER];
+ device_t sc_dev;
+ struct mtx sc_mtx;
+
+ uint32_t sc_unit;
+};
+
+/* prototypes */
+
+static device_probe_t udbc_probe;
+static device_attach_t udbc_attach;
+static device_detach_t udbc_detach;
+static void udbc_free_softc(struct udbc_softc *);
+
+static usb_callback_t udbc_write_callback;
+static usb_callback_t udbc_read_callback;
+
+static void udbc_free(struct ucom_softc *);
+static void udbc_cfg_open(struct ucom_softc *);
+static void udbc_cfg_close(struct ucom_softc *);
+static int udbc_pre_param(struct ucom_softc *, struct termios *);
+static int udbc_ioctl(struct ucom_softc *, uint32_t, caddr_t, int,
+ struct thread *);
+static void udbc_start_read(struct ucom_softc *);
+static void udbc_stop_read(struct ucom_softc *);
+static void udbc_start_write(struct ucom_softc *);
+static void udbc_stop_write(struct ucom_softc *);
+static void udbc_poll(struct ucom_softc *ucom);
+
+static const struct usb_config udbc_config[UDBC_N_TRANSFER] = {
+ [UDBC_BULK_DT_WR] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = UDBC_OBUFSIZE,
+ .flags = {.pipe_bof = 1,},
+ .callback = &udbc_write_callback,
+ },
+
+ [UDBC_BULK_DT_RD] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = UDBC_IBUFSIZE,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .callback = &udbc_read_callback,
+ },
+};
+
+static const struct ucom_callback udbc_callback = {
+ .ucom_cfg_open = &udbc_cfg_open,
+ .ucom_cfg_close = &udbc_cfg_close,
+ .ucom_pre_param = &udbc_pre_param,
+ .ucom_ioctl = &udbc_ioctl,
+ .ucom_start_read = &udbc_start_read,
+ .ucom_stop_read = &udbc_stop_read,
+ .ucom_start_write = &udbc_start_write,
+ .ucom_stop_write = &udbc_stop_write,
+ .ucom_poll = &udbc_poll,
+ .ucom_free = &udbc_free,
+};
+
+static device_method_t udbc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, udbc_probe),
+ DEVMETHOD(device_attach, udbc_attach),
+ DEVMETHOD(device_detach, udbc_detach),
+ DEVMETHOD_END
+};
+
+static int
+udbc_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+ if (uaa->info.bConfigIndex != UDBC_CONFIG_INDEX)
+ return (ENXIO);
+ if (uaa->info.bInterfaceClass != UICLASS_DIAGNOSTIC)
+ return (ENXIO);
+	if (uaa->info.bDeviceProtocol != 0x00)	/* protocol 1 == GNU GDB */
+ return (ENXIO);
+
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+udbc_attach(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct udbc_softc *sc = device_get_softc(dev);
+ int error;
+
+ DPRINTF("\n");
+
+ sc->sc_udev = uaa->device;
+ sc->sc_dev = dev;
+ sc->sc_unit = device_get_unit(dev);
+
+ device_set_usb_desc(dev);
+ mtx_init(&sc->sc_mtx, "udbc", NULL, MTX_DEF);
+ ucom_ref(&sc->sc_super_ucom);
+
+ sc->sc_ucom.sc_portno = 0;
+
+ error = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex,
+ sc->sc_xfer, udbc_config, UDBC_N_TRANSFER, sc, &sc->sc_mtx);
+
+ if (error) {
+ device_printf(dev,
+ "allocating USB transfers failed\n");
+ goto detach;
+ }
+ /* clear stall at first run */
+ mtx_lock(&sc->sc_mtx);
+ usbd_xfer_set_stall(sc->sc_xfer[UDBC_BULK_DT_WR]);
+ usbd_xfer_set_stall(sc->sc_xfer[UDBC_BULK_DT_RD]);
+ mtx_unlock(&sc->sc_mtx);
+
+ error = ucom_attach(&sc->sc_super_ucom, &sc->sc_ucom, 1, sc,
+ &udbc_callback, &sc->sc_mtx);
+ if (error)
+ goto detach;
+ ucom_set_pnpinfo_usb(&sc->sc_super_ucom, dev);
+
+ return (0); /* success */
+
+detach:
+ udbc_detach(dev);
+ return (ENXIO);
+}
+
+static int
+udbc_detach(device_t dev)
+{
+ struct udbc_softc *sc = device_get_softc(dev);
+
+ ucom_detach(&sc->sc_super_ucom, &sc->sc_ucom);
+ usbd_transfer_unsetup(sc->sc_xfer, UDBC_N_TRANSFER);
+
+ device_claim_softc(dev);
+
+ udbc_free_softc(sc);
+
+ return (0);
+}
+
+UCOM_UNLOAD_DRAIN(udbc);
+
+static void
+udbc_free_softc(struct udbc_softc *sc)
+{
+ if (ucom_unref(&sc->sc_super_ucom)) {
+ mtx_destroy(&sc->sc_mtx);
+ device_free_softc(sc);
+ }
+}
+
+static void
+udbc_free(struct ucom_softc *ucom)
+{
+ udbc_free_softc(ucom->sc_parent);
+}
+
+static void
+udbc_cfg_open(struct ucom_softc *ucom)
+{
+ /*
+ * This do-nothing open routine exists for the sole purpose of this
+ * DPRINTF() so that you can see the point at which open gets called
+ * when debugging is enabled.
+ */
+ DPRINTF("\n");
+}
+
+static void
+udbc_cfg_close(struct ucom_softc *ucom)
+{
+ /*
+ * This do-nothing close routine exists for the sole purpose of this
+ * DPRINTF() so that you can see the point at which close gets called
+ * when debugging is enabled.
+ */
+ DPRINTF("\n");
+}
+
+static void
+udbc_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct udbc_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ uint32_t buflen;
+
+ DPRINTFN(3, "\n");
+
+ switch (USB_GET_STATE(xfer)) {
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ }
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
+ pc = usbd_xfer_get_frame(xfer, 0);
+ if (ucom_get_data(&sc->sc_ucom, pc, 0, UDBC_OBUFSIZE,
+ &buflen) == 0)
+ break;
+ if (buflen != 0) {
+ usbd_xfer_set_frame_len(xfer, 0, buflen);
+ usbd_transfer_submit(xfer);
+ }
+ break;
+ }
+}
+
+static void
+udbc_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct udbc_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ int buflen;
+
+ DPRINTFN(3, "\n");
+
+ usbd_xfer_status(xfer, &buflen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ pc = usbd_xfer_get_frame(xfer, 0);
+ ucom_put_data(&sc->sc_ucom, pc, 0, buflen);
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ return;
+ }
+}
+
+static int
+udbc_pre_param(struct ucom_softc *ucom, struct termios *t)
+{
+ DPRINTF("\n");
+
+ return (0);
+}
+
+static int
+udbc_ioctl(struct ucom_softc *ucom, uint32_t cmd, caddr_t data, int flag,
+ struct thread *td)
+{
+ return (ENOIOCTL);
+}
+
+static void
+udbc_start_read(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_start(sc->sc_xfer[UDBC_BULK_DT_RD]);
+}
+
+static void
+udbc_stop_read(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_stop(sc->sc_xfer[UDBC_BULK_DT_RD]);
+}
+
+static void
+udbc_start_write(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_start(sc->sc_xfer[UDBC_BULK_DT_WR]);
+}
+
+static void
+udbc_stop_write(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_stop(sc->sc_xfer[UDBC_BULK_DT_WR]);
+}
+
+static void
+udbc_poll(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_poll(sc->sc_xfer, UDBC_N_TRANSFER);
+}
+
+static driver_t udbc_driver = {
+ .name = "udbc",
+ .methods = udbc_methods,
+ .size = sizeof(struct udbc_softc),
+};
+
+DRIVER_MODULE(udbc, uhub, udbc_driver, NULL, NULL);
+MODULE_DEPEND(udbc, ucom, 1, 1, 1);
+MODULE_DEPEND(udbc, usb, 1, 1, 1);
+MODULE_VERSION(udbc, 1);
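
Once udbc(4) attaches, ucom(4) exposes the DbC pipe as an ordinary serial node. A hedged userland sketch; the unit number is an assumption, and since udbc_pre_param() accepts any line parameters, the termios settings below only affect the local tty layer:

	#include <fcntl.h>
	#include <stdio.h>
	#include <termios.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct termios t;
		int fd = open("/dev/cuaU0", O_RDWR);	/* assumed unit */

		if (fd < 0) {
			perror("open");
			return (1);
		}
		tcgetattr(fd, &t);
		cfmakeraw(&t);			/* raw, 8N1 */
		tcsetattr(fd, TCSANOW, &t);
		dprintf(fd, "hello over DbC\r\n");
		close(fd);
		return (0);
	}
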
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 60c2d6745b3f..f0989972f49f 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -3111,3 +3111,51 @@ usbd_get_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep)
{
return (ep->ep_mode);
}
+
+/*------------------------------------------------------------------------*
+ * usbd_fill_deviceinfo
+ *
+ * This function dumps information about a USB device to the
+ * structure pointed to by the "di" argument.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usbd_fill_deviceinfo(struct usb_device *udev, struct usb_device_info *di)
+{
+ struct usb_device *hub;
+
+ bzero(di, sizeof(di[0]));
+
+ di->udi_bus = device_get_unit(udev->bus->bdev);
+ di->udi_addr = udev->address;
+ di->udi_index = udev->device_index;
+ strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
+ strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
+ strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
+ usb_printbcd(di->udi_release, sizeof(di->udi_release),
+ UGETW(udev->ddesc.bcdDevice));
+ di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
+ di->udi_productNo = UGETW(udev->ddesc.idProduct);
+ di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
+ di->udi_class = udev->ddesc.bDeviceClass;
+ di->udi_subclass = udev->ddesc.bDeviceSubClass;
+ di->udi_protocol = udev->ddesc.bDeviceProtocol;
+ di->udi_config_no = udev->curr_config_no;
+ di->udi_config_index = udev->curr_config_index;
+ di->udi_power = udev->flags.self_powered ? 0 : udev->power;
+ di->udi_speed = udev->speed;
+ di->udi_mode = udev->flags.usb_mode;
+ di->udi_power_mode = udev->power_mode;
+ di->udi_suspended = udev->flags.peer_suspended;
+
+ hub = udev->parent_hub;
+ if (hub) {
+ di->udi_hubaddr = hub->address;
+ di->udi_hubindex = hub->device_index;
+ di->udi_hubport = udev->port_no;
+ }
+ return (0);
+}
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index c0af27d77e5d..ccb0b2184ec4 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -831,42 +831,7 @@ ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
int
ugen_fill_deviceinfo(struct usb_fifo *f, struct usb_device_info *di)
{
- struct usb_device *udev;
- struct usb_device *hub;
-
- udev = f->udev;
-
- bzero(di, sizeof(di[0]));
-
- di->udi_bus = device_get_unit(udev->bus->bdev);
- di->udi_addr = udev->address;
- di->udi_index = udev->device_index;
- strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
- strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
- strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
- usb_printbcd(di->udi_release, sizeof(di->udi_release),
- UGETW(udev->ddesc.bcdDevice));
- di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
- di->udi_productNo = UGETW(udev->ddesc.idProduct);
- di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
- di->udi_class = udev->ddesc.bDeviceClass;
- di->udi_subclass = udev->ddesc.bDeviceSubClass;
- di->udi_protocol = udev->ddesc.bDeviceProtocol;
- di->udi_config_no = udev->curr_config_no;
- di->udi_config_index = udev->curr_config_index;
- di->udi_power = udev->flags.self_powered ? 0 : udev->power;
- di->udi_speed = udev->speed;
- di->udi_mode = udev->flags.usb_mode;
- di->udi_power_mode = udev->power_mode;
- di->udi_suspended = udev->flags.peer_suspended;
-
- hub = udev->parent_hub;
- if (hub) {
- di->udi_hubaddr = hub->address;
- di->udi_hubindex = hub->device_index;
- di->udi_hubport = udev->port_no;
- }
- return (0);
+ return (usbd_fill_deviceinfo(f->udev, di));
}
int
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index e3509862ef54..ee9d8ab0c9bb 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -954,7 +954,8 @@ done:
* packet. This function is called having the "bus_mtx" locked.
*------------------------------------------------------------------------*/
void
-uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len)
+uhub_root_intr(struct usb_bus *bus,
+ const uint8_t *ptr __unused, uint8_t len __unused)
{
USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 08d130aa2868..0826d9f078c4 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -38,6 +38,7 @@ struct usb_process;
struct usb_proc_msg;
struct usb_mbuf;
struct usb_fs_privdata;
+struct usb_device_info;
struct mbuf;
typedef enum { /* keep in sync with usb_errstr_table */
@@ -587,6 +588,8 @@ usb_error_t usbd_set_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep, uint8_t ep_mode);
uint8_t usbd_get_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep);
+int usbd_fill_deviceinfo(struct usb_device *udev,
+ struct usb_device_info *di);
const struct usb_device_id *usbd_lookup_id_by_info(
const struct usb_device_id *id, usb_size_t sizeof_id,
diff --git a/sys/dev/usb/wlan/if_mtw.c b/sys/dev/usb/wlan/if_mtw.c
index 137590651948..6967e5081542 100644
--- a/sys/dev/usb/wlan/if_mtw.c
+++ b/sys/dev/usb/wlan/if_mtw.c
@@ -638,6 +638,7 @@ mtw_attach(device_t self)
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -3131,6 +3132,8 @@ mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ ieee80211_output_seqno_assign(ni, -1, m);
+
mtw_set_tx_desc(sc, data);
/*
@@ -3390,6 +3393,8 @@ mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
break;
data->ridx = ridx;
+ ieee80211_output_seqno_assign(ni, -1, m);
+
mtw_set_tx_desc(sc, data);
MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
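
The same two-line pattern recurs across the wlan drivers touched below (rsu, run, uath, upgt, ural, urtw, zyd): advertise sequence-number offload at attach time, then assign the number in each TX path before the descriptor is built. Condensed sketch:

	/* In the driver's attach routine: the driver, not net80211,
	 * owns 802.11 sequence-number assignment from here on. */
	ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;

	/* In every TX path, just before building the TX descriptor
	 * (-1 matches all call sites in this change and is assumed to
	 * mean "derive the TID from the frame"): */
	ieee80211_output_seqno_assign(ni, -1, m);
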
diff --git a/sys/dev/usb/wlan/if_rsu.c b/sys/dev/usb/wlan/if_rsu.c
index 07f7b6f3a708..e976948f6849 100644
--- a/sys/dev/usb/wlan/if_rsu.c
+++ b/sys/dev/usb/wlan/if_rsu.c
@@ -371,18 +371,16 @@ rsu_update_chw(struct ieee80211com *ic)
/*
* notification from net80211 that it'd like to do A-MPDU on the given TID.
- *
- * Note: this actually hangs traffic at the present moment, so don't use it.
- * The firmware debug does indiciate it's sending and establishing a TX AMPDU
- * session, but then no traffic flows.
*/
static int
rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
-#if 0
struct rsu_softc *sc = ni->ni_ic->ic_softc;
struct r92s_add_ba_req req;
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: called, tid=%d\n",
+ __func__, tap->txa_tid);
+
/* Don't enable if it's requested or running */
if (IEEE80211_AMPDU_REQUESTED(tap))
return (0);
@@ -397,23 +395,30 @@ rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
return (0);
/* Send the firmware command */
- RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: establishing AMPDU TX for TID %d\n",
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU,
+ "%s: establishing AMPDU TX for TID %d\n",
__func__,
tap->txa_tid);
RSU_LOCK(sc);
- if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 1) {
+ if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 0) {
RSU_UNLOCK(sc);
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: AMPDU TX cmd failure\n",
+ __func__);
/* Mark failure */
- (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0);
+ ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0);
+		/* Return 0, we're driving this ourselves */
return (0);
}
RSU_UNLOCK(sc);
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: AMPDU TX cmd success\n",
+ __func__);
+
/* Mark success; we don't get any further notifications */
- (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1);
-#endif
- /* Return 0, we're driving this ourselves */
+ ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1);
+
+	/* Return 0, we're driving this ourselves */
return (0);
}
@@ -563,9 +568,7 @@ rsu_attach(device_t self)
/* Enable basic HT */
ic->ic_htcaps = IEEE80211_HTC_HT |
-#if 0
IEEE80211_HTC_AMPDU |
-#endif
IEEE80211_HTC_AMSDU |
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
@@ -576,6 +579,7 @@ rsu_attach(device_t self)
ic->ic_rxstream = sc->sc_nrxstream;
}
ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
rsu_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1537,6 +1541,10 @@ rsu_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
is_checked = 1;
k->wk_flags |= IEEE80211_KEY_SWCRYPT;
} else
+ /*
+ * TODO: should allocate these from the CAM space;
+ * skipping over the fixed slots and _BC / _BSS.
+ */
*keyix = R92S_MACID_BSS;
}
@@ -2166,7 +2174,7 @@ rsu_event_addba_req_report(struct rsu_softc *sc, uint8_t *buf, int len)
__func__,
ether_sprintf(ba->mac_addr),
(int) ba->tid,
- (int) le16toh(ba->ssn));
+ (int) le16toh(ba->ssn) >> 4);
/* XXX do node lookup; this is STA specific */
@@ -2212,6 +2220,11 @@ rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
if (vap->iv_state == IEEE80211_S_AUTH)
rsu_event_join_bss(sc, buf, len);
break;
+
+ /* TODO: what about R92S_EVT_ADD_STA? and decoding macid? */
+	/* It is likely required for IBSS/AP mode */
+
+ /* TODO: should I be doing this transition in AP mode? */
case R92S_EVT_DEL_STA:
RSU_DPRINTF(sc, RSU_DEBUG_FWCMD | RSU_DEBUG_STATE,
"%s: disassociated from %s\n", __func__,
@@ -2229,6 +2242,7 @@ rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
break;
case R92S_EVT_FWDBG:
buf[60] = '\0';
+ /* TODO: some are \n terminated, some aren't, sigh */
RSU_DPRINTF(sc, RSU_DEBUG_FWDBG, "FWDBG: %s\n", (char *)buf);
break;
case R92S_EVT_ADDBA_REQ_REPORT:
@@ -2782,6 +2796,9 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
if (rate != 0)
ridx = rate2ridx(rate);
+ /* Assign sequence number, A-MPDU or otherwise */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
@@ -2838,8 +2855,10 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
SM(R92S_TXDW0_OFFSET, sizeof(*txd)) |
R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG);
+ /* TODO: correct macid here? It should be in the node */
txd->txdw1 |= htole32(
SM(R92S_TXDW1_MACID, R92S_MACID_BSS) | SM(R92S_TXDW1_QSEL, qid));
+
if (!hasqos)
txd->txdw1 |= htole32(R92S_TXDW1_NONQOS);
if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWENCRYPT)) {
@@ -2860,8 +2879,13 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
SM(R92S_TXDW1_CIPHER, cipher) |
SM(R92S_TXDW1_KEYIDX, k->wk_keyix));
}
- /* XXX todo: set AGGEN bit if appropriate? */
- txd->txdw2 |= htole32(R92S_TXDW2_BK);
+
+ /*
+ * Note: no need to set TXDW2_AGGEN/TXDW2_BK to mark
+ * A-MPDU and non-AMPDU candidates; the firmware will
+ * handle this for us.
+ */
+
if (ismcast)
txd->txdw2 |= htole32(R92S_TXDW2_BMCAST);
@@ -2880,8 +2904,11 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
}
/*
- * Firmware will use and increment the sequence number for the
- * specified priority.
+ * Pass in prio here, NOT the sequence number.
+ *
+ * The hardware is in theory incrementing sequence numbers
+ * for us, but I haven't yet figured out exactly when/how
+ * it's supposed to work.
*/
txd->txdw3 |= htole32(SM(R92S_TXDW3_SEQ, prio));
@@ -3481,7 +3508,8 @@ rsu_load_firmware(struct rsu_softc *sc)
dmem.vcs_mode = R92S_VCS_MODE_RTS_CTS;
dmem.turbo_mode = 0;
dmem.bw40_en = !! (ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40);
- dmem.amsdu2ampdu_en = !! (sc->sc_ht);
+ /* net80211 handles AMSDUs just fine */
+ dmem.amsdu2ampdu_en = 0;
dmem.ampdu_en = !! (sc->sc_ht);
dmem.agg_offload = !! (sc->sc_ht);
dmem.qos_en = 1;
diff --git a/sys/dev/usb/wlan/if_rsureg.h b/sys/dev/usb/wlan/if_rsureg.h
index fb706a4d9b1a..e2074e1dd2ad 100644
--- a/sys/dev/usb/wlan/if_rsureg.h
+++ b/sys/dev/usb/wlan/if_rsureg.h
@@ -593,7 +593,14 @@ struct r92s_event_join_bss {
struct ndis_wlan_bssid_ex bss;
} __packed;
-#define R92S_MACID_BSS 5 /* XXX hardcoded somewhere */
+/*
+ * This is hard-coded in the firmware for a STA mode
+ * BSS join. If you turn on FWDEBUG, you'll see this
+ * in the logs:
+ *
+ * rsu0: FWDBG: mac id #5: 0000005b, 000fffff, 00000000
+ */
+#define R92S_MACID_BSS 5
/* Rx MAC descriptor. */
struct r92s_rx_stat {
diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c
index 97c790dd5b81..147aa4044057 100644
--- a/sys/dev/usb/wlan/if_run.c
+++ b/sys/dev/usb/wlan/if_run.c
@@ -882,6 +882,7 @@ run_attach(device_t self)
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
run_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -3522,6 +3523,9 @@ run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+	/* Assign sequence number now, with or without A-MPDU TX (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
/*
@@ -3627,6 +3631,9 @@ run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+	/* Assign sequence number now, with or without A-MPDU TX (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
@@ -3771,6 +3778,9 @@ run_tx_param(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
break;
data->ridx = ridx;
+	/* Assign sequence number now, with or without A-MPDU TX (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
@@ -6416,6 +6426,10 @@ run_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
/* For now, no A-MPDU TX support in the driver */
+ /*
+ * TODO: maybe we needed to enable seqno generation too?
+ * What other TX desc bits are missing/needed?
+ */
return (0);
}
diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c
index b49c75032d77..cc303e565bca 100644
--- a/sys/dev/usb/wlan/if_uath.c
+++ b/sys/dev/usb/wlan/if_uath.c
@@ -432,6 +432,8 @@ uath_attach(device_t dev)
/* put a regulatory domain to reveal informations. */
uath_regdomain = sc->sc_devcap.regDomain;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
@@ -1548,6 +1550,8 @@ uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
ieee80211_radiotap_tx(vap, m0);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c
index 642631ae34b7..1ab833301b3c 100644
--- a/sys/dev/usb/wlan/if_upgt.c
+++ b/sys/dev/usb/wlan/if_upgt.c
@@ -354,6 +354,8 @@ upgt_attach(device_t dev)
ic->ic_transmit = upgt_transmit;
ic->ic_parent = upgt_parent;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
UPGT_TX_RADIOTAP_PRESENT,
@@ -2116,6 +2118,9 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
upgt_set_led(sc, UPGT_LED_BLINK);
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/*
* Software crypto.
*/
diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c
index 260d75a9821d..adef924a085c 100644
--- a/sys/dev/usb/wlan/if_ural.c
+++ b/sys/dev/usb/wlan/if_ural.c
@@ -473,6 +473,8 @@ ural_attach(device_t self)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ural_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1073,6 +1075,8 @@ ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
@@ -1229,6 +1233,8 @@ ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
rate = ieee80211_node_get_txrate_dot11rate(ni);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c
index 439faeefc408..86cf4c653ae7 100644
--- a/sys/dev/usb/wlan/if_urtw.c
+++ b/sys/dev/usb/wlan/if_urtw.c
@@ -884,6 +884,8 @@ urtw_attach(device_t dev)
/* XXX TODO: setup regdomain if URTW_EPROM_CHANPLAN_BY_HW bit is set.*/
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
urtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1699,6 +1701,10 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/*
* Software crypto.
*/
diff --git a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c
index 1a698caef3c5..7affdcdce089 100644
--- a/sys/dev/usb/wlan/if_zyd.c
+++ b/sys/dev/usb/wlan/if_zyd.c
@@ -384,6 +384,8 @@ zyd_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
zyd_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2463,6 +2465,8 @@ zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
}
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c
index 5a81c8a24779..fe531fced998 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.c
+++ b/sys/dev/virtio/mmio/virtio_mmio.c
@@ -53,7 +53,6 @@
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>
-#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
@@ -79,7 +78,6 @@ static int vtmmio_alloc_virtqueues(device_t, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
-static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
@@ -104,29 +102,11 @@ static void vtmmio_vq_intr(void *);
* I/O port read/write wrappers.
*/
#define vtmmio_write_config_1(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_1((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_1((sc)->res[0], (o), (v))
#define vtmmio_write_config_2(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_2((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_2((sc)->res[0], (o), (v))
#define vtmmio_write_config_4(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_4((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_4((sc)->res[0], (o), (v))
#define vtmmio_read_config_1(sc, o) \
bus_read_1((sc)->res[0], (o))
@@ -157,7 +137,6 @@ static device_method_t vtmmio_methods[] = {
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
- DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
@@ -220,19 +199,9 @@ vtmmio_setup_intr(device_t dev, enum intr_type type)
{
struct vtmmio_softc *sc;
int rid;
- int err;
sc = device_get_softc(dev);
- if (sc->platform != NULL) {
- err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
- vtmmio_vq_intr, sc);
- if (err == 0) {
- /* Okay we have backend-specific interrupts */
- return (0);
- }
- }
-
rid = 0;
sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
@@ -597,17 +566,6 @@ vtmmio_stop(device_t dev)
vtmmio_reset(device_get_softc(dev));
}
-static void
-vtmmio_poll(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (sc->platform != NULL)
- VIRTIO_MMIO_POLL(sc->platform);
-}
-
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
diff --git a/sys/dev/virtio/mmio/virtio_mmio.h b/sys/dev/virtio/mmio/virtio_mmio.h
index ac6a96c1c7fe..edcbf0519acc 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.h
+++ b/sys/dev/virtio/mmio/virtio_mmio.h
@@ -37,7 +37,6 @@ struct vtmmio_virtqueue;
struct vtmmio_softc {
device_t dev;
- device_t platform;
struct resource *res[2];
uint64_t vtmmio_features;
diff --git a/sys/dev/virtio/mmio/virtio_mmio_fdt.c b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
index 7fba8aad8db8..bb9ea8efbaeb 100644
--- a/sys/dev/virtio/mmio/virtio_mmio_fdt.c
+++ b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
@@ -63,12 +63,10 @@
#include <dev/virtio/mmio/virtio_mmio.h>
static int vtmmio_fdt_probe(device_t);
-static int vtmmio_fdt_attach(device_t);
static device_method_t vtmmio_fdt_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtmmio_fdt_probe),
- DEVMETHOD(device_attach, vtmmio_fdt_attach),
DEVMETHOD_END
};
@@ -93,48 +91,3 @@ vtmmio_fdt_probe(device_t dev)
return (vtmmio_probe(dev));
}
-
-static int
-vtmmio_setup_platform(device_t dev, struct vtmmio_softc *sc)
-{
- phandle_t platform_node;
- struct fdt_ic *ic;
- phandle_t xref;
- phandle_t node;
-
- sc->platform = NULL;
-
- if ((node = ofw_bus_get_node(dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, "platform", &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- platform_node = OF_node_from_xref(xref);
-
- SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
- if (ic->iph == platform_node) {
- sc->platform = ic->dev;
- break;
- }
- }
-
- if (sc->platform == NULL) {
- /* No platform-specific device. Ignore it. */
- }
-
- return (0);
-}
-
-static int
-vtmmio_fdt_attach(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
- vtmmio_setup_platform(dev, sc);
-
- return (vtmmio_attach(dev));
-}
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 2ff9be9680b8..471c6b3714b2 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -28,6 +28,9 @@
/* Driver for VirtIO network devices. */
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
@@ -82,9 +85,6 @@
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif
@@ -133,12 +133,14 @@ static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
+#if defined(INET) || defined(INET6)
static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
-static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
+ bool, int, struct virtio_net_hdr *);
+static void vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+ int);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
+#endif
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
@@ -279,7 +281,7 @@ static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
&vtnet_tso_disable, 0, "Disables TSO");
-static int vtnet_lro_disable = 0;
+static int vtnet_lro_disable = 1;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
&vtnet_lro_disable, 0, "Disables hardware LRO");
@@ -1151,11 +1153,9 @@ vtnet_setup_interface(struct vtnet_softc *sc)
}
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
- if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
-#ifdef notyet
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
+ if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0);
-#endif
if (vtnet_tunable_int(sc, "fixup_needs_csum",
vtnet_fixup_needs_csum) != 0)
@@ -1178,6 +1178,7 @@ vtnet_setup_interface(struct vtnet_softc *sc)
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
/*
* Capabilities after here are not enabled by default.
@@ -1344,14 +1345,22 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
VTNET_CORE_LOCK_ASSERT(sc);
- if (mask & IFCAP_TXCSUM)
+ if (mask & IFCAP_TXCSUM) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
- if (mask & IFCAP_TXCSUM_IPV6)
+ if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD);
+ }
+ if (mask & IFCAP_TXCSUM_IPV6) {
if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
- if (mask & IFCAP_TSO4)
+ if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD_IPV6);
+ }
+ if (mask & IFCAP_TSO4) {
if_togglecapenable(ifp, IFCAP_TSO4);
- if (mask & IFCAP_TSO6)
+ if_togglehwassist(ifp, IFCAP_TSO4);
+ }
+ if (mask & IFCAP_TSO6) {
if_togglecapenable(ifp, IFCAP_TSO6);
+ if_togglehwassist(ifp, IFCAP_TSO6);
+ }
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
/*
@@ -1367,27 +1376,20 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
IFCAP_LRO && vtnet_software_lro(sc))
reinit = update = 0;
-
- if (mask & IFCAP_RXCSUM)
+ /*
+ * VirtIO does not distinguish between receive checksum offload
+ * for IPv4 and IPv6 packets, so treat them as a pair.
+ */
+ if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
if_togglecapenable(ifp, IFCAP_RXCSUM);
- if (mask & IFCAP_RXCSUM_IPV6)
if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
+ }
if (mask & IFCAP_LRO)
if_togglecapenable(ifp, IFCAP_LRO);
-
- /*
- * VirtIO does not distinguish between IPv4 and IPv6 checksums
- * so treat them as a pair. Guest TSO (LRO) requires receive
- * checksums.
- */
- if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
- if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
-#ifdef notyet
- if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
-#endif
- } else
- if_setcapenablebit(ifp, 0,
- (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO));
+ /* Both SW and HW TCP LRO require receive checksum offload. */
+ if ((if_getcapenable(ifp) &
+ (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
+ if_setcapenablebit(ifp, 0, IFCAP_LRO);
}
if (mask & IFCAP_VLAN_HWFILTER) {
@@ -1760,164 +1762,165 @@ vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
return (error);
}
+#if defined(INET) || defined(INET6)
static int
-vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
- int hoff, struct virtio_net_hdr *hdr)
+vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, bool isipv6,
+ int protocol, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
- int error;
- sc = rxq->vtnrx_sc;
+ /*
+	 * The packet likely comes from another VM on the same host, or from
+	 * the host itself, which already performed checksum offloading, so
+	 * Tx/Rx is basically a memcpy and the checksum so far has little value.
+ */
+
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
/*
- * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
- * not have an analogous CSUM flag. The checksum has been validated,
- * but is incomplete (TCP/UDP pseudo header).
- *
- * The packet is likely from another VM on the same host that itself
- * performed checksum offloading so Tx/Rx is basically a memcpy and
- * the checksum has little value.
- *
- * Default to receiving the packet as-is for performance reasons, but
- * this can cause issues if the packet is to be forwarded because it
- * does not contain a valid checksum. This patch may be helpful:
- * https://reviews.freebsd.org/D6611. In the meantime, have the driver
- * compute the checksum if requested.
- *
- * BMV: Need to add an CSUM_PARTIAL flag?
+	 * If the user doesn't want us to fix up the checksum here, defer the
+	 * computation to the stack by setting the corresponding mbuf flag
+	 * (e.g., CSUM_TCP).
*/
+ sc = rxq->vtnrx_sc;
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
- error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
- return (error);
+ switch (protocol) {
+ case IPPROTO_TCP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_TCP_IPV6 : CSUM_TCP);
+ break;
+ case IPPROTO_UDP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_UDP_IPV6 : CSUM_UDP);
+ break;
+ }
+ m->m_pkthdr.csum_data = hdr->csum_offset;
+ return (0);
}
/*
* Compute the checksum in the driver so the packet will contain a
* valid checksum. The checksum is at csum_offset from csum_start.
*/
- switch (etype) {
-#if defined(INET) || defined(INET6)
- case ETHERTYPE_IP:
- case ETHERTYPE_IPV6: {
- int csum_off, csum_end;
- uint16_t csum;
+ int csum_off, csum_end;
+ uint16_t csum;
- csum_off = hdr->csum_start + hdr->csum_offset;
- csum_end = csum_off + sizeof(uint16_t);
-
- /* Assume checksum will be in the first mbuf. */
- if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
- return (1);
+ csum_off = hdr->csum_start + hdr->csum_offset;
+ csum_end = csum_off + sizeof(uint16_t);
- /*
- * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
- * checksum and write it at the specified offset. We could
- * try to verify the packet: csum_start should probably
- * correspond to the start of the TCP/UDP header.
- *
- * BMV: Need to properly handle UDP with zero checksum. Is
- * the IPv4 header checksum implicitly validated?
- */
- csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
- *(uint16_t *)(mtodo(m, csum_off)) = csum;
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
- break;
- }
-#endif
- default:
- sc->vtnet_stats.rx_csum_bad_ethtype++;
+ /* Assume checksum will be in the first mbuf. */
+ if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) {
+ sc->vtnet_stats.rx_csum_bad_offset++;
return (1);
}
+ /*
+ * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
+ * checksum and write it at the specified offset. We could
+ * try to verify the packet: csum_start should probably
+ * correspond to the start of the TCP/UDP header.
+ *
+ * BMV: Need to properly handle UDP with zero checksum. Is
+ * the IPv4 header checksum implicitly validated?
+ */
+ csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
+ *(uint16_t *)(mtodo(m, csum_off)) = csum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+
return (0);
}
+static void
+vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, int protocol)
+{
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
+
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+}
+
static int
-vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
- uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
+vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
{
-#if 0
+ const struct ether_header *eh;
struct vtnet_softc *sc;
-#endif
- int protocol;
+ int hoff, protocol;
+ uint16_t etype;
+ bool isipv6;
+
+ KASSERT(hdr->flags &
+ (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID),
+ ("%s: missing checksum offloading flag %x", __func__, hdr->flags));
+
+ eh = mtod(m, const struct ether_header *);
+ etype = ntohs(eh->ether_type);
+ if (etype == ETHERTYPE_VLAN) {
+ /* TODO BMV: Handle QinQ. */
+ const struct ether_vlan_header *evh =
+ mtod(m, const struct ether_vlan_header *);
+ etype = ntohs(evh->evl_proto);
+ hoff = sizeof(struct ether_vlan_header);
+ } else
+ hoff = sizeof(struct ether_header);
-#if 0
sc = rxq->vtnrx_sc;
-#endif
+	/* Check whether the Ethernet type is IPv4 or IPv6, and get the protocol. */
switch (etype) {
#if defined(INET)
case ETHERTYPE_IP:
- if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
- protocol = IPPROTO_DONE;
- else {
+ if (__predict_false(m->m_len < hoff + sizeof(struct ip))) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ } else {
struct ip *ip = (struct ip *)(m->m_data + hoff);
protocol = ip->ip_p;
}
+ isipv6 = false;
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
- || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
- protocol = IPPROTO_DONE;
+ || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ }
+ isipv6 = true;
break;
#endif
default:
- protocol = IPPROTO_DONE;
- break;
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ return (1);
}
+ /* Check whether protocol is TCP or UDP. */
switch (protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
break;
default:
/*
* FreeBSD does not support checksum offloading of this
- * protocol. Let the stack re-verify the checksum later
- * if the protocol is supported.
+ * protocol here.
*/
-#if 0
- if_printf(sc->vtnet_ifp,
- "%s: checksum offload of unsupported protocol "
- "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
- __func__, etype, protocol, hdr->csum_start,
- hdr->csum_offset);
-#endif
- break;
+ sc->vtnet_stats.rx_csum_bad_ipproto++;
+ return (1);
}
- return (0);
-}
-
-static int
-vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
- struct virtio_net_hdr *hdr)
-{
- const struct ether_header *eh;
- int hoff;
- uint16_t etype;
-
- eh = mtod(m, const struct ether_header *);
- etype = ntohs(eh->ether_type);
- if (etype == ETHERTYPE_VLAN) {
- /* TODO BMV: Handle QinQ. */
- const struct ether_vlan_header *evh =
- mtod(m, const struct ether_vlan_header *);
- etype = ntohs(evh->evl_proto);
- hoff = sizeof(struct ether_vlan_header);
- } else
- hoff = sizeof(struct ether_header);
-
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
- return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
+ return (vtnet_rxq_csum_needs_csum(rxq, m, isipv6, protocol,
+ hdr));
else /* VIRTIO_NET_HDR_F_DATA_VALID */
- return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
+ vtnet_rxq_csum_data_valid(rxq, m, protocol);
+
+ return (0);
}
+#endif
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
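
A worked example of the csum_start/csum_offset pair the NEEDS_CSUM path above consumes. Values are assumed for a plain TCP/IPv4 frame: a 14-byte Ethernet header plus a 20-byte IP header puts csum_start at 34, and the TCP checksum field sits 16 bytes into the TCP header:

	#include <sys/types.h>
	#include <stddef.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	/* csum_offset is the offset of the checksum field within the
	 * transport header; for TCP that is th_sum, i.e. 16. The driver
	 * either computes in_cksum_skip(m, len, 34) and stores the result
	 * at byte 34 + 16 = 50, or sets CSUM_TCP with csum_data = 16 and
	 * lets the stack finish the job. */
	_Static_assert(offsetof(struct tcphdr, th_sum) == 16,
	    "TCP checksum field offset");
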
@@ -2040,10 +2043,15 @@ vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
if (hdr->flags &
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
+#if defined(INET) || defined(INET6)
if (vtnet_rxq_csum(rxq, m, hdr) == 0)
rxq->vtnrx_stats.vrxs_csum++;
else
rxq->vtnrx_stats.vrxs_csum_failed++;
+#else
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ rxq->vtnrx_stats.vrxs_csum_failed++;
+#endif
}
if (hdr->gso_size != 0) {
@@ -2497,6 +2505,10 @@ vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
hdr->csum_start = vtnet_gtoh16(sc, csum_start);
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
txq->vtntx_stats.vtxs_csum++;
+ } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
+ (proto == IPPROTO_TCP || proto == IPPROTO_UDP) &&
+ (m->m_pkthdr.csum_data == 0xFFFF)) {
+ hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
@@ -2551,8 +2563,10 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
error = sglist_append_mbuf(sg, m);
if (error) {
m = m_defrag(m, M_NOWAIT);
- if (m == NULL)
+ if (m == NULL) {
+ sc->vtnet_stats.tx_defrag_failed++;
goto fail;
+ }
*m_head = m;
sc->vtnet_stats.tx_defragged++;
@@ -2568,7 +2582,6 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
return (error);
fail:
- sc->vtnet_stats.tx_defrag_failed++;
m_freem(*m_head);
*m_head = NULL;
@@ -2609,7 +2622,8 @@ vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
m->m_flags &= ~M_VLANTAG;
}
- if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
+ if (m->m_pkthdr.csum_flags &
+ (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
@@ -3031,16 +3045,14 @@ vtnet_get_counter(if_t ifp, ift_counter cnt)
return (rxaccum.vrxs_iqdrops);
case IFCOUNTER_IERRORS:
return (rxaccum.vrxs_ierrors);
+ case IFCOUNTER_IBYTES:
+ return (rxaccum.vrxs_ibytes);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
case IFCOUNTER_OBYTES:
- if (!VTNET_ALTQ_ENABLED)
- return (txaccum.vtxs_obytes);
- /* FALLTHROUGH */
+ return (txaccum.vtxs_obytes);
case IFCOUNTER_OMCASTS:
- if (!VTNET_ALTQ_ENABLED)
- return (txaccum.vtxs_omcasts);
- /* FALLTHROUGH */
+ return (txaccum.vtxs_omcasts);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -3813,9 +3825,9 @@ vtnet_rx_filter_mac(struct vtnet_softc *sc)
if_printf(ifp, "error setting host MAC filter table\n");
out:
- if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
+ if (promisc && vtnet_set_promisc(sc, true) != 0)
if_printf(ifp, "cannot enable promiscuous mode\n");
- if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
+ if (allmulti && vtnet_set_allmulti(sc, true) != 0)
if_printf(ifp, "cannot enable all-multicast mode\n");
}
@@ -4100,21 +4112,29 @@ vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
stats = &rxq->vtnrx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ipackets, "Receive packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ibytes, "Receive bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_iqdrops, "Receive drops");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ierrors, "Receive errors");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum, "Receive checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum_failed, "Receive checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_host_lro, "Receive host segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_rescheduled,
"Receive interrupt handler rescheduled");
}
@@ -4135,17 +4155,23 @@ vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
stats = &txq->vtntx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_opackets, "Transmit packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_obytes, "Transmit bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_omcasts, "Transmit multicasts");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_csum, "Transmit checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_tso, "Transmit TCP segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_rescheduled,
"Transmit interrupt handler rescheduled");
}
@@ -4170,6 +4196,102 @@ vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
}
}
+static int
+vtnet_sysctl_rx_csum_failed(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_failed = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_failed += rxst->vrxs_csum_failed;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_failed, req));
+}
+
+static int
+vtnet_sysctl_rx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_offloaded += rxst->vrxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_rx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_task_rescheduled += rxst->vrxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_task_rescheduled, req));
+}
+
+static int
+vtnet_sysctl_tx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_csum_offloaded += txst->vtxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_tso_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_tso_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_tso_offloaded += txst->vtxs_tso;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_tso_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_task_rescheduled += txst->vtxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_task_rescheduled, req));
+}
+
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_softc *sc)
@@ -4189,69 +4311,75 @@ vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
- CTLFLAG_RD, &stats->mbuf_alloc_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->mbuf_alloc_failed,
"Mbuf cluster allocation failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
- CTLFLAG_RD, &stats->rx_frame_too_large,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_frame_too_large,
"Received frame larger than the mbuf chain");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
- CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_enq_replacement_failed,
"Enqueuing the replacement receive mbuf failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
- CTLFLAG_RD, &stats->rx_mergeable_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_mergeable_failed,
"Mergeable buffers receive failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
- CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ethtype,
"Received checksum offloaded buffer with unsupported "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
- CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ipproto,
"Received checksum offloaded buffer with incorrect IP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
- CTLFLAG_RD, &stats->rx_csum_bad_offset,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_offset,
"Received checksum offloaded buffer with incorrect offset");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
- CTLFLAG_RD, &stats->rx_csum_bad_proto,
- "Received checksum offloaded buffer with incorrect protocol");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
- CTLFLAG_RD, &stats->rx_csum_failed,
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_inaccessible_ipproto",
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_inaccessible_ipproto,
+ "Received checksum offloaded buffer with inaccessible IP protocol");
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_failed",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_failed, "QU",
"Received buffer checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
- CTLFLAG_RD, &stats->rx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_offloaded, "QU",
"Received buffer checksum offload succeeded");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
- CTLFLAG_RD, &stats->rx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_task_rescheduled, "QU",
"Times the receive interrupt task rescheduled itself");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
- CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_unknown_ethtype,
"Aborted transmit of checksum offloaded buffer with unknown "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
- CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_proto_mismatch,
"Aborted transmit of checksum offloaded buffer because mismatched "
"protocols");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
- CTLFLAG_RD, &stats->tx_tso_not_tcp,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_not_tcp,
"Aborted transmit of TSO buffer with non TCP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
- CTLFLAG_RD, &stats->tx_tso_without_csum,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_without_csum,
"Aborted transmit of TSO buffer without TCP checksum offload");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
- CTLFLAG_RD, &stats->tx_defragged,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defragged,
"Transmit mbufs defragged");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
- CTLFLAG_RD, &stats->tx_defrag_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defrag_failed,
"Aborted transmit of buffer because defrag failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
- CTLFLAG_RD, &stats->tx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_csum_offloaded, "QU",
"Offloaded checksum of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
- CTLFLAG_RD, &stats->tx_tso_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_tso_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_tso_offloaded, "QU",
"Segmentation offload of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
- CTLFLAG_RD, &stats->tx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_task_rescheduled, "QU",
"Times the transmit interrupt task rescheduled itself");
}
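
The six vtnet_sysctl_*() handlers above share one pattern: each read walks all vtnet_max_vq_pairs queues and sums the per-queue counter on demand, so the legacy top-level nodes stay coherent with the per-queue statistics. A hedged userland sketch of reading one aggregating node; the OID path "dev.vtnet.0.rx_csum_failed" is an assumption based on the driver's usual sysctl layout:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint64_t failed;
		size_t len = sizeof(failed);

		if (sysctlbyname("dev.vtnet.0.rx_csum_failed", &failed,
		    &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return (1);
		}
		printf("rx_csum_failed: %ju\n", (uintmax_t)failed);
		return (0);
	}
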
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index 0144b0f3232d..cab7ced639a7 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -46,7 +46,7 @@ struct vtnet_statistics {
uint64_t rx_csum_bad_ethtype;
uint64_t rx_csum_bad_ipproto;
uint64_t rx_csum_bad_offset;
- uint64_t rx_csum_bad_proto;
+ uint64_t rx_csum_inaccessible_ipproto;
uint64_t tx_csum_unknown_ethtype;
uint64_t tx_csum_proto_mismatch;
uint64_t tx_tso_not_tcp;
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index f938ba99ae53..3f30c8b68f4c 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -77,7 +77,7 @@ static struct virtio_feature_desc vtrnd_feature_desc[] = {
{ 0, NULL }
};
-static struct random_source random_vtrnd = {
+static const struct random_source random_vtrnd = {
.rs_ident = "VirtIO Entropy Adapter",
.rs_source = RANDOM_PURE_VIRTIO,
.rs_read = vtrnd_read,
diff --git a/sys/dev/virtio/virtio_bus_if.m b/sys/dev/virtio/virtio_bus_if.m
index 57ae90bdc917..4181b641faad 100644
--- a/sys/dev/virtio/virtio_bus_if.m
+++ b/sys/dev/virtio/virtio_bus_if.m
@@ -109,7 +109,3 @@ METHOD void write_device_config {
int len;
};
-METHOD void poll {
- device_t dev;
-};
-
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index 8cc3326dc08e..cc7a233d60ee 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -605,10 +605,8 @@ virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
void *cookie;
- VIRTIO_BUS_POLL(vq->vq_dev);
while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
cpu_spinwait();
- VIRTIO_BUS_POLL(vq->vq_dev);
}
return (cookie);
diff --git a/sys/dev/vmgenc/vmgenc_acpi.c b/sys/dev/vmgenc/vmgenc_acpi.c
index 2ad8929dfd34..18519a8e4f22 100644
--- a/sys/dev/vmgenc/vmgenc_acpi.c
+++ b/sys/dev/vmgenc/vmgenc_acpi.c
@@ -56,6 +56,7 @@
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
+#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
#include <dev/vmgenc/vmgenc_acpi.h>
@@ -210,6 +211,11 @@ acpi_GetPackedUINT64(device_t dev, ACPI_HANDLE handle, char *path,
}
+static const struct random_source random_vmgenid = {
+ .rs_ident = "VM Generation ID",
+ .rs_source = RANDOM_PURE_VMGENID,
+};
+
static int
vmgenc_attach(device_t dev)
{
@@ -234,7 +240,7 @@ vmgenc_attach(device_t dev)
memcpy(sc->vmg_cache_guid, __DEVOLATILE(void *, sc->vmg_pguid),
sizeof(sc->vmg_cache_guid));
- random_harvest_register_source(RANDOM_PURE_VMGENID);
+ random_source_register(&random_vmgenid);
vmgenc_harvest_all(sc->vmg_cache_guid, sizeof(sc->vmg_cache_guid));
AcpiInstallNotifyHandler(h, ACPI_DEVICE_NOTIFY, vmgenc_notify, dev);
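
Both conversions above (virtio_random here, vmgenc below it) follow the same pattern: the driver declares a const struct random_source and passes it to random_source_register() instead of calling random_harvest_register_source() with a bare source id. A minimal sketch for a hypothetical driver; the foo_* names are illustrative, the source id is a placeholder, and random_source_deregister() is assumed as the detach-side counterpart:

	static u_int
	foo_read(void *buf, u_int bytes)
	{
		/* Fill buf with up to 'bytes' bytes of entropy and
		 * return the number actually produced. */
		return (0);
	}

	static const struct random_source random_foo = {
		.rs_ident = "Foo Entropy Device",
		.rs_source = RANDOM_PURE_VIRTIO,	/* placeholder id */
		.rs_read = foo_read,	/* omitted for push-only sources */
	};

	random_source_register(&random_foo);		/* attach */
	random_source_deregister(&random_foo);		/* detach */
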
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
index 9f2b009d02ec..460a508a60dc 100644
--- a/sys/dev/vmm/vmm_dev.c
+++ b/sys/dev/vmm/vmm_dev.c
@@ -901,6 +901,7 @@ vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
sc->cdev = NULL;
sx_xunlock(&vmmdev_mtx);
+ vm_suspend(sc->vm, VM_SUSPEND_DESTROY);
destroy_dev(cdev);
vmmdev_destroy(sc);
diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c
index be59e37de33d..9df31c9ba133 100644
--- a/sys/dev/vmm/vmm_mem.c
+++ b/sys/dev/vmm/vmm_mem.c
@@ -26,10 +26,14 @@
static void vm_free_memmap(struct vm *vm, int ident);
-void
-vm_mem_init(struct vm_mem *mem)
+int
+vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi)
{
+ mem->mem_vmspace = vmmops_vmspace_alloc(lo, hi);
+ if (mem->mem_vmspace == NULL)
+ return (ENOMEM);
sx_init(&mem->mem_segs_lock, "vm_mem_segs");
+ return (0);
}
static bool
@@ -93,10 +97,21 @@ vm_mem_destroy(struct vm *vm)
for (int i = 0; i < VM_MAX_MEMSEGS; i++)
vm_free_memseg(vm, i);
+ vmmops_vmspace_free(mem->mem_vmspace);
+
sx_xunlock(&mem->mem_segs_lock);
sx_destroy(&mem->mem_segs_lock);
}
+struct vmspace *
+vm_vmspace(struct vm *vm)
+{
+ struct vm_mem *mem;
+
+ mem = vm_mem(vm);
+ return (mem->mem_vmspace);
+}
+
void
vm_slock_memsegs(struct vm *vm)
{
@@ -246,7 +261,7 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
struct vm_mem *mem;
struct vm_mem_seg *seg;
struct vm_mem_map *m, *map;
- struct vmspace *vmspace;
+ struct vm_map *vmmap;
vm_ooffset_t last;
int i, error;
@@ -282,19 +297,19 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
if (map == NULL)
return (ENOSPC);
- vmspace = vm_vmspace(vm);
- error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa,
- len, 0, VMFS_NO_SPACE, prot, prot, 0);
+ vmmap = &mem->mem_vmspace->vm_map;
+ error = vm_map_find(vmmap, seg->object, first, &gpa, len, 0,
+ VMFS_NO_SPACE, prot, prot, 0);
if (error != KERN_SUCCESS)
return (EFAULT);
vm_object_reference(seg->object);
if (flags & VM_MEMMAP_F_WIRED) {
- error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len,
+ error = vm_map_wire(vmmap, gpa, gpa + len,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
if (error != KERN_SUCCESS) {
- vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
+ vm_map_remove(vmmap, gpa, gpa + len);
return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
EFAULT);
}
diff --git a/sys/dev/vmm/vmm_mem.h b/sys/dev/vmm/vmm_mem.h
index 856470cf2590..f3d22058c7b8 100644
--- a/sys/dev/vmm/vmm_mem.h
+++ b/sys/dev/vmm/vmm_mem.h
@@ -36,6 +36,7 @@ enum {
struct vm;
struct vm_object;
+struct vmspace;
struct vm_mem_seg {
size_t len;
@@ -56,12 +57,15 @@ struct vm_mem {
struct vm_mem_map mem_maps[VM_MAX_MEMMAPS];
struct vm_mem_seg mem_segs[VM_MAX_MEMSEGS];
struct sx mem_segs_lock;
+ struct vmspace *mem_vmspace;
};
-void vm_mem_init(struct vm_mem *mem);
+int vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi);
void vm_mem_cleanup(struct vm *vm);
void vm_mem_destroy(struct vm *vm);
+struct vmspace *vm_vmspace(struct vm *vm);
+
/*
* APIs that modify the guest memory map require all vcpus to be frozen.
*/
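
Because vm_mem_init() now allocates the guest vmspace itself and fails with ENOMEM before mem_segs_lock is initialized, callers must check the result and unwind accordingly. A hedged sketch of the new calling convention; the create-path context and the address limits are illustrative, not taken from this patch:

	error = vm_mem_init(vm_mem(vm), 0, VM_MAXUSER_ADDRESS);
	if (error != 0) {
		/* vmmops_vmspace_alloc() failed; mem_segs_lock was
		 * never initialized, so skip sx_destroy() on unwind. */
		return (error);
	}
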
diff --git a/sys/dev/vmware/vmxnet3/if_vmx.c b/sys/dev/vmware/vmxnet3/if_vmx.c
index 62b5f313a137..1a314ca6660e 100644
--- a/sys/dev/vmware/vmxnet3/if_vmx.c
+++ b/sys/dev/vmware/vmxnet3/if_vmx.c
@@ -2056,7 +2056,12 @@ vmxnet3_update_admin_status(if_ctx_t ctx)
struct vmxnet3_softc *sc;
sc = iflib_get_softc(ctx);
- if (sc->vmx_ds->event != 0)
+ /*
+ * iflib may invoke this routine before vmxnet3_attach_post() has
+ * run, which is before the top level shared data area is
+ * initialized and the device made aware of it.
+ */
+ if (sc->vmx_ds != NULL && sc->vmx_ds->event != 0)
vmxnet3_evintr(sc);
vmxnet3_refresh_host_stats(sc);
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index b51ef6766de4..bcf67ddc9689 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -195,8 +195,8 @@ static void vt_update_static(void *);
#ifndef SC_NO_CUTPASTE
static void vt_mouse_paste(void);
#endif
-static void vt_suspend_handler(void *priv);
-static void vt_resume_handler(void *priv);
+static void vt_suspend_handler(void *priv, enum power_stype stype);
+static void vt_resume_handler(void *priv, enum power_stype stype);
SET_DECLARE(vt_drv_set, struct vt_driver);
@@ -3330,7 +3330,7 @@ vt_replace_backend(const struct vt_driver *drv, void *softc)
}
static void
-vt_suspend_handler(void *priv)
+vt_suspend_handler(void *priv, enum power_stype stype)
{
struct vt_device *vd;
@@ -3341,7 +3341,7 @@ vt_suspend_handler(void *priv)
}
static void
-vt_resume_handler(void *priv)
+vt_resume_handler(void *priv, enum power_stype stype)
{
struct vt_device *vd;
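
The handler signature changes above track the power_stype-aware suspend/resume eventhandlers; registration itself is unchanged, the callbacks simply gain a suspend-type argument. A minimal sketch (driver name foo is illustrative):

	static void
	foo_suspend_handler(void *priv, enum power_stype stype)
	{
		/* stype identifies the suspend flavor, e.g.
		 * POWER_STYPE_SUSPEND_TO_MEM; handlers that do not
		 * care, like vt here, may ignore it. */
	}

	EVENTHANDLER_REGISTER(power_suspend, foo_suspend_handler, sc,
	    EVENTHANDLER_PRI_ANY);
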
diff --git a/sys/dev/watchdog/watchdog.c b/sys/dev/watchdog/watchdog.c
index e6b6dc1eac70..c599db56bf95 100644
--- a/sys/dev/watchdog/watchdog.c
+++ b/sys/dev/watchdog/watchdog.c
@@ -50,11 +50,20 @@
#include <sys/syscallsubr.h> /* kern_clock_gettime() */
-static int wd_set_pretimeout(int newtimeout, int disableiftoolong);
+#ifdef COMPAT_FREEBSD14
+#define WDIOCPATPAT_14 _IOW('W', 42, u_int) /* pat the watchdog */
+#define WDIOC_SETTIMEOUT_14 _IOW('W', 43, int) /* set/reset the timer */
+#define WDIOC_GETTIMEOUT_14 _IOR('W', 44, int) /* get total timeout */
+#define WDIOC_GETTIMELEFT_14 _IOR('W', 45, int) /* get time left */
+#define WDIOC_GETPRETIMEOUT_14 _IOR('W', 46, int) /* get the pre-timeout */
+#define WDIOC_SETPRETIMEOUT_14 _IOW('W', 47, int) /* set the pre-timeout */
+#endif
+
+static int wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong);
static void wd_timeout_cb(void *arg);
static struct callout wd_pretimeo_handle;
-static int wd_pretimeout;
+static sbintime_t wd_pretimeout;
static int wd_pretimeout_act = WD_SOFT_LOG;
static struct callout wd_softtimeo_handle;
@@ -63,6 +72,8 @@ static int wd_softtimer; /* true = use softtimer instead of hardware
static int wd_softtimeout_act = WD_SOFT_LOG; /* action for the software timeout */
static struct cdev *wd_dev;
+static volatile sbintime_t wd_last_sbt; /* last timeout value (sbt) */
+static sbintime_t wd_last_sbt_sysctl; /* last timeout value (sbt) */
static volatile u_int wd_last_u; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl_secs; /* wd_last_u in seconds */
@@ -73,6 +84,8 @@ SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u, CTLFLAG_RD,
&wd_last_u_sysctl, 0, "Watchdog last update time");
SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u_secs, CTLFLAG_RD,
&wd_last_u_sysctl_secs, 0, "Watchdog last update time");
+SYSCTL_SBINTIME_MSEC(_hw_watchdog, OID_AUTO, wd_last_msecs, CTLFLAG_RD,
+ &wd_last_sbt_sysctl, "Watchdog last update time (milliseconds)");
static int wd_lastpat_valid = 0;
static time_t wd_lastpat = 0; /* when the watchdog was last patted */
@@ -80,105 +93,94 @@ static time_t wd_lastpat = 0; /* when the watchdog was last patted */
/* Hook for external software watchdog to register for use if needed */
void (*wdog_software_attach)(void);
-static void
-pow2ns_to_ts(int pow2ns, struct timespec *ts)
+/* Legacy interface to watchdog. */
+int
+wdog_kern_pat(u_int utim)
{
- uint64_t ns;
+ sbintime_t sbt;
- ns = 1ULL << pow2ns;
- ts->tv_sec = ns / 1000000000ULL;
- ts->tv_nsec = ns % 1000000000ULL;
-}
+ if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
+ return (EINVAL);
-static int
-pow2ns_to_ticks(int pow2ns)
-{
- struct timeval tv;
- struct timespec ts;
+ if ((utim & WD_LASTVAL) != 0) {
+ return (wdog_control(WD_CTRL_RESET));
+ }
- pow2ns_to_ts(pow2ns, &ts);
- TIMESPEC_TO_TIMEVAL(&tv, &ts);
- return (tvtohz(&tv));
+ utim &= WD_INTERVAL;
+ if (utim == WD_TO_NEVER)
+ sbt = 0;
+ else
+ sbt = nstosbt((uint64_t)1 << utim); /* utim may exceed 31; avoid int shift overflow */
+
+ return (wdog_kern_pat_sbt(sbt));
}
-static int
-seconds_to_pow2ns(int seconds)
+int
+wdog_control(int ctrl)
{
- uint64_t power;
- uint64_t ns;
- uint64_t shifted;
-
- ns = ((uint64_t)seconds) * 1000000000ULL;
- power = flsll(ns);
- shifted = 1ULL << power;
- if (shifted <= ns) {
- power++;
+ /* Disable takes precedence */
+ if (ctrl == WD_CTRL_DISABLE) {
+ wdog_kern_pat(0);
}
- return (power);
+
+ if ((ctrl & WD_CTRL_RESET) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ } else if ((ctrl & WD_CTRL_ENABLE) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ }
+
+ return (0);
}
int
-wdog_kern_pat(u_int utim)
+wdog_kern_pat_sbt(sbintime_t sbt)
{
- int error;
- static int first = 1;
-
- if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
- return (EINVAL);
-
- if ((utim & WD_LASTVAL) != 0) {
- /*
- * if WD_LASTVAL is set, fill in the bits for timeout
- * from the saved value in wd_last_u.
- */
- MPASS((wd_last_u & ~WD_INTERVAL) == 0);
- utim &= ~WD_LASTVAL;
- utim |= wd_last_u;
- } else {
- /*
- * Otherwise save the new interval.
- * This can be zero (to disable the watchdog)
- */
- wd_last_u = (utim & WD_INTERVAL);
+ sbintime_t error_sbt = 0;
+ int pow2ns = 0;
+ int error = 0;
+ static bool first = true;
+
+ /* The legacy interface uses power-of-2-nanoseconds time. */
+ if (sbt != 0) {
+ pow2ns = flsll(sbttons(sbt)); /* flsll, as in the removed seconds_to_pow2ns(), since long is 32 bits on ILP32 */
+ }
+ if (wd_last_sbt != sbt) {
+ wd_last_u = pow2ns;
wd_last_u_sysctl = wd_last_u;
- wd_last_u_sysctl_secs = pow2ns_to_ticks(wd_last_u) / hz;
+ wd_last_u_sysctl_secs = sbt / SBT_1S;
+
+ wd_last_sbt = sbt;
}
- if ((utim & WD_INTERVAL) == WD_TO_NEVER) {
- utim = 0;
- /* Assume all is well; watchdog signals failure. */
- error = 0;
- } else {
- /* Assume no watchdog available; watchdog flags success */
+ if (sbt != 0)
error = EOPNOTSUPP;
- }
+
if (wd_softtimer) {
- if (utim == 0) {
+ if (sbt == 0) {
callout_stop(&wd_softtimeo_handle);
} else {
- (void) callout_reset(&wd_softtimeo_handle,
- pow2ns_to_ticks(utim), wd_timeout_cb, "soft");
+ (void) callout_reset_sbt(&wd_softtimeo_handle,
+ sbt, 0, wd_timeout_cb, "soft", 0);
}
error = 0;
} else {
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
/*
- * If we no hardware watchdog responded, we have not tried to
+ * If no hardware watchdog responded, we have not tried to
* attach an external software watchdog, and one is available,
* attach it now and retry.
*/
- if (error == EOPNOTSUPP && first && *wdog_software_attach != NULL) {
+ if (error == EOPNOTSUPP && first && wdog_software_attach != NULL) {
(*wdog_software_attach)();
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
- first = 0;
+ first = false;
+ /* TODO: Print a (rate-limited?) warning if error_sbt is too far from the requested timeout */
wd_set_pretimeout(wd_pretimeout, true);
- /*
- * If we were able to arm/strobe the watchdog, then
- * update the last time it was strobed for WDIOC_GETTIMELEFT
- */
if (!error) {
struct timespec ts;
@@ -189,6 +191,7 @@ wdog_kern_pat(u_int utim)
wd_lastpat_valid = 1;
}
}
+
return (error);
}
@@ -201,6 +204,7 @@ wd_valid_act(int act)
return true;
}
+#ifdef COMPAT_FREEBSD14
static int
wd_ioctl_patpat(caddr_t data)
{
@@ -220,6 +224,7 @@ wd_ioctl_patpat(caddr_t data)
return (wdog_kern_pat(u));
}
+#endif
static int
wd_get_time_left(struct thread *td, time_t *remainp)
@@ -265,16 +270,14 @@ wd_timeout_cb(void *arg)
* current actual watchdog timeout.
*/
static int
-wd_set_pretimeout(int newtimeout, int disableiftoolong)
+wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong)
{
- u_int utime;
- struct timespec utime_ts;
- int timeout_ticks;
+ sbintime_t utime;
+ sbintime_t timeout_left;
- utime = wdog_kern_last_timeout();
- pow2ns_to_ts(utime, &utime_ts);
+ utime = wdog_kern_last_timeout_sbt();
/* do not permit a pre-timeout >= than the timeout. */
- if (newtimeout >= utime_ts.tv_sec) {
+ if (newtimeout >= utime) {
/*
* If 'disableiftoolong' then just fall through
* so as to disable the pre-watchdog
@@ -292,7 +295,7 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
return 0;
}
- timeout_ticks = pow2ns_to_ticks(utime) - (hz*newtimeout);
+ timeout_left = utime - newtimeout;
#if 0
printf("wd_set_pretimeout: "
"newtimeout: %d, "
@@ -306,8 +309,8 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
#endif
/* We determined the value is sane, so reset the callout */
- (void) callout_reset(&wd_pretimeo_handle,
- timeout_ticks, wd_timeout_cb, "pre");
+ (void) callout_reset_sbt(&wd_pretimeo_handle,
+ timeout_left, 0, wd_timeout_cb, "pre", 0);
wd_pretimeout = newtimeout;
return 0;
}
@@ -316,6 +319,7 @@ static int
wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
int flags __unused, struct thread *td)
{
+ sbintime_t sb;
u_int u;
time_t timeleft;
int error;
@@ -351,29 +355,55 @@ wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
error = EINVAL;
}
break;
- case WDIOC_GETPRETIMEOUT:
- *(int *)data = (int)wd_pretimeout;
+#ifdef COMPAT_FREEBSD14
+ case WDIOC_GETPRETIMEOUT_14:
+ *(int *)data = (int)(wd_pretimeout / SBT_1S);
break;
- case WDIOC_SETPRETIMEOUT:
- error = wd_set_pretimeout(*(int *)data, false);
+ case WDIOC_SETPRETIMEOUT_14:
+ error = wd_set_pretimeout(*(int *)data * SBT_1S, false);
break;
- case WDIOC_GETTIMELEFT:
+ case WDIOC_GETTIMELEFT_14:
error = wd_get_time_left(td, &timeleft);
if (error)
break;
*(int *)data = (int)timeleft;
break;
- case WDIOC_SETTIMEOUT:
+ case WDIOC_SETTIMEOUT_14:
u = *(u_int *)data;
- error = wdog_kern_pat(seconds_to_pow2ns(u));
+ error = wdog_kern_pat_sbt(mstosbt(u * 1000ULL));
break;
- case WDIOC_GETTIMEOUT:
+ case WDIOC_GETTIMEOUT_14:
u = wdog_kern_last_timeout();
*(u_int *)data = u;
break;
- case WDIOCPATPAT:
+ case WDIOCPATPAT_14:
error = wd_ioctl_patpat(data);
break;
+#endif
+
+ /* New API */
+ case WDIOC_CONTROL:
+ wdog_control(*(int *)data);
+ break;
+ case WDIOC_SETTIMEOUT:
+ sb = *(sbintime_t *)data;
+ error = wdog_kern_pat_sbt(sb);
+ break;
+ case WDIOC_GETTIMEOUT:
+ *(sbintime_t *)data = wdog_kern_last_timeout_sbt();
+ break;
+ case WDIOC_GETTIMELEFT:
+ error = wd_get_time_left(td, &timeleft);
+ if (error)
+ break;
+ *(sbintime_t *)data = (sbintime_t)timeleft * SBT_1S;
+ break;
+ case WDIOC_GETPRETIMEOUT:
+ *(sbintime_t *)data = wd_pretimeout;
+ break;
+ case WDIOC_SETPRETIMEOUT:
+ error = wd_set_pretimeout(*(sbintime_t *)data, false);
+ break;
default:
error = ENOIOCTL;
break;
@@ -392,6 +422,12 @@ wdog_kern_last_timeout(void)
return (wd_last_u);
}
+sbintime_t
+wdog_kern_last_timeout_sbt(void)
+{
+ return (wd_last_sbt);
+}
+
static struct cdevsw wd_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = wd_ioctl,
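
A hedged userland sketch of the new sbintime_t ioctl API above: WDIOC_SETTIMEOUT now takes an sbintime_t and WDIOC_CONTROL takes a WD_CTRL_* operation. The device path is built from _PATH_WATCHDOG as watchdogd does; the constants are assumed to come from the updated <sys/watchdog.h>:

	#include <sys/types.h>
	#include <sys/time.h>		/* sbintime_t, SBT_1S */
	#include <sys/watchdog.h>
	#include <sys/ioctl.h>
	#include <err.h>
	#include <fcntl.h>

	int
	main(void)
	{
		sbintime_t timeout = 10 * SBT_1S;	/* ten seconds */
		int fd, op = WD_CTRL_RESET;

		fd = open("/dev/" _PATH_WATCHDOG, O_RDWR);
		if (fd == -1)
			err(1, "open");
		if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == -1)
			err(1, "WDIOC_SETTIMEOUT");
		/* Pat the dog: re-arm with the last programmed timeout. */
		if (ioctl(fd, WDIOC_CONTROL, &op) == -1)
			err(1, "WDIOC_CONTROL");
		return (0);
	}
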
diff --git a/sys/dev/xdma/xdma.c b/sys/dev/xdma/xdma.c
index 62b781159d03..cdd9ad0b8f39 100644
--- a/sys/dev/xdma/xdma.c
+++ b/sys/dev/xdma/xdma.c
@@ -555,7 +555,7 @@ xdma_put(xdma_controller_t *xdma)
}
static void
-xdma_init(void)
+xdma_init(void *dummy __unused)
{
mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
diff --git a/sys/dev/xen/bus/xen_intr.c b/sys/dev/xen/bus/xen_intr.c
index cb30b6efa484..2b5fa8fb7cd1 100644
--- a/sys/dev/xen/bus/xen_intr.c
+++ b/sys/dev/xen/bus/xen_intr.c
@@ -460,7 +460,7 @@ xen_intr_handle_upcall(void *unused __unused)
return (FILTER_HANDLED);
}
-static int
+static void
xen_intr_init(void *dummy __unused)
{
shared_info_t *s = HYPERVISOR_shared_info;
@@ -468,7 +468,7 @@ xen_intr_init(void *dummy __unused)
int i;
if (!xen_domain())
- return (0);
+ return;
_Static_assert(is_valid_evtchn(0),
"is_valid_evtchn(0) fails (unused by Xen, but valid by interface");
@@ -502,8 +502,6 @@ xen_intr_init(void *dummy __unused)
if (bootverbose)
printf("Xen interrupt system initialized\n");
-
- return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c
index 123df4992894..2c61b48c0451 100644
--- a/sys/dev/xen/control/control.c
+++ b/sys/dev/xen/control/control.c
@@ -91,6 +91,7 @@
#include <sys/smp.h>
#include <sys/eventhandler.h>
#include <sys/timetc.h>
+#include <sys/power.h>
#include <geom/geom.h>
@@ -175,12 +176,12 @@ xctrl_suspend(void)
cpuset_t cpu_suspend_map;
#endif
- EVENTHANDLER_INVOKE(power_suspend_early);
+ EVENTHANDLER_INVOKE(power_suspend_early, POWER_STYPE_SUSPEND_TO_MEM);
xs_lock();
stop_all_proc();
xs_unlock();
suspend_all_fs();
- EVENTHANDLER_INVOKE(power_suspend);
+ EVENTHANDLER_INVOKE(power_suspend, POWER_STYPE_SUSPEND_TO_MEM);
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
@@ -297,7 +298,7 @@ xctrl_suspend(void)
resume_all_fs();
resume_all_proc();
- EVENTHANDLER_INVOKE(power_resume);
+ EVENTHANDLER_INVOKE(power_resume, POWER_STYPE_SUSPEND_TO_MEM);
if (bootverbose)
printf("System resumed after suspension\n");
diff --git a/sys/fs/cd9660/cd9660_lookup.c b/sys/fs/cd9660/cd9660_lookup.c
index 75fcdc9152cd..4d0bf73ab235 100644
--- a/sys/fs/cd9660/cd9660_lookup.c
+++ b/sys/fs/cd9660/cd9660_lookup.c
@@ -386,7 +386,7 @@ found:
return (error);
*vpp = tdp;
} else if (dp->i_number == i_ino) {
- VREF(vdp); /* we want ourself, ie "." */
+ vref(vdp); /* we want ourself, ie "." */
/*
* When we lookup "." we still can be asked to lock it
* differently.
diff --git a/sys/fs/cd9660/cd9660_vfsops.c b/sys/fs/cd9660/cd9660_vfsops.c
index b4db4c4f7331..ce6d03b73290 100644
--- a/sys/fs/cd9660/cd9660_vfsops.c
+++ b/sys/fs/cd9660/cd9660_vfsops.c
@@ -617,13 +617,13 @@ cd9660_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
#endif
if ((error = VFS_VGET(mp, ifh.ifid_ino, LK_EXCLUSIVE, &nvp)) != 0) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
ip = VTOI(nvp);
if (ip->inode.iso_mode == 0) {
vput(nvp);
- *vpp = NULLVP;
+ *vpp = NULL;
return (ESTALE);
}
*vpp = nvp;
@@ -704,7 +704,7 @@ cd9660_vget_internal(struct mount *mp, ino_t ino, int flags,
/* Allocate a new vnode/iso_node. */
if ((error = getnewvnode("isofs", mp, &cd9660_vnodeops, &vp)) != 0) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
ip = malloc(sizeof(struct iso_node), M_ISOFSNODE,
@@ -717,7 +717,7 @@ cd9660_vget_internal(struct mount *mp, ino_t ino, int flags,
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_ISOFSNODE);
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, cd9660_vfs_hash_cmp,
diff --git a/sys/fs/cd9660/cd9660_vnops.c b/sys/fs/cd9660/cd9660_vnops.c
index c4d0e6ba7b30..4a2b80a7ccdd 100644
--- a/sys/fs/cd9660/cd9660_vnops.c
+++ b/sys/fs/cd9660/cd9660_vnops.c
@@ -124,7 +124,7 @@ cd9660_access(struct vop_access_args *ap)
uid_t uid;
gid_t gid;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
/*
@@ -162,7 +162,7 @@ cd9660_open(struct vop_open_args *ap)
struct vnode *vp = ap->a_vp;
struct iso_node *ip = VTOI(vp);
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
vnode_create_vobject(vp, ip->i_size, ap->a_td);
@@ -191,7 +191,7 @@ cd9660_getattr(struct vop_getattr_args *ap)
vap->va_atime = ip->inode.iso_atime;
vap->va_mtime = ip->inode.iso_mtime;
vap->va_ctime = ip->inode.iso_ctime;
- vap->va_rdev = ip->inode.iso_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->inode.iso_rdev : NODEV;
vap->va_size = (u_quad_t) ip->i_size;
if (ip->i_size == 0 && (vap->va_mode & S_IFMT) == S_IFLNK) {
@@ -242,7 +242,7 @@ cd9660_ioctl(struct vop_ioctl_args *ap)
VOP_UNLOCK(vp);
return (EBADF);
}
- if (vp->v_type == VCHR || vp->v_type == VBLK) {
+ if (VN_ISDEV(vp)) {
VOP_UNLOCK(vp);
return (EOPNOTSUPP);
}
@@ -280,7 +280,7 @@ cd9660_read(struct vop_read_args *ap)
int seqcount;
long size, n, on;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
seqcount = ap->a_ioflag >> IO_SEQSHIFT;
@@ -711,7 +711,7 @@ cd9660_strategy(struct vop_strategy_args *ap)
struct bufobj *bo;
ip = VTOI(vp);
- if (vp->v_type == VBLK || vp->v_type == VCHR)
+ if (VN_ISDEV(vp))
panic("cd9660_strategy: spec");
if (bp->b_blkno == bp->b_lblkno) {
bp->b_blkno = (ip->iso_start + bp->b_lblkno) <<
@@ -818,7 +818,7 @@ cd9660_getpages(struct vop_getpages_args *ap)
struct vnode *vp;
vp = ap->a_vp;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (use_buf_pager)
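
This file and the filesystems below repeatedly replace the open-coded character/block device check with VN_ISDEV(). For reference, the macro (from sys/vnode.h, paraphrased) is equivalent to the expression it replaces:

	#define VN_ISDEV(vp)	((vp)->v_type == VCHR || (vp)->v_type == VBLK)
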
diff --git a/sys/fs/devfs/devfs_dir.c b/sys/fs/devfs/devfs_dir.c
index 3dc87538017d..aad87606e738 100644
--- a/sys/fs/devfs/devfs_dir.c
+++ b/sys/fs/devfs/devfs_dir.c
@@ -162,7 +162,7 @@ int
devfs_pathpath(const char *p1, const char *p2)
{
- for (;;p1++, p2++) {
+ for (;; p1++, p2++) {
if (*p1 != *p2) {
if (*p1 == '/' && *p2 == '\0')
return (1);
diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c
index 3a64c205186f..caadf257b8ad 100644
--- a/sys/fs/devfs/devfs_vnops.c
+++ b/sys/fs/devfs/devfs_vnops.c
@@ -1061,7 +1061,7 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
mp = dvp->v_mount;
dmp = VFSTODEVFS(mp);
dd = dvp->v_data;
- *vpp = NULLVP;
+ *vpp = NULL;
if ((flags & ISLASTCN) && nameiop == RENAME)
return (EOPNOTSUPP);
@@ -1080,7 +1080,7 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
if ((flags & ISLASTCN) && nameiop != LOOKUP)
return (EINVAL);
*vpp = dvp;
- VREF(dvp);
+ vref(dvp);
return (0);
}
@@ -1170,7 +1170,7 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
if (error)
return (error);
if (*vpp == dvp) {
- VREF(dvp);
+ vref(dvp);
*vpp = dvp;
return (0);
}
diff --git a/sys/fs/ext2fs/ext2_lookup.c b/sys/fs/ext2fs/ext2_lookup.c
index c80e0f99ab56..bb830d07b126 100644
--- a/sys/fs/ext2fs/ext2_lookup.c
+++ b/sys/fs/ext2fs/ext2_lookup.c
@@ -579,7 +579,7 @@ found:
if (dd_ino != NULL)
return (0);
if (dp->i_number == ino) {
- VREF(vdp);
+ vref(vdp);
*vpp = vdp;
return (0);
}
@@ -675,7 +675,7 @@ found:
}
*vpp = tdp;
} else if (dp->i_number == ino) {
- VREF(vdp); /* we want ourself, ie "." */
+ vref(vdp); /* we want ourself, ie "." */
/*
* When we lookup "." we still can be asked to lock it
* differently.
diff --git a/sys/fs/ext2fs/ext2_vfsops.c b/sys/fs/ext2fs/ext2_vfsops.c
index 9e7a03fffd71..0f3808a7c747 100644
--- a/sys/fs/ext2fs/ext2_vfsops.c
+++ b/sys/fs/ext2fs/ext2_vfsops.c
@@ -1334,14 +1334,14 @@ ext2_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
error = VFS_VGET(mp, ufhp->ufid_ino, LK_EXCLUSIVE, &nvp);
if (error) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
ip = VTOI(nvp);
if (ip->i_mode == 0 ||
ip->i_gen != ufhp->ufid_gen || ip->i_nlink <= 0) {
vput(nvp);
- *vpp = NULLVP;
+ *vpp = NULL;
return (ESTALE);
}
*vpp = nvp;
diff --git a/sys/fs/ext2fs/ext2_vnops.c b/sys/fs/ext2fs/ext2_vnops.c
index 064c10bd18b2..35e7ca77c732 100644
--- a/sys/fs/ext2fs/ext2_vnops.c
+++ b/sys/fs/ext2fs/ext2_vnops.c
@@ -222,7 +222,7 @@ ext2_itimes_locked(struct vnode *vp)
ip = VTOI(vp);
if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0)
return;
- if ((vp->v_type == VBLK || vp->v_type == VCHR))
+ if (VN_ISDEV(vp))
ip->i_flag |= IN_LAZYMOD;
else
ip->i_flag |= IN_MODIFIED;
@@ -276,7 +276,7 @@ static int
ext2_open(struct vop_open_args *ap)
{
- if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
/*
@@ -360,7 +360,7 @@ ext2_getattr(struct vop_getattr_args *ap)
vap->va_nlink = ip->i_nlink;
vap->va_uid = ip->i_uid;
vap->va_gid = ip->i_gid;
- vap->va_rdev = ip->i_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->i_rdev : NODEV;
vap->va_size = ip->i_size;
vap->va_atime.tv_sec = ip->i_atime;
vap->va_atime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_atimensec : 0;
@@ -905,7 +905,7 @@ abortit:
error = ext2_checkpath(ip, dp, tcnp->cn_cred);
if (error)
goto out;
- VREF(tdvp);
+ vref(tdvp);
error = vfs_relookup(tdvp, &tvp, tcnp, true);
if (error)
goto out;
@@ -1031,7 +1031,7 @@ abortit:
*/
fcnp->cn_flags &= ~MODMASK;
fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
- VREF(fdvp);
+ vref(fdvp);
error = vfs_relookup(fdvp, &fvp, fcnp, true);
if (error == 0)
vrele(fdvp);
@@ -1571,7 +1571,7 @@ ext2_strategy(struct vop_strategy_args *ap)
daddr_t blkno;
int error;
- if (vp->v_type == VBLK || vp->v_type == VCHR)
+ if (VN_ISDEV(vp))
panic("ext2_strategy: spec");
if (bp->b_blkno == bp->b_lblkno) {
if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS)
@@ -1733,7 +1733,7 @@ ext2_deleteextattr(struct vop_deleteextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1771,7 +1771,7 @@ ext2_getextattr(struct vop_getextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1814,7 +1814,7 @@ ext2_listextattr(struct vop_listextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1855,7 +1855,7 @@ ext2_setextattr(struct vop_setextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
diff --git a/sys/fs/fdescfs/fdesc_vnops.c b/sys/fs/fdescfs/fdesc_vnops.c
index 58a22b8bdc50..c1188c3819e7 100644
--- a/sys/fs/fdescfs/fdesc_vnops.c
+++ b/sys/fs/fdescfs/fdesc_vnops.c
@@ -196,7 +196,7 @@ loop:
if (error != 0) {
vgone(vp);
vput(vp);
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
@@ -211,7 +211,7 @@ loop:
mtx_unlock(&fdesc_hashmtx);
vgone(vp);
vput(vp);
- *vpp = NULLVP;
+ *vpp = NULL;
return (-1);
}
@@ -227,7 +227,7 @@ loop:
vput(vp);
/* If we didn't get it, return no vnode. */
if (error)
- vp2 = NULLVP;
+ vp2 = NULL;
*vpp = vp2;
return (error);
}
@@ -301,7 +301,7 @@ fdesc_lookup(struct vop_lookup_args *ap)
if (cnp->cn_namelen == 1 && *pname == '.') {
*vpp = dvp;
- VREF(dvp);
+ vref(dvp);
return (0);
}
diff --git a/sys/fs/fuse/fuse_ipc.c b/sys/fs/fuse/fuse_ipc.c
index a751c09159ff..7f754ab7f1d4 100644
--- a/sys/fs/fuse/fuse_ipc.c
+++ b/sys/fs/fuse/fuse_ipc.c
@@ -193,7 +193,6 @@ fuse_interrupt_send(struct fuse_ticket *otick, int err)
struct fuse_data *data = otick->tk_data;
struct fuse_ticket *tick, *xtick;
struct ucred reused_creds;
- gid_t reused_groups[1];
if (otick->irq_unique == 0) {
/*
@@ -237,8 +236,7 @@ fuse_interrupt_send(struct fuse_ticket *otick, int err)
*/
ftick_hdr = fticket_in_header(otick);
reused_creds.cr_uid = ftick_hdr->uid;
- reused_groups[0] = ftick_hdr->gid;
- reused_creds.cr_groups = reused_groups;
+ reused_creds.cr_gid = ftick_hdr->gid;
fdisp_init(&fdi, sizeof(*fii));
fdisp_make_pid(&fdi, FUSE_INTERRUPT, data, ftick_hdr->nodeid,
ftick_hdr->pid, &reused_creds);
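
This hunk tracks the ucred rework in which the effective group id moved out of cr_groups[0] into its own cr_gid field, leaving cr_groups to hold only the supplementary groups. A sketch of flattening the split layout back into a single wire-format array, mirroring what newnfs_copyincred() in nfs_clport.c now does (fragment; variable names follow that function):

	groups[0] = cr->cr_gid;		/* effective gid comes first */
	for (i = 1; i < ngroups; i++)	/* then the supplementary gids */
		groups[i] = cr->cr_groups[i - 1];
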
diff --git a/sys/fs/fuse/fuse_vfsops.c b/sys/fs/fuse/fuse_vfsops.c
index 1b858a988289..b617925c4e5f 100644
--- a/sys/fs/fuse/fuse_vfsops.c
+++ b/sys/fs/fuse/fuse_vfsops.c
@@ -278,13 +278,13 @@ fuse_vfsop_fhtovp(struct mount *mp, struct fid *fhp, int flags,
error = VFS_VGET(mp, ffhp->nid, LK_EXCLUSIVE, &nvp);
if (error) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
fvdat = VTOFUD(nvp);
if (fvdat->generation != ffhp->gen ) {
vput(nvp);
- *vpp = NULLVP;
+ *vpp = NULL;
return (ESTALE);
}
*vpp = nvp;
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index b782146b7278..683ee2f7ad56 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -284,7 +284,7 @@ fuse_flush(struct vnode *vp, struct ucred *cred, pid_t pid, int fflag)
struct mount *mp = vnode_mount(vp);
int err;
- if (fsess_not_impl(vnode_mount(vp), FUSE_FLUSH))
+ if (fsess_not_impl(mp, FUSE_FLUSH))
return 0;
err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
@@ -292,7 +292,7 @@ fuse_flush(struct vnode *vp, struct ucred *cred, pid_t pid, int fflag)
return err;
if (fufh->fuse_open_flags & FOPEN_NOFLUSH &&
- (!fsess_opt_writeback(vnode_mount(vp))))
+ (!fsess_opt_writeback(mp)))
return (0);
fdisp_init(&fdi, sizeof(*ffi));
@@ -795,11 +795,15 @@ fuse_vnop_close(struct vop_close_args *ap)
struct mount *mp = vnode_mount(vp);
struct ucred *cred = ap->a_cred;
int fflag = ap->a_fflag;
- struct thread *td = ap->a_td;
- pid_t pid = td->td_proc->p_pid;
+ struct thread *td;
struct fuse_vnode_data *fvdat = VTOFUD(vp);
+ pid_t pid;
int err = 0;
+ /* NB: a_td will be NULL from some async kernel contexts */
+ td = ap->a_td ? ap->a_td : curthread;
+ pid = td->td_proc->p_pid;
+
if (fuse_isdeadfs(vp))
return 0;
if (vnode_isdir(vp))
@@ -838,7 +842,7 @@ fuse_vnop_close(struct vop_close_args *ap)
}
/* TODO: close the file handle, if we're sure it's no longer used */
if ((fvdat->flag & FN_SIZECHANGE) != 0) {
- fuse_vnode_savesize(vp, cred, td->td_proc->p_pid);
+ fuse_vnode_savesize(vp, cred, pid);
}
return err;
}
@@ -953,7 +957,7 @@ fuse_vnop_copy_file_range(struct vop_copy_file_range_args *ap)
*ap->a_outoffp += fwo->size;
fuse_internal_clear_suid_on_write(outvp, outcred, td);
if (*ap->a_outoffp > outfvdat->cached_attrs.va_size) {
- fuse_vnode_setsize(outvp, *ap->a_outoffp, false);
+ fuse_vnode_setsize(outvp, *ap->a_outoffp, false);
getnanouptime(&outfvdat->last_local_modify);
}
fuse_vnode_update(invp, FN_ATIMECHANGE);
@@ -1748,7 +1752,7 @@ fuse_vnop_open(struct vop_open_args *ap)
if (fuse_isdeadfs(vp))
return (EXTERROR(ENXIO, "This FUSE session is about "
"to be closed"));
- if (vp->v_type == VCHR || vp->v_type == VBLK || vp->v_type == VFIFO)
+ if (VN_ISDEV(vp) || vp->v_type == VFIFO)
return (EXTERROR(EOPNOTSUPP, "Unsupported vnode type",
vp->v_type));
if ((a_mode & (FREAD | FWRITE | FEXEC)) == 0)
diff --git a/sys/fs/msdosfs/bootsect.h b/sys/fs/msdosfs/bootsect.h
index 170d94cb9512..94b1137a153e 100644
--- a/sys/fs/msdosfs/bootsect.h
+++ b/sys/fs/msdosfs/bootsect.h
@@ -20,7 +20,7 @@
/*
* Format of a boot sector. This is the first sector on a DOS floppy disk
- * or the fist sector of a partition on a hard disk. But, it is not the
+ * or the first sector of a partition on a hard disk. But, it is not the
* first sector of a partitioned hard disk.
*/
struct bootsector33 {
diff --git a/sys/fs/msdosfs/msdosfs_lookup.c b/sys/fs/msdosfs/msdosfs_lookup.c
index 8ab6d35a2685..58ce8eff9dbd 100644
--- a/sys/fs/msdosfs/msdosfs_lookup.c
+++ b/sys/fs/msdosfs/msdosfs_lookup.c
@@ -515,7 +515,7 @@ foundroot:
* Save directory inode pointer in ndp->ni_dvp for dirremove().
*/
if (dp->de_StartCluster == scn && isadir) { /* "." */
- VREF(vdp);
+ vref(vdp);
*vpp = vdp;
return (0);
}
@@ -602,7 +602,7 @@ foundroot:
msdosfs_integrity_error(pmp);
return (EBADF);
}
- VREF(vdp); /* we want ourself, ie "." */
+ vref(vdp); /* we want ourself, ie "." */
*vpp = vdp;
} else {
if ((error = deget(pmp, cluster, blkoff, LK_EXCLUSIVE,
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
index 4431d36c8a8e..30c63cfa8a35 100644
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -1184,7 +1184,7 @@ msdosfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
error = deget(pmp, defhp->defid_dirclust, defhp->defid_dirofs,
LK_EXCLUSIVE, &dep);
if (error) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
*vpp = DETOV(dep);
diff --git a/sys/fs/nfs/nfs_commonkrpc.c b/sys/fs/nfs/nfs_commonkrpc.c
index 0ae3b94bef89..1e4e8506790f 100644
--- a/sys/fs/nfs/nfs_commonkrpc.c
+++ b/sys/fs/nfs/nfs_commonkrpc.c
@@ -239,6 +239,7 @@ static bool nfscl_use_gss[NFSV42_NPROCS] = {
true,
true,
true,
+ true,
};
/*
diff --git a/sys/fs/nfs/nfs_commonport.c b/sys/fs/nfs/nfs_commonport.c
index e5fdb395c9f7..862780741ee7 100644
--- a/sys/fs/nfs/nfs_commonport.c
+++ b/sys/fs/nfs/nfs_commonport.c
@@ -371,8 +371,6 @@ nfsrv_atroot(struct vnode *vp, uint64_t *retp)
/*
* Set the credentials to refer to root.
- * If only the various BSDen could agree on whether cr_gid is a separate
- * field or cr_groups[0]...
*/
void
newnfs_setroot(struct ucred *cred)
diff --git a/sys/fs/nfs/nfs_commonsubs.c b/sys/fs/nfs/nfs_commonsubs.c
index 67e33193ecec..7f5b29ca2085 100644
--- a/sys/fs/nfs/nfs_commonsubs.c
+++ b/sys/fs/nfs/nfs_commonsubs.c
@@ -187,7 +187,7 @@ struct nfsv4_opflag nfsv4_opflag[NFSV42_NOPS] = {
{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 }, /* Read Plus */
{ 0, 1, 0, 0, LK_SHARED, 1, 0 }, /* Seek */
{ 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 }, /* Write Same */
- { 0, 0, 0, 0, LK_EXCLUSIVE, 1, 1 }, /* Clone */
+ { 2, 1, 1, 0, LK_SHARED, 1, 0 }, /* Clone */
{ 0, 1, 0, 0, LK_SHARED, 1, 1 }, /* Getxattr */
{ 0, 1, 1, 1, LK_EXCLUSIVE, 1, 1 }, /* Setxattr */
{ 0, 1, 0, 0, LK_SHARED, 1, 1 }, /* Listxattrs */
@@ -219,7 +219,7 @@ NFSD_VNET_DEFINE_STATIC(u_char *, nfsrv_dnsname) = NULL;
static int nfs_bigreply[NFSV42_NPROCS] = { 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 };
+ 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 };
/* local functions */
static int nfsrv_skipace(struct nfsrv_descript *nd, int *acesizep);
@@ -310,6 +310,7 @@ static struct {
{ NFSV4OP_LAYOUTERROR, 1, "LayoutError", 11, },
{ NFSV4OP_VERIFY, 3, "AppendWrite", 11, },
{ NFSV4OP_OPENATTR, 3, "OpenAttr", 8, },
+ { NFSV4OP_SAVEFH, 5, "Clone", 5, },
};
/*
@@ -319,7 +320,7 @@ static int nfs_bigrequest[NFSV42_NPROCS] = {
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
- 0, 1, 0
+ 0, 1, 0, 0
};
/*
@@ -648,7 +649,7 @@ nfscl_fillsattr(struct nfsrv_descript *nd, struct vattr *vap,
NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMECREATE);
(void) nfsv4_fillattr(nd, vp->v_mount, vp, NULL, vap, NULL, 0,
&attrbits, NULL, NULL, 0, 0, 0, 0, (uint64_t)0, NULL,
- false, false, false);
+ false, false, false, 0);
break;
}
}
@@ -1302,7 +1303,7 @@ nfsv4_loadattr(struct nfsrv_descript *nd, vnode_t vp,
struct nfsv3_pathconf *pc, struct statfs *sbp, struct nfsstatfs *sfp,
struct nfsfsinfo *fsp, NFSACL_T *aclp, int compare, int *retcmpp,
u_int32_t *leasep, u_int32_t *rderrp, bool *has_namedattrp,
- NFSPROC_T *p, struct ucred *cred)
+ uint32_t *clone_blksizep, NFSPROC_T *p, struct ucred *cred)
{
u_int32_t *tl;
int i = 0, j, k, l = 0, m, bitpos, attrsum = 0;
@@ -1437,6 +1438,13 @@ nfsv4_loadattr(struct nfsrv_descript *nd, vnode_t vp,
NFSCLRBIT_ATTRBIT(&checkattrbits,
NFSATTRBIT_SYSTEM);
}
+ /* Some filesystems do not support block cloning */
+ if (vp == NULL || VOP_PATHCONF(vp,
+ _PC_CLONE_BLKSIZE, &has_pathconf) != 0)
+ has_pathconf = 0;
+ if (has_pathconf == 0)
+ NFSCLRBIT_ATTRBIT(&checkattrbits,
+ NFSATTRBIT_CLONEBLKSIZE);
if (!NFSEQUAL_ATTRBIT(&retattrbits, &checkattrbits)
|| retnotsup)
*retcmpp = NFSERR_NOTSAME;
@@ -2374,6 +2382,23 @@ nfsv4_loadattr(struct nfsrv_descript *nd, vnode_t vp,
if (compare && !(*retcmpp) && i != nfs_srvmaxio)
*retcmpp = NFSERR_NOTSAME;
break;
+ case NFSATTRBIT_CLONEBLKSIZE:
+ NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
+ if (compare) {
+ if (!(*retcmpp)) {
+ if (vp == NULL || VOP_PATHCONF(vp,
+ _PC_CLONE_BLKSIZE, &has_pathconf)
+ != 0)
+ has_pathconf = 0;
+ if (has_pathconf !=
+ fxdr_unsigned(uint32_t, *tl))
+ *retcmpp = NFSERR_NOTSAME;
+ }
+ } else if (clone_blksizep != NULL) {
+ *clone_blksizep = fxdr_unsigned(uint32_t, *tl);
+ }
+ attrsum += NFSX_UNSIGNED;
+ break;
case NFSATTRBIT_CHANGEATTRTYPE:
NFSM_DISSECT(tl, uint32_t *, NFSX_UNSIGNED);
if (compare) {
@@ -2648,7 +2673,7 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
nfsattrbit_t *attrbitp, struct ucred *cred, NFSPROC_T *p, int isdgram,
int reterr, int supports_nfsv4acls, int at_root, uint64_t mounted_on_fileno,
struct statfs *pnfssf, bool xattrsupp, bool has_hiddensystem,
- bool has_namedattr)
+ bool has_namedattr, uint32_t clone_blksize)
{
int bitpos, retnum = 0;
u_int32_t *tl;
@@ -2771,6 +2796,9 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
NFSCLRBIT_ATTRBIT(&attrbits, NFSATTRBIT_HIDDEN);
NFSCLRBIT_ATTRBIT(&attrbits, NFSATTRBIT_SYSTEM);
}
+ if (clone_blksize == 0)
+ NFSCLRBIT_ATTRBIT(&attrbits,
+ NFSATTRBIT_CLONEBLKSIZE);
retnum += nfsrv_putattrbit(nd, &attrbits);
break;
case NFSATTRBIT_TYPE:
@@ -3249,6 +3277,11 @@ nfsv4_fillattr(struct nfsrv_descript *nd, struct mount *mp, vnode_t vp,
}
retnum += NFSX_UNSIGNED;
break;
+ case NFSATTRBIT_CLONEBLKSIZE:
+ NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(clone_blksize);
+ retnum += NFSX_UNSIGNED;
+ break;
default:
printf("EEK! Bad V4 attribute bitpos=%d\n", bitpos);
}
diff --git a/sys/fs/nfs/nfs_var.h b/sys/fs/nfs/nfs_var.h
index 54f60a753c50..61083ecf2d66 100644
--- a/sys/fs/nfs/nfs_var.h
+++ b/sys/fs/nfs/nfs_var.h
@@ -286,6 +286,8 @@ int nfsrvd_deallocate(struct nfsrv_descript *, int,
vnode_t, struct nfsexstuff *);
int nfsrvd_copy_file_range(struct nfsrv_descript *, int,
vnode_t, vnode_t, struct nfsexstuff *, struct nfsexstuff *);
+int nfsrvd_clone(struct nfsrv_descript *, int,
+ vnode_t, vnode_t, struct nfsexstuff *, struct nfsexstuff *);
int nfsrvd_seek(struct nfsrv_descript *, int,
vnode_t, struct nfsexstuff *);
int nfsrvd_getxattr(struct nfsrv_descript *, int,
@@ -341,7 +343,8 @@ int nfsv4_loadattr(struct nfsrv_descript *, vnode_t,
struct nfsvattr *, struct nfsfh **, fhandle_t *, int,
struct nfsv3_pathconf *, struct statfs *, struct nfsstatfs *,
struct nfsfsinfo *, NFSACL_T *,
- int, int *, u_int32_t *, u_int32_t *, bool *, NFSPROC_T *, struct ucred *);
+ int, int *, u_int32_t *, u_int32_t *, bool *, uint32_t *, NFSPROC_T *,
+ struct ucred *);
int nfsv4_lock(struct nfsv4lock *, int, int *, struct mtx *, struct mount *);
void nfsv4_unlock(struct nfsv4lock *, int);
void nfsv4_relref(struct nfsv4lock *);
@@ -397,7 +400,7 @@ void nfsrv_wcc(struct nfsrv_descript *, int, struct nfsvattr *, int,
int nfsv4_fillattr(struct nfsrv_descript *, struct mount *, vnode_t, NFSACL_T *,
struct vattr *, fhandle_t *, int, nfsattrbit_t *, struct ucred *,
NFSPROC_T *, int, int, int, int, uint64_t, struct statfs *, bool, bool,
- bool);
+ bool, uint32_t);
void nfsrv_fillattr(struct nfsrv_descript *, struct nfsvattr *);
struct mbuf *nfsrv_adj(struct mbuf *, int, int);
void nfsrv_postopattr(struct nfsrv_descript *, int, struct nfsvattr *);
@@ -517,10 +520,10 @@ int nfsrpc_lock(struct nfsrv_descript *, struct nfsmount *, vnode_t,
u_int8_t *, int, struct nfscllockowner *, int, int, u_int64_t,
u_int64_t, short, struct ucred *, NFSPROC_T *, int);
int nfsrpc_statfs(vnode_t, struct nfsstatfs *, struct nfsfsinfo *, uint32_t *,
- struct ucred *, NFSPROC_T *, struct nfsvattr *, int *);
+ uint32_t *, struct ucred *, NFSPROC_T *, struct nfsvattr *, int *);
int nfsrpc_fsinfo(vnode_t, struct nfsfsinfo *, struct ucred *,
NFSPROC_T *, struct nfsvattr *, int *);
-int nfsrpc_pathconf(vnode_t, struct nfsv3_pathconf *, bool *,
+int nfsrpc_pathconf(vnode_t, struct nfsv3_pathconf *, bool *, uint32_t *,
struct ucred *, NFSPROC_T *, struct nfsvattr *, int *);
int nfsrpc_renew(struct nfsclclient *, struct nfsclds *, struct ucred *,
NFSPROC_T *);
@@ -562,6 +565,8 @@ int nfsrpc_deallocate(vnode_t, off_t, off_t, struct nfsvattr *, int *,
int nfsrpc_copy_file_range(vnode_t, off_t *, vnode_t, off_t *, size_t *,
unsigned int, int *, struct nfsvattr *, int *, struct nfsvattr *,
struct ucred *, bool, bool *);
+int nfsrpc_clone(vnode_t, off_t *, vnode_t, off_t *, size_t *, bool,
+ int *, struct nfsvattr *, int *, struct nfsvattr *, struct ucred *);
int nfsrpc_seek(vnode_t, off_t *, bool *, int, struct ucred *,
struct nfsvattr *, int *);
int nfsrpc_getextattr(vnode_t, const char *, struct uio *, ssize_t *,
@@ -668,7 +673,7 @@ int nfscl_nget(mount_t, vnode_t, struct nfsfh *,
NFSPROC_T *nfscl_getparent(NFSPROC_T *);
void nfscl_start_renewthread(struct nfsclclient *);
void nfscl_loadsbinfo(struct nfsmount *, struct nfsstatfs *, void *);
-void nfscl_loadfsinfo (struct nfsmount *, struct nfsfsinfo *);
+void nfscl_loadfsinfo(struct nfsmount *, struct nfsfsinfo *, uint32_t);
void nfscl_delegreturn(struct nfscldeleg *, int, struct nfsmount *,
struct ucred *, NFSPROC_T *);
void nfsrvd_cbinit(int);
@@ -737,7 +742,7 @@ int nfsvno_updfilerev(vnode_t, struct nfsvattr *, struct nfsrv_descript *,
int nfsvno_fillattr(struct nfsrv_descript *, struct mount *, vnode_t,
struct nfsvattr *, fhandle_t *, int, nfsattrbit_t *,
struct ucred *, NFSPROC_T *, int, int, int, int, uint64_t, bool, bool,
- bool);
+ bool, uint32_t);
int nfsrv_sattr(struct nfsrv_descript *, vnode_t, struct nfsvattr *, nfsattrbit_t *,
NFSACL_T *, NFSPROC_T *);
int nfsv4_sattr(struct nfsrv_descript *, vnode_t, struct nfsvattr *, nfsattrbit_t *,
diff --git a/sys/fs/nfs/nfsport.h b/sys/fs/nfs/nfsport.h
index c30b46261df0..4e9aae70da6f 100644
--- a/sys/fs/nfs/nfsport.h
+++ b/sys/fs/nfs/nfsport.h
@@ -442,10 +442,13 @@
/* Do a NFSv4 Openattr. */
#define NFSPROC_OPENATTR 70
+/* Do a NFSv4.2 Clone. */
+#define NFSPROC_CLONE 71
+
/*
* Must be defined as one higher than the last NFSv4.2 Proc# above.
*/
-#define NFSV42_NPROCS 71
+#define NFSV42_NPROCS 72
/* Value of NFSV42_NPROCS for old nfsstats structure. (Always 69) */
#define NFSV42_OLDNPROCS 69
@@ -477,7 +480,7 @@ struct nfsstatsv1 {
uint64_t readlink_bios;
uint64_t biocache_readdirs;
uint64_t readdir_bios;
- uint64_t rpccnt[NFSV42_NPROCS + 9];
+ uint64_t rpccnt[NFSV42_NPROCS + 8];
uint64_t rpcretries;
uint64_t srvrpccnt[NFSV42_NOPS + NFSV4OP_FAKENOPS + 15];
uint64_t srvlayouts;
@@ -906,15 +909,6 @@ int nfsmsleep(void *, void *, int, const char *, struct timespec *);
#define NFSBZERO(s, l) bzero((s), (l))
/*
- * Some queue.h files don't have these dfined in them.
- */
-#ifndef LIST_END
-#define LIST_END(head) NULL
-#define SLIST_END(head) NULL
-#define TAILQ_END(head) NULL
-#endif
-
-/*
* This must be defined to be a global variable that increments once
* per second, but never stops or goes backwards, even when a "date"
* command changes the TOD clock. It is used for delta times for
@@ -1023,7 +1017,7 @@ MALLOC_DECLARE(M_NEWNFSDSESSION);
int nfscl_loadattrcache(struct vnode **, struct nfsvattr *, void *, int, int);
int newnfs_realign(struct mbuf **, int);
bool ncl_pager_setsize(struct vnode *vp, u_quad_t *nsizep);
-void ncl_copy_vattr(struct vattr *dst, struct vattr *src);
+void ncl_copy_vattr(struct vnode *vp, struct vattr *dst, struct vattr *src);
/*
* If the port runs on an SMP box that can enforce Atomic ops with low
@@ -1035,9 +1029,6 @@ void ncl_copy_vattr(struct vattr *dst, struct vattr *src);
#define NFSDECRGLOBAL(a) ((a)--)
/*
- * Assorted funky stuff to make things work under Darwin8.
- */
-/*
* These macros checks for a field in vattr being set.
*/
#define NFSATTRISSET(t, v, a) ((v)->a != (t)VNOVAL)
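
The paired edits above keep the exported nfsstatsv1 layout stable: NFSV42_NPROCS grows from 71 to 72 for the new Clone proc while the rpccnt[] slack shrinks from 9 to 8, so the array length stays 71 + 9 = 72 + 8 = 80. A compile-time check along these lines (not in the patch) would document the invariant:

	_Static_assert(NFSV42_NPROCS + 8 == 80,
	    "rpccnt[] length is part of the nfsstatsv1 ABI");
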
diff --git a/sys/fs/nfs/nfsproto.h b/sys/fs/nfs/nfsproto.h
index cb5a80e8df73..d628108bdc1a 100644
--- a/sys/fs/nfs/nfsproto.h
+++ b/sys/fs/nfs/nfsproto.h
@@ -411,10 +411,13 @@
/* Do a NFSv4 Openattr. */
#define NFSPROC_OPENATTR 70
+/* Do a NFSv4.2 Clone. */
+#define NFSPROC_CLONE 71
+
/*
* Must be defined as one higher than the last NFSv4.2 Proc# above.
*/
-#define NFSV42_NPROCS 71
+#define NFSV42_NPROCS 72
/* Value of NFSV42_NPROCS for old nfsstats structure. (Always 69) */
#define NFSV42_OLDNPROCS 69
@@ -1194,6 +1197,7 @@ struct nfsv3_sattr {
NFSATTRBM_LAYOUTBLKSIZE | \
NFSATTRBM_LAYOUTALIGNMENT | \
NFSATTRBM_SUPPATTREXCLCREAT | \
+ NFSATTRBM_CLONEBLKSIZE | \
NFSATTRBM_CHANGEATTRTYPE | \
NFSATTRBM_XATTRSUPPORT)
@@ -1242,7 +1246,8 @@ struct nfsv3_sattr {
* NFSATTRBIT_NFSV42 - Attributes only supported by NFSv4.2.
*/
#define NFSATTRBIT_NFSV42_2 \
- (NFSATTRBM_CHANGEATTRTYPE | \
+ (NFSATTRBM_CLONEBLKSIZE | \
+ NFSATTRBM_CHANGEATTRTYPE | \
NFSATTRBM_XATTRSUPPORT | \
NFSATTRBM_MODEUMASK)
@@ -1415,7 +1420,7 @@ struct nfsv3_sattr {
/*
* NFSGETATTRBIT_STATFS2 - bits 64<->95
*/
-#define NFSGETATTRBIT_STATFS2 0
+#define NFSGETATTRBIT_STATFS2 (NFSATTRBM_CLONEBLKSIZE)
/*
* Set of attributes for the equivalent of an nfsv3 pathconf rpc.
@@ -1438,7 +1443,7 @@ struct nfsv3_sattr {
/*
* NFSGETATTRBIT_PATHCONF2 - bits 64<->95
*/
-#define NFSGETATTRBIT_PATHCONF2 0
+#define NFSGETATTRBIT_PATHCONF2 (NFSATTRBM_CLONEBLKSIZE)
/*
* Sets of attributes required by readdir and readdirplus.
diff --git a/sys/fs/nfsclient/nfs_clcomsubs.c b/sys/fs/nfsclient/nfs_clcomsubs.c
index bca0bdcd0df1..05963074e53d 100644
--- a/sys/fs/nfsclient/nfs_clcomsubs.c
+++ b/sys/fs/nfsclient/nfs_clcomsubs.c
@@ -272,7 +272,7 @@ nfsm_loadattr(struct nfsrv_descript *nd, struct nfsvattr *nap)
if (nd->nd_flag & ND_NFSV4) {
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL,
NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL,
- NULL);
+ NULL, NULL);
} else if (nd->nd_flag & ND_NFSV3) {
NFSM_DISSECT(fp, struct nfs_fattr *, NFSX_V3FATTR);
nap->na_type = nfsv34tov_type(fp->fa_type);
diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c
index b25d967982a1..77e71d4153c9 100644
--- a/sys/fs/nfsclient/nfs_clport.c
+++ b/sys/fs/nfsclient/nfs_clport.c
@@ -412,7 +412,7 @@ nfscl_warn_fileid(struct nfsmount *nmp, struct nfsvattr *oldnap,
}
void
-ncl_copy_vattr(struct vattr *dst, struct vattr *src)
+ncl_copy_vattr(struct vnode *vp, struct vattr *dst, struct vattr *src)
{
dst->va_type = src->va_type;
dst->va_mode = src->va_mode;
@@ -429,7 +429,7 @@ ncl_copy_vattr(struct vattr *dst, struct vattr *src)
dst->va_birthtime = src->va_birthtime;
dst->va_gen = src->va_gen;
dst->va_flags = src->va_flags;
- dst->va_rdev = src->va_rdev;
+ dst->va_rdev = VN_ISDEV(vp) ? src->va_rdev : NODEV;
dst->va_bytes = src->va_bytes;
dst->va_filerev = src->va_filerev;
}
@@ -595,7 +595,7 @@ nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
}
if (vaper != NULL) {
- ncl_copy_vattr(vaper, vap);
+ ncl_copy_vattr(vp, vaper, vap);
if (np->n_flag & NCHG) {
if (np->n_flag & NACC)
vaper->va_atime = np->n_atim;
@@ -828,7 +828,7 @@ nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
== (ND_NFSV4 | ND_V4WCCATTR)) {
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if (error)
return (error);
/*
@@ -963,7 +963,8 @@ nfscl_loadsbinfo(struct nfsmount *nmp, struct nfsstatfs *sfp, void *statfs)
* Use the fsinfo stuff to update the mount point.
*/
void
-nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
+nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp,
+ uint32_t clone_blksize)
{
if ((nmp->nm_wsize == 0 || fsp->fs_wtpref < nmp->nm_wsize) &&
@@ -1003,6 +1004,14 @@ nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
fsp->fs_maxfilesize < nmp->nm_maxfilesize)
nmp->nm_maxfilesize = fsp->fs_maxfilesize;
nmp->nm_mountp->mnt_stat.f_iosize = newnfs_iosize(nmp);
+
+ /*
+ * Although ZFS reports a clone_blksize of 16Mbytes,
+ * 128Kbytes usually works, so set it to that.
+ */
+ if (clone_blksize > 128 * 1024)
+ clone_blksize = 128 * 1024;
+ nmp->nm_cloneblksize = clone_blksize;
nmp->nm_state |= NFSSTA_GOTFSINFO;
}
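The clamp above favors a granularity that servers commonly honor over the advertised maximum. A worked example with hypothetical values, showing what lands in nm_cloneblksize and how it feeds the client's later alignment test:

    uint32_t reported = 16 * 1024 * 1024;	/* e.g. what ZFS advertises */
    uint32_t blksize = (reported > 128 * 1024) ? 128 * 1024 : reported;
    /*
     * blksize == 131072, so an offset such as 384 KiB passes the
     * (off % nm_cloneblksize) == 0 test that 16 MiB alignment would fail.
     */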
@@ -1089,9 +1098,10 @@ newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
KASSERT(cr->cr_ngroups >= 0,
("newnfs_copyincred: negative cr_ngroups"));
nfscr->nfsc_uid = cr->cr_uid;
- nfscr->nfsc_ngroups = MIN(cr->cr_ngroups, NFS_MAXGRPS + 1);
- for (i = 0; i < nfscr->nfsc_ngroups; i++)
- nfscr->nfsc_groups[i] = cr->cr_groups[i];
+ nfscr->nfsc_ngroups = MIN(cr->cr_ngroups + 1, NFS_MAXGRPS + 1);
+ nfscr->nfsc_groups[0] = cr->cr_gid;
+ for (i = 1; i < nfscr->nfsc_ngroups; i++)
+ nfscr->nfsc_groups[i] = cr->cr_groups[i - 1];
}
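The reworked loop reflects a credential layout where the effective gid lives in cr_gid rather than cr_groups[0], while the wire credential must still lead with it. A worked example with a hypothetical credential:

    /*
     * cr_gid = 10, cr_groups = {20, 30}, cr_ngroups = 2 produces
     *   nfsc_groups = {10, 20, 30}, nfsc_ngroups = 3,
     * i.e. cr_ngroups + 1 entries, capped at NFS_MAXGRPS + 1.
     */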
/*
diff --git a/sys/fs/nfsclient/nfs_clrpcops.c b/sys/fs/nfsclient/nfs_clrpcops.c
index 920fcf7b8c61..ad9404a18fc8 100644
--- a/sys/fs/nfsclient/nfs_clrpcops.c
+++ b/sys/fs/nfsclient/nfs_clrpcops.c
@@ -225,6 +225,9 @@ static int nfsrpc_layoutgetres(struct nfsmount *, vnode_t, uint8_t *,
static int nfsrpc_copyrpc(vnode_t, off_t, vnode_t, off_t, size_t *,
nfsv4stateid_t *, nfsv4stateid_t *, struct nfsvattr *, int *,
struct nfsvattr *, int *, bool, int *, struct ucred *, NFSPROC_T *);
+static int nfsrpc_clonerpc(vnode_t, off_t, vnode_t, off_t, size_t *, bool,
+ nfsv4stateid_t *, nfsv4stateid_t *, struct nfsvattr *, int *,
+ struct nfsvattr *, int *, struct ucred *, NFSPROC_T *);
static int nfsrpc_seekrpc(vnode_t, off_t *, nfsv4stateid_t *, bool *,
int, struct nfsvattr *, int *, struct ucred *);
static struct mbuf *nfsm_split(struct mbuf *, uint64_t);
@@ -696,7 +699,7 @@ nfsrpc_openrpc(struct nfsmount *nmp, vnode_t vp, u_int8_t *nfhp, int fhlen,
("nfsrpc_openrpc: Getattr repstat"));
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, NULL, NULL, p, cred);
+ NULL, NULL, NULL, NULL, NULL, p, cred);
if (error)
goto nfsmout;
}
@@ -1355,7 +1358,7 @@ nfsrpc_getattrnovp(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, int syscred,
if ((nd->nd_flag & ND_NFSV4) != 0)
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
NULL, NULL, NULL, NULL, NULL, 0, NULL, leasep, NULL,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
else
error = nfsm_loadattr(nd, nap);
} else
@@ -3597,7 +3600,7 @@ nfsrpc_readdir(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
nfsva.na_mntonfileno = UINT64_MAX;
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, NULL, NULL, p, cred);
+ NULL, NULL, NULL, NULL, NULL, p, cred);
if (error) {
dotdotfileid = dotfileid;
} else if (gotmnton) {
@@ -3847,7 +3850,7 @@ nfsrpc_readdir(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
nfsva.na_mntonfileno = UINT64_MAX;
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, &rderr, NULL, p, cred);
+ NULL, NULL, &rderr, NULL, NULL, p, cred);
if (error)
goto nfsmout;
NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
@@ -3978,7 +3981,7 @@ nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
int len, left;
struct dirent *dp = NULL;
u_int32_t *tl;
- vnode_t newvp = NULLVP;
+ vnode_t newvp = NULL;
struct nfsrv_descript nfsd, *nd = &nfsd;
struct nameidata nami, *ndp = &nami;
struct componentname *cnp = &ndp->ni_cnd;
@@ -4072,7 +4075,7 @@ nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
nfsva.na_mntonfileno = UINT64_MAX;
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, NULL, NULL, p, cred);
+ NULL, NULL, NULL, NULL, NULL, p, cred);
if (error) {
dotdotfileid = dotfileid;
} else if (gotmnton) {
@@ -4346,7 +4349,7 @@ nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
nfsva.na_mntonfileno = 0xffffffff;
error = nfsv4_loadattr(nd, NULL, &nfsva, &nfhp,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, &rderr, NULL, p, cred);
+ NULL, NULL, &rderr, NULL, NULL, p, cred);
if (error)
goto nfsmout;
}
@@ -4384,7 +4387,7 @@ nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
attr_ok = true;
if (NFSRV_CMPFH(nfhp->nfh_fh, nfhp->nfh_len,
dnp->n_fhp->nfh_fh, dnp->n_fhp->nfh_len)) {
- VREF(vp);
+ vref(vp);
newvp = vp;
unlocknewvp = 0;
free(nfhp, M_NFSFH);
@@ -4433,7 +4436,7 @@ nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
}
}
nfhp = NULL;
- if (newvp != NULLVP) {
+ if (newvp != NULL) {
if (attr_ok)
error = nfscl_loadattrcache(&newvp,
&nfsva, NULL, 0, 0);
@@ -4463,7 +4466,7 @@ nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
vput(newvp);
else
vrele(newvp);
- newvp = NULLVP;
+ newvp = NULL;
}
}
} else if (nfhp != NULL) {
@@ -4981,8 +4984,8 @@ nfsmout:
*/
int
nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp,
- uint32_t *leasep, struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap,
- int *attrflagp)
+ uint32_t *leasep, uint32_t *cloneblksizep, struct ucred *cred, NFSPROC_T *p,
+ struct nfsvattr *nap, int *attrflagp)
{
u_int32_t *tl = NULL;
struct nfsrv_descript nfsd, *nd = &nfsd;
@@ -4991,6 +4994,8 @@ nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp,
int error;
*attrflagp = 0;
+ if (cloneblksizep != NULL)
+ *cloneblksizep = 0;
nmp = VFSTONFS(vp->v_mount);
if (NFSHASNFSV4(nmp)) {
/*
@@ -5009,7 +5014,7 @@ nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp,
if (nd->nd_repstat == 0) {
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
NULL, NULL, sbp, fsp, NULL, 0, NULL, leasep, NULL,
- NULL, p, cred);
+ NULL, cloneblksizep, p, cred);
if (!error) {
nmp->nm_fsid[0] = nap->na_filesid[0];
nmp->nm_fsid[1] = nap->na_filesid[1];
@@ -5063,7 +5068,8 @@ nfsmout:
*/
int
nfsrpc_pathconf(vnode_t vp, struct nfsv3_pathconf *pc, bool *has_namedattrp,
- struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp)
+ uint32_t *clone_blksizep, struct ucred *cred, NFSPROC_T *p,
+ struct nfsvattr *nap, int *attrflagp)
{
struct nfsrv_descript nfsd, *nd = &nfsd;
struct nfsmount *nmp;
@@ -5074,6 +5080,7 @@ nfsrpc_pathconf(vnode_t vp, struct nfsv3_pathconf *pc, bool *has_namedattrp,
*has_namedattrp = false;
*attrflagp = 0;
+ *clone_blksizep = 0;
nmp = VFSTONFS(vp->v_mount);
if (NFSHASNFSV4(nmp)) {
np = VTONFS(vp);
@@ -5100,7 +5107,7 @@ nfsrpc_pathconf(vnode_t vp, struct nfsv3_pathconf *pc, bool *has_namedattrp,
if (nd->nd_repstat == 0) {
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
pc, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL,
- has_namedattrp, p, cred);
+ has_namedattrp, clone_blksizep, p, cred);
if (!error)
*attrflagp = 1;
} else {
@@ -5395,7 +5402,8 @@ nfsrpc_getacl(vnode_t vp, struct ucred *cred, NFSPROC_T *p, struct acl *aclp)
return (error);
if (!nd->nd_repstat)
error = nfsv4_loadattr(nd, vp, NULL, NULL, NULL, 0, NULL,
- NULL, NULL, NULL, aclp, 0, NULL, NULL, NULL, NULL, p, cred);
+ NULL, NULL, NULL, aclp, 0, NULL, NULL, NULL, NULL, NULL, p,
+ cred);
else
error = nd->nd_repstat;
m_freem(nd->nd_mrep);
@@ -5437,7 +5445,7 @@ nfsrpc_setaclrpc(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_ACL);
(void) nfsv4_fillattr(nd, vp->v_mount, vp, aclp, NULL, NULL, 0,
&attrbits, NULL, NULL, 0, 0, 0, 0, (uint64_t)0, NULL, false, false,
- false);
+ false, 0);
error = nfscl_request(nd, vp, p, cred);
if (error)
return (error);
@@ -8496,7 +8504,7 @@ nfsrpc_openlayoutrpc(struct nfsmount *nmp, vnode_t vp, u_int8_t *nfhp,
if (*++tl == 0) {
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
- NULL, NULL, NULL, NULL, p, cred);
+ NULL, NULL, NULL, NULL, NULL, p, cred);
if (error != 0)
goto nfsmout;
if (ndp != NULL) {
@@ -9168,6 +9176,199 @@ nfsmout:
}
/*
+ * nfs clone operation.
+ */
+int
+nfsrpc_clone(vnode_t invp, off_t *inoffp, vnode_t outvp,
+ off_t *outoffp, size_t *lenp, bool toeof, int *inattrflagp,
+ struct nfsvattr *innap, int *outattrflagp, struct nfsvattr *outnap,
+ struct ucred *cred)
+{
+ int error, expireret = 0, retrycnt;
+ uint32_t clidrev = 0;
+ struct nfsmount *nmp = VFSTONFS(invp->v_mount);
+ struct nfsfh *innfhp = NULL, *outnfhp = NULL;
+ nfsv4stateid_t instateid, outstateid;
+ void *inlckp, *outlckp;
+
+ if (nmp->nm_clp != NULL)
+ clidrev = nmp->nm_clp->nfsc_clientidrev;
+ innfhp = VTONFS(invp)->n_fhp;
+ outnfhp = VTONFS(outvp)->n_fhp;
+ retrycnt = 0;
+ do {
+ /* Get both stateids. */
+ inlckp = NULL;
+ nfscl_getstateid(invp, innfhp->nfh_fh, innfhp->nfh_len,
+ NFSV4OPEN_ACCESSREAD, 0, NULL, curthread, &instateid,
+ &inlckp);
+ outlckp = NULL;
+ nfscl_getstateid(outvp, outnfhp->nfh_fh, outnfhp->nfh_len,
+ NFSV4OPEN_ACCESSWRITE, 0, NULL, curthread, &outstateid,
+ &outlckp);
+
+ error = nfsrpc_clonerpc(invp, *inoffp, outvp, *outoffp, lenp,
+ toeof, &instateid, &outstateid, innap, inattrflagp, outnap,
+ outattrflagp, cred, curthread);
+ if (error == 0) {
+ *inoffp += *lenp;
+ *outoffp += *lenp;
+ } else if (error == NFSERR_STALESTATEID)
+ nfscl_initiate_recovery(nmp->nm_clp);
+ if (inlckp != NULL)
+ nfscl_lockderef(inlckp);
+ if (outlckp != NULL)
+ nfscl_lockderef(outlckp);
+ if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID ||
+ error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY ||
+ error == NFSERR_OLDSTATEID || error == NFSERR_BADSESSION) {
+ (void) nfs_catnap(PZERO, error, "nfs_cfr");
+ } else if ((error == NFSERR_EXPIRED || (!NFSHASINT(nmp) &&
+ error == NFSERR_BADSTATEID)) && clidrev != 0) {
+ expireret = nfscl_hasexpired(nmp->nm_clp, clidrev,
+ curthread);
+ } else if (error == NFSERR_BADSTATEID && NFSHASINT(nmp)) {
+ error = EIO;
+ }
+ retrycnt++;
+ } while (error == NFSERR_GRACE || error == NFSERR_DELAY ||
+ error == NFSERR_STALESTATEID || error == NFSERR_BADSESSION ||
+ error == NFSERR_STALEDONTRECOVER ||
+ (error == NFSERR_OLDSTATEID && retrycnt < 20) ||
+ ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) &&
+ expireret == 0 && clidrev != 0 && retrycnt < 4));
+ if (error != 0 && (retrycnt >= 4 ||
+ error == NFSERR_STALESTATEID || error == NFSERR_BADSESSION ||
+ error == NFSERR_STALEDONTRECOVER))
+ error = EIO;
+ return (error);
+}
+
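For reference, a condensed reading of the retry policy implemented by the loop above (editorial summary; see the code for the exact conditions):

    /*
     * GRACE, DELAY, BADSESSION, STALESTATEID, STALEDONTRECOVER:
     *	nap briefly (nfs_catnap) and retry until the condition clears
     * OLDSTATEID: retry, up to 20 attempts
     * EXPIRED, BADSTATEID: retry up to 4 times while the clientid
     *	still appears live (clidrev != 0, lease not expired)
     * Anything still failing after that is mapped to EIO.
     */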
+/*
+ * The clone RPC.
+ */
+static int
+nfsrpc_clonerpc(vnode_t invp, off_t inoff, vnode_t outvp, off_t outoff,
+ size_t *lenp, bool toeof, nfsv4stateid_t *instateidp,
+ nfsv4stateid_t *outstateidp, struct nfsvattr *innap, int *inattrflagp,
+ struct nfsvattr *outnap, int *outattrflagp, struct ucred *cred,
+ NFSPROC_T *p)
+{
+ uint32_t *tl, *opcntp;
+ int error;
+ struct nfsrv_descript nfsd;
+ struct nfsrv_descript *nd = &nfsd;
+ struct nfsmount *nmp;
+ nfsattrbit_t attrbits;
+ struct vattr va;
+ uint64_t len;
+
+ nmp = VFSTONFS(invp->v_mount);
+ *inattrflagp = *outattrflagp = 0;
+ len = *lenp;
+ if (len == 0)
+ return (0);
+ if (toeof)
+ len = 0;
+ nfscl_reqstart(nd, NFSPROC_CLONE, nmp, VTONFS(invp)->n_fhp->nfh_fh,
+ VTONFS(invp)->n_fhp->nfh_len, &opcntp, NULL, 0, 0, cred);
+ /*
+ * First do a Setattr of atime to the server's clock
+ * time. The FreeBSD "collective" was of the opinion
+ * that setting atime was necessary for this syscall.
+ * Do the Setattr before the Clone, so that it can be
+ * handled well if the server replies NFSERR_DELAY to
+ * the Setattr operation.
+ */
+ if ((nmp->nm_mountp->mnt_flag & MNT_NOATIME) == 0) {
+ NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFSV4OP_SETATTR);
+ nfsm_stateidtom(nd, instateidp, NFSSTATEID_PUTSTATEID);
+ VATTR_NULL(&va);
+ va.va_atime.tv_sec = va.va_atime.tv_nsec = 0;
+ va.va_vaflags = VA_UTIMES_NULL;
+ nfscl_fillsattr(nd, &va, invp, 0, 0);
+ /* Bump opcnt from 7 to 8. */
+ *opcntp = txdr_unsigned(8);
+ }
+
+ /* Now Getattr the invp attributes. */
+ NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFSV4OP_GETATTR);
+ NFSGETATTR_ATTRBIT(&attrbits);
+ nfsrv_putattrbit(nd, &attrbits);
+
+ /* Set outvp. */
+ NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFSV4OP_PUTFH);
+ (void)nfsm_fhtom(nmp, nd, VTONFS(outvp)->n_fhp->nfh_fh,
+ VTONFS(outvp)->n_fhp->nfh_len, 0);
+
+ /* Do the Clone. */
+ NFSM_BUILD(tl, uint32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFSV4OP_CLONE);
+ nfsm_stateidtom(nd, instateidp, NFSSTATEID_PUTSTATEID);
+ nfsm_stateidtom(nd, outstateidp, NFSSTATEID_PUTSTATEID);
+ NFSM_BUILD(tl, uint32_t *, 3 * NFSX_HYPER + NFSX_UNSIGNED);
+ txdr_hyper(inoff, tl); tl += 2;
+ txdr_hyper(outoff, tl); tl += 2;
+ txdr_hyper(len, tl); tl += 2;
+
+ /* Get the outvp attributes. */
+ *tl = txdr_unsigned(NFSV4OP_GETATTR);
+ NFSWRITEGETATTR_ATTRBIT(&attrbits);
+ nfsrv_putattrbit(nd, &attrbits);
+
+ error = nfscl_request(nd, invp, p, cred);
+ if (error != 0)
+ return (error);
+ /* Skip over the Setattr reply. */
+ if ((nd->nd_flag & ND_NOMOREDATA) == 0 &&
+ (nmp->nm_mountp->mnt_flag & MNT_NOATIME) == 0) {
+ NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+ if (*(tl + 1) == 0) {
+ error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+ if (error != 0)
+ goto nfsmout;
+ } else
+ nd->nd_flag |= ND_NOMOREDATA;
+ }
+ if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+ /* Get the input file's attributes. */
+ NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+ if (*(tl + 1) == 0) {
+ error = nfsm_loadattr(nd, innap);
+ if (error != 0)
+ goto nfsmout;
+ *inattrflagp = 1;
+ } else
+ nd->nd_flag |= ND_NOMOREDATA;
+ }
+ /* Skip over return stat for PutFH. */
+ if ((nd->nd_flag & ND_NOMOREDATA) == 0) {
+ NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+ if (*++tl != 0)
+ nd->nd_flag |= ND_NOMOREDATA;
+ }
+ /* Skip over return stat for Clone. */
+ if ((nd->nd_flag & ND_NOMOREDATA) == 0)
+ NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+ if (nd->nd_repstat == 0) {
+ NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
+ error = nfsm_loadattr(nd, outnap);
+ if (error == 0)
+ *outattrflagp = NFS_LATTR_NOSHRINK;
+ } else {
+ *lenp = 0;
+ }
+ if (error == 0)
+ error = nd->nd_repstat;
+nfsmout:
+ m_freem(nd->nd_mrep);
+ return (error);
+}
+
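Rough shape of the compound request built above (editorial sketch; the exact leading ops come from nfscl_reqstart() and depend on the minor version):

    /*
     * PutFH(infile)
     * [Setattr(atime)]	-- only when the mount is not MNT_NOATIME
     * Getattr(infile)
     * PutFH(outfile)
     * Clone(in_stateid, out_stateid, inoff, outoff, len)
     * Getattr(outfile)
     *
     * A len of 0 on the wire means "clone through EOF" (RFC 7862),
     * which is why the code zeroes len when toeof is set.
     */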
+/*
* Seek operation.
*/
int
@@ -9724,13 +9925,13 @@ nfscl_statfs(struct vnode *vp, struct ucred *cred, NFSPROC_T *td)
struct nfsstatfs sb;
struct mount *mp;
struct nfsmount *nmp;
- uint32_t lease;
+ uint32_t clone_blksize, lease;
int attrflag, error;
mp = vp->v_mount;
nmp = VFSTONFS(mp);
- error = nfsrpc_statfs(vp, &sb, &fs, &lease, cred, td, &nfsva,
- &attrflag);
+ error = nfsrpc_statfs(vp, &sb, &fs, &lease, &clone_blksize, cred, td,
+ &nfsva, &attrflag);
if (attrflag != 0)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
if (error == 0) {
@@ -9739,7 +9940,7 @@ nfscl_statfs(struct vnode *vp, struct ucred *cred, NFSPROC_T *td)
nmp->nm_clp->nfsc_renew = NFSCL_RENEW(lease);
NFSUNLOCKCLSTATE();
mtx_lock(&nmp->nm_mtx);
- nfscl_loadfsinfo(nmp, &fs);
+ nfscl_loadfsinfo(nmp, &fs, clone_blksize);
nfscl_loadsbinfo(nmp, &sb, &mp->mnt_stat);
mp->mnt_stat.f_iosize = newnfs_iosize(nmp);
mtx_unlock(&nmp->nm_mtx);
diff --git a/sys/fs/nfsclient/nfs_clstate.c b/sys/fs/nfsclient/nfs_clstate.c
index 99a781640c53..aa9d01fc4632 100644
--- a/sys/fs/nfsclient/nfs_clstate.c
+++ b/sys/fs/nfsclient/nfs_clstate.c
@@ -3701,7 +3701,7 @@ nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
if (!error)
(void) nfsv4_fillattr(nd, NULL, NULL, NULL, &va,
NULL, 0, &rattrbits, NULL, p, 0, 0, 0, 0,
- (uint64_t)0, NULL, false, false, false);
+ (uint64_t)0, NULL, false, false, false, 0);
break;
case NFSV4OP_CBRECALL:
NFSCL_DEBUG(4, "cbrecall\n");
diff --git a/sys/fs/nfsclient/nfs_clvfsops.c b/sys/fs/nfsclient/nfs_clvfsops.c
index 0bd05c03885b..5ea7eab07632 100644
--- a/sys/fs/nfsclient/nfs_clvfsops.c
+++ b/sys/fs/nfsclient/nfs_clvfsops.c
@@ -292,8 +292,10 @@ nfs_statfs(struct mount *mp, struct statfs *sbp)
int error = 0, attrflag, gotfsinfo = 0, ret;
struct nfsnode *np;
char *fakefh;
+ uint32_t clone_blksize;
td = curthread;
+ clone_blksize = 0;
error = vfs_busy(mp, MBF_NOWAIT);
if (error)
@@ -337,8 +339,8 @@ nfs_statfs(struct mount *mp, struct statfs *sbp)
} else
mtx_unlock(&nmp->nm_mtx);
if (!error)
- error = nfsrpc_statfs(vp, &sb, &fs, NULL, td->td_ucred, td,
- &nfsva, &attrflag);
+ error = nfsrpc_statfs(vp, &sb, &fs, NULL, &clone_blksize,
+ td->td_ucred, td, &nfsva, &attrflag);
if ((nmp->nm_privflag & NFSMNTP_FAKEROOTFH) != 0 &&
error == NFSERR_WRONGSEC) {
/* Cannot get new stats, so return what is in mnt_stat. */
@@ -375,7 +377,7 @@ nfs_statfs(struct mount *mp, struct statfs *sbp)
if (!error) {
mtx_lock(&nmp->nm_mtx);
if (gotfsinfo || (nmp->nm_flag & NFSMNT_NFSV4))
- nfscl_loadfsinfo(nmp, &fs);
+ nfscl_loadfsinfo(nmp, &fs, clone_blksize);
nfscl_loadsbinfo(nmp, &sb, sbp);
sbp->f_iosize = newnfs_iosize(nmp);
mtx_unlock(&nmp->nm_mtx);
@@ -408,7 +410,7 @@ ncl_fsinfo(struct nfsmount *nmp, struct vnode *vp, struct ucred *cred,
if (attrflag)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
mtx_lock(&nmp->nm_mtx);
- nfscl_loadfsinfo(nmp, &fs);
+ nfscl_loadfsinfo(nmp, &fs, 0);
mtx_unlock(&nmp->nm_mtx);
}
return (error);
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index fa451887e73e..e9ae91e046e7 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -1026,7 +1026,7 @@ nfs_getattr(struct vop_getattr_args *ap)
* cached attributes should be ignored.
*/
if (nmp->nm_fhsize > 0 && ncl_getattrcache(vp, &vattr) == 0) {
- ncl_copy_vattr(vap, &vattr);
+ ncl_copy_vattr(vp, vap, &vattr);
/*
* Get the local modify time for the case of a write
@@ -1284,7 +1284,7 @@ nfs_lookup(struct vop_lookup_args *ap)
bool is_nameddir, needs_nameddir, opennamed;
dattrflag = 0;
- *vpp = NULLVP;
+ *vpp = NULL;
nmp = VFSTONFS(mp);
opennamed = (flags & (OPENNAMED | ISLASTCN)) == (OPENNAMED | ISLASTCN);
if (opennamed && (!NFSHASNFSV4(nmp) || !NFSHASNFSV4N(nmp)))
@@ -1309,7 +1309,7 @@ nfs_lookup(struct vop_lookup_args *ap)
/*
* If the named attribute directory is needed, acquire it now.
*/
- newvp = NULLVP;
+ newvp = NULL;
if (needs_nameddir) {
KASSERT(np->n_v4 == NULL, ("nfs_lookup: O_NAMEDATTR when"
" n_v4 not NULL"));
@@ -1322,10 +1322,10 @@ nfs_lookup(struct vop_lookup_args *ap)
}
dvp = newvp;
np = VTONFS(dvp);
- newvp = NULLVP;
+ newvp = NULL;
} else if (opennamed && cnp->cn_namelen == 1 &&
*cnp->cn_nameptr == '.') {
- VREF(dvp);
+ vref(dvp);
*vpp = dvp;
return (0);
}
@@ -1399,7 +1399,7 @@ nfs_lookup(struct vop_lookup_args *ap)
vput(newvp);
else
vrele(newvp);
- *vpp = NULLVP;
+ *vpp = NULL;
} else if (error == ENOENT) {
if (VN_IS_DOOMED(dvp))
return (ENOENT);
@@ -1450,7 +1450,7 @@ nfs_lookup(struct vop_lookup_args *ap)
NFSUNLOCKMNT(nmp);
#endif
- newvp = NULLVP;
+ newvp = NULL;
NFSINCRGLOBAL(nfsstatsv1.lookupcache_misses);
nanouptime(&ts);
error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
@@ -1464,9 +1464,9 @@ nfs_lookup(struct vop_lookup_args *ap)
}
handle_error:
if (error) {
- if (newvp != NULLVP) {
+ if (newvp != NULL) {
vput(newvp);
- *vpp = NULLVP;
+ *vpp = NULL;
}
if (error != ENOENT) {
@@ -1587,7 +1587,7 @@ handle_error:
0, 1);
} else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
free(nfhp, M_NFSFH);
- VREF(dvp);
+ vref(dvp);
newvp = dvp;
if (attrflag)
(void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
@@ -1782,7 +1782,7 @@ nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
int error = 0, attrflag, dattrflag;
u_int32_t rdev;
- if (vap->va_type == VCHR || vap->va_type == VBLK)
+ if (VATTR_ISDEV(vap))
rdev = vap->va_rdev;
else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
rdev = 0xffffffff;
@@ -2863,7 +2863,7 @@ nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
M_NEWNFSREQ, M_WAITOK);
sp->s_cred = crhold(cnp->cn_cred);
sp->s_dvp = dvp;
- VREF(dvp);
+ vref(dvp);
/*
* Fudge together a funny name.
@@ -2961,7 +2961,7 @@ nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
newvp = NFSTOV(np);
} else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
free(nfhp, M_NFSFH);
- VREF(dvp);
+ vref(dvp);
newvp = dvp;
} else {
cn.cn_nameptr = name;
@@ -3474,7 +3474,7 @@ nfs_advlock(struct vop_advlock_args *ap)
u_quad_t size;
struct nfsmount *nmp;
- error = NFSVOPLOCK(vp, LK_SHARED);
+ error = NFSVOPLOCK(vp, LK_EXCLUSIVE);
if (error != 0)
return (EBADF);
nmp = VFSTONFS(vp->v_mount);
@@ -3511,11 +3511,6 @@ nfs_advlock(struct vop_advlock_args *ap)
cred = p->p_ucred;
else
cred = td->td_ucred;
- NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
- if (VN_IS_DOOMED(vp)) {
- error = EBADF;
- goto out;
- }
/*
* If this is unlocking a write locked region, flush and
@@ -4027,31 +4022,51 @@ nfs_copy_file_range(struct vop_copy_file_range_args *ap)
struct vattr va, *vap;
struct uio io;
struct nfsmount *nmp;
+ struct nfsnode *np;
size_t len, len2;
ssize_t r;
int error, inattrflag, outattrflag, ret, ret2, invp_lock;
off_t inoff, outoff;
- bool consecutive, must_commit, tryoutcred;
+ bool consecutive, must_commit, onevp, toeof, tryclone, tryoutcred;
+ bool mustclone;
/*
* NFSv4.2 Copy is not permitted for infile == outfile.
+ * The NFSv4.2 Clone operation does work on non-overlapping
+ * byte ranges in the same file, but only if offsets
+ * (and len if not to EOF) are aligned properly.
* TODO: copy_file_range() between multiple NFS mountpoints
+ * --> This is not possible now, since each mount appears to
+ * the NFSv4.n server as a separate client.
*/
- if (invp == outvp || invp->v_mount != outvp->v_mount) {
+ if ((invp == outvp && (ap->a_flags & COPY_FILE_RANGE_CLONE) == 0) ||
+ (invp != outvp && invp->v_mount != outvp->v_mount)) {
generic_copy:
return (ENOSYS);
}
-
- invp_lock = LK_SHARED;
+ if (invp == outvp) {
+ onevp = true;
+ invp_lock = LK_EXCLUSIVE;
+ } else {
+ onevp = false;
+ invp_lock = LK_SHARED;
+ }
+ mustclone = false;
+ if (onevp || (ap->a_flags & COPY_FILE_RANGE_CLONE) != 0)
+ mustclone = true;
relock:
+ inoff = *ap->a_inoffp;
+ outoff = *ap->a_outoffp;
- /* Lock both vnodes, avoiding risk of deadlock. */
+ /* Lock vnode(s), avoiding risk of deadlock. */
do {
mp = NULL;
error = vn_start_write(outvp, &mp, V_WAIT);
if (error == 0) {
error = vn_lock(outvp, LK_EXCLUSIVE);
if (error == 0) {
+ if (onevp)
+ break;
error = vn_lock(invp, invp_lock | LK_NOWAIT);
if (error == 0)
break;
@@ -4071,16 +4086,24 @@ relock:
return (error);
/*
- * More reasons to avoid nfs copy: not NFSv4.2, or explicitly
- * disabled.
+ * More reasons to avoid nfs copy/clone: not NFSv4.2, explicitly
+ * disabled, or cloning required but not possible.
+ * Only clone if the clone_blksize attribute is supported
+ * and the clone_blksize is greater than 0.
+ * Alignment of offsets and length will be checked later.
*/
nmp = VFSTONFS(invp->v_mount);
+ np = VTONFS(invp);
mtx_lock(&nmp->nm_mtx);
+ if ((nmp->nm_privflag & NFSMNTP_NOCOPY) != 0)
+ mustclone = true;
if (!NFSHASNFSV4(nmp) || nmp->nm_minorvers < NFSV42_MINORVERSION ||
- (nmp->nm_privflag & NFSMNTP_NOCOPY) != 0) {
+ (mustclone && (!NFSISSET_ATTRBIT(&np->n_vattr.na_suppattr,
+ NFSATTRBIT_CLONEBLKSIZE) || nmp->nm_cloneblksize == 0))) {
mtx_unlock(&nmp->nm_mtx);
VOP_UNLOCK(invp);
- VOP_UNLOCK(outvp);
+ if (!onevp)
+ VOP_UNLOCK(outvp); /* For onevp, same as invp. */
if (mp != NULL)
vn_finished_write(mp);
goto generic_copy;
@@ -4111,6 +4134,8 @@ relock:
invp_obj = invp->v_object;
if (invp_obj != NULL && vm_object_mightbedirty(invp_obj)) {
if (invp_lock != LK_EXCLUSIVE) {
+ KASSERT(!onevp, ("nfs_copy_file_range: "
+ "invp_lock LK_SHARED for onevp"));
invp_lock = LK_EXCLUSIVE;
VOP_UNLOCK(invp);
VOP_UNLOCK(outvp);
@@ -4134,10 +4159,10 @@ relock:
else
consecutive = false;
mtx_unlock(&nmp->nm_mtx);
- inoff = *ap->a_inoffp;
- outoff = *ap->a_outoffp;
tryoutcred = true;
must_commit = false;
+ toeof = false;
+
if (error == 0) {
vap = &VTONFS(invp)->n_vattr.na_vattr;
error = VOP_GETATTR(invp, vap, ap->a_incred);
@@ -4169,29 +4194,63 @@ relock:
if (error == 0 && ret != 0)
error = ret;
}
- } else if (inoff + len > vap->va_size)
+ } else if (inoff + len >= vap->va_size) {
+ toeof = true;
*ap->a_lenp = len = vap->va_size - inoff;
+ }
} else
error = 0;
}
/*
+ * For cloning, the offsets must be clone blksize aligned and
+ * the len must be blksize aligned unless it goes to EOF on
+ * the input file.
+ */
+ tryclone = false;
+ if (len > 0) {
+ if (error == 0 && NFSISSET_ATTRBIT(&np->n_vattr.na_suppattr,
+ NFSATTRBIT_CLONEBLKSIZE) && nmp->nm_cloneblksize != 0 &&
+ (inoff % nmp->nm_cloneblksize) == 0 &&
+ (outoff % nmp->nm_cloneblksize) == 0 &&
+ (toeof || (len % nmp->nm_cloneblksize) == 0))
+ tryclone = true;
+ else if (mustclone)
+ error = ENOSYS;
+ }
+
+ /*
* len will be set to 0 upon a successful Copy RPC.
- * As such, this only loops when the Copy RPC needs to be retried.
+ * As such, this only loops when the Copy/Clone RPC needs to be retried.
*/
while (len > 0 && error == 0) {
inattrflag = outattrflag = 0;
len2 = len;
- if (tryoutcred)
- error = nfsrpc_copy_file_range(invp, ap->a_inoffp,
- outvp, ap->a_outoffp, &len2, ap->a_flags,
- &inattrflag, &innfsva, &outattrflag, &outnfsva,
- ap->a_outcred, consecutive, &must_commit);
- else
- error = nfsrpc_copy_file_range(invp, ap->a_inoffp,
- outvp, ap->a_outoffp, &len2, ap->a_flags,
- &inattrflag, &innfsva, &outattrflag, &outnfsva,
- ap->a_incred, consecutive, &must_commit);
+ if (tryclone) {
+ if (tryoutcred)
+ error = nfsrpc_clone(invp, ap->a_inoffp, outvp,
+ ap->a_outoffp, &len2, toeof, &inattrflag,
+ &innfsva, &outattrflag, &outnfsva,
+ ap->a_outcred);
+ else
+ error = nfsrpc_clone(invp, ap->a_inoffp, outvp,
+ ap->a_outoffp, &len2, toeof, &inattrflag,
+ &innfsva, &outattrflag, &outnfsva,
+ ap->a_incred);
+ } else {
+ if (tryoutcred)
+ error = nfsrpc_copy_file_range(invp,
+ ap->a_inoffp, outvp, ap->a_outoffp, &len2,
+ ap->a_flags, &inattrflag, &innfsva,
+ &outattrflag, &outnfsva,
+ ap->a_outcred, consecutive, &must_commit);
+ else
+ error = nfsrpc_copy_file_range(invp,
+ ap->a_inoffp, outvp, ap->a_outoffp, &len2,
+ ap->a_flags, &inattrflag, &innfsva,
+ &outattrflag, &outnfsva,
+ ap->a_incred, consecutive, &must_commit);
+ }
if (inattrflag != 0)
ret = nfscl_loadattrcache(&invp, &innfsva, NULL, 0, 1);
if (outattrflag != 0)
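A worked example of the tryclone test above, with a hypothetical nm_cloneblksize of 128 KiB:

    /*
     * inoff = 0, outoff = 256 KiB, len = 128 KiB	-> clone
     * inoff = 0, outoff = 256 KiB, len = 100 KiB	-> clone only if
     *	the range runs to EOF on the input file (toeof)
     * inoff = 4 KiB	-> misaligned: fall back to Copy, or fail with
     *	ENOSYS when COPY_FILE_RANGE_CLONE made cloning mandatory
     */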
@@ -4230,6 +4289,13 @@ relock:
/* Try again with incred. */
tryoutcred = false;
error = 0;
+ } else if (tryclone && error != 0) {
+ if (mustclone) {
+ error = ENOSYS;
+ } else {
+ tryclone = false;
+ error = 0;
+ }
}
if (error == NFSERR_STALEWRITEVERF) {
/*
@@ -4243,11 +4309,12 @@ relock:
}
}
VOP_UNLOCK(invp);
- VOP_UNLOCK(outvp);
+ if (!onevp)
+ VOP_UNLOCK(outvp); /* For onevp, same as invp. */
if (mp != NULL)
vn_finished_write(mp);
if (error == NFSERR_NOTSUPP || error == NFSERR_OFFLOADNOREQS ||
- error == NFSERR_ACCES) {
+ error == NFSERR_ACCES || error == ENOSYS) {
/*
* Unlike the NFSv4.2 Copy, vn_generic_copy_file_range() can
* use a_incred for the read and a_outcred for the write, so
@@ -4255,7 +4322,7 @@ relock:
* For NFSERR_NOTSUPP and NFSERR_OFFLOADNOREQS, the Copy can
* never succeed, so disable it.
*/
- if (error != NFSERR_ACCES) {
+ if (error != NFSERR_ACCES && error != ENOSYS) {
/* Can never do Copy on this mount. */
mtx_lock(&nmp->nm_mtx);
nmp->nm_privflag |= NFSMNTP_NOCOPY;
@@ -4596,6 +4663,7 @@ nfs_pathconf(struct vop_pathconf_args *ap)
struct nfsmount *nmp;
struct thread *td = curthread;
off_t off;
+ uint32_t clone_blksize;
bool eof, has_namedattr, named_enabled;
int attrflag, error;
struct nfsnode *np;
@@ -4604,19 +4672,22 @@ nfs_pathconf(struct vop_pathconf_args *ap)
np = VTONFS(vp);
named_enabled = false;
has_namedattr = false;
+ clone_blksize = 0;
if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX ||
ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
ap->a_name == _PC_NO_TRUNC)) ||
(NFS_ISV4(vp) && (ap->a_name == _PC_ACL_NFS4 ||
- ap->a_name == _PC_HAS_NAMEDATTR))) {
+ ap->a_name == _PC_HAS_NAMEDATTR ||
+ ap->a_name == _PC_CLONE_BLKSIZE))) {
/*
* Since only the above 4 a_names are returned by the NFSv3
* Pathconf RPC, there is no point in doing it for others.
* For NFSv4, the Pathconf RPC (actually a Getattr Op.) can
- * be used for _PC_ACL_NFS4 and _PC_HAS_NAMEDATTR as well.
+ * be used for _PC_ACL_NFS4, _PC_HAS_NAMEDATTR and
+ * _PC_CLONE_BLKSIZE as well.
*/
- error = nfsrpc_pathconf(vp, &pc, &has_namedattr, td->td_ucred,
- td, &nfsva, &attrflag);
+ error = nfsrpc_pathconf(vp, &pc, &has_namedattr, &clone_blksize,
+ td->td_ucred, td, &nfsva, &attrflag);
if (attrflag != 0)
(void) nfscl_loadattrcache(&vp, &nfsva, NULL, 0, 1);
if (error != 0)
@@ -4771,6 +4842,9 @@ nfs_pathconf(struct vop_pathconf_args *ap)
else
*ap->a_retval = 0;
break;
+ case _PC_CLONE_BLKSIZE:
+ *ap->a_retval = clone_blksize;
+ break;
default:
error = vop_stdpathconf(ap);
diff --git a/sys/fs/nfsclient/nfsmount.h b/sys/fs/nfsclient/nfsmount.h
index 37b84a015dab..ef876dd30e59 100644
--- a/sys/fs/nfsclient/nfsmount.h
+++ b/sys/fs/nfsclient/nfsmount.h
@@ -87,6 +87,7 @@ struct nfsmount {
/* unclipped, wraps to 0 */
struct __rpc_client *nm_aconn[NFS_MAXNCONN - 1]; /* Additional nconn */
/* Locked via nm_sockreq.nr_mtx */
+ uint32_t nm_cloneblksize; /* Block cloning alignment */
u_int16_t nm_krbnamelen; /* Krb5 host principal, if any */
u_int16_t nm_dirpathlen; /* and mount dirpath, for V4 */
u_int16_t nm_srvkrbnamelen; /* and the server's target name */
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index 8c427c66c156..eb6ba285f8fe 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -675,7 +675,7 @@ nfsvno_namei(struct nfsrv_descript *nd, struct nameidata *ndp,
}
if (islocked)
NFSVOPUNLOCK(dp);
- VREF(dp);
+ vref(dp);
*retdirp = dp;
if (NFSVNO_EXRDONLY(exp))
cnp->cn_flags |= RDONLY;
@@ -697,7 +697,7 @@ nfsvno_namei(struct nfsrv_descript *nd, struct nameidata *ndp,
goto out;
}
dp = rootvnode;
- VREF(dp);
+ vref(dp);
}
} else if ((nfsrv_enable_crossmntpt == 0 && NFSVNO_EXPORTED(exp)) ||
(nd->nd_flag & ND_NFSV4) == 0) {
@@ -814,7 +814,7 @@ nfsvno_namei(struct nfsrv_descript *nd, struct nameidata *ndp,
if (cnp->cn_pnbuf[0] == '/') {
vrele(ndp->ni_dvp);
ndp->ni_dvp = ndp->ni_rootdir;
- VREF(ndp->ni_dvp);
+ vref(ndp->ni_dvp);
}
ndp->ni_startdir = ndp->ni_dvp;
ndp->ni_dvp = NULL;
@@ -2113,7 +2113,8 @@ nfsvno_fillattr(struct nfsrv_descript *nd, struct mount *mp, struct vnode *vp,
struct nfsvattr *nvap, fhandle_t *fhp, int rderror, nfsattrbit_t *attrbitp,
struct ucred *cred, struct thread *p, int isdgram, int reterr,
int supports_nfsv4acls, int at_root, uint64_t mounted_on_fileno,
- bool xattrsupp, bool has_hiddensystem, bool has_namedattr)
+ bool xattrsupp, bool has_hiddensystem, bool has_namedattr,
+ uint32_t clone_blksize)
{
struct statfs *sf;
int error;
@@ -2130,9 +2131,11 @@ nfsvno_fillattr(struct nfsrv_descript *nd, struct mount *mp, struct vnode *vp,
sf = NULL;
}
}
+
error = nfsv4_fillattr(nd, mp, vp, NULL, &nvap->na_vattr, fhp, rderror,
attrbitp, cred, p, isdgram, reterr, supports_nfsv4acls, at_root,
- mounted_on_fileno, sf, xattrsupp, has_hiddensystem, has_namedattr);
+ mounted_on_fileno, sf, xattrsupp, has_hiddensystem, has_namedattr,
+ clone_blksize);
free(sf, M_TEMP);
NFSEXITCODE2(0, nd);
return (error);
@@ -2441,7 +2444,7 @@ nfsrvd_readdirplus(struct nfsrv_descript *nd, int isdgram,
struct vnode *vp, struct nfsexstuff *exp)
{
struct dirent *dp;
- u_int32_t *tl;
+ uint32_t clone_blksize, *tl;
int dirlen;
char *cpos, *cend, *rbuf;
struct vnode *nvp;
@@ -2604,6 +2607,7 @@ again:
* rpc reply
*/
if (siz == 0) {
+ateof:
vput(vp);
if (nd->nd_flag & ND_NFSV3)
nfsrv_postopattr(nd, getret, &at);
@@ -2645,6 +2649,8 @@ again:
ncookies--;
}
if (cpos >= cend || ncookies == 0) {
+ if (eofflag != 0)
+ goto ateof;
siz = fullsiz;
toff = off;
goto again;
@@ -2943,6 +2949,7 @@ again:
xattrsupp = false;
has_hiddensystem = false;
has_namedattr = false;
+ clone_blksize = 0;
if (nvp != NULL) {
supports_nfsv4acls =
nfs_supportsnfsv4acls(nvp);
@@ -2966,6 +2973,11 @@ again:
&pathval) != 0)
pathval = 0;
has_namedattr = pathval > 0;
+ pathval = 0;
+ if (VOP_PATHCONF(nvp, _PC_CLONE_BLKSIZE,
+ &pathval) != 0)
+ pathval = 0;
+ clone_blksize = pathval;
NFSVOPUNLOCK(nvp);
} else
supports_nfsv4acls = 0;
@@ -2986,14 +2998,16 @@ again:
nd->nd_cred, p, isdgram, 0,
supports_nfsv4acls, at_root,
mounted_on_fileno, xattrsupp,
- has_hiddensystem, has_namedattr);
+ has_hiddensystem, has_namedattr,
+ clone_blksize);
} else {
dirlen += nfsvno_fillattr(nd, new_mp,
nvp, nvap, &nfh, r, &attrbits,
nd->nd_cred, p, isdgram, 0,
supports_nfsv4acls, at_root,
mounted_on_fileno, xattrsupp,
- has_hiddensystem, has_namedattr);
+ has_hiddensystem, has_namedattr,
+ clone_blksize);
}
if (nvp != NULL)
vrele(nvp);
@@ -3464,11 +3478,6 @@ nfsd_excred(struct nfsrv_descript *nd, struct nfsexstuff *exp,
(nd->nd_flag & ND_AUTHNONE) != 0) {
nd->nd_cred->cr_uid = credanon->cr_uid;
nd->nd_cred->cr_gid = credanon->cr_gid;
- /*
- * 'credanon' is already a 'struct ucred' that was built
- * internally with calls to crsetgroups_and_egid(), so
- * we don't need a fallback here.
- */
crsetgroups(nd->nd_cred, credanon->cr_ngroups,
credanon->cr_groups);
} else if ((nd->nd_flag & ND_GSS) == 0) {
@@ -3740,6 +3749,7 @@ nfsrv_v4rootexport(void *argp, struct ucred *cred, struct thread *p)
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, nfsexargp->fspec);
if ((error = namei(&nd)) != 0)
goto out;
+ NDFREE_PNBUF(&nd);
error = nfsvno_getfh(nd.ni_vp, &fh, p);
vrele(nd.ni_vp);
if (!error) {
@@ -5690,7 +5700,8 @@ nfsrv_writedsdorpc(struct nfsmount *nmp, fhandle_t *fhp, off_t off, int len,
if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR)) ==
(ND_NFSV4 | ND_V4WCCATTR)) {
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL, NULL,
- NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
NFSD_DEBUG(4, "nfsrv_writedsdorpc: wcc attr=%d\n", error);
if (error != 0)
goto nfsmout;
@@ -5721,7 +5732,8 @@ nfsrv_writedsdorpc(struct nfsmount *nmp, fhandle_t *fhp, off_t off, int len,
if (error == 0) {
NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL, NULL,
- NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
}
NFSD_DEBUG(4, "nfsrv_writedsdorpc: aft loadattr=%d\n", error);
nfsmout:
@@ -5887,7 +5899,8 @@ nfsrv_allocatedsdorpc(struct nfsmount *nmp, fhandle_t *fhp, off_t off,
if (nd->nd_repstat == 0) {
NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL, NULL,
- NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
} else
error = nd->nd_repstat;
NFSD_DEBUG(4, "nfsrv_allocatedsdorpc: aft loadattr=%d\n", error);
@@ -6054,7 +6067,8 @@ nfsrv_deallocatedsdorpc(struct nfsmount *nmp, fhandle_t *fhp, off_t off,
if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR)) ==
(ND_NFSV4 | ND_V4WCCATTR)) {
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL, NULL,
- NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
NFSD_DEBUG(4, "nfsrv_deallocatedsdorpc: wcc attr=%d\n", error);
if (error != 0)
goto nfsmout;
@@ -6068,7 +6082,8 @@ nfsrv_deallocatedsdorpc(struct nfsmount *nmp, fhandle_t *fhp, off_t off,
if (nd->nd_repstat == 0) {
NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL, NULL,
- NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
} else
error = nd->nd_repstat;
NFSD_DEBUG(4, "nfsrv_deallocatedsdorpc: aft loadattr=%d\n", error);
@@ -6216,7 +6231,8 @@ nfsrv_setattrdsdorpc(fhandle_t *fhp, struct ucred *cred, NFSPROC_T *p,
if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR)) ==
(ND_NFSV4 | ND_V4WCCATTR)) {
error = nfsv4_loadattr(nd, NULL, dsnap, NULL, NULL, 0, NULL,
- NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL);
NFSD_DEBUG(4, "nfsrv_setattrdsdorpc: wcc attr=%d\n", error);
if (error != 0)
goto nfsmout;
@@ -6241,7 +6257,7 @@ nfsrv_setattrdsdorpc(fhandle_t *fhp, struct ucred *cred, NFSPROC_T *p,
NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_UNSIGNED);
error = nfsv4_loadattr(nd, NULL, dsnap, NULL, NULL, 0, NULL,
NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL,
- NULL);
+ NULL, NULL);
}
NFSD_DEBUG(4, "nfsrv_setattrdsdorpc: aft setattr loadattr=%d\n", error);
nfsmout:
@@ -6386,7 +6402,7 @@ nfsrv_setacldsdorpc(fhandle_t *fhp, struct ucred *cred, NFSPROC_T *p,
* the same type (VREG).
*/
nfsv4_fillattr(nd, NULL, vp, aclp, NULL, NULL, 0, &attrbits, NULL,
- NULL, 0, 0, 0, 0, 0, NULL, false, false, false);
+ NULL, 0, 0, 0, 0, 0, NULL, false, false, false, 0);
error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
NFS_PROG, NFS_VER4, NULL, 1, NULL, NULL);
if (error != 0) {
@@ -6530,7 +6546,7 @@ nfsrv_getattrdsrpc(fhandle_t *fhp, struct ucred *cred, NFSPROC_T *p,
if (nd->nd_repstat == 0) {
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
/*
* We can only save the updated values in the extended
* attribute if the vp is exclusively locked.
diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c
index 9eebcda548c6..921ea4887af1 100644
--- a/sys/fs/nfsserver/nfs_nfsdserv.c
+++ b/sys/fs/nfsserver/nfs_nfsdserv.c
@@ -253,6 +253,7 @@ nfsrvd_getattr(struct nfsrv_descript *nd, int isdgram,
size_t atsiz;
long pathval;
bool has_hiddensystem, has_namedattr, xattrsupp;
+ uint32_t clone_blksize;
if (nd->nd_repstat)
goto out;
@@ -330,13 +331,18 @@ nfsrvd_getattr(struct nfsrv_descript *nd, int isdgram,
&pathval) != 0)
pathval = 0;
has_namedattr = pathval > 0;
+ pathval = 0;
+ if (VOP_PATHCONF(vp, _PC_CLONE_BLKSIZE,
+ &pathval) != 0)
+ pathval = 0;
+ clone_blksize = pathval;
mp = vp->v_mount;
if (nfsrv_enable_crossmntpt != 0 &&
vp->v_type == VDIR &&
(vp->v_vflag & VV_ROOT) != 0 &&
vp != rootvnode) {
tvp = mp->mnt_vnodecovered;
- VREF(tvp);
+ vref(tvp);
at_root = 1;
} else
at_root = 0;
@@ -365,7 +371,7 @@ nfsrvd_getattr(struct nfsrv_descript *nd, int isdgram,
isdgram, 1, supports_nfsv4acls,
at_root, mounted_on_fileno,
xattrsupp, has_hiddensystem,
- has_namedattr);
+ has_namedattr, clone_blksize);
vfs_unbusy(mp);
}
vrele(vp);
@@ -1760,7 +1766,7 @@ nfsrvd_rename(struct nfsrv_descript *nd, int isdgram,
/* If this is the same file handle, just VREF() the vnode. */
if (!NFSBCMP(tfh.nfsrvfh_data, &fh, NFSX_MYFH)) {
- VREF(dp);
+ vref(dp);
tdp = dp;
tnes = *exp;
tdirfor_ret = nfsvno_getattr(tdp, &tdirfor, nd, p, 1,
@@ -4347,7 +4353,7 @@ nfsrvd_verify(struct nfsrv_descript *nd, int isdgram,
if (!nd->nd_repstat) {
nfsvno_getfs(&fs, isdgram);
error = nfsv4_loadattr(nd, vp, &nva, NULL, &fh, fhsize, NULL,
- sf, NULL, &fs, NULL, 1, &ret, NULL, NULL, NULL, p,
+ sf, NULL, &fs, NULL, 1, &ret, NULL, NULL, NULL, NULL, p,
nd->nd_cred);
if (!error) {
if (nd->nd_procnum == NFSV4OP_NVERIFY) {
@@ -6011,6 +6017,212 @@ nfsmout:
}
/*
+ * nfs clone service
+ */
+int
+nfsrvd_clone(struct nfsrv_descript *nd, __unused int isdgram,
+ vnode_t vp, vnode_t tovp, struct nfsexstuff *exp, struct nfsexstuff *toexp)
+{
+ uint32_t *tl;
+ struct nfsvattr at;
+ int error = 0, ret;
+ off_t inoff, outoff;
+ uint64_t len;
+ size_t xfer;
+ struct nfsstate inst, outst, *instp = &inst, *outstp = &outst;
+ struct nfslock inlo, outlo, *inlop = &inlo, *outlop = &outlo;
+ nfsquad_t clientid;
+ nfsv4stateid_t stateid;
+ nfsattrbit_t attrbits;
+ void *rl_rcookie, *rl_wcookie;
+ long pathval;
+
+ rl_rcookie = rl_wcookie = NULL;
+ pathval = 0;
+ if (nfsrv_maxcopyrange == 0 || nfsrv_devidcnt > 0 ||
+ VOP_PATHCONF(vp, _PC_CLONE_BLKSIZE, &pathval) != 0 ||
+ pathval == 0) {
+ /*
+ * For a pNFS server, reply NFSERR_NOTSUPP so that the client
+ * will not do the clone and will do I/O on the DS(s).
+ * If vfs.nfsd.maxcopyrange is set to 0, Clone is disabled.
+ */
+ nd->nd_repstat = NFSERR_NOTSUPP;
+ goto nfsmout;
+ }
+ NFSM_DISSECT(tl, uint32_t *, 2 * NFSX_STATEID + 3 * NFSX_HYPER);
+ instp->ls_flags = (NFSLCK_CHECK | NFSLCK_READACCESS);
+ inlop->lo_flags = NFSLCK_READ;
+ instp->ls_ownerlen = 0;
+ instp->ls_op = NULL;
+ instp->ls_uid = nd->nd_cred->cr_uid;
+ instp->ls_stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
+ clientid.lval[0] = instp->ls_stateid.other[0] = *tl++;
+ clientid.lval[1] = instp->ls_stateid.other[1] = *tl++;
+ if ((nd->nd_flag & ND_IMPLIEDCLID) != 0)
+ clientid.qval = nd->nd_clientid.qval;
+ instp->ls_stateid.other[2] = *tl++;
+ outstp->ls_flags = (NFSLCK_CHECK | NFSLCK_WRITEACCESS);
+ outlop->lo_flags = NFSLCK_WRITE;
+ outstp->ls_ownerlen = 0;
+ outstp->ls_op = NULL;
+ outstp->ls_uid = nd->nd_cred->cr_uid;
+ outstp->ls_stateid.seqid = fxdr_unsigned(uint32_t, *tl++);
+ outstp->ls_stateid.other[0] = *tl++;
+ outstp->ls_stateid.other[1] = *tl++;
+ outstp->ls_stateid.other[2] = *tl++;
+ inoff = fxdr_hyper(tl); tl += 2;
+ inlop->lo_first = inoff;
+ outoff = fxdr_hyper(tl); tl += 2;
+ outlop->lo_first = outoff;
+ len = fxdr_hyper(tl);
+ if (len == 0) {
+ /* len == 0 means to EOF. */
+ inlop->lo_end = OFF_MAX;
+ outlop->lo_end = OFF_MAX;
+ } else {
+ inlop->lo_end = inlop->lo_first + len;
+ outlop->lo_end = outlop->lo_first + len;
+ }
+
+ if ((inoff > OFF_MAX || outoff > OFF_MAX ||
+ inlop->lo_end > OFF_MAX || outlop->lo_end > OFF_MAX ||
+ inlop->lo_end < inlop->lo_first || outlop->lo_end <
+ outlop->lo_first))
+ nd->nd_repstat = NFSERR_INVAL;
+
+ if (nd->nd_repstat == 0 && vp->v_type != VREG)
+ nd->nd_repstat = NFSERR_WRONGTYPE;
+
+ /* Check permissions for the input file. */
+ NFSZERO_ATTRBIT(&attrbits);
+ NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNER);
+ ret = nfsvno_getattr(vp, &at, nd, curthread, 1, &attrbits);
+ if (nd->nd_repstat == 0)
+ nd->nd_repstat = ret;
+ if (nd->nd_repstat == 0 && (at.na_uid != nd->nd_cred->cr_uid ||
+ NFSVNO_EXSTRICTACCESS(exp)))
+ nd->nd_repstat = nfsvno_accchk(vp, VREAD, nd->nd_cred, exp,
+ curthread, NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED,
+ NULL);
+ if (nd->nd_repstat == 0)
+ nd->nd_repstat = nfsrv_lockctrl(vp, &instp, &inlop, NULL,
+ clientid, &stateid, exp, nd, curthread);
+ if (vp != tovp) {
+ NFSVOPUNLOCK(vp);
+ if (nd->nd_repstat != 0)
+ goto out;
+
+ error = NFSVOPLOCK(tovp, LK_SHARED);
+ if (error != 0)
+ goto out;
+ pathval = 0;
+ if (VOP_PATHCONF(tovp, _PC_CLONE_BLKSIZE, &pathval) != 0 ||
+ pathval == 0)
+ nd->nd_repstat = NFSERR_NOTSUPP;
+ else if (tovp->v_type != VREG)
+ nd->nd_repstat = NFSERR_WRONGTYPE;
+ }
+
+ /* For the output file, we only need the Owner attribute. */
+ ret = nfsvno_getattr(tovp, &at, nd, curthread, 1, &attrbits);
+ if (nd->nd_repstat == 0)
+ nd->nd_repstat = ret;
+ if (nd->nd_repstat == 0 && (at.na_uid != nd->nd_cred->cr_uid ||
+ NFSVNO_EXSTRICTACCESS(exp)))
+ nd->nd_repstat = nfsvno_accchk(tovp, VWRITE, nd->nd_cred, toexp,
+ curthread, NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED,
+ NULL);
+ if (nd->nd_repstat == 0)
+ nd->nd_repstat = nfsrv_lockctrl(tovp, &outstp, &outlop, NULL,
+ clientid, &stateid, toexp, nd, curthread);
+ NFSVOPUNLOCK(tovp);
+
+ /* Range lock the byte ranges for both invp and outvp. */
+ if (nd->nd_repstat == 0) {
+ for (;;) {
+ if (len == 0)
+ rl_wcookie = vn_rangelock_wlock(tovp, outoff,
+ OFF_MAX);
+ else
+ rl_wcookie = vn_rangelock_wlock(tovp, outoff,
+ outoff + len);
+ if (vp != tovp) {
+ if (len == 0)
+ rl_rcookie = vn_rangelock_tryrlock(vp,
+ inoff, OFF_MAX);
+ else
+ rl_rcookie = vn_rangelock_tryrlock(vp,
+ inoff, inoff + len);
+ if (rl_rcookie != NULL)
+ break;
+ } else {
+ rl_rcookie = NULL;
+ break;
+ }
+ vn_rangelock_unlock(tovp, rl_wcookie);
+ if (len == 0)
+ rl_rcookie = vn_rangelock_rlock(vp, inoff,
+ OFF_MAX);
+ else
+ rl_rcookie = vn_rangelock_rlock(vp, inoff,
+ inoff + len);
+ vn_rangelock_unlock(vp, rl_rcookie);
+ }
+
+ error = NFSVOPLOCK(vp, LK_SHARED);
+ if (error == 0) {
+ ret = nfsvno_getattr(vp, &at, nd, curthread, 1, NULL);
+ if (ret == 0) {
+ /*
+ * Since invp is range locked, na_size should
+ * not change.
+ */
+ if (len == 0 && at.na_size > inoff)
+ len = SSIZE_MAX; /* To EOF. */
+ else if (inoff + len > at.na_size)
+ nd->nd_repstat = NFSERR_INVAL;
+ }
+ NFSVOPUNLOCK(vp);
+ if (ret != 0 && nd->nd_repstat == 0)
+ nd->nd_repstat = ret;
+ } else if (nd->nd_repstat == 0)
+ nd->nd_repstat = error;
+ }
+
+ /*
+ * Do the actual clone.  Setting vfs.nfsd.maxcopyrange to 0
+ * disables Clone entirely (checked at the start of this
+ * function); no per-RPC size limit is applied here.
+ */
+ xfer = len;
+ if (nd->nd_repstat == 0) {
+ nd->nd_repstat = vn_copy_file_range(vp, &inoff, tovp, &outoff,
+ &xfer, COPY_FILE_RANGE_CLONE, nd->nd_cred, nd->nd_cred,
+ NULL);
+ if (nd->nd_repstat == ENOSYS)
+ nd->nd_repstat = NFSERR_INVAL;
+ }
+
+ /* Unlock the ranges. */
+ if (rl_rcookie != NULL)
+ vn_rangelock_unlock(vp, rl_rcookie);
+ if (rl_wcookie != NULL)
+ vn_rangelock_unlock(tovp, rl_wcookie);
+
+out:
+ vrele(vp);
+ vrele(tovp);
+ NFSEXITCODE2(error, nd);
+ return (error);
+nfsmout:
+ vput(vp);
+ vrele(tovp);
+ NFSEXITCODE2(error, nd);
+ return (error);
+}
+
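The range-locking loop above is a standard two-lock acquisition pattern for avoiding deadlock between concurrent clones running in opposite directions (editorial sketch; when vp == tovp only the output range is write-locked):

    /*
     * for (;;) {
     *	wlock(out range);
     *	if (tryrlock(in range))
     *		break;		// got both without blocking
     *	unlock(out range);
     *	rlock(in range);	// wait until the input range is free,
     *	unlock(in range);	// then retry from the top
     * }
     */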
+/*
* nfs seek service
*/
int
diff --git a/sys/fs/nfsserver/nfs_nfsdsocket.c b/sys/fs/nfsserver/nfs_nfsdsocket.c
index d6832b4f74be..201f3b74b946 100644
--- a/sys/fs/nfsserver/nfs_nfsdsocket.c
+++ b/sys/fs/nfsserver/nfs_nfsdsocket.c
@@ -371,7 +371,7 @@ int (*nfsrv4_ops2[NFSV42_NOPS])(struct nfsrv_descript *,
(int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
(int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
(int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
- (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
+ nfsrvd_clone,
(int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
(int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
(int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , struct nfsexstuff *, struct nfsexstuff *))0,
@@ -1110,7 +1110,7 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
if (vp != savevp) {
if (savevp)
vrele(savevp);
- VREF(vp);
+ vref(vp);
savevp = vp;
savevpnes = vpnes;
save_fsid = cur_fsid;
@@ -1155,7 +1155,7 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
nfsvno_testexp(nd,
&savevpnes);
if (nd->nd_repstat == 0) {
- VREF(savevp);
+ vref(savevp);
vrele(vp);
vp = savevp;
vpnes = savevpnes;
@@ -1235,7 +1235,7 @@ tryagain:
break;
}
}
- VREF(vp);
+ vref(vp);
if (nfsv4_opflag[op].modifyfs)
vn_start_write(vp, &temp_mp, V_WAIT);
error = (*(nfsrv4_ops1[op]))(nd, isdgram, vp,
@@ -1279,8 +1279,8 @@ tryagain:
if (nfsv4_opflag[op].modifyfs)
vn_start_write(savevp, &temp_mp, V_WAIT);
if (NFSVOPLOCK(savevp, LK_EXCLUSIVE) == 0) {
- VREF(vp);
- VREF(savevp);
+ vref(vp);
+ vref(savevp);
error = (*(nfsrv4_ops2[op]))(nd, isdgram,
savevp, vp, &savevpnes, &vpnes);
} else
@@ -1301,7 +1301,7 @@ tryagain:
lktype = LK_SHARED;
}
if (NFSVOPLOCK(vp, lktype) == 0)
- VREF(vp);
+ vref(vp);
else
nd->nd_repstat = NFSERR_PERM;
} else {
diff --git a/sys/fs/nfsserver/nfs_nfsdstate.c b/sys/fs/nfsserver/nfs_nfsdstate.c
index 2e27817389dd..111b0f26d0b5 100644
--- a/sys/fs/nfsserver/nfs_nfsdstate.c
+++ b/sys/fs/nfsserver/nfs_nfsdstate.c
@@ -4675,7 +4675,7 @@ errout:
} else if (error == 0 && procnum == NFSV4OP_CBGETATTR)
error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0,
NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL,
- NULL, p, NULL);
+ NULL, NULL, p, NULL);
m_freem(nd->nd_mrep);
}
NFSLOCKSTATE();
@@ -7731,6 +7731,7 @@ nfsrv_setdsserver(char *dspathp, char *mdspathp, NFSPROC_T *p,
NFSD_DEBUG(4, "lookup=%d\n", error);
if (error != 0)
return (error);
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
NFSD_DEBUG(4, "dspath not dir\n");
@@ -7767,6 +7768,7 @@ nfsrv_setdsserver(char *dspathp, char *mdspathp, NFSPROC_T *p,
NFSD_DEBUG(4, "dsdirpath=%s lookup=%d\n", dsdirpath, error);
if (error != 0)
break;
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
error = ENOTDIR;
@@ -7795,6 +7797,7 @@ nfsrv_setdsserver(char *dspathp, char *mdspathp, NFSPROC_T *p,
NFSD_DEBUG(4, "mds lookup=%d\n", error);
if (error != 0)
goto out;
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
error = ENOTDIR;
@@ -8654,6 +8657,7 @@ nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf,
NFSD_DEBUG(4, "lookup=%d\n", error);
if (error != 0)
return (error);
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VREG) {
vput(nd.ni_vp);
NFSD_DEBUG(4, "mdspath not reg\n");
@@ -8675,6 +8679,7 @@ nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf,
vput(vp);
return (error);
}
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
vput(vp);
@@ -8717,6 +8722,7 @@ nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf,
vput(curvp);
return (error);
}
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR || nd.ni_vp == curvp) {
vput(nd.ni_vp);
vput(vp);
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index 0a93878c859f..7bfdc20a3f67 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -35,7 +35,11 @@
#ifndef FS_NULL_H
#define FS_NULL_H
-#define NULLM_CACHE 0x0001
+#include <sys/ck.h>
+#include <vm/uma.h>
+
+#define NULLM_CACHE 0x0001
+#define NULLM_NOUNPBYPASS 0x0002
struct null_mount {
struct mount *nullm_vfs;
@@ -50,7 +54,7 @@ struct null_mount {
* A cache of vnode references
*/
struct null_node {
- LIST_ENTRY(null_node) null_hash; /* Hash list */
+ CK_SLIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
u_int null_flags;
@@ -61,6 +65,7 @@ struct null_node {
#define MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data))
#define VTONULL(vp) ((struct null_node *)(vp)->v_data)
+#define VTONULL_SMR(vp) ((struct null_node *)vn_load_v_data_smr(vp))
#define NULLTOV(xp) ((xp)->null_vnode)
int nullfs_init(struct vfsconf *vfsp);
@@ -78,10 +83,18 @@ struct vnode *null_checkvp(struct vnode *vp, char *fil, int lno);
#endif
extern struct vop_vector null_vnodeops;
+extern struct vop_vector null_vnodeops_no_unp_bypass;
-#ifdef MALLOC_DECLARE
-MALLOC_DECLARE(M_NULLFSNODE);
-#endif
+static inline bool
+null_is_nullfs_vnode(struct vnode *vp)
+{
+ const struct vop_vector *op;
+
+ op = vp->v_op;
+ return (op == &null_vnodeops || op == &null_vnodeops_no_unp_bypass);
+}
+
+extern uma_zone_t null_node_zone;
#ifdef NULLFS_DEBUG
#define NULLFSDEBUG(format, args...) printf(format ,## args)
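A hypothetical caller sketch: with two vop vectors in play, comparing v_op against null_vnodeops alone would miss mounts using the no-unp-bypass variant, so callers go through the helper:

    if (null_is_nullfs_vnode(vp))
        lowervp = VTONULL(vp)->null_lowervp;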
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 7dcc83880bb9..a843ae44f121 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -36,14 +36,19 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/smr.h>
#include <sys/vnode.h>
#include <fs/nullfs/null.h>
+#include <vm/uma.h>
+
+VFS_SMR_DECLARE;
+
/*
* Null layer cache:
* Each cache entry holds a reference to the lower vnode
@@ -54,12 +59,12 @@
#define NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])
-static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+static CK_SLIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static struct rwlock null_hash_lock;
static u_long null_hash_mask;
static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
-MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");
+uma_zone_t __read_mostly null_node_zone;
static void null_hashins(struct mount *, struct null_node *);
@@ -73,6 +78,10 @@ nullfs_init(struct vfsconf *vfsp)
null_node_hashtbl = hashinit(desiredvnodes, M_NULLFSHASH,
&null_hash_mask);
rw_init(&null_hash_lock, "nullhs");
+ null_node_zone = uma_zcreate("nullfs node", sizeof(struct null_node),
+ NULL, NULL, NULL, NULL, 0, UMA_ZONE_ZINIT);
+ VFS_SMR_ZONE_SET(null_node_zone);
+
return (0);
}
@@ -80,6 +89,7 @@ int
nullfs_uninit(struct vfsconf *vfsp)
{
+ uma_zdestroy(null_node_zone);
rw_destroy(&null_hash_lock);
hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_hash_mask);
return (0);
@@ -96,7 +106,7 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
struct null_node *a;
struct vnode *vp;
- ASSERT_VOP_LOCKED(lowervp, "null_hashget");
+ ASSERT_VOP_LOCKED(lowervp, __func__);
rw_assert(&null_hash_lock, RA_LOCKED);
/*
@@ -106,37 +116,57 @@ null_hashget_locked(struct mount *mp, struct vnode *lowervp)
* reference count (but NOT the lower vnode's VREF counter).
*/
hd = NULL_NHASH(lowervp);
- LIST_FOREACH(a, hd, null_hash) {
- if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
- /*
- * Since we have the lower node locked the nullfs
- * node can not be in the process of recycling. If
- * it had been recycled before we grabed the lower
- * lock it would not have been found on the hash.
- */
- vp = NULLTOV(a);
- vref(vp);
- return (vp);
- }
+ CK_SLIST_FOREACH(a, hd, null_hash) {
+ if (a->null_lowervp != lowervp)
+ continue;
+ /*
+ * Since we have the lower node locked, the nullfs
+ * node cannot be in the process of being recycled. If
+ * it had been recycled before we grabbed the lower
+ * lock, it would not have been found on the hash.
+ */
+ vp = NULLTOV(a);
+ VNPASS(!VN_IS_DOOMED(vp), vp);
+ if (vp->v_mount != mp)
+ continue;
+ vref(vp);
+ return (vp);
}
- return (NULLVP);
+ return (NULL);
}
struct vnode *
null_hashget(struct mount *mp, struct vnode *lowervp)
{
struct null_node_hashhead *hd;
+ struct null_node *a;
struct vnode *vp;
+ enum vgetstate vs;
- hd = NULL_NHASH(lowervp);
- if (LIST_EMPTY(hd))
- return (NULLVP);
+ ASSERT_VOP_LOCKED(lowervp, __func__);
+ rw_assert(&null_hash_lock, RA_UNLOCKED);
- rw_rlock(&null_hash_lock);
- vp = null_hashget_locked(mp, lowervp);
- rw_runlock(&null_hash_lock);
-
- return (vp);
+ vfs_smr_enter();
+ hd = NULL_NHASH(lowervp);
+ CK_SLIST_FOREACH(a, hd, null_hash) {
+ if (a->null_lowervp != lowervp)
+ continue;
+ /*
+ * See null_hashget_locked as to why the nullfs vnode can't be
+ * doomed here.
+ */
+ vp = NULLTOV(a);
+ VNPASS(!VN_IS_DOOMED(vp), vp);
+ if (vp->v_mount != mp)
+ continue;
+ vs = vget_prep_smr(vp);
+ vfs_smr_exit();
+ VNPASS(vs != VGET_NONE, vp);
+ vget_finish_ref(vp, vs);
+ return (vp);
+ }
+ vfs_smr_exit();
+ return (NULL);
}
static void
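The lockless lookup above follows the usual vfs_smr pattern; nodes are freed through uma_zfree_smr(), so their memory cannot be reused while a reader is inside the SMR section (editorial sketch):

    /*
     * vfs_smr_enter();
     * ...walk the CK_SLIST chain without the hash lock...
     * vs = vget_prep_smr(vp);	// VGET_NONE if vp is being freed
     * vfs_smr_exit();
     * vget_finish_ref(vp, vs);	// ref was prepared under SMR
     */

Writers still serialize inserts and removals with null_hash_lock, and (in null_nodeget() below) a node is hashed only after vn_set_state(VSTATE_CONSTRUCTED), so lockless readers never observe a half-constructed vnode.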
@@ -151,7 +181,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
hd = NULL_NHASH(xp->null_lowervp);
#ifdef INVARIANTS
- LIST_FOREACH(oxp, hd, null_hash) {
+ CK_SLIST_FOREACH(oxp, hd, null_hash) {
if (oxp->null_lowervp == xp->null_lowervp &&
NULLTOV(oxp)->v_mount == mp) {
VNASSERT(0, NULLTOV(oxp),
@@ -159,7 +189,7 @@ null_hashins(struct mount *mp, struct null_node *xp)
}
}
#endif
- LIST_INSERT_HEAD(hd, xp, null_hash);
+ CK_SLIST_INSERT_HEAD(hd, xp, null_hash);
}
static void
@@ -174,7 +204,7 @@ null_destroy_proto(struct vnode *vp, void *xp)
VI_UNLOCK(vp);
vgone(vp);
vput(vp);
- free(xp, M_NULLFSNODE);
+ uma_zfree_smr(null_node_zone, xp);
}
/*
@@ -208,12 +238,14 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
* Note that duplicate can only appear in hash if the lowervp is
* locked LK_SHARED.
*/
- xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);
+ xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
- error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
+ error = getnewvnode("nullfs", mp, (MOUNTTONULLMOUNT(mp)->nullm_flags &
+ NULLM_NOUNPBYPASS) != 0 ? &null_vnodeops_no_unp_bypass :
+ &null_vnodeops, &vp);
if (error) {
vput(lowervp);
- free(xp, M_NULLFSNODE);
+ uma_zfree_smr(null_node_zone, xp);
return (error);
}
@@ -261,8 +293,8 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
return (error);
}
- null_hashins(mp, xp);
vn_set_state(vp, VSTATE_CONSTRUCTED);
+ null_hashins(mp, xp);
rw_wunlock(&null_hash_lock);
*vpp = vp;
@@ -275,9 +307,11 @@ null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
void
null_hashrem(struct null_node *xp)
{
+ struct null_node_hashhead *hd;
+ hd = NULL_NHASH(xp->null_lowervp);
rw_wlock(&null_hash_lock);
- LIST_REMOVE(xp, null_hash);
+ CK_SLIST_REMOVE(hd, xp, null_node, null_hash);
rw_wunlock(&null_hash_lock);
}
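
CK_SLIST is the concurrency-kit counterpart of SLIST: writers still serialize through null_hash_lock, but the list links are updated atomically so lockless readers always observe a consistent chain. Note that removal takes the head and the element type, since a singly-linked list must be walked to find the predecessor. The writer-side pattern, reduced to essentials (names hypothetical):

#include <sys/param.h>
#include <sys/ck.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct ex_node {
	CK_SLIST_ENTRY(ex_node) link;
};

static CK_SLIST_HEAD(ex_head, ex_node) ex_list =
    CK_SLIST_HEAD_INITIALIZER(ex_list);
static struct rwlock ex_lock;	/* assumed rw_init()ed at setup */

static void
ex_insert(struct ex_node *n)
{
	rw_wlock(&ex_lock);
	CK_SLIST_INSERT_HEAD(&ex_list, n, link);
	rw_wunlock(&ex_lock);
}

static void
ex_remove(struct ex_node *n)
{
	rw_wlock(&ex_lock);
	/* O(n): no back pointer, so removal walks from the head. */
	CK_SLIST_REMOVE(&ex_list, n, ex_node, link);
	rw_wunlock(&ex_lock);
}
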
@@ -298,7 +332,7 @@ null_checkvp(struct vnode *vp, char *fil, int lno)
panic("null_checkvp");
}
#endif
- if (a->null_lowervp == NULLVP) {
+ if (a->null_lowervp == NULL) {
/* Should never happen */
panic("null_checkvp %p", vp);
}
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 4cddf24a5745..170a3dd51cd8 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -85,6 +85,10 @@ nullfs_mount(struct mount *mp)
char *target;
int error, len;
bool isvnunlocked;
+ static const char cache_opt_name[] = "cache";
+ static const char nocache_opt_name[] = "nocache";
+ static const char unixbypass_opt_name[] = "unixbypass";
+ static const char nounixbypass_opt_name[] = "nounixbypass";
NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);
@@ -116,7 +120,7 @@ nullfs_mount(struct mount *mp)
/*
* Unlock lower node to avoid possible deadlock.
*/
- if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
+ if (null_is_nullfs_vnode(mp->mnt_vnodecovered) &&
VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
VOP_UNLOCK(mp->mnt_vnodecovered);
isvnunlocked = true;
@@ -150,7 +154,7 @@ nullfs_mount(struct mount *mp)
/*
* Check multi null mount to avoid `lock against myself' panic.
*/
- if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
+ if (null_is_nullfs_vnode(mp->mnt_vnodecovered)) {
nn = VTONULL(mp->mnt_vnodecovered);
if (nn == NULL || lowerrootvp == nn->null_lowervp) {
NULLFSDEBUG("nullfs_mount: multi null mount?\n");
@@ -205,9 +209,10 @@ nullfs_mount(struct mount *mp)
MNT_IUNLOCK(mp);
}
- if (vfs_getopt(mp->mnt_optnew, "cache", NULL, NULL) == 0) {
+ if (vfs_getopt(mp->mnt_optnew, cache_opt_name, NULL, NULL) == 0) {
xmp->nullm_flags |= NULLM_CACHE;
- } else if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0) {
+ } else if (vfs_getopt(mp->mnt_optnew, nocache_opt_name, NULL,
+ NULL) == 0) {
;
} else if (null_cache_vnodes &&
(xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) == 0) {
@@ -219,6 +224,13 @@ nullfs_mount(struct mount *mp)
&xmp->notify_node);
}
+ if (vfs_getopt(mp->mnt_optnew, unixbypass_opt_name, NULL, NULL) == 0) {
+ ;
+ } else if (vfs_getopt(mp->mnt_optnew, nounixbypass_opt_name, NULL,
+ NULL) == 0) {
+ xmp->nullm_flags |= NULLM_NOUNPBYPASS;
+ }
+
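
With the pieces above, the unix-domain-socket bypass becomes selectable per mount, with bypassing remaining the default. For example (sketch):

	mount -t nullfs -o nounixbypass /lower /mnt/null

would set NULLM_NOUNPBYPASS and route the vop_unp_* operations through the standard implementations instead of the lower vnode (see the null_vnodeops_no_unp_bypass vector later in this patch).
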
if (lowerrootvp == mp->mnt_vnodecovered) {
vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
lowerrootvp->v_vflag |= VV_CROSSLOCK;
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 74c1a8f3acb6..d4baabeb40ab 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -174,6 +174,8 @@
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/smr.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/stat.h>
@@ -185,6 +187,8 @@
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
+VFS_SMR_DECLARE;
+
static int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
&null_bug_bypass, 0, "");
@@ -273,9 +277,9 @@ null_bypass(struct vop_generic_args *ap)
* are of our type. Check for and don't map any
* that aren't. (We must always map first vp or vclean fails.)
*/
- if (i != 0 && (*this_vp_p == NULLVP ||
- (*this_vp_p)->v_op != &null_vnodeops)) {
- old_vps[i] = NULLVP;
+ if (i != 0 && (*this_vp_p == NULL ||
+ !null_is_nullfs_vnode(*this_vp_p))) {
+ old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
@@ -306,7 +310,7 @@ null_bypass(struct vop_generic_args *ap)
* with the modified argument structure.
*/
if (vps_p[0] != NULL && *vps_p[0] != NULL) {
- error = VCALL(ap);
+ error = ap->a_desc->vdesc_call(ap);
} else {
printf("null_bypass: no map for %s\n", descp->vdesc_name);
error = EINVAL;
@@ -336,7 +340,7 @@ null_bypass(struct vop_generic_args *ap)
* must move lock ownership from lower to
* upper (reclaimed) vnode.
*/
- if (lvp != NULLVP) {
+ if (lvp != NULL) {
null_copy_inotify(old_vps[i], lvp,
VIRF_INOTIFY);
null_copy_inotify(old_vps[i], lvp,
@@ -494,7 +498,7 @@ null_lookup(struct vop_lookup_args *ap)
if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
if (ldvp == lvp) {
*ap->a_vpp = dvp;
- VREF(dvp);
+ vref(dvp);
vrele(lvp);
} else {
error = null_nodeget(mp, lvp, &vp);
@@ -665,7 +669,7 @@ null_remove(struct vop_remove_args *ap)
vp = ap->a_vp;
if (vrefcnt(vp) > 1) {
lvp = NULLVPTOLOWERVP(vp);
- VREF(lvp);
+ vref(lvp);
vreleit = 1;
} else
vreleit = 0;
@@ -768,83 +772,111 @@ null_rmdir(struct vop_rmdir_args *ap)
}
/*
- * We need to process our own vnode lock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
+ * We need to process our own vnode lock and then clear the interlock flag as
+ * it applies only to our vnode, not the vnodes below us on the stack.
+ *
+ * We have to hold the vnode here to solve a potential reclaim race. If we're
+ * forcibly vgone'd while we still have refs, a thread could be sleeping inside
+ * the lowervp's vop_lock routine. When we are vgone'd, we drop our last ref to
+ * the lowervp, which would allow it to be reclaimed. The lowervp could then
+ * be recycled, in which case it is not legal to be sleeping in its VOP. We
+ * prevent it from being recycled by holding the vnode here.
*/
+static struct vnode *
+null_lock_prep_with_smr(struct vop_lock1_args *ap)
+{
+ struct null_node *nn;
+ struct vnode *lvp;
+
+ lvp = NULL;
+
+ vfs_smr_enter();
+
+ nn = VTONULL_SMR(ap->a_vp);
+ if (__predict_true(nn != NULL)) {
+ lvp = nn->null_lowervp;
+ if (lvp != NULL && !vhold_smr(lvp))
+ lvp = NULL;
+ }
+
+ vfs_smr_exit();
+ return (lvp);
+}
+
+static struct vnode *
+null_lock_prep_with_interlock(struct vop_lock1_args *ap)
+{
+ struct null_node *nn;
+ struct vnode *lvp;
+
+ ASSERT_VI_LOCKED(ap->a_vp, __func__);
+
+ ap->a_flags &= ~LK_INTERLOCK;
+
+ lvp = NULL;
+
+ nn = VTONULL(ap->a_vp);
+ if (__predict_true(nn != NULL)) {
+ lvp = nn->null_lowervp;
+ if (lvp != NULL)
+ vholdnz(lvp);
+ }
+ VI_UNLOCK(ap->a_vp);
+ return (lvp);
+}
+
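
The two prep paths above pin lvp with different primitives, and the difference matters: under the vnode interlock the nullfs node is known to be alive, so vholdnz() (hold, asserting the count is already nonzero) is safe, while inside an SMR section the vnode may concurrently drop its last hold, so only the fallible vhold_smr() is legal. Schematically (peeked_under_smr is a hypothetical condition):

	if (peeked_under_smr) {
		/* Can fail: the vnode may be losing its last hold now. */
		if (!vhold_smr(lvp))
			lvp = NULL;	/* retry via the interlock path */
	} else {
		/* Interlock held and node alive: hold count is nonzero. */
		vholdnz(lvp);
	}
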
static int
null_lock(struct vop_lock1_args *ap)
{
- struct vnode *vp = ap->a_vp;
- int flags;
- struct null_node *nn;
struct vnode *lvp;
- int error;
+ int error, flags;
- if ((ap->a_flags & LK_INTERLOCK) == 0)
- VI_LOCK(vp);
- else
- ap->a_flags &= ~LK_INTERLOCK;
- flags = ap->a_flags;
- nn = VTONULL(vp);
+ if (__predict_true((ap->a_flags & LK_INTERLOCK) == 0)) {
+ lvp = null_lock_prep_with_smr(ap);
+ if (__predict_false(lvp == NULL)) {
+ VI_LOCK(ap->a_vp);
+ lvp = null_lock_prep_with_interlock(ap);
+ }
+ } else {
+ lvp = null_lock_prep_with_interlock(ap);
+ }
+
+ ASSERT_VI_UNLOCKED(ap->a_vp, __func__);
+
+ if (__predict_false(lvp == NULL))
+ return (vop_stdlock(ap));
+
+ VNPASS(lvp->v_holdcnt > 0, lvp);
+ error = VOP_LOCK(lvp, ap->a_flags);
/*
- * If we're still active we must ask the lower layer to
- * lock as ffs has special lock considerations in its
- * vop lock.
+	 * We might have slept to get the lock and someone might have
+	 * cleaned our vnode already, switching the vnode lock from the
+	 * one in lowervp to the v_lock in our own vnode structure.
+	 * Handle this case by reacquiring the correct lock in the
+	 * requested mode.
*/
- if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
- /*
- * We have to hold the vnode here to solve a potential
- * reclaim race. If we're forcibly vgone'd while we
- * still have refs, a thread could be sleeping inside
- * the lowervp's vop_lock routine. When we vgone we will
- * drop our last ref to the lowervp, which would allow it
- * to be reclaimed. The lowervp could then be recycled,
- * in which case it is not legal to be sleeping in its VOP.
- * We prevent it from being recycled by holding the vnode
- * here.
- */
- vholdnz(lvp);
- VI_UNLOCK(vp);
- error = VOP_LOCK(lvp, flags);
-
- /*
- * We might have slept to get the lock and someone might have
- * clean our vnode already, switching vnode lock from one in
- * lowervp to v_lock in our own vnode structure. Handle this
- * case by reacquiring correct lock in requested mode.
- */
- if (VTONULL(vp) == NULL && error == 0) {
- ap->a_flags &= ~LK_TYPE_MASK;
- switch (flags & LK_TYPE_MASK) {
- case LK_SHARED:
- ap->a_flags |= LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUSIVE:
- ap->a_flags |= LK_EXCLUSIVE;
- break;
- default:
- panic("Unsupported lock request %d\n",
- ap->a_flags);
- }
- VOP_UNLOCK(lvp);
- error = vop_stdlock(ap);
+ if (VTONULL(ap->a_vp) == NULL && error == 0) {
+ VOP_UNLOCK(lvp);
+
+ flags = ap->a_flags;
+ ap->a_flags &= ~LK_TYPE_MASK;
+ switch (flags & LK_TYPE_MASK) {
+ case LK_SHARED:
+ ap->a_flags |= LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUSIVE:
+ ap->a_flags |= LK_EXCLUSIVE;
+ break;
+ default:
+ panic("Unsupported lock request %d\n",
+ flags);
}
- vdrop(lvp);
- } else {
- VI_UNLOCK(vp);
error = vop_stdlock(ap);
}
-
+ vdrop(lvp);
return (error);
}
-/*
- * We need to process our own vnode unlock and then clear the
- * interlock flag as it applies only to our vnode, not the
- * vnodes below us on the stack.
- */
static int
null_unlock(struct vop_unlock_args *ap)
{
@@ -853,11 +885,20 @@ null_unlock(struct vop_unlock_args *ap)
struct vnode *lvp;
int error;
+ /*
+	 * Unlike null_lock, we don't need to hold the vnode around unlock.
+	 *
+	 * We hold the lock, which means we can't be racing against vgone.
+	 *
+	 * At the same time VOP_UNLOCK promises not to touch anything after
+	 * it finishes unlocking, just like we don't.
+ *
+ * vop_stdunlock for a doomed vnode matches doomed locking in null_lock.
+ */
nn = VTONULL(vp);
if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
- vholdnz(lvp);
error = VOP_UNLOCK(lvp);
- vdrop(lvp);
} else {
error = vop_stdunlock(ap);
}
@@ -961,7 +1002,7 @@ null_reclaim(struct vop_reclaim_args *ap)
vunref(lowervp);
else
vput(lowervp);
- free(xp, M_NULLFSNODE);
+ uma_zfree_smr(null_node_zone, xp);
return (0);
}
@@ -1215,3 +1256,11 @@ struct vop_vector null_vnodeops = {
.vop_copy_file_range = VOP_PANIC,
};
VFS_VOP_VECTOR_REGISTER(null_vnodeops);
+
+struct vop_vector null_vnodeops_no_unp_bypass = {
+ .vop_default = &null_vnodeops,
+ .vop_unp_bind = vop_stdunp_bind,
+ .vop_unp_connect = vop_stdunp_connect,
+ .vop_unp_detach = vop_stdunp_detach,
+};
+VFS_VOP_VECTOR_REGISTER(null_vnodeops_no_unp_bypass);
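
A vop_vector only needs to fill in the operations it overrides; anything left unset is resolved through the vop_default chain, so this vector behaves exactly like null_vnodeops except for the three vop_unp_* entries, which revert to the standard (non-bypassing) implementations. This is also why the pointer comparisons against &null_vnodeops earlier in the patch became null_is_nullfs_vnode(): with two registered vectors the old test would miss nounixbypass mounts. The helper presumably amounts to something like (hypothetical spelling):

static bool
null_is_nullfs_vnode(struct vnode *vp)
{
	return (vp->v_op == &null_vnodeops ||
	    vp->v_op == &null_vnodeops_no_unp_bypass);
}
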
diff --git a/sys/fs/p9fs/p9_transport.c b/sys/fs/p9fs/p9_transport.c
index c82d81fedcd7..25eee984265c 100644
--- a/sys/fs/p9fs/p9_transport.c
+++ b/sys/fs/p9fs/p9_transport.c
@@ -34,9 +34,8 @@
TAILQ_HEAD(, p9_trans_module) transports;
static void
-p9_transport_init(void)
+p9_transport_init(void *dummy __unused)
{
-
TAILQ_INIT(&transports);
}
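
Taking a (void *) argument matches the conventional SYSINIT handler shape, so the function pointer no longer needs a cast when registered. This is the usual idiom (subsystem and order below are illustrative only):

#include <sys/param.h>
#include <sys/kernel.h>

static void
example_init(void *dummy __unused)
{
	/* one-time setup */
}
SYSINIT(example, SI_SUB_DRIVERS, SI_ORDER_ANY, example_init, NULL);
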
diff --git a/sys/fs/p9fs/p9fs_vfsops.c b/sys/fs/p9fs/p9fs_vfsops.c
index e0e91e7e1709..953e6eda547a 100644
--- a/sys/fs/p9fs/p9fs_vfsops.c
+++ b/sys/fs/p9fs/p9fs_vfsops.c
@@ -287,7 +287,7 @@ p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags,
node->flags |= P9FS_NODE_DELETED;
vput(vp);
- *vpp = NULLVP;
+ *vpp = NULL;
vp = NULL;
} else {
*vpp = vp;
@@ -308,7 +308,7 @@ p9fs_vget_common(struct mount *mp, struct p9fs_node *np, int flags,
/* Allocate a new vnode. */
if ((error = getnewvnode("p9fs", mp, &p9fs_vnops, &vp)) != 0) {
- *vpp = NULLVP;
+ *vpp = NULL;
P9_DEBUG(ERROR, "%s: getnewvnode failed: %d\n", __func__, error);
return (error);
}
@@ -397,7 +397,7 @@ out:
vput(vp);
}
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
@@ -525,14 +525,14 @@ p9fs_root(struct mount *mp, int lkflags, struct vnode **vpp)
if (vfid == NULL && clnt->trans_status == P9FS_BEGIN_DISCONNECT)
vfid = vmp->p9fs_session.mnt_fid;
else {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
}
error = p9fs_vget_common(mp, np, lkflags, np, vfid, vpp, NULL);
if (error != 0) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
np->v_node = *vpp;
diff --git a/sys/fs/p9fs/p9fs_vnops.c b/sys/fs/p9fs/p9fs_vnops.c
index 227e2b93883e..2ed1be82b57f 100644
--- a/sys/fs/p9fs/p9fs_vnops.c
+++ b/sys/fs/p9fs/p9fs_vnops.c
@@ -233,7 +233,7 @@ p9fs_lookup(struct vop_lookup_args *ap)
dnp = P9FS_VTON(dvp);
error = 0;
flags = cnp->cn_flags;
- *vpp = NULLVP;
+ *vpp = NULL;
if (dnp == NULL)
return (ENOENT);
@@ -329,7 +329,7 @@ p9fs_lookup(struct vop_lookup_args *ap)
else
vrele(vp);
- *vpp = NULLVP;
+ *vpp = NULL;
} else if (error == ENOENT) {
if (VN_IS_DOOMED(dvp))
goto out;
@@ -341,7 +341,7 @@ p9fs_lookup(struct vop_lookup_args *ap)
}
/* Reset values */
error = 0;
- vp = NULLVP;
+ vp = NULL;
tmpchr = cnp->cn_nameptr[cnp->cn_namelen];
cnp->cn_nameptr[cnp->cn_namelen] = '\0';
@@ -1326,7 +1326,7 @@ p9fs_read(struct vop_read_args *ap)
np = P9FS_VTON(vp);
error = 0;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (vp->v_type != VREG)
return (EISDIR);
diff --git a/sys/fs/procfs/procfs.c b/sys/fs/procfs/procfs.c
index ab60ba47f322..cd66dd6f8b3b 100644
--- a/sys/fs/procfs/procfs.c
+++ b/sys/fs/procfs/procfs.c
@@ -156,42 +156,42 @@ procfs_init(PFS_INIT_ARGS)
root = pi->pi_root;
- pfs_create_link(root, "curproc", procfs_docurproc,
- NULL, NULL, NULL, 0);
- pfs_create_link(root, "self", procfs_docurproc,
- NULL, NULL, NULL, 0);
-
- dir = pfs_create_dir(root, "pid",
- procfs_attr_all_rx, NULL, NULL, PFS_PROCDEP);
- pfs_create_file(dir, "cmdline", procfs_doproccmdline,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "dbregs", procfs_doprocdbregs,
+ pfs_create_link(root, NULL, "curproc", procfs_docurproc, NULL, NULL,
+ NULL, 0);
+ pfs_create_link(root, NULL, "self", procfs_docurproc, NULL, NULL, NULL,
+ 0);
+
+ pfs_create_dir(root, &dir, "pid", procfs_attr_all_rx, NULL, NULL,
+ PFS_PROCDEP);
+ pfs_create_file(dir, NULL, "cmdline", procfs_doproccmdline, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "dbregs", procfs_doprocdbregs,
procfs_attr_rw, procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
- pfs_create_file(dir, "etype", procfs_doproctype,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "fpregs", procfs_doprocfpregs,
+ pfs_create_file(dir, NULL, "etype", procfs_doproctype, NULL, NULL, NULL,
+ PFS_RD);
+ pfs_create_file(dir, NULL, "fpregs", procfs_doprocfpregs,
procfs_attr_rw, procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
- pfs_create_file(dir, "map", procfs_doprocmap,
- NULL, procfs_notsystem, NULL, PFS_RD);
- pfs_create_file(dir, "mem", procfs_doprocmem,
- procfs_attr_rw, procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
- pfs_create_file(dir, "note", procfs_doprocnote,
- procfs_attr_w, procfs_candebug, NULL, PFS_WR);
- pfs_create_file(dir, "notepg", procfs_doprocnote,
- procfs_attr_w, procfs_candebug, NULL, PFS_WR);
- pfs_create_file(dir, "regs", procfs_doprocregs,
- procfs_attr_rw, procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
- pfs_create_file(dir, "rlimit", procfs_doprocrlimit,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "status", procfs_doprocstatus,
- NULL, NULL, NULL, PFS_RD);
- pfs_create_file(dir, "osrel", procfs_doosrel,
- procfs_attr_rw, procfs_candebug, NULL, PFS_RDWR);
-
- pfs_create_link(dir, "file", procfs_doprocfile,
- NULL, procfs_notsystem, NULL, 0);
- pfs_create_link(dir, "exe", procfs_doprocfile,
- NULL, procfs_notsystem, NULL, 0);
+ pfs_create_file(dir, NULL, "map", procfs_doprocmap, NULL,
+ procfs_notsystem, NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "mem", procfs_doprocmem, procfs_attr_rw,
+ procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
+ pfs_create_file(dir, NULL, "note", procfs_doprocnote, procfs_attr_w,
+ procfs_candebug, NULL, PFS_WR);
+ pfs_create_file(dir, NULL, "notepg", procfs_doprocnote, procfs_attr_w,
+ procfs_candebug, NULL, PFS_WR);
+ pfs_create_file(dir, NULL, "regs", procfs_doprocregs, procfs_attr_rw,
+ procfs_candebug, NULL, PFS_RDWR | PFS_RAW);
+ pfs_create_file(dir, NULL, "rlimit", procfs_doprocrlimit, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "status", procfs_doprocstatus, NULL, NULL,
+ NULL, PFS_RD);
+ pfs_create_file(dir, NULL, "osrel", procfs_doosrel, procfs_attr_rw,
+ procfs_candebug, NULL, PFS_RDWR);
+
+ pfs_create_link(dir, NULL, "file", procfs_doprocfile, NULL,
+ procfs_notsystem, NULL, 0);
+ pfs_create_link(dir, NULL, "exe", procfs_doprocfile, NULL,
+ procfs_notsystem, NULL, 0);
return (0);
}
diff --git a/sys/fs/procfs/procfs_status.c b/sys/fs/procfs/procfs_status.c
index 38070e0946bb..49c084d02ff8 100644
--- a/sys/fs/procfs/procfs_status.c
+++ b/sys/fs/procfs/procfs_status.c
@@ -141,13 +141,9 @@ procfs_doprocstatus(PFS_FILL_ARGS)
(u_long)cr->cr_uid,
(u_long)cr->cr_ruid,
(u_long)cr->cr_rgid);
-
- /* egid (cr->cr_svgid) is equal to cr_ngroups[0]
- see also getegid(2) in /sys/kern/kern_prot.c */
-
- for (i = 0; i < cr->cr_ngroups; i++) {
+ sbuf_printf(sb, ",%lu", (u_long)cr->cr_gid);
+ for (i = 0; i < cr->cr_ngroups; i++)
sbuf_printf(sb, ",%lu", (u_long)cr->cr_groups[i]);
- }
if (jailed(cr)) {
mtx_lock(&cr->cr_prison->pr_mtx);
diff --git a/sys/fs/pseudofs/pseudofs.c b/sys/fs/pseudofs/pseudofs.c
index ef45f96a6192..7a4e67455214 100644
--- a/sys/fs/pseudofs/pseudofs.c
+++ b/sys/fs/pseudofs/pseudofs.c
@@ -133,7 +133,7 @@ pfs_add_node(struct pfs_node *parent, struct pfs_node *pn)
for (iter = parent->pn_nodes; iter != NULL; iter = iter->pn_next) {
if (strcmp(pn->pn_name, iter->pn_name) != 0)
continue;
- printf("pfs_add_node: homonymous siblings: '%s/%s' type %d",
+ printf("pfs_add_node: homonymous siblings: '%s/%s' type %d\n",
parent->pn_name, pn->pn_name, pn->pn_type);
/* Do not detach, because we are not yet attached. */
pn->pn_parent = NULL;
@@ -234,81 +234,101 @@ pfs_fixup_dir(struct pfs_node *parent)
/*
* Create a directory
*/
-struct pfs_node *
-pfs_create_dir(struct pfs_node *parent, const char *name,
- pfs_attr_t attr, pfs_vis_t vis, pfs_destroy_t destroy,
- int flags)
+int
+pfs_create_dir(struct pfs_node *parent, struct pfs_node **opn,
+ const char *name, pfs_attr_t attr, pfs_vis_t vis,
+ pfs_destroy_t destroy, int flags)
{
- struct pfs_node *pn;
+ struct pfs_node *pdir, *pn;
int rc;
- pn = pfs_alloc_node_flags(parent->pn_info, name,
+	/* Preserve parent in case the caller reuses the same pointer for both. */
+ pdir = parent;
+ if (opn != NULL)
+ *opn = NULL;
+ pn = pfs_alloc_node_flags(pdir->pn_info, name,
(flags & PFS_PROCDEP) ? pfstype_procdir : pfstype_dir, flags);
if (pn == NULL)
- return (NULL);
+ return (ENOMEM);
pn->pn_attr = attr;
pn->pn_vis = vis;
pn->pn_destroy = destroy;
pn->pn_flags = flags;
- rc = pfs_add_node(parent, pn);
+ rc = pfs_add_node(pdir, pn);
if (rc == 0)
rc = pfs_fixup_dir_flags(pn, flags);
if (rc != 0) {
pfs_destroy(pn);
pn = NULL;
+ } else if (opn != NULL) {
+ *opn = pn;
}
- return (pn);
+
+ return (rc);
}
/*
* Create a file
*/
-struct pfs_node *
-pfs_create_file(struct pfs_node *parent, const char *name, pfs_fill_t fill,
- pfs_attr_t attr, pfs_vis_t vis, pfs_destroy_t destroy,
- int flags)
+int
+pfs_create_file(struct pfs_node *parent, struct pfs_node **opn,
+ const char *name, pfs_fill_t fill, pfs_attr_t attr,
+ pfs_vis_t vis, pfs_destroy_t destroy, int flags)
{
struct pfs_node *pn;
+ int rc;
+ if (opn != NULL)
+ *opn = NULL;
pn = pfs_alloc_node_flags(parent->pn_info, name, pfstype_file, flags);
if (pn == NULL)
- return (NULL);
+ return (ENOMEM);
+
pn->pn_fill = fill;
pn->pn_attr = attr;
pn->pn_vis = vis;
pn->pn_destroy = destroy;
pn->pn_flags = flags;
- if (pfs_add_node(parent, pn) != 0) {
+ if ((rc = pfs_add_node(parent, pn)) != 0) {
pfs_destroy(pn);
pn = NULL;
+ } else if (opn != NULL) {
+ *opn = pn;
}
- return (pn);
+
+ return (rc);
}
/*
* Create a symlink
*/
-struct pfs_node *
-pfs_create_link(struct pfs_node *parent, const char *name, pfs_fill_t fill,
- pfs_attr_t attr, pfs_vis_t vis, pfs_destroy_t destroy,
- int flags)
+int
+pfs_create_link(struct pfs_node *parent, struct pfs_node **opn,
+ const char *name, pfs_fill_t fill, pfs_attr_t attr,
+ pfs_vis_t vis, pfs_destroy_t destroy, int flags)
{
struct pfs_node *pn;
+ int rc;
+ if (opn != NULL)
+ *opn = NULL;
pn = pfs_alloc_node_flags(parent->pn_info, name, pfstype_symlink, flags);
if (pn == NULL)
- return (NULL);
+ return (ENOMEM);
+
pn->pn_fill = fill;
pn->pn_attr = attr;
pn->pn_vis = vis;
pn->pn_destroy = destroy;
pn->pn_flags = flags;
- if (pfs_add_node(parent, pn) != 0) {
+ if ((rc = pfs_add_node(parent, pn)) != 0) {
pfs_destroy(pn);
pn = NULL;
+ } else if (opn != NULL) {
+ *opn = pn;
}
- return (pn);
+ return (rc);
}
/*
@@ -475,6 +495,7 @@ pfs_init(struct pfs_info *pi, struct vfsconf *vfc)
if (error) {
pfs_destroy(root);
pi->pi_root = NULL;
+ pfs_fileno_uninit(pi);
return (error);
}
diff --git a/sys/fs/pseudofs/pseudofs.h b/sys/fs/pseudofs/pseudofs.h
index c60dd7b339d1..2b08dcad978d 100644
--- a/sys/fs/pseudofs/pseudofs.h
+++ b/sys/fs/pseudofs/pseudofs.h
@@ -255,17 +255,18 @@ int pfs_uninit (struct pfs_info *pi, struct vfsconf *vfc);
/*
* Directory structure construction and manipulation
*/
-struct pfs_node *pfs_create_dir (struct pfs_node *parent, const char *name,
- pfs_attr_t attr, pfs_vis_t vis,
- pfs_destroy_t destroy, int flags);
-struct pfs_node *pfs_create_file(struct pfs_node *parent, const char *name,
- pfs_fill_t fill, pfs_attr_t attr,
- pfs_vis_t vis, pfs_destroy_t destroy,
- int flags);
-struct pfs_node *pfs_create_link(struct pfs_node *parent, const char *name,
- pfs_fill_t fill, pfs_attr_t attr,
+int pfs_create_dir (struct pfs_node *parent, struct pfs_node **opn,
+ const char *name, pfs_attr_t attr,
pfs_vis_t vis, pfs_destroy_t destroy,
int flags);
+int pfs_create_file (struct pfs_node *parent, struct pfs_node **opn,
+ const char *name, pfs_fill_t fill,
+ pfs_attr_t attr, pfs_vis_t vis,
+ pfs_destroy_t destroy, int flags);
+int pfs_create_link (struct pfs_node *parent, struct pfs_node **opn,
+ const char *name, pfs_fill_t fill,
+ pfs_attr_t attr, pfs_vis_t vis,
+ pfs_destroy_t destroy, int flags);
struct pfs_node *pfs_find_node (struct pfs_node *parent, const char *name);
void pfs_purge (struct pfs_node *pn);
int pfs_destroy (struct pfs_node *pn);
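
The pfs_create_*() constructors now report failure as an errno and hand back the new node, when wanted, through an out parameter, which is what drove the procfs_init() churn above. A caller that actually propagates errors might look like this (sketch; example_fill is a hypothetical fill routine):

static int example_fill(PFS_FILL_ARGS);	/* hypothetical */

static int
example_init_tree(struct pfs_node *root)
{
	struct pfs_node *dir;
	int error;

	error = pfs_create_dir(root, &dir, "pid", NULL, NULL, NULL,
	    PFS_PROCDEP);
	if (error != 0)
		return (error);
	/* Pass NULL for opn when the node pointer isn't needed. */
	return (pfs_create_file(dir, NULL, "status", example_fill, NULL,
	    NULL, NULL, PFS_RD));
}
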
diff --git a/sys/fs/pseudofs/pseudofs_vncache.c b/sys/fs/pseudofs/pseudofs_vncache.c
index e58aced7f81b..4fd493f8b9d3 100644
--- a/sys/fs/pseudofs/pseudofs_vncache.c
+++ b/sys/fs/pseudofs/pseudofs_vncache.c
@@ -202,7 +202,7 @@ alloc:
error = insmntque(*vpp, mp);
if (error != 0) {
free(pvd, M_PFSVNCACHE);
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
vn_set_state(*vpp, VSTATE_CONSTRUCTED);
diff --git a/sys/fs/pseudofs/pseudofs_vnops.c b/sys/fs/pseudofs/pseudofs_vnops.c
index 8cd092118d0e..a30b5e4f551d 100644
--- a/sys/fs/pseudofs/pseudofs_vnops.c
+++ b/sys/fs/pseudofs/pseudofs_vnops.c
@@ -485,7 +485,7 @@ pfs_lookup(struct vop_cachedlookup_args *va)
if (namelen == 1 && pname[0] == '.') {
pn = pd;
*vpp = vn;
- VREF(vn);
+ vref(vn);
PFS_RETURN (0);
}
diff --git a/sys/fs/smbfs/smbfs_vnops.c b/sys/fs/smbfs/smbfs_vnops.c
index 63b249c93771..e960d8d78b66 100644
--- a/sys/fs/smbfs/smbfs_vnops.c
+++ b/sys/fs/smbfs/smbfs_vnops.c
@@ -1121,13 +1121,13 @@ smbfs_lookup(struct vop_lookup_args *ap)
vput(vp);
else
vrele(vp);
- *vpp = NULLVP;
+ *vpp = NULL;
}
/*
* entry is not in the cache or has been expired
*/
error = 0;
- *vpp = NULLVP;
+ *vpp = NULL;
scred = smbfs_malloc_scred();
smb_makescred(scred, td, cnp->cn_cred);
fap = &fattr;
@@ -1174,7 +1174,7 @@ smbfs_lookup(struct vop_lookup_args *ap)
if (error)
goto out;
if (isdot) {
- VREF(dvp);
+ vref(dvp);
*vpp = dvp;
goto out;
}
diff --git a/sys/fs/tarfs/tarfs_vfsops.c b/sys/fs/tarfs/tarfs_vfsops.c
index a534b18ebf34..4cc70e4d5781 100644
--- a/sys/fs/tarfs/tarfs_vfsops.c
+++ b/sys/fs/tarfs/tarfs_vfsops.c
@@ -1201,7 +1201,7 @@ tarfs_vget(struct mount *mp, ino_t ino, int lkflags, struct vnode **vpp)
return (0);
bad:
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
@@ -1220,7 +1220,7 @@ tarfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
error = VFS_VGET(mp, tfp->ino, LK_EXCLUSIVE, &nvp);
if (error != 0) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
tnp = VP_TO_TARFS_NODE(nvp);
@@ -1228,7 +1228,7 @@ tarfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
tnp->gen != tfp->gen ||
tnp->nlink <= 0) {
vput(nvp);
- *vpp = NULLVP;
+ *vpp = NULL;
return (ESTALE);
}
*vpp = nvp;
diff --git a/sys/fs/tarfs/tarfs_vnops.c b/sys/fs/tarfs/tarfs_vnops.c
index afb8e05f5929..70fd7a441d81 100644
--- a/sys/fs/tarfs/tarfs_vnops.c
+++ b/sys/fs/tarfs/tarfs_vnops.c
@@ -208,8 +208,7 @@ tarfs_getattr(struct vop_getattr_args *ap)
vap->va_birthtime = tnp->birthtime;
vap->va_gen = tnp->gen;
vap->va_flags = tnp->flags;
- vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
- tnp->rdev : NODEV;
+ vap->va_rdev = VN_ISDEV(vp) ? tnp->rdev : NODEV;
vap->va_bytes = round_page(tnp->physize);
vap->va_filerev = 0;
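
VN_ISDEV() and its relatives fold the recurring "is this a character or block device" test into one place. Judging by the expressions they replace throughout this patch, the definitions plausibly amount to (hypothetical spellings; the real ones live in sys/vnode.h):

#define	VTYPE_ISDEV(t)		((t) == VCHR || (t) == VBLK)
#define	VN_ISDEV(vp)		VTYPE_ISDEV((vp)->v_type)
#define	VATTR_ISDEV(vap)	VTYPE_ISDEV((vap)->va_type)
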
@@ -232,7 +231,7 @@ tarfs_lookup(struct vop_cachedlookup_args *ap)
vpp = ap->a_vpp;
cnp = ap->a_cnp;
- *vpp = NULLVP;
+ *vpp = NULL;
dirnode = VP_TO_TARFS_NODE(dvp);
parent = dirnode->parent;
tmp = dirnode->tmp;
@@ -257,7 +256,7 @@ tarfs_lookup(struct vop_cachedlookup_args *ap)
if (error != 0)
return (error);
} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
- VREF(dvp);
+ vref(dvp);
*vpp = dvp;
#ifdef TARFS_DEBUG
} else if (dirnode == dirnode->tmp->root &&
@@ -335,6 +334,10 @@ tarfs_readdir(struct vop_readdir_args *ap)
tnp, tnp->name, uio->uio_offset, uio->uio_resid);
if (uio->uio_offset == TARFS_COOKIE_EOF) {
+ if (eofflag != NULL) {
+ TARFS_DPF(VNODE, "%s: Setting EOF flag\n", __func__);
+ *eofflag = 1;
+ }
TARFS_DPF(VNODE, "%s: EOF\n", __func__);
return (0);
}
@@ -515,7 +518,7 @@ tarfs_read(struct vop_read_args *ap)
uiop = ap->a_uio;
vp = ap->a_vp;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (vp->v_type != VREG)
@@ -582,7 +585,7 @@ tarfs_reclaim(struct vop_reclaim_args *ap)
vfs_hash_remove(vp);
TARFS_NODE_LOCK(tnp);
- tnp->vnode = NULLVP;
+ tnp->vnode = NULL;
vp->v_data = NULL;
TARFS_NODE_UNLOCK(tnp);
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 1237f6b92cdb..dd281d18d87d 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -551,7 +551,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, __enum_uint8(vtype)
MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
MPASS((type == VLNK) ^ (target == NULL));
- MPASS((type == VBLK || type == VCHR) ^ (rdev == VNOVAL));
+ MPASS(VTYPE_ISDEV(type) ^ (rdev == VNOVAL));
if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
return (ENOSPC);
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 79b6c8b2e6a1..5082ee1ebdd0 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -98,7 +98,7 @@ tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
/* Caller assumes responsibility for ensuring access (VEXEC). */
dnode = VP_TO_TMPFS_DIR(dvp);
- *vpp = NULLVP;
+ *vpp = NULL;
/* We cannot be requesting the parent directory of the root node. */
MPASS(IMPLIES(dnode->tn_type == VDIR &&
@@ -120,7 +120,7 @@ tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
if (error != 0)
goto out;
} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
- VREF(dvp);
+ vref(dvp);
*vpp = dvp;
error = 0;
} else {
@@ -222,7 +222,7 @@ out:
* locked.
*/
if (error == 0) {
- MPASS(*vpp != NULLVP);
+ MPASS(*vpp != NULL);
ASSERT_VOP_LOCKED(*vpp, __func__);
} else {
MPASS(*vpp == NULL);
@@ -280,8 +280,7 @@ tmpfs_mknod(struct vop_mknod_args *v)
struct componentname *cnp = v->a_cnp;
struct vattr *vap = v->a_vap;
- if (vap->va_type != VBLK && vap->va_type != VCHR &&
- vap->va_type != VFIFO)
+ if (!VATTR_ISDEV(vap) && vap->va_type != VFIFO)
return (EINVAL);
return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
@@ -462,8 +461,7 @@ tmpfs_stat(struct vop_stat_args *v)
sb->st_nlink = node->tn_links;
sb->st_uid = node->tn_uid;
sb->st_gid = node->tn_gid;
- sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
- node->tn_rdev : NODEV;
+ sb->st_rdev = VN_ISDEV(vp) ? node->tn_rdev : NODEV;
sb->st_size = node->tn_size;
sb->st_atim.tv_sec = node->tn_atime.tv_sec;
sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
@@ -521,8 +519,7 @@ tmpfs_getattr(struct vop_getattr_args *v)
vap->va_birthtime = node->tn_birthtime;
vap->va_gen = node->tn_gen;
vap->va_flags = node->tn_flags;
- vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
- node->tn_rdev : NODEV;
+ vap->va_rdev = VN_ISDEV(vp) ? node->tn_rdev : NODEV;
if (vp->v_type == VREG) {
#ifdef __ILP32__
vm_object_t obj = node->tn_reg.tn_aobj;
@@ -1918,7 +1915,7 @@ tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
node = VP_TO_TMPFS_NODE(vp);
tmp = VFS_TO_TMPFS(vp->v_mount);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
@@ -1956,7 +1953,7 @@ tmpfs_getextattr(struct vop_getextattr_args *ap)
int error;
node = VP_TO_TMPFS_NODE(vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
@@ -1993,7 +1990,7 @@ tmpfs_listextattr(struct vop_listextattr_args *ap)
int error;
node = VP_TO_TMPFS_NODE(vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
@@ -2037,7 +2034,7 @@ tmpfs_setextattr(struct vop_setextattr_args *ap)
tmp = VFS_TO_TMPFS(vp->v_mount);
attr_size = ap->a_uio->uio_resid;
diff = 0;
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
diff --git a/sys/fs/udf/osta.c b/sys/fs/udf/osta.c
index f79b86993367..1a083d8c26b1 100644
--- a/sys/fs/udf/osta.c
+++ b/sys/fs/udf/osta.c
@@ -383,7 +383,7 @@ int UDFTransName(
int maxFilenameLen;
/* Translate extension, and store it in ext. */
for(index = 0; index<EXT_SIZE &&
- extIndex + index +1 < udfLen; index++ ) {
+ extIndex + index +1 < udfLen; index++) {
current = udfName[extIndex + index + 1];
if (IsIllegal(current) ||
!UnicodeIsPrint(current)) {
@@ -432,7 +432,7 @@ int UDFTransName(
/* Place a translated extension at end, if found. */
if (hasExt) {
newName[newIndex++] = PERIOD;
- for (index = 0;index < localExtIndex ;index++ ) {
+ for (index = 0; index < localExtIndex; index++) {
newName[newIndex++] = ext[index];
}
}
diff --git a/sys/fs/udf/udf_vfsops.c b/sys/fs/udf/udf_vfsops.c
index c5ef1f686093..c1627285a174 100644
--- a/sys/fs/udf/udf_vfsops.c
+++ b/sys/fs/udf/udf_vfsops.c
@@ -736,14 +736,14 @@ udf_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
ifhp = (struct ifid *)fhp;
if ((error = VFS_VGET(mp, ifhp->ifid_ino, LK_EXCLUSIVE, &nvp)) != 0) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (error);
}
np = VTON(nvp);
fsize = le64toh(np->fentry->inf_len);
if (fsize > OFF_MAX) {
- *vpp = NULLVP;
+ *vpp = NULL;
return (EIO);
}
diff --git a/sys/fs/udf/udf_vnops.c b/sys/fs/udf/udf_vnops.c
index 37889241e8c3..ec61618b6e18 100644
--- a/sys/fs/udf/udf_vnops.c
+++ b/sys/fs/udf/udf_vnops.c
@@ -1226,7 +1226,7 @@ lookloop:
if (flags & ISDOTDOT) {
error = vn_vget_ino(dvp, id, lkflags, &tdp);
} else if (node->hash_id == id) {
- VREF(dvp); /* we want ourself, ie "." */
+			vref(dvp);	/* we want ourselves, i.e. "." */
/*
* When we lookup "." we still can be asked to lock it
* differently.
diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c
index edcc6716b674..b6d6db60ca3d 100644
--- a/sys/fs/unionfs/union_subr.c
+++ b/sys/fs/unionfs/union_subr.c
@@ -160,7 +160,7 @@ unionfs_get_cached_vnode_locked(struct vnode *lookup, struct vnode *dvp)
if (VN_IS_DOOMED(vp) ||
((vp->v_iflag & VI_DOINGINACT) != 0)) {
VI_UNLOCK(vp);
- vp = NULLVP;
+ vp = NULL;
} else {
vrefl(vp);
VI_UNLOCK(vp);
@@ -169,7 +169,7 @@ unionfs_get_cached_vnode_locked(struct vnode *lookup, struct vnode *dvp)
}
}
- return (NULLVP);
+ return (NULL);
}
@@ -182,11 +182,11 @@ unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
{
struct vnode *vp;
- vp = NULLVP;
+ vp = NULL;
VI_LOCK(dvp);
- if (uvp != NULLVP)
+ if (uvp != NULL)
vp = unionfs_get_cached_vnode_locked(uvp, dvp);
- else if (lvp != NULLVP)
+ else if (lvp != NULL)
vp = unionfs_get_cached_vnode_locked(lvp, dvp);
VI_UNLOCK(dvp);
@@ -203,22 +203,22 @@ unionfs_ins_cached_vnode(struct unionfs_node *uncp,
struct unionfs_node_hashhead *hd;
struct vnode *vp;
- vp = NULLVP;
+ vp = NULL;
VI_LOCK(dvp);
- if (uncp->un_uppervp != NULLVP) {
+ if (uncp->un_uppervp != NULL) {
ASSERT_VOP_ELOCKED(uncp->un_uppervp, __func__);
KASSERT(uncp->un_uppervp->v_type == VDIR,
("%s: v_type != VDIR", __func__));
vp = unionfs_get_cached_vnode_locked(uncp->un_uppervp, dvp);
- } else if (uncp->un_lowervp != NULLVP) {
+ } else if (uncp->un_lowervp != NULL) {
ASSERT_VOP_ELOCKED(uncp->un_lowervp, __func__);
KASSERT(uncp->un_lowervp->v_type == VDIR,
("%s: v_type != VDIR", __func__));
vp = unionfs_get_cached_vnode_locked(uncp->un_lowervp, dvp);
}
- if (vp == NULLVP) {
- hd = unionfs_get_hashhead(dvp, (uncp->un_uppervp != NULLVP ?
- uncp->un_uppervp : uncp->un_lowervp));
+ if (vp == NULL) {
+ hd = unionfs_get_hashhead(dvp, (uncp->un_uppervp != NULL ?
+ uncp->un_uppervp : uncp->un_lowervp));
LIST_INSERT_HEAD(hd, uncp, un_hash);
}
VI_UNLOCK(dvp);
@@ -233,8 +233,8 @@ static void
unionfs_rem_cached_vnode(struct unionfs_node *unp, struct vnode *dvp)
{
KASSERT(unp != NULL, ("%s: null node", __func__));
- KASSERT(dvp != NULLVP,
- ("%s: null parent vnode", __func__));
+ KASSERT(dvp != NULL,
+ ("%s: null parent vnode", __func__));
VI_LOCK(dvp);
if (unp->un_hash.le_prev != NULL) {
@@ -274,13 +274,13 @@ unionfs_nodeget_cleanup(struct vnode *vp, struct unionfs_node *unp)
vgone(vp);
vput(vp);
- if (unp->un_dvp != NULLVP)
+ if (unp->un_dvp != NULL)
vrele(unp->un_dvp);
- if (unp->un_uppervp != NULLVP) {
+ if (unp->un_uppervp != NULL) {
vput(unp->un_uppervp);
- if (unp->un_lowervp != NULLVP)
+ if (unp->un_lowervp != NULL)
vrele(unp->un_lowervp);
- } else if (unp->un_lowervp != NULLVP)
+ } else if (unp->un_lowervp != NULL)
vput(unp->un_lowervp);
if (unp->un_hashtbl != NULL)
hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
@@ -313,21 +313,21 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
ump = MOUNTTOUNIONFSMOUNT(mp);
lkflags = (cnp ? cnp->cn_lkflags : 0);
path = (cnp ? cnp->cn_nameptr : NULL);
- *vpp = NULLVP;
+ *vpp = NULL;
- if (uppervp == NULLVP && lowervp == NULLVP)
+ if (uppervp == NULL && lowervp == NULL)
panic("%s: upper and lower are both null", __func__);
- vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);
+ vt = (uppervp != NULL ? uppervp->v_type : lowervp->v_type);
/* If it has no ISLASTCN flag, path check is skipped. */
if (cnp && !(cnp->cn_flags & ISLASTCN))
path = NULL;
/* check the cache */
- if (dvp != NULLVP && vt == VDIR) {
+ if (dvp != NULL && vt == VDIR) {
vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp);
- if (vp != NULLVP) {
+ if (vp != NULL) {
*vpp = vp;
if (lkflags != 0)
vn_lock(*vpp, lkflags | LK_RETRY);
@@ -343,11 +343,11 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
free(unp, M_UNIONFSNODE);
return (error);
}
- if (dvp != NULLVP)
+ if (dvp != NULL)
vref(dvp);
- if (uppervp != NULLVP)
+ if (uppervp != NULL)
vref(uppervp);
- if (lowervp != NULLVP)
+ if (lowervp != NULL)
vref(lowervp);
if (vt == VDIR) {
@@ -361,7 +361,7 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
unp->un_uppervp = uppervp;
unp->un_lowervp = lowervp;
unp->un_dvp = dvp;
- if (uppervp != NULLVP)
+ if (uppervp != NULL)
vp->v_vnlock = uppervp->v_vnlock;
else
vp->v_vnlock = lowervp->v_vnlock;
@@ -407,7 +407,7 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
* possibility of deadlock due to some other agent on the system
* attempting to lock those two specific vnodes in the opposite order.
*/
- if (uppervp != NULLVP)
+ if (uppervp != NULL)
vn_lock(uppervp, LK_EXCLUSIVE | LK_RETRY);
else
vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
@@ -426,16 +426,16 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
* blocked on our vnode lock, effectively also preventing unmount
* of the underlying filesystems.
*/
- VNASSERT(lowervp == NULLVP || !VN_IS_DOOMED(lowervp), vp,
+ VNASSERT(lowervp == NULL || !VN_IS_DOOMED(lowervp), vp,
("%s: doomed lowervp %p", __func__, lowervp));
- VNASSERT(uppervp == NULLVP || !VN_IS_DOOMED(uppervp), vp,
+ VNASSERT(uppervp == NULL || !VN_IS_DOOMED(uppervp), vp,
("%s: doomed lowervp %p", __func__, uppervp));
vn_set_state(vp, VSTATE_CONSTRUCTED);
- if (dvp != NULLVP && vt == VDIR)
+ if (dvp != NULL && vt == VDIR)
*vpp = unionfs_ins_cached_vnode(unp, dvp);
- if (*vpp != NULLVP) {
+ if (*vpp != NULL) {
unionfs_nodeget_cleanup(vp, unp);
if (lkflags != 0)
vn_lock(*vpp, lkflags | LK_RETRY);
@@ -484,7 +484,7 @@ unionfs_noderem(struct vnode *vp)
lvp = unp->un_lowervp;
uvp = unp->un_uppervp;
dvp = unp->un_dvp;
- unlock_lvp = (uvp == NULLVP);
+ unlock_lvp = (uvp == NULL);
/*
* Lock the lower vnode in addition to the upper vnode lock in order
@@ -496,7 +496,7 @@ unionfs_noderem(struct vnode *vp)
* Moreover, during unmount of a non-"below" unionfs mount, the lower
* root vnode will already be locked as it is the covered vnode.
*/
- if (uvp != NULLVP && lvp != NULLVP && (vp->v_vflag & VV_ROOT) == 0) {
+ if (uvp != NULL && lvp != NULL && (vp->v_vflag & VV_ROOT) == 0) {
vn_lock_pair(uvp, true, LK_EXCLUSIVE, lvp, false, LK_EXCLUSIVE);
unlock_lvp = true;
}
@@ -508,7 +508,7 @@ unionfs_noderem(struct vnode *vp)
* prevent faults in unionfs_lock().
*/
VI_LOCK(vp);
- unp->un_lowervp = unp->un_uppervp = NULLVP;
+ unp->un_lowervp = unp->un_uppervp = NULL;
vp->v_vnlock = &(vp->v_lock);
vp->v_data = NULL;
vp->v_object = NULL;
@@ -543,14 +543,14 @@ unionfs_noderem(struct vnode *vp)
("%s: write reference without upper vnode", __func__));
VOP_ADD_WRITECOUNT(uvp, -writerefs);
}
- if (uvp != NULLVP)
+ if (uvp != NULL)
vput(uvp);
if (unlock_lvp)
vput(lvp);
- else if (lvp != NULLVP)
+ else if (lvp != NULL)
vrele(lvp);
- if (dvp != NULLVP)
+ if (dvp != NULL)
unionfs_rem_cached_vnode(unp, dvp);
if (unp->un_path != NULL) {
@@ -567,7 +567,7 @@ unionfs_noderem(struct vnode *vp)
LIST_REMOVE(unsp, uns_list);
free(unsp, M_TEMP);
}
- if (dvp != NULLVP) {
+ if (dvp != NULL) {
mtx_lock(&unionfs_deferred_rele_lock);
STAILQ_INSERT_TAIL(&unionfs_deferred_rele_list, unp, un_rele);
mtx_unlock(&unionfs_deferred_rele_lock);
@@ -587,6 +587,7 @@ unionfs_find_node_status(struct unionfs_node *unp, struct thread *td)
struct unionfs_node_status *unsp;
pid_t pid;
+ MPASS(td != NULL);
pid = td->td_proc->p_pid;
ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), __func__);
@@ -612,6 +613,7 @@ unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
struct unionfs_node_status *unsp;
pid_t pid;
+ MPASS(td != NULL);
pid = td->td_proc->p_pid;
KASSERT(NULL != unspp, ("%s: NULL status", __func__));
@@ -793,7 +795,7 @@ unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
/*
* Re-cache the unionfs vnode against the upper vnode
*/
- if (dvp != NULLVP && vp->v_type == VDIR) {
+ if (dvp != NULL && vp->v_type == VDIR) {
VI_LOCK(dvp);
if (unp->un_hash.le_prev != NULL) {
LIST_REMOVE(unp, un_hash);
@@ -841,7 +843,7 @@ unionfs_set_in_progress_flag(struct vnode *vp, unsigned int flag)
if (unp == NULL)
error = ENOENT;
else if (flag == UNIONFS_COPY_IN_PROGRESS &&
- unp->un_uppervp != NULLVP)
+ unp->un_uppervp != NULL)
error = EJUSTRETURN;
else if (flag == UNIONFS_LOOKUP_IN_PROGRESS)
error = ERELOOKUP;
@@ -902,7 +904,7 @@ unionfs_mkshadowdir(struct vnode *dvp, struct vnode *vp,
ASSERT_VOP_ELOCKED(vp, __func__);
ump = MOUNTTOUNIONFSMOUNT(vp->v_mount);
unp = VTOUNIONFS(vp);
- if (unp->un_uppervp != NULLVP)
+ if (unp->un_uppervp != NULL)
return (EEXIST);
dunp = VTOUNIONFS(dvp);
udvp = dunp->un_uppervp;
@@ -914,7 +916,7 @@ unionfs_mkshadowdir(struct vnode *dvp, struct vnode *vp,
return (error);
lvp = unp->un_lowervp;
- uvp = NULLVP;
+ uvp = NULL;
credbk = cnp->cn_cred;
/* Authority change to root */
@@ -953,7 +955,7 @@ unionfs_mkshadowdir(struct vnode *dvp, struct vnode *vp,
vput(udvp);
goto unionfs_mkshadowdir_relock;
}
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
if (udvp == uvp)
vrele(uvp);
else
@@ -1218,7 +1220,7 @@ unionfs_mkwhiteout(struct vnode *dvp, struct vnode *vp,
ASSERT_VOP_ELOCKED(vp, __func__);
udvp = VTOUNIONFS(dvp)->un_uppervp;
- wvp = NULLVP;
+ wvp = NULL;
NDPREINIT(&nd);
vref(udvp);
VOP_UNLOCK(vp);
@@ -1226,7 +1228,7 @@ unionfs_mkwhiteout(struct vnode *dvp, struct vnode *vp,
pathlen, CREATE))) {
goto unionfs_mkwhiteout_cleanup;
}
- if (wvp != NULLVP) {
+ if (wvp != NULL) {
if (udvp == wvp)
vrele(wvp);
else
@@ -1281,7 +1283,7 @@ unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
ASSERT_VOP_ELOCKED(vp, __func__);
unp = VTOUNIONFS(vp);
ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
- uvp = NULLVP;
+ uvp = NULL;
lvp = unp->un_lowervp;
cred = td->td_ucred;
fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
@@ -1310,7 +1312,7 @@ unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
return (error);
}
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
if (uvp == udvp)
vrele(uvp);
else
@@ -1433,23 +1435,23 @@ unionfs_copyfile(struct vnode *vp, int docopy, struct ucred *cred,
ASSERT_VOP_ELOCKED(vp, __func__);
unp = VTOUNIONFS(vp);
lvp = unp->un_lowervp;
- uvp = NULLVP;
+ uvp = NULL;
if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
return (EROFS);
- if (unp->un_dvp == NULLVP)
+ if (unp->un_dvp == NULL)
return (EINVAL);
- if (unp->un_uppervp != NULLVP)
+ if (unp->un_uppervp != NULL)
return (EEXIST);
- udvp = NULLVP;
+ udvp = NULL;
VI_LOCK(unp->un_dvp);
dunp = VTOUNIONFS(unp->un_dvp);
if (dunp != NULL)
udvp = dunp->un_uppervp;
VI_UNLOCK(unp->un_dvp);
- if (udvp == NULLVP)
+ if (udvp == NULL)
return (EROFS);
if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
return (EROFS);
@@ -1646,7 +1648,7 @@ unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
cn.cn_cred = cred;
error = VOP_LOOKUP(uvp, &tvp, &cn);
- if (tvp != NULLVP)
+ if (tvp != NULL)
vput(tvp);
if (error != 0 && error != ENOENT && error != EJUSTRETURN)
break;
diff --git a/sys/fs/unionfs/union_vfsops.c b/sys/fs/unionfs/union_vfsops.c
index 9342317ad08e..284b24a604f4 100644
--- a/sys/fs/unionfs/union_vfsops.c
+++ b/sys/fs/unionfs/union_vfsops.c
@@ -256,7 +256,7 @@ unionfs_domount(struct mount *mp)
ump->um_lowervp = lowerrootvp;
ump->um_uppervp = upperrootvp;
}
- ump->um_rootvp = NULLVP;
+ ump->um_rootvp = NULL;
ump->um_uid = uid;
ump->um_gid = gid;
ump->um_udir = udir;
@@ -280,7 +280,7 @@ unionfs_domount(struct mount *mp)
* Get the unionfs root vnode.
*/
error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
- NULLVP, &(ump->um_rootvp), NULL);
+ NULL, &(ump->um_rootvp), NULL);
if (error != 0) {
vrele(upperrootvp);
free(ump, M_UNIONFSMNT);
@@ -558,7 +558,7 @@ unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
ump = MOUNTTOUNIONFSMOUNT(mp);
unp = VTOUNIONFS(filename_vp);
- if (unp->un_uppervp != NULLVP) {
+ if (unp->un_uppervp != NULL) {
return (VFS_EXTATTRCTL(ump->um_uppermp, cmd,
unp->un_uppervp, namespace, attrname));
} else {
diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c
index 03130f0ca949..66fee97a07d5 100644
--- a/sys/fs/unionfs/union_vnops.c
+++ b/sys/fs/unionfs/union_vnops.c
@@ -114,9 +114,9 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
dunp = VTOUNIONFS(dvp);
udvp = dunp->un_uppervp;
ldvp = dunp->un_lowervp;
- vp = uvp = lvp = NULLVP;
+ vp = uvp = lvp = NULL;
td = curthread;
- *(ap->a_vpp) = NULLVP;
+ *(ap->a_vpp) = NULL;
UNIONFS_INTERNAL_DEBUG(
"unionfs_lookup: enter: nameiop=%ld, flags=%lx, path=%s\n",
@@ -159,7 +159,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
* lookup dotdot
*/
if (cnflags & ISDOTDOT) {
- if (LOOKUP != nameiop && udvp == NULLVP) {
+ if (LOOKUP != nameiop && udvp == NULL) {
error = EROFS;
goto unionfs_lookup_return;
}
@@ -170,7 +170,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
goto unionfs_lookup_return;
}
- if (udvp != NULLVP)
+ if (udvp != NULL)
dtmpvp = udvp;
else
dtmpvp = ldvp;
@@ -186,7 +186,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
* reference, or (if dvp was reclaimed) we'll need to drop
* vp's lock and reference to return early.
*/
- if (vp != NULLVP)
+ if (vp != NULL)
vput(vp);
dunp = VTOUNIONFS(dvp);
if (error == 0 && dunp == NULL)
@@ -202,7 +202,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
if (VN_IS_DOOMED(dtmpvp)) {
vput(dtmpvp);
- *(ap->a_vpp) = NULLVP;
+ *(ap->a_vpp) = NULL;
error = ENOENT;
}
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
@@ -219,11 +219,11 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
* The cost of this is that we may end up performing an unnecessary
* lower layer lookup if a whiteout is present in the upper layer.
*/
- if (ldvp != NULLVP && !(cnflags & DOWHITEOUT)) {
+ if (ldvp != NULL && !(cnflags & DOWHITEOUT)) {
struct componentname lcn;
bool is_dot;
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
vref(ldvp);
VOP_UNLOCK(dvp);
vn_lock(ldvp, LK_EXCLUSIVE | LK_RETRY);
@@ -235,18 +235,18 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
lcn.cn_flags = cnflags;
is_dot = false;
- if (udvp == NULLVP)
+ if (udvp == NULL)
unionfs_forward_vop_start(ldvp, &lkflags);
lerror = VOP_LOOKUP(ldvp, &lvp, &lcn);
- if (udvp == NULLVP &&
+ if (udvp == NULL &&
unionfs_forward_vop_finish(dvp, ldvp, lkflags)) {
- if (lvp != NULLVP)
+ if (lvp != NULL)
VOP_UNLOCK(lvp);
error = ENOENT;
goto unionfs_lookup_cleanup;
}
- if (udvp == NULLVP)
+ if (udvp == NULL)
cnp->cn_flags = lcn.cn_flags;
if (lerror == 0) {
@@ -256,11 +256,11 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
vref(dvp);
is_dot = true;
error = lerror;
- } else if (lvp != NULLVP)
+ } else if (lvp != NULL)
VOP_UNLOCK(lvp);
}
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
vput(ldvp);
vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
if (VN_IS_DOOMED(dvp))
@@ -274,13 +274,13 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
/*
* lookup upper layer
*/
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
bool iswhiteout = false;
unionfs_forward_vop_start(udvp, &lkflags);
uerror = VOP_LOOKUP(udvp, &uvp, cnp);
if (unionfs_forward_vop_finish(dvp, udvp, lkflags)) {
- if (uvp != NULLVP)
+ if (uvp != NULL)
VOP_UNLOCK(uvp);
error = ENOENT;
goto unionfs_lookup_cleanup;
@@ -288,7 +288,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
if (uerror == 0) {
if (udvp == uvp) { /* is dot */
- if (lvp != NULLVP)
+ if (lvp != NULL)
vrele(lvp);
vrele(uvp);
*(ap->a_vpp) = dvp;
@@ -296,7 +296,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
error = uerror;
goto unionfs_lookup_return;
- } else if (uvp != NULLVP)
+ } else if (uvp != NULL)
VOP_UNLOCK(uvp);
}
@@ -308,9 +308,9 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
(va.va_flags & OPAQUE))
iswhiteout = true;
- if (iswhiteout && lvp != NULLVP) {
+ if (iswhiteout && lvp != NULL) {
vrele(lvp);
- lvp = NULLVP;
+ lvp = NULL;
}
#if 0
@@ -323,29 +323,29 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
/*
* check lookup result
*/
- if (uvp == NULLVP && lvp == NULLVP) {
- error = (udvp != NULLVP ? uerror : lerror);
+ if (uvp == NULL && lvp == NULL) {
+ error = (udvp != NULL ? uerror : lerror);
goto unionfs_lookup_return;
}
/*
* check vnode type
*/
- if (uvp != NULLVP && lvp != NULLVP && uvp->v_type != lvp->v_type) {
+ if (uvp != NULL && lvp != NULL && uvp->v_type != lvp->v_type) {
vrele(lvp);
- lvp = NULLVP;
+ lvp = NULL;
}
/*
* check shadow dir
*/
- if (uerror != 0 && uerror != EJUSTRETURN && udvp != NULLVP &&
- lerror == 0 && lvp != NULLVP && lvp->v_type == VDIR &&
+ if (uerror != 0 && uerror != EJUSTRETURN && udvp != NULL &&
+ lerror == 0 && lvp != NULL && lvp->v_type == VDIR &&
!(dvp->v_mount->mnt_flag & MNT_RDONLY) &&
(1 < cnp->cn_namelen || '.' != *(cnp->cn_nameptr))) {
/* get unionfs vnode in order to create a new shadow dir. */
- error = unionfs_nodeget(dvp->v_mount, NULLVP, lvp, dvp, &vp,
- cnp);
+ error = unionfs_nodeget(dvp->v_mount, NULL, lvp, dvp, &vp,
+ cnp);
if (error != 0)
goto unionfs_lookup_cleanup;
@@ -382,7 +382,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
* get unionfs vnode.
*/
else {
- if (uvp != NULLVP)
+ if (uvp != NULL)
error = uerror;
else
error = lerror;
@@ -409,14 +409,14 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
cache_enter(dvp, vp, cnp);
unionfs_lookup_cleanup:
- if (uvp != NULLVP)
+ if (uvp != NULL)
vrele(uvp);
- if (lvp != NULLVP)
+ if (lvp != NULL)
vrele(lvp);
if (error == ENOENT && (cnflags & MAKEENTRY) != 0 &&
!VN_IS_DOOMED(dvp))
- cache_enter(dvp, NULLVP, cnp);
+ cache_enter(dvp, NULL, cnp);
unionfs_lookup_return:
unionfs_clear_in_progress_flag(dvp, UNIONFS_LOOKUP_IN_PROGRESS);
@@ -444,7 +444,7 @@ unionfs_create(struct vop_create_args *ap)
udvp = dunp->un_uppervp;
error = EROFS;
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
int lkflags;
bool vp_created = false;
unionfs_forward_vop_start(udvp, &lkflags);
@@ -457,8 +457,8 @@ unionfs_create(struct vop_create_args *ap)
}
if (error == 0) {
VOP_UNLOCK(vp);
- error = unionfs_nodeget(ap->a_dvp->v_mount, vp, NULLVP,
- ap->a_dvp, ap->a_vpp, cnp);
+ error = unionfs_nodeget(ap->a_dvp->v_mount, vp, NULL,
+ ap->a_dvp, ap->a_vpp, cnp);
vrele(vp);
} else if (vp_created)
vput(vp);
@@ -486,7 +486,7 @@ unionfs_whiteout(struct vop_whiteout_args *ap)
udvp = dunp->un_uppervp;
error = EOPNOTSUPP;
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
int lkflags;
switch (ap->a_flags) {
case CREATE:
@@ -525,7 +525,7 @@ unionfs_mknod(struct vop_mknod_args *ap)
udvp = dunp->un_uppervp;
error = EROFS;
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
int lkflags;
bool vp_created = false;
unionfs_forward_vop_start(udvp, &lkflags);
@@ -538,8 +538,8 @@ unionfs_mknod(struct vop_mknod_args *ap)
}
if (error == 0) {
VOP_UNLOCK(vp);
- error = unionfs_nodeget(ap->a_dvp->v_mount, vp, NULLVP,
- ap->a_dvp, ap->a_vpp, cnp);
+ error = unionfs_nodeget(ap->a_dvp->v_mount, vp, NULL,
+ ap->a_dvp, ap->a_vpp, cnp);
vrele(vp);
} else if (vp_created)
vput(vp);
@@ -611,7 +611,7 @@ unionfs_lock_lvp(struct vnode *vp, int *lkflags)
vn_lock(lvp, *lkflags | LK_RETRY);
if (VN_IS_DOOMED(lvp)) {
vput(lvp);
- lvp = NULLVP;
+ lvp = NULL;
vn_lock(vp, *lkflags | LK_RETRY);
}
return (lvp);
@@ -656,7 +656,7 @@ unionfs_open(struct vop_open_args *ap)
error = 0;
vp = ap->a_vp;
- targetvp = NULLVP;
+ targetvp = NULL;
cred = ap->a_cred;
td = ap->a_td;
open_lvp = lock_lvp = false;
@@ -686,10 +686,10 @@ unionfs_open(struct vop_open_args *ap)
if (targetvp == lvp &&
(ap->a_mode & FWRITE) && lvp->v_type == VREG)
- targetvp = NULLVP;
+ targetvp = NULL;
}
- if (targetvp == NULLVP) {
- if (uvp == NULLVP) {
+ if (targetvp == NULL) {
+ if (uvp == NULL) {
if ((ap->a_mode & FWRITE) && lvp->v_type == VREG) {
error = unionfs_copyfile(vp,
!(ap->a_mode & O_TRUNC), cred, td);
@@ -704,16 +704,16 @@ unionfs_open(struct vop_open_args *ap)
targetvp = uvp;
}
- if (targetvp == uvp && uvp->v_type == VDIR && lvp != NULLVP &&
+ if (targetvp == uvp && uvp->v_type == VDIR && lvp != NULL &&
unsp->uns_lower_opencnt <= 0)
open_lvp = true;
- else if (targetvp == lvp && uvp != NULLVP)
+ else if (targetvp == lvp && uvp != NULL)
lock_lvp = true;
if (lock_lvp) {
unp = NULL;
lvp = unionfs_lock_lvp(vp, &lkflags);
- if (lvp == NULLVP) {
+ if (lvp == NULL) {
error = ENOENT;
goto unionfs_open_abort;
}
@@ -736,7 +736,7 @@ unionfs_open(struct vop_open_args *ap)
if (open_lvp) {
unp = NULL;
lvp = unionfs_lock_lvp(vp, &lkflags);
- if (lvp == NULLVP) {
+ if (lvp == NULL) {
error = ENOENT;
goto unionfs_open_abort;
}
@@ -814,7 +814,7 @@ unionfs_close(struct vop_close_args *ap)
unp = VTOUNIONFS(vp);
lvp = unp->un_lowervp;
uvp = unp->un_uppervp;
- unsp = unionfs_find_node_status(unp, td);
+ unsp = (td != NULL) ? unionfs_find_node_status(unp, td) : NULL;
if (unsp == NULL ||
(unsp->uns_lower_opencnt <= 0 && unsp->uns_upper_opencnt <= 0)) {
@@ -822,7 +822,7 @@ unionfs_close(struct vop_close_args *ap)
if (unsp != NULL)
printf("unionfs_close: warning: open count is 0\n");
#endif
- if (uvp != NULLVP)
+ if (uvp != NULL)
ovp = uvp;
else
ovp = lvp;
@@ -831,11 +831,11 @@ unionfs_close(struct vop_close_args *ap)
else
ovp = lvp;
- if (ovp == lvp && uvp != NULLVP) {
+ if (ovp == lvp && uvp != NULL) {
lock_lvp = true;
unp = NULL;
lvp = unionfs_lock_lvp(vp, &lkflags);
- if (lvp == NULLVP) {
+ if (lvp == NULL) {
error = ENOENT;
goto unionfs_close_abort;
}
@@ -861,7 +861,7 @@ unionfs_close(struct vop_close_args *ap)
if (unsp->uns_node_flag & UNS_OPENL_4_READDIR) {
unp = NULL;
lvp = unionfs_lock_lvp(vp, &lkflags);
- if (lvp == NULLVP) {
+ if (lvp == NULL) {
error = ENOENT;
goto unionfs_close_abort;
}
@@ -978,7 +978,7 @@ unionfs_access(struct vop_access_args *ap)
}
}
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
error = VOP_ACCESS(uvp, accmode, ap->a_cred, td);
UNIONFS_INTERNAL_DEBUG("unionfs_access: leave (%d)\n", error);
@@ -986,7 +986,7 @@ unionfs_access(struct vop_access_args *ap)
return (error);
}
- if (lvp != NULLVP) {
+ if (lvp != NULL) {
if (accmode & VWRITE) {
if ((ump->um_uppermp->mnt_flag & MNT_RDONLY) != 0) {
switch (ap->a_vp->v_type) {
@@ -1044,7 +1044,7 @@ unionfs_getattr(struct vop_getattr_args *ap)
lvp = unp->un_lowervp;
td = curthread;
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
if ((error = VOP_GETATTR(uvp, ap->a_vap, ap->a_cred)) == 0)
ap->a_vap->va_fsid =
ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
@@ -1106,7 +1106,7 @@ unionfs_setattr(struct vop_setattr_args *ap)
vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL))
return (EROFS);
- if (uvp == NULLVP && lvp->v_type == VREG) {
+ if (uvp == NULL && lvp->v_type == VREG) {
error = unionfs_copyfile(ap->a_vp, (vap->va_size != 0),
ap->a_cred, td);
if (error != 0)
@@ -1114,7 +1114,7 @@ unionfs_setattr(struct vop_setattr_args *ap)
uvp = unp->un_uppervp;
}
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
int lkflags;
unionfs_forward_vop_start(uvp, &lkflags);
error = VOP_SETATTR(uvp, vap, ap->a_cred);
@@ -1138,7 +1138,7 @@ unionfs_read(struct vop_read_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ tvp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
error = VOP_READ(tvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
@@ -1160,7 +1160,7 @@ unionfs_write(struct vop_write_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ tvp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
unionfs_forward_vop_start(tvp, &lkflags);
error = VOP_WRITE(tvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
@@ -1190,7 +1190,7 @@ unionfs_ioctl(struct vop_ioctl_args *ap)
unionfs_tryrem_node_status(unp, unsp);
VOP_UNLOCK(ap->a_vp);
- if (ovp == NULLVP)
+ if (ovp == NULL)
return (EBADF);
error = VOP_IOCTL(ovp, ap->a_command, ap->a_data, ap->a_fflag,
@@ -1217,7 +1217,7 @@ unionfs_poll(struct vop_poll_args *ap)
unionfs_tryrem_node_status(unp, unsp);
VOP_UNLOCK(ap->a_vp);
- if (ovp == NULLVP)
+ if (ovp == NULL)
return (EBADF);
return (VOP_POLL(ovp, ap->a_events, ap->a_cred, ap->a_td));
@@ -1246,7 +1246,7 @@ unionfs_fsync(struct vop_fsync_args *ap)
unionfs_downgrade_lock(ap->a_vp, lkstatus);
- if (ovp == NULLVP)
+ if (ovp == NULL)
return (EBADF);
unionfs_forward_vop_start(ovp, &lkflags);
@@ -1289,20 +1289,20 @@ unionfs_remove(struct vop_remove_args *ap)
path = unp->un_path;
pathlen = unp->un_pathlen;
- if (udvp == NULLVP)
+ if (udvp == NULL)
return (EROFS);
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
int udvp_lkflags, uvp_lkflags;
if (ump == NULL || ump->um_whitemode == UNIONFS_WHITE_ALWAYS ||
- lvp != NULLVP)
+ lvp != NULL)
cnp->cn_flags |= DOWHITEOUT;
unionfs_forward_vop_start_pair(udvp, &udvp_lkflags,
uvp, &uvp_lkflags);
error = VOP_REMOVE(udvp, uvp, cnp);
unionfs_forward_vop_finish_pair(ap->a_dvp, udvp, udvp_lkflags,
ap->a_vp, uvp, uvp_lkflags);
- } else if (lvp != NULLVP) {
+ } else if (lvp != NULL) {
error = unionfs_mkwhiteout(ap->a_dvp, ap->a_vp, cnp, td,
path, pathlen);
}
@@ -1332,16 +1332,16 @@ unionfs_link(struct vop_link_args *ap)
dunp = VTOUNIONFS(ap->a_tdvp);
unp = NULL;
udvp = dunp->un_uppervp;
- uvp = NULLVP;
+ uvp = NULL;
cnp = ap->a_cnp;
td = curthread;
- if (udvp == NULLVP)
+ if (udvp == NULL)
return (EROFS);
unp = VTOUNIONFS(ap->a_vp);
- if (unp->un_uppervp == NULLVP) {
+ if (unp->un_uppervp == NULL) {
if (ap->a_vp->v_type != VREG)
return (EOPNOTSUPP);
@@ -1405,7 +1405,7 @@ unionfs_rename(struct vop_rename_args *ap)
/* check for cross device rename */
if (fvp->v_mount != tdvp->v_mount ||
- (tvp != NULLVP && fvp->v_mount != tvp->v_mount)) {
+ (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
if (fvp->v_op != &unionfs_vnodeops)
error = ENODEV;
else
@@ -1418,7 +1418,7 @@ unionfs_rename(struct vop_rename_args *ap)
goto unionfs_rename_abort;
KASSERT_UNIONFS_VNODE(tdvp);
- if (tvp != NULLVP)
+ if (tvp != NULL)
KASSERT_UNIONFS_VNODE(tvp);
if (fdvp != tdvp)
VI_LOCK(fdvp);
@@ -1433,7 +1433,7 @@ unionfs_rename(struct vop_rename_args *ap)
UNIONFS_INTERNAL_DEBUG("fdvp=%p, ufdvp=%p, lfdvp=%p\n",
fdvp, unp->un_uppervp, unp->un_lowervp);
#endif
- if (unp->un_uppervp == NULLVP) {
+ if (unp->un_uppervp == NULL) {
error = ENODEV;
} else {
rfdvp = unp->un_uppervp;
@@ -1460,10 +1460,10 @@ unionfs_rename(struct vop_rename_args *ap)
* If we only have a lower vnode, copy the source file to the upper
* FS so that the rename operation can be issued against the upper FS.
*/
- if (unp->un_uppervp == NULLVP) {
+ if (unp->un_uppervp == NULL) {
bool unlock_fdvp = false, relock_tdvp = false;
VI_UNLOCK(fvp);
- if (tvp != NULLVP)
+ if (tvp != NULL)
VOP_UNLOCK(tvp);
if (fvp->v_type == VREG) {
/*
@@ -1496,7 +1496,7 @@ unionfs_rename(struct vop_rename_args *ap)
unp = VTOUNIONFS(fvp);
if (unp == NULL)
error = ENOENT;
- else if (unp->un_uppervp == NULLVP) {
+ else if (unp->un_uppervp == NULL) {
switch (fvp->v_type) {
case VREG:
error = unionfs_copyfile(fvp, 1, fcnp->cn_cred, td);
@@ -1514,7 +1514,7 @@ unionfs_rename(struct vop_rename_args *ap)
VOP_UNLOCK(fdvp);
if (relock_tdvp)
vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
- if (tvp != NULLVP)
+ if (tvp != NULL)
vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
/*
* Since we've dropped tdvp's lock at some point in the copy
@@ -1526,7 +1526,7 @@ unionfs_rename(struct vop_rename_args *ap)
goto unionfs_rename_abort;
}
- if (unp->un_lowervp != NULLVP)
+ if (unp->un_lowervp != NULL)
fcnp->cn_flags |= DOWHITEOUT;
rfvp = unp->un_uppervp;
vref(rfvp);
@@ -1539,14 +1539,14 @@ unionfs_rename(struct vop_rename_args *ap)
UNIONFS_INTERNAL_DEBUG("tdvp=%p, utdvp=%p, ltdvp=%p\n",
tdvp, unp->un_uppervp, unp->un_lowervp);
#endif
- if (unp->un_uppervp == NULLVP) {
+ if (unp->un_uppervp == NULL) {
error = ENODEV;
goto unionfs_rename_abort;
}
rtdvp = unp->un_uppervp;
vref(rtdvp);
- if (tvp != NULLVP) {
+ if (tvp != NULL) {
unp = VTOUNIONFS(tvp);
if (unp == NULL) {
error = ENOENT;
@@ -1556,8 +1556,8 @@ unionfs_rename(struct vop_rename_args *ap)
UNIONFS_INTERNAL_DEBUG("tvp=%p, utvp=%p, ltvp=%p\n",
tvp, unp->un_uppervp, unp->un_lowervp);
#endif
- if (unp->un_uppervp == NULLVP)
- rtvp = NULLVP;
+ if (unp->un_uppervp == NULL)
+ rtvp = NULL;
else {
if (tvp->v_type == VDIR) {
error = EINVAL;
@@ -1574,7 +1574,7 @@ unionfs_rename(struct vop_rename_args *ap)
error = VOP_RENAME(rfdvp, rfvp, fcnp, rtdvp, rtvp, tcnp);
if (error == 0) {
- if (rtvp != NULLVP && rtvp->v_type == VDIR)
+ if (rtvp != NULL && rtvp->v_type == VDIR)
cache_purge(tdvp);
if (fvp->v_type == VDIR && fdvp != tdvp)
cache_purge(fdvp);
@@ -1582,8 +1582,8 @@ unionfs_rename(struct vop_rename_args *ap)
if (tdvp != rtdvp)
vrele(tdvp);
- if (tvp != rtvp && tvp != NULLVP) {
- if (rtvp == NULLVP)
+ if (tvp != rtvp && tvp != NULL) {
+ if (rtvp == NULL)
vput(tvp);
else
vrele(tvp);
@@ -1601,13 +1601,13 @@ unionfs_rename_abort:
vput(tdvp);
if (tdvp != rtdvp)
vrele(rtdvp);
- if (tvp != NULLVP) {
+ if (tvp != NULL) {
if (tdvp != tvp)
vput(tvp);
else
vrele(tvp);
}
- if (tvp != rtvp && rtvp != NULLVP)
+ if (tvp != rtvp && rtvp != NULL)
vrele(rtvp);
if (fdvp != rfdvp)
vrele(rfdvp);
@@ -1644,7 +1644,7 @@ unionfs_mkdir(struct vop_mkdir_args *ap)
lkflags = cnp->cn_lkflags;
udvp = dunp->un_uppervp;
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
/* check opaque */
if (!(cnp->cn_flags & ISWHITEOUT)) {
error = VOP_GETATTR(udvp, &va, cnp->cn_cred);
@@ -1666,8 +1666,8 @@ unionfs_mkdir(struct vop_mkdir_args *ap)
if (error == 0) {
VOP_UNLOCK(uvp);
cnp->cn_lkflags = LK_EXCLUSIVE;
- error = unionfs_nodeget(dvp->v_mount, uvp, NULLVP,
- dvp, ap->a_vpp, cnp);
+ error = unionfs_nodeget(dvp->v_mount, uvp, NULL,
+ dvp, ap->a_vpp, cnp);
vrele(uvp);
cnp->cn_lkflags = lkflags;
} else if (uvp_created)
@@ -1707,14 +1707,14 @@ unionfs_rmdir(struct vop_rmdir_args *ap)
uvp = unp->un_uppervp;
lvp = unp->un_lowervp;
- if (udvp == NULLVP)
+ if (udvp == NULL)
return (EROFS);
if (udvp == uvp)
return (EOPNOTSUPP);
- if (uvp != NULLVP) {
- if (lvp != NULLVP) {
+ if (uvp != NULL) {
+ if (lvp != NULL) {
/*
* We need to keep dvp and vp's upper vnodes locked
* going into the VOP_RMDIR() call, but the empty
@@ -1752,7 +1752,7 @@ unionfs_rmdir(struct vop_rmdir_args *ap)
return (error);
}
ump = MOUNTTOUNIONFSMOUNT(ap->a_vp->v_mount);
- if (ump->um_whitemode == UNIONFS_WHITE_ALWAYS || lvp != NULLVP)
+ if (ump->um_whitemode == UNIONFS_WHITE_ALWAYS || lvp != NULL)
cnp->cn_flags |= (DOWHITEOUT | IGNOREWHITEOUT);
int udvp_lkflags, uvp_lkflags;
unionfs_forward_vop_start_pair(udvp, &udvp_lkflags,
@@ -1760,7 +1760,7 @@ unionfs_rmdir(struct vop_rmdir_args *ap)
error = VOP_RMDIR(udvp, uvp, cnp);
unionfs_forward_vop_finish_pair(ap->a_dvp, udvp, udvp_lkflags,
ap->a_vp, uvp, uvp_lkflags);
- } else if (lvp != NULLVP) {
+ } else if (lvp != NULL) {
error = unionfs_mkwhiteout(ap->a_dvp, ap->a_vp, cnp, td,
unp->un_path, unp->un_pathlen);
}
@@ -1795,7 +1795,7 @@ unionfs_symlink(struct vop_symlink_args *ap)
lkflags = cnp->cn_lkflags;
udvp = dunp->un_uppervp;
- if (udvp != NULLVP) {
+ if (udvp != NULL) {
int udvp_lkflags;
bool uvp_created = false;
unionfs_forward_vop_start(udvp, &udvp_lkflags);
@@ -1808,8 +1808,8 @@ unionfs_symlink(struct vop_symlink_args *ap)
if (error == 0) {
VOP_UNLOCK(uvp);
cnp->cn_lkflags = LK_EXCLUSIVE;
- error = unionfs_nodeget(ap->a_dvp->v_mount, uvp, NULLVP,
- ap->a_dvp, ap->a_vpp, cnp);
+ error = unionfs_nodeget(ap->a_dvp->v_mount, uvp, NULL,
+ ap->a_dvp, ap->a_vpp, cnp);
vrele(uvp);
cnp->cn_lkflags = lkflags;
} else if (uvp_created)
@@ -1849,8 +1849,8 @@ unionfs_readdir(struct vop_readdir_args *ap)
eofflag = 0;
uio_offset_bk = 0;
uio = ap->a_uio;
- uvp = NULLVP;
- lvp = NULLVP;
+ uvp = NULL;
+ lvp = NULL;
td = uio->uio_td;
ncookies_bk = 0;
cookies_bk = NULL;
@@ -1872,8 +1872,8 @@ unionfs_readdir(struct vop_readdir_args *ap)
lvp = unp->un_lowervp;
/* check the open count. unionfs needs open before readdir. */
unionfs_get_node_status(unp, td, &unsp);
- if ((uvp != NULLVP && unsp->uns_upper_opencnt <= 0) ||
- (lvp != NULLVP && unsp->uns_lower_opencnt <= 0)) {
+ if ((uvp != NULL && unsp->uns_upper_opencnt <= 0) ||
+ (lvp != NULL && unsp->uns_lower_opencnt <= 0)) {
unionfs_tryrem_node_status(unp, unsp);
error = EBADF;
}
@@ -1883,15 +1883,15 @@ unionfs_readdir(struct vop_readdir_args *ap)
goto unionfs_readdir_exit;
/* check opaque */
- if (uvp != NULLVP && lvp != NULLVP) {
+ if (uvp != NULL && lvp != NULL) {
if ((error = VOP_GETATTR(uvp, &va, ap->a_cred)) != 0)
goto unionfs_readdir_exit;
if (va.va_flags & OPAQUE)
- lvp = NULLVP;
+ lvp = NULL;
}
/* upper only */
- if (uvp != NULLVP && lvp == NULLVP) {
+ if (uvp != NULL && lvp == NULL) {
unionfs_forward_vop_start(uvp, &lkflags);
error = VOP_READDIR(uvp, uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies);
@@ -1904,7 +1904,7 @@ unionfs_readdir(struct vop_readdir_args *ap)
}
/* lower only */
- if (uvp == NULLVP && lvp != NULLVP) {
+ if (uvp == NULL && lvp != NULL) {
unionfs_forward_vop_start(lvp, &lkflags);
error = VOP_READDIR(lvp, uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies);
@@ -1919,8 +1919,8 @@ unionfs_readdir(struct vop_readdir_args *ap)
/*
* readdir upper and lower
*/
- KASSERT(uvp != NULLVP, ("unionfs_readdir: null upper vp"));
- KASSERT(lvp != NULLVP, ("unionfs_readdir: null lower vp"));
+ KASSERT(uvp != NULL, ("unionfs_readdir: null upper vp"));
+ KASSERT(lvp != NULL, ("unionfs_readdir: null lower vp"));
if (uio->uio_offset == 0)
unsp->uns_readdir_status = 0;
@@ -2040,7 +2040,7 @@ unionfs_readlink(struct vop_readlink_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ vp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
error = VOP_READLINK(vp, ap->a_uio, ap->a_cred);
@@ -2061,7 +2061,7 @@ unionfs_getwritemount(struct vop_getwritemount_args *ap)
error = 0;
vp = ap->a_vp;
- uvp = NULLVP;
+ uvp = NULL;
VI_LOCK(vp);
unp = VTOUNIONFS(vp);
@@ -2073,7 +2073,7 @@ unionfs_getwritemount(struct vop_getwritemount_args *ap)
* We may be initiating a write operation that will produce a
* new upper vnode through CoW.
*/
- if (uvp == NULLVP && unp != NULL) {
+ if (uvp == NULL && unp != NULL) {
ovp = vp;
vp = unp->un_dvp;
/*
@@ -2086,11 +2086,11 @@ unionfs_getwritemount(struct vop_getwritemount_args *ap)
unp = VTOUNIONFS(vp);
if (unp != NULL)
uvp = unp->un_uppervp;
- if (uvp == NULLVP)
+ if (uvp == NULL)
error = EACCES;
}
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
vholdnz(uvp);
VI_UNLOCK(vp);
error = VOP_GETWRITEMOUNT(uvp, ap->a_mpp);
@@ -2141,9 +2141,9 @@ unionfs_print(struct vop_print_args *ap)
unsp->uns_upper_opencnt, unsp->uns_lower_opencnt);
*/
- if (unp->un_uppervp != NULLVP)
+ if (unp->un_uppervp != NULL)
vn_printf(unp->un_uppervp, "unionfs: upper ");
- if (unp->un_lowervp != NULLVP)
+ if (unp->un_lowervp != NULL)
vn_printf(unp->un_lowervp, "unionfs: lower ");
return (0);
@@ -2208,7 +2208,6 @@ unionfs_lock_restart:
vholdnz(tvp);
VI_UNLOCK(vp);
error = VOP_LOCK(tvp, flags);
- vdrop(tvp);
if (error == 0 && (lvp_locked || VTOUNIONFS(vp) == NULL)) {
/*
* After dropping the interlock above, there exists a window
@@ -2232,8 +2231,9 @@ unionfs_lock_restart:
* lower vnode lock here.
*/
unp = VTOUNIONFS(vp);
- if (unp == NULL || unp->un_uppervp != NULLVP) {
+ if (unp == NULL || unp->un_uppervp != NULL) {
VOP_UNLOCK(tvp);
+ vdrop(tvp);
/*
* If we previously held the lock, the upgrade may
* have temporarily dropped the lock, in which case
@@ -2249,6 +2249,7 @@ unionfs_lock_restart:
goto unionfs_lock_restart;
}
}
+ vdrop(tvp);
return (error);
}
@@ -2259,7 +2260,6 @@ unionfs_unlock(struct vop_unlock_args *ap)
struct vnode *vp;
struct vnode *tvp;
struct unionfs_node *unp;
- int error;
KASSERT_UNIONFS_VNODE(ap->a_vp);
@@ -2271,11 +2271,7 @@ unionfs_unlock(struct vop_unlock_args *ap)
tvp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
- vholdnz(tvp);
- error = VOP_UNLOCK(tvp);
- vdrop(tvp);
-
- return (error);
+ return (VOP_UNLOCK(tvp));
}
static int
@@ -2287,7 +2283,7 @@ unionfs_pathconf(struct vop_pathconf_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ vp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
return (VOP_PATHCONF(vp, ap->a_name, ap->a_retval));
}
@@ -2314,7 +2310,7 @@ unionfs_advlock(struct vop_advlock_args *ap)
unp = VTOUNIONFS(ap->a_vp);
uvp = unp->un_uppervp;
- if (uvp == NULLVP) {
+ if (uvp == NULL) {
error = unionfs_copyfile(ap->a_vp, 1, td->td_ucred, td);
if (error != 0)
goto unionfs_advlock_abort;
@@ -2360,10 +2356,10 @@ unionfs_strategy(struct vop_strategy_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ vp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
#ifdef DIAGNOSTIC
- if (vp == NULLVP)
+ if (vp == NULL)
panic("unionfs_strategy: nullvp");
if (ap->a_bp->b_iocmd == BIO_WRITE && vp == unp->un_lowervp)
@@ -2383,7 +2379,7 @@ unionfs_getacl(struct vop_getacl_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ vp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
UNIONFS_INTERNAL_DEBUG("unionfs_getacl: enter\n");
@@ -2416,13 +2412,13 @@ unionfs_setacl(struct vop_setacl_args *ap)
if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
- if (uvp == NULLVP && lvp->v_type == VREG) {
+ if (uvp == NULL && lvp->v_type == VREG) {
if ((error = unionfs_copyfile(ap->a_vp, 1, ap->a_cred, td)) != 0)
return (error);
uvp = unp->un_uppervp;
}
- if (uvp != NULLVP) {
+ if (uvp != NULL) {
int lkflags;
unionfs_forward_vop_start(uvp, &lkflags);
error = VOP_SETACL(uvp, ap->a_type, ap->a_aclp, ap->a_cred, td);
@@ -2446,7 +2442,7 @@ unionfs_aclcheck(struct vop_aclcheck_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ vp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
error = VOP_ACLCHECK(vp, ap->a_type, ap->a_aclp, ap->a_cred, ap->a_td);
@@ -2467,7 +2463,7 @@ unionfs_openextattr(struct vop_openextattr_args *ap)
vp = ap->a_vp;
unp = VTOUNIONFS(vp);
- tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
+ tvp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp);
if ((tvp == unp->un_uppervp && (unp->un_flag & UNIONFS_OPENEXTU)) ||
(tvp == unp->un_lowervp && (unp->un_flag & UNIONFS_OPENEXTL)))
@@ -2502,14 +2498,14 @@ unionfs_closeextattr(struct vop_closeextattr_args *ap)
vp = ap->a_vp;
unp = VTOUNIONFS(vp);
- tvp = NULLVP;
+ tvp = NULL;
if (unp->un_flag & UNIONFS_OPENEXTU)
tvp = unp->un_uppervp;
else if (unp->un_flag & UNIONFS_OPENEXTL)
tvp = unp->un_lowervp;
- if (tvp == NULLVP)
+ if (tvp == NULL)
return (EOPNOTSUPP);
error = VOP_CLOSEEXTATTR(tvp, ap->a_commit, ap->a_cred, ap->a_td);
@@ -2538,14 +2534,14 @@ unionfs_getextattr(struct vop_getextattr_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = NULLVP;
+ vp = NULL;
if (unp->un_flag & UNIONFS_OPENEXTU)
vp = unp->un_uppervp;
else if (unp->un_flag & UNIONFS_OPENEXTL)
vp = unp->un_lowervp;
- if (vp == NULLVP)
+ if (vp == NULL)
return (EOPNOTSUPP);
return (VOP_GETEXTATTR(vp, ap->a_attrnamespace, ap->a_name,
@@ -2569,7 +2565,7 @@ unionfs_setextattr(struct vop_setextattr_args *ap)
unp = VTOUNIONFS(ap->a_vp);
uvp = unp->un_uppervp;
lvp = unp->un_lowervp;
- ovp = NULLVP;
+ ovp = NULL;
cred = ap->a_cred;
td = ap->a_td;
@@ -2584,12 +2580,12 @@ unionfs_setextattr(struct vop_setextattr_args *ap)
else if (unp->un_flag & UNIONFS_OPENEXTL)
ovp = unp->un_lowervp;
- if (ovp == NULLVP)
+ if (ovp == NULL)
return (EOPNOTSUPP);
if (ovp == lvp && lvp->v_type == VREG) {
VOP_CLOSEEXTATTR(lvp, 0, cred, td);
- if (uvp == NULLVP &&
+ if (uvp == NULL &&
(error = unionfs_copyfile(ap->a_vp, 1, cred, td)) != 0) {
unionfs_setextattr_reopen:
unp = VTOUNIONFS(ap->a_vp);
@@ -2633,14 +2629,14 @@ unionfs_listextattr(struct vop_listextattr_args *ap)
KASSERT_UNIONFS_VNODE(ap->a_vp);
unp = VTOUNIONFS(ap->a_vp);
- vp = NULLVP;
+ vp = NULL;
if (unp->un_flag & UNIONFS_OPENEXTU)
vp = unp->un_uppervp;
else if (unp->un_flag & UNIONFS_OPENEXTL)
vp = unp->un_lowervp;
- if (vp == NULLVP)
+ if (vp == NULL)
return (EOPNOTSUPP);
return (VOP_LISTEXTATTR(vp, ap->a_attrnamespace, ap->a_uio,
@@ -2664,7 +2660,7 @@ unionfs_deleteextattr(struct vop_deleteextattr_args *ap)
unp = VTOUNIONFS(ap->a_vp);
uvp = unp->un_uppervp;
lvp = unp->un_lowervp;
- ovp = NULLVP;
+ ovp = NULL;
cred = ap->a_cred;
td = ap->a_td;
@@ -2679,12 +2675,12 @@ unionfs_deleteextattr(struct vop_deleteextattr_args *ap)
else if (unp->un_flag & UNIONFS_OPENEXTL)
ovp = unp->un_lowervp;
- if (ovp == NULLVP)
+ if (ovp == NULL)
return (EOPNOTSUPP);
if (ovp == lvp && lvp->v_type == VREG) {
VOP_CLOSEEXTATTR(lvp, 0, cred, td);
- if (uvp == NULLVP &&
+ if (uvp == NULL &&
(error = unionfs_copyfile(ap->a_vp, 1, cred, td)) != 0) {
unionfs_deleteextattr_reopen:
unp = VTOUNIONFS(ap->a_vp);
@@ -2737,13 +2733,13 @@ unionfs_setlabel(struct vop_setlabel_args *ap)
if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
return (EROFS);
- if (uvp == NULLVP && lvp->v_type == VREG) {
+ if (uvp == NULL && lvp->v_type == VREG) {
if ((error = unionfs_copyfile(ap->a_vp, 1, ap->a_cred, td)) != 0)
return (error);
uvp = unp->un_uppervp;
}
- if (uvp != NULLVP)
+ if (uvp != NULL)
error = VOP_SETLABEL(uvp, ap->a_label, ap->a_cred, td);
UNIONFS_INTERNAL_DEBUG("unionfs_setlabel: leave (%d)\n", error);
@@ -2796,10 +2792,10 @@ unionfs_vput_pair(struct vop_vput_pair_args *ap)
dvp = ap->a_dvp;
vpp = ap->a_vpp;
- vp = NULLVP;
- lvp = NULLVP;
- uvp = NULLVP;
- tvp = NULLVP;
+ vp = NULL;
+ lvp = NULL;
+ uvp = NULL;
+ tvp = NULL;
unp = NULL;
dunp = VTOUNIONFS(dvp);
@@ -2819,11 +2815,11 @@ unionfs_vput_pair(struct vop_vput_pair_args *ap)
if (vpp != NULL)
vp = *vpp;
- if (vp != NULLVP) {
+ if (vp != NULL) {
unp = VTOUNIONFS(vp);
uvp = unp->un_uppervp;
lvp = unp->un_lowervp;
- if (uvp != NULLVP)
+ if (uvp != NULL)
tvp = uvp;
else
tvp = lvp;
@@ -2838,9 +2834,9 @@ unionfs_vput_pair(struct vop_vput_pair_args *ap)
*/
if (!ap->a_unlock_vp) {
vhold(vp);
- if (uvp != NULLVP)
+ if (uvp != NULL)
vhold(uvp);
- if (lvp != NULLVP)
+ if (lvp != NULL)
vhold(lvp);
mp = vp->v_mount;
vfs_ref(mp);
@@ -2850,12 +2846,12 @@ unionfs_vput_pair(struct vop_vput_pair_args *ap)
ASSERT_VOP_LOCKED(tdvp, __func__);
ASSERT_VOP_LOCKED(tvp, __func__);
- if (tdvp == dunp->un_uppervp && tvp != NULLVP && tvp == lvp) {
+ if (tdvp == dunp->un_uppervp && tvp != NULL && tvp == lvp) {
vput(tvp);
vput(tdvp);
res = 0;
} else {
- res = VOP_VPUT_PAIR(tdvp, tvp != NULLVP ? &tvp : NULL, true);
+ res = VOP_VPUT_PAIR(tdvp, tvp != NULL ? &tvp : NULL, true);
}
ASSERT_VOP_UNLOCKED(tdvp, __func__);
@@ -2865,11 +2861,11 @@ unionfs_vput_pair(struct vop_vput_pair_args *ap)
* VOP_VPUT_PAIR() dropped the references we added to the underlying
* vnodes, now drop the caller's reference to the unionfs vnodes.
*/
- if (vp != NULLVP && ap->a_unlock_vp)
+ if (vp != NULL && ap->a_unlock_vp)
vrele(vp);
vrele(dvp);
- if (vp == NULLVP || ap->a_unlock_vp)
+ if (vp == NULL || ap->a_unlock_vp)
return (res);
/*
@@ -2887,9 +2883,9 @@ unionfs_vput_pair(struct vop_vput_pair_args *ap)
vget(vp, LK_EXCLUSIVE | LK_RETRY);
vfs_unbusy(mp);
}
- if (lvp != NULLVP)
+ if (lvp != NULL)
vdrop(lvp);
- if (uvp != NULLVP)
+ if (uvp != NULL)
vdrop(uvp);
vdrop(vp);
vfs_rel(mp);
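For context on the unionfs sweep above: NULLVP is a plain cast of NULL in sys/sys/vnode.h, so every replacement is behavior-neutral. A minimal sketch of the long-standing definition this relies on:

    /* sys/sys/vnode.h: */
    #define NULLVP	((struct vnode *)NULL)

    /* so the old and new spellings compile identically: */
    if (uvp != NULLVP)	/* old */
            ovp = uvp;
    if (uvp != NULL)	/* new */
            ovp = uvp;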
diff --git a/sys/geom/cache/g_cache.c b/sys/geom/cache/g_cache.c
index 9d0b10f4192e..c6b80786ade5 100644
--- a/sys/geom/cache/g_cache.c
+++ b/sys/geom/cache/g_cache.c
@@ -504,7 +504,7 @@ g_cache_create(struct g_class *mp, struct g_provider *pp,
return (NULL);
}
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
sc->sc_type = type;
sc->sc_bshift = bshift;
@@ -665,7 +665,7 @@ g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
G_CACHE_DEBUG(3, "Tasting %s.", pp->name);
- gp = g_new_geomf(mp, "cache:taste");
+ gp = g_new_geom(mp, "cache:taste");
gp->start = g_cache_start;
gp->orphan = g_cache_orphan;
gp->access = g_cache_access;
diff --git a/sys/geom/concat/g_concat.c b/sys/geom/concat/g_concat.c
index 2173a84c7acf..5461c6dd73d3 100644
--- a/sys/geom/concat/g_concat.c
+++ b/sys/geom/concat/g_concat.c
@@ -646,7 +646,7 @@ g_concat_create(struct g_class *mp, const struct g_concat_metadata *md,
return (NULL);
}
}
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = malloc(sizeof(*sc), M_CONCAT, M_WAITOK | M_ZERO);
gp->start = g_concat_start;
gp->spoiled = g_concat_orphan;
@@ -753,7 +753,7 @@ g_concat_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
G_CONCAT_DEBUG(3, "Tasting %s.", pp->name);
- gp = g_new_geomf(mp, "concat:taste");
+ gp = g_new_geom(mp, "concat:taste");
gp->start = g_concat_start;
gp->access = g_concat_access;
gp->orphan = g_concat_orphan;
@@ -1107,8 +1107,6 @@ g_concat_ctl_append(struct gctl_req *req, struct g_class *mp)
gctl_error(req, "No 'arg%u' argument.", 1);
goto fail;
}
- if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
- name += strlen("/dev/");
pp = g_provider_by_name(name);
if (pp == NULL) {
G_CONCAT_DEBUG(1, "Disk %s is invalid.", name);
diff --git a/sys/geom/eli/g_eli.c b/sys/geom/eli/g_eli.c
index 5bd2d465183e..7fca50e7635c 100644
--- a/sys/geom/eli/g_eli.c
+++ b/sys/geom/eli/g_eli.c
@@ -769,7 +769,7 @@ g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp,
g_topology_assert();
- gp = g_new_geomf(mp, "eli:taste");
+ gp = g_new_geom(mp, "eli:taste");
gp->start = g_eli_start;
gp->access = g_std_access;
/*
diff --git a/sys/geom/gate/g_gate.c b/sys/geom/gate/g_gate.c
index ecdcacff6707..76a4328227dd 100644
--- a/sys/geom/gate/g_gate.c
+++ b/sys/geom/gate/g_gate.c
@@ -571,7 +571,7 @@ g_gate_create(struct g_gate_ctl_create *ggio)
}
}
- gp = g_new_geomf(&g_gate_class, "%s", name);
+ gp = g_new_geom(&g_gate_class, name);
gp->start = g_gate_start;
gp->access = g_gate_access;
gp->orphan = g_gate_orphan;
diff --git a/sys/geom/geom.h b/sys/geom/geom.h
index 908ce86f03a6..50e6627b0157 100644
--- a/sys/geom/geom.h
+++ b/sys/geom/geom.h
@@ -289,8 +289,9 @@ int g_handleattr_int(struct bio *bp, const char *attribute, int val);
int g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val);
int g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val);
int g_handleattr_str(struct bio *bp, const char *attribute, const char *str);
-struct g_consumer * g_new_consumer(struct g_geom *gp);
-struct g_geom * g_new_geomf(struct g_class *mp, const char *fmt, ...)
+struct g_consumer *g_new_consumer(struct g_geom *gp);
+struct g_geom *g_new_geom(struct g_class *mp, const char *name);
+struct g_geom *g_new_geomf(struct g_class *mp, const char *fmt, ...)
__printflike(2, 3);
struct g_provider * g_new_providerf(struct g_geom *gp, const char *fmt, ...)
__printflike(2, 3);
diff --git a/sys/geom/geom_dev.c b/sys/geom/geom_dev.c
index 4a2a850c2eab..27c65f15d5e3 100644
--- a/sys/geom/geom_dev.c
+++ b/sys/geom/geom_dev.c
@@ -355,7 +355,7 @@ g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
g_topology_assert();
- gp = g_new_geomf(mp, "%s", pp->name);
+ gp = g_new_geom(mp, pp->name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
cp = g_new_consumer(gp);
diff --git a/sys/geom/geom_event.c b/sys/geom/geom_event.c
index 341233a6ef47..ffd46db55416 100644
--- a/sys/geom/geom_event.c
+++ b/sys/geom/geom_event.c
@@ -347,6 +347,7 @@ static void
g_post_event_ep_va(g_event_t *func, void *arg, int wuflag,
struct g_event *ep, va_list ap)
{
+ struct thread *td;
void *p;
u_int n;
@@ -366,8 +367,12 @@ g_post_event_ep_va(g_event_t *func, void *arg, int wuflag,
TAILQ_INSERT_TAIL(&g_events, ep, events);
mtx_unlock(&g_eventlock);
wakeup(&g_wait_event);
- curthread->td_pflags |= TDP_GEOM;
- ast_sched(curthread, TDA_GEOM);
+
+ td = curthread;
+ if ((td->td_pflags & TDP_KTHREAD) == 0) {
+ td->td_pflags |= TDP_GEOM;
+ ast_sched(td, TDA_GEOM);
+ }
}
void
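The reasoning behind the new TDP_KTHREAD guard above, restated as a commented sketch (not the committed code): ASTs run only on the return path to user mode, so a pure kernel thread would never consume a pending TDA_GEOM.

    td = curthread;
    if ((td->td_pflags & TDP_KTHREAD) == 0) {
            /*
             * Only threads that cross back into user mode pass
             * through ast(); skip the bookkeeping for kernel
             * threads, where the flag would linger forever.
             */
            td->td_pflags |= TDP_GEOM;
            ast_sched(td, TDA_GEOM);
    }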
diff --git a/sys/geom/geom_slice.c b/sys/geom/geom_slice.c
index 0491b0069be4..935293950c37 100644
--- a/sys/geom/geom_slice.c
+++ b/sys/geom/geom_slice.c
@@ -529,7 +529,7 @@ g_slice_new(struct g_class *mp, u_int slices, struct g_provider *pp, struct g_co
g_topology_assert();
vp = (void **)extrap;
- gp = g_new_geomf(mp, "%s", pp->name);
+ gp = g_new_geom(mp, pp->name);
gsp = g_slice_alloc(slices, extra);
gsp->start = start;
gp->softc = gsp;
diff --git a/sys/geom/geom_subr.c b/sys/geom/geom_subr.c
index 1429c84942ed..2a6ce1ab6486 100644
--- a/sys/geom/geom_subr.c
+++ b/sys/geom/geom_subr.c
@@ -368,20 +368,15 @@ g_retaste(struct g_class *mp)
}
struct g_geom *
-g_new_geomf(struct g_class *mp, const char *fmt, ...)
+g_new_geom(struct g_class *mp, const char *name)
{
+ int len;
struct g_geom *gp;
- va_list ap;
- struct sbuf *sb;
g_topology_assert();
G_VALID_CLASS(mp);
- sb = sbuf_new_auto();
- va_start(ap, fmt);
- sbuf_vprintf(sb, fmt, ap);
- va_end(ap);
- sbuf_finish(sb);
- gp = g_malloc(sizeof(*gp) + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
+ len = strlen(name);
+ gp = g_malloc(sizeof(*gp) + len + 1, M_WAITOK | M_ZERO);
gp->name = (char *)(gp + 1);
gp->class = mp;
gp->rank = 1;
@@ -389,8 +384,7 @@ g_new_geomf(struct g_class *mp, const char *fmt, ...)
LIST_INIT(&gp->provider);
LIST_INSERT_HEAD(&mp->geom, gp, geom);
TAILQ_INSERT_HEAD(&geoms, gp, geoms);
- strcpy(gp->name, sbuf_data(sb));
- sbuf_delete(sb);
+ memcpy(gp->name, name, len);
/* Fill in defaults from class */
gp->start = mp->start;
gp->spoiled = mp->spoiled;
@@ -404,6 +398,23 @@ g_new_geomf(struct g_class *mp, const char *fmt, ...)
return (gp);
}
+struct g_geom *
+g_new_geomf(struct g_class *mp, const char *fmt, ...)
+{
+ struct g_geom *gp;
+ va_list ap;
+ struct sbuf *sb;
+
+ sb = sbuf_new_auto();
+ va_start(ap, fmt);
+ sbuf_vprintf(sb, fmt, ap);
+ va_end(ap);
+ sbuf_finish(sb);
+ gp = g_new_geom(mp, sbuf_data(sb));
+ sbuf_delete(sb);
+ return (gp);
+}
+
void
g_destroy_geom(struct g_geom *gp)
{
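With the split above, callers that already hold the final name skip the sbuf round trip entirely, while format-string callers keep the old entry point, now a thin wrapper. A usage sketch with hypothetical call sites:

    /* literal or precomputed names: no printf machinery */
    gp = g_new_geom(mp, "journal:taste");
    gp = g_new_geom(mp, pp->name);

    /* names that genuinely need formatting keep the wrapper */
    gp = g_new_geomf(mp, "%s.sync", pp->name);

Note that g_new_geom() copies only strlen(name) bytes into the name buffer; the trailing NUL comes from the M_ZERO allocation.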
diff --git a/sys/geom/journal/g_journal.c b/sys/geom/journal/g_journal.c
index 6d9f6239e632..b520194b7d7c 100644
--- a/sys/geom/journal/g_journal.c
+++ b/sys/geom/journal/g_journal.c
@@ -2477,7 +2477,7 @@ g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
if (pp->geom->class == mp)
return (NULL);
- gp = g_new_geomf(mp, "journal:taste");
+ gp = g_new_geom(mp, "journal:taste");
/* This orphan function should never be called. */
gp->orphan = g_journal_taste_orphan;
cp = g_new_consumer(gp);
diff --git a/sys/geom/label/g_label.c b/sys/geom/label/g_label.c
index acb17d40914e..faefbd7c2ef6 100644
--- a/sys/geom/label/g_label.c
+++ b/sys/geom/label/g_label.c
@@ -399,7 +399,7 @@ g_label_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
if (strcmp(pp->geom->class->name, mp->name) == 0)
return (NULL);
- gp = g_new_geomf(mp, "label:taste");
+ gp = g_new_geom(mp, "label:taste");
gp->start = g_label_start_taste;
gp->access = g_label_access_taste;
gp->orphan = g_label_orphan_taste;
diff --git a/sys/geom/linux_lvm/g_linux_lvm.c b/sys/geom/linux_lvm/g_linux_lvm.c
index c63318fed729..f333c08f45d9 100644
--- a/sys/geom/linux_lvm/g_linux_lvm.c
+++ b/sys/geom/linux_lvm/g_linux_lvm.c
@@ -537,7 +537,7 @@ g_llvm_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
g_topology_assert();
g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
- gp = g_new_geomf(mp, "linux_lvm:taste");
+ gp = g_new_geom(mp, "linux_lvm:taste");
/* This orphan function should never be called. */
gp->orphan = g_llvm_taste_orphan;
cp = g_new_consumer(gp);
@@ -557,7 +557,7 @@ g_llvm_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
vg = md.md_vg;
if (vg->vg_geom == NULL) {
/* new volume group */
- gp = g_new_geomf(mp, "%s", vg->vg_name);
+ gp = g_new_geom(mp, vg->vg_name);
gp->start = g_llvm_start;
gp->spoiled = g_llvm_orphan;
gp->orphan = g_llvm_orphan;
diff --git a/sys/geom/mirror/g_mirror.c b/sys/geom/mirror/g_mirror.c
index 25c0490938ef..03902a2f2491 100644
--- a/sys/geom/mirror/g_mirror.c
+++ b/sys/geom/mirror/g_mirror.c
@@ -3149,7 +3149,7 @@ g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md,
/*
* Action geom.
*/
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
gp->start = g_mirror_start;
gp->orphan = g_mirror_orphan;
@@ -3290,7 +3290,7 @@ g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
- gp = g_new_geomf(mp, "mirror:taste");
+ gp = g_new_geom(mp, "mirror:taste");
/*
* This orphan function should never be called.
*/
diff --git a/sys/geom/mirror/g_mirror_ctl.c b/sys/geom/mirror/g_mirror_ctl.c
index 82bc05a142c0..b31bf098ac4b 100644
--- a/sys/geom/mirror/g_mirror_ctl.c
+++ b/sys/geom/mirror/g_mirror_ctl.c
@@ -433,7 +433,7 @@ g_mirror_ctl_create(struct gctl_req *req, struct g_class *mp)
g_topology_lock();
mediasize = OFF_MAX;
sectorsize = 0;
- gp = g_new_geomf(mp, "%s", md.md_name);
+ gp = g_new_geom(mp, md.md_name);
gp->orphan = g_mirror_create_orphan;
cp = g_new_consumer(gp);
for (no = 1; no < *nargs; no++) {
diff --git a/sys/geom/mountver/g_mountver.c b/sys/geom/mountver/g_mountver.c
index de3a298735d4..c7d55c4734a2 100644
--- a/sys/geom/mountver/g_mountver.c
+++ b/sys/geom/mountver/g_mountver.c
@@ -291,7 +291,7 @@ g_mountver_create(struct gctl_req *req, struct g_class *mp, struct g_provider *p
return (EEXIST);
}
}
- gp = g_new_geomf(mp, "%s", name);
+ gp = g_new_geom(mp, name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
mtx_init(&sc->sc_mtx, "gmountver", NULL, MTX_DEF | MTX_RECURSE);
TAILQ_INIT(&sc->sc_queue);
diff --git a/sys/geom/multipath/g_multipath.c b/sys/geom/multipath/g_multipath.c
index a4935df7eaa1..4459bd9f03f5 100644
--- a/sys/geom/multipath/g_multipath.c
+++ b/sys/geom/multipath/g_multipath.c
@@ -549,7 +549,7 @@ g_multipath_create(struct g_class *mp, struct g_multipath_metadata *md)
}
}
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
mtx_init(&sc->sc_mtx, "multipath", NULL, MTX_DEF);
memcpy(sc->sc_uuid, md->md_uuid, sizeof(sc->sc_uuid));
@@ -821,7 +821,7 @@ g_multipath_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
g_topology_assert();
- gp = g_new_geomf(mp, "multipath:taste");
+ gp = g_new_geom(mp, "multipath:taste");
gp->start = g_multipath_start;
gp->access = g_multipath_access;
gp->orphan = g_multipath_orphan;
@@ -949,7 +949,6 @@ g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp,
struct g_consumer *cp;
struct g_provider *pp;
const char *mpname;
- static const char devpf[6] = _PATH_DEV;
int error;
g_topology_assert();
@@ -966,8 +965,6 @@ g_multipath_ctl_add_name(struct gctl_req *req, struct g_class *mp,
}
sc = gp->softc;
- if (strncmp(name, devpf, 5) == 0)
- name += 5;
pp = g_provider_by_name(name);
if (pp == NULL) {
gctl_error(req, "Provider %s is invalid", name);
diff --git a/sys/geom/nop/g_nop.c b/sys/geom/nop/g_nop.c
index a32111e3a29a..1fb99f4a0a5b 100644
--- a/sys/geom/nop/g_nop.c
+++ b/sys/geom/nop/g_nop.c
@@ -416,7 +416,7 @@ g_nop_create(struct gctl_req *req, struct g_class *mp, struct g_provider *pp,
return (EEXIST);
}
}
- gp = g_new_geomf(mp, "%s", name);
+ gp = g_new_geom(mp, name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
sc->sc_offset = offset;
sc->sc_explicitsize = explicitsize;
diff --git a/sys/geom/part/g_part.c b/sys/geom/part/g_part.c
index 41125f6478ac..1e4236507fa4 100644
--- a/sys/geom/part/g_part.c
+++ b/sys/geom/part/g_part.c
@@ -122,13 +122,13 @@ struct g_part_alias_list {
{ "ntfs", G_PART_ALIAS_MS_NTFS },
{ "openbsd-data", G_PART_ALIAS_OPENBSD_DATA },
{ "prep-boot", G_PART_ALIAS_PREP_BOOT },
- { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT },
- { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT },
- { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP },
- { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP },
- { "solaris-var", G_PART_ALIAS_SOLARIS_VAR },
- { "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
- { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
+ { "solaris-boot", G_PART_ALIAS_SOLARIS_BOOT },
+ { "solaris-root", G_PART_ALIAS_SOLARIS_ROOT },
+ { "solaris-swap", G_PART_ALIAS_SOLARIS_SWAP },
+ { "solaris-backup", G_PART_ALIAS_SOLARIS_BACKUP },
+ { "solaris-var", G_PART_ALIAS_SOLARIS_VAR },
+ { "solaris-home", G_PART_ALIAS_SOLARIS_HOME },
+ { "solaris-altsec", G_PART_ALIAS_SOLARIS_ALTSEC },
{ "solaris-reserved", G_PART_ALIAS_SOLARIS_RESERVED },
{ "u-boot-env", G_PART_ALIAS_U_BOOT_ENV },
{ "vmware-reserved", G_PART_ALIAS_VMRESERVED },
@@ -552,8 +552,6 @@ g_part_parm_provider(struct gctl_req *req, const char *name,
pname = gctl_get_asciiparam(req, name);
if (pname == NULL)
return (ENOATTR);
- if (strncmp(pname, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
- pname += sizeof(_PATH_DEV) - 1;
pp = g_provider_by_name(pname);
if (pp == NULL) {
gctl_error(req, "%d %s '%s'", EINVAL, name, pname);
@@ -998,7 +996,7 @@ g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
}
if (null == NULL)
- gp = g_new_geomf(&g_part_class, "%s", pp->name);
+ gp = g_new_geom(&g_part_class, pp->name);
gp->softc = kobj_create((kobj_class_t)gpp->gpp_scheme, M_GEOM,
M_WAITOK);
table = gp->softc;
@@ -1046,7 +1044,7 @@ g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
/*
* Synthesize a disk geometry. Some partitioning schemes
* depend on it and since some file systems need it even
- * when the partitition scheme doesn't, we do it here in
+ * when the partition scheme doesn't, we do it here in
* scheme-independent code.
*/
g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
@@ -1539,7 +1537,7 @@ g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
/*
* Synthesize a disk geometry. Some partitioning schemes
* depend on it and since some file systems need it even
- * when the partitition scheme doesn't, we do it here in
+ * when the partition scheme doesn't, we do it here in
* scheme-independent code.
*/
pp = cp->provider;
@@ -1979,7 +1977,7 @@ g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
* With that we become part of the topology. Obtain read access
* to the provider.
*/
- gp = g_new_geomf(mp, "%s", pp->name);
+ gp = g_new_geom(mp, pp->name);
cp = g_new_consumer(gp);
cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
error = g_attach(cp, pp);
@@ -2023,7 +2021,7 @@ g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
/*
* Synthesize a disk geometry. Some partitioning schemes
* depend on it and since some file systems need it even
- * when the partitition scheme doesn't, we do it here in
+ * when the partition scheme doesn't, we do it here in
* scheme-independent code.
*/
g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
diff --git a/sys/geom/raid/g_raid.c b/sys/geom/raid/g_raid.c
index a483622d14a5..d35695482eea 100644
--- a/sys/geom/raid/g_raid.c
+++ b/sys/geom/raid/g_raid.c
@@ -775,8 +775,6 @@ g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
g_topology_assert();
- if (strncmp(name, _PATH_DEV, 5) == 0)
- name += 5;
pp = g_provider_by_name(name);
if (pp == NULL)
return (NULL);
@@ -1876,7 +1874,7 @@ g_raid_create_node(struct g_class *mp,
g_topology_assert();
G_RAID_DEBUG(1, "Creating array %s.", name);
- gp = g_new_geomf(mp, "%s", name);
+ gp = g_new_geom(mp, name);
sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
gp->start = g_raid_start;
gp->orphan = g_raid_orphan;
@@ -2217,7 +2215,7 @@ g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
geom = NULL;
status = G_RAID_MD_TASTE_FAIL;
- gp = g_new_geomf(mp, "raid:taste");
+ gp = g_new_geom(mp, "raid:taste");
/*
* This orphan function should never be called.
*/
diff --git a/sys/geom/raid3/g_raid3.c b/sys/geom/raid3/g_raid3.c
index c2d05b48d80d..64951bd01deb 100644
--- a/sys/geom/raid3/g_raid3.c
+++ b/sys/geom/raid3/g_raid3.c
@@ -3164,7 +3164,7 @@ g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
/*
* Action geom.
*/
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
M_WAITOK | M_ZERO);
@@ -3338,7 +3338,7 @@ g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
- gp = g_new_geomf(mp, "raid3:taste");
+ gp = g_new_geom(mp, "raid3:taste");
/* This orphan function should never be called. */
gp->orphan = g_raid3_taste_orphan;
cp = g_new_consumer(gp);
diff --git a/sys/geom/raid3/g_raid3_ctl.c b/sys/geom/raid3/g_raid3_ctl.c
index 824de07e4836..5eafcce917cf 100644
--- a/sys/geom/raid3/g_raid3_ctl.c
+++ b/sys/geom/raid3/g_raid3_ctl.c
@@ -425,7 +425,7 @@ g_raid3_ctl_insert(struct gctl_req *req, struct g_class *mp)
no = gctl_get_paraml(req, "number", sizeof(*no));
else
no = NULL;
- gp = g_new_geomf(mp, "raid3:insert");
+ gp = g_new_geom(mp, "raid3:insert");
gp->orphan = g_raid3_ctl_insert_orphan;
cp = g_new_consumer(gp);
error = g_attach(cp, pp);
diff --git a/sys/geom/shsec/g_shsec.c b/sys/geom/shsec/g_shsec.c
index 3ccc23e7eb8b..9da814e5eb34 100644
--- a/sys/geom/shsec/g_shsec.c
+++ b/sys/geom/shsec/g_shsec.c
@@ -545,7 +545,7 @@ g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md)
return (NULL);
}
}
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO);
gp->start = g_shsec_start;
gp->spoiled = g_shsec_orphan;
@@ -643,7 +643,7 @@ g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
G_SHSEC_DEBUG(3, "Tasting %s.", pp->name);
- gp = g_new_geomf(mp, "shsec:taste");
+ gp = g_new_geom(mp, "shsec:taste");
gp->start = g_shsec_start;
gp->access = g_shsec_access;
gp->orphan = g_shsec_orphan;
diff --git a/sys/geom/stripe/g_stripe.c b/sys/geom/stripe/g_stripe.c
index 6f336c18c8e6..ba1953f036d3 100644
--- a/sys/geom/stripe/g_stripe.c
+++ b/sys/geom/stripe/g_stripe.c
@@ -454,11 +454,9 @@ g_stripe_start_economic(struct bio *bp, u_int no, off_t offset, off_t length)
cbp->bio_done = g_stripe_done;
cbp->bio_offset = offset;
cbp->bio_length = length;
- if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
- bp->bio_ma_n = round_page(bp->bio_ma_offset +
- bp->bio_length) / PAGE_SIZE;
+ if ((bp->bio_flags & BIO_UNMAPPED) != 0)
addr = NULL;
- } else
+ else
addr = bp->bio_data;
cbp->bio_caller2 = sc->sc_disks[no];
@@ -864,7 +862,7 @@ g_stripe_create(struct g_class *mp, const struct g_stripe_metadata *md,
return (NULL);
}
}
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
sc = malloc(sizeof(*sc), M_STRIPE, M_WAITOK | M_ZERO);
gp->start = g_stripe_start;
gp->spoiled = g_stripe_orphan;
@@ -965,7 +963,7 @@ g_stripe_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
G_STRIPE_DEBUG(3, "Tasting %s.", pp->name);
- gp = g_new_geomf(mp, "stripe:taste");
+ gp = g_new_geom(mp, "stripe:taste");
gp->start = g_stripe_start;
gp->access = g_stripe_access;
gp->orphan = g_stripe_orphan;
diff --git a/sys/geom/union/g_union.c b/sys/geom/union/g_union.c
index 9734fc1bcfe3..43c16c86e5a8 100644
--- a/sys/geom/union/g_union.c
+++ b/sys/geom/union/g_union.c
@@ -246,7 +246,7 @@ g_union_ctl_create(struct gctl_req *req, struct g_class *mp, bool verbose)
return;
}
}
- gp = g_new_geomf(mp, "%s", name);
+ gp = g_new_geom(mp, name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
rw_init(&sc->sc_rwlock, "gunion");
TAILQ_INIT(&sc->sc_wiplist);
@@ -358,6 +358,8 @@ fail2:
fail1:
g_destroy_consumer(lowercp);
g_destroy_provider(newpp);
+ rw_destroy(&sc->sc_rwlock);
+ g_free(sc);
g_destroy_geom(gp);
}
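The fail path above now also releases the softc and its lock, which were previously leaked when consumer or provider setup failed. A sketch of the unwind, assuming the surrounding create logic:

    sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
    rw_init(&sc->sc_rwlock, "gunion");
    /* ... consumer/provider setup that can fail ... */
    fail1:
            g_destroy_consumer(lowercp);
            g_destroy_provider(newpp);
            rw_destroy(&sc->sc_rwlock);     /* undo rw_init() */
            g_free(sc);                     /* undo g_malloc() */
            g_destroy_geom(gp);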
diff --git a/sys/geom/virstor/g_virstor.c b/sys/geom/virstor/g_virstor.c
index c7d737493f11..1490ed103329 100644
--- a/sys/geom/virstor/g_virstor.c
+++ b/sys/geom/virstor/g_virstor.c
@@ -771,7 +771,7 @@ g_virstor_taste(struct g_class *mp, struct g_provider *pp, int flags)
LOG_MSG(LVL_DEBUG, "Tasting %s", pp->name);
/* We need a dummy geom to attach a consumer to the given provider */
- gp = g_new_geomf(mp, "virstor:taste.helper");
+ gp = g_new_geom(mp, "virstor:taste.helper");
gp->start = (void *)invalid_call; /* XXX: hacked up so the */
gp->access = (void *)invalid_call; /* compiler doesn't complain. */
gp->orphan = (void *)invalid_call; /* I really want these to fail. */
@@ -1085,7 +1085,7 @@ create_virstor_geom(struct g_class *mp, struct g_virstor_metadata *md)
return (NULL);
}
}
- gp = g_new_geomf(mp, "%s", md->md_name);
+ gp = g_new_geom(mp, md->md_name);
gp->softc = NULL; /* to circumvent races that test softc */
gp->start = g_virstor_start;
diff --git a/sys/geom/zero/g_zero.c b/sys/geom/zero/g_zero.c
index 91ef0fb1ef95..e9934ba6c784 100644
--- a/sys/geom/zero/g_zero.c
+++ b/sys/geom/zero/g_zero.c
@@ -102,7 +102,7 @@ g_zero_init(struct g_class *mp)
struct g_provider *pp;
g_topology_assert();
- gp = g_new_geomf(mp, "gzero");
+ gp = g_new_geom(mp, "gzero");
gp->start = g_zero_start;
gp->access = g_std_access;
gpp = pp = g_new_providerf(gp, "%s", gp->name);
diff --git a/sys/i386/acpica/acpi_wakeup.c b/sys/i386/acpica/acpi_wakeup.c
index 2d60d5e037a0..96be64de017b 100644
--- a/sys/i386/acpica/acpi_wakeup.c
+++ b/sys/i386/acpica/acpi_wakeup.c
@@ -84,7 +84,7 @@ static cpuset_t suspcpus;
static struct susppcb **susppcbs;
#endif
-static void acpi_stop_beep(void *);
+static void acpi_stop_beep(void *, enum power_stype);
#ifdef SMP
static int acpi_wakeup_ap(struct acpi_softc *, int);
@@ -100,7 +100,7 @@ static void acpi_wakeup_cpus(struct acpi_softc *);
} while (0)
static void
-acpi_stop_beep(void *arg)
+acpi_stop_beep(void *arg, enum power_stype stype)
{
if (acpi_resume_beep != 0)
diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC
index f577cd07ac7c..06738a7ed506 100644
--- a/sys/i386/conf/GENERIC
+++ b/sys/i386/conf/GENERIC
@@ -249,9 +249,9 @@ device wlan # 802.11 support
options IEEE80211_DEBUG # enable debug msgs
options IEEE80211_SUPPORT_MESH # enable 802.11s draft support
device wlan_wep # 802.11 WEP support
+device wlan_tkip # 802.11 TKIP support
device wlan_ccmp # 802.11 CCMP support
device wlan_gcmp # 802.11 GCMP support
-device wlan_tkip # 802.11 TKIP support
device wlan_amrr # AMRR transmit rate control algorithm
device ath # Atheros CardBus/PCI NICs
device ath_hal # Atheros CardBus/PCI chip support
diff --git a/sys/i386/i386/in_cksum_machdep.c b/sys/i386/i386/in_cksum_machdep.c
index 27ab09d82da0..b658d85bc892 100644
--- a/sys/i386/i386/in_cksum_machdep.c
+++ b/sys/i386/i386/in_cksum_machdep.c
@@ -84,7 +84,7 @@ in_cksum_skip(struct mbuf *m, int len, int skip)
}
}
- for (;m && len; m = m->m_next) {
+ for (; m && len; m = m->m_next) {
if (m->m_len == 0)
continue;
w = mtod(m, u_short *);
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 6aac0e968362..3f659432552c 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -1605,7 +1605,7 @@ init386(int first)
}
static void
-machdep_init_trampoline(void)
+machdep_init_trampoline(void *dummy __unused)
{
struct region_descriptor r_gdt, r_idt;
struct i386tss *tss;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index b44f5e08bbcf..1cf0867d57c3 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -720,7 +720,7 @@ __CONCAT(PMTYPE, bootstrap)(vm_paddr_t firstaddr)
}
static void
-pmap_init_reserved_pages(void)
+pmap_init_reserved_pages(void *dummy __unused)
{
struct pcpu *pc;
vm_offset_t pages;
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 4bed57b5afbf..b200588b0739 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -74,7 +74,7 @@ static __inline void
clflushopt(u_long addr)
{
- __asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
+ __asm __volatile("clflushopt %0" : : "m" (*(char *)addr));
}
static __inline void
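On the cpufunc.h change: CLFLUSHOPT is encoded as CLFLUSH (0F AE /7) with a 66h operand-size prefix, which is exactly what the old ".byte 0x66" hand-encoding supplied for assemblers that lacked the mnemonic. Both spellings assemble to the same bytes:

    /* old: prefix spelled out by hand */
    __asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
    /* new: same machine code (66 0F AE /7), readable mnemonic */
    __asm __volatile("clflushopt %0" : : "m" (*(char *)addr));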
diff --git a/sys/isa/isa_common.c b/sys/isa/isa_common.c
index 8e4064af1455..1a6df7bf6046 100644
--- a/sys/isa/isa_common.c
+++ b/sys/isa/isa_common.c
@@ -1114,7 +1114,7 @@ isab_attach(device_t dev)
{
device_t child;
- child = device_add_child(dev, "isa", 0);
+ child = device_add_child(dev, "isa", DEVICE_UNIT_ANY);
if (child == NULL)
return (ENXIO);
bus_attach_children(dev);
diff --git a/sys/isa/isareg.h b/sys/isa/isareg.h
index e89136c7e1e5..8b2d55608078 100644
--- a/sys/isa/isareg.h
+++ b/sys/isa/isareg.h
@@ -49,7 +49,7 @@
#define IO_RTC 0x070 /* RTC */
#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */
-#define IO_MDA 0x3B0 /* Monochome Adapter */
+#define IO_MDA 0x3B0 /* Monochrome Adapter */
#define IO_VGA 0x3C0 /* E/VGA Ports */
#define IO_CGA 0x3D0 /* CGA Ports */
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 2690ad3b2679..a1fabbc86f27 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -84,8 +84,15 @@
#define ELF_NOTE_ROUNDSIZE 4
#define OLD_EI_BRAND 8
+/*
+ * ELF_ABI_NAME is a string name of the ELF ABI. ELF_ABI_ID is used
+ * to build variable names.
+ */
+#define ELF_ABI_NAME __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
+#define ELF_ABI_ID __CONCAT(elf, __ELF_WORD_SIZE)
+
static int __elfN(check_header)(const Elf_Ehdr *hdr);
-static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
+static const Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
const char *interp, int32_t *osrel, uint32_t *fctl0);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
u_long *entry);
@@ -97,21 +104,22 @@ static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
int32_t *osrel);
static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static bool __elfN(check_note)(struct image_params *imgp,
- Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0,
+ const Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0,
uint32_t *fctl0);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);
static size_t __elfN(prepare_register_notes)(struct thread *td,
struct note_info_list *list, struct thread *target_td);
-SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
- CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+SYSCTL_NODE(_kern, OID_AUTO, ELF_ABI_ID, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"");
+#define ELF_NODE_OID __CONCAT(_kern_, ELF_ABI_ID)
+
int __elfN(fallback_brand) = -1;
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO,
fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
+ ELF_ABI_NAME " brand of last resort");
static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
@@ -126,22 +134,22 @@ int __elfN(nxstack) =
#else
0;
#endif
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO,
nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": support PT_GNU_STACK for non-executable stack control");
+ ELF_ABI_NAME ": support PT_GNU_STACK for non-executable stack control");
#if defined(__amd64__)
static int __elfN(vdso) = 1;
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO,
vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading");
+ ELF_ABI_NAME ": enable vdso preloading");
#else
static int __elfN(vdso) = 0;
#endif
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
-SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
"enable execution from readable segments");
#endif
@@ -161,15 +169,15 @@ sysctl_pie_base(SYSCTL_HANDLER_ARGS)
__elfN(pie_base) = val;
return (0);
}
-SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
+SYSCTL_PROC(ELF_NODE_OID, OID_AUTO, pie_base,
CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
sysctl_pie_base, "LU",
"PIE load base without randomization");
-SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
+SYSCTL_NODE(ELF_NODE_OID, OID_AUTO, aslr,
CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"");
-#define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)
+#define ASLR_NODE_OID __CONCAT(ELF_NODE_OID, _aslr)
/*
* Enable ASLR by default for 64-bit non-PIE binaries. 32-bit architectures
@@ -179,8 +187,7 @@ SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
&__elfN(aslr_enabled), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable address map randomization");
+ ELF_ABI_NAME ": enable address map randomization");
/*
* Enable ASLR by default for 64-bit PIE binaries.
@@ -188,8 +195,7 @@ SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
&__elfN(pie_aslr_enabled), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable address map randomization for PIE binaries");
+ ELF_ABI_NAME ": enable address map randomization for PIE binaries");
/*
* Sbrk is deprecated and it can be assumed that in most cases it will not be
@@ -199,31 +205,29 @@ SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
static int __elfN(aslr_honor_sbrk) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
&__elfN(aslr_honor_sbrk), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");
+ ELF_ABI_NAME ": assume sbrk is used");
static int __elfN(aslr_stack) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN,
&__elfN(aslr_stack), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable stack address randomization");
+ ELF_ABI_NAME ": enable stack address randomization");
static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN,
&__elfN(aslr_shared_page), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable shared page address randomization");
+ ELF_ABI_NAME ": enable shared page address randomization");
static int __elfN(sigfastblock) = 1;
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO, sigfastblock,
CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
"enable sigfastblock for new processes");
static bool __elfN(allow_wx) = true;
-SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
+SYSCTL_BOOL(ELF_NODE_OID, OID_AUTO, allow_wx,
CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
"Allow pages to be mapped simultaneously writable and executable");
-static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
+static const Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
#define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
@@ -282,7 +286,7 @@ kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
}
int
-__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
+__elfN(insert_brand_entry)(const Elf_Brandinfo *entry)
{
int i;
@@ -301,7 +305,7 @@ __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
}
int
-__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
+__elfN(remove_brand_entry)(const Elf_Brandinfo *entry)
{
int i;
@@ -317,7 +321,7 @@ __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
}
bool
-__elfN(brand_inuse)(Elf_Brandinfo *entry)
+__elfN(brand_inuse)(const Elf_Brandinfo *entry)
{
struct proc *p;
bool rval = false;
@@ -334,12 +338,12 @@ __elfN(brand_inuse)(Elf_Brandinfo *entry)
return (rval);
}
-static Elf_Brandinfo *
+static const Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
int32_t *osrel, uint32_t *fctl0)
{
const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
- Elf_Brandinfo *bi, *bi_m;
+ const Elf_Brandinfo *bi, *bi_m;
bool ret, has_fctl0;
int i, interp_name_len;
@@ -488,7 +492,7 @@ __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
- Elf_Brandinfo *bi;
+ const Elf_Brandinfo *bi;
int i;
if (!IS_ELF(*hdr) ||
@@ -1105,7 +1109,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
struct vmspace *vmspace;
vm_map_t map;
char *interp;
- Elf_Brandinfo *brand_info;
+ const Elf_Brandinfo *brand_info;
struct sysentvec *sv;
u_long addr, baddr, entry, proghdr;
u_long maxalign, maxsalign, mapsz, maxv, maxv1, anon_loc;
@@ -1921,7 +1925,7 @@ __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
Elf_Phdr *phdr;
Elf_Shdr *shdr;
struct phdr_closure phc;
- Elf_Brandinfo *bi;
+ const Elf_Brandinfo *bi;
ehdr = (Elf_Ehdr *)hdr;
bi = td->td_proc->p_elf_brandinfo;
@@ -2606,11 +2610,13 @@ note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
int structsize;
p = arg;
- size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
+ size = sizeof(structsize) +
+ (1 + p->p_ucred->cr_ngroups) * sizeof(gid_t);
if (sb != NULL) {
KASSERT(*sizep == size, ("invalid size"));
structsize = sizeof(gid_t);
sbuf_bcat(sb, &structsize, sizeof(structsize));
+ sbuf_bcat(sb, &p->p_ucred->cr_gid, sizeof(gid_t));
sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
sizeof(gid_t));
}
@@ -2825,7 +2831,7 @@ __elfN(parse_notes)(const struct image_params *imgp, const Elf_Note *checknote,
}
if ((const char *)note_end - (const char *)note <
sizeof(Elf_Note)) {
- uprintf("ELF note to short\n");
+ uprintf("ELF note too short\n");
goto retf;
}
if (note->n_namesz != checknote->n_namesz ||
@@ -2833,9 +2839,9 @@ __elfN(parse_notes)(const struct image_params *imgp, const Elf_Note *checknote,
note->n_type != checknote->n_type)
goto nextnote;
note_name = (const char *)(note + 1);
- if (note_name + checknote->n_namesz >=
- (const char *)note_end || strncmp(note_vendor,
- note_name, checknote->n_namesz) != 0)
+ if (note_name + roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
+ note->n_descsz >= (const char *)note_end ||
+ strncmp(note_vendor, note_name, checknote->n_namesz) != 0)
goto nextnote;
if (cb(note, cb_arg, &res))
@@ -2855,7 +2861,7 @@ ret:
}
struct brandnote_cb_arg {
- Elf_Brandnote *brandnote;
+ const Elf_Brandnote *brandnote;
int32_t *osrel;
};
@@ -2877,7 +2883,7 @@ brandnote_cb(const Elf_Note *note, void *arg0, bool *res)
return (true);
}
-static Elf_Note fctl_note = {
+static const Elf_Note fctl_note = {
.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
.n_descsz = sizeof(uint32_t),
.n_type = NT_FREEBSD_FEATURE_CTL,
@@ -2912,7 +2918,7 @@ note_fctl_cb(const Elf_Note *note, void *arg0, bool *res)
* as for headers.
*/
static bool
-__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
+__elfN(check_note)(struct image_params *imgp, const Elf_Brandnote *brandnote,
int32_t *osrel, bool *has_fctl0, uint32_t *fctl0)
{
const Elf_Phdr *phdr;
@@ -2951,9 +2957,9 @@ __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
*/
static struct execsw __elfN(execsw) = {
.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
- .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
+ .ex_name = ELF_ABI_NAME
};
-EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
+EXEC_SET(ELF_ABI_ID, __elfN(execsw));
static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
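To make the sysctl renames in imgact_elf.c concrete, this is how the new helpers expand on a 64-bit kernel (a worked example, not code added to the tree):

    /*
     * With __ELF_WORD_SIZE == 64:
     *   ELF_ABI_ID    -> elf64            (identifier token)
     *   ELF_ABI_NAME  -> "ELF64"          (display string)
     *   ELF_NODE_OID  -> _kern_elf64      (sysctl parent node)
     *   ASLR_NODE_OID -> _kern_elf64_aslr
     */
    SYSCTL_INT(ELF_NODE_OID, OID_AUTO, nxstack, CTLFLAG_RW,
        &__elfN(nxstack), 0,
        ELF_ABI_NAME ": support PT_GNU_STACK for non-executable stack control");
    /* == SYSCTL_INT(_kern_elf64, ..., "ELF64: support PT_GNU_STACK ...") */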
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 36ce44b988be..87ffdb8dbf07 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -145,13 +145,6 @@ FEATURE(invariants, "Kernel compiled with INVARIANTS, may affect performance");
#endif
/*
- * This ensures that there is at least one entry so that the sysinit_set
- * symbol is not undefined. A subsystem ID of SI_SUB_DUMMY is never
- * executed.
- */
-SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL);
-
-/*
* The sysinit linker set compiled into the kernel. These are placed onto the
* sysinit list by mi_startup; sysinit_add can add (e.g., from klds) additional
* sysinits to the linked list but the linker set here does not change.
@@ -296,7 +289,7 @@ mi_startup(void)
BOOTTRACE_INIT("sysinit 0x%7x", sip->subsystem);
#if defined(VERBOSE_SYSINIT)
- if (sip->subsystem > last && verbose_sysinit != 0) {
+ if (sip->subsystem != last && verbose_sysinit != 0) {
verbose = 1;
printf("subsystem %x\n", sip->subsystem);
}
diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c
index c0a5479c9634..e42e7dcf8b44 100644
--- a/sys/kern/init_sysent.c
+++ b/sys/kern/init_sysent.c
@@ -145,8 +145,8 @@ struct sysent sysent[] = {
{ .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 76 = obsolete vhangup */
{ .sy_narg = 0, .sy_call = (sy_call_t *)nosys, .sy_auevent = AUE_NULL, .sy_flags = 0, .sy_thrcnt = SY_THR_ABSENT }, /* 77 = obsolete vlimit */
{ .sy_narg = AS(mincore_args), .sy_call = (sy_call_t *)sys_mincore, .sy_auevent = AUE_MINCORE, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 78 = mincore */
- { .sy_narg = AS(getgroups_args), .sy_call = (sy_call_t *)sys_getgroups, .sy_auevent = AUE_GETGROUPS, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 79 = getgroups */
- { .sy_narg = AS(setgroups_args), .sy_call = (sy_call_t *)sys_setgroups, .sy_auevent = AUE_SETGROUPS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 80 = setgroups */
+ { compat14(AS(freebsd14_getgroups_args),getgroups), .sy_auevent = AUE_GETGROUPS, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 79 = freebsd14 getgroups */
+ { compat14(AS(freebsd14_setgroups_args),setgroups), .sy_auevent = AUE_SETGROUPS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 80 = freebsd14 setgroups */
{ .sy_narg = 0, .sy_call = (sy_call_t *)sys_getpgrp, .sy_auevent = AUE_GETPGRP, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 81 = getpgrp */
{ .sy_narg = AS(setpgid_args), .sy_call = (sy_call_t *)sys_setpgid, .sy_auevent = AUE_SETPGRP, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 82 = setpgid */
{ .sy_narg = AS(setitimer_args), .sy_call = (sy_call_t *)sys_setitimer, .sy_auevent = AUE_SETITIMER, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 83 = setitimer */
@@ -661,4 +661,8 @@ struct sysent sysent[] = {
{ .sy_narg = AS(exterrctl_args), .sy_call = (sy_call_t *)sys_exterrctl, .sy_auevent = AUE_NULL, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 592 = exterrctl */
{ .sy_narg = AS(inotify_add_watch_at_args), .sy_call = (sy_call_t *)sys_inotify_add_watch_at, .sy_auevent = AUE_INOTIFY, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 593 = inotify_add_watch_at */
{ .sy_narg = AS(inotify_rm_watch_args), .sy_call = (sy_call_t *)sys_inotify_rm_watch, .sy_auevent = AUE_INOTIFY, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 594 = inotify_rm_watch */
+ { .sy_narg = AS(getgroups_args), .sy_call = (sy_call_t *)sys_getgroups, .sy_auevent = AUE_GETGROUPS, .sy_flags = SYF_CAPENABLED, .sy_thrcnt = SY_THR_STATIC }, /* 595 = getgroups */
+ { .sy_narg = AS(setgroups_args), .sy_call = (sy_call_t *)sys_setgroups, .sy_auevent = AUE_SETGROUPS, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 596 = setgroups */
+ { .sy_narg = AS(jail_attach_jd_args), .sy_call = (sy_call_t *)sys_jail_attach_jd, .sy_auevent = AUE_JAIL_ATTACH, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 597 = jail_attach_jd */
+ { .sy_narg = AS(jail_remove_jd_args), .sy_call = (sy_call_t *)sys_jail_remove_jd, .sy_auevent = AUE_JAIL_REMOVE, .sy_flags = 0, .sy_thrcnt = SY_THR_STATIC }, /* 598 = jail_remove_jd */
};
diff --git a/sys/kern/kern_boottrace.c b/sys/kern/kern_boottrace.c
index 1fa87955a299..c83255bc74ee 100644
--- a/sys/kern/kern_boottrace.c
+++ b/sys/kern/kern_boottrace.c
@@ -579,7 +579,7 @@ sysctl_boottrace_reset(SYSCTL_HANDLER_ARGS)
}
static void
-boottrace_init(void)
+boottrace_init(void *dummy __unused)
{
if (!boottrace_enabled)
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index a27ab33b34da..19118eb7f275 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -658,6 +658,7 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
error = EBADF;
break;
}
+ fsetfl_lock(fp);
do {
tmp = flg = fp->f_flag;
tmp &= ~FCNTLFLAGS;
@@ -665,26 +666,34 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
} while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
got_set = tmp & ~flg;
got_cleared = flg & ~tmp;
- tmp = fp->f_flag & FNONBLOCK;
- error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
- if (error != 0)
- goto revert_f_setfl;
- tmp = fp->f_flag & FASYNC;
- error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
- if (error == 0) {
- fdrop(fp, td);
- break;
+ if (((got_set | got_cleared) & FNONBLOCK) != 0) {
+ tmp = fp->f_flag & FNONBLOCK;
+ error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
+ if (error != 0)
+ goto revert_flags;
+ }
+ if (((got_set | got_cleared) & FASYNC) != 0) {
+ tmp = fp->f_flag & FASYNC;
+ error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
+ if (error != 0)
+ goto revert_nonblock;
+ }
+ fsetfl_unlock(fp);
+ fdrop(fp, td);
+ break;
+revert_nonblock:
+ if (((got_set | got_cleared) & FNONBLOCK) != 0) {
+ tmp = ~fp->f_flag & FNONBLOCK;
+ (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
}
- atomic_clear_int(&fp->f_flag, FNONBLOCK);
- tmp = 0;
- (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
-revert_f_setfl:
+revert_flags:
do {
tmp = flg = fp->f_flag;
tmp &= ~FCNTLFLAGS;
tmp |= got_cleared;
tmp &= ~got_set;
} while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
+ fsetfl_unlock(fp);
fdrop(fp, td);
break;
@@ -5250,6 +5259,8 @@ file_type_to_name(short type)
return ("eventfd");
case DTYPE_TIMERFD:
return ("timerfd");
+ case DTYPE_JAILDESC:
+ return ("jail");
default:
return ("unkn");
}
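
The reworked F_SETFL path above serializes concurrent callers with fsetfl_lock() and only forwards FIONBIO/FIOASYNC to fo_ioctl() when the corresponding flag actually changed, unwinding through revert_nonblock/revert_flags on failure. A minimal userspace sketch of the interface this affects (illustrative only, not part of the change):

	#include <err.h>
	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fl;

		if ((fl = fcntl(STDIN_FILENO, F_GETFL)) == -1)
			err(1, "F_GETFL");
		/* Flag changes: FIONBIO is forwarded to the file's fo_ioctl. */
		if (fcntl(STDIN_FILENO, F_SETFL, fl | O_NONBLOCK) == -1)
			err(1, "F_SETFL");
		/* Flag already set: with this change, no ioctl is issued. */
		if (fcntl(STDIN_FILENO, F_SETFL, fl | O_NONBLOCK) == -1)
			err(1, "F_SETFL");
		return (0);
	}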
diff --git a/sys/kern/kern_devctl.c b/sys/kern/kern_devctl.c
index 7a2818c29b1a..a1696225df32 100644
--- a/sys/kern/kern_devctl.c
+++ b/sys/kern/kern_devctl.c
@@ -140,7 +140,7 @@ static struct devctlbridge {
} devctl_notify_hook = { .send_f = NULL };
static void
-devctl_init(void)
+devctl_init(void *dummy __unused)
{
int reserve;
uma_zone_t z;
diff --git a/sys/kern/kern_environment.c b/sys/kern/kern_environment.c
index 0cb0f566a839..7c0654769581 100644
--- a/sys/kern/kern_environment.c
+++ b/sys/kern/kern_environment.c
@@ -1098,65 +1098,65 @@ kernenv_next(char *cp)
}
void
-tunable_int_init(void *data)
+tunable_int_init(const void *data)
{
- struct tunable_int *d = (struct tunable_int *)data;
+ const struct tunable_int *d = data;
TUNABLE_INT_FETCH(d->path, d->var);
}
void
-tunable_long_init(void *data)
+tunable_long_init(const void *data)
{
- struct tunable_long *d = (struct tunable_long *)data;
+ const struct tunable_long *d = data;
TUNABLE_LONG_FETCH(d->path, d->var);
}
void
-tunable_ulong_init(void *data)
+tunable_ulong_init(const void *data)
{
- struct tunable_ulong *d = (struct tunable_ulong *)data;
+ const struct tunable_ulong *d = data;
TUNABLE_ULONG_FETCH(d->path, d->var);
}
void
-tunable_int64_init(void *data)
+tunable_int64_init(const void *data)
{
- struct tunable_int64 *d = (struct tunable_int64 *)data;
+ const struct tunable_int64 *d = data;
TUNABLE_INT64_FETCH(d->path, d->var);
}
void
-tunable_uint64_init(void *data)
+tunable_uint64_init(const void *data)
{
- struct tunable_uint64 *d = (struct tunable_uint64 *)data;
+ const struct tunable_uint64 *d = data;
TUNABLE_UINT64_FETCH(d->path, d->var);
}
void
-tunable_quad_init(void *data)
+tunable_quad_init(const void *data)
{
- struct tunable_quad *d = (struct tunable_quad *)data;
+ const struct tunable_quad *d = data;
TUNABLE_QUAD_FETCH(d->path, d->var);
}
void
-tunable_bool_init(void *data)
+tunable_bool_init(const void *data)
{
- struct tunable_bool *d = (struct tunable_bool *)data;
+ const struct tunable_bool *d = data;
TUNABLE_BOOL_FETCH(d->path, d->var);
}
void
-tunable_str_init(void *data)
+tunable_str_init(const void *data)
{
- struct tunable_str *d = (struct tunable_str *)data;
+ const struct tunable_str *d = data;
TUNABLE_STR_FETCH(d->path, d->var, d->size);
}
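
Constifying the tunable_*_init() handlers above lets the descriptors emitted by the TUNABLE_* registration macros live in read-only memory; the macros themselves are unchanged. Typical use, with a made-up tunable name:

	/* Hypothetical consumer; "hw.foo.depth" is illustrative only. */
	static int foo_depth = 32;
	TUNABLE_INT("hw.foo.depth", &foo_depth);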
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index eb77a5064113..a6333d8011b1 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -50,6 +50,8 @@
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
+#include <sys/jail.h>
+#include <sys/jaildesc.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
@@ -154,7 +156,7 @@ static void knote_drop(struct knote *kn, struct thread *td);
static void knote_drop_detached(struct knote *kn, struct thread *td);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);
-static void knote_init(void);
+static void knote_init(void *);
static struct knote *knote_alloc(int mflag);
static void knote_free(struct knote *kn);
@@ -163,6 +165,9 @@ static int filt_kqueue(struct knote *kn, long hint);
static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
+static int filt_jailattach(struct knote *kn);
+static void filt_jaildetach(struct knote *kn);
+static int filt_jail(struct knote *kn, long hint);
static int filt_fileattach(struct knote *kn);
static void filt_timerexpire(void *knx);
static void filt_timerexpire_l(struct knote *kn, bool proc_locked);
@@ -195,6 +200,12 @@ static const struct filterops proc_filtops = {
.f_detach = filt_procdetach,
.f_event = filt_proc,
};
+static const struct filterops jail_filtops = {
+ .f_isfd = 0,
+ .f_attach = filt_jailattach,
+ .f_detach = filt_jaildetach,
+ .f_event = filt_jail,
+};
static const struct filterops timer_filtops = {
.f_isfd = 0,
.f_attach = filt_timerattach,
@@ -365,6 +376,8 @@ static struct {
[~EVFILT_USER] = { &user_filtops, 1 },
[~EVFILT_SENDFILE] = { &null_filtops },
[~EVFILT_EMPTY] = { &file_filtops, 1 },
+ [~EVFILT_JAIL] = { &jail_filtops, 1 },
+ [~EVFILT_JAILDESC] = { &file_filtops, 1 },
};
/*
@@ -614,6 +627,86 @@ knote_fork(struct knlist *list, int pid)
}
}
+int
+filt_jailattach(struct knote *kn)
+{
+ struct prison *pr;
+
+ if (kn->kn_id == 0) {
+ /* Let jid=0 watch the current prison (including prison0). */
+ pr = curthread->td_ucred->cr_prison;
+ mtx_lock(&pr->pr_mtx);
+ } else {
+ sx_slock(&allprison_lock);
+ pr = prison_find_child(curthread->td_ucred->cr_prison,
+ kn->kn_id);
+ sx_sunlock(&allprison_lock);
+ if (pr == NULL)
+ return (ENOENT);
+ if (!prison_isalive(pr)) {
+ mtx_unlock(&pr->pr_mtx);
+ return (ENOENT);
+ }
+ }
+ kn->kn_ptr.p_prison = pr;
+ kn->kn_flags |= EV_CLEAR;
+ knlist_add(pr->pr_klist, kn, 1);
+ mtx_unlock(&pr->pr_mtx);
+ return (0);
+}
+
+void
+filt_jaildetach(struct knote *kn)
+{
+ if (kn->kn_ptr.p_prison != NULL) {
+ knlist_remove(kn->kn_knlist, kn, 0);
+ kn->kn_ptr.p_prison = NULL;
+ } else
+ kn->kn_status |= KN_DETACHED;
+}
+
+int
+filt_jail(struct knote *kn, long hint)
+{
+ struct prison *pr;
+ u_int event;
+
+ pr = kn->kn_ptr.p_prison;
+ if (pr == NULL) /* already activated, from attach filter */
+ return (0);
+
+ /*
+ * Mask off extra data. In the NOTE_JAIL_CHILD case, that's
+ * everything except the NOTE_JAIL_CHILD bit itself, since a
+ * JID is any positive integer.
+ */
+ event = ((u_int)hint & NOTE_JAIL_CHILD) ? NOTE_JAIL_CHILD :
+ (u_int)hint & NOTE_JAIL_CTRLMASK;
+
+ /* If the user is interested in this event, record it. */
+ if (kn->kn_sfflags & event) {
+ kn->kn_fflags |= event;
+ /* Report the created jail id or attached process id. */
+ if (event == NOTE_JAIL_CHILD || event == NOTE_JAIL_ATTACH) {
+ if (kn->kn_data != 0)
+ kn->kn_fflags |= NOTE_JAIL_MULTI;
+ kn->kn_data = (kn->kn_fflags & NOTE_JAIL_MULTI) ? 0U :
+ (u_int)hint & ~event;
+ }
+ }
+
+ /* Prison is gone, so flag the event as finished. */
+ if (event == NOTE_JAIL_REMOVE) {
+ kn->kn_flags |= EV_EOF | EV_ONESHOT;
+ kn->kn_ptr.p_prison = NULL;
+ if (kn->kn_fflags == 0)
+ kn->kn_flags |= EV_DROP;
+ return (1);
+ }
+
+ return (kn->kn_fflags != 0);
+}
+
/*
* XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
* interval timer support code.
@@ -1771,7 +1864,7 @@ kqueue_acquire(struct file *fp, struct kqueue **kqp)
kq = fp->f_data;
if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
- return (EBADF);
+ return (EINVAL);
*kqp = kq;
KQ_LOCK(kq);
if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
@@ -2794,12 +2887,13 @@ knote_dequeue(struct knote *kn)
}
static void
-knote_init(void)
+knote_init(void *dummy __unused)
{
knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue);
+ prison0.pr_klist = knlist_alloc(&prison0.pr_mtx);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
@@ -3033,7 +3127,7 @@ sysctl_kern_proc_kqueue(SYSCTL_HANDLER_ARGS)
return (error);
td = curthread;
-#ifdef FREEBSD_COMPAT32
+#ifdef COMPAT_FREEBSD32
compat32 = SV_CURPROC_FLAG(SV_ILP32);
#else
compat32 = false;
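
A sketch of the new EVFILT_JAIL filter from userspace, assuming the EVFILT_JAIL and NOTE_JAIL_* definitions this change adds to sys/event.h; the ident is a child jail's JID, or 0 for the caller's own prison:

	#include <sys/types.h>
	#include <sys/event.h>
	#include <err.h>
	#include <stdint.h>
	#include <stdio.h>

	void
	watch_jail(int jid)
	{
		struct kevent ev;
		int kq;

		if ((kq = kqueue()) == -1)
			err(1, "kqueue");
		EV_SET(&ev, jid, EVFILT_JAIL, EV_ADD,
		    NOTE_JAIL_CHILD | NOTE_JAIL_REMOVE, 0, NULL);
		if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
			err(1, "kevent");
		for (;;) {
			if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
				err(1, "kevent");
			if (ev.fflags & NOTE_JAIL_CHILD)
				printf("child jail %jd created\n",
				    (intmax_t)ev.data);	/* data carries the JID */
			if (ev.fflags & NOTE_JAIL_REMOVE) {
				printf("jail removed\n");
				return;
			}
		}
	}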
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 0fc2d0e7f1bc..2bdd6faa025a 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -418,7 +418,7 @@ do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
#endif
int error, i, orig_osrel;
uint32_t orig_fctl0;
- Elf_Brandinfo *orig_brandinfo;
+ const Elf_Brandinfo *orig_brandinfo;
size_t freepath_size;
static const char fexecv_proc_title[] = "(fexecv)";
@@ -1314,7 +1314,7 @@ exec_map_stack(struct image_params *imgp)
MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
} else {
sharedpage_addr = sv->sv_shared_page_base;
- vm_map_fixed(map, obj, 0,
+ error = vm_map_fixed(map, obj, 0,
sharedpage_addr, sv->sv_shared_page_len,
VM_PROT_READ | VM_PROT_EXECUTE,
VM_PROT_READ | VM_PROT_EXECUTE,
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index a32b5a1b3354..c4b1c8201ff2 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -127,6 +127,27 @@ proc_realparent(struct proc *child)
return (parent);
}
+static void
+reaper_clear(struct proc *p, struct proc *rp)
+{
+ struct proc *p1;
+ bool clear;
+
+ sx_assert(&proctree_lock, SX_XLOCKED);
+ LIST_REMOVE(p, p_reapsibling);
+ if (p->p_reapsubtree == 1)
+ return;
+ clear = true;
+ LIST_FOREACH(p1, &rp->p_reaplist, p_reapsibling) {
+ if (p1->p_reapsubtree == p->p_reapsubtree) {
+ clear = false;
+ break;
+ }
+ }
+ if (clear)
+ proc_id_clear(PROC_ID_REAP, p->p_reapsubtree);
+}
+
void
reaper_abandon_children(struct proc *p, bool exiting)
{
@@ -138,7 +159,7 @@ reaper_abandon_children(struct proc *p, bool exiting)
return;
p1 = p->p_reaper;
LIST_FOREACH_SAFE(p2, &p->p_reaplist, p_reapsibling, ptmp) {
- LIST_REMOVE(p2, p_reapsibling);
+ reaper_clear(p2, p);
p2->p_reaper = p1;
p2->p_reapsubtree = p->p_reapsubtree;
LIST_INSERT_HEAD(&p1->p_reaplist, p2, p_reapsibling);
@@ -152,27 +173,6 @@ reaper_abandon_children(struct proc *p, bool exiting)
p->p_treeflag &= ~P_TREE_REAPER;
}
-static void
-reaper_clear(struct proc *p)
-{
- struct proc *p1;
- bool clear;
-
- sx_assert(&proctree_lock, SX_LOCKED);
- LIST_REMOVE(p, p_reapsibling);
- if (p->p_reapsubtree == 1)
- return;
- clear = true;
- LIST_FOREACH(p1, &p->p_reaper->p_reaplist, p_reapsibling) {
- if (p1->p_reapsubtree == p->p_reapsubtree) {
- clear = false;
- break;
- }
- }
- if (clear)
- proc_id_clear(PROC_ID_REAP, p->p_reapsubtree);
-}
-
void
proc_clear_orphan(struct proc *p)
{
@@ -807,7 +807,7 @@ kern_abort2(struct thread *td, const char *why, int nargs, void **uargs)
}
if (nargs > 0) {
sbuf_putc(sb, '(');
- for (i = 0;i < nargs; i++)
+ for (i = 0; i < nargs; i++)
sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]);
sbuf_putc(sb, ')');
}
@@ -972,7 +972,7 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
sx_xunlock(PIDHASHLOCK(p->p_pid));
LIST_REMOVE(p, p_sibling);
reaper_abandon_children(p, true);
- reaper_clear(p);
+ reaper_clear(p, p->p_reaper);
PROC_LOCK(p);
proc_clear_orphan(p);
PROC_UNLOCK(p);
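
reaper_clear() now takes the reaper as an explicit argument, since proc_reap() must name p->p_reaper while reaper_abandon_children() clears children of the exiting reaper itself. For reference, a process becomes a reaper from userspace via procctl(2); this sketch is illustrative:

	#include <sys/types.h>
	#include <sys/procctl.h>
	#include <err.h>
	#include <unistd.h>

	/*
	 * Acquire reaper status: orphaned descendants are reparented to
	 * this process rather than to init, tracked under reap subtree
	 * IDs like the ones reaper_clear() recycles above.
	 */
	void
	become_reaper(void)
	{
		if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == -1)
			err(1, "PROC_REAP_ACQUIRE");
	}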
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 2ab9b363f8b5..7f6abae187b3 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -610,10 +610,12 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
p2->p_flag |= p1->p_flag & P_SUGID;
td2->td_pflags |= td->td_pflags & (TDP_ALTSTACK | TDP_SIGFASTBLOCK);
td2->td_pflags2 |= td->td_pflags2 & TDP2_UEXTERR;
- SESS_LOCK(p1->p_session);
- if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
- p2->p_flag |= P_CONTROLT;
- SESS_UNLOCK(p1->p_session);
+ if (p1->p_flag & P_CONTROLT) {
+ SESS_LOCK(p1->p_session);
+ if (p1->p_session->s_ttyvp != NULL)
+ p2->p_flag |= P_CONTROLT;
+ SESS_UNLOCK(p1->p_session);
+ }
if (fr->fr_flags & RFPPWAIT)
p2->p_flag |= P_PPWAIT;
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index 7c9a15ae18f3..3697d95fe0e5 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -39,15 +39,18 @@
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/errno.h>
+#include <sys/file.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/osd.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
+#include <sys/event.h>
#include <sys/taskqueue.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
+#include <sys/jaildesc.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/mman.h>
@@ -154,7 +157,8 @@ static void prison_complete(void *context, int pending);
static void prison_deref(struct prison *pr, int flags);
static void prison_deref_kill(struct prison *pr, struct prisonlist *freeprison);
static int prison_lock_xlock(struct prison *pr, int flags);
-static void prison_cleanup(struct prison *pr);
+static void prison_cleanup_locked(struct prison *pr);
+static void prison_cleanup_unlocked(struct prison *pr);
static void prison_free_not_last(struct prison *pr);
static void prison_proc_free_not_last(struct prison *pr);
static void prison_proc_relink(struct prison *opr, struct prison *npr,
@@ -167,6 +171,7 @@ static void prison_racct_attach(struct prison *pr);
static void prison_racct_modify(struct prison *pr);
static void prison_racct_detach(struct prison *pr);
#endif
+static void prison_knote(struct prison *pr, long hint);
/* Flags for prison_deref */
#define PD_DEREF 0x01 /* Decrement pr_ref */
@@ -238,6 +243,9 @@ static struct bool_flags pr_flag_allow[NBBY * NBPW] = {
{"allow.unprivileged_parent_tampering",
"allow.nounprivileged_parent_tampering",
PR_ALLOW_UNPRIV_PARENT_TAMPER},
+#ifdef AUDIT
+ {"allow.setaudit", "allow.nosetaudit", PR_ALLOW_SETAUDIT},
+#endif
};
static unsigned pr_allow_all = PR_ALLOW_ALL_STATIC;
const size_t pr_flag_allow_size = sizeof(pr_flag_allow);
@@ -985,6 +993,7 @@ prison_ip_cnt(const struct prison *pr, const pr_family_t af)
int
kern_jail_set(struct thread *td, struct uio *optuio, int flags)
{
+ struct file *jfp_out;
struct nameidata nd;
#ifdef INET
struct prison_ip *ip4;
@@ -995,6 +1004,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
struct vfsopt *opt;
struct vfsoptlist *opts;
struct prison *pr, *deadpr, *dinspr, *inspr, *mypr, *ppr, *tpr;
+ struct ucred *jdcred;
struct vnode *root;
char *domain, *errmsg, *host, *name, *namelc, *p, *path, *uuid;
char *g_path, *osrelstr;
@@ -1008,7 +1018,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
int created, cuflags, descend, drflags, enforce;
int error, errmsg_len, errmsg_pos;
int gotchildmax, gotenforce, gothid, gotrsnum, gotslevel;
- int deadid, jid, jsys, len, level;
+ int deadid, jfd_in, jfd_out, jfd_pos, jid, jsys, len, level;
int childmax, osreldt, rsnum, slevel;
#ifdef INET
int ip4s;
@@ -1018,22 +1028,32 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
int ip6s;
bool redo_ip6;
#endif
+ bool maybe_changed;
uint64_t pr_allow, ch_allow, pr_flags, ch_flags;
uint64_t pr_allow_diff;
unsigned tallow;
char numbuf[12];
- error = priv_check(td, PRIV_JAIL_SET);
- if (!error && (flags & JAIL_ATTACH))
- error = priv_check(td, PRIV_JAIL_ATTACH);
- if (error)
- return (error);
mypr = td->td_ucred->cr_prison;
- if ((flags & JAIL_CREATE) && mypr->pr_childmax == 0)
+ if (((flags & (JAIL_CREATE | JAIL_AT_DESC)) == JAIL_CREATE) &&
+ mypr->pr_childmax == 0)
return (EPERM);
if (flags & ~JAIL_SET_MASK)
return (EINVAL);
+ if ((flags & (JAIL_USE_DESC | JAIL_AT_DESC)) ==
+ (JAIL_USE_DESC | JAIL_AT_DESC))
+ return (EINVAL);
+ prison_hold(mypr);
+#ifdef INET
+ ip4 = NULL;
+#endif
+#ifdef INET6
+ ip6 = NULL;
+#endif
+ g_path = NULL;
+ jfp_out = NULL;
+ jfd_out = -1;
/*
* Check all the parameters before committing to anything. Not all
* errors can be caught early, but we may as well try. Also, this
@@ -1046,14 +1066,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
*/
error = vfs_buildopts(optuio, &opts);
if (error)
- return (error);
-#ifdef INET
- ip4 = NULL;
-#endif
-#ifdef INET6
- ip6 = NULL;
-#endif
- g_path = NULL;
+ goto done_free;
cuflags = flags & (JAIL_CREATE | JAIL_UPDATE);
if (!cuflags) {
@@ -1062,6 +1075,61 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
goto done_errmsg;
}
+ error = vfs_copyopt(opts, "desc", &jfd_in, sizeof(jfd_in));
+ if (error == ENOENT) {
+ if (flags & (JAIL_USE_DESC | JAIL_AT_DESC | JAIL_GET_DESC |
+ JAIL_OWN_DESC)) {
+ vfs_opterror(opts, "missing desc");
+ goto done_errmsg;
+ }
+ jfd_in = -1;
+ } else if (error != 0)
+ goto done_free;
+ else {
+ if (!(flags & (JAIL_USE_DESC | JAIL_AT_DESC | JAIL_GET_DESC |
+ JAIL_OWN_DESC))) {
+			error = EINVAL;
+			vfs_opterror(opts, "unexpected desc");
+			goto done_errmsg;
+ }
+ if (flags & JAIL_AT_DESC) {
+ /*
+ * Look up and create jails based on the
+ * descriptor's prison.
+ */
+ prison_free(mypr);
+ error = jaildesc_find(td, jfd_in, &mypr, NULL);
+ if (error != 0) {
+ vfs_opterror(opts, error == ENOENT ?
+ "descriptor to dead jail" :
+ "not a jail descriptor");
+ goto done_errmsg;
+ }
+ if ((flags & JAIL_CREATE) && mypr->pr_childmax == 0) {
+ error = EPERM;
+ goto done_free;
+ }
+ }
+ if (flags & (JAIL_GET_DESC | JAIL_OWN_DESC)) {
+ /* Allocate a jail descriptor to return later. */
+ error = jaildesc_alloc(td, &jfp_out, &jfd_out,
+ flags & JAIL_OWN_DESC);
+ if (error)
+ goto done_free;
+ }
+ }
+
+ /*
+ * Delay the permission check if using a jail descriptor,
+ * until we get the descriptor's credentials.
+ */
+ if (!(flags & JAIL_USE_DESC)) {
+ error = priv_check(td, PRIV_JAIL_SET);
+ if (error == 0 && (flags & JAIL_ATTACH))
+ error = priv_check(td, PRIV_JAIL_ATTACH);
+ if (error)
+ goto done_free;
+ }
+
error = vfs_copyopt(opts, "jid", &jid, sizeof(jid));
if (error == ENOENT)
jid = 0;
@@ -1422,6 +1490,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
pr = NULL;
inspr = NULL;
deadpr = NULL;
+ maybe_changed = false;
if (cuflags == JAIL_CREATE && jid == 0 && name != NULL) {
namelc = strrchr(name, '.');
jid = strtoul(namelc != NULL ? namelc + 1 : name, &p, 10);
@@ -1436,7 +1505,45 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
error = EAGAIN;
goto done_deref;
}
- if (jid != 0) {
+ if (flags & JAIL_USE_DESC) {
+ /* Get the jail from its descriptor. */
+ error = jaildesc_find(td, jfd_in, &pr, &jdcred);
+ if (error) {
+ vfs_opterror(opts, error == ENOENT ?
+ "descriptor to dead jail" :
+ "not a jail descriptor");
+ goto done_deref;
+ }
+ drflags |= PD_DEREF;
+ error = priv_check_cred(jdcred, PRIV_JAIL_SET);
+ if (error == 0 && (flags & JAIL_ATTACH))
+ error = priv_check_cred(jdcred, PRIV_JAIL_ATTACH);
+ crfree(jdcred);
+ if (error)
+ goto done_deref;
+ mtx_lock(&pr->pr_mtx);
+ drflags |= PD_LOCKED;
+ if (cuflags == JAIL_CREATE) {
+ error = EEXIST;
+ vfs_opterror(opts, "jail %d already exists",
+ pr->pr_id);
+ goto done_deref;
+ }
+ if (!prison_isalive(pr)) {
+			/*
+			 * While a jid can be resurrected, the prison
+			 * itself cannot.
+			 */
+ error = ENOENT;
+ vfs_opterror(opts, "jail %d is dying", pr->pr_id);
+ goto done_deref;
+ }
+ if (jid != 0 && jid != pr->pr_id) {
+ error = EINVAL;
+ vfs_opterror(opts, "cannot change jid");
+ goto done_deref;
+ }
+ jid = pr->pr_id;
+ } else if (jid != 0) {
if (jid < 0) {
error = EINVAL;
vfs_opterror(opts, "negative jid");
@@ -1570,7 +1677,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
}
}
}
- /* Update: must provide a jid or name. */
+ /* Update: must provide a desc, jid, or name. */
else if (cuflags == JAIL_UPDATE && pr == NULL) {
error = ENOENT;
vfs_opterror(opts, "update specified no jail");
@@ -1643,6 +1750,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
LIST_INSERT_HEAD(&ppr->pr_children, pr, pr_sibling);
for (tpr = ppr; tpr != NULL; tpr = tpr->pr_parent)
tpr->pr_childcount++;
+ pr->pr_klist = knlist_alloc(&pr->pr_mtx);
/* Set some default values, and inherit some from the parent. */
if (namelc == NULL)
@@ -1722,8 +1830,10 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
* Grab a reference for existing prisons, to ensure they
* continue to exist for the duration of the call.
*/
- prison_hold(pr);
- drflags |= PD_DEREF;
+ if (!(drflags & PD_DEREF)) {
+ prison_hold(pr);
+ drflags |= PD_DEREF;
+ }
#if defined(VIMAGE) && (defined(INET) || defined(INET6))
if ((pr->pr_flags & PR_VNET) &&
(ch_flags & (PR_IP4_USER | PR_IP6_USER))) {
@@ -1880,6 +1990,7 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
goto done_deref;
}
}
+ maybe_changed = true;
/* Set the parameters of the prison. */
#ifdef INET
@@ -2112,7 +2223,10 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
* reference via persistence, or is about to gain one via attachment.
*/
if (created) {
- drflags = prison_lock_xlock(pr, drflags);
+ sx_assert(&allprison_lock, SX_XLOCKED);
+ prison_knote(ppr, NOTE_JAIL_CHILD | pr->pr_id);
+ mtx_lock(&pr->pr_mtx);
+ drflags |= PD_LOCKED;
pr->pr_state = PRISON_STATE_ALIVE;
}
@@ -2146,10 +2260,37 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
printf("Warning jail jid=%d: mountd/nfsd requires a separate"
" file system\n", pr->pr_id);
+ /*
+ * Now that the prison is fully created without error, set the
+ * jail descriptor if one was requested. This is the only
+ * parameter that is returned to the caller (except the error
+ * message).
+ */
+ if (jfd_out >= 0) {
+ if (!(drflags & PD_LOCKED)) {
+ mtx_lock(&pr->pr_mtx);
+ drflags |= PD_LOCKED;
+ }
+ jfd_pos = 2 * vfs_getopt_pos(opts, "desc") + 1;
+ if (optuio->uio_segflg == UIO_SYSSPACE)
+			*(int *)optuio->uio_iov[jfd_pos].iov_base = jfd_out;
+ else
+ (void)copyout(&jfd_out,
+ optuio->uio_iov[jfd_pos].iov_base, sizeof(jfd_out));
+ jaildesc_set_prison(jfp_out, pr);
+ }
+
drflags &= ~PD_KILL;
td->td_retval[0] = pr->pr_id;
done_deref:
+ /*
+ * Report changes to kevent. This can happen even if the
+ * system call fails, as changes might have been made before
+ * the failure.
+ */
+ if (maybe_changed && !created)
+ prison_knote(pr, NOTE_JAIL_SET);
/* Release any temporary prison holds and/or locks. */
if (pr != NULL)
prison_deref(pr, drflags);
@@ -2176,15 +2317,21 @@ kern_jail_set(struct thread *td, struct uio *optuio, int flags)
}
}
done_free:
+ /* Clean up other resources. */
#ifdef INET
prison_ip_free(ip4);
#endif
#ifdef INET6
prison_ip_free(ip6);
#endif
+ if (jfp_out != NULL)
+ fdrop(jfp_out, td);
+ if (error && jfd_out >= 0)
+ (void)kern_close(td, jfd_out);
if (g_path != NULL)
free(g_path, M_TEMP);
vfs_freeopts(opts);
+ prison_free(mypr);
return (error);
}
@@ -2329,16 +2476,21 @@ int
kern_jail_get(struct thread *td, struct uio *optuio, int flags)
{
struct bool_flags *bf;
+ struct file *jfp_out;
struct jailsys_flags *jsf;
struct prison *pr, *mypr;
struct vfsopt *opt;
struct vfsoptlist *opts;
char *errmsg, *name;
int drflags, error, errmsg_len, errmsg_pos, i, jid, len, pos;
+ int jfd_in, jfd_out;
unsigned f;
if (flags & ~JAIL_GET_MASK)
return (EINVAL);
+ if ((flags & (JAIL_USE_DESC | JAIL_AT_DESC)) ==
+ (JAIL_USE_DESC | JAIL_AT_DESC))
+ return (EINVAL);
/* Get the parameter list. */
error = vfs_buildopts(optuio, &opts);
@@ -2346,13 +2498,70 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
return (error);
errmsg_pos = vfs_getopt_pos(opts, "errmsg");
mypr = td->td_ucred->cr_prison;
+ prison_hold(mypr);
pr = NULL;
+ jfp_out = NULL;
+ jfd_out = -1;
/*
- * Find the prison specified by one of: lastjid, jid, name.
+ * Find the prison specified by one of: desc, lastjid, jid, name.
*/
sx_slock(&allprison_lock);
drflags = PD_LIST_SLOCKED;
+
+ error = vfs_copyopt(opts, "desc", &jfd_in, sizeof(jfd_in));
+ if (error == ENOENT) {
+ if (flags & (JAIL_AT_DESC | JAIL_GET_DESC | JAIL_OWN_DESC)) {
+ vfs_opterror(opts, "missing desc");
+ goto done;
+ }
+ } else if (error == 0) {
+ if (!(flags & (JAIL_USE_DESC | JAIL_AT_DESC | JAIL_GET_DESC |
+ JAIL_OWN_DESC))) {
+			error = EINVAL;
+			vfs_opterror(opts, "unexpected desc");
+			goto done;
+ }
+ if (flags & JAIL_USE_DESC) {
+ /* Get the jail from its descriptor. */
+ error = jaildesc_find(td, jfd_in, &pr, NULL);
+ if (error) {
+ vfs_opterror(opts, error == ENOENT ?
+ "descriptor to dead jail" :
+ "not a jail descriptor");
+ goto done;
+ }
+ drflags |= PD_DEREF;
+ mtx_lock(&pr->pr_mtx);
+ drflags |= PD_LOCKED;
+ if (!(prison_isalive(pr) || (flags & JAIL_DYING))) {
+ error = ENOENT;
+ vfs_opterror(opts, "jail %d is dying",
+ pr->pr_id);
+ goto done;
+ }
+ goto found_prison;
+ }
+ if (flags & JAIL_AT_DESC) {
+ /* Look up jails based on the descriptor's prison. */
+ prison_free(mypr);
+ error = jaildesc_find(td, jfd_in, &mypr, NULL);
+ if (error != 0) {
+ vfs_opterror(opts, error == ENOENT ?
+ "descriptor to dead jail" :
+ "not a jail descriptor");
+ goto done;
+ }
+ }
+ if (flags & (JAIL_GET_DESC | JAIL_OWN_DESC)) {
+ /* Allocate a jail descriptor to return later. */
+ error = jaildesc_alloc(td, &jfp_out, &jfd_out,
+ flags & JAIL_OWN_DESC);
+ if (error)
+ goto done;
+ }
+ } else
+ goto done;
+
error = vfs_copyopt(opts, "lastjid", &jid, sizeof(jid));
if (error == 0) {
TAILQ_FOREACH(pr, &allprison, pr_list) {
@@ -2421,9 +2630,17 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
found_prison:
/* Get the parameters of the prison. */
- prison_hold(pr);
- drflags |= PD_DEREF;
+ if (!(drflags & PD_DEREF)) {
+ prison_hold(pr);
+ drflags |= PD_DEREF;
+ }
td->td_retval[0] = pr->pr_id;
+ if (jfd_out >= 0) {
+ error = vfs_setopt(opts, "desc", &jfd_out, sizeof(jfd_out));
+ if (error != 0 && error != ENOENT)
+ goto done;
+ jaildesc_set_prison(jfp_out, pr);
+ }
error = vfs_setopt(opts, "jid", &pr->pr_id, sizeof(pr->pr_id));
if (error != 0 && error != ENOENT)
goto done;
@@ -2603,6 +2820,13 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
prison_deref(pr, drflags);
else if (drflags & PD_LIST_SLOCKED)
sx_sunlock(&allprison_lock);
+ else if (drflags & PD_LIST_XLOCKED)
+ sx_xunlock(&allprison_lock);
+ /* Clean up other resources. */
+ if (jfp_out != NULL)
+ (void)fdrop(jfp_out, td);
+ if (error && jfd_out >= 0)
+ (void)kern_close(td, jfd_out);
if (error && errmsg_pos >= 0) {
/* Write the error message back to userspace. */
vfs_getopt(opts, "errmsg", (void **)&errmsg, &errmsg_len);
@@ -2619,6 +2843,7 @@ kern_jail_get(struct thread *td, struct uio *optuio, int flags)
}
}
vfs_freeopts(opts);
+ prison_free(mypr);
return (error);
}
@@ -2643,14 +2868,54 @@ sys_jail_remove(struct thread *td, struct jail_remove_args *uap)
sx_xunlock(&allprison_lock);
return (EINVAL);
}
+ prison_hold(pr);
+ prison_remove(pr);
+ return (0);
+}
+
+/*
+ * struct jail_remove_jd_args {
+ * int fd;
+ * };
+ */
+int
+sys_jail_remove_jd(struct thread *td, struct jail_remove_jd_args *uap)
+{
+ struct prison *pr;
+ struct ucred *jdcred;
+ int error;
+
+ error = jaildesc_find(td, uap->fd, &pr, &jdcred);
+ if (error)
+ return (error);
+ error = priv_check_cred(jdcred, PRIV_JAIL_REMOVE);
+ crfree(jdcred);
+ if (error) {
+ prison_free(pr);
+ return (error);
+ }
+ sx_xlock(&allprison_lock);
+ mtx_lock(&pr->pr_mtx);
+ prison_remove(pr);
+ return (0);
+}
+
+/*
+ * Begin the removal process for a prison. The allprison lock should
+ * be held exclusively, and the prison should be both locked and held.
+ */
+void
+prison_remove(struct prison *pr)
+{
+ sx_assert(&allprison_lock, SA_XLOCKED);
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
if (!prison_isalive(pr)) {
/* Silently ignore already-dying prisons. */
mtx_unlock(&pr->pr_mtx);
sx_xunlock(&allprison_lock);
- return (0);
+ return;
}
- prison_deref(pr, PD_KILL | PD_LOCKED | PD_LIST_XLOCKED);
- return (0);
+ prison_deref(pr, PD_KILL | PD_DEREF | PD_LOCKED | PD_LIST_XLOCKED);
}
/*
@@ -2685,6 +2950,44 @@ sys_jail_attach(struct thread *td, struct jail_attach_args *uap)
return (do_jail_attach(td, pr, PD_LOCKED | PD_LIST_SLOCKED));
}
+/*
+ * struct jail_attach_jd_args {
+ * int fd;
+ * };
+ */
+int
+sys_jail_attach_jd(struct thread *td, struct jail_attach_jd_args *uap)
+{
+ struct prison *pr;
+ struct ucred *jdcred;
+ int drflags, error;
+
+ sx_slock(&allprison_lock);
+ drflags = PD_LIST_SLOCKED;
+ error = jaildesc_find(td, uap->fd, &pr, &jdcred);
+ if (error)
+ goto fail;
+ drflags |= PD_DEREF;
+ error = priv_check_cred(jdcred, PRIV_JAIL_ATTACH);
+ crfree(jdcred);
+ if (error)
+ goto fail;
+ mtx_lock(&pr->pr_mtx);
+ drflags |= PD_LOCKED;
+
+ /* Do not allow a process to attach to a prison that is not alive. */
+ if (!prison_isalive(pr)) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ return (do_jail_attach(td, pr, drflags));
+
+ fail:
+ prison_deref(pr, drflags);
+ return (error);
+}
+
static int
do_jail_attach(struct thread *td, struct prison *pr, int drflags)
{
@@ -2703,9 +3006,12 @@ do_jail_attach(struct thread *td, struct prison *pr, int drflags)
* a process root from one prison, but attached to the jail
* of another.
*/
- prison_hold(pr);
+ if (!(drflags & PD_DEREF)) {
+ prison_hold(pr);
+ drflags |= PD_DEREF;
+ }
refcount_acquire(&pr->pr_uref);
- drflags |= PD_DEREF | PD_DEUREF;
+ drflags |= PD_DEUREF;
mtx_unlock(&pr->pr_mtx);
drflags &= ~PD_LOCKED;
@@ -2755,6 +3061,7 @@ do_jail_attach(struct thread *td, struct prison *pr, int drflags)
prison_proc_relink(oldcred->cr_prison, pr, p);
prison_deref(oldcred->cr_prison, drflags);
crfree(oldcred);
+ prison_knote(pr, NOTE_JAIL_ATTACH | td->td_proc->p_pid);
/*
* If the prison was killed while changing credentials, die along
@@ -3182,9 +3489,10 @@ prison_deref(struct prison *pr, int flags)
refcount_load(&prison0.pr_uref) > 0,
("prison0 pr_uref=0"));
pr->pr_state = PRISON_STATE_DYING;
+ prison_cleanup_locked(pr);
mtx_unlock(&pr->pr_mtx);
flags &= ~PD_LOCKED;
- prison_cleanup(pr);
+ prison_cleanup_unlocked(pr);
}
}
}
@@ -3327,8 +3635,9 @@ prison_deref_kill(struct prison *pr, struct prisonlist *freeprison)
}
if (!(cpr->pr_flags & PR_REMOVE))
continue;
- prison_cleanup(cpr);
+ prison_cleanup_unlocked(cpr);
mtx_lock(&cpr->pr_mtx);
+ prison_cleanup_locked(cpr);
cpr->pr_flags &= ~PR_REMOVE;
if (cpr->pr_flags & PR_PERSIST) {
cpr->pr_flags &= ~PR_PERSIST;
@@ -3363,8 +3672,9 @@ prison_deref_kill(struct prison *pr, struct prisonlist *freeprison)
if (rpr != NULL)
LIST_REMOVE(rpr, pr_sibling);
- prison_cleanup(pr);
+ prison_cleanup_unlocked(pr);
mtx_lock(&pr->pr_mtx);
+ prison_cleanup_locked(pr);
if (pr->pr_flags & PR_PERSIST) {
pr->pr_flags &= ~PR_PERSIST;
prison_proc_free_not_last(pr);
@@ -3411,10 +3721,22 @@ prison_lock_xlock(struct prison *pr, int flags)
/*
* Release a prison's resources when it starts dying (when the last user
- * reference is dropped, or when it is killed).
+ * reference is dropped, or when it is killed). Two functions are called,
+ * for work that requires a locked prison or an unlocked one.
*/
static void
-prison_cleanup(struct prison *pr)
+prison_cleanup_locked(struct prison *pr)
+{
+ sx_assert(&allprison_lock, SA_XLOCKED);
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+ prison_knote(pr, NOTE_JAIL_REMOVE);
+ knlist_detach(pr->pr_klist);
+ jaildesc_prison_cleanup(pr);
+ pr->pr_klist = NULL;
+}
+
+static void
+prison_cleanup_unlocked(struct prison *pr)
{
sx_assert(&allprison_lock, SA_XLOCKED);
mtx_assert(&pr->pr_mtx, MA_NOTOWNED);
@@ -3970,7 +4292,6 @@ prison_priv_check(struct ucred *cred, int priv)
*/
case PRIV_KTRACE:
-#if 0
/*
* Allow jailed processes to configure audit identity and
* submit audit records (login, etc). In the future we may
@@ -3979,6 +4300,11 @@ prison_priv_check(struct ucred *cred, int priv)
*/
case PRIV_AUDIT_GETAUDIT:
case PRIV_AUDIT_SETAUDIT:
+ if (cred->cr_prison->pr_allow & PR_ALLOW_SETAUDIT)
+ return (0);
+ else
+ return (EPERM);
+#if 0
case PRIV_AUDIT_SUBMIT:
#endif
@@ -4715,6 +5041,10 @@ SYSCTL_JAIL_PARAM(_allow, settime, CTLTYPE_INT | CTLFLAG_RW,
"B", "Jail may set system time");
SYSCTL_JAIL_PARAM(_allow, routing, CTLTYPE_INT | CTLFLAG_RW,
"B", "Jail may modify routing table");
+#ifdef AUDIT
+SYSCTL_JAIL_PARAM(_allow, setaudit, CTLTYPE_INT | CTLFLAG_RW,
+ "B", "Jail may set and get audit session state");
+#endif
SYSCTL_JAIL_PARAM_SUBNODE(allow, mount, "Jail mount/unmount permission flags");
SYSCTL_JAIL_PARAM(_allow_mount, , CTLTYPE_INT | CTLFLAG_RW,
@@ -5039,6 +5369,23 @@ prison_racct_detach(struct prison *pr)
}
#endif /* RACCT */
+/*
+ * Submit a knote for a prison, locking if necessary.
+ */
+static void
+prison_knote(struct prison *pr, long hint)
+{
+ int locked;
+
+ locked = mtx_owned(&pr->pr_mtx);
+ if (!locked)
+ mtx_lock(&pr->pr_mtx);
+ KNOTE_LOCKED(pr->pr_klist, hint);
+ jaildesc_knote(pr, hint);
+ if (!locked)
+ mtx_unlock(&pr->pr_mtx);
+}
+
#ifdef DDB
static void
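
From userspace, the new "desc" parameter pairs with the JAIL_GET_DESC/JAIL_OWN_DESC flags: the kernel writes the new descriptor back into the parameter's value iovec (the jfd_pos copyout above). A hedged sketch; a real caller would pass more parameters and error-check the jid:

	#include <sys/param.h>
	#include <sys/jail.h>
	#include <sys/uio.h>
	#include <string.h>

	int
	create_with_desc(const char *name, int *fdp)
	{
		struct iovec iov[4];
		int fd = -1, jid;

		iov[0].iov_base = __DECONST(char *, "name");
		iov[0].iov_len = strlen("name") + 1;
		iov[1].iov_base = __DECONST(char *, name);
		iov[1].iov_len = strlen(name) + 1;
		iov[2].iov_base = __DECONST(char *, "desc");
		iov[2].iov_len = strlen("desc") + 1;
		iov[3].iov_base = &fd;	/* the kernel writes the fd here */
		iov[3].iov_len = sizeof(fd);
		jid = jail_set(iov, 4, JAIL_CREATE | JAIL_GET_DESC);
		*fdp = fd;
		return (jid);
	}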
diff --git a/sys/kern/kern_jaildesc.c b/sys/kern/kern_jaildesc.c
new file mode 100644
index 000000000000..3f322b271400
--- /dev/null
+++ b/sys/kern/kern_jaildesc.c
@@ -0,0 +1,412 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 James Gritton.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/kernel.h>
+#include <sys/jail.h>
+#include <sys/jaildesc.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/stat.h>
+#include <sys/sysproto.h>
+#include <sys/systm.h>
+#include <sys/ucred.h>
+#include <sys/user.h>
+#include <sys/vnode.h>
+
+MALLOC_DEFINE(M_JAILDESC, "jaildesc", "jail descriptors");
+
+static fo_poll_t jaildesc_poll;
+static fo_kqfilter_t jaildesc_kqfilter;
+static fo_stat_t jaildesc_stat;
+static fo_close_t jaildesc_close;
+static fo_fill_kinfo_t jaildesc_fill_kinfo;
+static fo_cmp_t jaildesc_cmp;
+
+static struct fileops jaildesc_ops = {
+ .fo_read = invfo_rdwr,
+ .fo_write = invfo_rdwr,
+ .fo_truncate = invfo_truncate,
+ .fo_ioctl = invfo_ioctl,
+ .fo_poll = jaildesc_poll,
+ .fo_kqfilter = jaildesc_kqfilter,
+ .fo_stat = jaildesc_stat,
+ .fo_close = jaildesc_close,
+ .fo_chmod = invfo_chmod,
+ .fo_chown = invfo_chown,
+ .fo_sendfile = invfo_sendfile,
+ .fo_fill_kinfo = jaildesc_fill_kinfo,
+ .fo_cmp = jaildesc_cmp,
+ .fo_flags = DFLAG_PASSABLE,
+};
+
+/*
+ * Given a jail descriptor number, return its prison and/or its
+ * credential. They are returned held, and will need to be released
+ * by the caller.
+ */
+int
+jaildesc_find(struct thread *td, int fd, struct prison **prp,
+ struct ucred **ucredp)
+{
+ struct file *fp;
+ struct jaildesc *jd;
+ struct prison *pr;
+ int error;
+
+ error = fget(td, fd, &cap_no_rights, &fp);
+ if (error != 0)
+ return (error);
+ if (fp->f_type != DTYPE_JAILDESC) {
+ error = EINVAL;
+ goto out;
+ }
+ jd = fp->f_data;
+ JAILDESC_LOCK(jd);
+ pr = jd->jd_prison;
+ if (pr == NULL || !prison_isvalid(pr)) {
+ error = ENOENT;
+ JAILDESC_UNLOCK(jd);
+ goto out;
+ }
+ if (prp != NULL) {
+ prison_hold(pr);
+ *prp = pr;
+ }
+ JAILDESC_UNLOCK(jd);
+ if (ucredp != NULL)
+ *ucredp = crhold(fp->f_cred);
+ out:
+ fdrop(fp, td);
+ return (error);
+}
+
+/*
+ * Allocate a new jail descriptor, not yet associated with a prison.
+ * Return the file pointer (with a reference held) and the descriptor
+ * number.
+ */
+int
+jaildesc_alloc(struct thread *td, struct file **fpp, int *fdp, int owning)
+{
+ struct file *fp;
+ struct jaildesc *jd;
+ int error;
+
+ if (owning) {
+ error = priv_check(td, PRIV_JAIL_REMOVE);
+ if (error != 0)
+ return (error);
+ }
+ jd = malloc(sizeof(*jd), M_JAILDESC, M_WAITOK | M_ZERO);
+ error = falloc_caps(td, &fp, fdp, 0, NULL);
+ if (error != 0) {
+ free(jd, M_JAILDESC);
+ return (error);
+ }
+ finit(fp, priv_check_cred(fp->f_cred, PRIV_JAIL_SET) == 0 ?
+ FREAD | FWRITE : FREAD, DTYPE_JAILDESC, jd, &jaildesc_ops);
+ JAILDESC_LOCK_INIT(jd);
+ knlist_init_mtx(&jd->jd_selinfo.si_note, &jd->jd_lock);
+ if (owning)
+ jd->jd_flags |= JDF_OWNING;
+ *fpp = fp;
+ return (0);
+}
+
+/*
+ * Associate a jail descriptor with its prison.
+ */
+void
+jaildesc_set_prison(struct file *fp, struct prison *pr)
+{
+ struct jaildesc *jd;
+
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+ jd = fp->f_data;
+ JAILDESC_LOCK(jd);
+ jd->jd_prison = pr;
+ LIST_INSERT_HEAD(&pr->pr_descs, jd, jd_list);
+ prison_hold(pr);
+ JAILDESC_UNLOCK(jd);
+}
+
+/*
+ * Detach all the jail descriptors from a prison.
+ */
+void
+jaildesc_prison_cleanup(struct prison *pr)
+{
+ struct jaildesc *jd;
+
+ mtx_assert(&pr->pr_mtx, MA_OWNED);
+ while ((jd = LIST_FIRST(&pr->pr_descs))) {
+ JAILDESC_LOCK(jd);
+ LIST_REMOVE(jd, jd_list);
+ jd->jd_prison = NULL;
+ JAILDESC_UNLOCK(jd);
+ prison_free(pr);
+ }
+}
+
+/*
+ * Pass a note to all listening kqueues.
+ */
+void
+jaildesc_knote(struct prison *pr, long hint)
+{
+ struct jaildesc *jd;
+ int prison_locked;
+
+ if (!LIST_EMPTY(&pr->pr_descs)) {
+ prison_locked = mtx_owned(&pr->pr_mtx);
+ if (!prison_locked)
+ prison_lock(pr);
+ LIST_FOREACH(jd, &pr->pr_descs, jd_list) {
+ JAILDESC_LOCK(jd);
+ if (hint == NOTE_JAIL_REMOVE) {
+ jd->jd_flags |= JDF_REMOVED;
+ if (jd->jd_flags & JDF_SELECTED) {
+ jd->jd_flags &= ~JDF_SELECTED;
+ selwakeup(&jd->jd_selinfo);
+ }
+ }
+ KNOTE_LOCKED(&jd->jd_selinfo.si_note, hint);
+ JAILDESC_UNLOCK(jd);
+ }
+ if (!prison_locked)
+ prison_unlock(pr);
+ }
+}
+
+static int
+jaildesc_close(struct file *fp, struct thread *td)
+{
+ struct jaildesc *jd;
+ struct prison *pr;
+
+ jd = fp->f_data;
+ fp->f_data = NULL;
+ if (jd != NULL) {
+ JAILDESC_LOCK(jd);
+ pr = jd->jd_prison;
+ if (pr != NULL) {
+ /*
+ * Free or remove the associated prison.
+ * This requires a second check after re-
+ * ordering locks. This jaildesc can remain
+ * unlocked once we have a prison reference,
+ * because that prison is the only place that
+ * still points back to it.
+ */
+ prison_hold(pr);
+ JAILDESC_UNLOCK(jd);
+ if (jd->jd_flags & JDF_OWNING) {
+ sx_xlock(&allprison_lock);
+ prison_lock(pr);
+ if (jd->jd_prison != NULL) {
+ /*
+ * Unlink the prison, but don't free
+				 * it; that will be done as part of
+				 * prison_remove.
+ */
+ LIST_REMOVE(jd, jd_list);
+ prison_remove(pr);
+ } else {
+ prison_unlock(pr);
+ sx_xunlock(&allprison_lock);
+ }
+ } else {
+ prison_lock(pr);
+ if (jd->jd_prison != NULL) {
+ LIST_REMOVE(jd, jd_list);
+ prison_free(pr);
+ }
+ prison_unlock(pr);
+ }
+ prison_free(pr);
+ }
+ knlist_destroy(&jd->jd_selinfo.si_note);
+ JAILDESC_LOCK_DESTROY(jd);
+ free(jd, M_JAILDESC);
+ }
+ return (0);
+}
+
+static int
+jaildesc_poll(struct file *fp, int events, struct ucred *active_cred,
+ struct thread *td)
+{
+ struct jaildesc *jd;
+ int revents;
+
+ revents = 0;
+ jd = fp->f_data;
+ JAILDESC_LOCK(jd);
+ if (jd->jd_flags & JDF_REMOVED)
+ revents |= POLLHUP;
+ if (revents == 0) {
+ selrecord(td, &jd->jd_selinfo);
+ jd->jd_flags |= JDF_SELECTED;
+ }
+ JAILDESC_UNLOCK(jd);
+ return (revents);
+}
+
+static void
+jaildesc_kqops_detach(struct knote *kn)
+{
+ struct jaildesc *jd;
+
+ jd = kn->kn_fp->f_data;
+ knlist_remove(&jd->jd_selinfo.si_note, kn, 0);
+}
+
+static int
+jaildesc_kqops_event(struct knote *kn, long hint)
+{
+ struct jaildesc *jd;
+ u_int event;
+
+ jd = kn->kn_fp->f_data;
+ if (hint == 0) {
+ /*
+ * Initial test after registration. Generate a
+ * NOTE_JAIL_REMOVE in case the prison already died
+ * before registration.
+ */
+ event = jd->jd_flags & JDF_REMOVED ? NOTE_JAIL_REMOVE : 0;
+ } else {
+ /*
+ * Mask off extra data. In the NOTE_JAIL_CHILD case,
+ * that's everything except the NOTE_JAIL_CHILD bit
+ * itself, since a JID is any positive integer.
+ */
+ event = ((u_int)hint & NOTE_JAIL_CHILD) ? NOTE_JAIL_CHILD :
+ (u_int)hint & NOTE_JAIL_CTRLMASK;
+ }
+
+ /* If the user is interested in this event, record it. */
+ if (kn->kn_sfflags & event) {
+ kn->kn_fflags |= event;
+ /* Report the created jail id or attached process id. */
+ if (event == NOTE_JAIL_CHILD || event == NOTE_JAIL_ATTACH) {
+ if (kn->kn_data != 0)
+ kn->kn_fflags |= NOTE_JAIL_MULTI;
+ kn->kn_data = (kn->kn_fflags & NOTE_JAIL_MULTI) ? 0U :
+ (u_int)hint & ~event;
+ }
+ }
+
+ /* Prison is gone, so flag the event as finished. */
+ if (event == NOTE_JAIL_REMOVE) {
+ kn->kn_flags |= EV_EOF | EV_ONESHOT;
+ if (kn->kn_fflags == 0)
+ kn->kn_flags |= EV_DROP;
+ return (1);
+ }
+
+ return (kn->kn_fflags != 0);
+}
+
+static const struct filterops jaildesc_kqops = {
+ .f_isfd = 1,
+ .f_detach = jaildesc_kqops_detach,
+ .f_event = jaildesc_kqops_event,
+};
+
+static int
+jaildesc_kqfilter(struct file *fp, struct knote *kn)
+{
+ struct jaildesc *jd;
+
+ jd = fp->f_data;
+ switch (kn->kn_filter) {
+ case EVFILT_JAILDESC:
+ kn->kn_fop = &jaildesc_kqops;
+ kn->kn_flags |= EV_CLEAR;
+ knlist_add(&jd->jd_selinfo.si_note, kn, 0);
+ return (0);
+ default:
+ return (EINVAL);
+ }
+}
+
+static int
+jaildesc_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
+{
+ struct jaildesc *jd;
+
+ bzero(sb, sizeof(struct stat));
+ jd = fp->f_data;
+ JAILDESC_LOCK(jd);
+ if (jd->jd_prison != NULL) {
+ sb->st_ino = jd->jd_prison->pr_id;
+ sb->st_mode = S_IFREG | S_IRWXU;
+ } else
+ sb->st_mode = S_IFREG;
+ JAILDESC_UNLOCK(jd);
+ return (0);
+}
+
+static int
+jaildesc_fill_kinfo(struct file *fp, struct kinfo_file *kif,
+ struct filedesc *fdp)
+{
+ struct jaildesc *jd;
+
+ jd = fp->f_data;
+ kif->kf_type = KF_TYPE_JAILDESC;
+ kif->kf_un.kf_jail.kf_jid = jd->jd_prison ? jd->jd_prison->pr_id : 0;
+ return (0);
+}
+
+static int
+jaildesc_cmp(struct file *fp1, struct file *fp2, struct thread *td)
+{
+ struct jaildesc *jd1, *jd2;
+ int jid1, jid2;
+
+ if (fp2->f_type != DTYPE_JAILDESC)
+ return (3);
+ jd1 = fp1->f_data;
+ JAILDESC_LOCK(jd1);
+ jid1 = jd1->jd_prison ? (uintptr_t)jd1->jd_prison->pr_id : 0;
+ JAILDESC_UNLOCK(jd1);
+ jd2 = fp2->f_data;
+ JAILDESC_LOCK(jd2);
+ jid2 = jd2->jd_prison ? (uintptr_t)jd2->jd_prison->pr_id : 0;
+ JAILDESC_UNLOCK(jd2);
+ return (kcmp_cmp(jid1, jid2));
+}
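
From a consumer's point of view, the poll/kqueue methods above make a jail descriptor a waitable handle for jail removal; closing an owning descriptor (JAIL_OWN_DESC) removes the jail itself. A sketch:

	#include <poll.h>
	#include <err.h>
	#include <stdio.h>

	void
	wait_for_removal(int jfd)
	{
		struct pollfd pfd = { .fd = jfd, .events = 0 };

		/* jaildesc_poll() reports POLLHUP once the prison is removed. */
		if (poll(&pfd, 1, INFTIM) == -1)
			err(1, "poll");
		if (pfd.revents & POLLHUP)
			printf("jail removed\n");
	}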
diff --git a/sys/kern/kern_jailmeta.c b/sys/kern/kern_jailmeta.c
index 4e37eccad03a..91bb7155820d 100644
--- a/sys/kern/kern_jailmeta.c
+++ b/sys/kern/kern_jailmeta.c
@@ -599,22 +599,18 @@ SYSCTL_PROC(_security_jail, OID_AUTO, env,
/* Setup and tear down. */
-static int
+static void
jm_sysinit(void *arg __unused)
{
meta.osd_slot = osd_jail_register(jm_osd_destructor, meta.methods);
env.osd_slot = osd_jail_register(jm_osd_destructor, env.methods);
-
- return (0);
}
-static int
+static void
jm_sysuninit(void *arg __unused)
{
osd_jail_deregister(meta.osd_slot);
osd_jail_deregister(env.osd_slot);
-
- return (0);
}
SYSINIT(jailmeta, SI_SUB_DRIVERS, SI_ORDER_ANY, jm_sysinit, NULL);
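
This is the same cleanup applied to boottrace_init(), devctl_init(), knote_init(), and others in this change: SYSINIT/SYSUNINIT callbacks now match the void-returning sysinit function signature rather than returning a discarded int. The shape, with illustrative names:

	static void
	example_init(void *dummy __unused)
	{
		/* one-time setup; no return value */
	}
	SYSINIT(example, SI_SUB_DRIVERS, SI_ORDER_ANY, example_init, NULL);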
diff --git a/sys/kern/kern_linker.c b/sys/kern/kern_linker.c
index d566bc01bc5e..e2f63cbc0c5a 100644
--- a/sys/kern/kern_linker.c
+++ b/sys/kern/kern_linker.c
@@ -435,7 +435,7 @@ linker_file_register_modules(linker_file_t lf)
}
static void
-linker_init_kernel_modules(void)
+linker_init_kernel_modules(void *dummy __unused)
{
sx_xlock(&kld_sx);
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 31bff6d2c1aa..76f68677e292 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1780,9 +1780,11 @@ lockmgr_chain(struct thread *td, struct thread **ownerp)
lk = td->td_wchan;
- if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
+ if (!TD_ON_SLEEPQ(td) || sleepq_type(td->td_wchan) != SLEEPQ_LK ||
+ LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
return (0);
- db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
+ db_printf("blocked on lock %p (%s) \"%s\" ", &lk->lock_object,
+ lock_class_lockmgr.lc_name, lk->lock_object.lo_name);
if (lk->lk_lock & LK_SHARE)
db_printf("SHARED (count %ju)\n",
(uintmax_t)LK_SHARERS(lk->lk_lock));
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 879220be050b..fcbfbe64f854 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -303,7 +303,7 @@ sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
*/
#if MALLOC_DEBUG_MAXZONES > 1
static void
-tunable_set_numzones(void)
+tunable_set_numzones(void *dummy __unused)
{
TUNABLE_INT_FETCH("debug.malloc.numzones",
@@ -751,11 +751,14 @@ malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
return (malloc_large(size, mtp, DOMAINSET_RR(), flags
DEBUG_REDZONE_ARG));
- vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
- do {
- va = malloc_domain(&size, &indx, mtp, domain, flags);
- } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
+ indx = -1;
+ va = NULL;
+ if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags) == 0)
+ do {
+ va = malloc_domain(&size, &indx, mtp, domain, flags);
+ } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
+
if (__predict_false(va == NULL)) {
KASSERT((flags & M_WAITOK) == 0,
("malloc(M_WAITOK) returned NULL"));
@@ -1299,7 +1302,7 @@ mallocinit(void *dummy)
#endif
align, UMA_ZONE_MALLOC);
}
- for (;i <= size; i+= KMEM_ZBASE)
+	for (; i <= size; i += KMEM_ZBASE)
kmemsize[i >> KMEM_ZSHIFT] = indx;
}
}
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index f952b3fc8805..d67c70984528 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -503,8 +503,8 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
/*
* __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
*
- * We call this if the lock is either contested (i.e. we need to go to
- * sleep waiting for it), or if we need to recurse on it.
+ * We get here if lock profiling is enabled, the lock is already held by
+ * someone else, or we are recursing on it.
*/
#if LOCK_DEBUG > 0
void
@@ -660,13 +660,8 @@ retry_turnstile:
}
#endif
- /*
- * If the mutex isn't already contested and a failure occurs
- * setting the contested bit, the mutex was either released
- * or the state of the MTX_RECURSED bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
+ if ((v & MTX_WAITERS) == 0 &&
+ !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_WAITERS)) {
goto retry_turnstile;
}
@@ -869,7 +864,7 @@ _thread_lock(struct thread *td)
WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
return;
}
- _mtx_release_lock_quick(m);
+ atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
slowpath_unlocked:
spinlock_exit();
slowpath_noirq:
@@ -959,7 +954,7 @@ retry:
}
if (m == td->td_lock)
break;
- _mtx_release_lock_quick(m);
+ atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
}
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
@@ -1029,8 +1024,8 @@ thread_lock_set(struct thread *td, struct mtx *new)
/*
* __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
*
- * We are only called here if the lock is recursed, contested (i.e. we
- * need to wake up a blocked thread) or lockstat probe is active.
+ * We get here if lock profiling is enabled, the lock is already held by
+ * someone else, or we are recursing on it.
*/
#if LOCK_DEBUG > 0
void
@@ -1071,7 +1066,7 @@ __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
* can be removed from the hash list if it is empty.
*/
turnstile_chain_lock(&m->lock_object);
- _mtx_release_lock_quick(m);
+ atomic_store_rel_ptr(&m->mtx_lock, MTX_UNOWNED);
ts = turnstile_lookup(&m->lock_object);
MPASS(ts != NULL);
if (LOCK_LOG_TEST(&m->lock_object, opts))
@@ -1136,9 +1131,9 @@ __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
* General init routine used by the MTX_SYSINIT() macro.
*/
void
-mtx_sysinit(void *arg)
+mtx_sysinit(const void *arg)
{
- struct mtx_args *margs = arg;
+ const struct mtx_args *margs = arg;
mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
margs->ma_opts);
@@ -1207,7 +1202,7 @@ _mtx_destroy(volatile uintptr_t *c)
if (!mtx_owned(m))
MPASS(mtx_unowned(m));
else {
- MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
+ MPASS((m->mtx_lock & (MTX_RECURSED|MTX_WAITERS)) == 0);
/* Perform the non-mtx related part of mtx_unlock_spin(). */
if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
@@ -1359,8 +1354,8 @@ db_show_mtx(const struct lock_object *lock)
db_printf("DESTROYED");
else {
db_printf("OWNED");
- if (m->mtx_lock & MTX_CONTESTED)
- db_printf(", CONTESTED");
+ if (m->mtx_lock & MTX_WAITERS)
+ db_printf(", WAITERS");
if (m->mtx_lock & MTX_RECURSED)
db_printf(", RECURSED");
}
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 379fbda619c0..6e56664d12ce 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -1112,13 +1112,14 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
if (cred->cr_flags & CRED_FLAG_CAPMODE)
kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE;
/* XXX bde doesn't like KI_NGROUPS */
- if (cred->cr_ngroups > KI_NGROUPS) {
+ if (1 + cred->cr_ngroups > KI_NGROUPS) {
kp->ki_ngroups = KI_NGROUPS;
kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
} else
- kp->ki_ngroups = cred->cr_ngroups;
- bcopy(cred->cr_groups, kp->ki_groups,
- kp->ki_ngroups * sizeof(gid_t));
+ kp->ki_ngroups = 1 + cred->cr_ngroups;
+ kp->ki_groups[0] = cred->cr_gid;
+ bcopy(cred->cr_groups, kp->ki_groups + 1,
+ (kp->ki_ngroups - 1) * sizeof(gid_t));
kp->ki_rgid = cred->cr_rgid;
kp->ki_svgid = cred->cr_svgid;
/* If jailed(cred), emulate the old P_JAILED flag. */
@@ -2943,8 +2944,11 @@ sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS)
cred = crhold(p->p_ucred);
PROC_UNLOCK(p);
- error = SYSCTL_OUT(req, cred->cr_groups,
- cred->cr_ngroups * sizeof(gid_t));
+ error = SYSCTL_OUT(req, &cred->cr_gid, sizeof(gid_t));
+ if (error == 0)
+ error = SYSCTL_OUT(req, cred->cr_groups,
+ cred->cr_ngroups * sizeof(gid_t));
+
crfree(cred);
return (error);
}
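
With the egid no longer stored in cr_groups, fill_kinfo_proc_only() and sysctl_kern_proc_groups() above prepend cr_gid so the exported layout stays egid-first. Reading it back (a sketch, assuming the existing KERN_PROC_GROUPS MIB):

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <err.h>
	#include <limits.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		gid_t gids[NGROUPS_MAX + 1];
		size_t len = sizeof(gids);
		int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_GROUPS,
		    (int)getpid() };

		if (sysctl(mib, 4, gids, &len, NULL, 0) == -1)
			err(1, "sysctl");
		/* gids[0] is the egid; the rest are supplementary groups. */
		printf("egid %u, %zu supplementary\n", (unsigned)gids[0],
		    len / sizeof(gid_t) - 1);
		return (0);
	}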
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 2cd5b7069023..a4c5bcc52529 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -291,11 +291,6 @@ sys_getgid(struct thread *td, struct getgid_args *uap)
return (0);
}
-/*
- * Get effective group ID. The "egid" is groups[0], and could be obtained
- * via getgroups. This syscall exists because it is somewhat painful to do
- * correctly in a library function.
- */
#ifndef _SYS_SYSPROTO_H_
struct getegid_args {
int dummy;
@@ -310,6 +305,39 @@ sys_getegid(struct thread *td, struct getegid_args *uap)
return (0);
}
+#ifdef COMPAT_FREEBSD14
+int
+freebsd14_getgroups(struct thread *td, struct freebsd14_getgroups_args *uap)
+{
+ struct ucred *cred;
+ int ngrp, error;
+
+ cred = td->td_ucred;
+
+ /*
+ * For FreeBSD < 15.0, we account for the egid being placed at the
+ * beginning of the group list prior to all supplementary groups.
+ */
+ ngrp = cred->cr_ngroups + 1;
+ if (uap->gidsetsize == 0) {
+ error = 0;
+ goto out;
+ } else if (uap->gidsetsize < ngrp) {
+ return (EINVAL);
+ }
+
+ error = copyout(&cred->cr_gid, uap->gidset, sizeof(gid_t));
+ if (error == 0)
+ error = copyout(cred->cr_groups, uap->gidset + 1,
+ (ngrp - 1) * sizeof(gid_t));
+
+out:
+ td->td_retval[0] = ngrp;
+ return (error);
+}
+#endif /* COMPAT_FREEBSD14 */
+
#ifndef _SYS_SYSPROTO_H_
struct getgroups_args {
int gidsetsize;
@@ -320,18 +348,11 @@ int
sys_getgroups(struct thread *td, struct getgroups_args *uap)
{
struct ucred *cred;
- gid_t *ugidset;
int ngrp, error;
cred = td->td_ucred;
- /*
- * cr_gid has been moved out of cr_groups, but we'll continue exporting
- * the egid as groups[0] for the time being until we audit userland for
- * any surprises.
- */
- ngrp = cred->cr_ngroups + 1;
-
+ ngrp = cred->cr_ngroups;
if (uap->gidsetsize == 0) {
error = 0;
goto out;
@@ -339,14 +360,7 @@ sys_getgroups(struct thread *td, struct getgroups_args *uap)
if (uap->gidsetsize < ngrp)
return (EINVAL);
- ugidset = uap->gidset;
- error = copyout(&cred->cr_gid, ugidset, sizeof(*ugidset));
- if (error != 0)
- goto out;
-
- if (ngrp > 1)
- error = copyout(cred->cr_groups, ugidset + 1,
- (ngrp - 1) * sizeof(*ugidset));
+ error = copyout(cred->cr_groups, uap->gidset, ngrp * sizeof(gid_t));
out:
td->td_retval[0] = ngrp;
return (error);
@@ -1186,6 +1200,44 @@ fail:
return (error);
}
+#ifdef COMPAT_FREEBSD14
+int
+freebsd14_setgroups(struct thread *td, struct freebsd14_setgroups_args *uap)
+{
+ gid_t smallgroups[CRED_SMALLGROUPS_NB];
+ gid_t *groups;
+ int gidsetsize, error;
+
+ /*
+ * Before FreeBSD 15.0, we allow one more group to be supplied to
+ * account for the egid appearing before the supplementary groups. This
+ * may technically allow one more supplementary group for systems that
+ * did use the default NGROUPS_MAX if we round it back up to 1024.
+ */
+ gidsetsize = uap->gidsetsize;
+ if (gidsetsize > ngroups_max + 1 || gidsetsize < 0)
+ return (EINVAL);
+
+ if (gidsetsize > CRED_SMALLGROUPS_NB)
+ groups = malloc(gidsetsize * sizeof(gid_t), M_TEMP, M_WAITOK);
+ else
+ groups = smallgroups;
+
+ error = copyin(uap->gidset, groups, gidsetsize * sizeof(gid_t));
+ if (error == 0) {
+ int ngroups = gidsetsize > 0 ? gidsetsize - 1 /* egid */ : 0;
+
+ error = kern_setgroups(td, &ngroups, groups + 1);
+ if (error == 0 && gidsetsize > 0)
+ td->td_proc->p_ucred->cr_gid = groups[0];
+ }
+
+ if (groups != smallgroups)
+ free(groups, M_TEMP);
+ return (error);
+}
+#endif /* COMPAT_FREEBSD14 */
+
#ifndef _SYS_SYSPROTO_H_
struct setgroups_args {
int gidsetsize;
@@ -1210,8 +1262,7 @@ sys_setgroups(struct thread *td, struct setgroups_args *uap)
* setgroups() differ.
*/
gidsetsize = uap->gidsetsize;
- /* XXXKE Limit to ngroups_max when we change the userland interface. */
- if (gidsetsize > ngroups_max + 1 || gidsetsize < 0)
+ if (gidsetsize > ngroups_max || gidsetsize < 0)
return (EINVAL);
if (gidsetsize > CRED_SMALLGROUPS_NB)
@@ -1238,35 +1289,17 @@ kern_setgroups(struct thread *td, int *ngrpp, gid_t *groups)
struct proc *p = td->td_proc;
struct ucred *newcred, *oldcred;
int ngrp, error;
- gid_t egid;
ngrp = *ngrpp;
/* Sanity check size. */
- /* XXXKE Limit to ngroups_max when we change the userland interface. */
- if (ngrp < 0 || ngrp > ngroups_max + 1)
+ if (ngrp < 0 || ngrp > ngroups_max)
return (EINVAL);
AUDIT_ARG_GROUPSET(groups, ngrp);
- /*
- * setgroups(0, NULL) is a legitimate way of clearing the groups vector
- * on non-BSD systems (which generally do not have the egid in the
- * groups[0]). We risk security holes when running non-BSD software if
- * we do not do the same. So we allow and treat 0 for 'ngrp' specially
- * below (twice).
- */
- if (ngrp != 0) {
- /*
- * To maintain userland compat for now, we use the first group
- * as our egid and we'll use the rest as our supplemental
- * groups.
- */
- egid = groups[0];
- ngrp--;
- groups++;
- groups_normalize(&ngrp, groups);
- *ngrpp = ngrp;
- }
+ groups_normalize(&ngrp, groups);
+ *ngrpp = ngrp;
+
newcred = crget();
crextend(newcred, ngrp);
PROC_LOCK(p);
@@ -1289,15 +1322,7 @@ kern_setgroups(struct thread *td, int *ngrpp, gid_t *groups)
if (error)
goto fail;
- /*
- * If some groups were passed, the first one is currently the desired
- * egid. This code is to be removed (along with some commented block
- * above) when setgroups() is changed to take only supplementary groups.
- */
- if (ngrp != 0)
- newcred->cr_gid = egid;
crsetgroups_internal(newcred, ngrp, groups);
-
setsugid(p);
proc_set_cred(p, newcred);
PROC_UNLOCK(p);
@@ -1773,12 +1798,6 @@ groupmember(gid_t gid, const struct ucred *cred)
bool
realgroupmember(gid_t gid, const struct ucred *cred)
{
- /*
- * Although the equality test on 'cr_rgid' below doesn't access
- * 'cr_groups', we check for the latter's length here as we assume that,
- * if 'cr_ngroups' is 0, the passed 'struct ucred' is invalid, and
- * 'cr_rgid' may not have been filled.
- */
groups_check_positive_len(cred->cr_ngroups);
if (gid == cred->cr_rgid)
@@ -1866,19 +1885,22 @@ SYSCTL_INT(_security_bsd, OID_AUTO, see_other_gids, CTLFLAG_RW,
static int
cr_canseeothergids(struct ucred *u1, struct ucred *u2)
{
- if (!see_other_gids) {
- if (realgroupmember(u1->cr_rgid, u2))
- return (0);
+ if (see_other_gids)
+ return (0);
- for (int i = 1; i < u1->cr_ngroups; i++)
- if (realgroupmember(u1->cr_groups[i], u2))
- return (0);
+ /* Restriction in force. */
- if (priv_check_cred(u1, PRIV_SEEOTHERGIDS) != 0)
- return (ESRCH);
- }
+ if (realgroupmember(u1->cr_rgid, u2))
+ return (0);
- return (0);
+ for (int i = 0; i < u1->cr_ngroups; i++)
+ if (realgroupmember(u1->cr_groups[i], u2))
+ return (0);
+
+ if (priv_check_cred(u1, PRIV_SEEOTHERGIDS) == 0)
+ return (0);
+
+ return (ESRCH);
}
/*
@@ -2246,6 +2268,7 @@ cr_xids_subset(struct ucred *active_cred, struct ucred *obj_cred)
}
}
grpsubset = grpsubset &&
+ groupmember(obj_cred->cr_gid, active_cred) &&
groupmember(obj_cred->cr_rgid, active_cred) &&
groupmember(obj_cred->cr_svgid, active_cred);
@@ -2891,7 +2914,8 @@ crextend(struct ucred *cr, int n)
* Normalizes a set of groups to be applied to a 'struct ucred'.
*
* Normalization ensures that the supplementary groups are sorted in ascending
- * order and do not contain duplicates.
+ * order and do not contain duplicates. This allows group_is_supplementary() to
+ * do a binary search.
*/
static void
groups_normalize(int *ngrp, gid_t *groups)
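Normalization is what makes a binary-search membership test valid. A minimal sketch of such a test, assuming a sorted, duplicate-free set; the helper name is illustrative, not the kernel's group_is_supplementary():

#include <sys/types.h>
#include <stdbool.h>

/* Binary search over a normalized (sorted, duplicate-free) group set. */
static bool
is_supplementary(gid_t gid, const gid_t *groups, int ngrp)
{
	int lo = 0, hi = ngrp - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (groups[mid] == gid)
			return (true);
		if (groups[mid] < gid)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return (false);
}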
@@ -2954,9 +2978,9 @@ crsetgroups_internal(struct ucred *cr, int ngrp, const gid_t *groups)
* Copy groups in to a credential after expanding it if required.
*
* May sleep in order to allocate memory (except if, e.g., crextend() was called
- * before with 'ngrp' or greater). Truncates the list to ngroups_max if
+ * before with 'ngrp' or greater). Truncates the list to 'ngroups_max' if
* it is too large. Array 'groups' doesn't need to be sorted. 'ngrp' must be
- * strictly positive.
+ * positive.
*/
void
crsetgroups(struct ucred *cr, int ngrp, const gid_t *groups)
@@ -2987,8 +3011,8 @@ crsetgroups(struct ucred *cr, int ngrp, const gid_t *groups)
* Same as crsetgroups() but sets the effective GID as well.
*
* This function ensures that an effective GID is always present in credentials.
- * An empty array will only set the effective GID to the default_egid, while a
- * non-empty array will peel off groups[0] to set as the effective GID and use
+ * An empty array will only set the effective GID to 'default_egid', while
+ * a non-empty array will peel off groups[0] to set as the effective GID and use
* the remainder, if any, as supplementary groups.
*/
void
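To illustrate the userland-visible effect of the new syscall numbers: getgroups(2) now returns only the supplementary set, and the effective GID must be obtained via getegid(2). A minimal sketch, not part of the patch:

#include <sys/types.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	/* Pre-15.0, gidset[0] was the egid; now the set is purely supplementary. */
	gid_t egid = getegid();
	int n = getgroups(0, NULL);		/* query the count only */

	if (n == -1)
		err(1, "getgroups");
	gid_t *set = calloc(n > 0 ? n : 1, sizeof(gid_t));
	if (set == NULL)
		err(1, "calloc");
	if (getgroups(n, set) == -1)
		err(1, "getgroups");
	printf("egid %d, %d supplementary group(s)\n", (int)egid, n);
	for (int i = 0; i < n; i++)
		printf("  %d\n", (int)set[i]);
	free(set);
	return (0);
}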
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
index 7351e9cb6313..2aab151aba08 100644
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -1312,7 +1312,7 @@ static struct kproc_desc racctd_kp = {
};
static void
-racctd_init(void)
+racctd_init(void *dummy __unused)
{
if (!racct_enable)
return;
@@ -1322,7 +1322,7 @@ racctd_init(void)
SYSINIT(racctd, SI_SUB_RACCTD, SI_ORDER_FIRST, racctd_init, NULL);
static void
-racct_init(void)
+racct_init(void *dummy __unused)
{
if (!racct_enable)
return;
diff --git a/sys/kern/kern_rangelock.c b/sys/kern/kern_rangelock.c
index 3854ffbeec29..cd66bff62608 100644
--- a/sys/kern/kern_rangelock.c
+++ b/sys/kern/kern_rangelock.c
@@ -300,7 +300,7 @@ static void rangelock_free_free(struct rl_q_entry *free);
static void rangelock_noncheating_destroy(struct rangelock *lock);
static void
-rangelock_sys_init(void)
+rangelock_sys_init(void *dummy __unused)
{
rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct rl_q_entry),
diff --git a/sys/kern/kern_rctl.c b/sys/kern/kern_rctl.c
index 4232c71f86fb..682ba86d23ff 100644
--- a/sys/kern/kern_rctl.c
+++ b/sys/kern/kern_rctl.c
@@ -209,7 +209,7 @@ static struct dict actionnames[] = {
{ "throttle", RCTL_ACTION_THROTTLE },
{ NULL, -1 }};
-static void rctl_init(void);
+static void rctl_init(void *);
SYSINIT(rctl, SI_SUB_RACCT, SI_ORDER_FIRST, rctl_init, NULL);
static uma_zone_t rctl_rule_zone;
@@ -2175,7 +2175,7 @@ rctl_racct_release(struct racct *racct)
}
static void
-rctl_init(void)
+rctl_init(void *dummy __unused)
{
if (!racct_enable)
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index c1633dd19de2..7206572ffc02 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -337,9 +337,9 @@ rm_wowned(const struct rmlock *rm)
}
void
-rm_sysinit(void *arg)
+rm_sysinit(const void *arg)
{
- struct rm_args *args;
+ const struct rm_args *args;
args = arg;
rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index e182d1fe9baf..84a3a890be63 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -266,9 +266,9 @@ _rw_destroy(volatile uintptr_t *c)
}
void
-rw_sysinit(void *arg)
+rw_sysinit(const void *arg)
{
- struct rw_args *args;
+ const struct rw_args *args;
args = arg;
rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
diff --git a/sys/kern/kern_sharedpage.c b/sys/kern/kern_sharedpage.c
index 5b8398caaca9..f48d0e3d616b 100644
--- a/sys/kern/kern_sharedpage.c
+++ b/sys/kern/kern_sharedpage.c
@@ -130,8 +130,7 @@ shared_page_init(void *dummy __unused)
shared_page_mapping = (char *)addr;
}
-SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
- NULL);
+SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, shared_page_init, NULL);
/*
* Push the timehands update to the shared page.
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index da0efac0598d..21f765b17f62 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -113,7 +113,7 @@ static int filt_sigattach(struct knote *kn);
static void filt_sigdetach(struct knote *kn);
static int filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, bool fast_sigblock);
-static void sigqueue_start(void);
+static void sigqueue_start(void *);
static void sigfastblock_setpend(struct thread *td, bool resched);
static void sig_handle_first_stop(struct thread *td, struct proc *p,
int sig);
@@ -344,7 +344,7 @@ ast_sigsuspend(struct thread *td, int tda __unused)
}
static void
-sigqueue_start(void)
+sigqueue_start(void *dummy __unused)
{
ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
@@ -2656,9 +2656,11 @@ ptrace_coredumpreq(struct thread *td, struct proc *p,
return;
}
+ memset(&wctx, 0, sizeof(wctx));
wctx.vp = tcq->tc_vp;
wctx.fcred = NOCRED;
+	memset(&cdw, 0, sizeof(cdw));
cdw.ctx = &wctx;
cdw.write_fn = core_vn_write;
cdw.extend_fn = core_vn_extend;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index accea5d288eb..249faf5b1ec4 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -222,9 +222,9 @@ owner_sx(const struct lock_object *lock, struct thread **owner)
#endif
void
-sx_sysinit(void *arg)
+sx_sysinit(const void *arg)
{
- struct sx_args *sargs = arg;
+ const struct sx_args *sargs = arg;
sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}
@@ -1539,16 +1539,19 @@ sx_chain(struct thread *td, struct thread **ownerp)
/*
* Check to see if this thread is blocked on an sx lock.
- * First, we check the lock class. If that is ok, then we
- * compare the lock name against the wait message.
+ * The thread should be on a sleep queue with type SLEEPQ_SX, the
+ * purported lock should have the lock class index of sx, and the lock
+ * name should match the wait message.
*/
sx = td->td_wchan;
- if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
+ if (!TD_ON_SLEEPQ(td) || sleepq_type(td->td_wchan) != SLEEPQ_SX ||
+ LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
sx->lock_object.lo_name != td->td_wmesg)
return (0);
/* We think we have an sx lock, so output some details. */
- db_printf("blocked on sx \"%s\" ", td->td_wmesg);
+ db_printf("blocked on lock %p (%s) \"%s\" ", &sx->lock_object,
+ lock_class_sx.lc_name, td->td_wmesg);
*ownerp = sx_xholder(sx);
if (sx->sx_lock & SX_LOCK_SHARED)
db_printf("SLOCK (count %ju)\n",
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 0e8c2b9f362e..4329959a2ef4 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -347,6 +347,17 @@ kern_thr_exit(struct thread *td)
p = td->td_proc;
/*
+ * Clear kernel ASTs in advance of selecting the last exiting
+ * thread and acquiring schedulers locks. It is fine to
+ * clear the ASTs here even if we are not going to exit after
+ * all. On the other hand, leaving them pending could trigger
+ * execution in subsystems in a context where they are not
+ * prepared to handle top kernel actions, even in execution of
+ * an unrelated thread.
+ */
+ ast_kclear(td);
+
+ /*
* If all of the threads in a process call this routine to
* exit (e.g. all threads call pthread_exit()), exactly one
* thread should return to the caller to terminate the process
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 50b040132396..3180c66cb42b 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1694,8 +1694,10 @@ thread_single_end(struct proc *p, int mode)
thread_unlock(td);
}
}
- KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
- ("inconsistent boundary count %d", p->p_boundary_count));
+ KASSERT(mode != SINGLE_BOUNDARY || P_SHOULDSTOP(p) ||
+ p->p_boundary_count == 0,
+ ("pid %d proc %p flags %#x inconsistent boundary count %d",
+ p->p_pid, p, p->p_flag, p->p_boundary_count));
PROC_SUNLOCK(p);
wakeup(&p->p_flag);
}
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index 2a6f0989f6aa..5b7485c25cd7 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -90,7 +90,7 @@ static int user_clock_nanosleep(struct thread *td, clockid_t clock_id,
int flags, const struct timespec *ua_rqtp,
struct timespec *ua_rmtp);
-static void itimer_start(void);
+static void itimer_start(void *);
static int itimer_init(void *, int, int);
static void itimer_fini(void *, int);
static void itimer_enter(struct itimer *);
@@ -1170,7 +1170,7 @@ eventratecheck(struct timeval *lasttime, int *cureps, int maxeps)
}
static void
-itimer_start(void)
+itimer_start(void *dummy __unused)
{
static const struct kclock rt_clock = {
.timer_create = realtimer_create,
diff --git a/sys/kern/kern_tslog.c b/sys/kern/kern_tslog.c
index fbf81d423b95..09070eea284f 100644
--- a/sys/kern/kern_tslog.c
+++ b/sys/kern/kern_tslog.c
@@ -220,3 +220,13 @@ SYSCTL_PROC(_debug, OID_AUTO, tslog_user,
CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_SKIP,
0, 0, sysctl_debug_tslog_user,
"", "Dump recorded userland event timestamps");
+
+void
+sysinit_tslog_shim(const void *data)
+{
+ const struct sysinit_tslog *x = data;
+
+ tslog(curthread, TS_ENTER, "SYSINIT", x->name);
+ (x->func)(x->data);
+ tslog(curthread, TS_EXIT, "SYSINIT", x->name);
+}
diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c
index bbebadc4c395..ebd203858b66 100644
--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c
@@ -518,9 +518,15 @@ link_elf_init(void* arg)
(void)link_elf_link_common_finish(linker_kernel_file);
linker_kernel_file->flags |= LINKER_FILE_LINKED;
TAILQ_INIT(&set_pcpu_list);
+ ef->pcpu_start = DPCPU_START;
+ ef->pcpu_stop = DPCPU_STOP;
+ ef->pcpu_base = DPCPU_START;
#ifdef VIMAGE
TAILQ_INIT(&set_vnet_list);
vnet_save_init((void *)VNET_START, VNET_STOP - VNET_START);
+ ef->vnet_start = VNET_START;
+ ef->vnet_stop = VNET_STOP;
+ ef->vnet_base = VNET_START;
#endif
}
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index 151aab96f9be..a3a53a39bfd6 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -70,6 +70,7 @@
typedef struct {
void *addr;
+ void *origaddr; /* Used by debuggers. */
Elf_Off size;
int flags; /* Section flags. */
int sec; /* Original section number. */
@@ -492,7 +493,8 @@ link_elf_link_preload(linker_class_t cls, const char *filename,
case SHT_FINI_ARRAY:
if (shdr[i].sh_addr == 0)
break;
- ef->progtab[pb].addr = (void *)shdr[i].sh_addr;
+ ef->progtab[pb].addr = ef->progtab[pb].origaddr =
+ (void *)shdr[i].sh_addr;
if (shdr[i].sh_type == SHT_PROGBITS)
ef->progtab[pb].name = "<<PROGBITS>>";
#ifdef __amd64__
@@ -1088,6 +1090,8 @@ link_elf_load_file(linker_class_t cls, const char *filename,
ef->progtab[pb].name = "<<NOBITS>>";
if (ef->progtab[pb].name != NULL &&
!strcmp(ef->progtab[pb].name, DPCPU_SETNAME)) {
+ ef->progtab[pb].origaddr =
+ (void *)(uintptr_t)mapbase;
ef->progtab[pb].addr =
dpcpu_alloc(shdr[i].sh_size);
if (ef->progtab[pb].addr == NULL) {
@@ -1101,6 +1105,8 @@ link_elf_load_file(linker_class_t cls, const char *filename,
#ifdef VIMAGE
else if (ef->progtab[pb].name != NULL &&
!strcmp(ef->progtab[pb].name, VNET_SETNAME)) {
+ ef->progtab[pb].origaddr =
+ (void *)(uintptr_t)mapbase;
ef->progtab[pb].addr =
vnet_data_alloc(shdr[i].sh_size);
if (ef->progtab[pb].addr == NULL) {
diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c
index 464efda1e91a..fee6c1a844e2 100644
--- a/sys/kern/subr_asan.c
+++ b/sys/kern/subr_asan.c
@@ -835,6 +835,7 @@ ASAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t);
ASAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t);
ASAN_ATOMIC_FUNC_TESTANDSET(int, u_int);
ASAN_ATOMIC_FUNC_TESTANDSET(long, u_long);
+ASAN_ATOMIC_FUNC_TESTANDSET(acq_long, u_long);
ASAN_ATOMIC_FUNC_TESTANDSET(ptr, uintptr_t);
ASAN_ATOMIC_FUNC_SWAP(32, uint32_t);
diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c
index 62a3da964c37..bf5bda7e058d 100644
--- a/sys/kern/subr_bus.c
+++ b/sys/kern/subr_bus.c
@@ -280,6 +280,9 @@ device_sysctl_handler(SYSCTL_HANDLER_ARGS)
struct sbuf sb;
device_t dev = (device_t)arg1;
device_t iommu;
+#ifdef IOMMU
+ device_t requester;
+#endif
int error;
uint16_t rid;
const char *c;
@@ -314,9 +317,15 @@ device_sysctl_handler(SYSCTL_HANDLER_ARGS)
}
rid = 0;
#ifdef IOMMU
- iommu_get_requester(dev, &rid);
+ error = iommu_get_requester(dev, &requester, &rid);
+ /*
+ * Do not return requester error from sysctl, iommu
+ * unit might be assigned by other means.
+ */
+#else
+ error = ENXIO;
#endif
- if (rid != 0)
+ if (error == 0)
sbuf_printf(&sb, "%srid=%#x", c, rid);
break;
default:
diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c
index 07a9cc0f57be..c4d0223d484f 100644
--- a/sys/kern/subr_devstat.c
+++ b/sys/kern/subr_devstat.c
@@ -415,7 +415,7 @@ sysctl_devstat(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
- for (;nds != NULL;) {
+ while (nds != NULL) {
error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
if (error != 0)
return (error);
diff --git a/sys/kern/subr_msan.c b/sys/kern/subr_msan.c
index a3238b61482b..883dbd2b7604 100644
--- a/sys/kern/subr_msan.c
+++ b/sys/kern/subr_msan.c
@@ -1301,6 +1301,7 @@ MSAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t);
MSAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t);
MSAN_ATOMIC_FUNC_TESTANDSET(int, u_int);
MSAN_ATOMIC_FUNC_TESTANDSET(long, u_long);
+MSAN_ATOMIC_FUNC_TESTANDSET(acq_long, u_long);
MSAN_ATOMIC_FUNC_TESTANDSET(ptr, uintptr_t);
MSAN_ATOMIC_FUNC_SWAP(32, uint32_t);
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
index 471640c290a7..a67e5fa6cbff 100644
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -235,14 +235,11 @@ init_param1(void)
* specification for <limits.h>, paragraph "Runtime Increasable
* Values").
*
- * On the other hand, INT_MAX would result in an overflow for the common
- * 'ngroups_max + 1' computation (to obtain the size of the internal
- * groups array, its first element being reserved for the effective
- * GID). Also, the number of allocated bytes for the group array must
- * not overflow on 32-bit machines. For all these reasons, we limit the
- * number of supplementary groups to some very high number that we
- * expect will never be reached in all practical uses and ensures we
- * avoid the problems just exposed, even if 'gid_t' was to be enlarged
+ * On the other hand, a too high value would result in an overflow when
+ * computing the number of bytes to allocate for the groups array. We
+ * thus limit the number of supplementary groups to some very high
+ * number that we expect will never be reached in all practical uses,
+ * avoiding the problem just exposed even if 'gid_t' were to be enlarged
* by a magnitude.
*/
ngroups_max = NGROUPS_MAX;
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index 5c14e15830f4..c9a387a5e87b 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -140,7 +140,7 @@ uma_zone_t pcpu_zone_32;
uma_zone_t pcpu_zone_64;
static void
-pcpu_zones_startup(void)
+pcpu_zones_startup(void *dummy __unused)
{
pcpu_zone_4 = uma_zcreate("pcpu-4", 4,
diff --git a/sys/kern/subr_power.c b/sys/kern/subr_power.c
index db0e7bf5b0e3..f5a581e42bf3 100644
--- a/sys/kern/subr_power.c
+++ b/sys/kern/subr_power.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2001 Mitsuru IWASAKI
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,24 +34,113 @@
#include <sys/eventhandler.h>
#include <sys/power.h>
#include <sys/proc.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
+enum power_stype power_standby_stype = POWER_STYPE_UNKNOWN;
+enum power_stype power_suspend_stype = POWER_STYPE_UNKNOWN;
+enum power_stype power_hibernate_stype = POWER_STYPE_UNKNOWN;
+
static u_int power_pm_type = POWER_PM_TYPE_NONE;
static power_pm_fn_t power_pm_fn = NULL;
static void *power_pm_arg = NULL;
+static bool power_pm_supported[POWER_STYPE_COUNT] = {0};
static struct task power_pm_task;
+enum power_stype
+power_name_to_stype(const char *name)
+{
+ enum power_stype stype;
+
+ for (stype = 0; stype < POWER_STYPE_COUNT; stype++) {
+ if (strcasecmp(name, power_stype_names[stype]) == 0)
+ return (stype);
+ }
+ return (POWER_STYPE_UNKNOWN);
+}
+
+const char *
+power_stype_to_name(enum power_stype stype)
+{
+ if (stype == POWER_STYPE_UNKNOWN)
+ return ("NONE");
+ if (stype < POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
+ return (NULL);
+ return (power_stype_names[stype]);
+}
+
+static int
+sysctl_supported_stypes(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ struct sbuf sb;
+ enum power_stype stype;
+
+ sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
+ for (stype = 0; stype < POWER_STYPE_COUNT; stype++) {
+ if (power_pm_supported[stype])
+ sbuf_printf(&sb, "%s ", power_stype_to_name(stype));
+ }
+ sbuf_trim(&sb);
+ sbuf_finish(&sb);
+ error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+ sbuf_delete(&sb);
+
+ return (error);
+}
+
+static int
+power_sysctl_stype(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ enum power_stype new_stype, old_stype;
+
+ old_stype = *(enum power_stype *)oidp->oid_arg1;
+ strlcpy(name, power_stype_to_name(old_stype), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_stype = power_name_to_stype(name);
+ if (new_stype == POWER_STYPE_UNKNOWN)
+ return (EINVAL);
+ if (!power_pm_supported[new_stype])
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ *(enum power_stype *)oidp->oid_arg1 = new_stype;
+ return (0);
+}
+
+static SYSCTL_NODE(_kern, OID_AUTO, power, CTLFLAG_RW, 0,
+ "Generic power management related sysctls");
+
+SYSCTL_PROC(_kern_power, OID_AUTO, supported_stype,
+ CTLTYPE_STRING | CTLFLAG_RD, 0, 0, sysctl_supported_stypes, "A",
+ "List supported sleep types");
+SYSCTL_PROC(_kern_power, OID_AUTO, standby, CTLTYPE_STRING | CTLFLAG_RW,
+ &power_standby_stype, 0, power_sysctl_stype, "A",
+ "Sleep type to enter on standby");
+SYSCTL_PROC(_kern_power, OID_AUTO, suspend, CTLTYPE_STRING | CTLFLAG_RW,
+ &power_suspend_stype, 0, power_sysctl_stype, "A",
+ "Sleep type to enter on suspend");
+SYSCTL_PROC(_kern_power, OID_AUTO, hibernate, CTLTYPE_STRING | CTLFLAG_RW,
+ &power_hibernate_stype, 0, power_sysctl_stype, "A",
+ "Sleep type to enter on hibernate");
+
static void
power_pm_deferred_fn(void *arg, int pending)
{
- int state = (intptr_t)arg;
+ enum power_stype stype = (intptr_t)arg;
- power_pm_fn(POWER_CMD_SUSPEND, power_pm_arg, state);
+ power_pm_fn(POWER_CMD_SUSPEND, power_pm_arg, stype);
}
int
-power_pm_register(u_int pm_type, power_pm_fn_t pm_fn, void *pm_arg)
+power_pm_register(u_int pm_type, power_pm_fn_t pm_fn, void *pm_arg,
+ bool pm_supported[static POWER_STYPE_COUNT])
{
int error;
@@ -56,6 +149,16 @@ power_pm_register(u_int pm_type, power_pm_fn_t pm_fn, void *pm_arg)
power_pm_type = pm_type;
power_pm_fn = pm_fn;
power_pm_arg = pm_arg;
+ memcpy(power_pm_supported, pm_supported,
+ sizeof(power_pm_supported));
+ if (power_pm_supported[POWER_STYPE_STANDBY])
+ power_standby_stype = POWER_STYPE_STANDBY;
+ if (power_pm_supported[POWER_STYPE_SUSPEND_TO_MEM])
+ power_suspend_stype = POWER_STYPE_SUSPEND_TO_MEM;
+ else if (power_pm_supported[POWER_STYPE_SUSPEND_TO_IDLE])
+ power_suspend_stype = POWER_STYPE_SUSPEND_TO_IDLE;
+ if (power_pm_supported[POWER_STYPE_HIBERNATE])
+ power_hibernate_stype = POWER_STYPE_HIBERNATE;
error = 0;
TASK_INIT(&power_pm_task, 0, power_pm_deferred_fn, NULL);
} else {
@@ -75,14 +178,27 @@ power_pm_get_type(void)
void
power_pm_suspend(int state)
{
+ enum power_stype stype;
+
if (power_pm_fn == NULL)
return;
- if (state != POWER_SLEEP_STATE_STANDBY &&
- state != POWER_SLEEP_STATE_SUSPEND &&
- state != POWER_SLEEP_STATE_HIBERNATE)
+ switch (state) {
+ case POWER_SLEEP_STATE_STANDBY:
+ stype = power_standby_stype;
+ break;
+ case POWER_SLEEP_STATE_SUSPEND:
+ stype = power_suspend_stype;
+ break;
+ case POWER_SLEEP_STATE_HIBERNATE:
+ stype = power_hibernate_stype;
+ break;
+ default:
+ printf("%s: unknown sleep state %d\n", __func__, state);
return;
- power_pm_task.ta_context = (void *)(intptr_t)state;
+ }
+
+ power_pm_task.ta_context = (void *)(intptr_t)stype;
taskqueue_enqueue(taskqueue_thread, &power_pm_task);
}
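A kernel-side sketch of how a platform PM driver might register with the extended interface. Everything named examplepm_* is hypothetical; only power_pm_register(), POWER_PM_TYPE_ACPI, and the POWER_STYPE_* constants come from the tree, and the callback's third parameter is assumed to carry an enum power_stype as at the call site above:

/* Hypothetical driver registration fragment (kernel side, illustrative). */
static void
examplepm_func(u_long cmd, void *arg, enum power_stype stype)
{
	/* Enter the requested sleep type; details are platform specific. */
}

static int
examplepm_register(void *sc)
{
	static bool supported[POWER_STYPE_COUNT] = {
		[POWER_STYPE_STANDBY]		= true,
		[POWER_STYPE_SUSPEND_TO_MEM]	= true,
		/* No hibernation on this hypothetical platform. */
	};

	return (power_pm_register(POWER_PM_TYPE_ACPI, examplepm_func, sc,
	    supported));
}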
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index db0ceb17b9f0..e2070ae3f865 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -766,7 +766,7 @@ reswitch: switch (ch = (u_char)*fmt++) {
PCHAR(hex2ascii(*up & 0x0f));
up++;
if (width)
- for (q=p;*q;q++)
+ for (q = p; *q; q++)
PCHAR(*q);
}
break;
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index ab47b6ad29a3..c937f6a82757 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -57,7 +57,7 @@
* b : public affirmation by word or example of usually
* religious faith or conviction <the heroic witness to divine
* life -- Pilot>
- * 6 capitalized : a member of the Jehovah's Witnesses
+ * 6 capitalized : a member of the Jehovah's Witnesses
*/
/*
@@ -131,7 +131,7 @@
#define LI_SLEEPABLE 0x00040000 /* Lock may be held while sleeping. */
#ifndef WITNESS_COUNT
-#define WITNESS_COUNT 1536
+#define WITNESS_COUNT 1536
#endif
#define WITNESS_HASH_SIZE 251 /* Prime, gives load factor < 2 */
#define WITNESS_PENDLIST (512 + (MAXCPU * 4))
@@ -158,20 +158,18 @@
* These flags go in the witness relationship matrix and describe the
* relationship between any two struct witness objects.
*/
-#define WITNESS_UNRELATED 0x00 /* No lock order relation. */
-#define WITNESS_PARENT 0x01 /* Parent, aka direct ancestor. */
-#define WITNESS_ANCESTOR 0x02 /* Direct or indirect ancestor. */
-#define WITNESS_CHILD 0x04 /* Child, aka direct descendant. */
-#define WITNESS_DESCENDANT 0x08 /* Direct or indirect descendant. */
-#define WITNESS_ANCESTOR_MASK (WITNESS_PARENT | WITNESS_ANCESTOR)
-#define WITNESS_DESCENDANT_MASK (WITNESS_CHILD | WITNESS_DESCENDANT)
-#define WITNESS_RELATED_MASK \
- (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
-#define WITNESS_REVERSAL 0x10 /* A lock order reversal has been
- * observed. */
-#define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */
-#define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */
-#define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */
+#define WITNESS_UNRELATED 0x00 /* No lock order relation. */
+#define WITNESS_PARENT 0x01 /* Parent, aka direct ancestor. */
+#define WITNESS_ANCESTOR 0x02 /* Direct or indirect ancestor. */
+#define WITNESS_CHILD 0x04 /* Child, aka direct descendant. */
+#define WITNESS_DESCENDANT 0x08 /* Direct or indirect descendant. */
+#define WITNESS_ANCESTOR_MASK (WITNESS_PARENT | WITNESS_ANCESTOR)
+#define WITNESS_DESCENDANT_MASK (WITNESS_CHILD | WITNESS_DESCENDANT)
+#define WITNESS_RELATED_MASK (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
+#define WITNESS_REVERSAL 0x10 /* A lock order reversal has been observed. */
+#define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */
+#define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */
+#define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */
/* Descendant to ancestor flags */
#define WITNESS_DTOA(x) (((x) & WITNESS_RELATED_MASK) >> 2)
@@ -218,20 +216,18 @@ struct lock_list_entry {
* (for example, "vnode interlock").
*/
struct witness {
- char w_name[MAX_W_NAME];
- uint32_t w_index; /* Index in the relationship matrix */
+ char w_name[MAX_W_NAME];
+ uint32_t w_index; /* Index in the relationship matrix */
struct lock_class *w_class;
- STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
- STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
- struct witness *w_hash_next; /* Linked list in hash buckets. */
- const char *w_file; /* File where last acquired */
- uint32_t w_line; /* Line where last acquired */
- uint32_t w_refcount;
- uint16_t w_num_ancestors; /* direct/indirect
- * ancestor count */
- uint16_t w_num_descendants; /* direct/indirect
- * descendant count */
- int16_t w_ddb_level;
+ STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
+ STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
+ struct witness *w_hash_next; /* Linked list in hash buckets. */
+ const char *w_file; /* File where last acquired */
+ uint32_t w_line; /* Line where last acquired */
+ uint32_t w_refcount;
+ uint16_t w_num_ancestors; /* direct/indirect ancestor count */
+ uint16_t w_num_descendants; /* direct/indirect descendant count */
+ int16_t w_ddb_level;
unsigned w_displayed:1;
unsigned w_reversed:1;
};
@@ -265,7 +261,7 @@ struct witness_lock_order_data {
/*
* The witness lock order data hash table. Keys are witness index tuples
* (struct witness_lock_order_key), elements are lock order data objects
- * (struct witness_lock_order_data).
+ * (struct witness_lock_order_data).
*/
struct witness_lock_order_hash {
struct witness_lock_order_data *wloh_array[WITNESS_LO_HASH_SIZE];
@@ -295,7 +291,6 @@ struct witness_order_list_entry {
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{
-
return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}
@@ -304,7 +299,6 @@ static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
const struct witness_lock_order_key *b)
{
-
return (a->from == b->from && a->to == b->to);
}
@@ -415,7 +409,7 @@ SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
int badstack_sbuf_size;
int witness_count = WITNESS_COUNT;
-SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
+SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
&witness_count, 0, "");
/*
@@ -760,7 +754,6 @@ static int witness_spin_warn = 0;
static const char *
fixup_filename(const char *file)
{
-
if (file == NULL)
return (NULL);
while (strncmp(file, "../", 3) == 0)
@@ -835,7 +828,7 @@ witness_startup(void *mem)
w_free_cnt--;
for (i = 0; i < witness_count; i++) {
- memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
+ memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
(witness_count + 1));
}
@@ -989,16 +982,16 @@ witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
{
int i;
- for (i = 0; i < indent; i++)
- prnt(" ");
+ for (i = 0; i < indent; i++)
+ prnt(" ");
prnt("%s (type: %s, depth: %d, active refs: %d)",
w->w_name, w->w_class->lc_name,
w->w_ddb_level, w->w_refcount);
- if (w->w_displayed) {
- prnt(" -- (already displayed)\n");
- return;
- }
- w->w_displayed = 1;
+ if (w->w_displayed) {
+ prnt(" -- (already displayed)\n");
+ return;
+ }
+ w->w_displayed = 1;
if (w->w_file != NULL && w->w_line != 0)
prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
w->w_line);
@@ -1079,7 +1072,6 @@ witness_ddb_display(int(*prnt)(const char *fmt, ...))
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{
-
if (witness_watch == -1 || KERNEL_PANICKED())
return (0);
@@ -1257,7 +1249,7 @@ witness_checkorder(struct lock_object *lock, int flags, const char *file,
w->w_reversed = 1;
mtx_unlock_spin(&w_mtx);
witness_output(
- "acquiring duplicate lock of same type: \"%s\"\n",
+ "acquiring duplicate lock of same type: \"%s\"\n",
w->w_name);
witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
fixup_filename(plock->li_file), plock->li_line);
@@ -1523,6 +1515,10 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
else
lock_list = PCPU_PTR(spinlocks);
+ /* Update per-witness last file and line acquire. */
+ w->w_file = file;
+ w->w_line = line;
+
/* Check to see if we are recursing on a lock we already own. */
instance = find_instance(*lock_list, lock);
if (instance != NULL) {
@@ -1530,15 +1526,9 @@ witness_lock(struct lock_object *lock, int flags, const char *file, int line)
CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
td->td_proc->p_pid, lock->lo_name,
instance->li_flags & LI_RECURSEMASK);
- instance->li_file = file;
- instance->li_line = line;
return;
}
- /* Update per-witness last file and line acquire. */
- w->w_file = file;
- w->w_line = line;
-
/* Find the next open lock instance in the list and fill it. */
lle = *lock_list;
if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
@@ -1743,7 +1733,7 @@ found:
/*
* In order to reduce contention on w_mtx, we want to keep always an
- * head object into lists so that frequent allocation from the
+ * head object into lists so that frequent allocation from the
* free witness pool (and subsequent locking) is avoided.
* In order to maintain the current code simple, when the head
* object is totally unloaded it means also that we do not have
@@ -1781,7 +1771,7 @@ witness_thread_exit(struct thread *td)
n++;
witness_list_lock(&lle->ll_children[i],
witness_output);
-
+
}
kassert_panic(
"Thread %p cannot exit while holding sleeplocks\n", td);
@@ -1948,7 +1938,6 @@ found:
static void
depart(struct witness *w)
{
-
MPASS(w->w_refcount == 0);
if (w->w_class->lc_flags & LC_SLEEPLOCK) {
w_sleep_cnt--;
@@ -1999,18 +1988,18 @@ adopt(struct witness *parent, struct witness *child)
child->w_num_ancestors++;
}
- /*
- * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
+ /*
+ * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
* an ancestor of 'pi' during this loop.
*/
for (i = 1; i <= w_max_used_index; i++) {
- if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
+ if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
(i != pi))
continue;
/* Find each descendant of 'i' and mark it as a descendant. */
for (j = 1; j <= w_max_used_index; j++) {
- /*
+ /*
* Skip children that are already marked as
* descendants of 'i'.
*/
@@ -2021,7 +2010,7 @@ adopt(struct witness *parent, struct witness *child)
* We are only interested in descendants of 'ci'. Note
* that 'ci' itself is counted as a descendant of 'ci'.
*/
- if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
+ if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
(j != ci))
continue;
w_rmatrix[i][j] |= WITNESS_ANCESTOR;
@@ -2029,16 +2018,16 @@ adopt(struct witness *parent, struct witness *child)
w_data[i].w_num_descendants++;
w_data[j].w_num_ancestors++;
- /*
+ /*
* Make sure we aren't marking a node as both an
- * ancestor and descendant. We should have caught
+ * ancestor and descendant. We should have caught
* this as a lock order reversal earlier.
*/
if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
(w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
printf("witness rmatrix paradox! [%d][%d]=%d "
"both ancestor and descendant\n",
- i, j, w_rmatrix[i][j]);
+ i, j, w_rmatrix[i][j]);
kdb_backtrace();
printf("Witness disabled.\n");
witness_watch = -1;
@@ -2047,7 +2036,7 @@ adopt(struct witness *parent, struct witness *child)
(w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
printf("witness rmatrix paradox! [%d][%d]=%d "
"both ancestor and descendant\n",
- j, i, w_rmatrix[j][i]);
+ j, i, w_rmatrix[j][i]);
kdb_backtrace();
printf("Witness disabled.\n");
witness_watch = -1;
@@ -2124,7 +2113,6 @@ _isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
static int
isitmychild(struct witness *parent, struct witness *child)
{
-
return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}
@@ -2134,7 +2122,6 @@ isitmychild(struct witness *parent, struct witness *child)
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{
-
return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
__func__));
}
@@ -2182,7 +2169,7 @@ witness_get(void)
STAILQ_REMOVE_HEAD(&w_free, w_list);
w_free_cnt--;
index = w->w_index;
- MPASS(index > 0 && index == w_max_used_index+1 &&
+ MPASS(index > 0 && index == w_max_used_index + 1 &&
index < witness_count);
bzero(w, sizeof(*w));
w->w_index = index;
@@ -2194,7 +2181,6 @@ witness_get(void)
static void
witness_free(struct witness *w)
{
-
STAILQ_INSERT_HEAD(&w_free, w, w_list);
w_free_cnt++;
}
@@ -2219,11 +2205,10 @@ witness_lock_list_get(void)
bzero(lle, sizeof(*lle));
return (lle);
}
-
+
static void
witness_lock_list_free(struct lock_list_entry *lle)
{
-
mtx_lock_spin(&w_mtx);
lle->ll_next = w_lock_list_free;
w_lock_list_free = lle;
@@ -2297,7 +2282,6 @@ witness_voutput(const char *fmt, va_list ap)
static int
witness_thread_has_locks(struct thread *td)
{
-
if (td->td_sleeplocks == NULL)
return (0);
return (td->td_sleeplocks->ll_count != 0);
@@ -2573,14 +2557,12 @@ witness_setflag(struct lock_object *lock, int flag, int set)
void
witness_norelease(struct lock_object *lock)
{
-
witness_setflag(lock, LI_NORELEASE, 1);
}
void
witness_releaseok(struct lock_object *lock)
{
-
witness_setflag(lock, LI_NORELEASE, 0);
}
@@ -2588,7 +2570,6 @@ witness_releaseok(struct lock_object *lock)
static void
witness_ddb_list(struct thread *td)
{
-
KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
KASSERT(kdb_active, ("%s: not in the debugger", __func__));
@@ -2653,7 +2634,6 @@ DB_SHOW_ALIAS_FLAGS(alllocks, db_witness_list_all, DB_CMD_MEMSAFE);
DB_SHOW_COMMAND_FLAGS(witness, db_witness_display, DB_CMD_MEMSAFE)
{
-
witness_ddb_display(db_printf);
}
#endif
@@ -2673,9 +2653,9 @@ sbuf_print_witness_badstacks(struct sbuf *sb, size_t *oldidx)
/* Allocate and init temporary storage space. */
tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
- tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
+ tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
M_WAITOK | M_ZERO);
- tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
+ tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
M_WAITOK | M_ZERO);
stack_zero(&tmp_data1->wlod_stack);
stack_zero(&tmp_data2->wlod_stack);
@@ -2750,12 +2730,12 @@ restart:
sbuf_printf(sb,
"\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
- tmp_w1->w_name, tmp_w1->w_class->lc_name,
+ tmp_w1->w_name, tmp_w1->w_class->lc_name,
tmp_w2->w_name, tmp_w2->w_class->lc_name);
if (data1) {
sbuf_printf(sb,
"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
- tmp_w1->w_name, tmp_w1->w_class->lc_name,
+ tmp_w1->w_name, tmp_w1->w_class->lc_name,
tmp_w2->w_name, tmp_w2->w_class->lc_name);
stack_sbuf_print(sb, &tmp_data1->wlod_stack);
sbuf_putc(sb, '\n');
@@ -2763,7 +2743,7 @@ restart:
if (data2 && data2 != data1) {
sbuf_printf(sb,
"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
- tmp_w2->w_name, tmp_w2->w_class->lc_name,
+ tmp_w2->w_name, tmp_w2->w_class->lc_name,
tmp_w1->w_name, tmp_w1->w_class->lc_name);
stack_sbuf_print(sb, &tmp_data2->wlod_stack);
sbuf_putc(sb, '\n');
@@ -2823,7 +2803,6 @@ sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
static int
sbuf_db_printf_drain(void *arg __unused, const char *data, int len)
{
-
return (db_printf("%.*s", len, data));
}
@@ -3068,7 +3047,7 @@ witness_lock_order_get(struct witness *parent, struct witness *child)
& WITNESS_LOCK_ORDER_KNOWN) == 0)
goto out;
- hash = witness_hash_djb2((const char*)&key,
+ hash = witness_hash_djb2((const char *)&key,
sizeof(key)) % w_lohash.wloh_size;
data = w_lohash.wloh_array[hash];
while (data != NULL) {
@@ -3089,7 +3068,6 @@ out:
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{
-
if (parent != child &&
w_rmatrix[parent->w_index][child->w_index]
& WITNESS_LOCK_ORDER_KNOWN &&
@@ -3115,7 +3093,7 @@ witness_lock_order_add(struct witness *parent, struct witness *child)
& WITNESS_LOCK_ORDER_KNOWN)
return (1);
- hash = witness_hash_djb2((const char*)&key,
+ hash = witness_hash_djb2((const char *)&key,
sizeof(key)) % w_lohash.wloh_size;
w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
data = w_lofree;
@@ -3134,7 +3112,6 @@ witness_lock_order_add(struct witness *parent, struct witness *child)
static void
witness_increment_graph_generation(void)
{
-
if (witness_cold == 0)
mtx_assert(&w_mtx, MA_OWNED);
w_generation++;
@@ -3143,7 +3120,6 @@ witness_increment_graph_generation(void)
static int
witness_output_drain(void *arg __unused, const char *data, int len)
{
-
witness_output("%.*s", len, data);
return (len);
}
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index 5606b36f772f..7d666da9f88b 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -729,7 +729,7 @@ kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
struct file *fp;
struct filedesc *fdp;
- int error, tmp, locked;
+ int error, f_flag, tmp, locked;
AUDIT_ARG_FD(fd);
AUDIT_ARG_CMD(com);
@@ -782,30 +782,36 @@ kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
goto out;
}
+ f_flag = 0;
switch (com) {
case FIONCLEX:
fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
- goto out;
+ break;
case FIOCLEX:
fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
- goto out;
- case FIONBIO:
- if ((tmp = *(int *)data))
- atomic_set_int(&fp->f_flag, FNONBLOCK);
- else
- atomic_clear_int(&fp->f_flag, FNONBLOCK);
- data = (void *)&tmp;
break;
+ case FIONBIO:
case FIOASYNC:
- if ((tmp = *(int *)data))
- atomic_set_int(&fp->f_flag, FASYNC);
- else
- atomic_clear_int(&fp->f_flag, FASYNC);
- data = (void *)&tmp;
+ f_flag = com == FIONBIO ? FNONBLOCK : FASYNC;
+ tmp = *(int *)data;
+ fsetfl_lock(fp);
+ if (((fp->f_flag & f_flag) != 0) != (tmp != 0)) {
+ error = fo_ioctl(fp, com, (void *)&tmp, td->td_ucred,
+ td);
+ if (error == 0) {
+ if (tmp != 0)
+ atomic_set_int(&fp->f_flag, f_flag);
+ else
+ atomic_clear_int(&fp->f_flag, f_flag);
+ }
+ }
+ fsetfl_unlock(fp);
+ break;
+ default:
+ error = fo_ioctl(fp, com, data, td->td_ucred, td);
break;
}
- error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
switch (locked) {
case LA_XLOCKED:
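Userland-visible behavior is unchanged; for reference, a sketch of the classic toggle, which with the new path reaches fo_ioctl() only when the flag state actually changes:

#include <sys/ioctl.h>
#include <err.h>

/*
 * Illustrative only: calling this twice with the same 'on' value now
 * performs no fo_ioctl() the second time, and f_flag is updated only
 * if fo_ioctl() succeeds.
 */
static void
set_nonblocking(int fd, int on)
{
	if (ioctl(fd, FIONBIO, &on) == -1)
		err(1, "ioctl(FIONBIO)");
}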
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index ed651da96b14..57ebe8dc85f0 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -234,6 +234,7 @@ static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, bool backing);
+static void pipe_destroy(struct pipe *pipe);
static int pipe_paircreate(struct thread *td, struct pipepair **p_pp);
static __inline int pipelock(struct pipe *cpipe, bool catch);
static __inline void pipeunlock(struct pipe *cpipe);
@@ -399,16 +400,7 @@ pipe_paircreate(struct thread *td, struct pipepair **p_pp)
goto fail;
error = pipe_create(wpipe, false);
if (error != 0) {
- /*
- * This cleanup leaves the pipe inode number for rpipe
- * still allocated, but never used. We do not free
- * inode numbers for opened pipes, which is required
- * for correctness because numbers must be unique.
- * But also it avoids any memory use by the unr
- * allocator, so stashing away the transient inode
- * number is reasonable.
- */
- pipe_free_kmem(rpipe);
+ pipe_destroy(rpipe);
goto fail;
}
@@ -575,7 +567,7 @@ pipespace_new(struct pipe *cpipe, int size)
static int curfail = 0;
static struct timeval lastfail;
- KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
+ PIPE_LOCK_ASSERT(cpipe, MA_NOTOWNED);
KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
("pipespace: resize of direct writes not allowed"));
retry:
@@ -743,6 +735,16 @@ pipe_create(struct pipe *pipe, bool large_backing)
return (error);
}
+static void
+pipe_destroy(struct pipe *pipe)
+{
+ pipe_free_kmem(pipe);
+ /*
+ * Note: we "leak" pipe_ino -- by design the alloc_unr64 mechanism does
+ * not undo allocations.
+ */
+}
+
/* ARGSUSED */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
@@ -1677,8 +1679,7 @@ static void
pipe_free_kmem(struct pipe *cpipe)
{
- KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
- ("pipe_free_kmem: pipe mutex locked"));
+ PIPE_LOCK_ASSERT(cpipe, MA_NOTOWNED);
if (cpipe->pipe_buffer.buffer != NULL) {
atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
diff --git a/sys/kern/sys_procdesc.c b/sys/kern/sys_procdesc.c
index 11bd1b6f30e1..acaf1241cb2e 100644
--- a/sys/kern/sys_procdesc.c
+++ b/sys/kern/sys_procdesc.c
@@ -129,7 +129,7 @@ procdesc_find(struct thread *td, int fd, const cap_rights_t *rightsp,
if (error)
return (error);
if (fp->f_type != DTYPE_PROCDESC) {
- error = EBADF;
+ error = EINVAL;
goto out;
}
pd = fp->f_data;
diff --git a/sys/kern/sys_socket.c b/sys/kern/sys_socket.c
index c221106ae067..bc0725230cca 100644
--- a/sys/kern/sys_socket.c
+++ b/sys/kern/sys_socket.c
@@ -586,7 +586,7 @@ soaio_enqueue(struct task *task)
}
static void
-soaio_init(void)
+soaio_init(void *dummy __unused)
{
soaio_lifetime = AIOD_LIFETIME_DEFAULT;
diff --git a/sys/kern/sys_timerfd.c b/sys/kern/sys_timerfd.c
index ab7e048a2ab1..565ab3ad6ee6 100644
--- a/sys/kern/sys_timerfd.c
+++ b/sys/kern/sys_timerfd.c
@@ -206,7 +206,6 @@ retry:
mtx_unlock(&tfd->tfd_lock);
return (EAGAIN);
}
- td->td_rtcgen = atomic_load_acq_int(&rtc_generation);
error = mtx_sleep(&tfd->tfd_count, &tfd->tfd_lock,
PCATCH, "tfdrd", 0);
if (error == 0) {
diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c
index 09bf4d519927..4cef89cd5219 100644
--- a/sys/kern/syscalls.c
+++ b/sys/kern/syscalls.c
@@ -84,8 +84,8 @@ const char *syscallnames[] = {
"obs_vhangup", /* 76 = obsolete vhangup */
"obs_vlimit", /* 77 = obsolete vlimit */
"mincore", /* 78 = mincore */
- "getgroups", /* 79 = getgroups */
- "setgroups", /* 80 = setgroups */
+ "compat14.getgroups", /* 79 = freebsd14 getgroups */
+ "compat14.setgroups", /* 80 = freebsd14 setgroups */
"getpgrp", /* 81 = getpgrp */
"setpgid", /* 82 = setpgid */
"setitimer", /* 83 = setitimer */
@@ -600,4 +600,8 @@ const char *syscallnames[] = {
"exterrctl", /* 592 = exterrctl */
"inotify_add_watch_at", /* 593 = inotify_add_watch_at */
"inotify_rm_watch", /* 594 = inotify_rm_watch */
+ "getgroups", /* 595 = getgroups */
+ "setgroups", /* 596 = setgroups */
+ "jail_attach_jd", /* 597 = jail_attach_jd */
+ "jail_remove_jd", /* 598 = jail_remove_jd */
};
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 53b5d3cbbba9..967af1f5313c 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -552,13 +552,13 @@
_Out_writes_bytes_(len/PAGE_SIZE) char *vec
);
}
-79 AUE_GETGROUPS STD|CAPENABLED {
+79 AUE_GETGROUPS COMPAT14|CAPENABLED {
int getgroups(
int gidsetsize,
_Out_writes_opt_(gidsetsize) gid_t *gidset
);
}
-80 AUE_SETGROUPS STD {
+80 AUE_SETGROUPS COMPAT14 {
int setgroups(
int gidsetsize,
_In_reads_(gidsetsize) const gid_t *gidset
@@ -3371,5 +3371,27 @@
int wd
);
}
+595 AUE_GETGROUPS STD|CAPENABLED {
+ int getgroups(
+ int gidsetsize,
+ _Out_writes_opt_(gidsetsize) gid_t *gidset
+ );
+ }
+596 AUE_SETGROUPS STD {
+ int setgroups(
+ int gidsetsize,
+ _In_reads_(gidsetsize) const gid_t *gidset
+ );
+ }
+597 AUE_JAIL_ATTACH STD {
+ int jail_attach_jd(
+ int fd
+ );
+ }
+598 AUE_JAIL_REMOVE STD {
+ int jail_remove_jd(
+ int fd
+ );
+ }
; vim: syntax=off
diff --git a/sys/kern/systrace_args.c b/sys/kern/systrace_args.c
index 4dfc63924da9..e28fef931ea8 100644
--- a/sys/kern/systrace_args.c
+++ b/sys/kern/systrace_args.c
@@ -454,22 +454,6 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 3;
break;
}
- /* getgroups */
- case 79: {
- struct getgroups_args *p = params;
- iarg[a++] = p->gidsetsize; /* int */
- uarg[a++] = (intptr_t)p->gidset; /* gid_t * */
- *n_args = 2;
- break;
- }
- /* setgroups */
- case 80: {
- struct setgroups_args *p = params;
- iarg[a++] = p->gidsetsize; /* int */
- uarg[a++] = (intptr_t)p->gidset; /* const gid_t * */
- *n_args = 2;
- break;
- }
/* getpgrp */
case 81: {
*n_args = 0;
@@ -3500,6 +3484,36 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 2;
break;
}
+ /* getgroups */
+ case 595: {
+ struct getgroups_args *p = params;
+ iarg[a++] = p->gidsetsize; /* int */
+ uarg[a++] = (intptr_t)p->gidset; /* gid_t * */
+ *n_args = 2;
+ break;
+ }
+ /* setgroups */
+ case 596: {
+ struct setgroups_args *p = params;
+ iarg[a++] = p->gidsetsize; /* int */
+ uarg[a++] = (intptr_t)p->gidset; /* const gid_t * */
+ *n_args = 2;
+ break;
+ }
+ /* jail_attach_jd */
+ case 597: {
+ struct jail_attach_jd_args *p = params;
+ iarg[a++] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
+ /* jail_remove_jd */
+ case 598: {
+ struct jail_remove_jd_args *p = params;
+ iarg[a++] = p->fd; /* int */
+ *n_args = 1;
+ break;
+ }
default:
*n_args = 0;
break;
@@ -4199,32 +4213,6 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
- /* getgroups */
- case 79:
- switch (ndx) {
- case 0:
- p = "int";
- break;
- case 1:
- p = "userland gid_t *";
- break;
- default:
- break;
- };
- break;
- /* setgroups */
- case 80:
- switch (ndx) {
- case 0:
- p = "int";
- break;
- case 1:
- p = "userland const gid_t *";
- break;
- default:
- break;
- };
- break;
/* getpgrp */
case 81:
break;
@@ -9367,6 +9355,52 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
+ /* getgroups */
+ case 595:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* setgroups */
+ case 596:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ case 1:
+ p = "userland const gid_t *";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* jail_attach_jd */
+ case 597:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
+ /* jail_remove_jd */
+ case 598:
+ switch (ndx) {
+ case 0:
+ p = "int";
+ break;
+ default:
+ break;
+ };
+ break;
default:
break;
};
@@ -9633,16 +9667,6 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
- /* getgroups */
- case 79:
- if (ndx == 0 || ndx == 1)
- p = "int";
- break;
- /* setgroups */
- case 80:
- if (ndx == 0 || ndx == 1)
- p = "int";
- break;
/* getpgrp */
case 81:
/* setpgid */
@@ -11365,6 +11389,26 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
+ /* getgroups */
+ case 595:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* setgroups */
+ case 596:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* jail_attach_jd */
+ case 597:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
+ /* jail_remove_jd */
+ case 598:
+ if (ndx == 0 || ndx == 1)
+ p = "int";
+ break;
default:
break;
};
diff --git a/sys/kern/uipc_mqueue.c b/sys/kern/uipc_mqueue.c
index 6f2760635bad..a8aec397b352 100644
--- a/sys/kern/uipc_mqueue.c
+++ b/sys/kern/uipc_mqueue.c
@@ -867,7 +867,7 @@ mqfs_lookupx(struct vop_cachedlookup_args *ap)
pd = VTON(dvp);
pn = NULL;
mqfs = pd->mn_info;
- *vpp = NULLVP;
+ *vpp = NULL;
if (dvp->v_type != VDIR)
return (ENOTDIR);
@@ -886,7 +886,7 @@ mqfs_lookupx(struct vop_cachedlookup_args *ap)
return (EINVAL);
pn = pd;
*vpp = dvp;
- VREF(dvp);
+ vref(dvp);
return (0);
}
@@ -921,7 +921,7 @@ mqfs_lookupx(struct vop_cachedlookup_args *ap)
return (error);
}
if (*vpp == dvp) {
- VREF(dvp);
+ vref(dvp);
*vpp = dvp;
mqnode_release(pn);
return (0);
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 85fe48ddd466..eb1327f7f2de 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -1160,7 +1160,8 @@ kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
return (EINVAL);
- if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
+ if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC |
+ O_CLOFORK)) != 0)
return (EINVAL);
largepage = (shmflags & SHM_LARGEPAGE) != 0;
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 0056dac65c7d..90489e99491a 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -154,15 +154,12 @@ static struct task unp_defer_task;
* and don't really want to reserve the sendspace. Their recvspace should be
* large enough for at least one max-size datagram plus address.
*/
-#ifndef PIPSIZ
-#define PIPSIZ 8192
-#endif
-static u_long unpst_sendspace = PIPSIZ;
-static u_long unpst_recvspace = PIPSIZ;
+static u_long unpst_sendspace = 64*1024;
+static u_long unpst_recvspace = 64*1024;
static u_long unpdg_maxdgram = 8*1024; /* support 8KB syslog msgs */
static u_long unpdg_recvspace = 16*1024;
-static u_long unpsp_sendspace = PIPSIZ;
-static u_long unpsp_recvspace = PIPSIZ;
+static u_long unpsp_sendspace = 64*1024;
+static u_long unpsp_recvspace = 64*1024;
static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Local domain");
@@ -1072,6 +1069,21 @@ uipc_stream_sbspace(struct sockbuf *sb)
return (min(space, mbspace));
}
+/*
+ * UNIX version of generic sbwait() for writes. We wait on peer's receive
+ * buffer, using our timeout.
+ */
+static int
+uipc_stream_sbwait(struct socket *so, sbintime_t timeo)
+{
+ struct sockbuf *sb = &so->so_rcv;
+
+ SOCK_RECVBUF_LOCK_ASSERT(so);
+ sb->sb_flags |= SB_WAIT;
+ return (msleep_sbt(&sb->sb_acc, SOCK_RECVBUF_MTX(so), PSOCK | PCATCH,
+ "sbwait", timeo, 0, 0));
+}
+
static int
uipc_sosend_stream_or_seqpacket(struct socket *so, struct sockaddr *addr,
struct uio *uio0, struct mbuf *m, struct mbuf *c, int flags,
@@ -1206,7 +1218,8 @@ restart:
error = EWOULDBLOCK;
goto out4;
}
- if ((error = sbwait(so2, SO_RCV)) != 0) {
+ if ((error = uipc_stream_sbwait(so2,
+ so->so_snd.sb_timeo)) != 0) {
SOCK_RECVBUF_UNLOCK(so2);
goto out4;
} else
@@ -1546,15 +1559,19 @@ restart:
mc_init_m(&cmc, control);
SOCK_RECVBUF_LOCK(so);
- MPASS(!(sb->sb_state & SBS_CANTRCVMORE));
-
- if (__predict_false(cmc.mc_len + sb->sb_ccc +
- sb->sb_ctl > sb->sb_hiwat)) {
+ if (__predict_false(
+ (sb->sb_state & SBS_CANTRCVMORE) ||
+ cmc.mc_len + sb->sb_ccc + sb->sb_ctl >
+ sb->sb_hiwat)) {
/*
- * Too bad, while unp_externalize() was
- * failing, the other side had filled
- * the buffer and we can't prepend data
- * back. Losing data!
+ * While the lock was dropped and we
+ * were failing in unp_externalize(),
+					 * the peer could have a) disconnected,
+ * b) filled the buffer so that we
+ * can't prepend data back.
+ * These are two edge conditions that
+ * we just can't handle, so lose the
+ * data and return the error.
*/
SOCK_RECVBUF_UNLOCK(so);
SOCK_IO_RECV_UNLOCK(so);
@@ -1810,9 +1827,7 @@ uipc_filt_sowrite(struct knote *kn, long hint)
kn->kn_data = uipc_stream_sbspace(&so2->so_rcv);
if (so2->so_rcv.sb_state & SBS_CANTRCVMORE) {
- /*
- * XXXGL: maybe kn->kn_flags |= EV_EOF ?
- */
+ kn->kn_flags |= EV_EOF;
return (1);
} else if (kn->kn_sfflags & NOTE_LOWAT)
return (kn->kn_data >= kn->kn_sdata);
@@ -2402,7 +2417,7 @@ uipc_sendfile_wait(struct socket *so, off_t need, int *space)
}
if (!sockref)
soref(so2);
- error = sbwait(so2, SO_RCV);
+ error = uipc_stream_sbwait(so2, so->so_snd.sb_timeo);
if (error == 0 &&
__predict_false(sb->sb_state & SBS_CANTRCVMORE))
error = EPIPE;
@@ -3672,11 +3687,14 @@ unp_internalize(struct mbuf *control, struct mchain *mc, struct thread *td)
cmcred->cmcred_uid = td->td_ucred->cr_ruid;
cmcred->cmcred_gid = td->td_ucred->cr_rgid;
cmcred->cmcred_euid = td->td_ucred->cr_uid;
- cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups,
+ _Static_assert(CMGROUP_MAX >= 1,
+ "Room needed for the effective GID.");
+ cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups + 1,
CMGROUP_MAX);
- for (i = 0; i < cmcred->cmcred_ngroups; i++)
+ cmcred->cmcred_groups[0] = td->td_ucred->cr_gid;
+ for (i = 1; i < cmcred->cmcred_ngroups; i++)
cmcred->cmcred_groups[i] =
- td->td_ucred->cr_groups[i];
+ td->td_ucred->cr_groups[i - 1];
break;
case SCM_RIGHTS:
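On the receive side the SCM_CREDS layout is preserved: entry 0 of cmcred_groups now carries the sender's effective GID. A sketch of a consumer; print_creds() is hypothetical, while struct cmsgcred and its fields are the standard ones from sys/socket.h:

#include <sys/types.h>
#include <sys/socket.h>
#include <stdio.h>

/* Sketch: cmcred_groups[0] is the egid, the rest are supplementary. */
static void
print_creds(const struct cmsgcred *cc)
{
	printf("pid %d euid %d egid %d\n", (int)cc->cmcred_pid,
	    (int)cc->cmcred_euid, (int)cc->cmcred_groups[0]);
	for (int i = 1; i < cc->cmcred_ngroups; i++)
		printf("supplementary gid %d\n", (int)cc->cmcred_groups[i]);
}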
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index fa655c43d155..19c39e42bafa 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -5170,7 +5170,7 @@ bufstrategy(struct bufobj *bo, struct buf *bp)
vp = bp->b_vp;
KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
- KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
+ KASSERT(!VN_ISDEV(vp),
("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
i = VOP_STRATEGY(vp, bp);
KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 89c1d779f04c..557e451f9a45 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -86,7 +86,7 @@
*
* This fundamental choice needs to be revisited. In the meantime, the current
* state is described below. Significance of all notable routines is explained
- * in comments placed above their implementation. Scattered thoroughout the
+ * in comments placed above their implementation. Scattered throughout the
* file are TODO comments indicating shortcomings which can be fixed without
* reworking everything (most of the fixes will likely be reusable). Various
* details are omitted from this explanation to not clutter the overview, they
@@ -109,18 +109,19 @@
* The (directory vnode; name) tuple reliably determines the target entry if
* it exists.
*
- * Since there are no small locks at this time (all are 32 bytes in size on
- * LP64), the code works around the problem by introducing lock arrays to
- * protect hash buckets and vnode lists.
+ * Since there were no small locks at the time of writing this comment (all are
+ * 32 bytes in size on LP64), the code works around the problem by introducing
+ * lock arrays to protect hash buckets and vnode lists.
*
* II. Filesystem integration
*
* Filesystems participating in name caching do the following:
* - set vop_lookup routine to vfs_cache_lookup
- * - set vop_cachedlookup to whatever can perform the lookup if the above fails
- * - if they support lockless lookup (see below), vop_fplookup_vexec and
- * vop_fplookup_symlink are set along with the MNTK_FPLOOKUP flag on the
- * mount point
+ * - set vop_cachedlookup to a routine which can perform the lookup if the
+ * above fails
+ * - if they support lockless lookup (see below), they set vop_fplookup_vexec
+ * and vop_fplookup_symlink along with the MNTK_FPLOOKUP flag on the mount
+ * point
* - call cache_purge or cache_vop_* routines to eliminate stale entries as
* applicable
* - call cache_enter to add entries depending on the MAKEENTRY flag
@@ -134,11 +135,15 @@
* ... -> namei -> cache_fplookup -> cache_fplookup_noentry -> VOP_LOOKUP ->
* vfs_cache_lookup -> VOP_CACHEDLOOKUP -> ufs_lookup_ino -> cache_enter
*
+ * You may notice a degree of CPU waste in this callchain.
+ *
* III. Performance considerations
*
* For lockless case forward lookup avoids any writes to shared areas apart
* from the terminal path component. In other words non-modifying lookups of
- * different files don't suffer any scalability problems in the namecache.
+ * different files don't suffer any scalability problems in the namecache
+ * itself.
+ *
* Looking up the same file is limited by VFS and goes beyond the scope of this
* file.
*
@@ -158,8 +163,10 @@
*
* IV. Observability
*
- * Note not everything has an explicit dtrace probe nor it should have, thus
- * some of the one-liners below depend on implementation details.
+ * Several statistics are collected in the vfs.cache sysctl tree.
+ *
+ * Some of the state can be checked with explicit dtrace probes; most of it
+ * depends on implementation details.
*
* Examples:
*
@@ -167,7 +174,7 @@
* # line number, column 2 is status code (see cache_fpl_status)
* dtrace -n 'vfs:fplookup:lookup:done { @[arg1, arg2] = count(); }'
*
- * # Lengths of names added by binary name
+ * # Histogram of lengths of names added, aggregated by which programs are doing it
* dtrace -n 'fbt::cache_enter_time:entry { @[execname] = quantize(args[2]->cn_namelen); }'
*
* # Same as above but only those which exceed 64 characters
@@ -195,6 +202,11 @@
* - vnodes are subject to being recycled even if target inode is left in memory,
* which loses the name cache entries when it perhaps should not. in case of tmpfs
* names get duplicated -- kept by filesystem itself and namecache separately
+ * - vnode reclamation (see vnlru in kern/vfs_subr.c) defaults to skipping
+ *   directories for this very reason, which arguably further reduces the
+ *   quality of the vnode LRU. Per the above, this is done to avoid breaking
+ *   vnode -> path resolution (it becomes expensive for directories and
+ *   impossible for the rest).
+ * This would not be a factor if namecache entries could persist without vnodes.
* - struct namecache has a fixed size and comes in 2 variants, often wasting
* space. now hard to replace with malloc due to dependence on SMR, which
* requires UMA zones to opt in
@@ -207,7 +219,8 @@
* performance left on the table, most notably from single-threaded standpoint.
* Below is a woefully incomplete list of changes which can help. Ideas are
* mostly sketched out, no claim is made all kinks or prerequisites are laid
- * out.
+ * out. The name of the game is eliding branches altogether and hopefully some
+ * of the memory accesses.
*
* Note there is performance lost all over VFS.
*
@@ -223,13 +236,6 @@
* the vnode to hang around for the long haul, but would work for aforementioned
* stat(2) but also access(2), readlink(2), realpathat(2) and probably more.
*
- * === hotpatching for sdt probes
- *
- * They result in *tons* of branches all over with rather regrettable codegen
- * at times. Removing sdt probes altogether gives over 2% boost in lookup rate.
- * Reworking the code to patch itself at runtime with asm goto would solve it.
- * asm goto is fully supported by gcc and clang.
- *
* === copyinstr
*
* On all architectures it operates one byte at a time, while it could be
@@ -251,10 +257,12 @@
* things worked out locklessly. Instead the lockless lookup could be the
* actual entry point which calls what is currently namei as a fallback.
*
+ * It could be hotpatched if lockless lookup is disabled.
+ *
* === avoidable branches in cache_can_fplookup
*
* The cache_fast_lookup_enabled flag check could be hotpatchable (in fact if
- * this is off, none of fplookup code should execute).
+ * this is off, none of the fplookup code should execute, see above).
*
* Both audit and capsicum branches can be combined into one, but it requires
* paying off a lot of tech debt first.
@@ -277,8 +285,18 @@
*
* === inactive on v_usecount reaching 0
*
- * VOP_NEED_INACTIVE should not exist. Filesystems would indicate need for such
- * processing with a bit in usecount.
+ * VOP_NEED_INACTIVE should not exist. Filesystems could indicate the need for
+ * such processing by setting a bit in the usecount and adding a hold count.
+ * Then the vput fast path would become as simple as (ACHTUNG: locking ignored):
+ *
+ * ref = atomic_fetchadd_int(&vp->v_count, -1) - 1;
+ * if ((ref & MAGIC_BIT) == 0) // common case
+ * return;
+ *	if ((ref & ~MAGIC_BIT) != 0) // the bit is set but this was not the last user
+ * return;
+ * // do inactive here
+ *
+ * Also see below.
*
* === v_holdcnt
*
@@ -287,7 +305,8 @@
 * vnlru et al would consider the vnode not-freeable if it has either hold or
* usecount on it.
*
- * This would eliminate 2 atomics.
+ * This would eliminate 2 atomics in the common case of securing a vnode and
+ * undoing it.
*/
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
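
To make the counting in the comment's vput sketch concrete, here is a self-contained C11-atomics model of the proposed fast path; MAGIC_BIT, the toy vnode layout, and toy_inactive() are invented for illustration and are not kernel API:

#include <stdatomic.h>
#include <stdio.h>

#define	MAGIC_BIT	0x40000000u	/* hypothetical "wants inactive" flag */

struct toy_vnode {
	_Atomic unsigned v_usecount;	/* use count with MAGIC_BIT or'ed in */
};

static void
toy_inactive(struct toy_vnode *vp)
{
	printf("inactive processing for %p\n", (void *)vp);
}

/* Model of the proposed vput() fast path (locking ignored, as above). */
static void
toy_vput(struct toy_vnode *vp)
{
	unsigned ref;

	ref = atomic_fetch_sub(&vp->v_usecount, 1) - 1;
	if ((ref & MAGIC_BIT) == 0)	/* common case: no inactive wanted */
		return;
	if ((ref & ~MAGIC_BIT) != 0)	/* flagged, but not the last user */
		return;
	toy_inactive(vp);		/* last use of a flagged vnode */
}

int
main(void)
{
	struct toy_vnode vn = { 2 | MAGIC_BIT };

	toy_vput(&vn);	/* 2 -> 1 users, nothing to do */
	toy_vput(&vn);	/* 1 -> 0 users, inactive runs */
	return (0);
}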
@@ -3321,12 +3340,10 @@ sys___realpathat(struct thread *td, struct __realpathat_args *uap)
uap->flags, UIO_USERSPACE));
}
-/*
- * Retrieve the full filesystem path that correspond to a vnode from the name
- * cache (if available)
- */
-int
-vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
+static int
+vn_fullpath_up_to_pwd_vnode(struct vnode *vp,
+ struct vnode *(*const get_pwd_vnode)(const struct pwd *),
+ char **retbuf, char **freebuf)
{
struct pwd *pwd;
char *buf;
@@ -3340,11 +3357,13 @@ vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
buf = malloc(buflen, M_TEMP, M_WAITOK);
vfs_smr_enter();
pwd = pwd_get_smr();
- error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, &buflen, 0);
+ error = vn_fullpath_any_smr(vp, get_pwd_vnode(pwd), buf, retbuf,
+ &buflen, 0);
VFS_SMR_ASSERT_NOT_ENTERED();
if (error < 0) {
pwd = pwd_hold(curthread);
- error = vn_fullpath_any(vp, pwd->pwd_rdir, buf, retbuf, &buflen);
+ error = vn_fullpath_any(vp, get_pwd_vnode(pwd), buf, retbuf,
+ &buflen);
pwd_drop(pwd);
}
if (error == 0)
@@ -3354,6 +3373,42 @@ vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
return (error);
}
+static inline struct vnode *
+get_rdir(const struct pwd *pwd)
+{
+ return (pwd->pwd_rdir);
+}
+
+/*
+ * Produce a filesystem path that starts from the current chroot directory and
+ * corresponds to the passed vnode, using the name cache (if available).
+ */
+int
+vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
+{
+ return (vn_fullpath_up_to_pwd_vnode(vp, get_rdir, retbuf, freebuf));
+}
+
+static inline struct vnode *
+get_jdir(const struct pwd *pwd)
+{
+ return (pwd->pwd_jdir);
+}
+
+/*
+ * Produce a filesystem path that starts from the current jail's root directory
+ * and corresponds to the passed vnode, using the name cache (if available).
+ *
+ * This function makes it possible to ignore chroots performed inside a jail
+ * (or the host), so that path checks remain unaffected by privileged or
+ * unprivileged chroot calls.
+ */
+int
+vn_fullpath_jail(struct vnode *vp, char **retbuf, char **freebuf)
+{
+ return (vn_fullpath_up_to_pwd_vnode(vp, get_jdir, retbuf, freebuf));
+}
+
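A hedged usage sketch for the new interface, following the established vn_fullpath() buffer contract; the logging helper itself is hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

/*
 * Illustrative only: resolve a vnode to a path as seen from the jail
 * root. As with vn_fullpath(), retbuf points into the allocation
 * returned via freebuf, which must be freed from M_TEMP.
 */
static void
log_jail_path(struct vnode *vp)
{
	char *retbuf, *freebuf;

	if (vn_fullpath_jail(vp, &retbuf, &freebuf) == 0) {
		printf("vnode %p: %s\n", vp, retbuf);
		free(freebuf, M_TEMP);
	}
}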
/*
* This function is similar to vn_fullpath, but it attempts to lookup the
* pathname relative to the global root mount point. This is required for the
@@ -4632,7 +4687,7 @@ cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp,
}
/*
- * The target vnode is not supported, prepare for the slow path to take over.
+ * Prepare to fall back to the locked lookup while trying to retain the
+ * progress made so far.
*/
static int __noinline
cache_fplookup_partial_setup(struct cache_fpl *fpl)
@@ -6289,53 +6344,90 @@ cache_fplookup_impl(struct vnode *dvp, struct cache_fpl *fpl)
* Note: all VOP_FPLOOKUP_VEXEC routines have a comment referencing this one.
*
* Filesystems can opt in by setting the MNTK_FPLOOKUP flag and meeting criteria
- * outlined below.
- *
- * Traditional vnode lookup conceptually looks like this:
+ * outlined at the end.
*
- * vn_lock(current);
- * for (;;) {
- * next = find();
- * vn_lock(next);
- * vn_unlock(current);
- * current = next;
- * if (last)
- * break;
- * }
- * return (current);
+ * Traversing from one vnode to another requires atomicity with regard to
+ * permissions, mount points and of course their relative placement (if you are
+ * looking up "bar" in "foo" and you found it, it better be in that directory
+ * at the time).
*
- * Each jump to the next vnode is safe memory-wise and atomic with respect to
- * any modifications thanks to holding respective locks.
+ * Normally this is accomplished with locking, but it comes with a significant
+ * performance hit and is untenable as a fast path even in a moderate core
+ * count environment (at the time of writing this comment this would be a
+ * little south of 100).
*
* The same guarantee can be provided with a combination of safe memory
* reclamation and sequence counters instead. If all operations which affect
* the relationship between the current vnode and the one we are looking for
* also modify the counter, we can verify whether all the conditions held as
- * we made the jump. This includes things like permissions, mount points etc.
- * Counter modification is provided by enclosing relevant places in
- * vn_seqc_write_begin()/end() calls.
+ * we made the jump.
*
- * Thus this translates to:
+ * See places which issue vn_seqc_write_begin()/vn_seqc_write_end() for
+ * operations affected.
+ *
+ * Suppose the variable "cnp" contains lookup metadata (the path etc.), then
+ * locked lookup conceptually looks like this:
+ *
+ * // lock the current directory
+ * vn_lock(dvp);
+ * for (;;) {
+ * // permission check
+ * if (!canlookup(dvp, cnp))
+ * abort();
+ * // look for the target name inside dvp
+ * tvp = findnext(dvp, cnp);
+ * vn_lock(tvp);
+ * // tvp is still guaranteed to be inside of dvp because of the lock on dvp
+ * vn_unlock(dvp);
+ * // dvp is unlocked. its state is now arbitrary, but that's fine as we
+ * // made the jump while everything relevant was correct, continue with tvp
+ * // as the directory to look up names in
+ *	dvp = tvp;
+ * if (last)
+ * break;
+ * // if not last loop back and continue until done
+ * }
+ * vget(tvp);
+ * return (tvp);
+ *
+ * Lockless lookup replaces locking with sequence counter checks:
*
* vfs_smr_enter();
* dvp_seqc = seqc_read_any(dvp);
- * if (seqc_in_modify(dvp_seqc)) // someone is altering the vnode
+ * // fail if someone is altering the directory vnode
+ * if (seqc_in_modify(dvp_seqc))
* abort();
* for (;;) {
- * tvp = find();
+ * // permission check. note it can race, but we will validate the outcome
+ * // with a seqc
+ * if (!canlookup_smr(dvp, cnp)) {
+ * // has dvp changed from under us? if so, the denial may be invalid
+ *		if (!seqc_consistent(dvp, dvp_seqc))
+ * fallback_to_locked();
+ * // nothing changed, lookup denial is valid
+ * fail();
+ * }
+ * // look for the target name inside dvp
+ * tvp = findnext(dvp, cnp);
* tvp_seqc = seqc_read_any(tvp);
- * if (seqc_in_modify(tvp_seqc)) // someone is altering the target vnode
- * abort();
- * if (!seqc_consistent(dvp, dvp_seqc) // someone is altering the vnode
- * abort();
- * dvp = tvp; // we know nothing of importance has changed
- * dvp_seqc = tvp_seqc; // store the counter for the tvp iteration
+ * // bail if someone is altering the target vnode
+ * if (seqc_in_modify(tvp_seqc))
+ * fallback_to_locked();
+ * // bail if someone is altering the directory vnode
+ *	if (!seqc_consistent(dvp, dvp_seqc))
+ * fallback_to_locked();
+ * // we confirmed neither dvp nor tvp changed while we were making the
+ * // jump to the next component, thus the result is the same as if we
+ * // held the lock on dvp and tvp the entire time, continue with tvp
+ * // as the directory to look up names in
+ * dvp = tvp;
+ * dvp_seqc = tvp_seqc;
* if (last)
* break;
* }
* vget(); // secure the vnode
 *	if (!seqc_consistent(tvp, tvp_seqc)) // final check
- * abort();
+ * fallback_to_locked();
* // at this point we know nothing has changed for any parent<->child pair
* // as they were crossed during the lookup, meaning we matched the guarantee
* // of the locked variant
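
The counter discipline above can also be modeled in plain C11. The sketch below uses invented names rather than the kernel's seqc(9) API and sequentially consistent atomics throughout for simplicity (the real code relies on weaker, carefully placed ordering): the writer makes the counter odd for the duration of an update, and the reader only trusts a snapshot taken while the counter was even and unchanged.

#include <stdatomic.h>
#include <stdbool.h>

struct seq_obj {
	_Atomic unsigned seq;	/* odd while an update is in flight */
	_Atomic int a, b;	/* payload guarded by the counter */
};

static void
writer_update(struct seq_obj *o, int a, int b)
{
	atomic_fetch_add(&o->seq, 1);	/* counter goes odd: update begins */
	atomic_store(&o->a, a);
	atomic_store(&o->b, b);
	atomic_fetch_add(&o->seq, 1);	/* counter even again: update done */
}

static bool
reader_snapshot(struct seq_obj *o, int *a, int *b)
{
	unsigned s1, s2;

	s1 = atomic_load(&o->seq);
	if ((s1 & 1) != 0)		/* seqc_in_modify() analogue */
		return (false);
	*a = atomic_load(&o->a);
	*b = atomic_load(&o->b);
	s2 = atomic_load(&o->seq);
	return (s1 == s2);		/* seqc_consistent() analogue */
}

A failed snapshot corresponds to the fallback_to_locked() arrows above: the reader simply retries under heavier synchronization.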
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 85f67731e1cc..05d1120030f3 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -708,7 +708,7 @@ vop_stdvptocnp(struct vop_vptocnp_args *ap)
if (error)
return (error);
- VREF(vp);
+ vref(vp);
locked = VOP_ISLOCKED(vp);
VOP_UNLOCK(vp);
NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
@@ -727,10 +727,10 @@ vop_stdvptocnp(struct vop_vptocnp_args *ap)
((*dvp)->v_vflag & VV_ROOT) &&
((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
*dvp = (*dvp)->v_mount->mnt_vnodecovered;
- VREF(mvp);
+ vref(mvp);
VOP_UNLOCK(mvp);
vn_close(mvp, FREAD, cred, td);
- VREF(*dvp);
+ vref(*dvp);
vn_lock(*dvp, LK_SHARED | LK_RETRY);
covered = 1;
}
diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c
index cd30d5cfae47..ceda770cb714 100644
--- a/sys/kern/vfs_init.c
+++ b/sys/kern/vfs_init.c
@@ -103,6 +103,16 @@ struct vattr va_null;
* Routines having to do with the management of the vnode table.
*/
+void
+vfs_unref_vfsconf(struct vfsconf *vfsp)
+{
+ vfsconf_lock();
+ KASSERT(vfsp->vfc_refcount > 0,
+ ("vfs %p refcount underflow %d", vfsp, vfsp->vfc_refcount));
+ vfsp->vfc_refcount--;
+ vfsconf_unlock();
+}
+
static struct vfsconf *
vfs_byname_locked(const char *name)
{
@@ -123,9 +133,11 @@ vfs_byname(const char *name)
{
struct vfsconf *vfsp;
- vfsconf_slock();
+ vfsconf_lock();
vfsp = vfs_byname_locked(name);
- vfsconf_sunlock();
+ if (vfsp != NULL)
+ vfsp->vfc_refcount++;
+ vfsconf_unlock();
return (vfsp);
}
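
Note the ownership change: vfs_byname() now returns with vfc_refcount bumped under the vfsconf lock, so every caller owns a reference it must drop with vfs_unref_vfsconf(). A hedged caller-side sketch (use_fs() is a hypothetical consumer):

#include <sys/param.h>
#include <sys/mount.h>

static int
example_lookup_fs(const char *fstype)
{
	struct vfsconf *vfsp;
	int error;

	vfsp = vfs_byname(fstype);
	if (vfsp == NULL)
		return (ENOENT);
	error = use_fs(vfsp);		/* hypothetical consumer */
	vfs_unref_vfsconf(vfsp);	/* drop the reference on every path out */
	return (error);
}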
@@ -387,7 +399,7 @@ vfs_register(struct vfsconf *vfc)
static int once;
struct vfsconf *tvfc;
uint32_t hashval;
- int secondpass;
+ int error, prevmaxconf, secondpass;
if (!once) {
vattr_null(&va_null);
@@ -405,6 +417,7 @@ vfs_register(struct vfsconf *vfc)
return (EEXIST);
}
+ prevmaxconf = maxvfsconf;
if (vfs_typenumhash != 0) {
/*
* Calculate a hash on vfc_name to use for vfc_typenum. Unless
@@ -497,16 +510,24 @@ vfs_register(struct vfsconf *vfc)
vfc->vfc_vfsops = &vfsops_sigdefer;
}
- if (vfc->vfc_flags & VFCF_JAIL)
- prison_add_vfs(vfc);
-
/*
* Call init function for this VFS...
*/
if ((vfc->vfc_flags & VFCF_SBDRY) != 0)
- vfc->vfc_vfsops_sd->vfs_init(vfc);
+ error = vfc->vfc_vfsops_sd->vfs_init(vfc);
else
- vfc->vfc_vfsops->vfs_init(vfc);
+ error = vfc->vfc_vfsops->vfs_init(vfc);
+
+ if (error != 0) {
+ maxvfsconf = prevmaxconf;
+ TAILQ_REMOVE(&vfsconf, vfc, vfc_list);
+ vfsconf_unlock();
+ return (error);
+ }
+
+ if ((vfc->vfc_flags & VFCF_JAIL) != 0)
+ prison_add_vfs(vfc);
+
vfsconf_unlock();
/*
diff --git a/sys/kern/vfs_inotify.c b/sys/kern/vfs_inotify.c
index 746a5a39208e..b265a5ff3a62 100644
--- a/sys/kern/vfs_inotify.c
+++ b/sys/kern/vfs_inotify.c
@@ -801,6 +801,7 @@ vn_inotify_add_watch(struct vnode *vp, struct inotify_softc *sc, uint32_t mask,
vn_lock(vp, LK_SHARED | LK_RETRY);
if (error != 0)
break;
+ NDFREE_PNBUF(&nd);
vn_irflag_set_cond(nd.ni_vp, VIRF_INOTIFY_PARENT);
vrele(nd.ni_vp);
}
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index fb3e6a7a2534..39c7da803de1 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -883,7 +883,7 @@ vfs_lookup_degenerate(struct nameidata *ndp, struct vnode *dp, int wantparent)
}
if (wantparent) {
ndp->ni_dvp = dp;
- VREF(dp);
+ vref(dp);
}
ndp->ni_vp = dp;
cnp->cn_namelen = 0;
@@ -1121,7 +1121,7 @@ vfs_lookup(struct nameidata *ndp)
cnp->cn_lkflags = LK_SHARED;
dp = ndp->ni_startdir;
- ndp->ni_startdir = NULLVP;
+ ndp->ni_startdir = NULL;
/*
* Leading slashes, if any, are supposed to be skipped by the caller.
@@ -1284,7 +1284,7 @@ dirloop:
(cnp->cn_flags & NOCROSSMOUNT) != 0)) {
ndp->ni_dvp = dp;
ndp->ni_vp = dp;
- VREF(dp);
+ vref(dp);
goto nextname;
}
if ((dp->v_vflag & VV_ROOT) == 0)
@@ -1295,7 +1295,7 @@ dirloop:
}
tdp = dp;
dp = dp->v_mount->mnt_vnodecovered;
- VREF(dp);
+ vref(dp);
vput(tdp);
vn_lock(dp,
enforce_lkflags(dp->v_mount, cnp->cn_lkflags |
@@ -1343,7 +1343,7 @@ unionlookup:
(dp->v_mount->mnt_flag & MNT_UNION)) {
tdp = dp;
dp = dp->v_mount->mnt_vnodecovered;
- VREF(dp);
+ vref(dp);
vput(tdp);
vn_lock(dp,
enforce_lkflags(dp->v_mount, cnp->cn_lkflags |
@@ -1615,7 +1615,7 @@ vfs_relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
}
/* ASSERT(dvp == ndp->ni_startdir) */
if (refstart)
- VREF(dvp);
+ vref(dvp);
if ((cnp->cn_flags & LOCKPARENT) == 0)
VOP_UNLOCK(dp);
/*
@@ -1653,7 +1653,7 @@ vfs_relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
/* ASSERT(dvp == ndp->ni_startdir) */
if (refstart)
- VREF(dvp);
+ vref(dvp);
if ((cnp->cn_flags & LOCKLEAF) == 0)
VOP_UNLOCK(dp);
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 8e64a7fe966b..13403acacc08 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -683,7 +683,6 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
MPASSERT(mp->mnt_vfs_ops == 1, mp,
("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops));
(void) vfs_busy(mp, MBF_NOWAIT);
- atomic_add_acq_int(&vfsp->vfc_refcount, 1);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;
mp->mnt_stat.f_type = vfsp->vfc_typenum;
@@ -731,7 +730,6 @@ vfs_mount_destroy(struct mount *mp)
__FILE__, __LINE__));
MPPASS(mp->mnt_writeopcount == 0, mp);
MPPASS(mp->mnt_secondary_writes == 0, mp);
- atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
struct vnode *vp;
@@ -769,6 +767,9 @@ vfs_mount_destroy(struct mount *mp)
vfs_free_addrlist(mp->mnt_export);
free(mp->mnt_export, M_MOUNT);
}
+ vfsconf_lock();
+ mp->mnt_vfc->vfc_refcount--;
+ vfsconf_unlock();
crfree(mp->mnt_cred);
uma_zfree(mount_zone, mp);
}
@@ -1133,6 +1134,7 @@ vfs_domount_first(
if (jailed(td->td_ucred) && (!prison_allow(td->td_ucred,
vfsp->vfc_prison_flag) || vp == td->td_ucred->cr_prison->pr_root)) {
vput(vp);
+ vfs_unref_vfsconf(vfsp);
return (EPERM);
}
@@ -1169,6 +1171,7 @@ vfs_domount_first(
}
if (error != 0) {
vput(vp);
+ vfs_unref_vfsconf(vfsp);
return (error);
}
vn_seqc_write_begin(vp);
diff --git a/sys/kern/vfs_mountroot.c b/sys/kern/vfs_mountroot.c
index e0d1cec5bd71..dd2364f5bf6a 100644
--- a/sys/kern/vfs_mountroot.c
+++ b/sys/kern/vfs_mountroot.c
@@ -266,7 +266,7 @@ vfs_mountroot_devfs(struct thread *td, struct mount **mpp)
if (vfsp == NULL)
return (ENOENT);
- mp = vfs_mount_alloc(NULLVP, vfsp, "/dev", td->td_ucred);
+ mp = vfs_mount_alloc(NULL, vfsp, "/dev", td->td_ucred);
error = VFS_MOUNT(mp);
KASSERT(error == 0, ("VFS_MOUNT(devfs) failed %d", error));
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index a6e38be89291..73e110c05bc1 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -2186,6 +2186,8 @@ freevnode(struct vnode *vp)
{
struct bufobj *bo;
+ ASSERT_VOP_UNLOCKED(vp, __func__);
+
/*
* The vnode has been marked for destruction, so free it.
*
@@ -2222,12 +2224,16 @@ freevnode(struct vnode *vp)
mac_vnode_destroy(vp);
#endif
if (vp->v_pollinfo != NULL) {
+ int error __diagused;
+
/*
* Use LK_NOWAIT to shut up witness about the lock. We may get
* here while having another vnode locked when trying to
* satisfy a lookup and needing to recycle.
*/
- VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT);
+ error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT);
+ VNASSERT(error == 0, vp,
+ ("freevnode: cannot lock vp %p for pollinfo destroy", vp));
destroy_vpollinfo(vp->v_pollinfo);
VOP_UNLOCK(vp);
vp->v_pollinfo = NULL;
@@ -3346,13 +3352,22 @@ vget_abort(struct vnode *vp, enum vgetstate vs)
switch (vs) {
case VGET_USECOUNT:
vrele(vp);
- break;
+ goto out_ok;
case VGET_HOLDCNT:
vdrop(vp);
+ goto out_ok;
+ case VGET_NONE:
break;
- default:
- __assert_unreachable();
}
+
+ __assert_unreachable();
+
+ /*
+	 * This goto label exists in case the cases above ever come to share
+	 * more than just the 'return' statement.
+ */
+out_ok:
+ return;
}
int
@@ -3561,11 +3576,6 @@ enum vput_op { VRELE, VPUT, VUNREF };
* exclusive lock on the vnode, while it is legal to call here with only a
* shared lock (or no locks). If locking the vnode in an expected manner fails,
* inactive processing gets deferred to the syncer.
- *
- * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
- * on the lock being held all the way until VOP_INACTIVE. This in particular
- * happens with UFS which adds half-constructed vnodes to the hash, where they
- * can be found by other code.
*/
static void
vput_final(struct vnode *vp, enum vput_op func)
@@ -3643,26 +3653,26 @@ vput_final(struct vnode *vp, enum vput_op func)
}
break;
}
- if (error == 0) {
- if (func == VUNREF) {
- VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
- ("recursive vunref"));
- vp->v_vflag |= VV_UNREF;
- }
- for (;;) {
- error = vinactive(vp);
- if (want_unlock)
- VOP_UNLOCK(vp);
- if (error != ERELOOKUP || !want_unlock)
- break;
- VOP_LOCK(vp, LK_EXCLUSIVE);
- }
- if (func == VUNREF)
- vp->v_vflag &= ~VV_UNREF;
- vdropl(vp);
- } else {
+ if (error != 0) {
vdefer_inactive(vp);
+ return;
+ }
+ if (func == VUNREF) {
+ VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
+ ("recursive vunref"));
+ vp->v_vflag |= VV_UNREF;
+ }
+ for (;;) {
+ error = vinactive(vp);
+ if (want_unlock)
+ VOP_UNLOCK(vp);
+ if (error != ERELOOKUP || !want_unlock)
+ break;
+ VOP_LOCK(vp, LK_EXCLUSIVE);
}
+ if (func == VUNREF)
+ vp->v_vflag &= ~VV_UNREF;
+ vdropl(vp);
return;
out:
if (func == VPUT)
@@ -4501,6 +4511,17 @@ vgonel(struct vnode *vp)
/*
* Done with purge, reset to the standard lock and invalidate
* the vnode.
+ *
+ * FIXME: this is buggy for vnode ops with custom locking primitives.
+ *
+ * vget used to be gated with a special flag serializing it against vgone,
+ * which got lost in the process of SMP-ifying the VFS layer.
+ *
+ * Suppose a custom locking routine references ->v_data.
+ *
+	 * Since it is now possible to start executing it while vgone is in
+	 * progress, this very well may crash as ->v_data gets invalidated
+	 * and the memory used to back it is freed.
*/
vp->v_vnlock = &vp->v_lock;
vp->v_op = &dead_vnodeops;
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index c64618036733..9e1275359715 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -1932,7 +1932,7 @@ restart:
if (error != 0)
return (error);
- if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
+ if (nd.ni_vp != NULL || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
NDFREE_PNBUF(&nd);
if (nd.ni_vp == nd.ni_dvp)
vrele(nd.ni_dvp);
@@ -2839,7 +2839,7 @@ setfflags(struct thread *td, struct vnode *vp, u_long flags)
* if they are allowed to set flags and programs assume that
* chown can't fail when done as root.
*/
- if (vp->v_type == VCHR || vp->v_type == VBLK) {
+ if (VN_ISDEV(vp)) {
error = priv_check(td, PRIV_VFS_CHFLAGS_DEV);
if (error != 0)
return (error);
@@ -4363,7 +4363,7 @@ unionread:
struct vnode *tvp = vp;
vp = vp->v_mount->mnt_vnodecovered;
- VREF(vp);
+ vref(vp);
fp->f_vnode = vp;
foffset = 0;
vput(tvp);
@@ -5050,11 +5050,12 @@ kern_copy_file_range(struct thread *td, int infd, off_t *inoffp, int outfd,
size_t retlen;
void *rl_rcookie, *rl_wcookie;
off_t inoff, outoff, savinoff, savoutoff;
- bool foffsets_locked;
+ bool foffsets_locked, foffsets_set;
infp = outfp = NULL;
rl_rcookie = rl_wcookie = NULL;
foffsets_locked = false;
+ foffsets_set = false;
error = 0;
retlen = 0;
@@ -5122,6 +5123,8 @@ kern_copy_file_range(struct thread *td, int infd, off_t *inoffp, int outfd,
}
foffset_lock_pair(infp1, &inoff, outfp1, &outoff, 0);
foffsets_locked = true;
+ } else {
+ foffsets_set = true;
}
savinoff = inoff;
savoutoff = outoff;
@@ -5180,11 +5183,12 @@ out:
vn_rangelock_unlock(invp, rl_rcookie);
if (rl_wcookie != NULL)
vn_rangelock_unlock(outvp, rl_wcookie);
+ if ((foffsets_locked || foffsets_set) &&
+ (error == EINTR || error == ERESTART)) {
+ inoff = savinoff;
+ outoff = savoutoff;
+ }
if (foffsets_locked) {
- if (error == EINTR || error == ERESTART) {
- inoff = savinoff;
- outoff = savoutoff;
- }
if (inoffp == NULL)
foffset_unlock(infp, inoff, 0);
else
@@ -5193,6 +5197,9 @@ out:
foffset_unlock(outfp, outoff, 0);
else
*outoffp = outoff;
+ } else if (foffsets_set) {
+ *inoffp = inoff;
+ *outoffp = outoff;
}
if (outfp != NULL)
fdrop(outfp, td);
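
The foffsets_set path covers callers that pass explicit offset pointers: on EINTR or ERESTART both offsets are now rolled back to their saved values before being copied out, so a restarted call resumes where the interrupted one began. A userland sketch of the calling convention this protects (FreeBSD's copy_file_range(2); the descriptors are assumed to be open):

#include <sys/types.h>
#include <errno.h>
#include <unistd.h>

/*
 * Illustrative only: copy len bytes between two file descriptors at
 * explicit offsets. With the fix above an interrupted syscall leaves
 * the offsets at their pre-call values, so a plain retry is correct.
 */
static int
copy_at(int infd, off_t inoff, int outfd, off_t outoff, size_t len)
{
	ssize_t n;

	while (len > 0) {
		n = copy_file_range(infd, &inoff, outfd, &outoff, len, 0);
		if (n == -1) {
			if (errno == EINTR)
				continue;	/* offsets unchanged, retry */
			return (-1);
		}
		if (n == 0)
			break;			/* EOF on input */
		len -= (size_t)n;
	}
	return (0);
}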
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 93f87ddae4de..a53df50c06bd 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -798,58 +798,84 @@ vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
}
#if OFF_MAX <= LONG_MAX
-off_t
-foffset_lock(struct file *fp, int flags)
+static void
+file_v_lock(struct file *fp, short lock_bit, short lock_wait_bit)
{
- volatile short *flagsp;
- off_t res;
+ short *flagsp;
short state;
- KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
-
- if ((flags & FOF_NOLOCK) != 0)
- return (atomic_load_long(&fp->f_offset));
-
- /*
- * According to McKusick the vn lock was protecting f_offset here.
- * It is now protected by the FOFFSET_LOCKED flag.
- */
- flagsp = &fp->f_vnread_flags;
- if (atomic_cmpset_acq_16(flagsp, 0, FOFFSET_LOCKED))
- return (atomic_load_long(&fp->f_offset));
+ flagsp = &fp->f_vflags;
+ state = atomic_load_16(flagsp);
+ for (;;) {
+ if ((state & lock_bit) != 0)
+ break;
+ if (atomic_fcmpset_acq_16(flagsp, &state, state | lock_bit))
+ return;
+ }
- sleepq_lock(&fp->f_vnread_flags);
+ sleepq_lock(flagsp);
state = atomic_load_16(flagsp);
for (;;) {
- if ((state & FOFFSET_LOCKED) == 0) {
+ if ((state & lock_bit) == 0) {
if (!atomic_fcmpset_acq_16(flagsp, &state,
- FOFFSET_LOCKED))
+ state | lock_bit))
continue;
break;
}
- if ((state & FOFFSET_LOCK_WAITING) == 0) {
+ if ((state & lock_wait_bit) == 0) {
if (!atomic_fcmpset_acq_16(flagsp, &state,
- state | FOFFSET_LOCK_WAITING))
+ state | lock_wait_bit))
continue;
}
DROP_GIANT();
- sleepq_add(&fp->f_vnread_flags, NULL, "vofflock", 0, 0);
- sleepq_wait(&fp->f_vnread_flags, PRI_MAX_KERN);
+ sleepq_add(flagsp, NULL, "vofflock", 0, 0);
+ sleepq_wait(flagsp, PRI_MAX_KERN);
PICKUP_GIANT();
- sleepq_lock(&fp->f_vnread_flags);
+ sleepq_lock(flagsp);
state = atomic_load_16(flagsp);
}
- res = atomic_load_long(&fp->f_offset);
- sleepq_release(&fp->f_vnread_flags);
- return (res);
+ sleepq_release(flagsp);
}
-void
-foffset_unlock(struct file *fp, off_t val, int flags)
+static void
+file_v_unlock(struct file *fp, short lock_bit, short lock_wait_bit)
{
- volatile short *flagsp;
+ short *flagsp;
short state;
+ flagsp = &fp->f_vflags;
+ state = atomic_load_16(flagsp);
+ for (;;) {
+ if ((state & lock_wait_bit) != 0)
+ break;
+ if (atomic_fcmpset_rel_16(flagsp, &state, state & ~lock_bit))
+ return;
+ }
+
+ sleepq_lock(flagsp);
+ MPASS((*flagsp & lock_bit) != 0);
+ MPASS((*flagsp & lock_wait_bit) != 0);
+ atomic_clear_16(flagsp, lock_bit | lock_wait_bit);
+ sleepq_broadcast(flagsp, SLEEPQ_SLEEP, 0, 0);
+ sleepq_release(flagsp);
+}
+
+off_t
+foffset_lock(struct file *fp, int flags)
+{
+ KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
+
+ if ((flags & FOF_NOLOCK) == 0) {
+ file_v_lock(fp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
+ }
+
+ return (atomic_load_long(&fp->f_offset));
+}
+
+void
+foffset_unlock(struct file *fp, off_t val, int flags)
+{
KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
if ((flags & FOF_NOUPDATE) == 0)
@@ -859,21 +885,10 @@ foffset_unlock(struct file *fp, off_t val, int flags)
if ((flags & FOF_NEXTOFF_W) != 0)
fp->f_nextoff[UIO_WRITE] = val;
- if ((flags & FOF_NOLOCK) != 0)
- return;
-
- flagsp = &fp->f_vnread_flags;
- state = atomic_load_16(flagsp);
- if ((state & FOFFSET_LOCK_WAITING) == 0 &&
- atomic_cmpset_rel_16(flagsp, state, 0))
- return;
-
- sleepq_lock(&fp->f_vnread_flags);
- MPASS((fp->f_vnread_flags & FOFFSET_LOCKED) != 0);
- MPASS((fp->f_vnread_flags & FOFFSET_LOCK_WAITING) != 0);
- fp->f_vnread_flags = 0;
- sleepq_broadcast(&fp->f_vnread_flags, SLEEPQ_SLEEP, 0, 0);
- sleepq_release(&fp->f_vnread_flags);
+ if ((flags & FOF_NOLOCK) == 0) {
+ file_v_unlock(fp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
+ }
}
static off_t
@@ -882,7 +897,47 @@ foffset_read(struct file *fp)
return (atomic_load_long(&fp->f_offset));
}
-#else
+
+void
+fsetfl_lock(struct file *fp)
+{
+ file_v_lock(fp, FILE_V_SETFL_LOCKED, FILE_V_SETFL_LOCK_WAITING);
+}
+
+void
+fsetfl_unlock(struct file *fp)
+{
+ file_v_unlock(fp, FILE_V_SETFL_LOCKED, FILE_V_SETFL_LOCK_WAITING);
+}
+
+#else /* OFF_MAX <= LONG_MAX */
+
+static void
+file_v_lock_mtxp(struct file *fp, struct mtx *mtxp, short lock_bit,
+ short lock_wait_bit)
+{
+ mtx_assert(mtxp, MA_OWNED);
+
+ while ((fp->f_vflags & lock_bit) != 0) {
+ fp->f_vflags |= lock_wait_bit;
+ msleep(&fp->f_vflags, mtxp, PRI_MAX_KERN,
+ "vofflock", 0);
+ }
+ fp->f_vflags |= lock_bit;
+}
+
+static void
+file_v_unlock_mtxp(struct file *fp, struct mtx *mtxp, short lock_bit,
+ short lock_wait_bit)
+{
+ mtx_assert(mtxp, MA_OWNED);
+
+ KASSERT((fp->f_vflags & lock_bit) != 0, ("Lost lock_bit"));
+ if ((fp->f_vflags & lock_wait_bit) != 0)
+ wakeup(&fp->f_vflags);
+ fp->f_vflags &= ~(lock_bit | lock_wait_bit);
+}
+
off_t
foffset_lock(struct file *fp, int flags)
{
@@ -894,12 +949,8 @@ foffset_lock(struct file *fp, int flags)
mtxp = mtx_pool_find(mtxpool_sleep, fp);
mtx_lock(mtxp);
if ((flags & FOF_NOLOCK) == 0) {
- while (fp->f_vnread_flags & FOFFSET_LOCKED) {
- fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
- msleep(&fp->f_vnread_flags, mtxp, PRI_MAX_KERN,
- "vofflock", 0);
- }
- fp->f_vnread_flags |= FOFFSET_LOCKED;
+ file_v_lock_mtxp(fp, mtxp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
}
res = fp->f_offset;
mtx_unlock(mtxp);
@@ -922,11 +973,8 @@ foffset_unlock(struct file *fp, off_t val, int flags)
if ((flags & FOF_NEXTOFF_W) != 0)
fp->f_nextoff[UIO_WRITE] = val;
if ((flags & FOF_NOLOCK) == 0) {
- KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
- ("Lost FOFFSET_LOCKED"));
- if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
- wakeup(&fp->f_vnread_flags);
- fp->f_vnread_flags = 0;
+ file_v_unlock_mtxp(fp, mtxp, FILE_V_FOFFSET_LOCKED,
+ FILE_V_FOFFSET_LOCK_WAITING);
}
mtx_unlock(mtxp);
}
@@ -937,6 +985,30 @@ foffset_read(struct file *fp)
return (foffset_lock(fp, FOF_NOLOCK));
}
+
+void
+fsetfl_lock(struct file *fp)
+{
+ struct mtx *mtxp;
+
+ mtxp = mtx_pool_find(mtxpool_sleep, fp);
+ mtx_lock(mtxp);
+ file_v_lock_mtxp(fp, mtxp, FILE_V_SETFL_LOCKED,
+ FILE_V_SETFL_LOCK_WAITING);
+ mtx_unlock(mtxp);
+}
+
+void
+fsetfl_unlock(struct file *fp)
+{
+ struct mtx *mtxp;
+
+ mtxp = mtx_pool_find(mtxpool_sleep, fp);
+ mtx_lock(mtxp);
+ file_v_unlock_mtxp(fp, mtxp, FILE_V_SETFL_LOCKED,
+ FILE_V_SETFL_LOCK_WAITING);
+ mtx_unlock(mtxp);
+}
#endif
void
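
file_v_lock()/file_v_unlock() factor the old FOFFSET_LOCKED protocol into a reusable pair of bits in f_vflags: a CAS-only fast path, and a sleepqueue slow path entered once a waiter bit is published, now shared by the offset lock and the new fsetfl lock. A stripped-down C11 model of the same two-bit protocol, with a mutex and condition variable standing in for the kernel sleepqueue:

#include <pthread.h>
#include <stdatomic.h>

#define	LOCK_BIT	0x1
#define	WAIT_BIT	0x2

static _Atomic unsigned short flags;
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

static void
bit_lock(void)
{
	unsigned short s = atomic_load(&flags);

	/* Uncontended fast path: set the lock bit if it is clear. */
	while ((s & LOCK_BIT) == 0)
		if (atomic_compare_exchange_weak(&flags, &s, s | LOCK_BIT))
			return;

	/* Contended: publish the waiter bit, then sleep. */
	pthread_mutex_lock(&mtx);
	for (;;) {
		s = atomic_load(&flags);
		if ((s & LOCK_BIT) == 0) {
			if (atomic_compare_exchange_weak(&flags, &s,
			    s | LOCK_BIT))
				break;
			continue;
		}
		if ((s & WAIT_BIT) == 0 &&
		    !atomic_compare_exchange_weak(&flags, &s, s | WAIT_BIT))
			continue;
		pthread_cond_wait(&cv, &mtx);
	}
	pthread_mutex_unlock(&mtx);
}

static void
bit_unlock(void)
{
	unsigned short s = atomic_load(&flags);

	/* Fast path: no waiters, just clear the lock bit. */
	while ((s & WAIT_BIT) == 0)
		if (atomic_compare_exchange_weak(&flags, &s, s & ~LOCK_BIT))
			return;

	/* Waiters seen: clear both bits and wake everybody. */
	pthread_mutex_lock(&mtx);
	atomic_fetch_and(&flags, (unsigned short)~(LOCK_BIT | WAIT_BIT));
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&mtx);
}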
@@ -3444,7 +3516,7 @@ vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
dat = NULL;
if ((flags & COPY_FILE_RANGE_CLONE) != 0) {
- error = ENOSYS;
+ error = EOPNOTSUPP;
goto out;
}
diff --git a/sys/libkern/arc4random.c b/sys/libkern/arc4random.c
index 016822e9f03c..6fca7c3c4e9d 100644
--- a/sys/libkern/arc4random.c
+++ b/sys/libkern/arc4random.c
@@ -156,7 +156,7 @@ chacha20_randomstir(struct chacha20_s *chacha20)
* Initialize the contexts.
*/
static void
-chacha20_init(void)
+chacha20_init(void *dummy __unused)
{
struct chacha20_s *chacha20;
@@ -176,7 +176,7 @@ SYSINIT(chacha20, SI_SUB_LOCK, SI_ORDER_ANY, chacha20_init, NULL);
static void
-chacha20_uninit(void)
+chacha20_uninit(void *dummy __unused)
{
struct chacha20_s *chacha20;
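
The signature change is not cosmetic: SYSINIT(9) dispatches handlers through a function pointer that receives the registered void * argument, and declaring the function without the parameter only worked by accident of the calling convention. The expected shape, mirroring the chacha20 registration above:

#include <sys/param.h>
#include <sys/kernel.h>

/* A SYSINIT handler takes the void * registered as the last argument. */
static void
example_init(void *dummy __unused)
{
	/* one-time setup goes here */
}
SYSINIT(example, SI_SUB_LOCK, SI_ORDER_ANY, example_init, NULL);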
diff --git a/sys/libkern/arm64/crc32c_armv8.S b/sys/libkern/arm64/crc32c_armv8.S
index 649afff4b711..430b24f7615a 100644
--- a/sys/libkern/arm64/crc32c_armv8.S
+++ b/sys/libkern/arm64/crc32c_armv8.S
@@ -39,14 +39,14 @@ ENTRY(armv8_crc32c)
cbz w2, end
tbz x1, #0x0, half_word_aligned
sub w2, w2, 0x1
- ldr w10, [x1], #0x1
+ ldrb w10, [x1], #0x1
crc32cb w0, w0, w10
half_word_aligned:
cmp w2, #0x2
b.lo last_byte
tbz x1, #0x1, word_aligned
sub w2, w2, 0x2
- ldr w10, [x1], #0x2
+ ldrh w10, [x1], #0x2
crc32ch w0, w0, w10
word_aligned:
cmp w2, #0x4
@@ -69,11 +69,11 @@ last_word:
crc32cw w0, w0, w10
last_half_word:
tbz w2, #0x1, last_byte
- ldr w10, [x1], #0x2
+ ldrh w10, [x1], #0x2
crc32ch w0, w0, w10
last_byte:
tbz w2, #0x0, end
- ldr w10, [x1], #0x1
+ ldrb w10, [x1], #0x1
crc32cb w0, w0, w10
end:
ret
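
The bug pattern here, a full-word ldr where the tail handling consumes only a byte (crc32cb) or a halfword (crc32ch), both over-reads the buffer and corrupts the checksum, and it is exactly what an alignment-sweeping self-test catches. A hedged userland harness sketch: a portable bitwise CRC32C serves as the reference, and every (offset, length) pair is checked one-shot against byte-at-a-time folding; to exercise a hardware implementation, substitute it for the one-shot side.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Portable bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
static uint32_t
crc32c_ref(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int i;

	crc = ~crc;
	while (len-- > 0) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return (~crc);
}

int
main(void)
{
	uint8_t buf[64];
	size_t off, n, i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)(i * 7 + 3);
	for (off = 0; off < 8; off++) {
		for (n = 0; off + n <= sizeof(buf); n++) {
			uint32_t one = crc32c_ref(0, buf + off, n);
			uint32_t inc = 0;

			/* Fold a byte at a time; must match one-shot CRC. */
			for (i = 0; i < n; i++)
				inc = crc32c_ref(inc, buf + off + i, 1);
			assert(one == inc);
		}
	}
	return (0);
}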
diff --git a/sys/libkern/qsort.c b/sys/libkern/qsort.c
index 0255a3d64d76..342b1525dd8a 100644
--- a/sys/libkern/qsort.c
+++ b/sys/libkern/qsort.c
@@ -114,11 +114,10 @@ qsort(void *a, size_t n, size_t es, cmp_t *cmp)
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
size_t d1, d2;
int cmp_result;
- int swaptype_long, swaptype_int, swap_cnt;
+ int swaptype_long, swaptype_int;
loop: SWAPINIT(long, a, es);
SWAPINIT(int, a, es);
- swap_cnt = 0;
if (n < 7) {
for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
for (pl = pm;
@@ -147,7 +146,6 @@ loop: SWAPINIT(long, a, es);
for (;;) {
while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
if (cmp_result == 0) {
- swap_cnt = 1;
swap(pa, pb);
pa += es;
}
@@ -155,7 +153,6 @@ loop: SWAPINIT(long, a, es);
}
while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
if (cmp_result == 0) {
- swap_cnt = 1;
swap(pc, pd);
pd -= es;
}
@@ -164,18 +161,9 @@ loop: SWAPINIT(long, a, es);
if (pb > pc)
break;
swap(pb, pc);
- swap_cnt = 1;
pb += es;
pc -= es;
}
- if (swap_cnt == 0) { /* Switch to insertion sort */
- for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
- for (pl = pm;
- pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
- pl -= es)
- swap(pl, pl - es);
- return;
- }
pn = (char *)a + n * es;
d1 = MIN(pa - (char *)a, pb - pa);
diff --git a/sys/libkern/x86/crc32_sse42.c b/sys/libkern/x86/crc32_sse42.c
index b79c7afbeeb1..94ffdc178910 100644
--- a/sys/libkern/x86/crc32_sse42.c
+++ b/sys/libkern/x86/crc32_sse42.c
@@ -199,8 +199,10 @@ crc32c_shift(uint32_t zeros[][256], uint32_t crc)
static void
#ifndef _KERNEL
__attribute__((__constructor__))
-#endif
crc32c_init_hw(void)
+#else
+crc32c_init_hw(void *dummy __unused)
+#endif
{
crc32c_zeros(crc32c_long, LONG);
crc32c_zeros(crc32c_2long, 2 * LONG);
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 5315d518afd8..feb9778c23da 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -395,6 +395,7 @@ SUBDIR= \
sysvipc \
tarfs \
tcp \
+ ${_thunderbolt} \
${_ti} \
tmpfs \
${_toecore} \
@@ -576,7 +577,10 @@ _mlx5ib= mlx5ib
${MACHINE_CPUARCH} == "i386"
_ena= ena
_gve= gve
+# gcc13 and earlier lack __builtin_bitcountg used by linux emulation
+.if !(${COMPILER_TYPE} == "gcc" && ${COMPILER_VERSION} < 140000)
_iwlwifi= iwlwifi
+.endif
_rtw88= rtw88
_rtw89= rtw89
_vmware= vmware
@@ -922,6 +926,10 @@ _bcm283x_clkman= bcm283x_clkman
_bcm283x_pwm= bcm283x_pwm
.endif
+.if ${MACHINE_CPUARCH} == "amd64"
+_thunderbolt= thunderbolt
+.endif
+
.if !(${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} < 110000)
# LLVM 10 crashes when building if_malo_pci.c, fixed in LLVM11:
# https://bugs.llvm.org/show_bug.cgi?id=44351
diff --git a/sys/modules/aic7xxx/ahc/Makefile b/sys/modules/aic7xxx/ahc/Makefile
index 3741d4fb666f..6f9bdcb1d8bd 100644
--- a/sys/modules/aic7xxx/ahc/Makefile
+++ b/sys/modules/aic7xxx/ahc/Makefile
@@ -1,6 +1,4 @@
SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/dev/aic7xxx
KMOD= ahc
SUBDIR+= ahc_isa ahc_pci
diff --git a/sys/modules/cxgb/Makefile b/sys/modules/cxgb/Makefile
index 2989ad580b97..7ebdc1d51945 100644
--- a/sys/modules/cxgb/Makefile
+++ b/sys/modules/cxgb/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
SUBDIR= cxgb
SUBDIR+= cxgb_t3fw
diff --git a/sys/modules/cxgbe/Makefile b/sys/modules/cxgbe/Makefile
index f94d3ae07f66..c2ee71465789 100644
--- a/sys/modules/cxgbe/Makefile
+++ b/sys/modules/cxgbe/Makefile
@@ -1,6 +1,3 @@
-#
-#
-
SYSDIR?=${SRCTOP}/sys
.include "${SYSDIR}/conf/kern.opts.mk"
@@ -13,6 +10,7 @@ SUBDIR+= if_ccv
SUBDIR+= t4_firmware
SUBDIR+= t5_firmware
SUBDIR+= t6_firmware
+SUBDIR+= t7_firmware
SUBDIR+= ${_tom}
SUBDIR+= ${_iw_cxgbe}
SUBDIR+= ${_cxgbei}
diff --git a/sys/modules/cxgbe/if_cxgbe/Makefile b/sys/modules/cxgbe/if_cxgbe/Makefile
index 981c3466c452..33383c84837f 100644
--- a/sys/modules/cxgbe/if_cxgbe/Makefile
+++ b/sys/modules/cxgbe/if_cxgbe/Makefile
@@ -23,6 +23,7 @@ SRCS+= t4_hw.c
SRCS+= t4_if.c t4_if.h
SRCS+= t4_iov.c
SRCS.KERN_TLS+= t6_kern_tls.c
+SRCS.KERN_TLS+= t7_kern_tls.c
SRCS+= t4_keyctx.c
SRCS+= t4_l2t.c
SRCS+= t4_main.c
@@ -31,6 +32,7 @@ SRCS+= t4_netmap.c
SRCS+= t4_sched.c
SRCS+= t4_sge.c
SRCS+= t4_smt.c
+SRCS+= t4_tpt.c
SRCS+= t4_tracer.c
SRCS+= cudbg_common.c
SRCS+= cudbg_flash_utils.c
diff --git a/sys/modules/cxgbe/t7_firmware/Makefile b/sys/modules/cxgbe/t7_firmware/Makefile
new file mode 100644
index 000000000000..afce06487f22
--- /dev/null
+++ b/sys/modules/cxgbe/t7_firmware/Makefile
@@ -0,0 +1,23 @@
+#
+# $FreeBSD$
+#
+
+T7FW= ${SRCTOP}/sys/dev/cxgbe/firmware
+.PATH: ${T7FW}
+
+KMOD= t7fw_cfg
+FIRMWS= ${KMOD}.txt:${KMOD}:1.0.0.0
+
+# You can have additional configuration files in the ${T7FW} directory.
+# t7fw_cfg_<name>.txt
+CFG_FILES != cd ${T7FW} && echo ${KMOD}_*.txt
+.for F in ${CFG_FILES}
+.if exists(${F})
+FIRMWS+= ${F}:${F:C/.txt//}:1.0.0.0
+.endif
+.endfor
+
+#T7FW_VER= 1.27.0.71
+#FIRMWS+= t7fw-${T7FW_VER}.bin:t7fw:${T7FW_VER}
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/dpdk_lpm4/Makefile b/sys/modules/dpdk_lpm4/Makefile
index ff68fac78915..9bc2693aeffb 100644
--- a/sys/modules/dpdk_lpm4/Makefile
+++ b/sys/modules/dpdk_lpm4/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/contrib/dpdk_rte_lpm
KMOD= dpdk_lpm4
diff --git a/sys/modules/dpdk_lpm6/Makefile b/sys/modules/dpdk_lpm6/Makefile
index f2248e5d1c1c..9de2c6650422 100644
--- a/sys/modules/dpdk_lpm6/Makefile
+++ b/sys/modules/dpdk_lpm6/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/contrib/dpdk_rte_lpm
KMOD= dpdk_lpm6
diff --git a/sys/modules/dtb/rockchip/Makefile b/sys/modules/dtb/rockchip/Makefile
index 33c2048cbb15..9c8ca1acc837 100644
--- a/sys/modules/dtb/rockchip/Makefile
+++ b/sys/modules/dtb/rockchip/Makefile
@@ -21,7 +21,8 @@ DTS= \
rockchip/rk3566-quartz64-a.dts \
rockchip/rk3568-nanopi-r5s.dts \
rockchip/rk3566-radxa-zero-3e.dts \
- rockchip/rk3566-radxa-zero-3w.dts
+ rockchip/rk3566-radxa-zero-3w.dts \
+ rockchip/rk3568-bpi-r2-pro.dts
DTSO= rk3328-analog-sound.dtso \
rk3328-i2c0.dtso \
diff --git a/sys/modules/dtrace/dtraceall/dtraceall.c b/sys/modules/dtrace/dtraceall/dtraceall.c
index 851d33a7e518..1978b9ead362 100644
--- a/sys/modules/dtrace/dtraceall/dtraceall.c
+++ b/sys/modules/dtrace/dtraceall/dtraceall.c
@@ -74,11 +74,11 @@ MODULE_DEPEND(dtraceall, dtnfscl, 1, 1, 1);
defined(__i386__) || defined(__powerpc__) || defined(__riscv)
MODULE_DEPEND(dtraceall, fbt, 1, 1, 1);
#endif
-#if defined(__amd64__) || defined(__i386__) || defined(__powerpc__)
-MODULE_DEPEND(dtraceall, fasttrap, 1, 1, 1);
-#if defined(__amd64__)
+#if defined(__amd64__) || defined(__aarch64__) || defined(__riscv)
MODULE_DEPEND(dtraceall, kinst, 1, 1, 1);
#endif
+#if defined(__amd64__) || defined(__i386__) || defined(__powerpc__)
+MODULE_DEPEND(dtraceall, fasttrap, 1, 1, 1);
#endif
MODULE_DEPEND(dtraceall, sdt, 1, 1, 1);
MODULE_DEPEND(dtraceall, systrace, 1, 1, 1);
diff --git a/sys/modules/e6000sw/Makefile b/sys/modules/e6000sw/Makefile
index da08f80b0a29..73cbaea801f0 100644
--- a/sys/modules/e6000sw/Makefile
+++ b/sys/modules/e6000sw/Makefile
@@ -3,6 +3,6 @@
KMOD= e6000sw
SRCS= e6000sw.c
-SRCS+= bus_if.h etherswitch_if.h mdio_if.h miibus_if.h ofw_bus_if.h opt_platform.h
+SRCS+= bus_if.h device_if.h etherswitch_if.h mdio_if.h miibus_if.h ofw_bus_if.h opt_platform.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/etherswitch/Makefile b/sys/modules/etherswitch/Makefile
index 087231545cd4..0b16a19e5117 100644
--- a/sys/modules/etherswitch/Makefile
+++ b/sys/modules/etherswitch/Makefile
@@ -3,7 +3,7 @@
KMOD = etherswitch
SRCS= etherswitch.c
-SRCS+= mdio_if.h miibus_if.h etherswitch_if.h etherswitch_if.c
+SRCS+= bus_if.h device_if.h mdio_if.h miibus_if.h etherswitch_if.h etherswitch_if.c
CFLAGS+= -I${SRCTOP}/sys/dev/etherswitch
.include <bsd.kmod.mk>
diff --git a/sys/modules/evdev/Makefile b/sys/modules/evdev/Makefile
index bd66013885db..20813b73f6dd 100644
--- a/sys/modules/evdev/Makefile
+++ b/sys/modules/evdev/Makefile
@@ -2,7 +2,7 @@
KMOD= evdev
SRCS= cdev.c evdev.c evdev_mt.c evdev_utils.c
-SRCS+= opt_evdev.h bus_if.h device_if.h
+SRCS+= opt_evdev.h opt_kbd.h bus_if.h device_if.h
EXPORT_SYMS= YES
diff --git a/sys/modules/fib_dxr/Makefile b/sys/modules/fib_dxr/Makefile
index 7d1996ba510f..f8a28abe957a 100644
--- a/sys/modules/fib_dxr/Makefile
+++ b/sys/modules/fib_dxr/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/netinet
KMOD= fib_dxr
diff --git a/sys/modules/gpio/gpioaei/Makefile b/sys/modules/gpio/gpioaei/Makefile
index 8f856af48eb7..1f0f1d0e53a6 100644
--- a/sys/modules/gpio/gpioaei/Makefile
+++ b/sys/modules/gpio/gpioaei/Makefile
@@ -10,6 +10,8 @@ SRCS+= \
gpio_if.h \
gpiobus_if.h
+SRCS+= opt_acpi.h opt_platform.h
+
CFLAGS+= -I. -I${SRCTOP}/sys/dev/gpio/
.include <bsd.kmod.mk>
diff --git a/sys/modules/gve/Makefile b/sys/modules/gve/Makefile
index 08b26a994e36..ece275485df7 100644
--- a/sys/modules/gve/Makefile
+++ b/sys/modules/gve/Makefile
@@ -40,5 +40,5 @@ SRCS= gve_main.c \
gve_tx_dqo.c \
gve_sysctl.c
SRCS+= device_if.h bus_if.h pci_if.h
-
+SRCS+= opt_inet6.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/hid/Makefile b/sys/modules/hid/Makefile
index 56c3267d8684..10720570deb7 100644
--- a/sys/modules/hid/Makefile
+++ b/sys/modules/hid/Makefile
@@ -17,6 +17,7 @@ SUBDIR += \
hsctrl \
ietp \
ps4dshock \
+ u2f \
xb360gp
.include <bsd.subdir.mk>
diff --git a/sys/modules/hid/u2f/Makefile b/sys/modules/hid/u2f/Makefile
new file mode 100644
index 000000000000..227e7154035b
--- /dev/null
+++ b/sys/modules/hid/u2f/Makefile
@@ -0,0 +1,8 @@
+.PATH: ${SRCTOP}/sys/dev/hid
+
+KMOD= u2f
+SRCS= u2f.c
+SRCS+= opt_hid.h opt_usb.h
+SRCS+= bus_if.h device_if.h
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/ichwd/Makefile b/sys/modules/ichwd/Makefile
index 3c3bbc37eff5..27b4c38437ff 100644
--- a/sys/modules/ichwd/Makefile
+++ b/sys/modules/ichwd/Makefile
@@ -1,6 +1,6 @@
.PATH: ${SRCTOP}/sys/dev/ichwd
KMOD= ichwd
-SRCS= ichwd.c device_if.h bus_if.h pci_if.h isa_if.h
+SRCS= i6300esbwd.c ichwd.c device_if.h bus_if.h pci_if.h isa_if.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/if_enc/Makefile b/sys/modules/if_enc/Makefile
index 449d869d6a21..bd865a0216a4 100644
--- a/sys/modules/if_enc/Makefile
+++ b/sys/modules/if_enc/Makefile
@@ -1,6 +1,4 @@
SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/net
KMOD= if_enc
diff --git a/sys/modules/if_gif/Makefile b/sys/modules/if_gif/Makefile
index efcd6952a8ac..5e3fda3a51c6 100644
--- a/sys/modules/if_gif/Makefile
+++ b/sys/modules/if_gif/Makefile
@@ -1,6 +1,4 @@
SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/net ${SYSDIR}/netinet ${SYSDIR}/netinet6
KMOD= if_gif
diff --git a/sys/modules/if_gre/Makefile b/sys/modules/if_gre/Makefile
index 9f50708a14d7..58bd03c23785 100644
--- a/sys/modules/if_gre/Makefile
+++ b/sys/modules/if_gre/Makefile
@@ -1,6 +1,5 @@
SYSDIR?=${SRCTOP}/sys
.PATH: ${SYSDIR}/net ${SYSDIR}/netinet ${SYSDIR}/netinet6
-.include "${SYSDIR}/conf/kern.opts.mk"
KMOD= if_gre
SRCS= if_gre.c opt_inet.h opt_inet6.h opt_rss.h
diff --git a/sys/modules/if_infiniband/Makefile b/sys/modules/if_infiniband/Makefile
index 01e3164b1271..7ec343999da1 100644
--- a/sys/modules/if_infiniband/Makefile
+++ b/sys/modules/if_infiniband/Makefile
@@ -3,7 +3,8 @@
KMOD= if_infiniband
SRCS= if_infiniband.c \
opt_inet.h \
- opt_inet6.h
+ opt_inet6.h \
+ opt_kbd.h
EXPORT_SYMS= YES
diff --git a/sys/modules/if_vlan/Makefile b/sys/modules/if_vlan/Makefile
index 3077f4289d5a..0cdab3f7653a 100644
--- a/sys/modules/if_vlan/Makefile
+++ b/sys/modules/if_vlan/Makefile
@@ -2,6 +2,6 @@
KMOD= if_vlan
SRCS= if_vlan.c
-SRCS+= opt_inet.h opt_inet6.h opt_kern_tls.h opt_vlan.h opt_ratelimit.h
+SRCS+= opt_inet.h opt_inet6.h opt_ipsec.h opt_kern_tls.h opt_vlan.h opt_ratelimit.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/irdma/Makefile b/sys/modules/irdma/Makefile
index b2ffb67ca66f..a9ef6e63d3f2 100644
--- a/sys/modules/irdma/Makefile
+++ b/sys/modules/irdma/Makefile
@@ -1,8 +1,8 @@
.include <bsd.own.mk>
-OFED_INC_DIR = ${.CURDIR}/../../ofed/include
-ICE_DIR = ${.CURDIR}/../../dev/ice
-.PATH: ${.CURDIR}/../../dev/irdma
+OFED_INC_DIR = ${SRCTOP}/sys/ofed/include
+ICE_DIR = ${SRCTOP}/sys/dev/ice
+.PATH: ${SRCTOP}/sys/dev/irdma
KMOD= irdma
SRCS= icrdma.c
diff --git a/sys/modules/iser/Makefile b/sys/modules/iser/Makefile
index 615199ec97a3..ff08ae6f346a 100644
--- a/sys/modules/iser/Makefile
+++ b/sys/modules/iser/Makefile
@@ -1,6 +1,4 @@
SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/dev/iser/
KMOD= iser
diff --git a/sys/modules/ix/Makefile b/sys/modules/ix/Makefile
index ad9f36e054e3..aec6eaabffdd 100644
--- a/sys/modules/ix/Makefile
+++ b/sys/modules/ix/Makefile
@@ -7,7 +7,7 @@ SRCS += if_ix.c if_bypass.c if_fdir.c if_sriov.c ix_txrx.c ixgbe_osdep.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
-SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c
+SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c ixgbe_e610.c
CFLAGS+= -I${SRCTOP}/sys/dev/ixgbe
.include <bsd.kmod.mk>
diff --git a/sys/modules/ixv/Makefile b/sys/modules/ixv/Makefile
index 1b4431ac11cd..e7066bb7829b 100644
--- a/sys/modules/ixv/Makefile
+++ b/sys/modules/ixv/Makefile
@@ -7,7 +7,7 @@ SRCS += if_ixv.c if_fdir.c ix_txrx.c ixgbe_osdep.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
-SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c
+SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c ixgbe_e610.c
CFLAGS+= -I${SRCTOP}/sys/dev/ixgbe
.include <bsd.kmod.mk>
diff --git a/sys/modules/ktest/Makefile b/sys/modules/ktest/Makefile
index 151db53417df..d5f15576f38b 100644
--- a/sys/modules/ktest/Makefile
+++ b/sys/modules/ktest/Makefile
@@ -1,8 +1,6 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
SUBDIR= ktest \
ktest_example \
- ktest_netlink_message_writer
+ ktest_netlink_message_writer \
+ ktest_tcphpts
.include <bsd.subdir.mk>
diff --git a/sys/modules/ktest/ktest/Makefile b/sys/modules/ktest/ktest/Makefile
index 3d4f1a8c2cc0..9741662ef709 100644
--- a/sys/modules/ktest/ktest/Makefile
+++ b/sys/modules/ktest/ktest/Makefile
@@ -1,9 +1,5 @@
PACKAGE= tests
-
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
-.PATH: ${SYSDIR}/tests
+.PATH: ${SRCTOP}/sys/tests
KMOD= ktest
SRCS= ktest.c
diff --git a/sys/modules/ktest/ktest_example/Makefile b/sys/modules/ktest/ktest_example/Makefile
index 2b572d867aa5..aacc8f0e4ca5 100644
--- a/sys/modules/ktest/ktest_example/Makefile
+++ b/sys/modules/ktest/ktest_example/Makefile
@@ -1,9 +1,8 @@
PACKAGE= tests
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
+.include "${SRCTOP}/sys/conf/kern.opts.mk"
-.PATH: ${SYSDIR}/tests
+.PATH: ${SRCTOP}/sys/tests
KMOD= ktest_example
SRCS= ktest_example.c
diff --git a/sys/modules/ktest/ktest_netlink_message_writer/Makefile b/sys/modules/ktest/ktest_netlink_message_writer/Makefile
index a91c45755d0d..3f05f9b26785 100644
--- a/sys/modules/ktest/ktest_netlink_message_writer/Makefile
+++ b/sys/modules/ktest/ktest_netlink_message_writer/Makefile
@@ -1,8 +1,6 @@
PACKAGE= tests
SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/netlink
KMOD= ktest_netlink_message_writer
diff --git a/sys/modules/ktest/ktest_tcphpts/Makefile b/sys/modules/ktest/ktest_tcphpts/Makefile
new file mode 100644
index 000000000000..b642c0cb4209
--- /dev/null
+++ b/sys/modules/ktest/ktest_tcphpts/Makefile
@@ -0,0 +1,13 @@
+PACKAGE= tests
+WARNS?= 6
+
+SYSDIR?=${SRCTOP}/sys
+.include "${SYSDIR}/conf/kern.opts.mk"
+
+.PATH: ${SYSDIR}/netinet
+
+KMOD= ktest_tcphpts
+SRCS= tcp_hpts_test.c
+
+.include <bsd.kmod.mk>
+
diff --git a/sys/modules/linux64/Makefile b/sys/modules/linux64/Makefile
index b23891a65a4f..327da11afdaf 100644
--- a/sys/modules/linux64/Makefile
+++ b/sys/modules/linux64/Makefile
@@ -31,6 +31,7 @@ SRCS= linux_dummy_machdep.c \
opt_ktrace.h \
opt_inet6.h \
opt_posix.h \
+ opt_usb.h \
bus_if.h \
device_if.h \
vnode_if.h \
diff --git a/sys/modules/md/Makefile b/sys/modules/md/Makefile
index 2b0586c44717..3f16e04860a1 100644
--- a/sys/modules/md/Makefile
+++ b/sys/modules/md/Makefile
@@ -1,6 +1,6 @@
.PATH: ${SRCTOP}/sys/dev/md
KMOD= geom_md
-SRCS= md.c opt_md.h opt_geom.h opt_rootdevname.h vnode_if.h
+SRCS= bus_if.h device_if.h md.c opt_md.h opt_geom.h opt_rootdevname.h vnode_if.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/miiproxy/Makefile b/sys/modules/miiproxy/Makefile
index 5173358989da..ab92ebe71b43 100644
--- a/sys/modules/miiproxy/Makefile
+++ b/sys/modules/miiproxy/Makefile
@@ -3,7 +3,7 @@
KMOD = miiproxy
SRCS= miiproxy.c
-SRCS+= mdio_if.h miibus_if.h
+SRCS+= bus_if.h device_if.h mdio_if.h miibus_if.h opt_platform.h
CFLAGS+= -I${SRCTOP}/sys/dev/etherswitch
.include <bsd.kmod.mk>
diff --git a/sys/modules/mlx5/Makefile b/sys/modules/mlx5/Makefile
index 506c045ab0ce..65341fdfb8aa 100644
--- a/sys/modules/mlx5/Makefile
+++ b/sys/modules/mlx5/Makefile
@@ -46,7 +46,7 @@ mlx5_ipsec_offload.c \
mlx5_ipsec.c \
mlx5_ipsec_rxtx.c
SRCS+= ${LINUXKPI_GENSRCS}
-SRCS+= opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h
+SRCS+= opt_inet.h opt_inet6.h opt_ipsec.h opt_rss.h opt_ratelimit.h
CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/ofed/include/uapi
diff --git a/sys/modules/mlx5en/Makefile b/sys/modules/mlx5en/Makefile
index 03bf174e33b0..3697fa65dc83 100644
--- a/sys/modules/mlx5en/Makefile
+++ b/sys/modules/mlx5en/Makefile
@@ -15,7 +15,7 @@ mlx5_en_rl.c \
mlx5_en_txrx.c \
mlx5_en_port_buffer.c
SRCS+= ${LINUXKPI_GENSRCS}
-SRCS+= opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h opt_kern_tls.h
+SRCS+= opt_inet.h opt_inet6.h opt_ipsec.h opt_rss.h opt_ratelimit.h opt_kern_tls.h
.if defined(HAVE_PER_CQ_EVENT_PACKET)
CFLAGS+= -DHAVE_PER_CQ_EVENT_PACKET
diff --git a/sys/modules/netgraph/Makefile b/sys/modules/netgraph/Makefile
index 94560d5c51d7..b2d65af16e7f 100644
--- a/sys/modules/netgraph/Makefile
+++ b/sys/modules/netgraph/Makefile
@@ -1,5 +1,3 @@
-# $Whistle: Makefile,v 1.5 1999/01/24 06:48:37 archie Exp $
-
SYSDIR?=${SRCTOP}/sys
.include "${SYSDIR}/conf/kern.opts.mk"
diff --git a/sys/modules/netgraph/checksum/Makefile b/sys/modules/netgraph/checksum/Makefile
index 4e2b1f547a40..bbbc7363d045 100644
--- a/sys/modules/netgraph/checksum/Makefile
+++ b/sys/modules/netgraph/checksum/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
KMOD= ng_checksum
SRCS= ng_checksum.c opt_inet.h opt_inet6.h
diff --git a/sys/modules/netgraph/ksocket/Makefile b/sys/modules/netgraph/ksocket/Makefile
index 395fdbd7b3e3..7099648f6219 100644
--- a/sys/modules/netgraph/ksocket/Makefile
+++ b/sys/modules/netgraph/ksocket/Makefile
@@ -1,4 +1,6 @@
KMOD= ng_ksocket
SRCS= ng_ksocket.c
+SRCS+= opt_inet6.h
+
.include <bsd.kmod.mk>
diff --git a/sys/modules/netmap/Makefile b/sys/modules/netmap/Makefile
index 17b52aec1893..8c114ac51538 100644
--- a/sys/modules/netmap/Makefile
+++ b/sys/modules/netmap/Makefile
@@ -2,9 +2,6 @@
# Compile netmap as a module, useful if you want a netmap bridge
# or loadable drivers.
-.include <bsd.own.mk> # FreeBSD 10 and earlier
-# .include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${.CURDIR}/../../dev/netmap
.PATH.h: ${.CURDIR}/../../net
CFLAGS += -I${.CURDIR}/../../ -D INET -D VIMAGE
diff --git a/sys/modules/nvmf/nvmf/Makefile b/sys/modules/nvmf/nvmf/Makefile
index 7ebe614998bd..21d73d363d2f 100644
--- a/sys/modules/nvmf/nvmf/Makefile
+++ b/sys/modules/nvmf/nvmf/Makefile
@@ -10,4 +10,7 @@ SRCS= nvmf.c \
nvmf_qpair.c \
nvmf_sim.c
+SRCS+= bus_if.h device_if.h
+SRCS+= opt_cam.h
+
.include <bsd.kmod.mk>
diff --git a/sys/modules/opensolaris/Makefile b/sys/modules/opensolaris/Makefile
index 98f52057e45e..7e2d5f9101ad 100644
--- a/sys/modules/opensolaris/Makefile
+++ b/sys/modules/opensolaris/Makefile
@@ -1,4 +1,4 @@
-SYSDIR?= ${SRCTOP}/sys
+SYSDIR?=${SRCTOP}/sys
.PATH: ${SYSDIR}/cddl/compat/opensolaris/kern
.PATH: ${SYSDIR}/contrib/openzfs/module/os/freebsd/spl
diff --git a/sys/modules/ossl/Makefile b/sys/modules/ossl/Makefile
index 7a92742d6b36..c516fe0c158d 100644
--- a/sys/modules/ossl/Makefile
+++ b/sys/modules/ossl/Makefile
@@ -25,10 +25,11 @@ SRCS.arm= \
sha256-armv4.S \
sha512-armv4.S \
ossl_arm.c \
- ossl_aes_gcm.c
+ ossl_aes_gcm_neon.c
SRCS.aarch64= \
chacha-armv8.S \
+ chacha-armv8-sve.S \
poly1305-armv8.S \
sha1-armv8.S \
sha256-armv8.S \
@@ -47,6 +48,7 @@ SRCS.amd64= \
sha256-x86_64.S \
sha512-x86_64.S \
ossl_aes_gcm.c \
+ ossl_aes_gcm_avx512.c \
ossl_x86.c
SRCS.i386= \
@@ -59,6 +61,8 @@ SRCS.i386= \
ossl_x86.c
SRCS.powerpc64le= \
+ aes-gcm-ppc.S \
+ ossl_aes_gcm.c \
ossl_ppccap.c \
aes-ppc.S \
aesp8-ppc.S \
@@ -80,6 +84,8 @@ SRCS.powerpc64le= \
x25519-ppc64.S
SRCS.powerpc64= \
+ aes-gcm-ppc.S \
+ ossl_aes_gcm.c \
ossl_ppccap.c \
aes-ppc.S \
aesp8-ppc.S \
diff --git a/sys/modules/ow/Makefile b/sys/modules/ow/Makefile
index 76fefe3e63be..7aa9d2de8183 100644
--- a/sys/modules/ow/Makefile
+++ b/sys/modules/ow/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
SUBDIR = ow owc ow_temp
.include <bsd.subdir.mk>
diff --git a/sys/modules/qatfw/qat_4xxx/Makefile b/sys/modules/qatfw/qat_4xxx/Makefile
index fb7171bcaf45..f6f19d6cbe32 100644
--- a/sys/modules/qatfw/qat_4xxx/Makefile
+++ b/sys/modules/qatfw/qat_4xxx/Makefile
@@ -4,6 +4,9 @@
KMOD= qat_4xxx_fw
-FIRMWS= qat_4xxx.bin:qat_4xxx_fw:111 qat_4xxx_mmp.bin:qat_4xxx_mmp_fw:111
+FIRMWS= qat_4xxx.bin:qat_4xxx_fw:111 \
+ qat_4xxx_mmp.bin:qat_4xxx_mmp_fw:111 \
+ qat_402xx.bin:qat_402xx_fw:111 \
+ qat_402xx_mmp.bin:qat_402xx_mmp_fw:111
.include <bsd.kmod.mk>
diff --git a/sys/modules/qlnx/Makefile b/sys/modules/qlnx/Makefile
index 2121f9d586a6..291b681c809e 100644
--- a/sys/modules/qlnx/Makefile
+++ b/sys/modules/qlnx/Makefile
@@ -31,9 +31,6 @@
#
#
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
SUBDIR=qlnxe
SUBDIR+=qlnxev
SUBDIR+=qlnxr
diff --git a/sys/modules/qlnx/qlnxev/Makefile b/sys/modules/qlnx/qlnxev/Makefile
index ed62f1f1dd40..766a5a950032 100644
--- a/sys/modules/qlnx/qlnxev/Makefile
+++ b/sys/modules/qlnx/qlnxev/Makefile
@@ -49,6 +49,7 @@ SRCS+=ecore_vf.c
SRCS+=qlnx_ioctl.c
SRCS+=qlnx_os.c
+SRCS+=opt_inet.h
SRCS+= ${LINUXKPI_GENSRCS}
diff --git a/sys/modules/rtw88/Makefile b/sys/modules/rtw88/Makefile
index 9739ede11073..822be639da43 100644
--- a/sys/modules/rtw88/Makefile
+++ b/sys/modules/rtw88/Makefile
@@ -43,6 +43,7 @@ SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_wlan.h opt_inet6.h opt_inet.h
CFLAGS+= -DKBUILD_MODNAME='"rtw88"'
+CFLAGS+= -DLINUXKPI_VERSION=61400
CFLAGS+= -I${DEVRTW88DIR}
CFLAGS+= ${LINUXKPI_INCLUDES}
diff --git a/sys/modules/rtw89/Makefile b/sys/modules/rtw89/Makefile
index 09580f288c62..e66f85c3ac17 100644
--- a/sys/modules/rtw89/Makefile
+++ b/sys/modules/rtw89/Makefile
@@ -39,6 +39,7 @@ SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_wlan.h opt_inet6.h opt_inet.h opt_acpi.h
CFLAGS+= -DKBUILD_MODNAME='"rtw89"'
+CFLAGS+= -DLINUXKPI_VERSION=61400
CFLAGS+= -DLINUXKPI_WANT_LINUX_ACPI
CFLAGS+= -I${DEVRTW89DIR}
diff --git a/sys/modules/rtwn/Makefile b/sys/modules/rtwn/Makefile
index 9afdd2084ecb..f15cbbe8236b 100644
--- a/sys/modules/rtwn/Makefile
+++ b/sys/modules/rtwn/Makefile
@@ -1,7 +1,5 @@
.PATH: ${SRCTOP}/sys/dev/rtwn
-
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
+.include "${SRCTOP}/sys/conf/kern.opts.mk"
KMOD = rtwn
SRCS = if_rtwn.c if_rtwn_tx.c if_rtwn_rx.c if_rtwn_beacon.c \
diff --git a/sys/modules/rtwn_pci/Makefile b/sys/modules/rtwn_pci/Makefile
index ce2144121e88..3fea80d7d256 100644
--- a/sys/modules/rtwn_pci/Makefile
+++ b/sys/modules/rtwn_pci/Makefile
@@ -1,7 +1,5 @@
.PATH: ${SRCTOP}/sys/dev/rtwn/pci
-
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
+.include "${SRCTOP}/sys/conf/kern.opts.mk"
KMOD = if_rtwn_pci
SRCS = rtwn_pci_attach.c rtwn_pci_reg.c rtwn_pci_rx.c rtwn_pci_tx.c \
diff --git a/sys/modules/rtwn_usb/Makefile b/sys/modules/rtwn_usb/Makefile
index 16899b8a8c49..6a73276d088c 100644
--- a/sys/modules/rtwn_usb/Makefile
+++ b/sys/modules/rtwn_usb/Makefile
@@ -1,7 +1,5 @@
.PATH: ${SRCTOP}/sys/dev/rtwn/usb
-
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
+.include "${SRCTOP}/sys/conf/kern.opts.mk"
KMOD = if_rtwn_usb
SRCS = rtwn_usb_attach.c rtwn_usb_ep.c rtwn_usb_reg.c rtwn_usb_rx.c \
diff --git a/sys/modules/sound/driver/Makefile b/sys/modules/sound/driver/Makefile
index ff9499fdf841..02703d4b591a 100644
--- a/sys/modules/sound/driver/Makefile
+++ b/sys/modules/sound/driver/Makefile
@@ -1,5 +1,4 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
+.include "${SRCTOP}/sys/conf/kern.opts.mk"
# Modules that include binary-only blobs of microcode should be selectable by
# the MK_SOURCELESS_UCODE option (see below).
diff --git a/sys/modules/sound/driver/hda/Makefile b/sys/modules/sound/driver/hda/Makefile
index 0eec98fc53e1..1e137dc5671c 100644
--- a/sys/modules/sound/driver/hda/Makefile
+++ b/sys/modules/sound/driver/hda/Makefile
@@ -2,7 +2,7 @@
KMOD= snd_hda
SRCS= device_if.h bus_if.h pci_if.h channel_if.h mixer_if.h hdac_if.h
-SRCS+= hdaa.c hdaa.h hdaa_patches.c hdac.c hdac_if.h hdac_if.c
-SRCS+= hdacc.c hdac_private.h hdac_reg.h hda_reg.h hdac.h
+SRCS+= hdaa.c hdaa.h hdaa_patches.c hdacc.c hdac.c hdac_if.c
+SRCS+= hdac_private.h hdac_reg.h hda_reg.h hdac.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/sound/sound/Makefile b/sys/modules/sound/sound/Makefile
index f3978e9bd9cc..169b1a2730ec 100644
--- a/sys/modules/sound/sound/Makefile
+++ b/sys/modules/sound/sound/Makefile
@@ -1,5 +1,4 @@
SYSDIR?=${SRCTOP}/sys
-
.PATH: ${SYSDIR}/dev/sound
.PATH: ${SYSDIR}/dev/sound/pcm
.PATH: ${SYSDIR}/dev/sound/midi
diff --git a/sys/modules/tests/fib_lookup/Makefile b/sys/modules/tests/fib_lookup/Makefile
index 7d6198396911..b78d4309f145 100644
--- a/sys/modules/tests/fib_lookup/Makefile
+++ b/sys/modules/tests/fib_lookup/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
.PATH: ${SYSDIR}/tests/fib_lookup
KMOD= test_lookup
diff --git a/sys/modules/thunderbolt/Makefile b/sys/modules/thunderbolt/Makefile
new file mode 100644
index 000000000000..3b279f4352d4
--- /dev/null
+++ b/sys/modules/thunderbolt/Makefile
@@ -0,0 +1,13 @@
+.PATH: ${SRCTOP}/sys/dev/thunderbolt
+
+KMOD= tb
+SRCS= nhi_pci.c nhi.c tb_pcib.c tb_acpi_pcib.c tb_debug.c nhi_wmi.c
+SRCS+= router.c hcm.c tb_dev.c
+SRCS+= opt_thunderbolt.h
+SRCS+= device_if.h bus_if.h pci_if.h pcib_if.h tb_if.c tb_if.h
+SRCS+= opt_acpi.h opt_acpi_wmi.h acpi_if.h acpi_wmi_if.h
+
+opt_thunderbolt.h:
+ echo "#define THUNDERBOLT_DEBUG 1" > ${.TARGET}
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/uinput/Makefile b/sys/modules/uinput/Makefile
index 66ade2a5bb33..a9e2ec867b91 100644
--- a/sys/modules/uinput/Makefile
+++ b/sys/modules/uinput/Makefile
@@ -2,6 +2,6 @@
KMOD= uinput
SRCS= uinput.c
-SRCS+= opt_evdev.h
+SRCS+= opt_evdev.h opt_kbd.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/usb/Makefile b/sys/modules/usb/Makefile
index 1290b878fa37..d9b1c8635b30 100644
--- a/sys/modules/usb/Makefile
+++ b/sys/modules/usb/Makefile
@@ -46,10 +46,9 @@ SUBDIR = usb
SUBDIR += ${_dwc_otg} ehci ${_musb} ohci uhci xhci ${_uss820dci} \
${_atmegadci} ${_avr32dci} ${_rsu} ${_rsufw} ${_bcm2838_xhci}
SUBDIR += mtw ${_rum} ${_run} ${_runfw} ${_uath} upgt usie ural ${_zyd} ${_urtw}
-SUBDIR += atp cfumass uhid uhid_snes ukbd ums udbp uep wmt wsp ugold uled \
- usbhid
-SUBDIR += ucom u3g uark ubsa ubser uchcom ucycom ufoma uftdi ugensa uipaq ulpt \
- umb umct umcs umodem umoscom uplcom uslcom uvisor uvscom
+SUBDIR += atp cfumass uhid uhid_snes ukbd ums udbp uep wmt wsp ugold uled usbhid
+SUBDIR += ucom u3g uark ubsa ubser uchcom ucycom udbc ufoma uftdi ugensa uipaq
+SUBDIR += ulpt umb umct umcs umodem umoscom uplcom uslcom uvisor uvscom
SUBDIR += i2ctinyusb
SUBDIR += cp2112
SUBDIR += udl
diff --git a/sys/modules/usb/udbc/Makefile b/sys/modules/usb/udbc/Makefile
new file mode 100644
index 000000000000..9996b2e391fb
--- /dev/null
+++ b/sys/modules/usb/udbc/Makefile
@@ -0,0 +1,9 @@
+S= ${SRCTOP}/sys
+
+.PATH: $S/dev/usb/serial
+
+KMOD= udbc
+SRCS= opt_bus.h opt_usb.h device_if.h bus_if.h usb_if.h usbdevs.h \
+ udbc.c
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/usb/usie/Makefile b/sys/modules/usb/usie/Makefile
index 6a5f79248ff8..9edeed082f8d 100644
--- a/sys/modules/usb/usie/Makefile
+++ b/sys/modules/usb/usie/Makefile
@@ -29,6 +29,6 @@
KMOD = usie
SRCS = if_usie.c
SRCS += opt_bus.h opt_usb.h device_if.h bus_if.h \
- usb_if.h usbdevs.h opt_inet.h
+ usb_if.h usbdevs.h opt_inet.h opt_inet6.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/usb/wmt/Makefile b/sys/modules/usb/wmt/Makefile
index 72cf1d814908..8cb5abd7383e 100644
--- a/sys/modules/usb/wmt/Makefile
+++ b/sys/modules/usb/wmt/Makefile
@@ -3,6 +3,6 @@ S= ${SRCTOP}/sys
.PATH: $S/dev/usb/input
KMOD= wmt
-SRCS= opt_bus.h opt_usb.h device_if.h bus_if.h usb_if.h usbdevs.h wmt.c
+SRCS= opt_bus.h opt_kbd.h opt_usb.h device_if.h bus_if.h usb_if.h usbdevs.h wmt.c
.include <bsd.kmod.mk>
diff --git a/sys/modules/vnic/Makefile b/sys/modules/vnic/Makefile
index 7b975bfebe81..53e208328159 100644
--- a/sys/modules/vnic/Makefile
+++ b/sys/modules/vnic/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
CFLAGS+= -DFDT
SUBDIR = mrmlbus thunder_mdio thunder_bgx vnicpf vnicvf
diff --git a/sys/modules/vnic/mrmlbus/Makefile b/sys/modules/vnic/mrmlbus/Makefile
index a3581b7a79a5..a8fe9e5474e1 100644
--- a/sys/modules/vnic/mrmlbus/Makefile
+++ b/sys/modules/vnic/mrmlbus/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
S= ${SRCTOP}/sys
.PATH: $S/dev/vnic
diff --git a/sys/modules/vnic/thunder_bgx/Makefile b/sys/modules/vnic/thunder_bgx/Makefile
index 90df4b25df90..bf46c3194493 100644
--- a/sys/modules/vnic/thunder_bgx/Makefile
+++ b/sys/modules/vnic/thunder_bgx/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
S= ${SRCTOP}/sys
.PATH: $S/dev/vnic
diff --git a/sys/modules/vnic/thunder_mdio/Makefile b/sys/modules/vnic/thunder_mdio/Makefile
index 37032516f3ca..07cc583bfaf8 100644
--- a/sys/modules/vnic/thunder_mdio/Makefile
+++ b/sys/modules/vnic/thunder_mdio/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
S= ${SRCTOP}/sys
.PATH: $S/dev/vnic
diff --git a/sys/modules/vnic/vnicpf/Makefile b/sys/modules/vnic/vnicpf/Makefile
index 37cd29e6fdd8..3cd64d08a788 100644
--- a/sys/modules/vnic/vnicpf/Makefile
+++ b/sys/modules/vnic/vnicpf/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
S= ${SRCTOP}/sys
.PATH: $S/dev/vnic
diff --git a/sys/modules/vnic/vnicvf/Makefile b/sys/modules/vnic/vnicvf/Makefile
index c6ffaaa2c302..da938b7fd073 100644
--- a/sys/modules/vnic/vnicvf/Makefile
+++ b/sys/modules/vnic/vnicvf/Makefile
@@ -1,6 +1,3 @@
-SYSDIR?=${SRCTOP}/sys
-.include "${SYSDIR}/conf/kern.opts.mk"
-
S= ${SRCTOP}/sys
.PATH: $S/dev/vnic
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index 2dd9e2be3f56..ec531ed646a7 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -15,6 +15,7 @@ KMOD= zfs
${SRCDIR}/icp/asm-ppc64/sha2 \
${SRCDIR}/icp/asm-ppc64/blake3 \
${SRCDIR}/icp/asm-x86_64/blake3 \
+ ${SRCDIR}/icp/asm-x86_64/modes \
${SRCDIR}/icp/asm-x86_64/sha2 \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
@@ -40,7 +41,8 @@ CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS \
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
- -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
+ -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW \
+ -DHAVE_VAES -DHAVE_VPCLMULQDQ
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
@@ -82,6 +84,9 @@ SRCS+= blake3_avx2.S \
blake3_avx512.S \
blake3_sse2.S \
blake3_sse41.S
+
+#icp/asm-x86_64/modes
+SRCS+= aesni-gcm-avx2-vaes.S
.endif
#icp/algs/sha2
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index 6561d62b5e26..db1b6f33a8ef 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -258,6 +258,9 @@
/* dops->d_revalidate() takes 4 args */
/* #undef HAVE_D_REVALIDATE_4ARGS */
+/* Define if d_set_d_op() is available */
+/* #undef HAVE_D_SET_D_OP */
+
/* Define to 1 if you have the 'execvpe' function. */
#define HAVE_EXECVPE 1
@@ -483,9 +486,6 @@
/* building against unsupported kernel version */
/* #undef HAVE_LINUX_EXPERIMENTAL */
-/* Define to 1 if you have the <linux/stat.h> header file. */
-/* #undef HAVE_LINUX_STAT_H */
-
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
@@ -582,6 +582,9 @@
/* iops->set_acl() takes 4 args, arg2 is struct dentry * */
/* #undef HAVE_SET_ACL_USERNS_DENTRY_ARG2 */
+/* Define if set_default_d_op() is available */
+/* #undef HAVE_SET_DEFAULT_D_OP */
+
/* shrinker_register exists */
/* #undef HAVE_SHRINKER_REGISTER */
@@ -704,6 +707,11 @@
/* iops->setattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_SETATTR */
+#ifdef __amd64__
+/* Define if host toolchain supports VAES */
+#define HAVE_VAES 1
+#endif
+
/* fops->clone_file_range() is available */
/* #undef HAVE_VFS_CLONE_FILE_RANGE */
@@ -743,6 +751,11 @@
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
+#ifdef __amd64__
+/* Define if host toolchain supports VPCLMULQDQ */
+#define HAVE_VPCLMULQDQ 1
+#endif
+
/* int (*writepage_t)() takes struct folio* */
/* #undef HAVE_WRITEPAGE_T_FOLIO */
@@ -830,7 +843,7 @@
/* #undef ZFS_DEVICE_MINOR */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.3.99-515-FreeBSD_g8302b6e32"
+#define ZFS_META_ALIAS "zfs-2.4.99-95-FreeBSD_g5605a6d79"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
@@ -839,7 +852,7 @@
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "6.15"
+#define ZFS_META_KVER_MAX "6.17"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "4.18"
@@ -860,10 +873,10 @@
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "515-FreeBSD_g8302b6e32"
+#define ZFS_META_RELEASE "95-FreeBSD_g5605a6d79"
/* Define the project version. */
-#define ZFS_META_VERSION "2.3.99"
+#define ZFS_META_VERSION "2.4.99"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 2f1ffe504350..8a1802f5480b 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.3.99-515-g8302b6e32"
+#define ZFS_META_GITREV "zfs-2.4.99-95-g5605a6d79"
diff --git a/sys/net/if.c b/sys/net/if.c
index 79c883fd4a0a..b6a798aa0fab 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -74,7 +74,6 @@
#include <vm/uma.h>
#include <net/bpf.h>
-#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_clone.h>
@@ -1102,6 +1101,7 @@ if_detach_internal(struct ifnet *ifp, bool vmove)
struct ifaddr *ifa;
int i;
struct domain *dp;
+ void *if_afdata[AF_MAX];
#ifdef VIMAGE
bool shutdown;
@@ -1225,15 +1225,30 @@ finish_vnet_shutdown:
IF_AFDATA_LOCK(ifp);
i = ifp->if_afdata_initialized;
ifp->if_afdata_initialized = 0;
+ if (i != 0) {
+ /*
+ * Defer the dom_ifdetach call.
+ */
+ _Static_assert(sizeof(if_afdata) == sizeof(ifp->if_afdata),
+ "array size mismatch");
+ memcpy(if_afdata, ifp->if_afdata, sizeof(if_afdata));
+ memset(ifp->if_afdata, 0, sizeof(ifp->if_afdata));
+ }
IF_AFDATA_UNLOCK(ifp);
if (i == 0)
return;
+ /*
+ * XXXZL: This net epoch wait is not necessary if everything is done
+ * right. But if it is not, we can at least guarantee that threads
+ * entering the net epoch will see NULL address-family-dependent data,
+ * e.g. if_afdata[AF_INET6]. A clean NULL pointer dereference is much
+ * better than writing to freed memory.
+ */
+ NET_EPOCH_WAIT();
SLIST_FOREACH(dp, &domains, dom_next) {
- if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) {
- (*dp->dom_ifdetach)(ifp,
- ifp->if_afdata[dp->dom_family]);
- ifp->if_afdata[dp->dom_family] = NULL;
- }
+ if (dp->dom_ifdetach != NULL &&
+ if_afdata[dp->dom_family] != NULL)
+ (*dp->dom_ifdetach)(ifp, if_afdata[dp->dom_family]);
}
}
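The if.c change above reworks teardown ordering: the per-AF pointers are snapshotted and zeroed under the AFDATA lock, and dom_ifdetach only runs on the private copy after NET_EPOCH_WAIT() has drained readers, so a thread racing with detach can observe NULL but never a dangling pointer. A compilable userspace model of that ordering, with all names illustrative:

/*
 * Standalone model of the snapshot-then-wait teardown used in
 * if_detach_internal() above.  All names here are illustrative; the
 * kernel uses IF_AFDATA_LOCK()/NET_EPOCH_WAIT() and dom_ifdetach.
 */
#include <stdio.h>
#include <string.h>

#define AF_MAX 4

static void *afdata[AF_MAX] = { NULL, "inet-data", NULL, "inet6-data" };

static void
wait_for_readers(void)
{
	/* Stands in for NET_EPOCH_WAIT(): current readers drain here. */
}

static void
dom_ifdetach(int af, void *data)
{
	printf("detach af=%d data=%s\n", af, (const char *)data);
}

static void
detach_all(void)
{
	void *snap[AF_MAX];

	/* Under the AFDATA lock: copy the pointers out, publish NULLs. */
	memcpy(snap, afdata, sizeof(snap));
	memset(afdata, 0, sizeof(afdata));

	/* New readers now see NULL, never a dangling pointer. */
	wait_for_readers();

	for (int af = 0; af < AF_MAX; af++)
		if (snap[af] != NULL)
			dom_ifdetach(af, snap[af]);
}

int
main(void)
{
	detach_all();
	return (0);
}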
@@ -2589,16 +2604,7 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
* flip. They require special handling because in-kernel
* consumers may independently toggle them.
*/
- if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
- if (new_flags & IFF_PPROMISC)
- ifp->if_flags |= IFF_PROMISC;
- else if (ifp->if_pcount == 0)
- ifp->if_flags &= ~IFF_PROMISC;
- if (log_promisc_mode_change)
- if_printf(ifp, "permanently promiscuous mode %s\n",
- ((new_flags & IFF_PPROMISC) ?
- "enabled" : "disabled"));
- }
+ if_setppromisc(ifp, new_flags & IFF_PPROMISC);
if ((ifp->if_flags ^ new_flags) & IFF_PALLMULTI) {
if (new_flags & IFF_PALLMULTI)
ifp->if_flags |= IFF_ALLMULTI;
@@ -4456,6 +4462,32 @@ if_getmtu_family(const if_t ifp, int family)
return (ifp->if_mtu);
}
+void
+if_setppromisc(if_t ifp, bool ppromisc)
+{
+ int new_flags;
+
+ if (ppromisc)
+ new_flags = ifp->if_flags | IFF_PPROMISC;
+ else
+ new_flags = ifp->if_flags & ~IFF_PPROMISC;
+ if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
+ if (new_flags & IFF_PPROMISC)
+ new_flags |= IFF_PROMISC;
+ /*
+ * Only unset IFF_PROMISC if there are no more consumers of
+ * promiscuity, i.e. the ifp->if_pcount refcount is 0.
+ */
+ else if (ifp->if_pcount == 0)
+ new_flags &= ~IFF_PROMISC;
+ if (log_promisc_mode_change)
+ if_printf(ifp, "permanently promiscuous mode %s\n",
+ ((new_flags & IFF_PPROMISC) ?
+ "enabled" : "disabled"));
+ }
+ ifp->if_flags = new_flags;
+}
+
/*
* Methods for drivers to access interface unicast and multicast
* link level addresses. Driver shall not know 'struct ifaddr' neither
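The IFF_PPROMISC logic formerly open-coded in ifhwioctl() moves into the new if_setppromisc() KPI added above (and declared in the if_var.h hunk below). The coupling rule is worth spelling out: enabling permanent promiscuity also sets IFF_PROMISC, but disabling it clears IFF_PROMISC only when no other consumer, tracked by if_pcount, still holds promiscuity. A small compilable model of that rule; the flag values here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define IFF_PROMISC  0x0100	/* illustrative bit values */
#define IFF_PPROMISC 0x0200

/*
 * Pure-function model of the if_setppromisc() logic above: IFF_PROMISC
 * follows IFF_PPROMISC when enabling, but is only cleared when no other
 * consumer (pcount) still needs promiscuity.
 */
static int
model_setppromisc(int flags, int pcount, bool ppromisc)
{
	int nf = ppromisc ? (flags | IFF_PPROMISC) : (flags & ~IFF_PPROMISC);

	if ((flags ^ nf) & IFF_PPROMISC) {
		if (nf & IFF_PPROMISC)
			nf |= IFF_PROMISC;
		else if (pcount == 0)
			nf &= ~IFF_PROMISC;
	}
	return (nf);
}

int
main(void)
{
	int f = model_setppromisc(0, 0, true);
	printf("%s\n", (f & IFF_PROMISC) ? "promisc on" : "promisc off");
	f = model_setppromisc(f, 1, false);	/* one consumer remains */
	printf("%s\n", (f & IFF_PROMISC) ? "promisc kept" : "promisc off");
	return (0);
}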
diff --git a/sys/net/if_bridge.c b/sys/net/if_bridge.c
index 1e444be93e9f..d7911a348d87 100644
--- a/sys/net/if_bridge.c
+++ b/sys/net/if_bridge.c
@@ -522,11 +522,11 @@ SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
"Log MAC address port flapping");
/* allow IP addresses on bridge members */
-VNET_DEFINE_STATIC(bool, member_ifaddrs) = false;
+VNET_DEFINE_STATIC(bool, member_ifaddrs) = true;
#define V_member_ifaddrs VNET(member_ifaddrs)
SYSCTL_BOOL(_net_link_bridge, OID_AUTO, member_ifaddrs,
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(member_ifaddrs), false,
- "Allow layer 3 addresses on bridge members");
+ "Allow layer 3 addresses on bridge members (deprecated)");
static bool
bridge_member_ifaddrs(void)
@@ -1448,24 +1448,30 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
/*
* If member_ifaddrs is disabled, do not allow an interface with
- * assigned IP addresses to be added to a bridge.
+ * assigned IP addresses to be added to a bridge. Skip this check
+ * for gif interfaces, because the IP address assigned to a gif
+ * interface is separate from the bridge's Ethernet segment.
*/
- if (!V_member_ifaddrs) {
+ if (ifs->if_type != IFT_GIF) {
struct ifaddr *ifa;
CK_STAILQ_FOREACH(ifa, &ifs->if_addrhead, ifa_link) {
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- return (EXTERROR(EINVAL,
- "Member interface may not have "
- "an IPv4 address configured"));
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
+ if (ifa->ifa_addr->sa_family != AF_INET &&
+ ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+
+ if (V_member_ifaddrs) {
+ if_printf(sc->sc_ifp,
+ "WARNING: Adding member interface %s which "
+ "has an IP address assigned is deprecated "
+ "and will be unsupported in a future "
+ "release.\n", ifs->if_xname);
+ break;
+ } else {
return (EXTERROR(EINVAL,
"Member interface may not have "
- "an IPv6 address configured"));
-#endif
+ "an IP address assigned"));
+ }
}
}
@@ -1500,8 +1506,7 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
bif->bif_savedcaps = ifs->if_capenable;
bif->bif_vlanproto = ETHERTYPE_VLAN;
- if (sc->sc_flags & IFBRF_VLANFILTER)
- bif->bif_pvid = sc->sc_defpvid;
+ bif->bif_pvid = sc->sc_defpvid;
if (sc->sc_flags & IFBRF_DEFQINQ)
bif->bif_flags |= IFBIF_QINQ;
@@ -1970,9 +1975,6 @@ bridge_ioctl_sifpvid(struct bridge_softc *sc, void *arg)
struct ifbreq *req = arg;
struct bridge_iflist *bif;
- if ((sc->sc_flags & IFBRF_VLANFILTER) == 0)
- return (EXTERROR(EINVAL, "VLAN filtering not enabled"));
-
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
if (bif == NULL)
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
@@ -2402,6 +2404,12 @@ bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
return (EINVAL);
}
+ /* Do VLAN filtering. */
+ if (!bridge_vfilter_out(bif, m)) {
+ m_freem(m);
+ return (0);
+ }
+
/* We may be sending a fragment so traverse the mbuf */
for (; m; m = m0) {
m0 = m->m_nextpkt;
@@ -2410,12 +2418,10 @@ bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
mflags = m->m_flags;
/*
- * If VLAN filtering is enabled, and the native VLAN ID of the
- * outgoing interface matches the VLAN ID of the frame, remove
- * the VLAN header.
+ * If the native VLAN ID of the outgoing interface matches the
+ * VLAN ID of the frame, remove the VLAN tag.
*/
- if ((sc->sc_flags & IFBRF_VLANFILTER) &&
- bif->bif_pvid != DOT1Q_VID_NULL &&
+ if (bif->bif_pvid != DOT1Q_VID_NULL &&
VLANTAGOF(m) == bif->bif_pvid) {
m->m_flags &= ~M_VLANTAG;
m->m_pkthdr.ether_vtag = 0;
@@ -2823,10 +2829,6 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
goto drop;
- /* Do VLAN filtering. */
- if (!bridge_vfilter_out(dbif, m))
- goto drop;
-
if ((dbif->bif_flags & IFBIF_STP) &&
dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
goto drop;
@@ -3195,10 +3197,6 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
continue;
- /* Do VLAN filtering. */
- if (!bridge_vfilter_out(dbif, m))
- continue;
-
if ((dbif->bif_flags & IFBIF_STP) &&
dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
continue;
@@ -3296,9 +3294,19 @@ bridge_vfilter_in(const struct bridge_iflist *sbif, struct mbuf *m)
if (vlan > DOT1Q_VID_MAX)
return (false);
- /* If VLAN filtering isn't enabled, pass everything. */
- if ((sbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
+ /*
+ * If VLAN filtering isn't enabled, pass everything, but add a tag
+ * if the port has a pvid configured.
+ */
+ if ((sbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0) {
+ if (vlan == DOT1Q_VID_NULL &&
+ sbif->bif_pvid != DOT1Q_VID_NULL) {
+ m->m_pkthdr.ether_vtag = sbif->bif_pvid;
+ m->m_flags |= M_VLANTAG;
+ }
+
return (true);
+ }
/* If Q-in-Q is disabled, check for stacked tags. */
if ((sbif->bif_flags & IFBIF_QINQ) == 0) {
@@ -3354,6 +3362,14 @@ bridge_vfilter_out(const struct bridge_iflist *dbif, const struct mbuf *m)
NET_EPOCH_ASSERT();
+ /*
+ * If the interface is in span mode, then bif_sc will be NULL.
+ * Since the purpose of span interfaces is to receive all frames,
+ * pass everything.
+ */
+ if (dbif->bif_sc == NULL)
+ return (true);
+
/* If VLAN filtering isn't enabled, pass everything. */
if ((dbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
return (true);
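Two behavior changes ride along in the bridge hunks above: port pvid handling no longer requires IFBRF_VLANFILTER (with filtering off, untagged ingress frames now inherit the member port's pvid), and egress VLAN filtering moves from bridge_forward()/bridge_broadcast() into bridge_enqueue(), with span ports (bif_sc == NULL) passing everything. A compilable model of the new untagged-ingress rule; the struct and names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DOT1Q_VID_NULL 0

struct frame { uint16_t vid; bool tagged; };

/*
 * Model of the bridge_vfilter_in() change above for a bridge with VLAN
 * filtering disabled: untagged frames inherit the member port's pvid so
 * later egress untagging against the pvid still works.
 */
static bool
vfilter_in_nofilter(struct frame *f, uint16_t pvid)
{
	if (!f->tagged && pvid != DOT1Q_VID_NULL) {
		f->vid = pvid;
		f->tagged = true;
	}
	return (true);		/* filtering disabled: always pass */
}

int
main(void)
{
	struct frame f = { .vid = 0, .tagged = false };

	(void)vfilter_in_nofilter(&f, 100);
	printf("vid=%u tagged=%d\n", f.vid, f.tagged);	/* vid=100 tagged=1 */
	return (0);
}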
diff --git a/sys/net/if_bridgevar.h b/sys/net/if_bridgevar.h
index b0f579f688ac..5ed8c19f3128 100644
--- a/sys/net/if_bridgevar.h
+++ b/sys/net/if_bridgevar.h
@@ -159,7 +159,7 @@ struct ifbreq {
uint32_t ifbr_addrexceeded; /* member if addr violations */
ether_vlanid_t ifbr_pvid; /* member if PVID */
uint16_t ifbr_vlanproto; /* member if VLAN protocol */
- uint8_t pad[32];
+ uint8_t pad[28];
};
/* BRDGGIFFLAGS, BRDGSIFFLAGS */
diff --git a/sys/net/if_clone.h b/sys/net/if_clone.h
index 5a74ffa1cc2f..d780e49af25f 100644
--- a/sys/net/if_clone.h
+++ b/sys/net/if_clone.h
@@ -153,7 +153,7 @@ int if_clone_destroy(const char *);
int if_clone_list(struct if_clonereq *);
void if_clone_restoregroup(struct ifnet *);
-/* The below interfaces are used only by epair(4). */
+/* The below interfaces are used only by epair(4) and tun(4)/tap(4). */
void if_clone_addif(struct if_clone *, struct ifnet *);
int if_clone_destroyif(struct if_clone *, struct ifnet *);
diff --git a/sys/net/if_epair.c b/sys/net/if_epair.c
index a213a84e17db..fbffa8f359a0 100644
--- a/sys/net/if_epair.c
+++ b/sys/net/if_epair.c
@@ -67,9 +67,9 @@
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_media.h>
-#include <net/if_var.h>
#include <net/if_private.h>
#include <net/if_types.h>
+#include <net/if_vlan_var.h>
#include <net/netisr.h>
#ifdef RSS
#include <net/rss_config.h>
@@ -435,6 +435,21 @@ epair_media_status(struct ifnet *ifp __unused, struct ifmediareq *imr)
imr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
+/*
+ * Update ifp->if_hwassist according to the current value of ifp->if_capenable.
+ */
+static void
+epair_caps_changed(struct ifnet *ifp)
+{
+ uint64_t hwassist = 0;
+
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ hwassist |= CSUM_IP_TCP | CSUM_IP_UDP;
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ hwassist |= CSUM_IP6_TCP | CSUM_IP6_UDP;
+ ifp->if_hwassist = hwassist;
+}
+
static int
epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
@@ -462,6 +477,44 @@ epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
error = 0;
break;
+ case SIOCGIFCAP:
+ ifr->ifr_reqcap = ifp->if_capabilities;
+ ifr->ifr_curcap = ifp->if_capenable;
+ error = 0;
+ break;
+ case SIOCSIFCAP:
+ /*
+ * Enable/disable capabilities as requested, except for
+ * IFCAP_RXCSUM(_IPV6), which always remains enabled.
+ * Incoming packets may have the mbuf flag CSUM_DATA_VALID set.
+ * Without IFCAP_RXCSUM(_IPV6), this flag would have to be
+ * removed, which does not seem helpful.
+ */
+ ifp->if_capenable = ifr->ifr_reqcap | IFCAP_RXCSUM |
+ IFCAP_RXCSUM_IPV6;
+ epair_caps_changed(ifp);
+ /*
+ * If IFCAP_TXCSUM(_IPV6) has been changed, change it on the
+ * other epair interface as well.
+ * A bridge disables IFCAP_TXCSUM(_IPV6) when adding one epair
+ * interface if another interface in the bridge has it disabled.
+ * In that case this capability needs to be disabled on the
+ * other epair interface to avoid sending packets in the bridge
+ * that rely on this capability.
+ */
+ sc = ifp->if_softc;
+ if ((ifp->if_capenable ^ sc->oifp->if_capenable) &
+ (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
+ sc->oifp->if_capenable &=
+ ~(IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
+ sc->oifp->if_capenable |= ifp->if_capenable &
+ (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
+ epair_caps_changed(sc->oifp);
+ }
+ VLAN_CAPABILITIES(ifp);
+ error = 0;
+ break;
+
default:
/* Let the common ethernet handler process this. */
error = ether_ioctl(ifp, cmd, data);
@@ -573,8 +626,11 @@ epair_setup_ifp(struct epair_softc *sc, char *name, int unit)
ifp->if_dname = epairname;
ifp->if_dunit = unit;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_capabilities = IFCAP_VLAN_MTU;
- ifp->if_capenable = IFCAP_VLAN_MTU;
+ ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_TXCSUM |
+ IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+ ifp->if_capenable = IFCAP_VLAN_MTU | IFCAP_TXCSUM |
+ IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+ epair_caps_changed(ifp);
ifp->if_transmit = epair_transmit;
ifp->if_qflush = epair_qflush;
ifp->if_start = epair_start;
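epair(4) now advertises TX/RX checksum offload. Because epair packets never reach hardware, an enabled TX capability means the checksum is simply never computed, so the TX bits must stay identical on both halves of the pair; otherwise a bridge that disabled offload on one member could forward packets whose checksums nothing fills in. A compilable model of the mirroring rule from the SIOCSIFCAP case above (bit values illustrative):

#include <stdio.h>

#define IFCAP_TXCSUM      0x0001	/* illustrative bit values */
#define IFCAP_TXCSUM_IPV6 0x0002

/*
 * Model of the epair SIOCSIFCAP handling above: when the TX checksum
 * bits change on one half of the pair, copy them to the other half so
 * a bridge that disabled offload on one side is honored on both.
 */
static void
mirror_txcsum(int *capenable, int *ocapenable)
{
	if ((*capenable ^ *ocapenable) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
		*ocapenable &= ~(IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
		*ocapenable |= *capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	}
}

int
main(void)
{
	int a = IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, b = 0;

	mirror_txcsum(&a, &b);
	printf("peer caps: %#x\n", b);	/* now matches: 0x3 */
	return (0);
}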
diff --git a/sys/net/if_ethersubr.c b/sys/net/if_ethersubr.c
index 3ae0c01c0efc..9c157bf3d3c2 100644
--- a/sys/net/if_ethersubr.c
+++ b/sys/net/if_ethersubr.c
@@ -695,7 +695,7 @@ ether_input_internal(struct ifnet *ifp, struct mbuf *m)
* seen by upper protocol layers.
*/
if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
- bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
+ memcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
m->m_flags |= M_PROMISC;
}
diff --git a/sys/net/if_ovpn.c b/sys/net/if_ovpn.c
index fe015632f33e..1c18baac3417 100644
--- a/sys/net/if_ovpn.c
+++ b/sys/net/if_ovpn.c
@@ -904,9 +904,11 @@ ovpn_create_kkey_dir(struct ovpn_kkey_dir **kdirp,
kdir->cipher = cipher;
kdir->keylen = keylen;
kdir->tx_seq = 1;
- memcpy(kdir->key, key, keylen);
+ if (keylen != 0)
+ memcpy(kdir->key, key, keylen);
kdir->noncelen = ivlen;
- memcpy(kdir->nonce, iv, ivlen);
+ if (ivlen != 0)
+ memcpy(kdir->nonce, iv, ivlen);
if (kdir->cipher != OVPN_CIPHER_ALG_NONE) {
/* Crypto init */
diff --git a/sys/net/if_pfsync.h b/sys/net/if_pfsync.h
index 1efc220aa8e1..7b3177e1137d 100644
--- a/sys/net/if_pfsync.h
+++ b/sys/net/if_pfsync.h
@@ -62,9 +62,10 @@ enum pfsync_msg_versions {
PFSYNC_MSG_VERSION_UNSPECIFIED = 0,
PFSYNC_MSG_VERSION_1301 = 1301,
PFSYNC_MSG_VERSION_1400 = 1400,
+ PFSYNC_MSG_VERSION_1500 = 1500,
};
-#define PFSYNC_MSG_VERSION_DEFAULT PFSYNC_MSG_VERSION_1400
+#define PFSYNC_MSG_VERSION_DEFAULT PFSYNC_MSG_VERSION_1500
#define PFSYNC_ACT_CLR 0 /* clear all states */
#define PFSYNC_ACT_INS_1301 1 /* insert state */
@@ -81,7 +82,9 @@ enum pfsync_msg_versions {
#define PFSYNC_ACT_EOF 12 /* end of frame */
#define PFSYNC_ACT_INS_1400 13 /* insert state */
#define PFSYNC_ACT_UPD_1400 14 /* update state */
-#define PFSYNC_ACT_MAX 15
+#define PFSYNC_ACT_INS_1500 15 /* insert state */
+#define PFSYNC_ACT_UPD_1500 16 /* update state */
+#define PFSYNC_ACT_MAX 17
/*
* A pfsync frame is built from a header followed by several sections which
@@ -160,8 +163,8 @@ struct pfsync_ins_ack {
struct pfsync_upd_c {
u_int64_t id;
- struct pfsync_state_peer src;
- struct pfsync_state_peer dst;
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
u_int32_t creatorid;
u_int32_t expire;
u_int8_t timeout;
diff --git a/sys/net/if_tap.h b/sys/net/if_tap.h
index d84cd2eba6f3..8297b8d9e3d2 100644
--- a/sys/net/if_tap.h
+++ b/sys/net/if_tap.h
@@ -57,6 +57,8 @@
#define TAPGIFNAME TUNGIFNAME
#define TAPSVNETHDR _IOW('t', 91, int)
#define TAPGVNETHDR _IOR('t', 94, int)
+#define TAPSTRANSIENT TUNSTRANSIENT
+#define TAPGTRANSIENT TUNGTRANSIENT
/* VMware ioctl's */
#define VMIO_SIOCSIFFLAGS _IOWINT('V', 0)
diff --git a/sys/net/if_tun.h b/sys/net/if_tun.h
index a8fb61db45a2..ccdc25944823 100644
--- a/sys/net/if_tun.h
+++ b/sys/net/if_tun.h
@@ -43,5 +43,7 @@ struct tuninfo {
#define TUNSIFPID _IO('t', 95)
#define TUNSIFHEAD _IOW('t', 96, int)
#define TUNGIFHEAD _IOR('t', 97, int)
+#define TUNSTRANSIENT _IOW('t', 98, int)
+#define TUNGTRANSIENT _IOR('t', 99, int)
#endif /* !_NET_IF_TUN_H_ */
diff --git a/sys/net/if_tuntap.c b/sys/net/if_tuntap.c
index 5e6f65c04b2f..c8dbb6aa8893 100644
--- a/sys/net/if_tuntap.c
+++ b/sys/net/if_tuntap.c
@@ -132,6 +132,7 @@ struct tuntap_softc {
#define TUN_DYING 0x0200
#define TUN_L2 0x0400
#define TUN_VMNET 0x0800
+#define TUN_TRANSIENT 0x1000
#define TUN_DRIVER_IDENT_MASK (TUN_L2 | TUN_VMNET)
#define TUN_READY (TUN_OPEN | TUN_INITED)
@@ -443,6 +444,18 @@ tuntap_name2info(const char *name, int *outunit, int *outflags)
return (0);
}
+static struct if_clone *
+tuntap_cloner_from_flags(int tun_flags)
+{
+
+ for (u_int i = 0; i < NDRV; i++)
+ if ((tun_flags & TUN_DRIVER_IDENT_MASK) ==
+ tuntap_drivers[i].ident_flags)
+ return (V_tuntap_driver_cloners[i]);
+
+ return (NULL);
+}
+
/*
* Get driver information from a set of flags specified. Masks the identifying
* part of the flags and compares it against all of the available
@@ -615,19 +628,39 @@ out:
CURVNET_RESTORE();
}
-static void
-tun_destroy(struct tuntap_softc *tp)
+static int
+tun_destroy(struct tuntap_softc *tp, bool may_intr)
{
+ int error;
TUN_LOCK(tp);
+
+ /*
+ * Transient tunnels may have set TUN_DYING if we're being destroyed as
+ * a result of the last close, which we'll allow.
+ */
+ MPASS((tp->tun_flags & (TUN_DYING | TUN_TRANSIENT)) != TUN_DYING);
tp->tun_flags |= TUN_DYING;
- if (tp->tun_busy != 0)
- cv_wait_unlock(&tp->tun_cv, &tp->tun_mtx);
- else
- TUN_UNLOCK(tp);
+ error = 0;
+ while (tp->tun_busy != 0) {
+ if (may_intr)
+ error = cv_wait_sig(&tp->tun_cv, &tp->tun_mtx);
+ else
+ cv_wait(&tp->tun_cv, &tp->tun_mtx);
+ if (error != 0) {
+ tp->tun_flags &= ~TUN_DYING;
+ TUN_UNLOCK(tp);
+ return (error);
+ }
+ }
+ TUN_UNLOCK(tp);
CURVNET_SET(TUN2IFP(tp)->if_vnet);
+ mtx_lock(&tunmtx);
+ TAILQ_REMOVE(&tunhead, tp, tun_list);
+ mtx_unlock(&tunmtx);
+
/* destroy_dev will take care of any alias. */
destroy_dev(tp->tun_dev);
seldrain(&tp->tun_rsel);
@@ -648,6 +681,8 @@ tun_destroy(struct tuntap_softc *tp)
cv_destroy(&tp->tun_cv);
free(tp, M_TUN);
CURVNET_RESTORE();
+
+ return (0);
}
static int
@@ -655,12 +690,7 @@ tun_clone_destroy(struct if_clone *ifc __unused, struct ifnet *ifp, uint32_t fla
{
struct tuntap_softc *tp = ifp->if_softc;
- mtx_lock(&tunmtx);
- TAILQ_REMOVE(&tunhead, tp, tun_list);
- mtx_unlock(&tunmtx);
- tun_destroy(tp);
-
- return (0);
+ return (tun_destroy(tp, true));
}
static void
@@ -702,9 +732,9 @@ tun_uninit(const void *unused __unused)
mtx_lock(&tunmtx);
while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
- TAILQ_REMOVE(&tunhead, tp, tun_list);
mtx_unlock(&tunmtx);
- tun_destroy(tp);
+ /* tun_destroy() will remove it from the tailq. */
+ tun_destroy(tp, false);
mtx_lock(&tunmtx);
}
mtx_unlock(&tunmtx);
@@ -1217,6 +1247,23 @@ out:
tun_vnethdr_set(ifp, 0);
tun_unbusy_locked(tp);
+ if ((tp->tun_flags & TUN_TRANSIENT) != 0) {
+ struct if_clone *cloner;
+ int error __diagused;
+
+ /* Mark it dying so that nothing can re-open it. */
+ tp->tun_flags |= TUN_DYING;
+ TUN_UNLOCK(tp);
+
+ CURVNET_SET_QUIET(ifp->if_home_vnet);
+ cloner = tuntap_cloner_from_flags(tp->tun_flags);
+ CURVNET_RESTORE();
+
+ error = if_clone_destroyif(cloner, ifp);
+ MPASS(error == 0 || error == EINTR || error == ERESTART);
+ return;
+ }
+
TUN_UNLOCK(tp);
}
@@ -1668,6 +1715,19 @@ tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
case TUNGDEBUG:
*(int *)data = tundebug;
break;
+ case TUNSTRANSIENT:
+ TUN_LOCK(tp);
+ if (*(int *)data)
+ tp->tun_flags |= TUN_TRANSIENT;
+ else
+ tp->tun_flags &= ~TUN_TRANSIENT;
+ TUN_UNLOCK(tp);
+ break;
+ case TUNGTRANSIENT:
+ TUN_LOCK(tp);
+ *(int *)data = (tp->tun_flags & TUN_TRANSIENT) != 0;
+ TUN_UNLOCK(tp);
+ break;
case FIONBIO:
break;
case FIOASYNC:
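TUNSTRANSIENT and TUNGTRANSIENT are the new ioctls wired up above: a transient tun/tap device tears its interface down on last close rather than lingering until explicitly destroyed. A minimal userland sketch, assuming a clonable /dev/tun and leaving error handling to err(3):

/*
 * Mark a tun device transient, read the setting back, and let the
 * interface be destroyed on close.
 */
#include <sys/ioctl.h>
#include <net/if_tun.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, on = 1, cur;

	if ((fd = open("/dev/tun", O_RDWR)) < 0)
		err(1, "open");
	if (ioctl(fd, TUNSTRANSIENT, &on) < 0)	/* destroy on last close */
		err(1, "TUNSTRANSIENT");
	if (ioctl(fd, TUNGTRANSIENT, &cur) < 0)
		err(1, "TUNGTRANSIENT");
	printf("transient: %d\n", cur);
	close(fd);	/* the interface goes away here */
	return (0);
}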
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
index 08435e7bd5f6..f2df612b19c1 100644
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -622,6 +622,7 @@ int if_setmtu(if_t ifp, int mtu);
int if_getmtu(const if_t ifp);
int if_getmtu_family(const if_t ifp, int family);
void if_notifymtu(if_t ifp);
+void if_setppromisc(const if_t ifp, bool ppromisc);
int if_setflagbits(if_t ifp, int set, int clear);
int if_setflags(if_t ifp, int flags);
int if_getflags(const if_t ifp);
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 2eca81d54f99..d2625da19cd2 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -142,6 +142,9 @@ struct iflib_ctx;
static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);
+#ifndef ALTQ
+static int iflib_simple_transmit(if_t ifp, struct mbuf *m);
+#endif
typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
@@ -198,6 +201,9 @@ struct iflib_ctx {
uint8_t ifc_sysctl_use_logical_cores;
uint16_t ifc_sysctl_extra_msix_vectors;
bool ifc_cpus_are_physical_cores;
+ bool ifc_sysctl_simple_tx;
+ uint16_t ifc_sysctl_tx_reclaim_thresh;
+ uint16_t ifc_sysctl_tx_reclaim_ticks;
qidx_t ifc_sysctl_ntxds[8];
qidx_t ifc_sysctl_nrxds[8];
@@ -341,7 +347,9 @@ struct iflib_txq {
uint16_t ift_npending;
uint16_t ift_db_pending;
uint16_t ift_rs_pending;
- /* implicit pad */
+ uint32_t ift_last_reclaim;
+ uint16_t ift_reclaim_thresh;
+ uint16_t ift_reclaim_ticks;
uint8_t ift_txd_size[8];
uint64_t ift_processed;
uint64_t ift_cleaned;
@@ -710,7 +718,7 @@ static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
-static int iflib_register(if_ctx_t);
+static void iflib_register(if_ctx_t);
static void iflib_deregister(if_ctx_t);
static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
static uint16_t iflib_get_mbuf_size_for(unsigned int size);
@@ -725,6 +733,7 @@ static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *iflib_fixup_rx(struct mbuf *m);
#endif
+static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq);
static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
SLIST_HEAD_INITIALIZER(cpu_offsets);
@@ -2624,8 +2633,10 @@ iflib_stop(if_ctx_t ctx)
#endif /* DEV_NETMAP */
CALLOUT_UNLOCK(txq);
- /* clean any enqueued buffers */
- iflib_ifmp_purge(txq);
+ if (!ctx->ifc_sysctl_simple_tx) {
+ /* clean any enqueued buffers */
+ iflib_ifmp_purge(txq);
+ }
/* Free any existing tx buffers. */
for (j = 0; j < txq->ift_size; j++) {
iflib_txsd_free(ctx, txq, j);
@@ -2890,51 +2901,6 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
return (m);
}
-#if defined(INET6) || defined(INET)
-static void
-iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6)
-{
- CURVNET_SET(if_getvnet(lc->ifp));
-#if defined(INET6)
- *v6 = V_ip6_forwarding;
-#endif
-#if defined(INET)
- *v4 = V_ipforwarding;
-#endif
- CURVNET_RESTORE();
-}
-
-/*
- * Returns true if it's possible this packet could be LROed.
- * if it returns false, it is guaranteed that tcp_lro_rx()
- * would not return zero.
- */
-static bool
-iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding)
-{
- struct ether_header *eh;
-
- eh = mtod(m, struct ether_header *);
- switch (eh->ether_type) {
-#if defined(INET6)
- case htons(ETHERTYPE_IPV6):
- return (!v6_forwarding);
-#endif
-#if defined(INET)
- case htons(ETHERTYPE_IP):
- return (!v4_forwarding);
-#endif
- }
-
- return (false);
-}
-#else
-static void
-iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused)
-{
-}
-#endif
-
static void
_task_fn_rx_watchdog(void *context)
{
@@ -2955,19 +2921,19 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
struct if_rxd_info ri;
int err, budget_left, rx_bytes, rx_pkts;
iflib_fl_t fl;
+#if defined(INET6) || defined(INET)
int lro_enabled;
- bool v4_forwarding, v6_forwarding, lro_possible;
+#endif
uint8_t retval = 0;
/*
* XXX early demux data packets so that if_input processing only handles
* acks in interrupt context
*/
- struct mbuf *m, *mh, *mt, *mf;
+ struct mbuf *m, *mh, *mt;
NET_EPOCH_ASSERT();
- lro_possible = v4_forwarding = v6_forwarding = false;
ifp = ctx->ifc_ifp;
mh = mt = NULL;
MPASS(budget > 0);
@@ -2983,6 +2949,10 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
return (retval);
}
+#if defined(INET6) || defined(INET)
+ lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
+#endif
+
/* pfil needs the vnet to be set */
CURVNET_SET_QUIET(if_getvnet(ifp));
for (budget_left = budget; budget_left > 0 && avail > 0;) {
@@ -3027,7 +2997,17 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
if (__predict_false(m == NULL))
continue;
- /* imm_pkt: -- cxgb */
+#ifndef __NO_STRICT_ALIGNMENT
+ if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
+ continue;
+#endif
+#if defined(INET6) || defined(INET)
+ if (lro_enabled) {
+ tcp_lro_queue_mbuf(&rxq->ifr_lc, m);
+ continue;
+ }
+#endif
+
if (mh == NULL)
mh = mt = m;
else {
@@ -3040,49 +3020,8 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
retval |= iflib_fl_refill_all(ctx, fl);
- lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
- if (lro_enabled)
- iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding);
- mt = mf = NULL;
- while (mh != NULL) {
- m = mh;
- mh = mh->m_nextpkt;
- m->m_nextpkt = NULL;
-#ifndef __NO_STRICT_ALIGNMENT
- if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL)
- continue;
-#endif
-#if defined(INET6) || defined(INET)
- if (lro_enabled) {
- if (!lro_possible) {
- lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding);
- if (lro_possible && mf != NULL) {
- if_input(ifp, mf);
- DBG_COUNTER_INC(rx_if_input);
- mt = mf = NULL;
- }
- }
- if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC | CSUM_L4_VALID)) ==
- (CSUM_L4_CALC | CSUM_L4_VALID)) {
- if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0)
- continue;
- }
- }
-#endif
- if (lro_possible) {
- if_input(ifp, m);
- DBG_COUNTER_INC(rx_if_input);
- continue;
- }
-
- if (mf == NULL)
- mf = m;
- if (mt != NULL)
- mt->m_nextpkt = m;
- mt = m;
- }
- if (mf != NULL) {
- if_input(ifp, mf);
+ if (mh != NULL) {
+ if_input(ifp, mh);
DBG_COUNTER_INC(rx_if_input);
}
@@ -3149,8 +3088,6 @@ txq_max_rs_deferred(iflib_txq_t txq)
#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
-/* XXX we should be setting this to something other than zero */
-#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
#define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \
(ctx)->ifc_softc_ctx.isc_tx_nsegments)
@@ -3707,13 +3644,22 @@ defrag:
* cxgb
*/
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
- txq->ift_no_desc_avail++;
- bus_dmamap_unload(buf_tag, map);
- DBG_COUNTER_INC(encap_txq_avail_fail);
- DBG_COUNTER_INC(encap_txd_encap_fail);
- if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
- GROUPTASK_ENQUEUE(&txq->ift_task);
- return (ENOBUFS);
+ (void)iflib_completed_tx_reclaim(txq);
+ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
+ txq->ift_no_desc_avail++;
+ bus_dmamap_unload(buf_tag, map);
+ DBG_COUNTER_INC(encap_txq_avail_fail);
+ DBG_COUNTER_INC(encap_txd_encap_fail);
+ if (ctx->ifc_sysctl_simple_tx) {
+ *m_headp = m_head = iflib_remove_mbuf(txq);
+ m_freem(*m_headp);
+ DBG_COUNTER_INC(tx_frees);
+ *m_headp = NULL;
+ }
+ if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
+ GROUPTASK_ENQUEUE(&txq->ift_task);
+ return (ENOBUFS);
+ }
}
/*
* On Intel cards we can greatly reduce the number of TX interrupts
@@ -3841,14 +3787,21 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
}
static __inline int
-iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
+iflib_completed_tx_reclaim(iflib_txq_t txq)
{
- int reclaim;
+ int reclaim, thresh;
+ uint32_t now;
if_ctx_t ctx = txq->ift_ctx;
+ thresh = txq->ift_reclaim_thresh;
KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
+ now = ticks;
+ if (now <= (txq->ift_last_reclaim + txq->ift_reclaim_ticks) &&
+ txq->ift_in_use < thresh)
+ return (0);
+ txq->ift_last_reclaim = now;
/*
* Need a rate-limiting check so that this isn't called every time
*/
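The reclaim rework above retires the shared-context isc_tx_reclaim_thresh in favor of per-queue ift_reclaim_thresh and ift_reclaim_ticks, and gates the reclaim so it only runs once the tick window has elapsed or enough descriptors are in use. The gate reads as a skip condition, so here it is inverted into a compilable predicate with a worked example (the numbers assume hz-style ticks and are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of the gate added to iflib_completed_tx_reclaim() above:
 * reclaim runs when the tick interval since the last reclaim has
 * elapsed OR the in-use descriptor count has reached the threshold.
 */
static bool
should_reclaim(uint32_t now, uint32_t last, uint16_t reclaim_ticks,
    int in_use, int thresh)
{
	if (now <= last + reclaim_ticks && in_use < thresh)
		return (false);
	return (true);
}

int
main(void)
{
	/* With ticks=50 and thresh=64: 10 ticks after the last reclaim
	 * and 32 descriptors outstanding, the reclaim is skipped... */
	printf("%d\n", should_reclaim(110, 100, 50, 32, 64));	/* 0 */
	/* ...but 100 ticks later it runs even with few outstanding. */
	printf("%d\n", should_reclaim(200, 100, 50, 32, 64));	/* 1 */
	return (0);
}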
@@ -3929,7 +3882,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
DBG_COUNTER_INC(txq_drain_notready);
return (0);
}
- reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ reclaimed = iflib_completed_tx_reclaim(txq);
rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending);
avail = IDXDIFF(pidx, cidx, r->size);
@@ -4008,7 +3961,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
}
/* deliberate use of bitwise or to avoid gratuitous short-circuit */
- ring = rang ? false : (iflib_min_tx_latency | err);
+ ring = rang ? false : (iflib_min_tx_latency | err | (!!txq->ift_reclaim_thresh));
iflib_txd_db_check(txq, ring);
if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
@@ -4086,6 +4039,12 @@ _task_fn_tx(void *context)
netmap_tx_irq(ifp, txq->ift_id))
goto skip_ifmp;
#endif
+ if (ctx->ifc_sysctl_simple_tx) {
+ mtx_lock(&txq->ift_mtx);
+ (void)iflib_completed_tx_reclaim(txq);
+ mtx_unlock(&txq->ift_mtx);
+ goto skip_ifmp;
+ }
#ifdef ALTQ
if (if_altq_is_enabled(ifp))
iflib_altq_if_start(ifp);
@@ -4099,9 +4058,8 @@ _task_fn_tx(void *context)
*/
if (abdicate)
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
-#ifdef DEV_NETMAP
+
skip_ifmp:
-#endif
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
else
@@ -4357,6 +4315,10 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
m_freem(m);
DBG_COUNTER_INC(tx_frees);
+ if (err == ENOBUFS)
+ if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
+ else
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
return (err);
@@ -5195,15 +5157,19 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
ctx->ifc_dev = dev;
ctx->ifc_softc = sc;
- if ((err = iflib_register(ctx)) != 0) {
- device_printf(dev, "iflib_register failed %d\n", err);
- goto fail_ctx_free;
- }
+ iflib_register(ctx);
iflib_add_device_sysctl_pre(ctx);
scctx = &ctx->ifc_softc_ctx;
ifp = ctx->ifc_ifp;
-
+ if (ctx->ifc_sysctl_simple_tx) {
+#ifndef ALTQ
+ if_settransmitfn(ifp, iflib_simple_transmit);
+ device_printf(dev, "using simple if_transmit\n");
+#else
+ device_printf(dev, "ALTQ prevents using simple if_transmit\n");
+#endif
+ }
iflib_reset_qvalues(ctx);
IFNET_WLOCK();
CTX_LOCK(ctx);
@@ -5415,7 +5381,6 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
DEBUGNET_SET(ctx->ifc_ifp, iflib);
- if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
iflib_add_device_sysctl_post(ctx);
iflib_add_pfil(ctx);
ctx->ifc_flags |= IFC_INIT_DONE;
@@ -5439,7 +5404,6 @@ fail_unlock:
CTX_UNLOCK(ctx);
IFNET_WUNLOCK();
iflib_deregister(ctx);
-fail_ctx_free:
device_set_softc(ctx->ifc_dev, NULL);
if (ctx->ifc_flags & IFC_SC_ALLOCATED)
free(ctx->ifc_softc, M_IFLIB);
@@ -5737,7 +5701,7 @@ _iflib_pre_assert(if_softc_ctx_t scctx)
MPASS(scctx->isc_txrx->ift_rxd_flush);
}
-static int
+static void
iflib_register(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
@@ -5770,6 +5734,7 @@ iflib_register(if_ctx_t ctx)
if_settransmitfn(ifp, iflib_if_transmit);
#endif
if_setqflushfn(ifp, iflib_if_qflush);
+ if_setgetcounterfn(ifp, iflib_if_get_counter);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
ctx->ifc_vlan_attach_event =
EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
@@ -5783,7 +5748,6 @@ iflib_register(if_ctx_t ctx)
ifmedia_init(ctx->ifc_mediap, IFM_IMASK,
iflib_media_change, iflib_media_status);
}
- return (0);
}
static void
@@ -5928,6 +5892,7 @@ iflib_queues_alloc(if_ctx_t ctx)
device_printf(dev, "Unable to allocate buf_ring\n");
goto err_tx_desc;
}
+ txq->ift_reclaim_thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
}
for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
@@ -6819,6 +6784,74 @@ mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
return (rc);
}
+static int
+iflib_handle_tx_reclaim_thresh(SYSCTL_HANDLER_ARGS)
+{
+ if_ctx_t ctx = (void *)arg1;
+ iflib_txq_t txq;
+ int i, err;
+ int thresh;
+
+ thresh = ctx->ifc_sysctl_tx_reclaim_thresh;
+ err = sysctl_handle_int(oidp, &thresh, arg2, req);
+ if (err != 0)
+ return (err);
+
+ if (thresh == ctx->ifc_sysctl_tx_reclaim_thresh)
+ return (0);
+
+ if (thresh > ctx->ifc_softc_ctx.isc_ntxd[0] / 2) {
+ device_printf(ctx->ifc_dev, "TX Reclaim thresh must be <= %d\n",
+ ctx->ifc_softc_ctx.isc_ntxd[0] / 2);
+ return (EINVAL);
+ }
+
+ ctx->ifc_sysctl_tx_reclaim_thresh = thresh;
+ if (ctx->ifc_txqs == NULL)
+ return (err);
+
+ txq = &ctx->ifc_txqs[0];
+ for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
+ txq->ift_reclaim_thresh = thresh;
+ }
+ return (err);
+}
+
+static int
+iflib_handle_tx_reclaim_ticks(SYSCTL_HANDLER_ARGS)
+{
+ if_ctx_t ctx = (void *)arg1;
+ iflib_txq_t txq;
+ int i, err;
+ int ticks;
+
+ ticks = ctx->ifc_sysctl_tx_reclaim_ticks;
+ err = sysctl_handle_int(oidp, &ticks, arg2, req);
+ if (err != 0)
+ return (err);
+
+ if (ticks == ctx->ifc_sysctl_tx_reclaim_ticks)
+ return (0);
+
+ if (ticks > hz) {
+ device_printf(ctx->ifc_dev,
+ "TX Reclaim ticks must be <= hz (%d)\n", hz);
+ return (EINVAL);
+ }
+
+ ctx->ifc_sysctl_tx_reclaim_ticks = ticks;
+ if (ctx->ifc_txqs == NULL)
+ return (err);
+
+ txq = &ctx->ifc_txqs[0];
+ for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
+ txq->ift_reclaim_ticks = ticks;
+ }
+ return (err);
+}
+
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
@@ -6838,6 +6871,9 @@ iflib_add_device_sysctl_pre(if_ctx_t ctx)
SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version");
+ SYSCTL_ADD_BOOL(ctx_list, oid_list, OID_AUTO, "simple_tx",
+ CTLFLAG_RDTUN, &ctx->ifc_sysctl_simple_tx, 0,
+ "use simple tx ring");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
"# of txqs to use, 0 => use default #");
@@ -6904,6 +6940,16 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
node = ctx->ifc_sysctl_node;
child = SYSCTL_CHILDREN(node);
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_thresh",
+ CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
+ 0, iflib_handle_tx_reclaim_thresh, "I",
+ "Number of TX descs outstanding before reclaim is called");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "tx_reclaim_ticks",
+ CTLTYPE_INT | CTLFLAG_RWTUN, ctx,
+ 0, iflib_handle_tx_reclaim_ticks, "I",
+ "Number of ticks before a TX reclaim is forced");
+
if (scctx->isc_ntxqsets > 100)
qfmt = "txq%03d";
else if (scctx->isc_ntxqsets > 10)
@@ -7151,7 +7197,7 @@ iflib_debugnet_poll(if_t ifp, int count)
return (EBUSY);
txq = &ctx->ifc_txqs[0];
- (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ (void)iflib_completed_tx_reclaim(txq);
NET_EPOCH_ENTER(et);
for (i = 0; i < scctx->isc_nrxqsets; i++)
@@ -7160,3 +7206,54 @@ iflib_debugnet_poll(if_t ifp, int count)
return (0);
}
#endif /* DEBUGNET */
+
+#ifndef ALTQ
+static inline iflib_txq_t
+iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m)
+{
+ int qidx;
+
+ if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
+ qidx = QIDX(ctx, m);
+ else
+ qidx = NTXQSETS(ctx) + FIRST_QSET(ctx) - 1;
+ return (&ctx->ifc_txqs[qidx]);
+}
+
+static int
+iflib_simple_transmit(if_t ifp, struct mbuf *m)
+{
+ if_ctx_t ctx;
+ iflib_txq_t txq;
+ int error;
+ int bytes_sent = 0, pkt_sent = 0, mcast_sent = 0;
+
+ ctx = if_getsoftc(ifp);
+ if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING)
+ return (EBUSY);
+ txq = iflib_simple_select_queue(ctx, m);
+ mtx_lock(&txq->ift_mtx);
+ error = iflib_encap(txq, &m);
+ if (error == 0) {
+ pkt_sent++;
+ bytes_sent += m->m_pkthdr.len;
+ mcast_sent += !!(m->m_flags & M_MCAST);
+ (void)iflib_txd_db_check(txq, true);
+ } else {
+ if (error == ENOBUFS)
+ if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
+ else
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ }
+ (void)iflib_completed_tx_reclaim(txq);
+ mtx_unlock(&txq->ift_mtx);
+ if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
+ if (mcast_sent)
+ if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
+
+ return (error);
+}
+#endif
diff --git a/sys/net/iflib.h b/sys/net/iflib.h
index 3817445228d0..e65c936fc4b4 100644
--- a/sys/net/iflib.h
+++ b/sys/net/iflib.h
@@ -272,7 +272,7 @@ struct if_shared_ctx {
int isc_ntxqs; /* # of tx queues per tx qset - usually 1 */
int isc_nrxqs; /* # of rx queues per rx qset - intel 1, chelsio 2, broadcom 3 */
int __spare0__;
- int isc_tx_reclaim_thresh;
+ int __spare1__;
int isc_flags;
};
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index c397f0b67896..8aefe514946e 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -326,6 +326,7 @@ pf_counter_u64_zero(struct pf_counter_u64 *pfcu64)
_Static_assert(sizeof(time_t) == 4 || sizeof(time_t) == 8, "unexpected time_t size");
SYSCTL_DECL(_net_pf);
+MALLOC_DECLARE(M_PF);
MALLOC_DECLARE(M_PFHASH);
MALLOC_DECLARE(M_PF_RULE_ITEM);
@@ -451,6 +452,16 @@ VNET_DECLARE(struct rmlock, pf_rules_lock);
#define PF_RULES_RASSERT() rm_assert(&V_pf_rules_lock, RA_RLOCKED)
#define PF_RULES_WASSERT() rm_assert(&V_pf_rules_lock, RA_WLOCKED)
+VNET_DECLARE(struct rmlock, pf_tags_lock);
+#define V_pf_tags_lock VNET(pf_tags_lock)
+
+#define PF_TAGS_RLOCK_TRACKER struct rm_priotracker _pf_tags_tracker
+#define PF_TAGS_RLOCK() rm_rlock(&V_pf_tags_lock, &_pf_tags_tracker)
+#define PF_TAGS_RUNLOCK() rm_runlock(&V_pf_tags_lock, &_pf_tags_tracker)
+#define PF_TAGS_WLOCK() rm_wlock(&V_pf_tags_lock)
+#define PF_TAGS_WUNLOCK() rm_wunlock(&V_pf_tags_lock)
+#define PF_TAGS_WASSERT() rm_assert(&V_pf_tags_lock, RA_WLOCKED)
+
extern struct mtx_padalign pf_table_stats_lock;
#define PF_TABLE_STATS_LOCK() mtx_lock(&pf_table_stats_lock)
#define PF_TABLE_STATS_UNLOCK() mtx_unlock(&pf_table_stats_lock)
@@ -645,6 +656,7 @@ struct pf_kpool {
int tblidx;
u_int16_t proxy_port[2];
u_int8_t opts;
+ sa_family_t ipv6_nexthop_af;
};
struct pf_rule_actions {
@@ -859,8 +871,8 @@ struct pf_krule {
u_int8_t keep_state;
sa_family_t af;
u_int8_t proto;
- u_int8_t type;
- u_int8_t code;
+ uint16_t type;
+ uint16_t code;
u_int8_t flags;
u_int8_t flagset;
u_int8_t min_ttl;
@@ -889,6 +901,7 @@ struct pf_krule {
LIST_ENTRY(pf_krule) allrulelist;
bool allrulelinked;
#endif
+ time_t exptime;
};
struct pf_krule_item {
@@ -1020,7 +1033,7 @@ struct pf_state_scrub_export {
#define PF_SCRUB_FLAG_VALID 0x01
uint8_t scrub_flag;
uint32_t pfss_ts_mod; /* timestamp modulation */
-};
+} __packed;
struct pf_state_key_export {
struct pf_addr addr[2];
@@ -1037,7 +1050,7 @@ struct pf_state_peer_export {
uint8_t state; /* active state level */
uint8_t wscale; /* window scaling factor */
uint8_t dummy[6];
-};
+} __packed;
_Static_assert(sizeof(struct pf_state_peer_export) == 32, "size incorrect");
struct pf_state_export {
@@ -1153,7 +1166,6 @@ struct pf_test_ctx {
int rewrite;
u_short reason;
struct pf_src_node *sns[PF_SN_MAX];
- struct pf_krule_slist rules;
struct pf_krule *nr;
struct pf_krule *tr;
struct pf_krule **rm;
@@ -1179,26 +1191,6 @@ struct pf_test_ctx {
* Unified state structures for pulling states out of the kernel
* used by pfsync(4) and the pf(4) ioctl.
*/
-struct pfsync_state_scrub {
- u_int16_t pfss_flags;
- u_int8_t pfss_ttl; /* stashed TTL */
-#define PFSYNC_SCRUB_FLAG_VALID 0x01
- u_int8_t scrub_flag;
- u_int32_t pfss_ts_mod; /* timestamp modulation */
-} __packed;
-
-struct pfsync_state_peer {
- struct pfsync_state_scrub scrub; /* state is scrubbed */
- u_int32_t seqlo; /* Max sequence number sent */
- u_int32_t seqhi; /* Max the other end ACKd + win */
- u_int32_t seqdiff; /* Sequence number modulator */
- u_int16_t max_win; /* largest window (pre scaling) */
- u_int16_t mss; /* Maximum segment size option */
- u_int8_t state; /* active state level */
- u_int8_t wscale; /* window scaling factor */
- u_int8_t pad[6];
-} __packed;
-
struct pfsync_state_key {
struct pf_addr addr[2];
u_int16_t port[2];
@@ -1208,8 +1200,8 @@ struct pfsync_state_1301 {
u_int64_t id;
char ifname[IFNAMSIZ];
struct pfsync_state_key key[2];
- struct pfsync_state_peer src;
- struct pfsync_state_peer dst;
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
struct pf_addr rt_addr;
u_int32_t rule;
u_int32_t anchor;
@@ -1227,16 +1219,16 @@ struct pfsync_state_1301 {
u_int8_t state_flags;
u_int8_t timeout;
u_int8_t sync_flags;
- u_int8_t updates;
+ u_int8_t updates; /* unused */
} __packed;
struct pfsync_state_1400 {
- /* The beginning of the struct is compatible with previous versions */
+ /* The beginning of the struct is compatible with pfsync_state_1301 */
u_int64_t id;
char ifname[IFNAMSIZ];
struct pfsync_state_key key[2];
- struct pfsync_state_peer src;
- struct pfsync_state_peer dst;
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
struct pf_addr rt_addr;
u_int32_t rule;
u_int32_t anchor;
@@ -1254,7 +1246,7 @@ struct pfsync_state_1400 {
u_int8_t __spare;
u_int8_t timeout;
u_int8_t sync_flags;
- u_int8_t updates;
+ u_int8_t updates; /* unused */
/* The rest is not */
u_int16_t qid;
u_int16_t pqid;
@@ -1267,12 +1259,54 @@ struct pfsync_state_1400 {
u_int8_t set_prio[2];
u_int8_t rt;
char rt_ifname[IFNAMSIZ];
+} __packed;
+struct pfsync_state_1500 {
+ /* The beginning of the struct is compatible with pfsync_state_1301 */
+ u_int64_t id;
+ char ifname[IFNAMSIZ];
+ struct pfsync_state_key key[2];
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
+ struct pf_addr rt_addr;
+ u_int32_t rule;
+ u_int32_t anchor;
+ u_int32_t nat_rule;
+ u_int32_t creation;
+ u_int32_t expire;
+ u_int32_t packets[2][2];
+ u_int32_t bytes[2][2];
+ u_int32_t creatorid;
+ /* The rest is not; use the opportunity to fix alignment. */
+ char tagname[PF_TAG_NAME_SIZE];
+ char rt_ifname[IFNAMSIZ];
+ char orig_ifname[IFNAMSIZ];
+ int32_t rtableid;
+ u_int16_t state_flags;
+ u_int16_t qid;
+ u_int16_t pqid;
+ u_int16_t dnpipe;
+ u_int16_t dnrpipe;
+ u_int16_t max_mss;
+ sa_family_t wire_af;
+ sa_family_t stack_af;
+ sa_family_t rt_af;
+ u_int8_t wire_proto;
+ u_int8_t stack_proto;
+ u_int8_t log;
+ u_int8_t timeout;
+ u_int8_t direction;
+ u_int8_t rt;
+ u_int8_t min_ttl;
+ u_int8_t set_tos;
+ u_int8_t set_prio[2];
+ u_int8_t spare[3]; /* Improve struct alignment */
} __packed;
union pfsync_state_union {
struct pfsync_state_1301 pfs_1301;
struct pfsync_state_1400 pfs_1400;
+ struct pfsync_state_1500 pfs_1500;
} __packed;
#ifdef _KERNEL
@@ -1323,39 +1357,10 @@ extern pflog_packet_t *pflog_packet_ptr;
/* for copies to/from network byte order */
/* ioctl interface also uses network byte order */
-#define pf_state_peer_hton(s,d) do { \
- (d)->seqlo = htonl((s)->seqlo); \
- (d)->seqhi = htonl((s)->seqhi); \
- (d)->seqdiff = htonl((s)->seqdiff); \
- (d)->max_win = htons((s)->max_win); \
- (d)->mss = htons((s)->mss); \
- (d)->state = (s)->state; \
- (d)->wscale = (s)->wscale; \
- if ((s)->scrub) { \
- (d)->scrub.pfss_flags = \
- htons((s)->scrub->pfss_flags & PFSS_TIMESTAMP); \
- (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \
- (d)->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);\
- (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \
- } \
-} while (0)
-
-#define pf_state_peer_ntoh(s,d) do { \
- (d)->seqlo = ntohl((s)->seqlo); \
- (d)->seqhi = ntohl((s)->seqhi); \
- (d)->seqdiff = ntohl((s)->seqdiff); \
- (d)->max_win = ntohs((s)->max_win); \
- (d)->mss = ntohs((s)->mss); \
- (d)->state = (s)->state; \
- (d)->wscale = (s)->wscale; \
- if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \
- (d)->scrub != NULL) { \
- (d)->scrub->pfss_flags = \
- ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \
- (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \
- (d)->scrub->pfss_ts_mod = ntohl((s)->scrub.pfss_ts_mod);\
- } \
-} while (0)
+void pf_state_peer_hton(const struct pf_state_peer *,
+ struct pf_state_peer_export *);
+void pf_state_peer_ntoh(const struct pf_state_peer_export *,
+ struct pf_state_peer *);
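pf_state_peer_hton() and pf_state_peer_ntoh() replace the deleted macros with out-of-line functions over the new pf_state_peer_export layout; their definitions are not part of this diff. The following standalone sketch transcribes the host-to-network direction from the removed macro, adapted to the export struct's PF_SCRUB_FLAG_VALID; treat it as illustrative, not the committed implementation:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl/htons */

#define PFSS_TIMESTAMP		0x0001	/* illustrative flag value */
#define PF_SCRUB_FLAG_VALID	0x01

struct scrub_kern {	/* stands in for struct pf_state_scrub */
	uint16_t pfss_flags;
	uint8_t	 pfss_ttl;
	uint32_t pfss_ts_mod;
};

struct scrub_export {	/* mirrors struct pf_state_scrub_export */
	uint16_t pfss_flags;
	uint8_t	 pfss_ttl;
	uint8_t	 scrub_flag;
	uint32_t pfss_ts_mod;
};

struct peer_kern {	/* stands in for struct pf_state_peer */
	struct scrub_kern *scrub;
	uint32_t seqlo, seqhi, seqdiff;
	uint16_t max_win, mss;
	uint8_t	 state, wscale;
};

struct peer_export {	/* mirrors struct pf_state_peer_export */
	struct scrub_export scrub;
	uint32_t seqlo, seqhi, seqdiff;
	uint16_t max_win, mss;
	uint8_t	 state, wscale;
};

/* Transcription of the deleted pf_state_peer_hton macro body. */
static void
peer_hton(const struct peer_kern *s, struct peer_export *d)
{
	d->seqlo = htonl(s->seqlo);
	d->seqhi = htonl(s->seqhi);
	d->seqdiff = htonl(s->seqdiff);
	d->max_win = htons(s->max_win);
	d->mss = htons(s->mss);
	d->state = s->state;
	d->wscale = s->wscale;
	if (s->scrub != NULL) {
		d->scrub.pfss_flags =
		    htons(s->scrub->pfss_flags & PFSS_TIMESTAMP);
		d->scrub.pfss_ttl = s->scrub->pfss_ttl;
		d->scrub.pfss_ts_mod = htonl(s->scrub->pfss_ts_mod);
		d->scrub.scrub_flag = PF_SCRUB_FLAG_VALID;
	}
}

int
main(void)
{
	struct peer_kern k = { .scrub = NULL, .seqlo = 1 };
	struct peer_export e = { 0 };

	peer_hton(&k, &e);
	return (0);
}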
#define pf_state_counter_hton(s,d) do { \
d[0] = htonl((s>>32)&0xffffffff); \
@@ -1798,6 +1803,7 @@ struct pf_kstatus {
counter_u64_t lcounters[KLCNT_MAX]; /* limit counters */
struct pf_counter_u64 fcounters[FCNT_MAX]; /* state operation counters */
counter_u64_t scounters[SCNT_MAX]; /* src_node operation counters */
+ counter_u64_t ncounters[NCNT_MAX];
uint32_t states;
uint32_t src_nodes;
uint32_t running;
@@ -2438,8 +2444,6 @@ extern u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t,
extern u_int16_t pf_proto_cksum_fixup(struct mbuf *, u_int16_t,
u_int16_t, u_int16_t, u_int8_t);
-VNET_DECLARE(struct ifnet *, sync_ifp);
-#define V_sync_ifp VNET(sync_ifp);
VNET_DECLARE(struct pf_krule, pf_default_rule);
#define V_pf_default_rule VNET(pf_default_rule)
extern void pf_addrcpy(struct pf_addr *, const struct pf_addr *,
@@ -2470,7 +2474,7 @@ int pf_multihome_scan_init(int, int, struct pf_pdesc *);
int pf_multihome_scan_asconf(int, int, struct pf_pdesc *);
u_int32_t pf_new_isn(struct pf_kstate *);
-void *pf_pull_hdr(const struct mbuf *, int, void *, int, u_short *, u_short *,
+void *pf_pull_hdr(const struct mbuf *, int, void *, int, u_short *,
sa_family_t);
void pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t);
void pf_change_proto_a(struct mbuf *, void *, u_int16_t *, u_int32_t,
@@ -2487,6 +2491,7 @@ int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);
void pf_normalize_init(void);
void pf_normalize_cleanup(void);
+uint64_t pf_normalize_get_frag_count(void);
int pf_normalize_tcp(struct pf_pdesc *);
void pf_normalize_tcp_cleanup(struct pf_kstate *);
int pf_normalize_tcp_init(struct pf_pdesc *,
@@ -2509,6 +2514,10 @@ int pf_translate(struct pf_pdesc *, struct pf_addr *, u_int16_t,
struct pf_addr *, u_int16_t, u_int16_t, int);
int pf_translate_af(struct pf_pdesc *);
bool pf_init_threshold(struct pf_kthreshold *, uint32_t, uint32_t);
+uint16_t pf_tagname2tag(const char *);
+#ifdef ALTQ
+uint16_t pf_qname2qid(const char *, bool);
+#endif /* ALTQ */
void pfr_initialize(void);
void pfr_cleanup(void);
@@ -2590,22 +2599,23 @@ struct mbuf *pf_build_tcp(const struct pf_krule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
- u_int16_t, u_int16_t, u_int, int);
+ u_int16_t, u_int16_t, u_int, int, u_short *);
void pf_send_tcp(const struct pf_krule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
- u_int16_t, u_int16_t, int);
+ u_int16_t, u_int16_t, int, u_short *);
void pf_syncookies_init(void);
void pf_syncookies_cleanup(void);
int pf_get_syncookies(struct pfioc_nv *);
int pf_set_syncookies(struct pfioc_nv *);
int pf_synflood_check(struct pf_pdesc *);
-void pf_syncookie_send(struct pf_pdesc *);
+void pf_syncookie_send(struct pf_pdesc *, u_short *);
bool pf_syncookie_check(struct pf_pdesc *);
u_int8_t pf_syncookie_validate(struct pf_pdesc *);
-struct mbuf * pf_syncookie_recreate_syn(struct pf_pdesc *);
+struct mbuf * pf_syncookie_recreate_syn(struct pf_pdesc *,
+ u_short *);
VNET_DECLARE(struct pf_kstatus, pf_status);
#define V_pf_status VNET(pf_status)
@@ -2661,6 +2671,7 @@ struct pf_kruleset *pf_find_kruleset(const char *);
struct pf_kruleset *pf_get_leaf_kruleset(char *, char **);
struct pf_kruleset *pf_find_or_create_kruleset(const char *);
void pf_rs_initialize(void);
+void pf_rule_tree_free(struct pf_krule_global *);
struct pf_krule *pf_krule_alloc(void);
@@ -2712,8 +2723,10 @@ int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t);
#ifdef _KERNEL
void pf_print_host(struct pf_addr *, u_int16_t, sa_family_t);
-enum pf_test_status pf_step_into_anchor(struct pf_test_ctx *, struct pf_krule *);
-enum pf_test_status pf_match_rule(struct pf_test_ctx *, struct pf_kruleset *);
+enum pf_test_status pf_step_into_anchor(struct pf_test_ctx *, struct pf_krule *,
+ struct pf_krule_slist *match_rules);
+enum pf_test_status pf_match_rule(struct pf_test_ctx *, struct pf_kruleset *,
+ struct pf_krule_slist *);
void pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *,
int *, struct pf_keth_ruleset **,
struct pf_keth_rule **, struct pf_keth_rule **,
@@ -2729,7 +2742,7 @@ u_short pf_map_addr(sa_family_t, struct pf_krule *,
struct pf_addr *, struct pf_kpool *);
u_short pf_map_addr_sn(u_int8_t, struct pf_krule *,
struct pf_addr *, struct pf_addr *,
- sa_family_t *, struct pfi_kkif **nkif,
+ sa_family_t *, struct pfi_kkif **,
struct pf_addr *, struct pf_kpool *,
pf_sn_types_t);
int pf_get_transaddr_af(struct pf_krule *,
diff --git a/sys/net/route.c b/sys/net/route.c
index 7a50bcc43e06..d2c9f3e39c17 100644
--- a/sys/net/route.c
+++ b/sys/net/route.c
@@ -89,7 +89,7 @@ static int rt_ifdelroute(const struct rtentry *rt, const struct nhop_object *,
* SI_ORDER_MIDDLE.
*/
static void
-route_init(void)
+route_init(void *dummy __unused)
{
nhops_init();
diff --git a/sys/net/route/route_tables.c b/sys/net/route/route_tables.c
index 176ca43fa1c5..3b7bb1385d0e 100644
--- a/sys/net/route/route_tables.c
+++ b/sys/net/route/route_tables.c
@@ -186,7 +186,7 @@ rtables_prison_destructor(void *data)
}
static void
-rtables_init(void)
+rtables_init(void *dummy __unused)
{
osd_method_t methods[PR_MAXMETHOD] = {
[PR_METHOD_ATTACH] = rtables_check_proc_fib,
diff --git a/sys/net/rtsock.c b/sys/net/rtsock.c
index f0dcc973ca7c..be858428bb3e 100644
--- a/sys/net/rtsock.c
+++ b/sys/net/rtsock.c
@@ -309,7 +309,7 @@ rtsock_notify_event(uint32_t fibnum, const struct rib_cmd_info *rc)
}
static void
-rtsock_init(void)
+rtsock_init(void *dummy __unused)
{
rtsbridge_orig_p = rtsock_callback_p;
rtsock_callback_p = &rtsbridge;
diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c
index 2b7cf635b9f5..1299f86ebdc7 100644
--- a/sys/net80211/ieee80211.c
+++ b/sys/net80211/ieee80211.c
@@ -2689,13 +2689,18 @@ ieee80211_channel_type_char(const struct ieee80211_channel *c)
return 'f';
}
-/*
- * Determine whether the given key in the given VAP is a global key.
+/**
+ * @brief Determine whether the given key in the given VAP is a global key.
+ *
* (key index 0..3, shared between all stations on a VAP.)
*
* This is either a WEP key or a GROUP key.
*
 * Note this will NOT return true if it is an IGTK key.
+ *
+ * @param vap the current VAP
+ * @param key ieee80211_key to use/check
+ * @returns true if it's a global/WEP key, false otherwise
*/
bool
ieee80211_is_key_global(const struct ieee80211vap *vap,
@@ -2705,8 +2710,23 @@ ieee80211_is_key_global(const struct ieee80211vap *vap,
key < &vap->iv_nw_keys[IEEE80211_WEP_NKID]);
}
-/*
- * Determine whether the given key in the given VAP is a unicast key.
+/**
+ * @brief Determine whether the given key in the given VAP is a unicast key.
+ *
+ * This only returns true if it's a unicast key.
+ *
+ * Note: For now net80211 only supports a single unicast key, stored in
+ * an ieee80211_node entry.
+ *
+ * Code should use this to know if it's a unicast key and then call
+ * ieee80211_crypto_get_keyid() to get the 802.11 key ID (0..3 for
+ * unicast/global keys, 4..5 for IGTK keys.) Since the unicast
+ * and global key indexes "overlap", callers will need to check
+ * both the type and id.
+ *
+ * @param vap the current VAP
+ * @param key ieee80211_key to use/check
+ * @returns true if the key is a unicast key, false if it is not
*/
bool
ieee80211_is_key_unicast(const struct ieee80211vap *vap,
diff --git a/sys/net80211/ieee80211_crypto.c b/sys/net80211/ieee80211_crypto.c
index 1e63ca46f28f..566f0b2e0c23 100644
--- a/sys/net80211/ieee80211_crypto.c
+++ b/sys/net80211/ieee80211_crypto.c
@@ -611,11 +611,15 @@ ieee80211_crypto_setkey(struct ieee80211vap *vap, struct ieee80211_key *key)
return dev_key_set(vap, key);
}
-/*
- * Return index if the key is a WEP key (0..3); -1 otherwise.
+/**
+ * @brief Return index if the key is a WEP key (0..3); -1 otherwise.
*
* This is different to "get_keyid" which defaults to returning
* 0 for unicast keys; it assumes that it won't be used for WEP.
+ *
+ * @param vap the current VAP
+ * @param k ieee80211_key to check
+ * @returns 0..3 if it's a global/WEP key, -1 otherwise.
*/
int
ieee80211_crypto_get_key_wepidx(const struct ieee80211vap *vap,
@@ -628,8 +632,18 @@ ieee80211_crypto_get_key_wepidx(const struct ieee80211vap *vap,
return (-1);
}
-/*
- * Note: only supports a single unicast key (0).
+/**
+ * @brief Return the index of a unicast, global or IGTK key.
+ *
+ * Return the index of a key. For unicast keys the index is 0..1.
+ * For global/WEP keys it's 0..3. For IGTK keys it's 4..5.
+ *
+ * TODO: support >1 unicast key
+ * TODO: support IGTK keys
+ *
+ * @param vap the current VAP
+ * @param k ieee80211_key to check
+ * @returns 0..3 for a WEP/global key, 0..1 for unicast key, 4..5 for IGTK key
*/
uint8_t
ieee80211_crypto_get_keyid(struct ieee80211vap *vap, struct ieee80211_key *k)
@@ -641,6 +655,19 @@ ieee80211_crypto_get_keyid(struct ieee80211vap *vap, struct ieee80211_key *k)
return (0);
}
+/**
+ * @brief Return the key to use for encrypting an mbuf frame to a node
+ *
+ * This routine chooses a suitable key used to encrypt the given frame with.
+ * It doesn't do the encryption; it only chooses the key. If a key is not
+ * available then the routine will return NULL.
+ *
+ * It's up to the caller to enforce whether a key is absolutely required or not.
+ *
+ * @param ni The ieee80211_node to send the frame to
+ * @param m the mbuf to encrypt
+ * @returns the ieee80211_key to encrypt with, or NULL if there's no suitable key
+ */
struct ieee80211_key *
ieee80211_crypto_get_txkey(struct ieee80211_node *ni, struct mbuf *m)
{
@@ -676,8 +703,28 @@ ieee80211_crypto_get_txkey(struct ieee80211_node *ni, struct mbuf *m)
return &ni->ni_ucastkey;
}
-/*
- * Add privacy headers appropriate for the specified key.
+/**
+ * @brief Privacy encapsulate and encrypt the given mbuf.
+ *
+ * This routine handles the mechanics of encryption - expanding the
+ * mbuf to add privacy headers, IV, ICV, MIC, MMIC, and then encrypts
+ * the given mbuf if required.
+ *
+ * This should be called by the driver in its TX path as part of
+ * encapsulation before passing frames to the hardware/firmware
+ * queues.
+ *
+ * Drivers/hardware which do an entirely offloaded path
+ * should still call this for completeness - it indicates to the
+ * driver that the frame itself should be encrypted.
+ *
+ * The driver should have set capability bits in the attach /
+ * key allocation path to disable various encapsulation/encryption
+ * features.
+ *
+ * @param ni ieee80211_node for this frame
+ * @param mbuf mbuf to modify
+ * @returns the key used if the frame is to be encrypted, NULL otherwise
*/
struct ieee80211_key *
ieee80211_crypto_encap(struct ieee80211_node *ni, struct mbuf *m)
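As a hedged illustration of the TX-path contract documented above - the frame
header pointer wh, the PROTECTED test, and the error handling are assumptions
for this fragment, not code from this change:

	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			/* No usable key or encap failed; don't send. */
			m_freem(m);
			return (ENOBUFS);
		}
	}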
@@ -693,9 +740,31 @@ ieee80211_crypto_encap(struct ieee80211_node *ni, struct mbuf *m)
return NULL;
}
-/*
- * Validate and strip privacy headers (and trailer) for a
- * received frame that has the WEP/Privacy bit set.
+/**
+ * @brief Decapsulate and validate an encrypted frame.
+ *
+ * This handles an encrypted frame (one with the privacy bit set.)
+ * It also obeys the key / config / receive packet flags for how
+ * the driver says it's already been processed.
+ *
+ * Unlike ieee80211_crypto_encap(), this isn't called by the driver.
+ * Instead, drivers pass in the potentially decrypted frame - fully,
+ * partially, or not at all decrypted - and net80211 calls this as
+ * appropriate.
+ *
+ * This handles NICs (like ath(4)) which have a variable size between
+ * the 802.11 header and 802.11 payload due to DMA alignment / encryption
+ * engine concerns.
+ *
+ * If the frame was decrypted and validated successfully then 1 is returned
+ * and the mbuf can be treated as an 802.11 frame. If it is not decrypted
+ * successfully or it was decrypted but failed validation/checks, then
+ * 0 is returned.
+ *
+ * @param ni ieee80211_node for received frame
+ * @param m mbuf frame to receive
+ * @param hdrlen length of the 802.11 header, including trailing null bytes
+ * @param key pointer to ieee80211_key that will be set if appropriate
+ * @returns 0 if the frame wasn't decrypted/validated, 1 if decrypted/validated.
*/
int
ieee80211_crypto_decap(struct ieee80211_node *ni, struct mbuf *m, int hdrlen,
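A hedged fragment showing the net80211 RX side described above (the
surrounding frame handling and the statistics bump are assumptions, not part
of this diff):

	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		if (ieee80211_crypto_decap(ni, m, hdrlen, &key) == 0) {
			/* Undecryptable or failed validation; discard. */
			IEEE80211_NODE_STAT(ni, rx_wepfail);
			m_freem(m);
			return;
		}
	}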
diff --git a/sys/net80211/ieee80211_ddb.c b/sys/net80211/ieee80211_ddb.c
index d96d7988a864..1dd8e38b9896 100644
--- a/sys/net80211/ieee80211_ddb.c
+++ b/sys/net80211/ieee80211_ddb.c
@@ -296,7 +296,7 @@ _db_show_sta(const struct ieee80211_node *ni)
ni->ni_htparam, ni->ni_htctlchan, ni->ni_ht2ndchan);
db_printf("\thtopmode 0x%x htstbc 0x%x chw %d (%s)\n",
ni->ni_htopmode, ni->ni_htstbc,
- ni->ni_chw, ieee80211_ni_chw_to_str(ni->ni_chw));
+ ni->ni_chw, net80211_ni_chw_to_str(ni->ni_chw));
/* XXX ampdu state */
for (i = 0; i < WME_NUM_TID; i++)
diff --git a/sys/net80211/ieee80211_freebsd.h b/sys/net80211/ieee80211_freebsd.h
index 141b13f9f740..954801d95787 100644
--- a/sys/net80211/ieee80211_freebsd.h
+++ b/sys/net80211/ieee80211_freebsd.h
@@ -93,12 +93,22 @@ typedef struct {
} while (0)
#define IEEE80211_TX_LOCK_OBJ(_ic) (&(_ic)->ic_txlock.mtx)
#define IEEE80211_TX_LOCK_DESTROY(_ic) mtx_destroy(IEEE80211_TX_LOCK_OBJ(_ic))
-#define IEEE80211_TX_LOCK(_ic) mtx_lock(IEEE80211_TX_LOCK_OBJ(_ic))
-#define IEEE80211_TX_UNLOCK(_ic) mtx_unlock(IEEE80211_TX_LOCK_OBJ(_ic))
-#define IEEE80211_TX_LOCK_ASSERT(_ic) \
- mtx_assert(IEEE80211_TX_LOCK_OBJ(_ic), MA_OWNED)
-#define IEEE80211_TX_UNLOCK_ASSERT(_ic) \
- mtx_assert(IEEE80211_TX_LOCK_OBJ(_ic), MA_NOTOWNED)
+#define IEEE80211_TX_LOCK(_ic) do { \
+ if (!IEEE80211_CONF_SEQNO_OFFLOAD(_ic)) \
+ mtx_lock(IEEE80211_TX_LOCK_OBJ(_ic)); \
+ } while (0)
+#define IEEE80211_TX_UNLOCK(_ic) do { \
+ if (!IEEE80211_CONF_SEQNO_OFFLOAD(_ic)) \
+ mtx_unlock(IEEE80211_TX_LOCK_OBJ(_ic)); \
+ } while (0)
+#define IEEE80211_TX_LOCK_ASSERT(_ic) do { \
+ if (!IEEE80211_CONF_SEQNO_OFFLOAD(_ic)) \
+ mtx_assert(IEEE80211_TX_LOCK_OBJ(_ic), MA_OWNED); \
+ } while (0)
+#define IEEE80211_TX_UNLOCK_ASSERT(_ic) do { \
+ if (!IEEE80211_CONF_SEQNO_OFFLOAD(_ic)) \
+ mtx_assert(IEEE80211_TX_LOCK_OBJ(_ic), MA_NOTOWNED); \
+ } while (0)
/*
* Stageq / ni_tx_superg lock
@@ -331,11 +341,16 @@ struct mbuf *ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen);
#define M_AGE_SUB(m,adj) (m->m_pkthdr.csum_data -= adj)
/*
- * Store the sequence number.
+ * Store / retrieve the sequence number in an mbuf.
+ *
+ * The sequence number being stored/retrieved is the 12-bit
+ * base sequence number, not the 16-bit sequence number field.
+ * I.e., it's from 0..4095 inclusive, with no 4-bit padding for
+ * fragment numbers.
*/
#define M_SEQNO_SET(m, seqno) \
- ((m)->m_pkthdr.tso_segsz = (seqno))
-#define M_SEQNO_GET(m) ((m)->m_pkthdr.tso_segsz)
+ ((m)->m_pkthdr.tso_segsz = ((seqno) % IEEE80211_SEQ_RANGE))
+#define M_SEQNO_GET(m) (((m)->m_pkthdr.tso_segsz) % IEEE80211_SEQ_RANGE)
#define MTAG_ABI_NET80211 1132948340 /* net80211 ABI */
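A quick worked example of the new modulo behaviour, assuming the usual
IEEE80211_SEQ_RANGE of 4096 (2^12):

	M_SEQNO_SET(m, 4095);	/* stored as 4095 */
	M_SEQNO_SET(m, 4096);	/* wraps: stored and read back as 0 */
	seqno = M_SEQNO_GET(m);	/* always 0..4095 inclusive */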
diff --git a/sys/net80211/ieee80211_ht.c b/sys/net80211/ieee80211_ht.c
index c28f124648a1..a8a767785fce 100644
--- a/sys/net80211/ieee80211_ht.c
+++ b/sys/net80211/ieee80211_ht.c
@@ -167,7 +167,7 @@ static ieee80211_send_action_func ht_send_action_ba_delba;
static ieee80211_send_action_func ht_send_action_ht_txchwidth;
static void
-ieee80211_ht_init(void)
+ieee80211_ht_init(void *dummy __unused)
{
/*
* Setup HT parameters that depends on the clock frequency.
@@ -1476,7 +1476,7 @@ ieee80211_ht_wds_init(struct ieee80211_node *ni)
ni->ni_htcap |= IEEE80211_HTCAP_SHORTGI20;
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
ni->ni_htcap |= IEEE80211_HTCAP_CHWIDTH40;
- ni->ni_chw = IEEE80211_STA_RX_BW_40;
+ ni->ni_chw = NET80211_STA_RX_BW_40;
if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_ABOVE;
else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
@@ -1484,7 +1484,7 @@ ieee80211_ht_wds_init(struct ieee80211_node *ni)
if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40)
ni->ni_htcap |= IEEE80211_HTCAP_SHORTGI40;
} else {
- ni->ni_chw = IEEE80211_STA_RX_BW_20;
+ ni->ni_chw = NET80211_STA_RX_BW_20;
ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_NONE;
}
ni->ni_htctlchan = ni->ni_chan->ic_ieee;
@@ -1580,7 +1580,7 @@ ieee80211_ht_node_join(struct ieee80211_node *ni)
if (ni->ni_flags & IEEE80211_NODE_HT) {
vap->iv_ht_sta_assoc++;
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw == NET80211_STA_RX_BW_40)
vap->iv_ht40_sta_assoc++;
}
htinfo_update(vap);
@@ -1598,7 +1598,7 @@ ieee80211_ht_node_leave(struct ieee80211_node *ni)
if (ni->ni_flags & IEEE80211_NODE_HT) {
vap->iv_ht_sta_assoc--;
- if (ni->ni_chw == IEEE80211_STA_RX_BW_40)
+ if (ni->ni_chw == NET80211_STA_RX_BW_40)
vap->iv_ht40_sta_assoc--;
}
htinfo_update(vap);
@@ -1827,7 +1827,7 @@ htinfo_update_chw(struct ieee80211_node *ni, int htflags, int vhtflags)
done:
/* update node's (11n) tx channel width */
ni->ni_chw = IEEE80211_IS_CHAN_HT40(ni->ni_chan) ?
- IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+ NET80211_STA_RX_BW_40 : NET80211_STA_RX_BW_20;
return (ret);
}
@@ -1933,7 +1933,7 @@ ieee80211_vht_get_vhtflags(struct ieee80211_node *ni, uint32_t htflags)
{
#define _RETURN_CHAN_BITS(_cb) \
do { \
- IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, \
+ if (0) IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N, ni, \
"%s:%d: selected %b", __func__, __LINE__, \
(_cb), IEEE80211_CHAN_BITS); \
return (_cb); \
@@ -2689,11 +2689,11 @@ ht_recv_action_ht_txchwidth(struct ieee80211_node *ni,
* here.
*/
chw = (frm[2] == IEEE80211_A_HT_TXCHWIDTH_2040) ?
- IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+ NET80211_STA_RX_BW_40 : NET80211_STA_RX_BW_20;
IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
"%s: HT txchwidth, width %d%s (%s)", __func__,
- chw, ni->ni_chw != chw ? "*" : "", ieee80211_ni_chw_to_str(chw));
+ chw, ni->ni_chw != chw ? "*" : "", net80211_ni_chw_to_str(chw));
if (chw != ni->ni_chw) {
/* XXX does this need to change the ht40 station count? */
ni->ni_chw = chw;
@@ -3832,5 +3832,5 @@ ieee80211_ht_check_tx_ht40(const struct ieee80211_node *ni)
return (IEEE80211_IS_CHAN_HT40(bss_chan) &&
IEEE80211_IS_CHAN_HT40(ni->ni_chan) &&
- (ni->ni_chw == IEEE80211_STA_RX_BW_40));
+ (ni->ni_chw == NET80211_STA_RX_BW_40));
}
diff --git a/sys/net80211/ieee80211_hwmp.c b/sys/net80211/ieee80211_hwmp.c
index b69210768c54..084e67da13db 100644
--- a/sys/net80211/ieee80211_hwmp.c
+++ b/sys/net80211/ieee80211_hwmp.c
@@ -212,7 +212,7 @@ SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, inact,
"mesh route inactivity timeout (ms)");
static void
-ieee80211_hwmp_init(void)
+ieee80211_hwmp_init(void *dummy __unused)
{
/* Default values as per amendment */
ieee80211_hwmp_pathtimeout = msecs_to_ticks(5*1000);
diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c
index 3f0410a69e3c..7f2e8bdcb963 100644
--- a/sys/net80211/ieee80211_mesh.c
+++ b/sys/net80211/ieee80211_mesh.c
@@ -548,7 +548,7 @@ mesh_gatemode_cb(void *arg)
}
static void
-ieee80211_mesh_init(void)
+ieee80211_mesh_init(void *dummy __unused)
{
memset(mesh_proto_paths, 0, sizeof(mesh_proto_paths));
diff --git a/sys/net80211/ieee80211_node.c b/sys/net80211/ieee80211_node.c
index a201d1b278f0..49ba00299fee 100644
--- a/sys/net80211/ieee80211_node.c
+++ b/sys/net80211/ieee80211_node.c
@@ -2673,7 +2673,7 @@ ieee80211_dump_node(struct ieee80211_node_table *nt __unused,
ni->ni_htctlchan, ni->ni_ht2ndchan);
net80211_printf("\thtopmode %x htstbc %x htchw %d (%s)\n",
ni->ni_htopmode, ni->ni_htstbc,
- ni->ni_chw, ieee80211_ni_chw_to_str(ni->ni_chw));
+ ni->ni_chw, net80211_ni_chw_to_str(ni->ni_chw));
net80211_printf("\tvhtcap %x freq1 %d freq2 %d vhtbasicmcs %x\n",
ni->ni_vhtcap, (int) ni->ni_vht_chan1, (int) ni->ni_vht_chan2,
(int) ni->ni_vht_basicmcs);
@@ -2831,7 +2831,7 @@ ieee80211_node_join(struct ieee80211_node *ni, int resp)
ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "",
/* XXX update for VHT string */
ni->ni_flags & IEEE80211_NODE_HT ?
- (ni->ni_chw == IEEE80211_STA_RX_BW_40 ? ", HT40" : ", HT20") : "",
+ (ni->ni_chw == NET80211_STA_RX_BW_40 ? ", HT40" : ", HT20") : "",
ni->ni_flags & IEEE80211_NODE_AMPDU ? " (+AMPDU)" : "",
ni->ni_flags & IEEE80211_NODE_AMSDU ? " (+AMSDU)" : "",
ni->ni_flags & IEEE80211_NODE_MIMO_RTS ? " (+SMPS-DYN)" :
diff --git a/sys/net80211/ieee80211_node.h b/sys/net80211/ieee80211_node.h
index ef25fa0d7fdd..f1246dd12419 100644
--- a/sys/net80211/ieee80211_node.h
+++ b/sys/net80211/ieee80211_node.h
@@ -109,33 +109,33 @@ enum ieee80211_mesh_mlstate {
"\20\1IDLE\2OPENSNT\2OPENRCV\3CONFIRMRCV\4ESTABLISHED\5HOLDING"
/*
- * This structure is shared with LinuxKPI 802.11 code describing up-to
- * which channel width the station can receive.
+ * This enum was shared with the LinuxKPI enum ieee80211_sta_rx_bandwidth
+ * describing up to which channel width the station can receive.
* Rather than using hardcoded MHz values for the channel width use an enum with
* flags. This allows us to keep the uint8_t slot for ni_chw in
- * struct ieee80211_node and means we do not have to sync to the value for
- * LinuxKPI.
+ * struct ieee80211_node; it also means we do not have to keep the values
+ * in sync with LinuxKPI (just the names).
*
 * NB: BW_20 needs to be 0 and values need to be sorted! Cannot make it
* bitfield-alike for use with %b.
*/
-enum ieee80211_sta_rx_bw {
- IEEE80211_STA_RX_BW_20 = 0x00,
- IEEE80211_STA_RX_BW_40,
- IEEE80211_STA_RX_BW_80,
- IEEE80211_STA_RX_BW_160,
- IEEE80211_STA_RX_BW_320,
+enum net80211_sta_rx_bw {
+ NET80211_STA_RX_BW_20 = 0x00,
+ NET80211_STA_RX_BW_40,
+ NET80211_STA_RX_BW_80,
+ NET80211_STA_RX_BW_160,
+ NET80211_STA_RX_BW_320,
} __packed;
static inline const char *
-ieee80211_ni_chw_to_str(enum ieee80211_sta_rx_bw bw)
+net80211_ni_chw_to_str(enum net80211_sta_rx_bw bw)
{
switch (bw) {
- case IEEE80211_STA_RX_BW_20: return ("BW_20");
- case IEEE80211_STA_RX_BW_40: return ("BW_40");
- case IEEE80211_STA_RX_BW_80: return ("BW_80");
- case IEEE80211_STA_RX_BW_160: return ("BW_160");
- case IEEE80211_STA_RX_BW_320: return ("BW_320");
+ case NET80211_STA_RX_BW_20: return ("BW_20");
+ case NET80211_STA_RX_BW_40: return ("BW_40");
+ case NET80211_STA_RX_BW_80: return ("BW_80");
+ case NET80211_STA_RX_BW_160: return ("BW_160");
+ case NET80211_STA_RX_BW_320: return ("BW_320");
}
}
@@ -285,7 +285,7 @@ struct ieee80211_node {
uint8_t ni_ht2ndchan; /* HT 2nd channel */
uint8_t ni_htopmode; /* HT operating mode */
uint8_t ni_htstbc; /* HT */
- enum ieee80211_sta_rx_bw ni_chw; /* negotiated channel width */
+ enum net80211_sta_rx_bw ni_chw; /* negotiated channel width */
struct ieee80211_htrateset ni_htrates; /* negotiated ht rate set */
struct ieee80211_tx_ampdu ni_tx_ampdu[WME_NUM_TID];
struct ieee80211_rx_ampdu ni_rx_ampdu[WME_NUM_TID];
diff --git a/sys/net80211/ieee80211_output.c b/sys/net80211/ieee80211_output.c
index afe83ea0805c..116fc76a9ce1 100644
--- a/sys/net80211/ieee80211_output.c
+++ b/sys/net80211/ieee80211_output.c
@@ -974,7 +974,7 @@ ieee80211_send_setup(
/* NB: zero out i_seq field (for s/w encryption etc) */
*(uint16_t *)&wh->i_seq[0] = 0;
- } else
+ } else if (!IEEE80211_CONF_SEQNO_OFFLOAD(ni->ni_ic))
ieee80211_output_seqno_assign(ni, tid, m);
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
@@ -1082,6 +1082,12 @@ ieee80211_send_nulldata(struct ieee80211_node *ni)
uint8_t *frm;
int ret;
+ /* Don't send NULL frames if we've been configured not to do so. */
+ if ((ic->ic_flags_ext & IEEE80211_FEXT_NO_NULLDATA) != 0) {
+ ieee80211_node_decref(ni);
+ return (0);
+ }
+
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
ni, "block %s frame in CAC state", "null data");
@@ -1810,7 +1816,8 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
* and we don't need the TX lock held.
*/
if ((m->m_flags & M_AMPDU_MPDU) == 0) {
- ieee80211_output_seqno_assign(ni, tid, m);
+ if (!IEEE80211_CONF_SEQNO_OFFLOAD(ic))
+ ieee80211_output_seqno_assign(ni, tid, m);
} else {
/*
* NB: don't assign a sequence # to potential
@@ -1828,7 +1835,9 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
*(uint16_t *)wh->i_seq = 0;
}
} else {
- ieee80211_output_seqno_assign(ni, IEEE80211_NONQOS_TID, m);
+ if (!IEEE80211_CONF_SEQNO_OFFLOAD(ic))
+ ieee80211_output_seqno_assign(ni, IEEE80211_NONQOS_TID,
+ m);
/*
* XXX TODO: we shouldn't allow EAPOL, etc that would
* be forced to be non-QoS traffic to be A-MSDU encapsulated.
@@ -3856,6 +3865,8 @@ ieee80211_beacon_update(struct ieee80211_node *ni, struct mbuf *m, int mcast)
* If the driver identifies it does its own TX seqno management then
* we can skip this (and still not do the TX seqno.)
*/
+
+ /* TODO: IEEE80211_CONF_SEQNO_OFFLOAD() */
ieee80211_output_beacon_seqno_assign(ni, m);
/* XXX faster to recalculate entirely or just changes? */
diff --git a/sys/net80211/ieee80211_phy.c b/sys/net80211/ieee80211_phy.c
index eb96d74a2bd9..b4d9b16907d2 100644
--- a/sys/net80211/ieee80211_phy.c
+++ b/sys/net80211/ieee80211_phy.c
@@ -348,7 +348,7 @@ ieee80211_setup_ratetable(struct ieee80211_rate_table *rt)
/* Setup all rate tables */
static void
-ieee80211_phy_init(void)
+ieee80211_phy_init(void *dummy __unused)
{
static struct ieee80211_rate_table * const ratetables[] = {
&ieee80211_half_table,
@@ -658,26 +658,26 @@ static uint16_t ieee80211_vht_mcs_allowed_list_160[] = {
*
* See 802.11-2020 21.5 (Parameters for VHT-MCSs) for more details.
*
- * @param bw channel bandwidth, via enum ieee80211_sta_rx_bw
+ * @param bw channel bandwidth, via enum net80211_sta_rx_bw
* @param nss number of spatial streams, 1..8
* @returns bitmask of valid MCS rates from 0..9
*/
uint16_t
-ieee80211_phy_vht_get_mcs_mask(enum ieee80211_sta_rx_bw bw, uint8_t nss)
+ieee80211_phy_vht_get_mcs_mask(enum net80211_sta_rx_bw bw, uint8_t nss)
{
if (nss == 0 || nss > 8)
return (0);
switch (bw) {
- case IEEE80211_STA_RX_BW_20:
+ case NET80211_STA_RX_BW_20:
return (ieee80211_vht_mcs_allowed_list_20[nss - 1]);
- case IEEE80211_STA_RX_BW_40:
+ case NET80211_STA_RX_BW_40:
return (ieee80211_vht_mcs_allowed_list_40[nss - 1]);
- case IEEE80211_STA_RX_BW_80:
+ case NET80211_STA_RX_BW_80:
return (ieee80211_vht_mcs_allowed_list_80[nss - 1]);
- case IEEE80211_STA_RX_BW_160:
+ case NET80211_STA_RX_BW_160:
return (ieee80211_vht_mcs_allowed_list_160[nss - 1]);
- case IEEE80211_STA_RX_BW_320:
+ case NET80211_STA_RX_BW_320:
/* invalid for VHT */
return (0);
}
@@ -689,14 +689,14 @@ ieee80211_phy_vht_get_mcs_mask(enum ieee80211_sta_rx_bw bw, uint8_t nss)
*
* See 802.11-2020 21.5 (Parameters for VHT-MCSs) for more details.
*
- * @param bw channel bandwidth, via enum ieee80211_sta_rx_bw
+ * @param bw channel bandwidth, via enum net80211_sta_rx_bw
* @param nss number of spatial streams, 1..8
* @param mcs MCS rate, 0..9
* @retval true if the NSS / MCS / bandwidth combination is valid
* @retval false if the NSS / MCS / bandwidth combination is not valid
*/
bool
-ieee80211_phy_vht_validate_mcs(enum ieee80211_sta_rx_bw bw, uint8_t nss,
+ieee80211_phy_vht_validate_mcs(enum net80211_sta_rx_bw bw, uint8_t nss,
uint8_t mcs)
{
uint16_t mask;
@@ -737,7 +737,7 @@ static struct mcs_entry mcs_entries[] = {
/**
* @brief Calculate the bitrate of the given VHT MCS rate.
*
- * @param bw Channel bandwidth (enum ieee80211_sta_rx_bw)
+ * @param bw Channel bandwidth (enum net80211_sta_rx_bw)
* @param nss Number of spatial streams, 1..8
* @param mcs MCS, 0..9
* @param is_shortgi True if short guard-interval (400nS)
@@ -746,7 +746,7 @@ static struct mcs_entry mcs_entries[] = {
* @returns The bitrate in kbit/sec.
*/
uint32_t
-ieee80211_phy_vht_get_mcs_kbit(enum ieee80211_sta_rx_bw bw,
+ieee80211_phy_vht_get_mcs_kbit(enum net80211_sta_rx_bw bw,
uint8_t nss, uint8_t mcs, bool is_shortgi)
{
uint32_t sym_len, n_carriers;
@@ -773,16 +773,16 @@ ieee80211_phy_vht_get_mcs_kbit(enum ieee80211_sta_rx_bw bw,
* See 802.11-2020 Table 21-5 (Timing-related constraints.)
*/
switch (bw) {
- case IEEE80211_STA_RX_BW_20:
+ case NET80211_STA_RX_BW_20:
n_carriers = 52;
break;
- case IEEE80211_STA_RX_BW_40:
+ case NET80211_STA_RX_BW_40:
n_carriers = 108;
break;
- case IEEE80211_STA_RX_BW_80:
+ case NET80211_STA_RX_BW_80:
n_carriers = 234;
break;
- case IEEE80211_STA_RX_BW_160:
+ case NET80211_STA_RX_BW_160:
n_carriers = 468;
break;
default:
diff --git a/sys/net80211/ieee80211_phy.h b/sys/net80211/ieee80211_phy.h
index 749b082e34e9..391c8bfc5010 100644
--- a/sys/net80211/ieee80211_phy.h
+++ b/sys/net80211/ieee80211_phy.h
@@ -221,13 +221,13 @@ uint32_t ieee80211_compute_duration_ht(uint32_t frameLen,
uint16_t rate, int streams, int isht40,
int isShortGI);
-enum ieee80211_sta_rx_bw;
+enum net80211_sta_rx_bw;
-uint16_t ieee80211_phy_vht_get_mcs_mask(enum ieee80211_sta_rx_bw,
+uint16_t ieee80211_phy_vht_get_mcs_mask(enum net80211_sta_rx_bw,
uint8_t);
-bool ieee80211_phy_vht_validate_mcs(enum ieee80211_sta_rx_bw,
+bool ieee80211_phy_vht_validate_mcs(enum net80211_sta_rx_bw,
uint8_t, uint8_t);
-uint32_t ieee80211_phy_vht_get_mcs_kbit(enum ieee80211_sta_rx_bw,
+uint32_t ieee80211_phy_vht_get_mcs_kbit(enum net80211_sta_rx_bw,
uint8_t, uint8_t, bool);
#endif /* _KERNEL */
diff --git a/sys/net80211/ieee80211_proto.c b/sys/net80211/ieee80211_proto.c
index 0c161d98a55a..4918bf7d025f 100644
--- a/sys/net80211/ieee80211_proto.c
+++ b/sys/net80211/ieee80211_proto.c
@@ -459,7 +459,7 @@ static const struct ieee80211_authenticator auth_internal = {
* Setup internal authenticators once; they are never unregistered.
*/
static void
-ieee80211_auth_setup(void)
+ieee80211_auth_setup(void *dummy __unused)
{
ieee80211_authenticator_register(IEEE80211_AUTH_OPEN, &auth_internal);
ieee80211_authenticator_register(IEEE80211_AUTH_SHARED, &auth_internal);
diff --git a/sys/net80211/ieee80211_sta.c b/sys/net80211/ieee80211_sta.c
index 463a8b16773b..19e5ffe9a367 100644
--- a/sys/net80211/ieee80211_sta.c
+++ b/sys/net80211/ieee80211_sta.c
@@ -1934,7 +1934,7 @@ sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
vap->iv_flags&IEEE80211_F_USEPROT ? ", protection" : "",
ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "",
ni->ni_flags & IEEE80211_NODE_HT ?
- (ni->ni_chw == IEEE80211_STA_RX_BW_40 ? ", HT40" : ", HT20") : "",
+ (ni->ni_chw == NET80211_STA_RX_BW_40 ? ", HT40" : ", HT20") : "",
ni->ni_flags & IEEE80211_NODE_AMPDU ? " (+AMPDU)" : "",
ni->ni_flags & IEEE80211_NODE_AMSDU ? " (+AMSDU)" : "",
ni->ni_flags & IEEE80211_NODE_MIMO_RTS ? " (+SMPS-DYN)" :
diff --git a/sys/net80211/ieee80211_var.h b/sys/net80211/ieee80211_var.h
index a0293f814899..7b45261f59b1 100644
--- a/sys/net80211/ieee80211_var.h
+++ b/sys/net80211/ieee80211_var.h
@@ -700,13 +700,14 @@ MALLOC_DECLARE(M_80211_VAP);
#define IEEE80211_FEXT_QUIET_IE 0x00800000 /* STATUS: quiet IE in a beacon has been added */
#define IEEE80211_FEXT_UAPSD 0x01000000 /* CONF: enable U-APSD */
#define IEEE80211_FEXT_AMPDU_OFFLOAD 0x02000000 /* CONF: driver/fw handles AMPDU[-TX] itself */
+#define IEEE80211_FEXT_NO_NULLDATA 0x04000000 /* CONF: don't originate NULL data frames from net80211 */
#define IEEE80211_FEXT_BITS \
"\20\2INACT\3SCANWAIT\4BGSCAN\5WPS\6TSN\7SCANREQ\10RESUME" \
"\0114ADDR\12NONEPR_PR\13SWBMISS\14DFS\15DOTD\16STATEWAIT\17REINIT" \
"\20BPF\21WDSLEGACY\22PROBECHAN\23UNIQMAC\24SCAN_OFFLOAD\25SEQNO_OFFLOAD" \
"\26FRAG_OFFLOAD\27VHT" \
- "\30QUIET_IE\31UAPSD\32AMPDU_OFFLOAD"
+ "\30QUIET_IE\31UAPSD\32AMPDU_OFFLOAD\33NO_NULLDATA"
/* ic_flags_ht/iv_flags_ht */
#define IEEE80211_FHT_NONHT_PR 0x00000001 /* STATUS: non-HT sta present */
@@ -1012,7 +1013,7 @@ ieee80211_get_node_txpower(struct ieee80211_node *ni)
* Debugging facilities compiled in when IEEE80211_DEBUG is defined.
*
* The intent is that any problem in the net80211 layer can be
- * diagnosed by inspecting the statistics (dumped by the wlanstats
+ * diagnosed by inspecting the statistics (dumped by the wlanstat
* program) and/or the msgs generated by net80211. Messages are
* broken into functional classes and can be controlled with the
* wlandebug program. Certain of these msg groups are for facilities
diff --git a/sys/net80211/ieee80211_vht.c b/sys/net80211/ieee80211_vht.c
index de0b691d4d2a..095c4108c768 100644
--- a/sys/net80211/ieee80211_vht.c
+++ b/sys/net80211/ieee80211_vht.c
@@ -102,7 +102,7 @@ vht_send_action_placeholder(struct ieee80211_node *ni,
}
static void
-ieee80211_vht_init(void)
+ieee80211_vht_init(void *dummy __unused)
{
ieee80211_recv_action_register(IEEE80211_ACTION_CAT_VHT,
@@ -974,7 +974,7 @@ ieee80211_vht_check_tx_vht40(const struct ieee80211_node *ni)
return (IEEE80211_IS_CHAN_VHT40(bss_chan) &&
IEEE80211_IS_CHAN_VHT40(ni->ni_chan) &&
- (ni->ni_chw == IEEE80211_STA_RX_BW_40));
+ (ni->ni_chw == NET80211_STA_RX_BW_40));
}
/*
@@ -1003,7 +1003,7 @@ ieee80211_vht_check_tx_vht80(const struct ieee80211_node *ni)
*/
return (IEEE80211_IS_CHAN_VHT80(bss_chan) &&
IEEE80211_IS_CHAN_VHT80(ni->ni_chan) &&
- (ni->ni_chw != IEEE80211_STA_RX_BW_20));
+ (ni->ni_chw != NET80211_STA_RX_BW_20));
}
/*
@@ -1030,7 +1030,7 @@ ieee80211_vht_check_tx_vht160(const struct ieee80211_node *ni)
* If a HT TX width action frame sets it to 20MHz
* then reject doing 160MHz.
*/
- if (ni->ni_chw == IEEE80211_STA_RX_BW_20)
+ if (ni->ni_chw == NET80211_STA_RX_BW_20)
return (false);
if (IEEE80211_IS_CHAN_VHT160(bss_chan) &&
@@ -1062,19 +1062,19 @@ ieee80211_vht_check_tx_vht160(const struct ieee80211_node *ni)
*/
bool
ieee80211_vht_check_tx_bw(const struct ieee80211_node *ni,
- enum ieee80211_sta_rx_bw bw)
+ enum net80211_sta_rx_bw bw)
{
switch (bw) {
- case IEEE80211_STA_RX_BW_20:
+ case NET80211_STA_RX_BW_20:
return (ieee80211_vht_check_tx_vht(ni));
- case IEEE80211_STA_RX_BW_40:
+ case NET80211_STA_RX_BW_40:
return (ieee80211_vht_check_tx_vht40(ni));
- case IEEE80211_STA_RX_BW_80:
+ case NET80211_STA_RX_BW_80:
return (ieee80211_vht_check_tx_vht80(ni));
- case IEEE80211_STA_RX_BW_160:
+ case NET80211_STA_RX_BW_160:
return (ieee80211_vht_check_tx_vht160(ni));
- case IEEE80211_STA_RX_BW_320:
+ case NET80211_STA_RX_BW_320:
return (false);
default:
return (false);
@@ -1096,7 +1096,7 @@ ieee80211_vht_check_tx_bw(const struct ieee80211_node *ni,
*/
bool
ieee80211_vht_node_check_tx_valid_mcs(const struct ieee80211_node *ni,
- enum ieee80211_sta_rx_bw bw, uint8_t nss, uint8_t mcs)
+ enum net80211_sta_rx_bw bw, uint8_t nss, uint8_t mcs)
{
uint8_t mc;
diff --git a/sys/net80211/ieee80211_vht.h b/sys/net80211/ieee80211_vht.h
index a1529df4a85b..b9b19fbc6008 100644
--- a/sys/net80211/ieee80211_vht.h
+++ b/sys/net80211/ieee80211_vht.h
@@ -65,8 +65,8 @@ void ieee80211_vht_get_vhtinfo_ie(struct ieee80211_node *ni,
bool ieee80211_vht_check_tx_vht(const struct ieee80211_node *);
bool ieee80211_vht_check_tx_bw(const struct ieee80211_node *,
- enum ieee80211_sta_rx_bw);
+ enum net80211_sta_rx_bw);
bool ieee80211_vht_node_check_tx_valid_mcs(const struct ieee80211_node *,
- enum ieee80211_sta_rx_bw bw, uint8_t, uint8_t);
+ enum net80211_sta_rx_bw bw, uint8_t, uint8_t);
#endif /* _NET80211_IEEE80211_VHT_H_ */
diff --git a/sys/netgraph/bluetooth/include/ng_hci.h b/sys/netgraph/bluetooth/include/ng_hci.h
index 44a14e62f4ed..ce3291770740 100644
--- a/sys/netgraph/bluetooth/include/ng_hci.h
+++ b/sys/netgraph/bluetooth/include/ng_hci.h
@@ -448,7 +448,7 @@ typedef struct {
typedef bdaddr_t * bdaddr_p;
/* Any BD_ADDR. Note: This is actually 7 bytes (count '\0' terminator) */
-#define NG_HCI_BDADDR_ANY ((bdaddr_p) "\000\000\000\000\000\000")
+#define NG_HCI_BDADDR_ANY (&(const bdaddr_t){"\000\000\000\000\000\000"})
/* HCI status return parameter */
typedef struct {
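The new definition uses a C99 compound literal, which produces a properly
typed, read-only object instead of a string literal cast to bdaddr_p; a
hedged before/after sketch:

	/* Before: string literal cast to a (non-const) pointer type. */
	bdaddr_p any = (bdaddr_p)"\000\000\000\000\000\000";
	/* After: an anonymous const bdaddr_t object, no cast needed. */
	const bdaddr_t *any2 = &(const bdaddr_t){"\000\000\000\000\000\000"};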
diff --git a/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c b/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c
index 6c0a6fda1fb1..73a0897857b2 100644
--- a/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c
+++ b/sys/netgraph/bluetooth/socket/ng_btsocket_rfcomm.c
@@ -113,7 +113,7 @@ static void ng_btsocket_rfcomm_connect_cfm
static int ng_btsocket_rfcomm_session_create
(ng_btsocket_rfcomm_session_p *sp, struct socket *l2so,
- bdaddr_p src, bdaddr_p dst, struct thread *td);
+ const bdaddr_t *src, const bdaddr_t *dst, struct thread *td);
static int ng_btsocket_rfcomm_session_accept
(ng_btsocket_rfcomm_session_p s0);
static int ng_btsocket_rfcomm_session_connect
@@ -1250,7 +1250,7 @@ ng_btsocket_rfcomm_connect_cfm(ng_btsocket_rfcomm_session_p s)
static int
ng_btsocket_rfcomm_session_create(ng_btsocket_rfcomm_session_p *sp,
- struct socket *l2so, bdaddr_p src, bdaddr_p dst,
+ struct socket *l2so, const bdaddr_t *src, const bdaddr_t *dst,
struct thread *td)
{
ng_btsocket_rfcomm_session_p s = NULL;
diff --git a/sys/netgraph/netflow/netflow.c b/sys/netgraph/netflow/netflow.c
index 978d6fd0b54d..05c6062463be 100644
--- a/sys/netgraph/netflow/netflow.c
+++ b/sys/netgraph/netflow/netflow.c
@@ -960,7 +960,7 @@ struct ngnf_show_header *resp)
list_id = 0;
TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
- if (hsh->mtx.mtx_lock & MTX_CONTESTED) {
+ if (hsh->mtx.mtx_lock & MTX_WAITERS) {
resp->hash_id = i;
resp->list_id = list_id;
mtx_unlock(&hsh->mtx);
@@ -1111,7 +1111,7 @@ ng_netflow_expire(void *arg)
* Interrupt thread wants this entry!
* Quick! Quick! Bail out!
*/
- if (hsh->mtx.mtx_lock & MTX_CONTESTED)
+ if (hsh->mtx.mtx_lock & MTX_WAITERS)
break;
/*
@@ -1150,7 +1150,7 @@ ng_netflow_expire(void *arg)
* Interrupt thread wants this entry!
* Quick! Quick! Bail out!
*/
- if (hsh->mtx.mtx_lock & MTX_CONTESTED)
+ if (hsh->mtx.mtx_lock & MTX_WAITERS)
break;
/*
diff --git a/sys/netgraph/ng_device.c b/sys/netgraph/ng_device.c
index e4fcdfc635cb..582f877ff3ed 100644
--- a/sys/netgraph/ng_device.c
+++ b/sys/netgraph/ng_device.c
@@ -32,26 +32,27 @@
*/
#if 0
-#define DBG do { printf("ng_device: %s\n", __func__ ); } while (0)
+#define DBG do { printf("ng_device: %s\n", __func__); } while (0)
#else
#define DBG do {} while (0)
#endif
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/conf.h>
+#include <sys/epoch.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/poll.h>
#include <sys/proc.h>
-#include <sys/epoch.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/syslog.h>
-#include <sys/systm.h>
#include <sys/uio.h>
-#include <sys/vnode.h>
#include <net/ethernet.h>
#include <net/if.h>
@@ -135,9 +136,7 @@ static d_close_t ngdclose;
static d_open_t ngdopen;
static d_read_t ngdread;
static d_write_t ngdwrite;
-#if 0
static d_ioctl_t ngdioctl;
-#endif
static d_poll_t ngdpoll;
static struct cdevsw ngd_cdevsw = {
@@ -146,16 +145,16 @@ static struct cdevsw ngd_cdevsw = {
.d_close = ngdclose,
.d_read = ngdread,
.d_write = ngdwrite,
-#if 0
.d_ioctl = ngdioctl,
-#endif
.d_poll = ngdpoll,
.d_name = NG_DEVICE_DEVNAME,
};
-/******************************************************************************
+/*
+ *****************************************************************************
* Netgraph methods
- ******************************************************************************/
+ *****************************************************************************
+ */
/*
* Handle loading and unloading for this node type.
@@ -205,13 +204,13 @@ ng_device_constructor(node_p node)
priv->ngddev = make_dev(&ngd_cdevsw, priv->unit, UID_ROOT,
GID_WHEEL, 0600, NG_DEVICE_DEVNAME "%d", priv->unit);
- if(priv->ngddev == NULL) {
- printf("%s(): make_dev() failed\n",__func__);
+ if (priv->ngddev == NULL) {
+ printf("%s(): make_dev() failed\n", __func__);
mtx_destroy(&priv->ngd_mtx);
mtx_destroy(&priv->readq.ifq_mtx);
free_unr(ngd_unit, priv->unit);
free(priv, M_NETGRAPH);
- return(EINVAL);
+ return (EINVAL);
}
/* XXX: race here? */
priv->ngddev->si_drv1 = priv;
@@ -221,7 +220,7 @@ ng_device_constructor(node_p node)
log(LOG_WARNING, "%s: can't acquire netgraph name\n",
devtoname(priv->ngddev));
- return(0);
+ return (0);
}
/*
@@ -289,7 +288,7 @@ ng_device_newhook(node_p node, hook_p hook, const char *name)
priv->hook = hook;
- return(0);
+ return (0);
}
/*
@@ -322,7 +321,7 @@ ng_device_rcvdata(hook_p hook, item_p item)
}
mtx_unlock(&priv->ngd_mtx);
- return(0);
+ return (0);
}
/*
@@ -347,7 +346,7 @@ ng_device_disconnect(hook_p hook)
ng_rmnode_self(NG_HOOK_NODE(hook));
- return(0);
+ return (0);
}
/*
@@ -360,9 +359,11 @@ ng_device_shutdown(node_p node)
return (0);
}
-/******************************************************************************
+/*
+ *****************************************************************************
* Device methods
- ******************************************************************************/
+ *****************************************************************************
+ */
/*
* the device is opened
@@ -370,7 +371,7 @@ ng_device_shutdown(node_p node)
static int
ngdopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
- priv_p priv = (priv_p )dev->si_drv1;
+ priv_p priv = (priv_p)dev->si_drv1;
DBG;
@@ -378,7 +379,7 @@ ngdopen(struct cdev *dev, int flag, int mode, struct thread *td)
priv->flags |= NGDF_OPEN;
mtx_unlock(&priv->ngd_mtx);
- return(0);
+ return (0);
}
/*
@@ -387,14 +388,44 @@ ngdopen(struct cdev *dev, int flag, int mode, struct thread *td)
static int
ngdclose(struct cdev *dev, int flag, int mode, struct thread *td)
{
- priv_p priv = (priv_p )dev->si_drv1;
+ priv_p priv = (priv_p)dev->si_drv1;
DBG;
mtx_lock(&priv->ngd_mtx);
priv->flags &= ~NGDF_OPEN;
mtx_unlock(&priv->ngd_mtx);
- return(0);
+ return (0);
+}
+
+/*
+ * Process IOCTLs
+ *
+ * At this stage we only return success on FIONBIO, to allow setting the
+ * device as non-blocking, and on FIOASYNC only when async I/O is being
+ * disabled.
+ *
+ */
+static int
+ngdioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
+ struct thread *td)
+{
+ int error;
+
+ switch (cmd) {
+ case FIONBIO:
+ error = 0;
+ break;
+ case FIOASYNC:
+ if (*(int *)data != 0)
+ error = EINVAL;
+ else
+ error = 0;
+ break;
+ default:
+ error = ENOTTY;
+ }
+
+ return (error);
}
#if 0 /*
@@ -408,21 +439,22 @@ ngdclose(struct cdev *dev, int flag, int mode, struct thread *td)
*
*/
static int
-ngdioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
+ngdioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
{
struct ngd_softc *sc = &ngd_softc;
- struct ngd_connection * connection = NULL;
- struct ngd_connection * tmp;
+ struct ngd_connection *connection = NULL;
+ struct ngd_connection *tmp;
int error = 0;
struct ng_mesg *msg;
- struct ngd_param_s * datap;
+ struct ngd_param_s *datap;
DBG;
NG_MKMESSAGE(msg, NGM_DEVICE_COOKIE, cmd, sizeof(struct ngd_param_s),
M_NOWAIT);
if (msg == NULL) {
- printf("%s(): msg == NULL\n",__func__);
+ printf("%s(): msg == NULL\n", __func__);
goto nomsg;
}
@@ -431,12 +463,12 @@ ngdioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
datap->p = addr;
NG_SEND_MSG_HOOK(error, sc->node, msg, connection->active_hook, 0);
- if(error)
- printf("%s(): NG_SEND_MSG_HOOK error: %d\n",__func__,error);
+ if (error)
+ printf("%s(): NG_SEND_MSG_HOOK error: %d\n", __func__, error);
nomsg:
- return(0);
+ return (0);
}
#endif /* if 0 */
@@ -447,7 +479,7 @@ nomsg:
static int
ngdread(struct cdev *dev, struct uio *uio, int flag)
{
- priv_p priv = (priv_p )dev->si_drv1;
+ priv_p priv = (priv_p)dev->si_drv1;
struct mbuf *m;
int len, error = 0;
@@ -457,7 +489,7 @@ ngdread(struct cdev *dev, struct uio *uio, int flag)
do {
IF_DEQUEUE(&priv->readq, m);
if (m == NULL) {
- if (flag & IO_NDELAY)
+ if (flag & O_NONBLOCK)
return (EWOULDBLOCK);
mtx_lock(&priv->ngd_mtx);
priv->flags |= NGDF_RWAIT;
@@ -483,14 +515,14 @@ ngdread(struct cdev *dev, struct uio *uio, int flag)
/*
* This function is called when our device is written to.
- * We read the data from userland into mbuf chain and pass it to the remote hook.
- *
+ * We read the data from userland into an mbuf chain and pass it to the
+ * remote hook.
*/
static int
ngdwrite(struct cdev *dev, struct uio *uio, int flag)
{
struct epoch_tracker et;
- priv_p priv = (priv_p )dev->si_drv1;
+ priv_p priv = (priv_p)dev->si_drv1;
struct mbuf *m;
int error = 0;
@@ -520,7 +552,7 @@ ngdwrite(struct cdev *dev, struct uio *uio, int flag)
static int
ngdpoll(struct cdev *dev, int events, struct thread *td)
{
- priv_p priv = (priv_p )dev->si_drv1;
+ priv_p priv = (priv_p)dev->si_drv1;
int revents = 0;
if (events & (POLLIN | POLLRDNORM) &&
diff --git a/sys/netgraph/ng_nat.c b/sys/netgraph/ng_nat.c
index defbe817becd..8b82d777caeb 100644
--- a/sys/netgraph/ng_nat.c
+++ b/sys/netgraph/ng_nat.c
@@ -818,7 +818,8 @@ ng_nat_rcvdata(hook_p hook, item_p item )
if (ip->ip_v != IPVERSION)
goto send; /* other IP version, let it pass */
- if (m->m_pkthdr.len < ipofs + ntohs(ip->ip_len))
+ uint16_t ip_len = ntohs(ip->ip_len);
+ if (m->m_pkthdr.len < (ipofs + ip_len))
goto send; /* packet too short (i.e. fragmented or broken) */
/*
@@ -852,50 +853,68 @@ ng_nat_rcvdata(hook_p hook, item_p item )
if (rval == PKT_ALIAS_RESPOND)
m->m_flags |= M_SKIP_FIREWALL;
- m->m_pkthdr.len = m->m_len = ntohs(ip->ip_len) + ipofs;
- if ((ip->ip_off & htons(IP_OFFMASK)) == 0 &&
- ip->ip_p == IPPROTO_TCP) {
- struct tcphdr *th = (struct tcphdr *)((caddr_t)ip +
- (ip->ip_hl << 2));
+ /* Re-read just in case it has been updated */
+ ip_len = ntohs(ip->ip_len);
+ int new_m_len = ip_len + ipofs;
+ if (new_m_len > (m->m_len + M_TRAILINGSPACE(m))) {
/*
- * Here is our terrible HACK.
- *
- * Sometimes LibAlias edits contents of TCP packet.
- * In this case it needs to recompute full TCP
- * checksum. However, the problem is that LibAlias
- * doesn't have any idea about checksum offloading
- * in kernel. To workaround this, we do not do
- * checksumming in LibAlias, but only mark the
- * packets with TH_RES1 in the th_x2 field. If we
- * receive a marked packet, we calculate correct
- * checksum for it aware of offloading.
- *
- * Why do I do such a terrible hack instead of
- * recalculating checksum for each packet?
- * Because the previous checksum was not checked!
- * Recalculating checksums for EVERY packet will
- * hide ALL transmission errors. Yes, marked packets
- * still suffer from this problem. But, sigh, natd(8)
- * has this problem, too.
+ * This is just a safety guard rail to make sure LibAlias has not
+ * corrupted the IP packet somehow; it should probably become a
+ * KASSERT() at some point. Calling in_delayed_cksum() will parse
+ * the IP packet again and reliably panic if there is less data
+ * than the IP header declares; there may be other such places too.
*/
+ log(LOG_ERR, "ng_nat_rcvdata: outgoing packet corrupted, "
+ "not enough data: expected %d, available (%d - %d)\n",
+ ip_len, m->m_len + (int)M_TRAILINGSPACE(m), ipofs);
+ NG_FREE_ITEM(item);
+ return (ENXIO);
+ }
+
+ m->m_pkthdr.len = m->m_len = new_m_len;
- if (tcp_get_flags(th) & TH_RES1) {
- uint16_t ip_len = ntohs(ip->ip_len);
+ if ((ip->ip_off & htons(IP_OFFMASK)) != 0 || ip->ip_p != IPPROTO_TCP)
+ goto send;
- tcp_set_flags(th, tcp_get_flags(th) & ~TH_RES1);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP +
- ip_len - (ip->ip_hl << 2)));
+ uint16_t pl_offset = ip->ip_hl << 2;
+ struct tcphdr *th = (struct tcphdr *)((caddr_t)ip + pl_offset);
- if ((m->m_pkthdr.csum_flags & CSUM_TCP) == 0) {
- m->m_pkthdr.csum_data = offsetof(struct tcphdr,
- th_sum);
- in_delayed_cksum(m);
- }
- }
- }
+ /*
+ * Here is our terrible HACK.
+ *
+ * Sometimes LibAlias edits contents of TCP packet.
+ * In this case it needs to recompute full TCP
+ * checksum. However, the problem is that LibAlias
+ * doesn't have any idea about checksum offloading
+ * in kernel. To workaround this, we do not do
+ * checksumming in LibAlias, but only mark the
+ * packets with TH_RES1 in the th_x2 field. If we
+ * receive a marked packet, we calculate correct
+ * checksum for it aware of offloading.
+ *
+ * Why do I do such a terrible hack instead of
+ * recalculating checksum for each packet?
+ * Because the previous checksum was not checked!
+ * Recalculating checksums for EVERY packet will
+ * hide ALL transmission errors. Yes, marked packets
+ * still suffer from this problem. But, sigh, natd(8)
+ * has this problem, too.
+ */
+
+ if (!(tcp_get_flags(th) & TH_RES1))
+ goto send;
+
+ tcp_set_flags(th, tcp_get_flags(th) & ~TH_RES1);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(IPPROTO_TCP + ip_len - pl_offset));
+
+ if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
+ goto send;
+
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ in_delayed_cksum_o(m, ipofs);
send:
if (hook == priv->in)
diff --git a/sys/netgraph/ng_parse.c b/sys/netgraph/ng_parse.c
index 448ecc92f075..5e1a1bb47ac0 100644
--- a/sys/netgraph/ng_parse.c
+++ b/sys/netgraph/ng_parse.c
@@ -1199,14 +1199,14 @@ ng_parse_composite(const struct ng_parse_type *type, const char *s,
int *off, const u_char *const start, u_char *const buf, int *buflen,
const enum comptype ctype)
{
- const int num = ng_get_composite_len(type, start, buf, ctype);
int nextIndex = 0; /* next implicit array index */
u_int index; /* field or element index */
int *foff; /* field value offsets in string */
int align, len, blen, error = 0;
/* Initialize */
- if (num < 0)
+ const int num = ng_get_composite_len(type, start, buf, ctype);
+ if (num < 0 || num > INT_MAX / sizeof(*foff))
return (EINVAL);
foff = malloc(num * sizeof(*foff), M_NETGRAPH_PARSE, M_NOWAIT | M_ZERO);
if (foff == NULL) {
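The added bound check rejects counts that would overflow the
num * sizeof(*foff) multiplication before it reaches malloc(). An alternative
sketch - not what this change does - is mallocarray(9), which performs the
same overflow check internally:

	foff = mallocarray(num, sizeof(*foff), M_NETGRAPH_PARSE,
	    M_NOWAIT | M_ZERO);
	if (foff == NULL)
		return (ENOMEM);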
diff --git a/sys/netgraph/ng_tty.c b/sys/netgraph/ng_tty.c
index 6f8667c664bb..0e3230a66f66 100644
--- a/sys/netgraph/ng_tty.c
+++ b/sys/netgraph/ng_tty.c
@@ -427,7 +427,7 @@ ngt_rint_bypass(struct tty *tp, const void *buf, size_t len)
for (mb = m; mb != NULL; mb = mb->m_next) {
length = min(M_TRAILINGSPACE(mb), len - total);
- memcpy(mtod(m, char *), (const char *)buf + total, length);
+ memcpy(mtod(mb, char *), (const char *)buf + total, length);
mb->m_len = length;
total += length;
m->m_pkthdr.len += length;
@@ -485,9 +485,7 @@ ngt_rint(struct tty *tp, char c, int flags)
}
/* Add char to mbuf */
- *mtod(m, u_char *) = c;
- m->m_data++;
- m->m_len++;
+ *(u_char *)mtodo(m, m->m_len++) = c;
m->m_pkthdr.len++;
/* Ship off mbuf if it's time */
diff --git a/sys/netinet/cc/cc.c b/sys/netinet/cc/cc.c
index d85ad4e9f4fd..bc06616dbf93 100644
--- a/sys/netinet/cc/cc.c
+++ b/sys/netinet/cc/cc.c
@@ -271,7 +271,7 @@ cc_check_default(struct cc_algo *remove_cc)
* Initialise CC subsystem on system boot.
*/
static void
-cc_init(void)
+cc_init(void *dummy __unused)
{
CC_LIST_LOCK_INIT();
STAILQ_INIT(&cc_list);
@@ -659,7 +659,7 @@ cc_modevent(module_t mod, int event_type, void *data)
case MOD_SHUTDOWN:
break;
case MOD_QUIESCE:
- /* Stop any new assigments */
+ /* Stop any new assignments */
err = cc_stop_new_assignments(algo);
break;
case MOD_UNLOAD:
diff --git a/sys/netinet/icmp6.h b/sys/netinet/icmp6.h
index 7845b682f3e4..2ca5b3433e47 100644
--- a/sys/netinet/icmp6.h
+++ b/sys/netinet/icmp6.h
@@ -713,9 +713,6 @@ void icmp6_redirect_input(struct mbuf *, int);
void icmp6_redirect_output(struct mbuf *, struct nhop_object *);
int icmp6_ratelimit(const struct in6_addr *, const int, const int);
-struct ip6ctlparam;
-void icmp6_mtudisc_update(struct ip6ctlparam *, int);
-
/* XXX: is this the right place for these macros? */
#define icmp6_ifstat_inc(ifp, tag) \
do { \
diff --git a/sys/netinet/icmp_var.h b/sys/netinet/icmp_var.h
index d6b75e482e35..b39479565bd6 100644
--- a/sys/netinet/icmp_var.h
+++ b/sys/netinet/icmp_var.h
@@ -100,7 +100,6 @@ void kmod_icmpstat_inc(int statnum);
SYSCTL_DECL(_net_inet_icmp);
extern int badport_bandlim(int);
-#define BANDLIM_UNLIMITED -1
#define BANDLIM_ICMP_UNREACH 0
#define BANDLIM_ICMP_ECHO 1
#define BANDLIM_ICMP_TSTAMP 2
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index 963449d4b4b1..70a61dbf93a3 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -522,9 +522,16 @@ in_aifaddr_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp, struct ucred *cred
/*
* Check if bridge wants to allow adding addrs to member interfaces.
*/
- if (ifp->if_bridge && bridge_member_ifaddrs_p &&
- !bridge_member_ifaddrs_p())
- return (EINVAL);
+ if (ifp->if_bridge != NULL && ifp->if_type != IFT_GIF &&
+ bridge_member_ifaddrs_p != NULL) {
+ if (bridge_member_ifaddrs_p())
+ if_printf(ifp, "WARNING: Assigning an IP address to "
+ "an interface which is also a bridge member is "
+ "deprecated and will be unsupported in a future "
+ "release.\n");
+ else
+ return (EINVAL);
+ }
/*
* See whether address already exist.
@@ -1882,6 +1889,8 @@ in_domifdetach(struct ifnet *ifp, void *aux)
{
struct in_ifinfo *ii = (struct in_ifinfo *)aux;
+ MPASS(ifp->if_afdata[AF_INET] == NULL);
+
igmp_domifdetach(ifp);
lltable_free(ii->ii_llt);
free(ii, M_IFADDR);
diff --git a/sys/netinet/in_fib_algo.c b/sys/netinet/in_fib_algo.c
index 123dacb409e7..95621c300064 100644
--- a/sys/netinet/in_fib_algo.c
+++ b/sys/netinet/in_fib_algo.c
@@ -767,7 +767,7 @@ struct fib_lookup_module flm_radix4 = {
};
static void
-fib4_algo_init(void)
+fib4_algo_init(void *dummy __unused)
{
fib_module_register(&flm_bsearch4);
diff --git a/sys/netinet/in_mcast.c b/sys/netinet/in_mcast.c
index f5b20c49ffd2..ba112afbf002 100644
--- a/sys/netinet/in_mcast.c
+++ b/sys/netinet/in_mcast.c
@@ -159,9 +159,6 @@ static struct ip_moptions *
static int inp_get_source_filters(struct inpcb *, struct sockopt *);
static int inp_join_group(struct inpcb *, struct sockopt *);
static int inp_leave_group(struct inpcb *, struct sockopt *);
-static struct ifnet *
- inp_lookup_mcast_ifp(const struct inpcb *,
- const struct sockaddr_in *, const struct in_addr);
static int inp_block_unblock_source(struct inpcb *, struct sockopt *);
static int inp_set_multicast_if(struct inpcb *, struct sockopt *);
static int inp_set_source_filters(struct inpcb *, struct sockopt *);
@@ -1832,69 +1829,55 @@ inp_getmoptions(struct inpcb *inp, struct sockopt *sopt)
}
/*
- * Look up the ifnet to use for a multicast group membership,
- * given the IPv4 address of an interface, and the IPv4 group address.
- *
- * This routine exists to support legacy multicast applications
- * which do not understand that multicast memberships are scoped to
- * specific physical links in the networking stack, or which need
- * to join link-scope groups before IPv4 addresses are configured.
- *
- * Use this socket's current FIB number for any required FIB lookup.
- * If ina is INADDR_ANY, look up the group address in the unicast FIB,
- * and use its ifp; usually, this points to the default next-hop.
- *
- * If the FIB lookup fails, attempt to use the first non-loopback
- * interface with multicast capability in the system as a
- * last resort. The legacy IPv4 ASM API requires that we do
- * this in order to allow groups to be joined when the routing
- * table has not yet been populated during boot.
- *
- * Returns NULL if no ifp could be found, otherwise return referenced ifp.
+ * Look up the ifnet to join a multicast group membership via legacy
+ * IP_ADD_MEMBERSHIP or via more modern MCAST_JOIN_GROUP.
*
- * FUTURE: Implement IPv4 source-address selection.
+ * If the interface index was specified explicitly, just use it. If the
+ * address was specified (legacy), try to find matching interface. Else
+ * (index == 0 && no address) do a route lookup. If that fails for a modern
+ * MCAST_JOIN_GROUP return failure, for legacy IP_ADD_MEMBERSHIP find first
+ * multicast capable interface.
*/
static struct ifnet *
-inp_lookup_mcast_ifp(const struct inpcb *inp,
- const struct sockaddr_in *gsin, const struct in_addr ina)
+inp_lookup_mcast_ifp(const struct inpcb *inp, const struct in_addr maddr,
+const struct in_addr *ina, const u_int index)
{
struct ifnet *ifp;
struct nhop_object *nh;
NET_EPOCH_ASSERT();
- KASSERT(inp != NULL, ("%s: inp must not be NULL", __func__));
- KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__));
- KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)),
- ("%s: not multicast", __func__));
- ifp = NULL;
- if (!in_nullhost(ina)) {
- INADDR_TO_IFP(ina, ifp);
+ if (index != 0)
+ return (ifnet_byindex_ref(index));
+
+ if (ina != NULL && !in_nullhost(*ina)) {
+ INADDR_TO_IFP(*ina, ifp);
if (ifp != NULL)
if_ref(ifp);
- } else {
- nh = fib4_lookup(inp->inp_inc.inc_fibnum, gsin->sin_addr, 0, NHR_NONE, 0);
- if (nh != NULL) {
- ifp = nh->nh_ifp;
- if_ref(ifp);
- } else {
- struct in_ifaddr *ia;
- struct ifnet *mifp;
-
- mifp = NULL;
- CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
- mifp = ia->ia_ifp;
- if (!(mifp->if_flags & IFF_LOOPBACK) &&
- (mifp->if_flags & IFF_MULTICAST)) {
- ifp = mifp;
- if_ref(ifp);
- break;
- }
+ return (ifp);
+ }
+
+ nh = fib4_lookup(inp->inp_inc.inc_fibnum, maddr, 0, NHR_NONE, 0);
+ if (nh != NULL) {
+ ifp = nh->nh_ifp;
+ if_ref(ifp);
+ return (ifp);
+ }
+
+ if (ina != NULL) {
+ struct in_ifaddr *ia;
+
+ CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if (!(ia->ia_ifp->if_flags & IFF_LOOPBACK) &&
+ (ia->ia_ifp->if_flags & IFF_MULTICAST)) {
+ ifp = ia->ia_ifp;
+ if_ref(ifp);
+ return (ifp);
}
}
}
- return (ifp);
+ return (NULL);
}
/*
@@ -1926,13 +1909,13 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
switch (sopt->sopt_name) {
case IP_ADD_MEMBERSHIP: {
struct ip_mreqn mreqn;
+ bool mreq;
- if (sopt->sopt_valsize == sizeof(struct ip_mreqn))
- error = sooptcopyin(sopt, &mreqn,
- sizeof(struct ip_mreqn), sizeof(struct ip_mreqn));
- else
- error = sooptcopyin(sopt, &mreqn,
- sizeof(struct ip_mreq), sizeof(struct ip_mreq));
+ mreq = (sopt->sopt_valsize != sizeof(struct ip_mreqn));
+
+ error = sooptcopyin(sopt, &mreqn,
+ mreq ? sizeof(struct ip_mreq) : sizeof(struct ip_mreqn),
+ mreq ? sizeof(struct ip_mreq) : sizeof(struct ip_mreqn));
if (error)
return (error);
@@ -1943,12 +1926,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
return (EINVAL);
NET_EPOCH_ENTER(et);
- if (sopt->sopt_valsize == sizeof(struct ip_mreqn) &&
- mreqn.imr_ifindex != 0)
- ifp = ifnet_byindex_ref(mreqn.imr_ifindex);
- else
- ifp = inp_lookup_mcast_ifp(inp, &gsa->sin,
- mreqn.imr_address);
+ ifp = inp_lookup_mcast_ifp(inp, mreqn.imr_multiaddr,
+ mreq ? &mreqn.imr_address : NULL,
+ mreq ? 0 : mreqn.imr_ifindex);
NET_EPOCH_EXIT(et);
break;
}
@@ -1971,8 +1951,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
ssa->sin.sin_addr = mreqs.imr_sourceaddr;
NET_EPOCH_ENTER(et);
- ifp = inp_lookup_mcast_ifp(inp, &gsa->sin,
- mreqs.imr_interface);
+ ifp = inp_lookup_mcast_ifp(inp, mreqs.imr_multiaddr,
+ &mreqs.imr_interface, 0);
NET_EPOCH_EXIT(et);
CTR3(KTR_IGMPV3, "%s: imr_interface = 0x%08x, ifp = %p",
__func__, ntohl(mreqs.imr_interface.s_addr), ifp);
@@ -2013,7 +1993,8 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
return (EINVAL);
NET_EPOCH_ENTER(et);
- ifp = ifnet_byindex_ref(gsr.gsr_interface);
+ ifp = inp_lookup_mcast_ifp(inp, gsa->sin.sin_addr, NULL,
+ gsr.gsr_interface);
NET_EPOCH_EXIT(et);
if (ifp == NULL)
return (EADDRNOTAVAIL);
diff --git a/sys/netinet/ip_carp.c b/sys/netinet/ip_carp.c
index d3d7957cf087..4f553b9aac5e 100644
--- a/sys/netinet/ip_carp.c
+++ b/sys/netinet/ip_carp.c
@@ -1640,18 +1640,31 @@ carp_iamatch(struct ifaddr *ifa, uint8_t **enaddr)
static void
carp_send_na(struct carp_softc *sc)
{
- static struct in6_addr mcast = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
struct ifaddr *ifa;
- struct in6_addr *in6;
+ int flags;
+ /*
+ * Sending Unsolicited Neighbor Advertisements
+ *
+ * If the node is a router, we MUST set the Router flag to one.
+ * We set the Override flag to one and send the link-layer address
+ * option, so that neighboring nodes will install the new link-layer
+ * address.
+ */
+ flags = ND_NA_FLAG_OVERRIDE;
+ if (V_ip6_forwarding)
+ flags |= ND_NA_FLAG_ROUTER;
CARP_FOREACH_IFA(sc, ifa) {
if (ifa->ifa_addr->sa_family != AF_INET6)
continue;
-
- in6 = IFA_IN6(ifa);
- nd6_na_output(sc->sc_carpdev, &mcast, in6,
- ND_NA_FLAG_OVERRIDE, 1, NULL);
- DELAY(1000); /* XXX */
+ /*
+ * We use the unspecified address as the destination here to avoid
+ * scope initialization for each call.
+ * nd6_na_output() will use the all-nodes multicast address if the
+ * destination address is unspecified.
+ */
+ nd6_na_output(sc->sc_carpdev, &in6addr_any, IFA_IN6(ifa),
+ flags, ND6_NA_OPT_LLA | ND6_NA_CARP_MASTER, NULL);
+ DELAY(1000); /* RetransTimer */
}
}
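Per address, the loop above is equivalent to this single call (a sketch using only identifiers from the patch itself):
nd6_na_output(sc->sc_carpdev, &in6addr_any, IFA_IN6(ifa),
    ND_NA_FLAG_OVERRIDE | (V_ip6_forwarding ? ND_NA_FLAG_ROUTER : 0),
    ND6_NA_OPT_LLA | ND6_NA_CARP_MASTER, NULL);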
diff --git a/sys/netinet/ip_icmp.c b/sys/netinet/ip_icmp.c
index 71b75d18efd0..fc0848b2c944 100644
--- a/sys/netinet/ip_icmp.c
+++ b/sys/netinet/ip_icmp.c
@@ -391,7 +391,6 @@ stdreply: icmpelen = max(8, min(V_icmp_quotelen, ntohs(oip->ip_len) -
nip->ip_hl = 5;
nip->ip_p = IPPROTO_ICMP;
nip->ip_tos = 0;
- nip->ip_off = 0;
if (V_error_keeptags)
m_tag_copy_chain(m, n, M_NOWAIT);
@@ -872,6 +871,8 @@ match:
mac_netinet_icmp_replyinplace(m);
#endif
ip->ip_src = t;
+ /* ip->ip_tos will be reflected. */
+ ip->ip_off = htons(0);
ip->ip_ttl = V_ip_defttl;
if (optlen > 0) {
@@ -1181,7 +1182,7 @@ badport_bandlim(int which)
{
int64_t pps;
- if (V_icmplim == 0 || which == BANDLIM_UNLIMITED)
+ if (V_icmplim == 0)
return (0);
KASSERT(which >= 0 && which < BANDLIM_MAX,
diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c
index ec6ba8d92015..ef08b9cfd3d6 100644
--- a/sys/netinet/ip_output.c
+++ b/sys/netinet/ip_output.c
@@ -1044,14 +1044,14 @@ done:
}
void
-in_delayed_cksum(struct mbuf *m)
+in_delayed_cksum_o(struct mbuf *m, uint16_t iph_offset)
{
struct ip *ip;
struct udphdr *uh;
uint16_t cklen, csum, offset;
- ip = mtod(m, struct ip *);
- offset = ip->ip_hl << 2 ;
+ ip = (struct ip *)mtodo(m, iph_offset);
+ offset = iph_offset + (ip->ip_hl << 2);
if (m->m_pkthdr.csum_flags & CSUM_UDP) {
/* if udp header is not in the first mbuf copy udplen */
@@ -1078,6 +1078,13 @@ in_delayed_cksum(struct mbuf *m)
*(u_short *)mtodo(m, offset) = csum;
}
+void
+in_delayed_cksum(struct mbuf *m)
+{
+
+ in_delayed_cksum_o(m, 0);
+}
+
/*
* IP socket option processing.
*/
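The offset-aware variant exists for callers whose IPv4 header does not start at the front of the mbuf. A hypothetical use (encap_len is illustrative; it is not defined by this change):
if (m->m_pkthdr.csum_flags & CSUM_UDP)
	in_delayed_cksum_o(m, encap_len);	/* IP header encap_len bytes in */
/* in_delayed_cksum(m) remains the offset-0 wrapper, as above. */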
diff --git a/sys/netinet/ip_var.h b/sys/netinet/ip_var.h
index f782ebc53eb0..c113484079a3 100644
--- a/sys/netinet/ip_var.h
+++ b/sys/netinet/ip_var.h
@@ -271,6 +271,7 @@ VNET_DECLARE(struct pfil_head *, inet_local_pfil_head);
#define PFIL_INET_LOCAL_NAME "inet-local"
void in_delayed_cksum(struct mbuf *m);
+void in_delayed_cksum_o(struct mbuf *m, uint16_t o);
/* Hooks for ipfw, dummynet, divert etc. Most are declared in raw_ip.c */
/*
diff --git a/sys/netinet/sctp_lock_bsd.h b/sys/netinet/sctp_lock_bsd.h
index ec66be0cf371..a60983cb30e3 100644
--- a/sys/netinet/sctp_lock_bsd.h
+++ b/sys/netinet/sctp_lock_bsd.h
@@ -263,10 +263,10 @@
} while (0)
#define SCTP_INP_LOCK_CONTENDED(_inp) \
- ((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)
+ ((_inp)->inp_mtx.mtx_lock & MTX_WAITERS)
#define SCTP_INP_READ_CONTENDED(_inp) \
- ((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)
+ ((_inp)->inp_rdata_mtx.mtx_lock & MTX_WAITERS)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
@@ -337,7 +337,7 @@
} while (0)
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) \
- ((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)
+ ((_inp)->inp_create_mtx.mtx_lock & MTX_WAITERS)
/*
* For the majority of things (once we have found the association) we will
diff --git a/sys/netinet/siftr.c b/sys/netinet/siftr.c
index 374b5595fcbc..5b89ca026e85 100644
--- a/sys/netinet/siftr.c
+++ b/sys/netinet/siftr.c
@@ -519,7 +519,7 @@ siftr_pkt_manager_thread(void *arg)
if (log_buf != NULL) {
alq_post_flags(siftr_alq, log_buf, 0);
}
- for (;cnt > 0; cnt--) {
+ for (; cnt > 0; cnt--) {
pkt_node = STAILQ_FIRST(&tmp_pkt_queue);
STAILQ_REMOVE_HEAD(&tmp_pkt_queue, nodes);
free(pkt_node, M_SIFTR_PKTNODE);
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index dbc966acc56b..df639876a85c 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -192,17 +192,17 @@ SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
"Expire all entries on next purge run");
SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
+ CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
0, 0, sysctl_tcp_hc_list, "A",
"List of all hostcache entries");
SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, histo,
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
+ CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
0, 0, sysctl_tcp_hc_histo, "A",
"Print a histogram of hostcache hashbucket utilization");
SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
NULL, 0, sysctl_tcp_hc_purgenow, "I",
"Immediately purge all entries");
diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c
index b77ebc928809..c54459bb5f01 100644
--- a/sys/netinet/tcp_hpts.c
+++ b/sys/netinet/tcp_hpts.c
@@ -39,15 +39,14 @@
* First, and probably the main thing it's used for by Rack and BBR, it can
* be used to call tcp_output() of a transport stack at some time in the future.
* The normal way this is done is that tcp_output() of the stack schedules
- * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
- * slot is the time from now that the stack wants to be called but it
- * must be converted to tcp_hpts's notion of slot. This is done with
- * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
+ * itself to be called again by calling tcp_hpts_insert(tcpcb, usecs). The
+ * usecs argument is the time from now, given directly in microseconds,
+ * at which the stack wants to be called. So a typical
* call from the tcp_output() routine might look like:
*
- * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
+ * tcp_hpts_insert(tp, 550, NULL);
*
- * The above would schedule tcp_output() to be called in 550 useconds.
+ * The above would schedule tcp_output() to be called in 550 microseconds.
* Note that if using this mechanism the stack will want to add near
* its top a check to prevent unwanted calls (from user land or the
* arrival of incoming ack's). So it would add something like:
@@ -137,8 +136,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -151,27 +148,44 @@
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
+#include <netinet/tcp_hpts_internal.h>
#include <netinet/tcp_log_buf.h>
#ifdef tcp_offload
#include <netinet/tcp_offload.h>
#endif
-/*
- * The hpts uses a 102400 wheel. The wheel
- * defines the time in 10 usec increments (102400 x 10).
- * This gives a range of 10usec - 1024ms to place
- * an entry within. If the user requests more than
- * 1.024 second, a remaineder is attached and the hpts
- * when seeing the remainder will re-insert the
- * inpcb forward in time from where it is until
- * the remainder is zero.
- */
+/* Global instance for TCP HPTS */
+struct tcp_hptsi *tcp_hptsi_pace;
+
+/* Default function table for production use. */
+const struct tcp_hptsi_funcs tcp_hptsi_default_funcs = {
+ .microuptime = microuptime,
+ .swi_add = swi_add,
+ .swi_remove = swi_remove,
+ .swi_sched = swi_sched,
+ .intr_event_bind = intr_event_bind,
+ .intr_event_bind_ithread_cpuset = intr_event_bind_ithread_cpuset,
+ .callout_init = callout_init,
+ .callout_reset_sbt_on = callout_reset_sbt_on,
+ ._callout_stop_safe = _callout_stop_safe,
+};
-#define NUM_OF_HPTSI_SLOTS 102400
+#ifdef TCP_HPTS_KTEST
+#define microuptime pace->funcs->microuptime
+#define swi_add pace->funcs->swi_add
+#define swi_remove pace->funcs->swi_remove
+#define swi_sched pace->funcs->swi_sched
+#define intr_event_bind pace->funcs->intr_event_bind
+#define intr_event_bind_ithread_cpuset pace->funcs->intr_event_bind_ithread_cpuset
+#define callout_init pace->funcs->callout_init
+#define callout_reset_sbt_on pace->funcs->callout_reset_sbt_on
+#define _callout_stop_safe pace->funcs->_callout_stop_safe
+#endif
-/* The number of connections after which the dynamic sleep logic kicks in. */
-#define DEFAULT_CONNECTION_THRESHOLD 100
+static MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
+
+static void tcp_hpts_thread(void *ctx);
/*
* When using the hpts, a TCP stack must make sure
@@ -206,87 +220,22 @@
*
* When we are in the "new" mode i.e. conn_cnt > conn_cnt_thresh
* then we do a dynamic adjustment on the time we sleep.
- * Our threshold is if the lateness of the first client served (in ticks) is
+ * Our threshold is if the lateness of the first client served (in slots) is
* greater than or equal to slots_indicate_more_sleep (10ms
- * or 10000 ticks). If we were that late, the actual sleep time
- * is adjusted down by 50%. If the ticks_ran is less than
- * slots_indicate_more_sleep (100 ticks or 1000usecs).
+ * or 10000 slots). If we were that late, the actual sleep time
+ * is adjusted down by 50%. If the slots_ran is less than
+ * slots_indicate_more_sleep (100 slots or 1000usecs).
*
*/
-/* Each hpts has its own p_mtx which is used for locking */
-#define HPTS_MTX_ASSERT(hpts) mtx_assert(&(hpts)->p_mtx, MA_OWNED)
-#define HPTS_LOCK(hpts) mtx_lock(&(hpts)->p_mtx)
-#define HPTS_TRYLOCK(hpts) mtx_trylock(&(hpts)->p_mtx)
-#define HPTS_UNLOCK(hpts) mtx_unlock(&(hpts)->p_mtx)
-struct tcp_hpts_entry {
- /* Cache line 0x00 */
- struct mtx p_mtx; /* Mutex for hpts */
- struct timeval p_mysleep; /* Our min sleep time */
- uint64_t syscall_cnt;
- uint64_t sleeping; /* What the actual sleep was (if sleeping) */
- uint16_t p_hpts_active; /* Flag that says hpts is awake */
- uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
- uint32_t p_curtick; /* Tick in 10 us the hpts is going to */
- uint32_t p_runningslot; /* Current tick we are at if we are running */
- uint32_t p_prev_slot; /* Previous slot we were on */
- uint32_t p_cur_slot; /* Current slot in wheel hpts is draining */
- uint32_t p_nxt_slot; /* The next slot outside the current range of
- * slots that the hpts is running on. */
- int32_t p_on_queue_cnt; /* Count on queue in this hpts */
- uint32_t p_lasttick; /* Last tick before the current one */
- uint8_t p_direct_wake :1, /* boolean */
- p_on_min_sleep:1, /* boolean */
- p_hpts_wake_scheduled:1, /* boolean */
- hit_callout_thresh:1,
- p_avail:4;
- uint8_t p_fill[3]; /* Fill to 32 bits */
- /* Cache line 0x40 */
- struct hptsh {
- TAILQ_HEAD(, tcpcb) head;
- uint32_t count;
- uint32_t gencnt;
- } *p_hptss; /* Hptsi wheel */
- uint32_t p_hpts_sleep_time; /* Current sleep interval having a max
- * of 255ms */
- uint32_t overidden_sleep; /* what was overrided by min-sleep for logging */
- uint32_t saved_lasttick; /* for logging */
- uint32_t saved_curtick; /* for logging */
- uint32_t saved_curslot; /* for logging */
- uint32_t saved_prev_slot; /* for logging */
- uint32_t p_delayed_by; /* How much were we delayed by */
- /* Cache line 0x80 */
- struct sysctl_ctx_list hpts_ctx;
- struct sysctl_oid *hpts_root;
- struct intr_event *ie;
- void *ie_cookie;
- uint16_t p_num; /* The hpts number one per cpu */
- uint16_t p_cpu; /* The hpts CPU */
- /* There is extra space in here */
- /* Cache line 0x100 */
- struct callout co __aligned(CACHE_LINE_SIZE);
-} __aligned(CACHE_LINE_SIZE);
-
-static struct tcp_hptsi {
- struct cpu_group **grps;
- struct tcp_hpts_entry **rp_ent; /* Array of hptss */
- uint32_t *cts_last_ran;
- uint32_t grp_cnt;
- uint32_t rp_num_hptss; /* Number of hpts threads */
-} tcp_pace;
-
-static MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
-static int tcp_bind_threads = 1;
+int tcp_bind_threads = 1;
#else
-static int tcp_bind_threads = 2;
+int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static int hpts_does_tp_logging = 0;
-
-static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout);
-static void tcp_hpts_thread(void *ctx);
-
+static int32_t tcp_hpts_precision = 120;
int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THRESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
@@ -297,23 +246,6 @@ SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"TCP Hpts statistics");
-#define timersub(tvp, uvp, vvp) \
- do { \
- (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
- (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
- if ((vvp)->tv_usec < 0) { \
- (vvp)->tv_sec--; \
- (vvp)->tv_usec += 1000000; \
- } \
- } while (0)
-
-static int32_t tcp_hpts_precision = 120;
-
-static struct hpts_domain_info {
- int count;
- int cpu[MAXCPU];
-} hpts_domains[MAXMEMDOM];
-
counter_u64_t hpts_hopelessly_behind;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
@@ -461,14 +393,14 @@ SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
&tcp_hpts_no_wake_over_thresh, 0,
"When we are over the threshold on the pacer do we prohibit wakeups?");
-static uint16_t
-hpts_random_cpu(void)
+uint16_t
+tcp_hptsi_random_cpu(struct tcp_hptsi *pace)
{
uint16_t cpuid;
uint32_t ran;
ran = arc4random();
- cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
+ cpuid = (((ran & 0xffff) % mp_ncpus) % pace->rp_num_hptss);
return (cpuid);
}
@@ -489,13 +421,11 @@ tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
log.u_bbr.flex2 = hpts->p_cur_slot;
log.u_bbr.flex3 = hpts->p_prev_slot;
log.u_bbr.flex4 = idx;
- log.u_bbr.flex5 = hpts->p_curtick;
log.u_bbr.flex6 = hpts->p_on_queue_cnt;
log.u_bbr.flex7 = hpts->p_cpu;
log.u_bbr.flex8 = (uint8_t)from_callout;
log.u_bbr.inflight = slots_to_run;
log.u_bbr.applimited = hpts->overidden_sleep;
- log.u_bbr.delivered = hpts->saved_curtick;
log.u_bbr.timeStamp = tcp_tv_to_usec(tv);
log.u_bbr.epoch = hpts->saved_curslot;
log.u_bbr.lt_epoch = hpts->saved_prev_slot;
@@ -512,11 +442,67 @@ tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
}
}
+/*
+ * Timeout handler for the HPTS sleep callout. It immediately schedules the SWI
+ * for the HPTS entry to run.
+ */
static void
-tcp_wakehpts(struct tcp_hpts_entry *hpts)
+tcp_hpts_sleep_timeout(void *arg)
{
+#ifdef TCP_HPTS_KTEST
+ struct tcp_hptsi *pace;
+#endif
+ struct tcp_hpts_entry *hpts;
+
+ hpts = (struct tcp_hpts_entry *)arg;
+#ifdef TCP_HPTS_KTEST
+ pace = hpts->p_hptsi;
+#endif
+ swi_sched(hpts->ie_cookie, 0);
+}
+
+/*
+ * Reset the HPTS callout timer with the provided timeval. Returns the results
+ * of the callout_reset_sbt_on() function.
+ */
+static int
+tcp_hpts_sleep(struct tcp_hpts_entry *hpts, struct timeval *tv)
+{
+#ifdef TCP_HPTS_KTEST
+ struct tcp_hptsi *pace;
+#endif
+ sbintime_t sb;
+
+#ifdef TCP_HPTS_KTEST
+ pace = hpts->p_hptsi;
+#endif
+
+ /* Store off to make visible the actual sleep time */
+ hpts->sleeping = tv->tv_usec;
+
+ sb = tvtosbt(*tv);
+ return (callout_reset_sbt_on(
+ &hpts->co, sb, 0, tcp_hpts_sleep_timeout, hpts, hpts->p_cpu,
+ (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision))));
+}
+
+/*
+ * Schedules the SWI for the HPTS entry to run, if not already scheduled or
+ * running.
+ */
+void
+tcp_hpts_wake(struct tcp_hpts_entry *hpts)
+{
+#ifdef TCP_HPTS_KTEST
+ struct tcp_hptsi *pace;
+#endif
+
HPTS_MTX_ASSERT(hpts);
+#ifdef TCP_HPTS_KTEST
+ pace = hpts->p_hptsi;
+#endif
+
if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
hpts->p_direct_wake = 0;
return;
@@ -528,15 +514,6 @@ tcp_wakehpts(struct tcp_hpts_entry *hpts)
}
static void
-hpts_timeout_swi(void *arg)
-{
- struct tcp_hpts_entry *hpts;
-
- hpts = (struct tcp_hpts_entry *)arg;
- swi_sched(hpts->ie_cookie, 0);
-}
-
-static void
tcp_hpts_insert_internal(struct tcpcb *tp, struct tcp_hpts_entry *hpts)
{
struct inpcb *inp = tptoinpcb(tp);
@@ -564,13 +541,13 @@ tcp_hpts_insert_internal(struct tcpcb *tp, struct tcp_hpts_entry *hpts)
}
static struct tcp_hpts_entry *
-tcp_hpts_lock(struct tcpcb *tp)
+tcp_hpts_lock(struct tcp_hptsi *pace, struct tcpcb *tp)
{
struct tcp_hpts_entry *hpts;
INP_LOCK_ASSERT(tptoinpcb(tp));
- hpts = tcp_pace.rp_ent[tp->t_hpts_cpu];
+ hpts = pace->rp_ent[tp->t_hpts_cpu];
HPTS_LOCK(hpts);
return (hpts);
@@ -597,11 +574,10 @@ tcp_hpts_release(struct tcpcb *tp)
* and has never received a first packet.
*/
void
-tcp_hpts_init(struct tcpcb *tp)
+__tcp_hpts_init(struct tcp_hptsi *pace, struct tcpcb *tp)
{
-
if (__predict_true(tp->t_hpts_cpu == HPTS_CPU_NONE)) {
- tp->t_hpts_cpu = hpts_random_cpu();
+ tp->t_hpts_cpu = tcp_hptsi_random_cpu(pace);
MPASS(!(tp->t_flags2 & TF2_HPTS_CPU_SET));
}
}
@@ -613,14 +589,14 @@ tcp_hpts_init(struct tcpcb *tp)
* INP lock and then get the hpts lock.
*/
void
-tcp_hpts_remove(struct tcpcb *tp)
+__tcp_hpts_remove(struct tcp_hptsi *pace, struct tcpcb *tp)
{
struct tcp_hpts_entry *hpts;
struct hptsh *hptsh;
INP_WLOCK_ASSERT(tptoinpcb(tp));
- hpts = tcp_hpts_lock(tp);
+ hpts = tcp_hpts_lock(pace, tp);
if (tp->t_in_hpts == IHPTS_ONQUEUE) {
hptsh = &hpts->p_hptss[tp->t_hpts_slot];
tp->t_hpts_request = 0;
@@ -664,23 +640,19 @@ hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
/*
* Given a slot on the wheel, what slot
- * is that plus ticks out?
+ * is that plus slots out?
*/
- KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
+ KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid slot %u not on wheel", wheel_slot));
return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}
static inline int
-tick_to_wheel(uint32_t cts_in_wticks)
+cts_to_wheel(uint32_t cts)
{
/*
- * Given a timestamp in ticks (so by
- * default to get it to a real time one
- * would multiply by 10.. i.e the number
- * of ticks in a slot) map it to our limited
- * space wheel.
+ * Given a timestamp in useconds map it to our limited space wheel.
*/
- return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
+ return ((cts / HPTS_USECS_PER_SLOT) % NUM_OF_HPTSI_SLOTS);
}
static inline int
@@ -723,7 +695,7 @@ max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *
if ((hpts->p_hpts_active == 1) &&
(hpts->p_wheel_complete == 0)) {
end_slot = hpts->p_runningslot;
- /* Back up one tick */
+ /* Back up one slot */
if (end_slot == 0)
end_slot = NUM_OF_HPTSI_SLOTS - 1;
else
@@ -736,7 +708,7 @@ max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *
* not active, or we have
* completed the pass over
* the wheel, we can use the
- * prev tick and subtract one from it. This puts us
+ * prev slot and subtract one from it. This puts us
* as far out as possible on the wheel.
*/
end_slot = hpts->p_prev_slot;
@@ -749,7 +721,7 @@ max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *
/*
* Now we have close to the full wheel left minus the
* time it has been since the pacer went to sleep. Note
- * that wheel_tick, passed in, should be the current time
+ * that wheel_slot, passed in, should be the current time
* from the perspective of the caller, mapped to the wheel.
*/
if (hpts->p_prev_slot != wheel_slot)
@@ -826,7 +798,7 @@ max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *
#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
- uint32_t hptsslot, int line)
+ uint32_t hptsslot)
{
/*
* Sanity checks for the pacer with invariants
@@ -857,12 +829,13 @@ check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
}
#endif
-uint32_t
-tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_diag *diag)
+void
+__tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
+ struct hpts_diag *diag)
{
struct tcp_hpts_entry *hpts;
struct timeval tv;
- uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
+ uint32_t slot, wheel_cts, last_slot, need_new_to = 0;
int32_t wheel_slot, maxslots;
bool need_wakeup = false;
@@ -871,11 +844,13 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
MPASS(!(tp->t_in_hpts == IHPTS_ONQUEUE));
/*
+ * Convert microseconds to slots for internal use.
* We now return the next-slot the hpts will be on, beyond its
* current run (if up) or where it was when it stopped if it is
* sleeping.
*/
- hpts = tcp_hpts_lock(tp);
+ slot = HPTS_USEC_TO_SLOTS(usecs);
+ hpts = tcp_hpts_lock(pace, tp);
microuptime(&tv);
if (diag) {
memset(diag, 0, sizeof(struct hpts_diag));
@@ -884,8 +859,6 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
diag->p_runningslot = hpts->p_runningslot;
diag->p_nxt_slot = hpts->p_nxt_slot;
diag->p_cur_slot = hpts->p_cur_slot;
- diag->p_curtick = hpts->p_curtick;
- diag->p_lasttick = hpts->p_lasttick;
diag->slot_req = slot;
diag->p_on_min_sleep = hpts->p_on_min_sleep;
diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
@@ -912,17 +885,15 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
* timeout is not 1.
*/
hpts->p_direct_wake = 1;
- tcp_wakehpts(hpts);
+ tcp_hpts_wake(hpts);
}
- slot_on = hpts->p_nxt_slot;
HPTS_UNLOCK(hpts);
- return (slot_on);
+ return;
}
- /* Get the current time relative to the wheel */
- wheel_cts = tcp_tv_to_hpts_slot(&tv);
- /* Map it onto the wheel */
- wheel_slot = tick_to_wheel(wheel_cts);
+ /* Get the current time stamp and map it onto the wheel */
+ wheel_cts = tcp_tv_to_usec(&tv);
+ wheel_slot = cts_to_wheel(wheel_cts);
/* Now what's the max we can place it at? */
maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
if (diag) {
@@ -954,11 +925,11 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
tp->t_hpts_slot = last_slot;
}
if (diag) {
- diag->slot_remaining = tp->t_hpts_request;
+ diag->time_remaining = tp->t_hpts_request;
diag->inp_hptsslot = tp->t_hpts_slot;
}
#ifdef INVARIANTS
- check_if_slot_would_be_wrong(hpts, tp, tp->t_hpts_slot, line);
+ check_if_slot_would_be_wrong(hpts, tp, tp->t_hpts_slot);
#endif
if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
tcp_hpts_insert_internal(tp, hpts);
@@ -997,12 +968,12 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
}
/*
* Now how far is the hpts sleeping to? if active is 1, its
- * up and ticking we do nothing, otherwise we may need to
+ * up and running we do nothing, otherwise we may need to
* reschedule its callout if need_new_to is set from above.
*/
if (need_wakeup) {
hpts->p_direct_wake = 1;
- tcp_wakehpts(hpts);
+ tcp_hpts_wake(hpts);
if (diag) {
diag->need_new_to = 0;
diag->co_ret = 0xffff0000;
@@ -1010,7 +981,6 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
} else if (need_new_to) {
int32_t co_ret;
struct timeval tv;
- sbintime_t sb;
tv.tv_sec = 0;
tv.tv_usec = 0;
@@ -1018,24 +988,18 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
tv.tv_sec++;
need_new_to -= HPTS_USEC_IN_SEC;
}
- tv.tv_usec = need_new_to;
- sb = tvtosbt(tv);
- co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
- hpts_timeout_swi, hpts, hpts->p_cpu,
- (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
+ tv.tv_usec = need_new_to; /* XXX: Why is this sleeping over the max? */
+ co_ret = tcp_hpts_sleep(hpts, &tv);
if (diag) {
diag->need_new_to = need_new_to;
diag->co_ret = co_ret;
}
}
- slot_on = hpts->p_nxt_slot;
HPTS_UNLOCK(hpts);
-
- return (slot_on);
}
static uint16_t
-hpts_cpuid(struct tcpcb *tp, int *failed)
+hpts_cpuid(struct tcp_hptsi *pace, struct tcpcb *tp, int *failed)
{
struct inpcb *inp = tptoinpcb(tp);
u_int cpuid;
@@ -1062,7 +1026,7 @@ hpts_cpuid(struct tcpcb *tp, int *failed)
#ifdef RSS
cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
if (cpuid == NETISR_CPUID_NONE)
- return (hpts_random_cpu());
+ return (tcp_hptsi_random_cpu(pace));
else
return (cpuid);
#endif
@@ -1073,7 +1037,7 @@ hpts_cpuid(struct tcpcb *tp, int *failed)
*/
if (inp->inp_flowtype == M_HASHTYPE_NONE) {
counter_u64_add(cpu_uses_random, 1);
- return (hpts_random_cpu());
+ return (tcp_hptsi_random_cpu(pace));
}
/*
* Hash to a thread based on the flowid. If we are using numa,
@@ -1088,7 +1052,7 @@ hpts_cpuid(struct tcpcb *tp, int *failed)
#ifdef NUMA
} else {
/* Hash into the cpu's that use that domain */
- di = &hpts_domains[inp->inp_numa_domain];
+ di = &pace->domains[inp->inp_numa_domain];
cpuid = di->cpu[inp->inp_flowid % di->count];
}
#endif
@@ -1120,9 +1084,16 @@ tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
}
}
-static int32_t
+static bool
+tcp_hpts_different_slots(uint32_t cts, uint32_t cts_last_run)
+{
+ return ((cts / HPTS_USECS_PER_SLOT) != (cts_last_run / HPTS_USECS_PER_SLOT));
+}
+
+int32_t
tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout)
{
+ struct tcp_hptsi *pace;
struct tcpcb *tp;
struct timeval tv;
int32_t slots_to_run, i, error;
@@ -1132,6 +1103,7 @@ tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout)
int32_t wrap_loop_cnt = 0;
int32_t slot_pos_of_endpoint = 0;
int32_t orig_exit_slot;
+ uint32_t cts, cts_last_run;
bool completed_measure, seen_endpoint;
completed_measure = false;
@@ -1139,32 +1111,34 @@ tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout)
HPTS_MTX_ASSERT(hpts);
NET_EPOCH_ASSERT();
+
+ pace = hpts->p_hptsi;
+ MPASS(pace != NULL);
+
/* record previous info for any logging */
- hpts->saved_lasttick = hpts->p_lasttick;
- hpts->saved_curtick = hpts->p_curtick;
hpts->saved_curslot = hpts->p_cur_slot;
hpts->saved_prev_slot = hpts->p_prev_slot;
- hpts->p_lasttick = hpts->p_curtick;
- hpts->p_curtick = tcp_gethptstick(&tv);
- tcp_pace.cts_last_ran[hpts->p_num] = tcp_tv_to_usec(&tv);
- orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
+ microuptime(&tv);
+ cts_last_run = pace->cts_last_ran[hpts->p_cpu];
+ pace->cts_last_ran[hpts->p_cpu] = cts = tcp_tv_to_usec(&tv);
+
+ orig_exit_slot = hpts->p_cur_slot = cts_to_wheel(cts);
if ((hpts->p_on_queue_cnt == 0) ||
- (hpts->p_lasttick == hpts->p_curtick)) {
+ !tcp_hpts_different_slots(cts, cts_last_run)) {
/*
- * No time has yet passed,
- * or nothing to do.
+ * Not enough time has yet passed or nothing to do.
*/
hpts->p_prev_slot = hpts->p_cur_slot;
- hpts->p_lasttick = hpts->p_curtick;
goto no_run;
}
again:
hpts->p_wheel_complete = 0;
HPTS_MTX_ASSERT(hpts);
slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
- if (((hpts->p_curtick - hpts->p_lasttick) > (NUM_OF_HPTSI_SLOTS - 1)) &&
- (hpts->p_on_queue_cnt != 0)) {
+ if ((hpts->p_on_queue_cnt != 0) &&
+ ((cts - cts_last_run) >
+ ((NUM_OF_HPTSI_SLOTS-1) * HPTS_USECS_PER_SLOT))) {
/*
* Wheel wrap is occurring, basically we
* are behind and the distance between
@@ -1240,7 +1214,7 @@ again:
uint32_t runningslot;
/*
- * Calculate our delay, if there are no extra ticks there
+ * Calculate our delay, if there are no extra slots there
* was not any (i.e. if slots_to_run == 1, no delay).
*/
hpts->p_delayed_by = (slots_to_run - (i + 1)) *
@@ -1393,7 +1367,7 @@ again:
* gets added to the hpts (not this one)
* :-)
*/
- tcp_set_hpts(tp);
+ __tcp_set_hpts(pace, tp);
}
CURVNET_SET(inp->inp_vnet);
/* Lets do any logging that we might want to */
@@ -1452,10 +1426,12 @@ no_one:
hpts->p_delayed_by = 0;
/*
* Check to see if we took an excess amount of time and need to run
- * more ticks (if we did not hit eno-bufs).
+ * more slots (if we did not hit eno-bufs).
*/
hpts->p_prev_slot = hpts->p_cur_slot;
- hpts->p_lasttick = hpts->p_curtick;
+ microuptime(&tv);
+ cts_last_run = cts;
+ cts = tcp_tv_to_usec(&tv);
if (!from_callout || (loop_cnt > max_pacer_loops)) {
/*
* Something is seriously slow; we have
@@ -1467,7 +1443,7 @@ no_one:
* can never catch up :(
*
* We will just lie to this thread
- * and let it thing p_curtick is
+ * and let it think p_curslot is
* correct. When it next awakens
* it will find itself further behind.
*/
@@ -1475,20 +1451,19 @@ no_one:
counter_u64_add(hpts_hopelessly_behind, 1);
goto no_run;
}
- hpts->p_curtick = tcp_gethptstick(&tv);
- hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
+
+ hpts->p_cur_slot = cts_to_wheel(cts);
if (!seen_endpoint) {
/* We saw no endpoint but we may be looping */
orig_exit_slot = hpts->p_cur_slot;
}
- if ((wrap_loop_cnt < 2) &&
- (hpts->p_lasttick != hpts->p_curtick)) {
+ if ((wrap_loop_cnt < 2) && tcp_hpts_different_slots(cts, cts_last_run)) {
counter_u64_add(hpts_loops, 1);
loop_cnt++;
goto again;
}
no_run:
- tcp_pace.cts_last_ran[hpts->p_num] = tcp_tv_to_usec(&tv);
+ pace->cts_last_ran[hpts->p_cpu] = cts;
/*
* Set flag to tell that we are done for
* any slot input that happens during
@@ -1496,25 +1471,36 @@ no_run:
*/
hpts->p_wheel_complete = 1;
/*
- * Now did we spend too long running input and need to run more ticks?
- * Note that if wrap_loop_cnt < 2 then we should have the conditions
- * in the KASSERT's true. But if the wheel is behind i.e. wrap_loop_cnt
- * is greater than 2, then the condtion most likely are *not* true.
- * Also if we are called not from the callout, we don't run the wheel
- * multiple times so the slots may not align either.
- */
- KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
- (wrap_loop_cnt >= 2) || !from_callout),
- ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
- hpts->p_prev_slot, hpts->p_cur_slot));
- KASSERT(((hpts->p_lasttick == hpts->p_curtick)
- || (wrap_loop_cnt >= 2) || !from_callout),
- ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
- hpts->p_lasttick, hpts->p_curtick));
- if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
- hpts->p_curtick = tcp_gethptstick(&tv);
+ * If enough time has elapsed that we should be processing the next
+ * slot(s), then we should have kept running and not marked the wheel as
+ * complete.
+ *
+ * But there are several other conditions where we would have stopped
+ * processing, so the prev/cur slots and cts variables won't match.
+ * These conditions are:
+ *
+ * - Calls not from callouts don't run multiple times
+ * - The wheel is empty
+ * - We've processed more than max_pacer_loops times
+ * - We've wrapped more than 2 times
+ *
+ * This assert catches when the logic above has violated this design.
+ *
+ */
+ KASSERT((!from_callout || (hpts->p_on_queue_cnt == 0) ||
+ (loop_cnt > max_pacer_loops) || (wrap_loop_cnt >= 2) ||
+ ((hpts->p_prev_slot == hpts->p_cur_slot) &&
+ !tcp_hpts_different_slots(cts, cts_last_run))),
+ ("H:%p Shouldn't be done! prev_slot:%u, cur_slot:%u, "
+ "cts_last_run:%u, cts:%u, loop_cnt:%d, wrap_loop_cnt:%d",
+ hpts, hpts->p_prev_slot, hpts->p_cur_slot,
+ cts_last_run, cts, loop_cnt, wrap_loop_cnt));
+
+ if (from_callout && tcp_hpts_different_slots(cts, cts_last_run)) {
+ microuptime(&tv);
+ cts = tcp_tv_to_usec(&tv);
+ hpts->p_cur_slot = cts_to_wheel(cts);
counter_u64_add(hpts_loops, 1);
- hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
goto again;
}
@@ -1528,16 +1514,16 @@ no_run:
}
void
-tcp_set_hpts(struct tcpcb *tp)
+__tcp_set_hpts(struct tcp_hptsi *pace, struct tcpcb *tp)
{
struct tcp_hpts_entry *hpts;
int failed;
INP_WLOCK_ASSERT(tptoinpcb(tp));
- hpts = tcp_hpts_lock(tp);
+ hpts = tcp_hpts_lock(pace, tp);
if (tp->t_in_hpts == IHPTS_NONE && !(tp->t_flags2 & TF2_HPTS_CPU_SET)) {
- tp->t_hpts_cpu = hpts_cpuid(tp, &failed);
+ tp->t_hpts_cpu = hpts_cpuid(pace, tp, &failed);
if (failed == 0)
tp->t_flags2 |= TF2_HPTS_CPU_SET;
}
@@ -1545,33 +1531,35 @@ tcp_set_hpts(struct tcpcb *tp)
}
static struct tcp_hpts_entry *
-tcp_choose_hpts_to_run(void)
+tcp_choose_hpts_to_run(struct tcp_hptsi *pace)
{
+ struct timeval tv;
int i, oldest_idx, start, end;
uint32_t cts, time_since_ran, calc;
- cts = tcp_get_usecs(NULL);
+ microuptime(&tv);
+ cts = tcp_tv_to_usec(&tv);
time_since_ran = 0;
/* Default is all one group */
start = 0;
- end = tcp_pace.rp_num_hptss;
+ end = pace->rp_num_hptss;
/*
* If we have more than one L3 group figure out which one
* this CPU is in.
*/
- if (tcp_pace.grp_cnt > 1) {
- for (i = 0; i < tcp_pace.grp_cnt; i++) {
- if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
- start = tcp_pace.grps[i]->cg_first;
- end = (tcp_pace.grps[i]->cg_last + 1);
+ if (pace->grp_cnt > 1) {
+ for (i = 0; i < pace->grp_cnt; i++) {
+ if (CPU_ISSET(curcpu, &pace->grps[i]->cg_mask)) {
+ start = pace->grps[i]->cg_first;
+ end = (pace->grps[i]->cg_last + 1);
break;
}
}
}
oldest_idx = -1;
for (i = start; i < end; i++) {
- if (TSTMP_GT(cts, tcp_pace.cts_last_ran[i]))
- calc = cts - tcp_pace.cts_last_ran[i];
+ if (TSTMP_GT(cts, pace->cts_last_ran[i]))
+ calc = cts - pace->cts_last_ran[i];
else
calc = 0;
if (calc > time_since_ran) {
@@ -1580,9 +1568,9 @@ tcp_choose_hpts_to_run(void)
}
}
if (oldest_idx >= 0)
- return(tcp_pace.rp_ent[oldest_idx]);
+ return(pace->rp_ent[oldest_idx]);
else
- return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
+ return(pace->rp_ent[(curcpu % pace->rp_num_hptss)]);
}
static void
@@ -1590,9 +1578,9 @@ __tcp_run_hpts(void)
{
struct epoch_tracker et;
struct tcp_hpts_entry *hpts;
- int ticks_ran;
+ int slots_ran;
- hpts = tcp_choose_hpts_to_run();
+ hpts = tcp_choose_hpts_to_run(tcp_hptsi_pace);
if (hpts->p_hpts_active) {
/* Already active */
@@ -1608,12 +1596,11 @@ __tcp_run_hpts(void)
hpts->syscall_cnt++;
counter_u64_add(hpts_direct_call, 1);
hpts->p_hpts_active = 1;
- ticks_ran = tcp_hptsi(hpts, false);
+ slots_ran = tcp_hptsi(hpts, false);
/* We may want to adjust the sleep values here */
if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
- if (ticks_ran > slots_indicate_less_sleep) {
+ if (slots_ran > slots_indicate_less_sleep) {
struct timeval tv;
- sbintime_t sb;
hpts->p_mysleep.tv_usec /= 2;
if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
@@ -1637,13 +1624,8 @@ __tcp_run_hpts(void)
* the dynamic value and set the on_min_sleep
* flag so we will not be awoken.
*/
- sb = tvtosbt(tv);
- /* Store off to make visible the actual sleep time */
- hpts->sleeping = tv.tv_usec;
- callout_reset_sbt_on(&hpts->co, sb, 0,
- hpts_timeout_swi, hpts, hpts->p_cpu,
- (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
- } else if (ticks_ran < slots_indicate_more_sleep) {
+ (void)tcp_hpts_sleep(hpts, &tv);
+ } else if (slots_ran < slots_indicate_more_sleep) {
/* For the further sleep, don't reschedule hpts */
hpts->p_mysleep.tv_usec *= 2;
if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
@@ -1660,17 +1642,22 @@ out_with_mtx:
static void
tcp_hpts_thread(void *ctx)
{
+#ifdef TCP_HPTS_KTEST
+ struct tcp_hptsi *pace;
+#endif
struct tcp_hpts_entry *hpts;
struct epoch_tracker et;
struct timeval tv;
- sbintime_t sb;
- int ticks_ran;
+ int slots_ran;
hpts = (struct tcp_hpts_entry *)ctx;
+#ifdef TCP_HPTS_KTEST
+ pace = hpts->p_hptsi;
+#endif
HPTS_LOCK(hpts);
if (hpts->p_direct_wake) {
/* Signaled by input or output with low occupancy count. */
- callout_stop(&hpts->co);
+ _callout_stop_safe(&hpts->co, 0);
counter_u64_add(hpts_direct_awakening, 1);
} else {
/* Timed out, the normal case. */
@@ -1723,7 +1710,7 @@ tcp_hpts_thread(void *ctx)
}
hpts->sleeping = 0;
hpts->p_hpts_active = 1;
- ticks_ran = tcp_hptsi(hpts, true);
+ slots_ran = tcp_hptsi(hpts, true);
tv.tv_sec = 0;
tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
if ((hpts->p_on_queue_cnt > conn_cnt_thresh) && (hpts->hit_callout_thresh == 0)) {
@@ -1739,11 +1726,11 @@ tcp_hpts_thread(void *ctx)
* Only adjust sleep time if we were
* called from the callout i.e. direct_wake == 0.
*/
- if (ticks_ran < slots_indicate_more_sleep) {
+ if (slots_ran < slots_indicate_more_sleep) {
hpts->p_mysleep.tv_usec *= 2;
if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
hpts->p_mysleep.tv_usec = dynamic_max_sleep;
- } else if (ticks_ran > slots_indicate_less_sleep) {
+ } else if (slots_ran > slots_indicate_less_sleep) {
hpts->p_mysleep.tv_usec /= 2;
if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
hpts->p_mysleep.tv_usec = dynamic_min_sleep;
@@ -1799,18 +1786,11 @@ tcp_hpts_thread(void *ctx)
hpts->p_hpts_active = 0;
back_to_sleep:
hpts->p_direct_wake = 0;
- sb = tvtosbt(tv);
- /* Store off to make visible the actual sleep time */
- hpts->sleeping = tv.tv_usec;
- callout_reset_sbt_on(&hpts->co, sb, 0,
- hpts_timeout_swi, hpts, hpts->p_cpu,
- (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
+ (void)tcp_hpts_sleep(hpts, &tv);
NET_EPOCH_EXIT(et);
HPTS_UNLOCK(hpts);
}
-#undef timersub
-
static int32_t
hpts_count_level(struct cpu_group *cg)
{
@@ -1847,57 +1827,63 @@ hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_g
}
}
-static void
-tcp_hpts_mod_load(void)
+/*
+ * Initialize a tcp_hptsi structure. This performs the core initialization
+ * without starting threads.
+ */
+struct tcp_hptsi*
+tcp_hptsi_create(const struct tcp_hptsi_funcs *funcs, bool enable_sysctl)
{
+ struct tcp_hptsi *pace;
struct cpu_group *cpu_top;
- int32_t error __diagused;
- int32_t i, j, bound = 0, created = 0;
+ uint32_t i, j, cts;
+ int32_t count;
size_t sz, asz;
struct timeval tv;
- sbintime_t sb;
struct tcp_hpts_entry *hpts;
- struct pcpu *pc;
char unit[16];
uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
- int count, domain;
+ KASSERT(funcs != NULL, ("funcs is NULL"));
+
+ /* Allocate the main structure */
+ pace = malloc(sizeof(struct tcp_hptsi), M_TCPHPTS, M_WAITOK | M_ZERO);
+
+ pace->funcs = funcs;
+
+ /* Setup CPU topology information */
#ifdef SMP
cpu_top = smp_topo();
#else
cpu_top = NULL;
#endif
- tcp_pace.rp_num_hptss = ncpus;
- hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
- hpts_loops = counter_u64_alloc(M_WAITOK);
- back_tosleep = counter_u64_alloc(M_WAITOK);
- combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
- wheel_wrap = counter_u64_alloc(M_WAITOK);
- hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
- hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
- hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
- hpts_direct_call = counter_u64_alloc(M_WAITOK);
- cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
- cpu_uses_random = counter_u64_alloc(M_WAITOK);
+ pace->rp_num_hptss = ncpus;
- sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
- tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
- sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
- tcp_pace.cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
- tcp_pace.grp_cnt = 0;
+ /* Allocate hpts entry array */
+ sz = (pace->rp_num_hptss * sizeof(struct tcp_hpts_entry *));
+ pace->rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
+
+ /* Allocate timestamp tracking array */
+ sz = (sizeof(uint32_t) * pace->rp_num_hptss);
+ pace->cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
+
+ /* Setup CPU groups */
if (cpu_top == NULL) {
- tcp_pace.grp_cnt = 1;
+ pace->grp_cnt = 1;
} else {
/* Find out how many cache level 3 domains we have */
count = 0;
- tcp_pace.grp_cnt = hpts_count_level(cpu_top);
- if (tcp_pace.grp_cnt == 0) {
- tcp_pace.grp_cnt = 1;
+ pace->grp_cnt = hpts_count_level(cpu_top);
+ if (pace->grp_cnt == 0) {
+ pace->grp_cnt = 1;
}
- sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
- tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
+ sz = (pace->grp_cnt * sizeof(struct cpu_group *));
+ pace->grps = malloc(sz, M_TCPHPTS, M_WAITOK);
/* Now populate the groups */
- if (tcp_pace.grp_cnt == 1) {
+ if (pace->grp_cnt == 1) {
/*
* All we need is the top level all cpu's are in
* the same cache so when we use grp[0]->cg_mask
@@ -1905,193 +1891,290 @@ tcp_hpts_mod_load(void)
* all cpu's in it. The level here is probably
* zero which is ok.
*/
- tcp_pace.grps[0] = cpu_top;
+ pace->grps[0] = cpu_top;
} else {
/*
* Here we must find all the level three cache domains
* and setup our pointers to them.
*/
count = 0;
- hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
+ hpts_gather_grps(pace->grps, &count, pace->grp_cnt, cpu_top);
}
}
+
+ /* Cache the current time for initializing the hpts entries */
+ microuptime(&tv);
+ cts = tcp_tv_to_usec(&tv);
+
+ /* Initialize each hpts entry */
asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
- for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
- tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ pace->rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
M_TCPHPTS, M_WAITOK | M_ZERO);
- tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
- hpts = tcp_pace.rp_ent[i];
- /*
- * Init all the hpts structures that are not specifically
- * zero'd by the allocations. Also lets attach them to the
- * appropriate sysctl block as well.
- */
- mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
- "hpts", MTX_DEF | MTX_DUPOK);
- for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
- TAILQ_INIT(&hpts->p_hptss[j].head);
- hpts->p_hptss[j].count = 0;
- hpts->p_hptss[j].gencnt = 0;
- }
- sysctl_ctx_init(&hpts->hpts_ctx);
- sprintf(unit, "%d", i);
- hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
- SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
- OID_AUTO,
- unit,
- CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
- "");
- SYSCTL_ADD_INT(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "out_qcnt", CTLFLAG_RD,
- &hpts->p_on_queue_cnt, 0,
- "Count TCB's awaiting output processing");
- SYSCTL_ADD_U16(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "active", CTLFLAG_RD,
- &hpts->p_hpts_active, 0,
- "Is the hpts active");
- SYSCTL_ADD_UINT(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "curslot", CTLFLAG_RD,
- &hpts->p_cur_slot, 0,
- "What the current running pacers goal");
- SYSCTL_ADD_UINT(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "runtick", CTLFLAG_RD,
- &hpts->p_runningslot, 0,
- "What the running pacers current slot is");
- SYSCTL_ADD_UINT(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "curtick", CTLFLAG_RD,
- &hpts->p_curtick, 0,
- "What the running pacers last tick mapped to the wheel was");
- SYSCTL_ADD_UINT(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "lastran", CTLFLAG_RD,
- &tcp_pace.cts_last_ran[i], 0,
- "The last usec tick that this hpts ran");
- SYSCTL_ADD_LONG(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
- &hpts->p_mysleep.tv_usec,
- "What the running pacers is using for p_mysleep.tv_usec");
- SYSCTL_ADD_U64(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "now_sleeping", CTLFLAG_RD,
- &hpts->sleeping, 0,
- "What the running pacers is actually sleeping for");
- SYSCTL_ADD_U64(&hpts->hpts_ctx,
- SYSCTL_CHILDREN(hpts->hpts_root),
- OID_AUTO, "syscall_cnt", CTLFLAG_RD,
- &hpts->syscall_cnt, 0,
- "How many times we had syscalls on this hpts");
+ pace->rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS,
+ M_WAITOK | M_ZERO);
+ hpts = pace->rp_ent[i];
+ /* Basic initialization */
hpts->p_hpts_sleep_time = hpts_sleep_max;
- hpts->p_num = i;
- hpts->p_curtick = tcp_gethptstick(&tv);
- tcp_pace.cts_last_ran[i] = tcp_tv_to_usec(&tv);
- hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
- hpts->p_cpu = 0xffff;
+ hpts->p_cpu = i;
+ pace->cts_last_ran[i] = cts;
+ hpts->p_cur_slot = cts_to_wheel(cts);
+ hpts->p_prev_slot = hpts->p_cur_slot;
hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
callout_init(&hpts->co, 1);
+ hpts->p_hptsi = pace;
+ mtx_init(&hpts->p_mtx, "tcp_hpts_lck", "hpts",
+ MTX_DEF | MTX_DUPOK);
+ for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
+ TAILQ_INIT(&hpts->p_hptss[j].head);
+ }
+
+ /* Setup SYSCTL if requested */
+ if (enable_sysctl) {
+ sysctl_ctx_init(&hpts->hpts_ctx);
+ sprintf(unit, "%d", i);
+ hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
+ SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
+ OID_AUTO,
+ unit,
+ CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "");
+ SYSCTL_ADD_INT(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "out_qcnt", CTLFLAG_RD,
+ &hpts->p_on_queue_cnt, 0,
+ "Count TCB's awaiting output processing");
+ SYSCTL_ADD_U16(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "active", CTLFLAG_RD,
+ &hpts->p_hpts_active, 0,
+ "Is the hpts active");
+ SYSCTL_ADD_UINT(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "curslot", CTLFLAG_RD,
+ &hpts->p_cur_slot, 0,
+ "What the current running pacers goal");
+ SYSCTL_ADD_UINT(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "runslot", CTLFLAG_RD,
+ &hpts->p_runningslot, 0,
+ "What the running pacers current slot is");
+ SYSCTL_ADD_UINT(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "lastran", CTLFLAG_RD,
+ &pace->cts_last_ran[i], 0,
+ "The last usec timestamp that this hpts ran");
+ SYSCTL_ADD_LONG(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
+ &hpts->p_mysleep.tv_usec,
+ "What the running pacers is using for p_mysleep.tv_usec");
+ SYSCTL_ADD_U64(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "now_sleeping", CTLFLAG_RD,
+ &hpts->sleeping, 0,
+ "What the running pacers is actually sleeping for");
+ SYSCTL_ADD_U64(&hpts->hpts_ctx,
+ SYSCTL_CHILDREN(hpts->hpts_root),
+ OID_AUTO, "syscall_cnt", CTLFLAG_RD,
+ &hpts->syscall_cnt, 0,
+ "How many times we had syscalls on this hpts");
+ }
}
- /* Don't try to bind to NUMA domains if we don't have any */
- if (vm_ndomains == 1 && tcp_bind_threads == 2)
- tcp_bind_threads = 0;
- /*
- * Now lets start ithreads to handle the hptss.
- */
- for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
- hpts = tcp_pace.rp_ent[i];
- hpts->p_cpu = i;
+ return (pace);
+}
+
+/*
+ * Create threads for a tcp_hptsi structure and start timers for the current
+ * (minimum) sleep interval.
+ */
+void
+tcp_hptsi_start(struct tcp_hptsi *pace)
+{
+ struct tcp_hpts_entry *hpts;
+ struct pcpu *pc;
+ struct timeval tv;
+ uint32_t i, j;
+ int count, domain;
+ int error __diagused;
+
+ KASSERT(pace != NULL, ("tcp_hptsi_start: pace is NULL"));
+
+ /* Start threads for each hpts entry */
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ hpts = pace->rp_ent[i];
+
+ KASSERT(hpts->ie_cookie == NULL,
+ ("tcp_hptsi_start: hpts[%d]->ie_cookie is not NULL", i));
error = swi_add(&hpts->ie, "hpts",
tcp_hpts_thread, (void *)hpts,
SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
KASSERT(error == 0,
- ("Can't add hpts:%p i:%d err:%d",
- hpts, i, error));
- created++;
- hpts->p_mysleep.tv_sec = 0;
- hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
+ ("Can't add hpts:%p i:%d err:%d", hpts, i, error));
+
if (tcp_bind_threads == 1) {
- if (intr_event_bind(hpts->ie, i) == 0)
- bound++;
+ (void)intr_event_bind(hpts->ie, i);
} else if (tcp_bind_threads == 2) {
/* Find the group for this CPU (i) and bind into it */
- for (j = 0; j < tcp_pace.grp_cnt; j++) {
- if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
+ for (j = 0; j < pace->grp_cnt; j++) {
+ if (CPU_ISSET(i, &pace->grps[j]->cg_mask)) {
if (intr_event_bind_ithread_cpuset(hpts->ie,
- &tcp_pace.grps[j]->cg_mask) == 0) {
- bound++;
+ &pace->grps[j]->cg_mask) == 0) {
pc = pcpu_find(i);
domain = pc->pc_domain;
- count = hpts_domains[domain].count;
- hpts_domains[domain].cpu[count] = i;
- hpts_domains[domain].count++;
+ count = pace->domains[domain].count;
+ pace->domains[domain].cpu[count] = i;
+ pace->domains[domain].count++;
break;
}
}
}
}
+
+ hpts->p_mysleep.tv_sec = 0;
+ hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
tv.tv_sec = 0;
tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
- hpts->sleeping = tv.tv_usec;
- sb = tvtosbt(tv);
- callout_reset_sbt_on(&hpts->co, sb, 0,
- hpts_timeout_swi, hpts, hpts->p_cpu,
- (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
- }
- /*
- * If we somehow have an empty domain, fall back to choosing
- * among all htps threads.
- */
- for (i = 0; i < vm_ndomains; i++) {
- if (hpts_domains[i].count == 0) {
- tcp_bind_threads = 0;
- break;
- }
+ (void)tcp_hpts_sleep(hpts, &tv);
}
- tcp_hpts_softclock = __tcp_run_hpts;
- tcp_lro_hpts_init();
- printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
- created, bound,
- tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
}
-static void
-tcp_hpts_mod_unload(void)
+/*
+ * Stop all callouts/threads for a tcp_hptsi structure.
+ */
+void
+tcp_hptsi_stop(struct tcp_hptsi *pace)
{
+ struct tcp_hpts_entry *hpts;
int rv __diagused;
+ uint32_t i;
- tcp_lro_hpts_uninit();
- atomic_store_ptr(&tcp_hpts_softclock, NULL);
+ KASSERT(pace != NULL, ("tcp_hptsi_stop: pace is NULL"));
- for (int i = 0; i < tcp_pace.rp_num_hptss; i++) {
- struct tcp_hpts_entry *hpts = tcp_pace.rp_ent[i];
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ hpts = pace->rp_ent[i];
+ KASSERT(hpts != NULL, ("tcp_hptsi_stop: hpts[%d] is NULL", i));
+ KASSERT(hpts->ie_cookie != NULL,
+ ("tcp_hptsi_stop: hpts[%d]->ie_cookie is NULL", i));
- rv = callout_drain(&hpts->co);
+ rv = _callout_stop_safe(&hpts->co, CS_DRAIN);
MPASS(rv != 0);
rv = swi_remove(hpts->ie_cookie);
MPASS(rv == 0);
+ hpts->ie_cookie = NULL;
+ }
+}
- rv = sysctl_ctx_free(&hpts->hpts_ctx);
- MPASS(rv == 0);
+/*
+ * Destroy a tcp_hptsi structure initialized by tcp_hptsi_create.
+ */
+void
+tcp_hptsi_destroy(struct tcp_hptsi *pace)
+{
+ struct tcp_hpts_entry *hpts;
+ uint32_t i;
+
+ KASSERT(pace != NULL, ("tcp_hptsi_destroy: pace is NULL"));
+ KASSERT(pace->rp_ent != NULL, ("tcp_hptsi_destroy: pace->rp_ent is NULL"));
+
+ /* Cleanup each hpts entry */
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ hpts = pace->rp_ent[i];
+ if (hpts != NULL) {
+ /* Cleanup SYSCTL if it was initialized */
+ if (hpts->hpts_root != NULL) {
+ sysctl_ctx_free(&hpts->hpts_ctx);
+ }
- mtx_destroy(&hpts->p_mtx);
- free(hpts->p_hptss, M_TCPHPTS);
- free(hpts, M_TCPHPTS);
+ mtx_destroy(&hpts->p_mtx);
+ free(hpts->p_hptss, M_TCPHPTS);
+ free(hpts, M_TCPHPTS);
+ }
}
- free(tcp_pace.rp_ent, M_TCPHPTS);
- free(tcp_pace.cts_last_ran, M_TCPHPTS);
+ /* Cleanup main arrays */
+ free(pace->rp_ent, M_TCPHPTS);
+ free(pace->cts_last_ran, M_TCPHPTS);
#ifdef SMP
- free(tcp_pace.grps, M_TCPHPTS);
+ free(pace->grps, M_TCPHPTS);
#endif
+ /* Free the main structure */
+ free(pace, M_TCPHPTS);
+}
+
+static int
+tcp_hpts_mod_load(void)
+{
+ int i;
+
+ /* Don't try to bind to NUMA domains if we don't have any */
+ if (vm_ndomains == 1 && tcp_bind_threads == 2)
+ tcp_bind_threads = 0;
+
+ /* Create the tcp_hptsi structure */
+ tcp_hptsi_pace = tcp_hptsi_create(&tcp_hptsi_default_funcs, true);
+ if (tcp_hptsi_pace == NULL)
+ return (ENOMEM);
+
+ /* Initialize global counters */
+ hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
+ hpts_loops = counter_u64_alloc(M_WAITOK);
+ back_tosleep = counter_u64_alloc(M_WAITOK);
+ combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
+ wheel_wrap = counter_u64_alloc(M_WAITOK);
+ hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
+ hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
+ hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
+ hpts_direct_call = counter_u64_alloc(M_WAITOK);
+ cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
+ cpu_uses_random = counter_u64_alloc(M_WAITOK);
+
+ /* Start the threads */
+ tcp_hptsi_start(tcp_hptsi_pace);
+
+ /* Enable the global HPTS softclock function */
+ tcp_hpts_softclock = __tcp_run_hpts;
+
+ /* Initialize LRO HPTS */
+ tcp_lro_hpts_init();
+
+ /*
+ * If we somehow have an empty domain, fall back to choosing among all
+ * HPTS threads.
+ */
+ for (i = 0; i < vm_ndomains; i++) {
+ if (tcp_hptsi_pace->domains[i].count == 0) {
+ tcp_bind_threads = 0;
+ break;
+ }
+ }
+
+ printf("TCP HPTS started %u (%s) swi interrupt threads\n",
+ tcp_hptsi_pace->rp_num_hptss, (tcp_bind_threads == 0) ?
+ "(unbounded)" :
+ (tcp_bind_threads == 1 ? "per-cpu" : "per-NUMA-domain"));
+
+ return (0);
+}
+
+static void
+tcp_hpts_mod_unload(void)
+{
+ tcp_lro_hpts_uninit();
+
+ /* Disable the global HPTS softclock function */
+ atomic_store_ptr(&tcp_hpts_softclock, NULL);
+
+ tcp_hptsi_stop(tcp_hptsi_pace);
+ tcp_hptsi_destroy(tcp_hptsi_pace);
+ tcp_hptsi_pace = NULL;
+
+ /* Cleanup global counters */
counter_u64_free(hpts_hopelessly_behind);
counter_u64_free(hpts_loops);
counter_u64_free(back_tosleep);
@@ -2106,13 +2189,11 @@ tcp_hpts_mod_unload(void)
}
static int
-tcp_hpts_modevent(module_t mod, int what, void *arg)
+tcp_hpts_mod_event(module_t mod, int what, void *arg)
{
-
switch (what) {
case MOD_LOAD:
- tcp_hpts_mod_load();
- return (0);
+ return (tcp_hpts_mod_load());
case MOD_QUIESCE:
/*
* Since we are a dependency of TCP stack modules, they should
@@ -2132,7 +2213,7 @@ tcp_hpts_modevent(module_t mod, int what, void *arg)
static moduledata_t tcp_hpts_module = {
.name = "tcphpts",
- .evhand = tcp_hpts_modevent,
+ .evhand = tcp_hpts_mod_event,
};
DECLARE_MODULE(tcphpts, tcp_hpts_module, SI_SUB_SOFTINTR, SI_ORDER_ANY);
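From a stack's point of view the reworked insert API is just the microsecond form already shown in the header comment above; a minimal sketch (assumes tp is a tcpcb held with the INP write lock):
tcp_hpts_insert(tp, 550, NULL);	/* call tcp_output() in ~550 us;
				 * NULL means no hpts_diag wanted */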
diff --git a/sys/netinet/tcp_hpts.h b/sys/netinet/tcp_hpts.h
index 6172baf2a062..6b05f9701ac2 100644
--- a/sys/netinet/tcp_hpts.h
+++ b/sys/netinet/tcp_hpts.h
@@ -28,19 +28,11 @@
/* Number of useconds represented by an hpts slot */
#define HPTS_USECS_PER_SLOT 10
-#define HPTS_MS_TO_SLOTS(x) ((x * 100) + 1)
-#define HPTS_USEC_TO_SLOTS(x) ((x+9) /10)
#define HPTS_USEC_IN_SEC 1000000
#define HPTS_MSEC_IN_SEC 1000
#define HPTS_USEC_IN_MSEC 1000
static inline uint32_t
-tcp_tv_to_hpts_slot(const struct timeval *sv)
-{
- return ((sv->tv_sec * 100000) + (sv->tv_usec / HPTS_USECS_PER_SLOT));
-}
-
-static inline uint32_t
tcp_tv_to_usec(const struct timeval *sv)
{
return ((uint32_t) ((sv->tv_sec * HPTS_USEC_IN_SEC) + sv->tv_usec));
@@ -66,7 +58,7 @@ struct hpts_diag {
uint32_t p_runningslot; /* bbr->inflight */
uint32_t slot_req; /* bbr->flex3 x */
uint32_t inp_hptsslot; /* bbr->flex4 x */
- uint32_t slot_remaining; /* bbr->flex5 x */
+ uint32_t time_remaining; /* bbr->flex5 x */
uint32_t have_slept; /* bbr->epoch x */
uint32_t hpts_sleep_time; /* bbr->applimited x */
uint32_t yet_to_sleep; /* bbr->lt_epoch x */
@@ -75,8 +67,6 @@ struct hpts_diag {
uint32_t maxslots; /* bbr->delRate x */
uint32_t wheel_cts; /* bbr->rttProp x */
int32_t co_ret; /* bbr->pkts_out x */
- uint32_t p_curtick; /* upper bbr->cur_del_rate */
- uint32_t p_lasttick; /* lower bbr->cur_del_rate */
uint8_t p_on_min_sleep; /* bbr->flex8 x */
};
@@ -92,13 +82,18 @@ struct hpts_diag {
#ifdef _KERNEL
+extern struct tcp_hptsi *tcp_hptsi_pace;
+
/*
* The following are the definitions for the kernel HPTS interface for managing
* the HPTS ring and the TCBs on it.
*/
-void tcp_hpts_init(struct tcpcb *);
-void tcp_hpts_remove(struct tcpcb *);
+void __tcp_hpts_init(struct tcp_hptsi *pace, struct tcpcb *);
+#define tcp_hpts_init(tp) __tcp_hpts_init(tcp_hptsi_pace, tp)
+
+void __tcp_hpts_remove(struct tcp_hptsi *pace, struct tcpcb *);
+#define tcp_hpts_remove(tp) __tcp_hpts_remove(tcp_hptsi_pace, tp)
static inline bool
tcp_in_hpts(struct tcpcb *tp)
@@ -132,12 +127,13 @@ tcp_in_hpts(struct tcpcb *tp)
* that INP_WLOCK() or from destroying your TCB where again
* you should already have the INP_WLOCK().
*/
-uint32_t tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line,
- struct hpts_diag *diag);
-#define tcp_hpts_insert(inp, slot) \
- tcp_hpts_insert_diag((inp), (slot), __LINE__, NULL)
+void __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
+ struct hpts_diag *diag);
+#define tcp_hpts_insert(tp, usecs, diag) \
+ __tcp_hpts_insert(tcp_hptsi_pace, (tp), (usecs), (diag))
-void tcp_set_hpts(struct tcpcb *tp);
+void __tcp_set_hpts(struct tcp_hptsi *pace, struct tcpcb *tp);
+#define tcp_set_hpts(tp) __tcp_set_hpts(tcp_hptsi_pace, tp)
extern int32_t tcp_min_hptsi_time;
@@ -147,17 +143,6 @@ get_hpts_min_sleep_time(void)
return (tcp_min_hptsi_time + HPTS_USECS_PER_SLOT);
}
-static inline uint32_t
-tcp_gethptstick(struct timeval *sv)
-{
- struct timeval tv;
-
- if (sv == NULL)
- sv = &tv;
- microuptime(sv);
- return (tcp_tv_to_hpts_slot(sv));
-}
-
static inline uint64_t
tcp_get_u64_usecs(struct timeval *tv)
{
@@ -180,12 +165,5 @@ tcp_get_usecs(struct timeval *tv)
return (tcp_tv_to_usec(tv));
}
-/*
- * LRO HPTS initialization and uninitialization, only for internal use by the
- * HPTS code.
- */
-void tcp_lro_hpts_init(void);
-void tcp_lro_hpts_uninit(void);
-
#endif /* _KERNEL */
#endif /* __tcp_hpts_h__ */
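For reference, the conversions this settles on work out as follows (using HPTS_USECS_PER_SLOT == 10 and NUM_OF_HPTSI_SLOTS == 102400 from the patch):
/*
 * HPTS_USEC_TO_SLOTS(550) == (550 + 9) / 10 == 55 slots (rounds up)
 * cts_to_wheel(cts)       == (cts / 10) % 102400
 * wheel span              == 102400 slots * 10 us == 1.024 s before wrap
 */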
diff --git a/sys/netinet/tcp_hpts_internal.h b/sys/netinet/tcp_hpts_internal.h
new file mode 100644
index 000000000000..8b33e03a6981
--- /dev/null
+++ b/sys/netinet/tcp_hpts_internal.h
@@ -0,0 +1,184 @@
+/*-
+ * Copyright (c) 2025 Netflix, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __tcp_hpts_internal_h__
+#define __tcp_hpts_internal_h__
+
+/*
+ * TCP High Precision Timer System (HPTS) - Internal Definitions
+ *
+ * This header contains internal structures, constants, and interfaces that are
+ * implemented in tcp_hpts.c but exposed to enable comprehensive unit testing of
+ * the HPTS subsystem.
+ */
+
+#if defined(_KERNEL)
+
+/*
+ * The hpts uses a 102400-slot wheel. The wheel
+ * defines the time in 10 usec increments (102400 x 10).
+ * This gives a range of 10 usec - 1024 ms to place
+ * an entry within. If the user requests more than
+ * 1.024 seconds, a remainder is attached and the hpts,
+ * when seeing the remainder, will re-insert the
+ * inpcb forward in time from where it is until
+ * the remainder is zero.
+ */
+
+#define NUM_OF_HPTSI_SLOTS 102400
+
+/* Convert microseconds to HPTS slots, rounding up. */
+#define HPTS_USEC_TO_SLOTS(x) (((x) + 9) / 10)
+
+/* The number of connections after which the dynamic sleep logic kicks in. */
+#define DEFAULT_CONNECTION_THRESHOLD 100
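+
+/*
+ * Worked example (illustrative only): with 10 usec slots,
+ * HPTS_USEC_TO_SLOTS(500) == 50, so a 500 usec request lands 50 slots
+ * ahead of the slot the wheel last ran, and HPTS_USEC_TO_SLOTS(1) rounds
+ * up to slot 1. A request beyond NUM_OF_HPTSI_SLOTS * 10 usec (1.024
+ * seconds) is placed on the wheel with the excess kept in t_hpts_request
+ * and consumed across later wheel passes.
+ */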
+
+extern int tcp_bind_threads; /* Thread binding configuration
+ * (0=none, 1=cpu, 2=numa) */
+
+/*
+ * Abstraction layer controlling time, interrupts and callouts.
+ */
+struct tcp_hptsi_funcs {
+ void (*microuptime)(struct timeval *tv);
+ int (*swi_add)(struct intr_event **eventp, const char *name,
+ driver_intr_t handler, void *arg, int pri, enum intr_type flags,
+ void **cookiep);
+ int (*swi_remove)(void *cookie);
+ void (*swi_sched)(void *cookie, int flags);
+ int (*intr_event_bind)(struct intr_event *ie, int cpu);
+ int (*intr_event_bind_ithread_cpuset)(struct intr_event *ie,
+ struct _cpuset *mask);
+ void (*callout_init)(struct callout *c, int mpsafe);
+ int (*callout_reset_sbt_on)(struct callout *c, sbintime_t sbt,
+ sbintime_t precision, void (*func)(void *), void *arg, int cpu,
+ int flags);
+ int (*_callout_stop_safe)(struct callout *c, int flags);
+};
+
+/* Default function table for system operation */
+extern const struct tcp_hptsi_funcs tcp_hptsi_default_funcs;
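+
+/*
+ * A minimal sketch (illustrative, not part of this interface) of injecting
+ * a custom table; every member should be populated, since the HPTS code is
+ * expected to call through the table unconditionally:
+ *
+ *	static const struct tcp_hptsi_funcs my_funcs = {
+ *		.microuptime = my_microuptime,	(hypothetical stub)
+ *		.swi_add = swi_add,		(or the real kernel routines)
+ *		...
+ *	};
+ *	pace = tcp_hptsi_create(&my_funcs, false);
+ */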
+
+/* Each hpts has its own p_mtx which is used for locking */
+#define HPTS_MTX_ASSERT(hpts) mtx_assert(&(hpts)->p_mtx, MA_OWNED)
+#define HPTS_LOCK(hpts) mtx_lock(&(hpts)->p_mtx)
+#define HPTS_TRYLOCK(hpts) mtx_trylock(&(hpts)->p_mtx)
+#define HPTS_UNLOCK(hpts) mtx_unlock(&(hpts)->p_mtx)
+
+struct tcp_hpts_entry {
+ /* Cache line 0x00 */
+ struct mtx p_mtx; /* Mutex for hpts */
+ struct timeval p_mysleep; /* Our min sleep time */
+ uint64_t syscall_cnt;
+ uint64_t sleeping; /* What the actual sleep was (if sleeping) */
+ uint16_t p_hpts_active; /* Flag that says hpts is awake */
+ uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
+ uint32_t p_runningslot; /* Current slot we are at if we are running */
+ uint32_t p_prev_slot; /* Previous slot we were on */
+ uint32_t p_cur_slot; /* Current slot in wheel hpts is draining */
+ uint32_t p_nxt_slot; /* The next slot outside the current range
+ * of slots that the hpts is running on. */
+ int32_t p_on_queue_cnt; /* Count on queue in this hpts */
+ uint8_t p_direct_wake :1, /* boolean */
+ p_on_min_sleep:1, /* boolean */
+ p_hpts_wake_scheduled:1,/* boolean */
+ hit_callout_thresh:1,
+ p_avail:4;
+ uint8_t p_fill[3]; /* Fill to 32 bits */
+ /* Cache line 0x40 */
+ struct hptsh {
+ TAILQ_HEAD(, tcpcb) head;
+ uint32_t count;
+ uint32_t gencnt;
+ } *p_hptss; /* Hptsi wheel */
+ uint32_t p_hpts_sleep_time; /* Current sleep interval having a max
+ * of 255ms */
+ uint32_t overidden_sleep; /* what was overridden by min-sleep for logging */
+ uint32_t saved_curslot; /* for logging */
+ uint32_t saved_prev_slot; /* for logging */
+ uint32_t p_delayed_by; /* How much were we delayed by */
+ /* Cache line 0x80 */
+ struct sysctl_ctx_list hpts_ctx;
+ struct sysctl_oid *hpts_root;
+ struct intr_event *ie;
+ void *ie_cookie;
+ uint16_t p_cpu; /* The hpts CPU */
+ struct tcp_hptsi *p_hptsi; /* Back pointer to parent hptsi structure */
+ /* There is extra space in here */
+ /* Cache line 0x100 */
+ struct callout co __aligned(CACHE_LINE_SIZE);
+} __aligned(CACHE_LINE_SIZE);
+
+struct tcp_hptsi {
+ struct cpu_group **grps;
+ struct tcp_hpts_entry **rp_ent; /* Array of hptss */
+ uint32_t *cts_last_ran;
+ uint32_t grp_cnt;
+ uint32_t rp_num_hptss; /* Number of hpts threads */
+ struct hpts_domain_info {
+ int count;
+ int cpu[MAXCPU];
+ } domains[MAXMEMDOM]; /* Per-NUMA domain CPU assignments */
+ const struct tcp_hptsi_funcs *funcs; /* Function table for testability */
+};
+
+/*
+ * Core tcp_hptsi structure manipulation functions.
+ */
+struct tcp_hptsi *tcp_hptsi_create(const struct tcp_hptsi_funcs *funcs,
+    bool enable_sysctl);
+void tcp_hptsi_destroy(struct tcp_hptsi *pace);
+void tcp_hptsi_start(struct tcp_hptsi *pace);
+void tcp_hptsi_stop(struct tcp_hptsi *pace);
+uint16_t tcp_hptsi_random_cpu(struct tcp_hptsi *pace);
+int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout);
+
+void tcp_hpts_wake(struct tcp_hpts_entry *hpts);
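+
+/*
+ * Typical lifecycle, as exercised by tcp_hpts_test.c (a sketch; assumes
+ * the caller owns the instance end to end):
+ *
+ *	pace = tcp_hptsi_create(&tcp_hptsi_default_funcs, false);
+ *	tcp_hptsi_start(pace);
+ *	...
+ *	tcp_hptsi_stop(pace);
+ *	tcp_hptsi_destroy(pace);
+ */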
+
+/*
+ * LRO HPTS initialization and uninitialization, only for internal use by the
+ * HPTS code.
+ */
+void tcp_lro_hpts_init(void);
+void tcp_lro_hpts_uninit(void);
+
+#endif /* defined(_KERNEL) */
+#endif /* __tcp_hpts_internal_h__ */
diff --git a/sys/netinet/tcp_hpts_test.c b/sys/netinet/tcp_hpts_test.c
new file mode 100644
index 000000000000..c5dc9cb5b03b
--- /dev/null
+++ b/sys/netinet/tcp_hpts_test.c
@@ -0,0 +1,1682 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Netflix, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <tests/ktest.h>
+#include <sys/cdefs.h>
+#include "opt_inet.h"
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/refcount.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <netinet/in_pcb.h>
+#include <netinet/tcp_seq.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp_hpts.h>
+#include <netinet/tcp_hpts_internal.h>
+#include <dev/tcp_log/tcp_log_dev.h>
+#include <netinet/tcp_log_buf.h>
+
+#undef tcp_hpts_init
+#undef tcp_hpts_remove
+#undef tcp_hpts_insert
+#undef tcp_set_hpts
+
+/* Custom definitions that take the tcp_hptsi */
+#define tcp_hpts_init(pace, tp) __tcp_hpts_init((pace), (tp))
+#define tcp_hpts_remove(pace, tp) __tcp_hpts_remove((pace), (tp))
+#define tcp_hpts_insert(pace, tp, usecs, diag) \
+ __tcp_hpts_insert((pace), (tp), (usecs), (diag))
+#define tcp_set_hpts(pace, tp) __tcp_set_hpts((pace), (tp))
+
+static MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts_test", "TCP hpts test");
+
+static int test_exit_on_failure = 1;
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts_test, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "TCP HPTS test controls");
+SYSCTL_INT(_net_inet_tcp_hpts_test, OID_AUTO, exit_on_failure, CTLFLAG_RW,
+ &test_exit_on_failure, 0,
+ "Exit HPTS test immediately on first failure (1) or continue running all tests (0)");
+
+#define KTEST_VERIFY(x) do { \
+ if (!(x)) { \
+ KTEST_ERR(ctx, "FAIL: %s", #x); \
+ if (test_exit_on_failure) \
+ return (EINVAL); \
+ } else { \
+ KTEST_LOG(ctx, "PASS: %s", #x); \
+ } \
+} while (0)
+
+#define KTEST_EQUAL(x, y) do { \
+ if ((x) != (y)) { \
+ KTEST_ERR(ctx, "FAIL: %s != %s (%d != %d)", #x, #y, (x), (y)); \
+ if (test_exit_on_failure) \
+ return (EINVAL); \
+ } else { \
+ KTEST_LOG(ctx, "PASS: %s == %s", #x, #y); \
+ } \
+} while (0)
+
+#define KTEST_NEQUAL(x, y) do { \
+ if ((x) == (y)) { \
+ KTEST_ERR(ctx, "FAIL: %s == %s (%d == %d)", #x, #y, (x), (y)); \
+ if (test_exit_on_failure) \
+ return (EINVAL); \
+ } else { \
+ KTEST_LOG(ctx, "PASS: %s != %s", #x, #y); \
+ } \
+} while (0)
+
+#define KTEST_GREATER_THAN(x, y) do { \
+ if ((x) <= (y)) { \
+ KTEST_ERR(ctx, "FAIL: %s <= %s (%d <= %d)", #x, #y, (x), (y)); \
+ if (test_exit_on_failure) \
+ return (EINVAL); \
+ } else { \
+ KTEST_LOG(ctx, "PASS: %s > %s", #x, #y); \
+ } \
+} while (0)
+
+#define KTEST_VERIFY_RET(x, y) do { \
+ if (!(x)) { \
+ KTEST_ERR(ctx, "FAIL: %s", #x); \
+ if (test_exit_on_failure) \
+ return (y); \
+ } else { \
+ KTEST_LOG(ctx, "PASS: %s", #x); \
+ } \
+} while (0)
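+
+/*
+ * Note: these macros assume a ktest context named "ctx" is in scope and,
+ * when exit_on_failure is set, return from the enclosing function, e.g.:
+ *
+ *	KTEST_FUNC(example)		(hypothetical test)
+ *	{
+ *		KTEST_EQUAL(1 + 1, 2);	(returns EINVAL on mismatch)
+ *		return (0);
+ *	}
+ */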
+
+#ifdef TCP_HPTS_KTEST
+
+static void
+dump_hpts_entry(struct ktest_test_context *ctx, struct tcp_hpts_entry *hpts)
+{
+ KTEST_LOG(ctx, "tcp_hpts_entry(%p)", hpts);
+ KTEST_LOG(ctx, " p_cur_slot: %u", hpts->p_cur_slot);
+ KTEST_LOG(ctx, " p_prev_slot: %u", hpts->p_prev_slot);
+ KTEST_LOG(ctx, " p_nxt_slot: %u", hpts->p_nxt_slot);
+ KTEST_LOG(ctx, " p_runningslot: %u", hpts->p_runningslot);
+ KTEST_LOG(ctx, " p_on_queue_cnt: %d", hpts->p_on_queue_cnt);
+ KTEST_LOG(ctx, " p_hpts_active: %u", hpts->p_hpts_active);
+ KTEST_LOG(ctx, " p_wheel_complete: %u", hpts->p_wheel_complete);
+ KTEST_LOG(ctx, " p_direct_wake: %u", hpts->p_direct_wake);
+ KTEST_LOG(ctx, " p_on_min_sleep: %u", hpts->p_on_min_sleep);
+ KTEST_LOG(ctx, " p_hpts_wake_scheduled: %u", hpts->p_hpts_wake_scheduled);
+ KTEST_LOG(ctx, " hit_callout_thresh: %u", hpts->hit_callout_thresh);
+ KTEST_LOG(ctx, " p_hpts_sleep_time: %u", hpts->p_hpts_sleep_time);
+ KTEST_LOG(ctx, " p_delayed_by: %u", hpts->p_delayed_by);
+ KTEST_LOG(ctx, " overidden_sleep: %u", hpts->overidden_sleep);
+ KTEST_LOG(ctx, " saved_curslot: %u", hpts->saved_curslot);
+ KTEST_LOG(ctx, " saved_prev_slot: %u", hpts->saved_prev_slot);
+ KTEST_LOG(ctx, " syscall_cnt: %lu", hpts->syscall_cnt);
+ KTEST_LOG(ctx, " sleeping: %lu", hpts->sleeping);
+ KTEST_LOG(ctx, " p_cpu: %u", hpts->p_cpu);
+ KTEST_LOG(ctx, " ie_cookie: %p", hpts->ie_cookie);
+ KTEST_LOG(ctx, " p_hptsi: %p", hpts->p_hptsi);
+ KTEST_LOG(ctx, " p_mysleep: %ld.%06ld", hpts->p_mysleep.tv_sec, hpts->p_mysleep.tv_usec);
+}
+
+static void
+dump_tcpcb(struct tcpcb *tp)
+{
+ struct ktest_test_context *ctx = tp->t_fb_ptr;
+ struct inpcb *inp = &tp->t_inpcb;
+
+ KTEST_LOG(ctx, "tcp_control_block(%p)", tp);
+
+ /* HPTS-specific fields */
+ KTEST_LOG(ctx, " t_in_hpts: %d", tp->t_in_hpts);
+ KTEST_LOG(ctx, " t_hpts_cpu: %u", tp->t_hpts_cpu);
+ KTEST_LOG(ctx, " t_hpts_slot: %d", tp->t_hpts_slot);
+ KTEST_LOG(ctx, " t_hpts_gencnt: %u", tp->t_hpts_gencnt);
+ KTEST_LOG(ctx, " t_hpts_request: %u", tp->t_hpts_request);
+
+ /* LRO CPU field */
+ KTEST_LOG(ctx, " t_lro_cpu: %u", tp->t_lro_cpu);
+
+ /* TCP flags that affect HPTS */
+ KTEST_LOG(ctx, " t_flags2: 0x%x", tp->t_flags2);
+ KTEST_LOG(ctx, " TF2_HPTS_CPU_SET: %s", (tp->t_flags2 & TF2_HPTS_CPU_SET) ? "YES" : "NO");
+ KTEST_LOG(ctx, " TF2_HPTS_CALLS: %s", (tp->t_flags2 & TF2_HPTS_CALLS) ? "YES" : "NO");
+ KTEST_LOG(ctx, " TF2_SUPPORTS_MBUFQ: %s", (tp->t_flags2 & TF2_SUPPORTS_MBUFQ) ? "YES" : "NO");
+
+ /* Input PCB fields that HPTS uses */
+ KTEST_LOG(ctx, " inp_flags: 0x%x", inp->inp_flags);
+ KTEST_LOG(ctx, " INP_DROPPED: %s", (inp->inp_flags & INP_DROPPED) ? "YES" : "NO");
+ KTEST_LOG(ctx, " inp_flowid: 0x%x", inp->inp_flowid);
+ KTEST_LOG(ctx, " inp_flowtype: %u", inp->inp_flowtype);
+ KTEST_LOG(ctx, " inp_numa_domain: %d", inp->inp_numa_domain);
+}
+
+/* Enum for call counting indices */
+enum test_call_counts {
+ CCNT_MICROUPTIME = 0,
+ CCNT_SWI_ADD,
+ CCNT_SWI_REMOVE,
+ CCNT_SWI_SCHED,
+ CCNT_INTR_EVENT_BIND,
+ CCNT_INTR_EVENT_BIND_CPUSET,
+ CCNT_CALLOUT_INIT,
+ CCNT_CALLOUT_RESET_SBT_ON,
+ CCNT_CALLOUT_STOP_SAFE,
+ CCNT_TCP_OUTPUT,
+ CCNT_TCP_TFB_DO_QUEUED_SEGMENTS,
+ CCNT_MAX
+};
+
+static uint32_t call_counts[CCNT_MAX];
+
+static uint64_t test_time_usec = 0;
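+
+/*
+ * Simulated time for the mock clock: test_microuptime() below reports this
+ * value instead of real time, so a test advances the wheel
+ * deterministically by bumping it and invoking the worker directly, e.g.:
+ *
+ *	test_time_usec += 500;
+ *	slots_ran = tcp_hptsi(hpts, true);
+ */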
+
+/*
+ * Reset all test global variables to a clean state.
+ */
+static void
+test_hpts_init(void)
+{
+ memset(call_counts, 0, sizeof(call_counts));
+ test_time_usec = 0;
+}
+
+static void
+test_microuptime(struct timeval *tv)
+{
+ call_counts[CCNT_MICROUPTIME]++;
+ tv->tv_sec = test_time_usec / 1000000;
+ tv->tv_usec = test_time_usec % 1000000;
+}
+
+static int
+test_swi_add(struct intr_event **eventp, const char *name,
+ driver_intr_t handler, void *arg, int pri, enum intr_type flags,
+ void **cookiep)
+{
+ call_counts[CCNT_SWI_ADD]++;
+ /* Simulate successful SWI creation */
+ *eventp = (struct intr_event *)0xfeedface; /* Mock event */
+ *cookiep = (void *)0xdeadbeef; /* Mock cookie */
+ return (0);
+}
+
+static int
+test_swi_remove(void *cookie)
+{
+ call_counts[CCNT_SWI_REMOVE]++;
+ /* Simulate successful removal */
+ return (0);
+}
+
+static void
+test_swi_sched(void *cookie, int flags)
+{
+ call_counts[CCNT_SWI_SCHED]++;
+ /* Simulate successful SWI scheduling */
+}
+
+static int
+test_intr_event_bind(struct intr_event *ie, int cpu)
+{
+ call_counts[CCNT_INTR_EVENT_BIND]++;
+ /* Simulate successful binding */
+ return (0);
+}
+
+static int
+test_intr_event_bind_ithread_cpuset(struct intr_event *ie, struct _cpuset *mask)
+{
+ call_counts[CCNT_INTR_EVENT_BIND_CPUSET]++;
+ /* Simulate successful cpuset binding */
+ return (0);
+}
+
+static void
+test_callout_init(struct callout *c, int mpsafe)
+{
+ call_counts[CCNT_CALLOUT_INIT]++;
+ memset(c, 0, sizeof(*c));
+}
+
+static int
+test_callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
+ void (*func)(void *), void *arg, int cpu, int flags)
+{
+ call_counts[CCNT_CALLOUT_RESET_SBT_ON]++;
+ /* Return 1 to simulate successful timer scheduling */
+ return (1);
+}
+
+static int
+test_callout_stop_safe(struct callout *c, int flags)
+{
+ call_counts[CCNT_CALLOUT_STOP_SAFE]++;
+ /* Return 1 to simulate successful timer stopping */
+ return (1);
+}
+
+static const struct tcp_hptsi_funcs test_funcs = {
+ .microuptime = test_microuptime,
+ .swi_add = test_swi_add,
+ .swi_remove = test_swi_remove,
+ .swi_sched = test_swi_sched,
+ .intr_event_bind = test_intr_event_bind,
+ .intr_event_bind_ithread_cpuset = test_intr_event_bind_ithread_cpuset,
+ .callout_init = test_callout_init,
+ .callout_reset_sbt_on = test_callout_reset_sbt_on,
+ ._callout_stop_safe = test_callout_stop_safe,
+};
+
+#define TP_REMOVE_FROM_HPTS(tp) ((tp)->bits_spare)
+#define TP_LOG_TEST(tp) ((tp)->t_log_state_set)
+
+static int
+test_tcp_output(struct tcpcb *tp)
+{
+ struct ktest_test_context *ctx = tp->t_fb_ptr;
+ struct tcp_hptsi *pace = (struct tcp_hptsi*)tp->t_tfo_pending;
+ struct tcp_hpts_entry *hpts = pace->rp_ent[tp->t_hpts_cpu];
+
+ call_counts[CCNT_TCP_OUTPUT]++;
+ if (TP_LOG_TEST(tp)) {
+ KTEST_LOG(ctx, "=> tcp_output(%p)", tp);
+ dump_tcpcb(tp);
+ dump_hpts_entry(ctx, hpts);
+ }
+
+ if ((TP_REMOVE_FROM_HPTS(tp) & 1) != 0) {
+ if (TP_LOG_TEST(tp))
+ KTEST_LOG(ctx, "=> tcp_hpts_remove(%p)", tp);
+ tcp_hpts_remove(pace, tp);
+ }
+
+ if ((TP_REMOVE_FROM_HPTS(tp) & 2) != 0) {
+ INP_WUNLOCK(&tp->t_inpcb); /* tcp_output unlocks on error */
+ return (-1); /* Simulate tcp_output error */
+ }
+
+ return (0);
+}
+
+static int
+test_tfb_do_queued_segments(struct tcpcb *tp, int flag)
+{
+ struct ktest_test_context *ctx = tp->t_fb_ptr;
+ struct tcp_hptsi *pace = (struct tcp_hptsi*)tp->t_tfo_pending;
+ struct tcp_hpts_entry *hpts = pace->rp_ent[tp->t_hpts_cpu];
+
+ call_counts[CCNT_TCP_TFB_DO_QUEUED_SEGMENTS]++;
+ KTEST_LOG(ctx, "=> tfb_do_queued_segments(%p, %d)", tp, flag);
+ dump_tcpcb(tp);
+ dump_hpts_entry(ctx, hpts);
+
+ if ((TP_REMOVE_FROM_HPTS(tp) & 1) != 0) {
+ if (TP_LOG_TEST(tp))
+ KTEST_LOG(ctx, "=> tcp_hpts_remove(%p)", tp);
+ tcp_hpts_remove(pace, tp);
+ }
+
+ if ((TP_REMOVE_FROM_HPTS(tp) & 2) != 0) {
+ INP_WUNLOCK(&tp->t_inpcb); /* do_queued_segments unlocks on error */
+ return (-1); /* Simulate do_queued_segments error */
+ }
+
+ return (0);
+}
+
+static struct tcp_function_block test_tcp_fb = {
+ .tfb_tcp_block_name = "hpts_test_tcp",
+ .tfb_tcp_output = test_tcp_output,
+ .tfb_do_queued_segments = test_tfb_do_queued_segments,
+};
+
+/*
+ * Create a minimally initialized tcpcb that can be safely inserted into HPTS.
+ * This function allocates and initializes all the fields that HPTS code
+ * reads or writes.
+ */
+static struct tcpcb *
+test_hpts_create_tcpcb(struct ktest_test_context *ctx, struct tcp_hptsi *pace)
+{
+ struct tcpcb *tp;
+
+ tp = malloc(sizeof(struct tcpcb), M_TCPHPTS, M_WAITOK | M_ZERO);
+ if (tp) {
+ rw_init_flags(&tp->t_inpcb.inp_lock, "test-inp",
+ RW_RECURSE | RW_DUPOK);
+ refcount_init(&tp->t_inpcb.inp_refcount, 1);
+ tp->t_inpcb.inp_pcbinfo = &V_tcbinfo;
+ tp->t_fb = &test_tcp_fb;
+ tp->t_hpts_cpu = HPTS_CPU_NONE;
+ STAILQ_INIT(&tp->t_inqueue);
+ tcp_hpts_init(pace, tp);
+
+ /* Stuff some pointers in the tcb for test purposes. */
+ tp->t_fb_ptr = ctx;
+ tp->t_tfo_pending = (unsigned int*)pace;
+ }
+
+ return (tp);
+}
+
+/*
+ * Free a test tcpcb created by test_hpts_create_tcpcb()
+ */
+static void
+test_hpts_free_tcpcb(struct tcpcb *tp)
+{
+ if (tp == NULL)
+ return;
+
+ INP_LOCK_DESTROY(&tp->t_inpcb);
+ free(tp, M_TCPHPTS);
+}
+
+/*
+ * ***********************************************
+ * * KTEST functions for testing the HPTS module *
+ * ***********************************************
+ */
+
+/*
+ * Validates that the HPTS module is properly loaded and initialized by checking
+ * that the minimum HPTS time is configured.
+ */
+KTEST_FUNC(module_load)
+{
+ test_hpts_init();
+ KTEST_NEQUAL(tcp_min_hptsi_time, 0);
+ KTEST_VERIFY(tcp_bind_threads >= 0 && tcp_bind_threads <= 2);
+ KTEST_NEQUAL(tcp_hptsi_pace, NULL);
+ return (0);
+}
+
+/*
+ * Validates the creation and destruction of tcp_hptsi structures, ensuring
+ * proper initialization of internal fields and clean destruction.
+ */
+KTEST_FUNC(hptsi_create_destroy)
+{
+ struct tcp_hptsi *pace;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ KTEST_NEQUAL(pace->rp_ent, NULL);
+ KTEST_NEQUAL(pace->cts_last_ran, NULL);
+ KTEST_VERIFY(pace->rp_num_hptss > 0);
+ KTEST_VERIFY(pace->rp_num_hptss <= MAXCPU); /* Reasonable upper bound */
+ KTEST_VERIFY(pace->grp_cnt >= 1); /* At least one group */
+ KTEST_EQUAL(pace->funcs, &test_funcs); /* Verify function pointer was set */
+
+ /* Verify individual HPTS entries are properly initialized */
+ for (uint32_t i = 0; i < pace->rp_num_hptss; i++) {
+ KTEST_NEQUAL(pace->rp_ent[i], NULL);
+ KTEST_EQUAL(pace->rp_ent[i]->p_cpu, i);
+ KTEST_EQUAL(pace->rp_ent[i]->p_hptsi, pace);
+ KTEST_EQUAL(pace->rp_ent[i]->p_on_queue_cnt, 0);
+ }
+
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates that tcp_hptsi structures can be started and stopped properly,
+ * including verification that threads are created during start and cleaned up
+ * during stop operations.
+ */
+KTEST_FUNC(hptsi_start_stop)
+{
+ struct tcp_hptsi *pace;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+
+ tcp_hptsi_start(pace);
+
+ /* Verify that entries have threads started */
+ struct tcp_hpts_entry *hpts = pace->rp_ent[0];
+ KTEST_NEQUAL(hpts->ie_cookie, NULL); /* Should have SWI handler */
+ KTEST_EQUAL(hpts->p_hptsi, pace); /* Should point to our pace */
+
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates that multiple tcp_hptsi instances can coexist independently, with
+ * different configurations and CPU assignments without interfering with each
+ * other.
+ */
+KTEST_FUNC(hptsi_independence)
+{
+ struct tcp_hptsi *pace1, *pace2;
+ uint16_t cpu1, cpu2;
+
+ test_hpts_init();
+
+ pace1 = tcp_hptsi_create(&test_funcs, false);
+ pace2 = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace1, NULL);
+ KTEST_NEQUAL(pace2, NULL);
+ KTEST_NEQUAL(pace2->rp_ent, NULL);
+
+ cpu1 = tcp_hptsi_random_cpu(pace1);
+ cpu2 = tcp_hptsi_random_cpu(pace2);
+ KTEST_VERIFY(cpu1 < pace1->rp_num_hptss);
+ KTEST_VERIFY(cpu2 < pace2->rp_num_hptss);
+
+ /* Verify both instances have independent entry arrays */
+ KTEST_NEQUAL(pace1->rp_ent, pace2->rp_ent);
+ /* Their CPU counts may differ, but each must be within a sane range */
+ KTEST_VERIFY(pace1->rp_num_hptss > 0 && pace1->rp_num_hptss <= MAXCPU);
+ KTEST_VERIFY(pace2->rp_num_hptss > 0 && pace2->rp_num_hptss <= MAXCPU);
+
+ tcp_hptsi_destroy(pace1);
+ tcp_hptsi_destroy(pace2);
+
+ return (0);
+}
+
+/*
+ * Validates that custom function injection works correctly, ensuring that
+ * test-specific implementations of microuptime and others are properly
+ * called by the HPTS system.
+ */
+KTEST_FUNC(function_injection)
+{
+ struct tcp_hptsi *pace;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ KTEST_EQUAL(pace->funcs, &test_funcs);
+ KTEST_VERIFY(call_counts[CCNT_MICROUPTIME] > 0);
+ KTEST_VERIFY(call_counts[CCNT_CALLOUT_INIT] > 0);
+
+ tcp_hptsi_start(pace);
+ KTEST_VERIFY(call_counts[CCNT_SWI_ADD] > 0);
+ KTEST_VERIFY(tcp_bind_threads == 0 ||
+ call_counts[CCNT_INTR_EVENT_BIND] > 0 ||
+ call_counts[CCNT_INTR_EVENT_BIND_CPUSET] > 0);
+ KTEST_VERIFY(call_counts[CCNT_CALLOUT_RESET_SBT_ON] > 0);
+
+ tcp_hptsi_stop(pace);
+ KTEST_VERIFY(call_counts[CCNT_CALLOUT_STOP_SAFE] > 0);
+ KTEST_VERIFY(call_counts[CCNT_SWI_REMOVE] > 0);
+
+ tcp_hptsi_destroy(pace);
+
+ /* Verify we have a reasonable balance of create/destroy calls */
+ KTEST_EQUAL(call_counts[CCNT_SWI_ADD], call_counts[CCNT_SWI_REMOVE]);
+ KTEST_VERIFY(call_counts[CCNT_CALLOUT_RESET_SBT_ON] <= call_counts[CCNT_CALLOUT_STOP_SAFE]);
+
+ return (0);
+}
+
+/*
+ * Validates that a tcpcb can be properly initialized for HPTS compatibility,
+ * ensuring all required fields are set correctly and function pointers are
+ * valid for safe HPTS operations.
+ */
+KTEST_FUNC(tcpcb_initialization)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ /* Verify the tcpcb is properly initialized for HPTS */
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+ KTEST_NEQUAL(tp->t_fb, NULL);
+ KTEST_NEQUAL(tp->t_fb->tfb_tcp_output, NULL);
+ KTEST_NEQUAL(tp->t_fb->tfb_do_queued_segments, NULL);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_NONE);
+ KTEST_EQUAL((tp->t_flags2 & (TF2_HPTS_CPU_SET | TF2_HPTS_CALLS)), 0);
+
+ /* Verify that HPTS-specific fields are initialized */
+ KTEST_EQUAL(tp->t_hpts_gencnt, 0);
+ KTEST_EQUAL(tp->t_hpts_slot, 0);
+ KTEST_EQUAL(tp->t_hpts_request, 0);
+ KTEST_EQUAL(tp->t_lro_cpu, 0);
+ KTEST_VERIFY(tp->t_hpts_cpu < pace->rp_num_hptss);
+ KTEST_EQUAL(tp->t_inpcb.inp_refcount, 1);
+ KTEST_VERIFY(!(tp->t_inpcb.inp_flags & INP_DROPPED));
+
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates that tcpcb structures can be successfully inserted into and removed
+ * from the HPTS wheel, with proper state tracking and slot assignment during
+ * the process.
+ */
+KTEST_FUNC(tcpcb_insertion)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp;
+ struct tcp_hpts_entry *hpts;
+ uint32_t timeout_usecs = 10;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_NONE);
+ KTEST_EQUAL((tp->t_flags2 & TF2_HPTS_CALLS), 0);
+
+ INP_WLOCK(&tp->t_inpcb);
+ tp->t_flags2 |= TF2_HPTS_CALLS;
+ KTEST_EQUAL(call_counts[CCNT_SWI_SCHED], 0);
+ tcp_hpts_insert(pace, tp, timeout_usecs, NULL);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+ INP_WUNLOCK(&tp->t_inpcb);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0);
+ KTEST_EQUAL(call_counts[CCNT_SWI_SCHED], 1);
+ KTEST_VERIFY(tcp_in_hpts(tp));
+ KTEST_VERIFY(tp->t_hpts_slot >= 0);
+ KTEST_VERIFY(tp->t_hpts_slot < NUM_OF_HPTSI_SLOTS);
+
+ hpts = pace->rp_ent[tp->t_hpts_cpu];
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 1);
+ KTEST_EQUAL(tp->t_hpts_request, 0);
+ KTEST_EQUAL(tp->t_hpts_slot, HPTS_USEC_TO_SLOTS(timeout_usecs));
+ //KTEST_EQUAL(tp->t_hpts_gencnt, 1);
+
+ INP_WLOCK(&tp->t_inpcb);
+ tcp_hpts_remove(pace, tp);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_NONE);
+ INP_WUNLOCK(&tp->t_inpcb);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0);
+ KTEST_VERIFY(!tcp_in_hpts(tp));
+
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 0);
+
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates the core HPTS timer functionality by verifying that scheduled
+ * tcpcb entries trigger tcp_output calls at appropriate times, simulating
+ * real-world timer-driven TCP processing.
+ */
+KTEST_FUNC(timer_functionality)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcp_hpts_entry *hpts;
+ struct tcpcb *tp;
+ int32_t slots_ran;
+ uint32_t i;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ for (i = 0; i < pace->rp_num_hptss; i++)
+ dump_hpts_entry(ctx, pace->rp_ent[i]);
+
+ /* Create and insert the tcpcb into the HPTS wheel to wait for 500 usec */
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+ dump_tcpcb(tp);
+ TP_LOG_TEST(tp) = 1; /* Enable logging for this tcpcb */
+
+ KTEST_LOG(ctx, "=> tcp_hpts_insert(%p)", tp);
+ INP_WLOCK(&tp->t_inpcb);
+ tp->t_flags2 |= TF2_HPTS_CALLS; /* Mark as needing HPTS processing */
+ tcp_hpts_insert(pace, tp, 500, NULL);
+ INP_WUNLOCK(&tp->t_inpcb);
+
+ dump_tcpcb(tp);
+ for (i = 0; i < pace->rp_num_hptss; i++)
+ dump_hpts_entry(ctx, pace->rp_ent[i]);
+
+ hpts = pace->rp_ent[tp->t_hpts_cpu];
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 1);
+ KTEST_EQUAL(hpts->p_prev_slot, 0);
+ KTEST_EQUAL(hpts->p_cur_slot, 0);
+ KTEST_EQUAL(hpts->p_runningslot, 0);
+ KTEST_EQUAL(hpts->p_nxt_slot, 1);
+ KTEST_EQUAL(hpts->p_hpts_active, 0);
+
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_EQUAL(tp->t_hpts_request, 0);
+ KTEST_EQUAL(tp->t_hpts_slot, HPTS_USEC_TO_SLOTS(500));
+
+ /*
+  * Set our test flag to indicate the tcpcb should be removed from the
+  * wheel when tcp_output is called.
+  */
+ TP_REMOVE_FROM_HPTS(tp) = 1;
+
+ /* Test early exit condition: advance time by insufficient amount */
+ KTEST_LOG(ctx, "Testing early exit with insufficient time advancement");
+ test_time_usec += 1; /* Very small advancement - should cause early exit */
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* Should return 0 slots due to insufficient time advancement */
+ KTEST_EQUAL(slots_ran, 0);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0); /* No processing should occur */
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE); /* Connection still queued */
+
+ /*
+  * Wait 498 more usec, trigger the HPTS workers, and verify that
+  * nothing happens yet (total 499 usec).
+  */
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0);
+ test_time_usec += 498;
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ KTEST_LOG(ctx, "=> tcp_hptsi(%p)", pace->rp_ent[i]);
+ HPTS_LOCK(pace->rp_ent[i]);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(pace->rp_ent[i], true);
+ HPTS_UNLOCK(pace->rp_ent[i]);
+ NET_EPOCH_EXIT(et);
+
+ dump_hpts_entry(ctx, pace->rp_ent[i]);
+ KTEST_VERIFY(slots_ran >= 0);
+ KTEST_EQUAL(pace->rp_ent[i]->p_prev_slot, 49);
+ KTEST_EQUAL(pace->rp_ent[i]->p_cur_slot, 49);
+ }
+
+ dump_tcpcb(tp);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_EQUAL(tp->t_hpts_request, 0);
+ KTEST_EQUAL(tp->t_hpts_slot, HPTS_USEC_TO_SLOTS(500));
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 1);
+
+ /*
+  * Wait 1 more usec, trigger the HPTS workers, and verify that
+  * tcp_output is called this time.
+  */
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0);
+ test_time_usec += 1;
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ KTEST_LOG(ctx, "=> tcp_hptsi(%p)", pace->rp_ent[i]);
+ HPTS_LOCK(pace->rp_ent[i]);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(pace->rp_ent[i], true);
+ HPTS_UNLOCK(pace->rp_ent[i]);
+ NET_EPOCH_EXIT(et);
+
+ dump_hpts_entry(ctx, pace->rp_ent[i]);
+ KTEST_VERIFY(slots_ran >= 0);
+ KTEST_EQUAL(pace->rp_ent[i]->p_prev_slot, 50);
+ KTEST_EQUAL(pace->rp_ent[i]->p_cur_slot, 50);
+ }
+
+ dump_tcpcb(tp);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 1);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_NONE);
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 0);
+
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates HPTS scalability by creating and inserting a LOT of tcpcbs into
+ * the HPTS wheel, testing performance under high load conditions.
+ */
+KTEST_FUNC(scalability_tcpcbs)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb **tcpcbs;
+ uint32_t i, num_tcpcbs = 100000, total_queued = 0;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ /* Allocate array to hold pointers to all tcpcbs */
+ tcpcbs = malloc(num_tcpcbs * sizeof(struct tcpcb *), M_TCPHPTS, M_WAITOK | M_ZERO);
+ KTEST_VERIFY_RET(tcpcbs != NULL, ENOMEM);
+
+ /* Create a LOT of tcpcbs */
+ KTEST_LOG(ctx, "Creating %u tcpcbs...", num_tcpcbs);
+ for (i = 0; i < num_tcpcbs; i++) {
+ tcpcbs[i] = test_hpts_create_tcpcb(ctx, pace);
+ if (tcpcbs[i] == NULL) {
+ KTEST_ERR(ctx, "FAIL: tcpcbs[i] == NULL");
+ return (EINVAL);
+ }
+ }
+
+ /* Insert all created tcpcbs into HPTS */
+ KTEST_LOG(ctx, "Inserting all tcpcbs into HPTS...");
+ for (i = 0; i < num_tcpcbs; i++) {
+ INP_WLOCK(&tcpcbs[i]->t_inpcb);
+ tcpcbs[i]->t_flags2 |= TF2_HPTS_CALLS;
+ /* Insert with varying future timeouts to distribute across slots */
+ tcp_hpts_insert(pace, tcpcbs[i], 100 + (i % 1000), NULL);
+ INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
+ }
+
+ /* Verify total queue counts across all CPUs */
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ total_queued += pace->rp_ent[i]->p_on_queue_cnt;
+ }
+ KTEST_EQUAL(total_queued, num_tcpcbs);
+
+ for (i = 0; i < pace->rp_num_hptss; i++)
+ dump_hpts_entry(ctx, pace->rp_ent[i]);
+
+ /* Remove all tcpcbs from HPTS */
+ KTEST_LOG(ctx, "Removing all tcpcbs from HPTS...");
+ for (i = 0; i < num_tcpcbs; i++) {
+ INP_WLOCK(&tcpcbs[i]->t_inpcb);
+ if (tcpcbs[i]->t_in_hpts != IHPTS_NONE) {
+ tcp_hpts_remove(pace, tcpcbs[i]);
+ }
+ INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
+ }
+
+ /* Verify all queues are now empty */
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ if (pace->rp_ent[i]->p_on_queue_cnt != 0) {
+ KTEST_ERR(ctx, "FAIL: pace->rp_ent[i]->p_on_queue_cnt != 0");
+ return (EINVAL);
+ }
+ }
+
+ for (i = 0; i < num_tcpcbs; i++) {
+ test_hpts_free_tcpcb(tcpcbs[i]);
+ }
+ free(tcpcbs, M_TCPHPTS);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates wheel wrap scenarios where the timer falls significantly behind
+ * and needs to process more than one full wheel revolution worth of slots.
+ */
+KTEST_FUNC(wheel_wrap_recovery)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcpcb **tcpcbs;
+ uint32_t i, timeout_usecs, num_tcpcbs = 500;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ /* Allocate array to hold pointers to tcpcbs */
+ tcpcbs = malloc(num_tcpcbs * sizeof(struct tcpcb *), M_TCPHPTS, M_WAITOK | M_ZERO);
+ KTEST_VERIFY_RET(tcpcbs != NULL, ENOMEM);
+
+ /* Create tcpcbs and insert them across many slots */
+ for (i = 0; i < num_tcpcbs; i++) {
+ tcpcbs[i] = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tcpcbs[i], NULL);
+ TP_REMOVE_FROM_HPTS(tcpcbs[i]) = 1;
+
+ timeout_usecs = ((i * NUM_OF_HPTSI_SLOTS) / num_tcpcbs) * HPTS_USECS_PER_SLOT; /* Spread across slots */
+
+ INP_WLOCK(&tcpcbs[i]->t_inpcb);
+ tcpcbs[i]->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tcpcbs[i], timeout_usecs, NULL);
+ INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
+ }
+
+ /* Fast forward time significantly to trigger wheel wrap */
+ test_time_usec += (NUM_OF_HPTSI_SLOTS + 5000) * HPTS_USECS_PER_SLOT;
+
+ for (i = 0; i < pace->rp_num_hptss; i++) {
+ KTEST_LOG(ctx, "=> tcp_hptsi(%u)", i);
+ KTEST_NEQUAL(pace->rp_ent[i]->p_on_queue_cnt, 0);
+
+ HPTS_LOCK(pace->rp_ent[i]);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(pace->rp_ent[i], true);
+ HPTS_UNLOCK(pace->rp_ent[i]);
+ NET_EPOCH_EXIT(et);
+
+ KTEST_EQUAL(slots_ran, NUM_OF_HPTSI_SLOTS-1); /* Should process all slots */
+ KTEST_EQUAL(pace->rp_ent[i]->p_on_queue_cnt, 0);
+ KTEST_NEQUAL(pace->rp_ent[i]->p_cur_slot,
+ pace->rp_ent[i]->p_prev_slot);
+ }
+
+ /* Cleanup */
+ for (i = 0; i < num_tcpcbs; i++) {
+ INP_WLOCK(&tcpcbs[i]->t_inpcb);
+ if (tcpcbs[i]->t_in_hpts != IHPTS_NONE) {
+ tcp_hpts_remove(pace, tcpcbs[i]);
+ }
+ INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
+ test_hpts_free_tcpcb(tcpcbs[i]);
+ }
+ free(tcpcbs, M_TCPHPTS);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates proper handling of tcpcbs in the IHPTS_MOVING state, which occurs
+ * when a tcpcb is being processed by the HPTS thread but gets removed.
+ */
+KTEST_FUNC(tcpcb_moving_state)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp1, *tp2;
+ struct tcp_hpts_entry *hpts;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ /* Create two tcpcbs on the same CPU/slot */
+ tp1 = test_hpts_create_tcpcb(ctx, pace);
+ tp2 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp1, NULL);
+ KTEST_NEQUAL(tp2, NULL);
+
+ /* Force them to the same CPU for predictable testing */
+ tp1->t_hpts_cpu = 0;
+ tp2->t_hpts_cpu = 0;
+
+ /* Insert both into the same slot */
+ INP_WLOCK(&tp1->t_inpcb);
+ tp1->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp1, 100, NULL);
+ INP_WUNLOCK(&tp1->t_inpcb);
+
+ INP_WLOCK(&tp2->t_inpcb);
+ tp2->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp2, 100, NULL);
+ INP_WUNLOCK(&tp2->t_inpcb);
+
+ hpts = pace->rp_ent[0];
+
+ /* Manually transition tp1 to MOVING state to simulate race condition */
+ HPTS_LOCK(hpts);
+ tp1->t_in_hpts = IHPTS_MOVING;
+ tp1->t_hpts_slot = -1; /* Mark for removal */
+ HPTS_UNLOCK(hpts);
+
+ /* Set time and run HPTS to process the moving state */
+ test_time_usec += 100;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ KTEST_VERIFY(slots_ran >= 0);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 1); /* Shouldn't call on both */
+
+ /* tp1 should be cleaned up and removed */
+ KTEST_EQUAL(tp1->t_in_hpts, IHPTS_NONE);
+ /* tp2 should have been processed normally */
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_NONE);
+
+ test_hpts_free_tcpcb(tp1);
+ test_hpts_free_tcpcb(tp2);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates that tcpcbs with deferred requests (t_hpts_request > 0) are
+ * properly handled and re-inserted into appropriate future slots after
+ * the wheel processes enough slots to accommodate the original request.
+ */
+KTEST_FUNC(deferred_requests)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp, *tp2;
+ struct tcp_hpts_entry *hpts;
+ uint32_t large_timeout_usecs = (NUM_OF_HPTSI_SLOTS + 5000) * HPTS_USECS_PER_SLOT; /* Beyond wheel capacity */
+ uint32_t huge_timeout_usecs = (NUM_OF_HPTSI_SLOTS * 3) * HPTS_USECS_PER_SLOT; /* 3x wheel capacity */
+ uint32_t initial_request;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+
+ /* Insert with a request that exceeds current wheel capacity */
+ INP_WLOCK(&tp->t_inpcb);
+ tp->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp, large_timeout_usecs, NULL);
+ INP_WUNLOCK(&tp->t_inpcb);
+
+ /* Verify it was inserted with a deferred request */
+ dump_tcpcb(tp);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_VERIFY(tp->t_hpts_request > 0);
+ KTEST_VERIFY(tp->t_hpts_slot < NUM_OF_HPTSI_SLOTS);
+
+ hpts = pace->rp_ent[tp->t_hpts_cpu];
+
+ /* Advance time to process deferred requests */
+ test_time_usec += NUM_OF_HPTSI_SLOTS * HPTS_USECS_PER_SLOT;
+
+ /* Process the wheel to handle deferred requests */
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ dump_hpts_entry(ctx, hpts);
+ KTEST_GREATER_THAN(slots_ran, 0);
+ dump_tcpcb(tp);
+ KTEST_EQUAL(tp->t_hpts_request, 0);
+
+ /* Test incremental deferred request processing over multiple cycles */
+ KTEST_LOG(ctx, "Testing incremental deferred request processing");
+
+ /* Create a new connection with an even larger request */
+ tp2 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp2, NULL);
+ tp2->t_hpts_cpu = tp->t_hpts_cpu; /* Same CPU for predictable testing */
+
+ INP_WLOCK(&tp2->t_inpcb);
+ tp2->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp2, huge_timeout_usecs, NULL);
+ INP_WUNLOCK(&tp2->t_inpcb);
+
+ /* Verify initial deferred request */
+ initial_request = tp2->t_hpts_request;
+ KTEST_VERIFY(initial_request > NUM_OF_HPTSI_SLOTS);
+
+ /* Process one wheel cycle - should reduce but not eliminate request */
+ test_time_usec += NUM_OF_HPTSI_SLOTS * HPTS_USECS_PER_SLOT;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* Request should be reduced but not zero */
+ KTEST_GREATER_THAN(initial_request, tp2->t_hpts_request);
+ KTEST_VERIFY(tp2->t_hpts_request > 0);
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_ONQUEUE); /* Still queued */
+
+ /*
+  * For huge_timeout_usecs = NUM_OF_HPTSI_SLOTS * 3 * HPTS_USECS_PER_SLOT,
+  * roughly three wheel cycles are needed, since each cycle can reduce
+  * the request by at most NUM_OF_HPTSI_SLOTS.
+  */
+ test_time_usec += NUM_OF_HPTSI_SLOTS * HPTS_USECS_PER_SLOT;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* After second cycle, request should be reduced significantly (likely by ~NUM_OF_HPTSI_SLOTS) */
+ KTEST_VERIFY(tp2->t_hpts_request < initial_request);
+ KTEST_VERIFY(tp2->t_hpts_request > 0); /* But not yet zero for such a large request */
+
+ /* Clean up second connection */
+ INP_WLOCK(&tp2->t_inpcb);
+ if (tp2->t_in_hpts != IHPTS_NONE) {
+ tcp_hpts_remove(pace, tp2);
+ }
+ INP_WUNLOCK(&tp2->t_inpcb);
+ test_hpts_free_tcpcb(tp2);
+
+ /* Clean up */
+ INP_WLOCK(&tp->t_inpcb);
+ if (tp->t_in_hpts != IHPTS_NONE) {
+ tcp_hpts_remove(pace, tp);
+ }
+ INP_WUNLOCK(&tp->t_inpcb);
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates CPU assignment and affinity mechanisms, including flowid-based
+ * assignment, random fallback scenarios, and explicit CPU setting. Tests
+ * the actual cpu assignment logic in hpts_cpuid via tcp_set_hpts.
+ */
+KTEST_FUNC(cpu_assignment)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp1, *tp2, *tp2_dup, *tp3;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+
+ /* Test random CPU assignment (no flowid) */
+ tp1 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp1, NULL);
+ tp1->t_inpcb.inp_flowtype = M_HASHTYPE_NONE;
+ INP_WLOCK(&tp1->t_inpcb);
+ tcp_set_hpts(pace, tp1);
+ INP_WUNLOCK(&tp1->t_inpcb);
+ KTEST_VERIFY(tp1->t_hpts_cpu < pace->rp_num_hptss);
+ KTEST_VERIFY(tp1->t_flags2 & TF2_HPTS_CPU_SET);
+
+ /* Test flowid-based assignment */
+ tp2 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp2, NULL);
+ tp2->t_inpcb.inp_flowtype = M_HASHTYPE_RSS_TCP_IPV4;
+ tp2->t_inpcb.inp_flowid = 12345;
+ INP_WLOCK(&tp2->t_inpcb);
+ tcp_set_hpts(pace, tp2);
+ INP_WUNLOCK(&tp2->t_inpcb);
+ KTEST_VERIFY(tp2->t_hpts_cpu < pace->rp_num_hptss);
+ KTEST_VERIFY(tp2->t_flags2 & TF2_HPTS_CPU_SET);
+
+ /* With the same flowid, should get same CPU assignment */
+ tp2_dup = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp2_dup, NULL);
+ tp2_dup->t_inpcb.inp_flowtype = M_HASHTYPE_RSS_TCP_IPV4;
+ tp2_dup->t_inpcb.inp_flowid = 12345;
+ INP_WLOCK(&tp2_dup->t_inpcb);
+ tcp_set_hpts(pace, tp2_dup);
+ INP_WUNLOCK(&tp2_dup->t_inpcb);
+ KTEST_EQUAL(tp2_dup->t_hpts_cpu, tp2->t_hpts_cpu);
+
+ /* Test explicit CPU setting */
+ tp3 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp3, NULL);
+ tp3->t_hpts_cpu = 1; /* Assume we have at least 2 CPUs */
+ tp3->t_flags2 |= TF2_HPTS_CPU_SET;
+ INP_WLOCK(&tp3->t_inpcb);
+ tcp_set_hpts(pace, tp3);
+ INP_WUNLOCK(&tp3->t_inpcb);
+ KTEST_EQUAL(tp3->t_hpts_cpu, 1);
+
+ test_hpts_free_tcpcb(tp1);
+ test_hpts_free_tcpcb(tp2);
+ test_hpts_free_tcpcb(tp2_dup);
+ test_hpts_free_tcpcb(tp3);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates edge cases in slot calculation including boundary conditions
+ * around slot 0, maximum slots, and slot wrapping arithmetic.
+ */
+KTEST_FUNC(slot_boundary_conditions)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ /* Test insertion at slot 0 */
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+ INP_WLOCK(&tp->t_inpcb);
+ tp->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp, 0, NULL); /* Should insert immediately (0 timeout) */
+ INP_WUNLOCK(&tp->t_inpcb);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_VERIFY(tp->t_hpts_slot < NUM_OF_HPTSI_SLOTS);
+
+ INP_WLOCK(&tp->t_inpcb);
+ tcp_hpts_remove(pace, tp);
+ INP_WUNLOCK(&tp->t_inpcb);
+
+ /* Test insertion at maximum slot value */
+ INP_WLOCK(&tp->t_inpcb);
+ tp->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp, (NUM_OF_HPTSI_SLOTS - 1) * HPTS_USECS_PER_SLOT, NULL);
+ INP_WUNLOCK(&tp->t_inpcb);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+
+ INP_WLOCK(&tp->t_inpcb);
+ tcp_hpts_remove(pace, tp);
+ INP_WUNLOCK(&tp->t_inpcb);
+
+ /* Test very small timeout values */
+ INP_WLOCK(&tp->t_inpcb);
+ tp->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp, 1, NULL);
+ INP_WUNLOCK(&tp->t_inpcb);
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_EQUAL(tp->t_hpts_slot, HPTS_USEC_TO_SLOTS(1)); /* Should convert 1 usec to slot */
+
+ INP_WLOCK(&tp->t_inpcb);
+ tcp_hpts_remove(pace, tp);
+ INP_WUNLOCK(&tp->t_inpcb);
+
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates the dynamic sleep adjustment logic by pushing the connection
+ * count past DEFAULT_CONNECTION_THRESHOLD and verifying that HPTS still
+ * processes every queued connection and tracks the queue count correctly.
+ */
+KTEST_FUNC(dynamic_sleep_adjustment)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcpcb **tcpcbs;
+ struct tcp_hpts_entry *hpts;
+ uint32_t i, num_tcpcbs = DEFAULT_CONNECTION_THRESHOLD + 50;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ /* Create many connections to exceed threshold */
+ tcpcbs = malloc(num_tcpcbs * sizeof(struct tcpcb *), M_TCPHPTS, M_WAITOK | M_ZERO);
+ KTEST_VERIFY_RET(tcpcbs != NULL, ENOMEM);
+
+ for (i = 0; i < num_tcpcbs; i++) {
+ tcpcbs[i] = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tcpcbs[i], NULL);
+ tcpcbs[i]->t_hpts_cpu = 0; /* Force all to CPU 0 */
+ INP_WLOCK(&tcpcbs[i]->t_inpcb);
+ tcpcbs[i]->t_flags2 |= TF2_HPTS_CALLS;
+ TP_REMOVE_FROM_HPTS(tcpcbs[i]) = 1; /* Will be removed after output */
+ tcp_hpts_insert(pace, tcpcbs[i], 100, NULL);
+ INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
+ }
+
+ hpts = pace->rp_ent[0];
+ dump_hpts_entry(ctx, hpts);
+
+ /* Verify we're above threshold */
+ KTEST_GREATER_THAN(hpts->p_on_queue_cnt, DEFAULT_CONNECTION_THRESHOLD);
+
+ /* Run HPTS to process many connections */
+ test_time_usec += 100;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* Verify HPTS processed slots and connections correctly */
+ KTEST_GREATER_THAN(slots_ran, 0);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], num_tcpcbs);
+
+ /* Verify all connections were removed from queue */
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 0);
+
+ /* Cleanup */
+ for (i = 0; i < num_tcpcbs; i++) {
+ test_hpts_free_tcpcb(tcpcbs[i]);
+ }
+ free(tcpcbs, M_TCPHPTS);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates handling of concurrent insert/remove operations and race conditions
+ * between HPTS processing and user operations.
+ */
+KTEST_FUNC(concurrent_operations)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp1, *tp2;
+ struct tcp_hpts_entry *hpts;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ tp1 = test_hpts_create_tcpcb(ctx, pace);
+ tp2 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp1, NULL);
+ KTEST_NEQUAL(tp2, NULL);
+
+ /* Force all to CPU 0 */
+ tp1->t_hpts_cpu = 0;
+ tp2->t_hpts_cpu = 0;
+
+ /* Insert tp1 */
+ INP_WLOCK(&tp1->t_inpcb);
+ tp1->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp1, 100, NULL);
+ INP_WUNLOCK(&tp1->t_inpcb);
+
+ /* Insert tp2 into same slot */
+ INP_WLOCK(&tp2->t_inpcb);
+ tp2->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp2, 100, NULL);
+ INP_WUNLOCK(&tp2->t_inpcb);
+
+ /* Verify both are inserted */
+ KTEST_EQUAL(tp1->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_ONQUEUE);
+
+ /* Verify they're both assigned to the same slot */
+ KTEST_EQUAL(tp1->t_hpts_slot, tp2->t_hpts_slot);
+
+ /* Verify queue count reflects both connections */
+ KTEST_EQUAL(tp1->t_hpts_cpu, tp2->t_hpts_cpu); /* Should be on same CPU */
+ hpts = pace->rp_ent[tp1->t_hpts_cpu];
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 2);
+
+ /* Remove tp1 while tp2 is still there */
+ INP_WLOCK(&tp1->t_inpcb);
+ tcp_hpts_remove(pace, tp1);
+ INP_WUNLOCK(&tp1->t_inpcb);
+
+ /* Verify tp1 removed, tp2 still there */
+ KTEST_EQUAL(tp1->t_in_hpts, IHPTS_NONE);
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_ONQUEUE);
+
+ /* Verify queue count decreased by one */
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 1);
+
+ /* Remove tp2 */
+ INP_WLOCK(&tp2->t_inpcb);
+ tcp_hpts_remove(pace, tp2);
+ INP_WUNLOCK(&tp2->t_inpcb);
+
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_NONE);
+
+ /* Verify queue is now completely empty */
+ KTEST_EQUAL(hpts->p_on_queue_cnt, 0);
+
+ test_hpts_free_tcpcb(tp1);
+ test_hpts_free_tcpcb(tp2);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates the queued segments processing path via tfb_do_queued_segments,
+ * which is an alternative to direct tcp_output calls.
+ */
+KTEST_FUNC(queued_segments_processing)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp;
+ struct tcp_hpts_entry *hpts;
+ struct mbuf *fake_mbuf;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+
+ /* Create a minimal fake mbuf that has valid STAILQ pointers */
+ fake_mbuf = malloc(sizeof(struct mbuf), M_TCPHPTS, M_WAITOK | M_ZERO);
+ KTEST_NEQUAL(fake_mbuf, NULL);
+
+ /* Set up for queued segments path */
+ tp->t_flags2 |= (TF2_HPTS_CALLS | TF2_SUPPORTS_MBUFQ);
+ STAILQ_INSERT_TAIL(&tp->t_inqueue, fake_mbuf, m_stailqpkt);
+
+ INP_WLOCK(&tp->t_inpcb);
+ tcp_hpts_insert(pace, tp, 100, NULL);
+ INP_WUNLOCK(&tp->t_inpcb);
+
+ hpts = pace->rp_ent[tp->t_hpts_cpu];
+
+ /* Run HPTS and verify queued segments path is taken */
+ test_time_usec += 100;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ KTEST_VERIFY(slots_ran >= 0);
+ KTEST_EQUAL(call_counts[CCNT_TCP_TFB_DO_QUEUED_SEGMENTS], 1);
+
+ /* Connection should be removed from HPTS after processing */
+ KTEST_EQUAL(tp->t_in_hpts, IHPTS_NONE);
+
+ /* Clean up the fake mbuf if it's still in the queue */
+ if (!STAILQ_EMPTY(&tp->t_inqueue)) {
+ struct mbuf *m = STAILQ_FIRST(&tp->t_inqueue);
+ STAILQ_REMOVE_HEAD(&tp->t_inqueue, m_stailqpkt);
+ free(m, M_TCPHPTS);
+ }
+
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates the direct wake mechanism and wake inhibition logic when
+ * the connection count exceeds thresholds.
+ */
+KTEST_FUNC(direct_wake_mechanism)
+{
+ struct tcp_hptsi *pace;
+ struct tcpcb *tp;
+ struct tcp_hpts_entry *hpts;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ tp = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp, NULL);
+ hpts = pace->rp_ent[tp->t_hpts_cpu];
+
+ /* Test direct wake when not over threshold */
+ HPTS_LOCK(hpts);
+ hpts->p_on_queue_cnt = 50; /* Below threshold */
+ hpts->p_hpts_wake_scheduled = 0;
+ tcp_hpts_wake(hpts);
+ KTEST_EQUAL(hpts->p_hpts_wake_scheduled, 1);
+ KTEST_EQUAL(call_counts[CCNT_SWI_SCHED], 1);
+ HPTS_UNLOCK(hpts);
+
+ /* Reset for next test */
+ hpts->p_hpts_wake_scheduled = 0;
+ call_counts[CCNT_SWI_SCHED] = 0;
+
+ /* Test wake inhibition when over threshold */
+ HPTS_LOCK(hpts);
+ hpts->p_on_queue_cnt = 200; /* Above threshold */
+ hpts->p_direct_wake = 1; /* Request direct wake */
+ tcp_hpts_wake(hpts);
+ KTEST_EQUAL(hpts->p_hpts_wake_scheduled, 0); /* Should be inhibited */
+ KTEST_EQUAL(hpts->p_direct_wake, 0); /* Should be cleared */
+ KTEST_EQUAL(call_counts[CCNT_SWI_SCHED], 0); /* No SWI scheduled */
+ HPTS_UNLOCK(hpts);
+
+ test_hpts_free_tcpcb(tp);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates HPTS collision detection when attempting to run HPTS while
+ * it's already active.
+ */
+KTEST_FUNC(hpts_collision_detection)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcp_hpts_entry *hpts;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ hpts = pace->rp_ent[0];
+
+ /* Mark HPTS as active */
+ HPTS_LOCK(hpts);
+ hpts->p_hpts_active = 1;
+ HPTS_UNLOCK(hpts);
+
+ /* Attempt to run HPTS again - should detect collision */
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, false); /* from_callout = false */
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* Should return 0 indicating no work done due to collision */
+ KTEST_EQUAL(slots_ran, 0);
+
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+/*
+ * Validates generation count handling for race condition detection between
+ * HPTS processing and connection insertion/removal operations.
+ */
+KTEST_FUNC(generation_count_validation)
+{
+ struct epoch_tracker et;
+ struct tcp_hptsi *pace;
+ struct tcp_hpts_entry *hpts;
+ struct tcpcb *tp1, *tp2;
+ uint32_t initial_gencnt, slot_to_test = 10;
+ uint32_t timeout_usecs = slot_to_test * HPTS_USECS_PER_SLOT;
+ uint32_t tp2_original_gencnt;
+ int32_t slots_ran;
+
+ test_hpts_init();
+
+ pace = tcp_hptsi_create(&test_funcs, false);
+ KTEST_NEQUAL(pace, NULL);
+ tcp_hptsi_start(pace);
+
+ hpts = pace->rp_ent[0];
+
+ /* Record initial generation count for the test slot */
+ initial_gencnt = hpts->p_hptss[slot_to_test].gencnt;
+
+ /* Create and insert first connection */
+ tp1 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp1, NULL);
+ tp1->t_hpts_cpu = 0; /* Force to CPU 0 */
+
+ INP_WLOCK(&tp1->t_inpcb);
+ tp1->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp1, timeout_usecs, NULL);
+ INP_WUNLOCK(&tp1->t_inpcb);
+
+ /* Verify connection stored the generation count */
+ KTEST_EQUAL(tp1->t_in_hpts, IHPTS_ONQUEUE);
+ KTEST_EQUAL(tp1->t_hpts_slot, slot_to_test);
+ KTEST_EQUAL(tp1->t_hpts_gencnt, initial_gencnt);
+
+ /* Create second connection but don't insert yet */
+ tp2 = test_hpts_create_tcpcb(ctx, pace);
+ KTEST_NEQUAL(tp2, NULL);
+ tp2->t_hpts_cpu = 0; /* Force to CPU 0 */
+
+ /* Force generation count increment by processing the slot */
+ test_time_usec += (slot_to_test + 1) * HPTS_USECS_PER_SLOT;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* Verify processing occurred */
+ KTEST_VERIFY(slots_ran > 0);
+ KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 1);
+
+ /* Verify generation count was incremented */
+ KTEST_EQUAL(hpts->p_hptss[slot_to_test].gencnt, initial_gencnt + 1);
+
+ /* Verify first connection was processed and removed */
+ KTEST_EQUAL(tp1->t_in_hpts, IHPTS_NONE);
+
+ /* Insert second connection and record its generation count */
+ INP_WLOCK(&tp2->t_inpcb);
+ tp2->t_flags2 |= TF2_HPTS_CALLS;
+ tcp_hpts_insert(pace, tp2, timeout_usecs, NULL);
+ INP_WUNLOCK(&tp2->t_inpcb);
+
+ /* Verify connection was inserted successfully */
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_ONQUEUE);
+
+ /* Record the generation count that tp2 received */
+ tp2_original_gencnt = tp2->t_hpts_gencnt;
+
+ /* Test generation count mismatch detection during processing */
+ /* Manually set stale generation count to simulate race condition */
+ tp2->t_hpts_gencnt = tp2_original_gencnt + 100; /* Force a mismatch */
+
+ /* Process the slot to trigger generation count validation */
+ test_time_usec += (slot_to_test + 1) * HPTS_USECS_PER_SLOT;
+ HPTS_LOCK(hpts);
+ NET_EPOCH_ENTER(et);
+ slots_ran = tcp_hptsi(hpts, true);
+ HPTS_UNLOCK(hpts);
+ NET_EPOCH_EXIT(et);
+
+ /* Connection should be processed despite generation count mismatch */
+ KTEST_EQUAL(tp2->t_in_hpts, IHPTS_NONE); /* Processed and released */
+
+ /* The key test: HPTS should handle mismatched generation counts gracefully */
+ KTEST_VERIFY(slots_ran > 0); /* Processing should still occur */
+
+ test_hpts_free_tcpcb(tp1);
+ test_hpts_free_tcpcb(tp2);
+ tcp_hptsi_stop(pace);
+ tcp_hptsi_destroy(pace);
+
+ return (0);
+}
+
+static const struct ktest_test_info tests[] = {
+ KTEST_INFO(module_load),
+ KTEST_INFO(hptsi_create_destroy),
+ KTEST_INFO(hptsi_start_stop),
+ KTEST_INFO(hptsi_independence),
+ KTEST_INFO(function_injection),
+ KTEST_INFO(tcpcb_initialization),
+ KTEST_INFO(tcpcb_insertion),
+ KTEST_INFO(timer_functionality),
+ KTEST_INFO(scalability_tcpcbs),
+ KTEST_INFO(wheel_wrap_recovery),
+ KTEST_INFO(tcpcb_moving_state),
+ KTEST_INFO(deferred_requests),
+ KTEST_INFO(cpu_assignment),
+ KTEST_INFO(slot_boundary_conditions),
+ KTEST_INFO(dynamic_sleep_adjustment),
+ KTEST_INFO(concurrent_operations),
+ KTEST_INFO(queued_segments_processing),
+ KTEST_INFO(direct_wake_mechanism),
+ KTEST_INFO(hpts_collision_detection),
+ KTEST_INFO(generation_count_validation),
+};
+
+#else /* TCP_HPTS_KTEST */
+
+/*
+ * Stub to indicate that the TCP HPTS ktest is not enabled.
+ */
+KTEST_FUNC(module_load_without_tests)
+{
+ KTEST_LOG(ctx, "Warning: TCP HPTS ktest is not enabled");
+ return (0);
+}
+
+static const struct ktest_test_info tests[] = {
+ KTEST_INFO(module_load_without_tests),
+};
+
+#endif
+
+KTEST_MODULE_DECLARE(ktest_tcphpts, tests);
+KTEST_MODULE_DEPEND(ktest_tcphpts, tcphpts);
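The generation_count_validation test above boils down to a compare-and-tolerate pattern: an entry snapshots the slot's generation at insert time, and the sweep treats a mismatch as a sign that the slot was already processed (or that the entry raced with a sweep). A minimal standalone sketch of that idea follows; the struct and function names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for an HPTS wheel slot and a queued entry. */
struct wheel_slot {
	uint32_t gencnt;	/* bumped on every sweep of the slot */
};

struct wheel_entry {
	uint32_t gencnt;	/* snapshot taken at insert time */
};

/* Insert: remember which generation of the slot we joined. */
static void
entry_insert(struct wheel_entry *e, const struct wheel_slot *s)
{
	e->gencnt = s->gencnt;
}

/*
 * Sweep: a stale snapshot means the slot was swept between insert and
 * now (the test forces this by corrupting the snapshot); the consumer
 * must still process the entry gracefully rather than trust queue
 * state derived from the old generation.
 */
static bool
entry_gencnt_current(const struct wheel_entry *e, const struct wheel_slot *s)
{
	return (e->gencnt == s->gencnt);
}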
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index d58cc69b7625..2146b0cac48f 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -219,7 +219,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_do_autorcvbuf), 0,
"Enable automatic receive buffer sizing");
-VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
+VNET_DEFINE(int, tcp_autorcvbuf_max) = 8*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_autorcvbuf_max), 0,
"Max size of automatic receive buffer");
@@ -609,7 +609,6 @@ tcp_input_with_port(struct mbuf **mp, int *offp, int proto, uint16_t port)
int tlen = 0, off;
int drop_hdrlen;
int thflags;
- int rstreason = 0; /* For badport_bandlim accounting purposes */
int lookupflag;
uint8_t iptos;
struct m_tag *fwd_tag = NULL;
@@ -905,23 +904,22 @@ findpcb:
* XXX MRT Send RST using which routing table?
*/
if (inp == NULL) {
- if (rstreason != 0) {
+ if ((lookupflag & INPLOOKUP_WILDCARD) == 0) {
/* We came here after second (safety) lookup. */
- MPASS((lookupflag & INPLOOKUP_WILDCARD) == 0);
- goto dropwithreset;
- }
- /*
- * Log communication attempts to ports that are not
- * in use.
- */
- if ((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
- V_tcp_log_in_vain == 2) {
- if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
+ MPASS(!closed_port);
+ } else {
+ /*
+ * Log communication attempts to ports that are not
+ * in use.
+ */
+ if (((V_tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
+ V_tcp_log_in_vain == 2) &&
+ (s = tcp_log_vain(NULL, th, (void *)ip, ip6))) {
log(LOG_INFO, "%s; %s: Connection attempt "
"to closed port\n", s, __func__);
+ }
+ closed_port = true;
}
- rstreason = BANDLIM_TCP_RST;
- closed_port = true;
goto dropwithreset;
}
INP_LOCK_ASSERT(inp);
@@ -1012,13 +1010,11 @@ findpcb:
* down or it is in the CLOSED state. Either way we drop the
* segment and send an appropriate response.
*/
- rstreason = BANDLIM_TCP_RST;
closed_port = true;
goto dropwithreset;
}
if ((tp->t_port != port) && (tp->t_state > TCPS_LISTEN)) {
- rstreason = BANDLIM_TCP_RST;
closed_port = true;
goto dropwithreset;
}
@@ -1102,7 +1098,8 @@ findpcb:
* don't want to sent RST for the second ACK,
* so we perform second lookup without wildcard
* match, hoping to find the new socket. If
- * the ACK is stray indeed, rstreason would
+ * the ACK is stray indeed, the missing
+ * INPLOOKUP_WILDCARD flag in lookupflag would
* hint the above code that the lookup was a
* second attempt.
*
@@ -1110,7 +1107,6 @@ findpcb:
* of the failure cause.
*/
INP_WUNLOCK(inp);
- rstreason = BANDLIM_TCP_RST;
lookupflag &= ~INPLOOKUP_WILDCARD;
goto findpcb;
}
@@ -1134,7 +1130,6 @@ tfo_socket_result:
V_tcp_sc_rst_sock_fail ?
"sending RST" : "try again");
if (V_tcp_sc_rst_sock_fail) {
- rstreason = BANDLIM_UNLIMITED;
goto dropwithreset;
} else
goto dropunlock;
@@ -1201,7 +1196,6 @@ tfo_socket_result:
s, __func__);
syncache_badack(&inc, port); /* XXX: Not needed! */
TCPSTAT_INC(tcps_badsyn);
- rstreason = BANDLIM_TCP_RST;
goto dropwithreset;
}
/*
@@ -1277,7 +1271,6 @@ tfo_socket_result:
"Connection attempt to deprecated "
"IPv6 address rejected\n",
s, __func__);
- rstreason = BANDLIM_TCP_RST;
goto dropwithreset;
}
}
@@ -1398,8 +1391,7 @@ dropwithreset:
* When blackholing do not respond with a RST but
* completely ignore the segment and drop it.
*/
- if (rstreason == BANDLIM_TCP_RST &&
- ((!closed_port && V_blackhole == 3) ||
+ if (((!closed_port && V_blackhole == 3) ||
(closed_port &&
((V_blackhole == 1 && (thflags & TH_SYN)) || V_blackhole > 1))) &&
(V_blackhole_local || (
@@ -1414,7 +1406,7 @@ dropwithreset:
)))
goto dropunlock;
TCP_PROBE5(receive, NULL, tp, m, tp, th);
- tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ tcp_dropwithreset(m, th, tp, tlen);
m = NULL; /* mbuf chain got consumed. */
dropunlock:
@@ -1523,7 +1515,7 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
uint16_t thflags;
int acked, ourfinisacked, needoutput = 0;
sackstatus_t sack_changed;
- int rstreason, todrop, win, incforsyn = 0;
+ int todrop, win, incforsyn = 0;
uint32_t tiwin;
uint16_t nsegs;
char *s;
@@ -1568,7 +1560,6 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
*/
if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
(SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
- rstreason = BANDLIM_UNLIMITED;
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
goto dropwithreset;
}
@@ -1984,7 +1975,6 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if ((thflags & TH_ACK) &&
(SEQ_LEQ(th->th_ack, tp->snd_una) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
- rstreason = BANDLIM_TCP_RST;
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
goto dropwithreset;
}
@@ -1997,7 +1987,6 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
* FIN, or a RST.
*/
if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
- rstreason = BANDLIM_TCP_RST;
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
goto dropwithreset;
} else if (thflags & TH_SYN) {
@@ -2218,7 +2207,6 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
tp = tcp_drop(tp, ECONNRESET);
- rstreason = BANDLIM_UNLIMITED;
} else {
tcp_ecn_input_syn_sent(tp, thflags, iptos);
tcp_send_challenge_ack(tp, th, m);
@@ -2265,7 +2253,6 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
* for the "LAND" DoS attack.
*/
if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
- rstreason = BANDLIM_TCP_RST;
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
goto dropwithreset;
}
@@ -2347,7 +2334,6 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
TCPSTAT_INC(tcps_rcvafterclose);
- rstreason = BANDLIM_UNLIMITED;
goto dropwithreset;
}
@@ -2576,299 +2562,270 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
hhook_run_tcp_est_in(tp, th, &to);
#endif
- if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
- maxseg = tcp_maxseg(tp);
- if (no_data &&
- (tiwin == tp->snd_wnd ||
- (tp->t_flags & TF_SACK_PERMIT))) {
+ if (SEQ_LT(th->th_ack, tp->snd_una)) {
+ /* This is old ACK information, don't process it. */
+ break;
+ }
+ if (th->th_ack == tp->snd_una) {
+ /* Check if this is a duplicate ACK. */
+ if ((tp->t_flags & TF_SACK_PERMIT) &&
+ V_tcp_do_newsack) {
/*
- * If this is the first time we've seen a
- * FIN from the remote, this is not a
- * duplicate and it needs to be processed
- * normally. This happens during a
- * simultaneous close.
+ * If SEG.ACK == SND.UNA, RFC 6675 requires a
+ * duplicate ACK to selectively acknowledge
+ * at least one byte that was not selectively
+ * acknowledged before.
*/
- if ((thflags & TH_FIN) &&
- (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
- tp->t_dupacks = 0;
+ if (sack_changed == SACK_NOCHANGE) {
break;
}
- TCPSTAT_INC(tcps_rcvdupack);
- /*
- * If we have outstanding data (other than
- * a window probe), this is a completely
- * duplicate ack (ie, window info didn't
- * change and FIN isn't set),
- * the ack is the biggest we've
- * seen and we've seen exactly our rexmt
- * threshold of them, assume a packet
- * has been dropped and retransmit it.
- * Kludge snd_nxt & the congestion
- * window so we send only this one
- * packet.
- *
- * We know we're losing at the current
- * window size so do congestion avoidance
- * (set ssthresh to half the current window
- * and pull our congestion window back to
- * the new ssthresh).
- *
- * Dup acks mean that packets have left the
- * network (they're now cached at the receiver)
- * so bump cwnd by the amount in the receiver
- * to keep a constant cwnd packets in the
- * network.
- *
- * When using TCP ECN, notify the peer that
- * we reduced the cwnd.
- */
+ } else {
/*
- * Following 2 kinds of acks should not affect
- * dupack counting:
- * 1) Old acks
- * 2) Acks with SACK but without any new SACK
- * information in them. These could result from
- * any anomaly in the network like a switch
- * duplicating packets or a possible DoS attack.
+ * If SEG.ACK == SND.UNA, RFC 5681 requires a
+ * duplicate ACK to have no data on it and to
+ * not be a window update.
*/
- if (th->th_ack != tp->snd_una ||
- (tcp_is_sack_recovery(tp, &to) &&
- (sack_changed == SACK_NOCHANGE))) {
+ if (!no_data || tiwin != tp->snd_wnd) {
break;
- } else if (!tcp_timer_active(tp, TT_REXMT)) {
- tp->t_dupacks = 0;
- } else if (++tp->t_dupacks > tcprexmtthresh ||
- IN_FASTRECOVERY(tp->t_flags)) {
- cc_ack_received(tp, th, nsegs,
- CC_DUPACK);
- if (V_tcp_do_prr &&
+ }
+ }
+ /*
+ * If this is the first time we've seen a
+ * FIN from the remote, this is not a
+ * duplicate ACK and it needs to be processed
+ * normally.
+ * This happens during a simultaneous close.
+ */
+ if ((thflags & TH_FIN) &&
+ (TCPS_HAVERCVDFIN(tp->t_state) == 0)) {
+ tp->t_dupacks = 0;
+ break;
+ }
+ /* Perform duplicate ACK processing. */
+ TCPSTAT_INC(tcps_rcvdupack);
+ maxseg = tcp_maxseg(tp);
+ if (!tcp_timer_active(tp, TT_REXMT)) {
+ tp->t_dupacks = 0;
+ } else if (++tp->t_dupacks > tcprexmtthresh ||
+ IN_FASTRECOVERY(tp->t_flags)) {
+ cc_ack_received(tp, th, nsegs, CC_DUPACK);
+ if (V_tcp_do_prr &&
+ IN_FASTRECOVERY(tp->t_flags) &&
+ (tp->t_flags & TF_SACK_PERMIT)) {
+ tcp_do_prr_ack(tp, th, &to,
+ sack_changed, &maxseg);
+ } else if (tcp_is_sack_recovery(tp, &to) &&
IN_FASTRECOVERY(tp->t_flags) &&
- (tp->t_flags & TF_SACK_PERMIT)) {
- tcp_do_prr_ack(tp, th, &to,
- sack_changed, &maxseg);
- } else if (tcp_is_sack_recovery(tp, &to) &&
- IN_FASTRECOVERY(tp->t_flags) &&
- (tp->snd_nxt == tp->snd_max)) {
- int awnd;
+ (tp->snd_nxt == tp->snd_max)) {
+ int awnd;
- /*
- * Compute the amount of data in flight first.
- * We can inject new data into the pipe iff
- * we have less than ssthresh
- * worth of data in flight.
- */
- awnd = tcp_compute_pipe(tp);
- if (awnd < tp->snd_ssthresh) {
- tp->snd_cwnd += imax(maxseg,
- imin(2 * maxseg,
- tp->sackhint.delivered_data));
- if (tp->snd_cwnd > tp->snd_ssthresh)
- tp->snd_cwnd = tp->snd_ssthresh;
- }
- } else if (tcp_is_sack_recovery(tp, &to) &&
- IN_FASTRECOVERY(tp->t_flags) &&
- SEQ_LT(tp->snd_nxt, tp->snd_max)) {
+ /*
+ * Compute the amount of data in flight first.
+ * We can inject new data into the pipe iff
+ * we have less than ssthresh
+ * worth of data in flight.
+ */
+ awnd = tcp_compute_pipe(tp);
+ if (awnd < tp->snd_ssthresh) {
tp->snd_cwnd += imax(maxseg,
imin(2 * maxseg,
tp->sackhint.delivered_data));
- } else {
- tp->snd_cwnd += maxseg;
+ if (tp->snd_cwnd > tp->snd_ssthresh)
+ tp->snd_cwnd = tp->snd_ssthresh;
}
- (void) tcp_output(tp);
- goto drop;
- } else if (tp->t_dupacks == tcprexmtthresh ||
- (tp->t_flags & TF_SACK_PERMIT &&
- V_tcp_do_newsack &&
- tp->sackhint.sacked_bytes >
- (tcprexmtthresh - 1) * maxseg)) {
+ } else if (tcp_is_sack_recovery(tp, &to) &&
+ IN_FASTRECOVERY(tp->t_flags) &&
+ SEQ_LT(tp->snd_nxt, tp->snd_max)) {
+ tp->snd_cwnd += imax(maxseg,
+ imin(2 * maxseg,
+ tp->sackhint.delivered_data));
+ } else {
+ tp->snd_cwnd += maxseg;
+ }
+ (void) tcp_output(tp);
+ goto drop;
+ } else if (tp->t_dupacks == tcprexmtthresh ||
+ (tp->t_flags & TF_SACK_PERMIT &&
+ V_tcp_do_newsack &&
+ tp->sackhint.sacked_bytes >
+ (tcprexmtthresh - 1) * maxseg)) {
enter_recovery:
- /*
- * Above is the RFC6675 trigger condition of
- * more than (dupthresh-1)*maxseg sacked data.
- * If the count of holes in the
- * scoreboard is >= dupthresh, we could
- * also enter loss recovery, but don't
- * have that value readily available.
- */
- tp->t_dupacks = tcprexmtthresh;
- tcp_seq onxt = tp->snd_nxt;
+ /*
+ * Above is the RFC6675 trigger condition of
+ * more than (dupthresh-1)*maxseg sacked data.
+ * If the count of holes in the
+ * scoreboard is >= dupthresh, we could
+ * also enter loss recovery, but don't
+ * have that value readily available.
+ */
+ tp->t_dupacks = tcprexmtthresh;
+ tcp_seq onxt = tp->snd_nxt;
- /*
- * If we're doing sack, check to
- * see if we're already in sack
- * recovery. If we're not doing sack,
- * check to see if we're in newreno
- * recovery.
- */
- if (tcp_is_sack_recovery(tp, &to)) {
- if (IN_FASTRECOVERY(tp->t_flags)) {
- tp->t_dupacks = 0;
- break;
- }
- } else {
- if (SEQ_LEQ(th->th_ack,
- tp->snd_recover)) {
- tp->t_dupacks = 0;
- break;
- }
+ /*
+ * If we're doing sack, check to
+ * see if we're already in sack
+ * recovery. If we're not doing sack,
+ * check to see if we're in newreno
+ * recovery.
+ */
+ if (tcp_is_sack_recovery(tp, &to)) {
+ if (IN_FASTRECOVERY(tp->t_flags)) {
+ tp->t_dupacks = 0;
+ break;
}
- /* Congestion signal before ack. */
- cc_cong_signal(tp, th, CC_NDUPACK);
- cc_ack_received(tp, th, nsegs,
- CC_DUPACK);
- tcp_timer_activate(tp, TT_REXMT, 0);
- tp->t_rtttime = 0;
- if (V_tcp_do_prr) {
- /*
- * snd_ssthresh and snd_recover are
- * already updated by cc_cong_signal.
- */
- if (tcp_is_sack_recovery(tp, &to)) {
- /*
- * Include Limited Transmit
- * segments here
- */
- tp->sackhint.prr_delivered =
- imin(tp->snd_max - th->th_ack,
- (tp->snd_limited + 1) * maxseg);
- } else {
- tp->sackhint.prr_delivered =
- maxseg;
- }
- tp->sackhint.recover_fs = max(1,
- tp->snd_nxt - tp->snd_una);
+ } else {
+ if (SEQ_LEQ(th->th_ack,
+ tp->snd_recover)) {
+ tp->t_dupacks = 0;
+ break;
}
- tp->snd_limited = 0;
+ }
+ /* Congestion signal before ack. */
+ cc_cong_signal(tp, th, CC_NDUPACK);
+ cc_ack_received(tp, th, nsegs, CC_DUPACK);
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ tp->t_rtttime = 0;
+ if (V_tcp_do_prr) {
+ /*
+ * snd_ssthresh and snd_recover are
+ * already updated by cc_cong_signal.
+ */
if (tcp_is_sack_recovery(tp, &to)) {
- TCPSTAT_INC(tcps_sack_recovery_episode);
/*
- * When entering LR after RTO due to
- * Duplicate ACKs, retransmit existing
- * holes from the scoreboard.
+ * Include Limited Transmit
+ * segments here
*/
- tcp_resend_sackholes(tp);
- /* Avoid inflating cwnd in tcp_output */
- tp->snd_nxt = tp->snd_max;
- tp->snd_cwnd = tcp_compute_pipe(tp) +
+ tp->sackhint.prr_delivered =
+ imin(tp->snd_max - th->th_ack,
+ (tp->snd_limited + 1) * maxseg);
+ } else {
+ tp->sackhint.prr_delivered =
maxseg;
- (void) tcp_output(tp);
- /* Set cwnd to the expected flightsize */
- tp->snd_cwnd = tp->snd_ssthresh;
- if (SEQ_GT(th->th_ack, tp->snd_una)) {
- goto resume_partialack;
- }
- goto drop;
}
- tp->snd_nxt = th->th_ack;
- tp->snd_cwnd = maxseg;
- (void) tcp_output(tp);
- KASSERT(tp->snd_limited <= 2,
- ("%s: tp->snd_limited too big",
- __func__));
- tp->snd_cwnd = tp->snd_ssthresh +
- maxseg *
- (tp->t_dupacks - tp->snd_limited);
- if (SEQ_GT(onxt, tp->snd_nxt))
- tp->snd_nxt = onxt;
- goto drop;
- } else if (V_tcp_do_rfc3042) {
- /*
- * Process first and second duplicate
- * ACKs. Each indicates a segment
- * leaving the network, creating room
- * for more. Make sure we can send a
- * packet on reception of each duplicate
- * ACK by increasing snd_cwnd by one
- * segment. Restore the original
- * snd_cwnd after packet transmission.
- */
- cc_ack_received(tp, th, nsegs, CC_DUPACK);
- uint32_t oldcwnd = tp->snd_cwnd;
- tcp_seq oldsndmax = tp->snd_max;
- u_int sent;
- int avail;
-
- KASSERT(tp->t_dupacks == 1 ||
- tp->t_dupacks == 2,
- ("%s: dupacks not 1 or 2",
- __func__));
- if (tp->t_dupacks == 1)
- tp->snd_limited = 0;
- if ((tp->snd_nxt == tp->snd_max) &&
- (tp->t_rxtshift == 0))
- tp->snd_cwnd =
- SEQ_SUB(tp->snd_nxt,
- tp->snd_una) -
- tcp_sack_adjust(tp);
- tp->snd_cwnd +=
- (tp->t_dupacks - tp->snd_limited) *
- maxseg - tcp_sack_adjust(tp);
+ tp->sackhint.recover_fs = max(1,
+ tp->snd_nxt - tp->snd_una);
+ }
+ tp->snd_limited = 0;
+ if (tcp_is_sack_recovery(tp, &to)) {
+ TCPSTAT_INC(tcps_sack_recovery_episode);
/*
- * Only call tcp_output when there
- * is new data available to be sent
- * or we need to send an ACK.
+ * When entering LR after RTO due to
+ * Duplicate ACKs, retransmit existing
+ * holes from the scoreboard.
*/
- SOCK_SENDBUF_LOCK(so);
- avail = sbavail(&so->so_snd);
- SOCK_SENDBUF_UNLOCK(so);
- if (tp->t_flags & TF_ACKNOW ||
- (avail >=
- SEQ_SUB(tp->snd_nxt, tp->snd_una))) {
- (void) tcp_output(tp);
- }
- sent = SEQ_SUB(tp->snd_max, oldsndmax);
- if (sent > maxseg) {
- KASSERT((tp->t_dupacks == 2 &&
- tp->snd_limited == 0) ||
- (sent == maxseg + 1 &&
- tp->t_flags & TF_SENTFIN) ||
- (sent < 2 * maxseg &&
- tp->t_flags & TF_NODELAY),
- ("%s: sent too much: %u>%u",
- __func__, sent, maxseg));
- tp->snd_limited = 2;
- } else if (sent > 0) {
- ++tp->snd_limited;
- }
- tp->snd_cwnd = oldcwnd;
+ tcp_resend_sackholes(tp);
+ /* Avoid inflating cwnd in tcp_output */
+ tp->snd_nxt = tp->snd_max;
+ tp->snd_cwnd = tcp_compute_pipe(tp) +
+ maxseg;
+ (void) tcp_output(tp);
+ /* Set cwnd to the expected flightsize */
+ tp->snd_cwnd = tp->snd_ssthresh;
goto drop;
}
- }
- break;
- } else {
- /*
- * This ack is advancing the left edge, reset the
- * counter.
- */
- tp->t_dupacks = 0;
- /*
- * If this ack also has new SACK info, increment the
- * counter as per rfc6675. The variable
- * sack_changed tracks all changes to the SACK
- * scoreboard, including when partial ACKs without
- * SACK options are received, and clear the scoreboard
- * from the left side. Such partial ACKs should not be
- * counted as dupacks here.
- */
- if (tcp_is_sack_recovery(tp, &to) &&
- (((tp->t_rxtshift == 0) && (sack_changed != SACK_NOCHANGE)) ||
- ((tp->t_rxtshift > 0) && (sack_changed == SACK_NEWLOSS))) &&
- (tp->snd_nxt == tp->snd_max)) {
- tp->t_dupacks++;
- /* limit overhead by setting maxseg last */
- if (!IN_FASTRECOVERY(tp->t_flags) &&
- (tp->sackhint.sacked_bytes >
- ((tcprexmtthresh - 1) *
- (maxseg = tcp_maxseg(tp))))) {
- goto enter_recovery;
+ tp->snd_nxt = th->th_ack;
+ tp->snd_cwnd = maxseg;
+ (void) tcp_output(tp);
+ KASSERT(tp->snd_limited <= 2,
+ ("%s: tp->snd_limited too big",
+ __func__));
+ tp->snd_cwnd = tp->snd_ssthresh +
+ maxseg *
+ (tp->t_dupacks - tp->snd_limited);
+ if (SEQ_GT(onxt, tp->snd_nxt))
+ tp->snd_nxt = onxt;
+ goto drop;
+ } else if (V_tcp_do_rfc3042) {
+ /*
+ * Process first and second duplicate
+ * ACKs. Each indicates a segment
+ * leaving the network, creating room
+ * for more. Make sure we can send a
+ * packet on reception of each duplicate
+ * ACK by increasing snd_cwnd by one
+ * segment. Restore the original
+ * snd_cwnd after packet transmission.
+ */
+ cc_ack_received(tp, th, nsegs, CC_DUPACK);
+ uint32_t oldcwnd = tp->snd_cwnd;
+ tcp_seq oldsndmax = tp->snd_max;
+ u_int sent;
+ int avail;
+
+ KASSERT(tp->t_dupacks == 1 ||
+ tp->t_dupacks == 2,
+ ("%s: dupacks not 1 or 2",
+ __func__));
+ if (tp->t_dupacks == 1)
+ tp->snd_limited = 0;
+ if ((tp->snd_nxt == tp->snd_max) &&
+ (tp->t_rxtshift == 0))
+ tp->snd_cwnd =
+ SEQ_SUB(tp->snd_nxt, tp->snd_una);
+ tp->snd_cwnd +=
+ (tp->t_dupacks - tp->snd_limited) * maxseg;
+ tp->snd_cwnd -= tcp_sack_adjust(tp);
+ /*
+ * Only call tcp_output when there
+ * is new data available to be sent
+ * or we need to send an ACK.
+ */
+ SOCK_SENDBUF_LOCK(so);
+ avail = sbavail(&so->so_snd);
+ SOCK_SENDBUF_UNLOCK(so);
+ if (tp->t_flags & TF_ACKNOW ||
+ (avail >=
+ SEQ_SUB(tp->snd_nxt, tp->snd_una))) {
+ (void) tcp_output(tp);
+ }
+ sent = SEQ_SUB(tp->snd_max, oldsndmax);
+ if (sent > maxseg) {
+ KASSERT((tp->t_dupacks == 2 &&
+ tp->snd_limited == 0) ||
+ (sent == maxseg + 1 &&
+ tp->t_flags & TF_SENTFIN) ||
+ (sent < 2 * maxseg &&
+ tp->t_flags & TF_NODELAY),
+ ("%s: sent too much: %u>%u",
+ __func__, sent, maxseg));
+ tp->snd_limited = 2;
+ } else if (sent > 0) {
+ ++tp->snd_limited;
}
+ tp->snd_cwnd = oldcwnd;
+ goto drop;
}
+ break;
}
-
-resume_partialack:
KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
- ("%s: th_ack <= snd_una", __func__));
-
+ ("%s: SEQ_LEQ(th_ack, snd_una)", __func__));
+ /*
+ * This ack is advancing the left edge, reset the
+ * counter.
+ */
+ tp->t_dupacks = 0;
+ /*
+ * If this ack also has new SACK info, increment the
+ * t_dupacks as per RFC 6675. The variable
+ * sack_changed tracks all changes to the SACK
+ * scoreboard, including when partial ACKs without
+ * SACK options are received and clear the scoreboard
+ * from the left side. Such partial ACKs should not be
+ * counted as dupacks here.
+ */
+ if (V_tcp_do_newsack &&
+ tcp_is_sack_recovery(tp, &to) &&
+ (((tp->t_rxtshift == 0) && (sack_changed != SACK_NOCHANGE)) ||
+ ((tp->t_rxtshift > 0) && (sack_changed == SACK_NEWLOSS))) &&
+ (tp->snd_nxt == tp->snd_max)) {
+ tp->t_dupacks++;
+ /* limit overhead by setting maxseg last */
+ if (!IN_FASTRECOVERY(tp->t_flags) &&
+ (tp->sackhint.sacked_bytes >
+ (tcprexmtthresh - 1) * (maxseg = tcp_maxseg(tp)))) {
+ goto enter_recovery;
+ }
+ }
/*
* If the congestion window was inflated to account
* for the other side's cached packets, retract it.
@@ -3444,7 +3401,6 @@ dropafterack:
if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
(SEQ_GT(tp->snd_una, th->th_ack) ||
SEQ_GT(th->th_ack, tp->snd_max)) ) {
- rstreason = BANDLIM_TCP_RST;
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
goto dropwithreset;
}
@@ -3456,11 +3412,10 @@ dropafterack:
return;
dropwithreset:
+ tcp_dropwithreset(m, th, tp, tlen);
if (tp != NULL) {
- tcp_dropwithreset(m, th, tp, tlen, rstreason);
INP_WUNLOCK(inp);
- } else
- tcp_dropwithreset(m, th, NULL, tlen, rstreason);
+ }
return;
drop:
@@ -3480,8 +3435,7 @@ drop:
* tp may be NULL.
*/
void
-tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
- int tlen, int rstreason)
+tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, int tlen)
{
#ifdef INET
struct ip *ip;
@@ -3521,7 +3475,7 @@ tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
#endif
/* Perform bandwidth limiting. */
- if (badport_bandlim(rstreason) < 0)
+ if (badport_bandlim(BANDLIM_TCP_RST) < 0)
goto drop;
/* tcp_respond consumes the mbuf chain. */
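Beyond dropping rstreason, the large tcp_do_segment() hunk flattens the duplicate-ACK logic into three explicit cases: ACKs below snd_una are ignored, ACKs equal to snd_una must qualify as duplicates under RFC 6675 (new SACK information, when newsack is enabled) or RFC 5681 (no payload, no window update), and anything above snd_una advances the left edge. A hedged standalone restatement of that classification, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative snapshot of the fields the classification consults. */
struct ack_view {
	uint32_t th_ack;	/* segment's acknowledgment number */
	uint32_t snd_una;	/* oldest unacknowledged sequence */
	bool	 sack_permit;	/* TF_SACK_PERMIT negotiated */
	bool	 do_newsack;	/* RFC 6675 behaviour enabled */
	bool	 sack_changed;	/* segment added new SACK information */
	bool	 has_data;	/* segment carried payload */
	bool	 win_update;	/* tiwin != snd_wnd */
};

enum ack_class { ACK_OLD, ACK_NOT_DUP, ACK_DUP, ACK_ADVANCES };

static enum ack_class
classify_ack(const struct ack_view *v)
{
	/* Serial-number arithmetic, standing in for SEQ_LT()/SEQ_GT(). */
	int32_t delta = (int32_t)(v->th_ack - v->snd_una);

	if (delta < 0)
		return (ACK_OLD);	/* old information, ignore */
	if (delta > 0)
		return (ACK_ADVANCES);	/* left edge moves, dupacks reset */
	/*
	 * th_ack == snd_una.  (The kernel additionally exempts the
	 * first FIN seen during a simultaneous close before counting
	 * duplicate ACKs.)
	 */
	if (v->sack_permit && v->do_newsack)
		return (v->sack_changed ? ACK_DUP : ACK_NOT_DUP);
	return ((!v->has_data && !v->win_update) ? ACK_DUP : ACK_NOT_DUP);
}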
diff --git a/sys/netinet/tcp_log_buf.c b/sys/netinet/tcp_log_buf.c
index e24790ece43d..473c534ef83d 100644
--- a/sys/netinet/tcp_log_buf.c
+++ b/sys/netinet/tcp_log_buf.c
@@ -61,6 +61,9 @@
#include <net/vnet.h>
#include <netinet/in.h>
+#ifdef DDB
+#include <netinet/in_kdtrace.h>
+#endif
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
index 7512679bd4e9..9b5baf115855 100644
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -1428,17 +1428,6 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
int error;
- if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
- ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
- (m->m_pkthdr.csum_data != 0xffff)) {
- /*
- * The checksum either did not have hardware offload
- * or it was a bad checksum. We can't LRO such
- * a packet.
- */
- counter_u64_add(tcp_bad_csums, 1);
- return (TCP_LRO_CANNOT);
- }
/* get current time */
binuptime(&lc->lro_last_queue_time);
CURVNET_SET(lc->ifp->if_vnet);
@@ -1486,10 +1475,11 @@ tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
}
/* create sequence number */
- lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
- (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
- (((uint64_t)mb->m_pkthdr.flowid) << 24) |
- ((uint64_t)lc->lro_mbuf_count);
+ lc->lro_mbuf_data[lc->lro_mbuf_count].seq = lc->lro_mbuf_count;
+ if (M_HASHTYPE_ISHASH(mb))
+ lc->lro_mbuf_data[lc->lro_mbuf_count].seq |=
+ (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
+ (((uint64_t)mb->m_pkthdr.flowid) << 24);
/* enter mbuf */
lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;
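The tcp_lro_queue_mbuf() hunk changes the 64-bit sort key so that the hash type and flowid are folded in only when the mbuf actually carries a hash; unhashed mbufs now sort purely by arrival order instead of under a meaningless key. A sketch of the packing, assuming fewer than 2^24 mbufs are queued per batch:

#include <stdbool.h>
#include <stdint.h>

static uint64_t
lro_sort_key(bool hashed, uint8_t hash_type, uint32_t flowid,
    uint32_t arrival_idx)
{
	/* Low 24 bits preserve FIFO order (arrival_idx < 1 << 24). */
	uint64_t seq = arrival_idx;

	if (hashed) {
		/* Hash type in bits 63..56, flowid in bits 55..24. */
		seq |= ((uint64_t)hash_type << 56) |
		    ((uint64_t)flowid << 24);
	}
	return (seq);
}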
diff --git a/sys/netinet/tcp_lro_hpts.c b/sys/netinet/tcp_lro_hpts.c
index 43587285fe26..ac1a27a4290a 100644
--- a/sys/netinet/tcp_lro_hpts.c
+++ b/sys/netinet/tcp_lro_hpts.c
@@ -29,6 +29,8 @@
#include "opt_inet6.h"
#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@@ -62,6 +64,7 @@
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
+#include <netinet/tcp_hpts_internal.h>
#ifdef TCP_BLACKBOX
#include <netinet/tcp_log_buf.h>
#endif
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index 2dfb7faf56e3..208f72c4661c 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -123,7 +123,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_autosndbuf_inc), 0,
"Incrementor step size of automatic send buffer");
-VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
+VNET_DEFINE(int, tcp_autosndbuf_max) = 8*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_autosndbuf_max), 0,
"Max size of automatic send buffer");
diff --git a/sys/netinet/tcp_sack.c b/sys/netinet/tcp_sack.c
index b6c55fac50b3..6e08ad2796a8 100644
--- a/sys/netinet/tcp_sack.c
+++ b/sys/netinet/tcp_sack.c
@@ -128,8 +128,25 @@ SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
"Enable/Disable TCP SACK support");
VNET_DEFINE(int, tcp_do_newsack) = 1;
-SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, revised, CTLFLAG_VNET | CTLFLAG_RW,
- &VNET_NAME(tcp_do_newsack), 0,
+
+static int
+sysctl_net_inet_tcp_sack_revised(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int new;
+
+ new = V_tcp_do_newsack;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if (error == 0 && req->newptr) {
+ V_tcp_do_newsack = new;
+ gone_in(16, "net.inet.tcp.sack.revised is deprecated and will be removed."
+ " net.inet.tcp.sack.enable will always follow RFC6675 SACK.\n");
+ }
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_tcp_sack, OID_AUTO, revised, CTLFLAG_VNET | CTLFLAG_RW | CTLTYPE_INT,
+ &VNET_NAME(tcp_do_newsack), 0, sysctl_net_inet_tcp_sack_revised, "CU",
"Use revised SACK loss recovery per RFC 6675");
VNET_DEFINE(int, tcp_do_lrd) = 1;
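The sysctl keeps its integer read/write semantics; the SYSCTL_PROC shim above only adds the gone_in(16) notice on writes. From userland nothing changes, as this small illustrative program shows (toggling the knob on a FreeBSD host):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val, newval = 1;
	size_t len = sizeof(val);

	/* Read the old value and write the new one in a single call. */
	if (sysctlbyname("net.inet.tcp.sack.revised", &val, &len,
	    &newval, sizeof(newval)) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("previous value: %d\n", val);
	return (0);
}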
diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
index ce4e9f30020c..66983edcdd73 100644
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -78,8 +78,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -482,7 +480,7 @@ bbr_find_lowest_rsm(struct tcp_bbr *bbr);
static __inline uint32_t
bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type);
static void
-bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot,
+bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t pacing_delay,
uint8_t which);
static void
bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts,
@@ -491,7 +489,7 @@ bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts,
static void
bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag);
static void
-bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot,
+bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t pacing_delay,
uint32_t del_by, uint32_t cts, uint32_t sloton,
uint32_t prev_delay);
static void
@@ -726,7 +724,7 @@ bbr_minseg(struct tcp_bbr *bbr)
}
static void
-bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t slot, uint32_t tot_len)
+bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t pacing_delay, uint32_t tot_len)
{
struct inpcb *inp = tptoinpcb(tp);
struct hpts_diag diag;
@@ -753,40 +751,40 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
bbr->r_ctl.rc_timer_exp = 0;
prev_delay = bbr->r_ctl.rc_last_delay_val;
if (bbr->r_ctl.rc_last_delay_val &&
- (slot == 0)) {
+ (pacing_delay == 0)) {
/*
* If a previous pacer delay was in place we
* are not coming from the output side (where
* we calculate a delay, more likely a timer).
*/
- slot = bbr->r_ctl.rc_last_delay_val;
+ pacing_delay = bbr->r_ctl.rc_last_delay_val;
if (TSTMP_GT(cts, bbr->rc_pacer_started)) {
/* Compensate for time passed */
delay_calc = cts - bbr->rc_pacer_started;
- if (delay_calc <= slot)
- slot -= delay_calc;
+ if (delay_calc <= pacing_delay)
+ pacing_delay -= delay_calc;
}
}
/* Do we have early to make up for by pushing out the pacing time? */
if (bbr->r_agg_early_set) {
- bbr_log_pacing_delay_calc(bbr, 0, bbr->r_ctl.rc_agg_early, cts, slot, 0, bbr->r_agg_early_set, 2);
- slot += bbr->r_ctl.rc_agg_early;
+ bbr_log_pacing_delay_calc(bbr, 0, bbr->r_ctl.rc_agg_early, cts, pacing_delay, 0, bbr->r_agg_early_set, 2);
+ pacing_delay += bbr->r_ctl.rc_agg_early;
bbr->r_ctl.rc_agg_early = 0;
bbr->r_agg_early_set = 0;
}
/* Are we running a total debt that needs to be compensated for? */
if (bbr->r_ctl.rc_hptsi_agg_delay) {
- if (slot > bbr->r_ctl.rc_hptsi_agg_delay) {
+ if (pacing_delay > bbr->r_ctl.rc_hptsi_agg_delay) {
/* We nuke the delay */
- slot -= bbr->r_ctl.rc_hptsi_agg_delay;
+ pacing_delay -= bbr->r_ctl.rc_hptsi_agg_delay;
bbr->r_ctl.rc_hptsi_agg_delay = 0;
} else {
/* We nuke some of the delay, put in a minimal 100usecs */
- bbr->r_ctl.rc_hptsi_agg_delay -= slot;
- bbr->r_ctl.rc_last_delay_val = slot = 100;
+ bbr->r_ctl.rc_hptsi_agg_delay -= pacing_delay;
+ bbr->r_ctl.rc_last_delay_val = pacing_delay = 100;
}
}
- bbr->r_ctl.rc_last_delay_val = slot;
+ bbr->r_ctl.rc_last_delay_val = pacing_delay;
hpts_timeout = bbr_timer_start(tp, bbr, cts);
if (tp->t_flags & TF_DELACK) {
if (bbr->rc_in_persist == 0) {
@@ -812,7 +810,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK;
hpts_timeout = delayed_ack;
}
- if (slot) {
+ if (pacing_delay) {
/* Mark that we have a pacing timer up */
BBR_STAT_INC(bbr_paced_segments);
bbr->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
@@ -822,7 +820,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
* wheel, we resort to a keep-alive timer if its configured.
*/
if ((hpts_timeout == 0) &&
- (slot == 0)) {
+ (pacing_delay == 0)) {
if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
(tp->t_state <= TCPS_CLOSING)) {
/*
@@ -851,7 +849,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
if (left < hpts_timeout)
hpts_timeout = left;
}
- if (bbr->r_ctl.rc_incr_tmrs && slot &&
+ if (bbr->r_ctl.rc_incr_tmrs && pacing_delay &&
(bbr->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
/*
* If configured to do so, and the timer is either
@@ -869,7 +867,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
* this extra delay but this is easier and being more
* conservative is probably better.
*/
- hpts_timeout += slot;
+ hpts_timeout += pacing_delay;
}
if (hpts_timeout) {
/*
@@ -881,10 +879,10 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
bbr->r_ctl.rc_timer_exp = cts + hpts_timeout;
} else
bbr->r_ctl.rc_timer_exp = 0;
- if ((slot) &&
+ if ((pacing_delay) &&
(bbr->rc_use_google ||
bbr->output_error_seen ||
- (slot <= hpts_timeout)) ) {
+ (pacing_delay <= hpts_timeout)) ) {
/*
* Tell LRO that it can queue packets while
* we pace.
@@ -902,17 +900,15 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
bbr->rc_pacer_started = cts;
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, pacing_delay, &diag);
bbr->rc_timer_first = 0;
bbr->bbr_timer_src = frm;
- bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1);
+ bbr_log_to_start(bbr, cts, hpts_timeout, pacing_delay, 1);
bbr_log_hpts_diag(bbr, cts, &diag);
} else if (hpts_timeout) {
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, hpts_timeout, &diag);
/*
- * We add the flag here as well if the slot is set,
+ * We add the flag here as well if the pacing delay is set,
* since hpts will call in to clear the queue first before
* calling the output routine (which does our timers).
* We don't want to set the flag if its just a timer
@@ -921,7 +917,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
* on a keep-alive timer and a request comes in for
* more data.
*/
- if (slot)
+ if (pacing_delay)
bbr->rc_pacer_started = cts;
if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
(bbr->rc_cwnd_limited == 0)) {
@@ -938,12 +934,12 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
TF2_DONT_SACK_QUEUE);
}
bbr->bbr_timer_src = frm;
- bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0);
+ bbr_log_to_start(bbr, cts, hpts_timeout, pacing_delay, 0);
bbr_log_hpts_diag(bbr, cts, &diag);
bbr->rc_timer_first = 1;
}
bbr->rc_tmr_stopped = 0;
- bbr_log_type_bbrsnd(bbr, tot_len, slot, delay_calc, cts, frm, prev_delay);
+ bbr_log_type_bbrsnd(bbr, tot_len, pacing_delay, delay_calc, cts, frm, prev_delay);
}
static void
@@ -1035,8 +1031,8 @@ bbr_timer_audit(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, struct sock
}
/*
* Ok the timer originally started is not what we want now. We will
- * force the hpts to be stopped if any, and restart with the slot
- * set to what was in the saved slot.
+ * force the hpts to be stopped if any, and restart with the pacing
+ * delay set to what was in the saved delay.
*/
wrong_timer:
if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) {
@@ -2399,7 +2395,7 @@ bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag)
log.u_bbr.flex2 = diag->p_cur_slot;
log.u_bbr.flex3 = diag->slot_req;
log.u_bbr.flex4 = diag->inp_hptsslot;
- log.u_bbr.flex5 = diag->slot_remaining;
+ log.u_bbr.flex5 = diag->time_remaining;
log.u_bbr.flex6 = diag->need_new_to;
log.u_bbr.flex7 = diag->p_hpts_active;
log.u_bbr.flex8 = diag->p_on_min_sleep;
@@ -2413,9 +2409,6 @@ bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag)
log.u_bbr.bw_inuse = diag->wheel_slot;
log.u_bbr.rttProp = diag->wheel_cts;
log.u_bbr.delRate = diag->maxslots;
- log.u_bbr.cur_del_rate = diag->p_curtick;
- log.u_bbr.cur_del_rate <<= 32;
- log.u_bbr.cur_del_rate |= diag->p_lasttick;
TCP_LOG_EVENTP(bbr->rc_tp, NULL,
&bbr->rc_inp->inp_socket->so_rcv,
&bbr->rc_inp->inp_socket->so_snd,
@@ -2475,7 +2468,7 @@ bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len,
}
static void
-bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
+bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which)
{
if (tcp_bblogging_on(bbr->rc_tp)) {
union tcp_log_stackspecific log;
@@ -2485,7 +2478,7 @@ bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, u
log.u_bbr.flex1 = bbr->bbr_timer_src;
log.u_bbr.flex2 = to;
log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags;
- log.u_bbr.flex4 = slot;
+ log.u_bbr.flex4 = pacing_delay;
log.u_bbr.flex5 = bbr->rc_tp->t_hpts_slot;
log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
log.u_bbr.pkts_out = bbr->rc_tp->t_flags2;
@@ -2735,13 +2728,13 @@ bbr_type_log_hdwr_pacing(struct tcp_bbr *bbr, const struct ifnet *ifp,
}
static void
-bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
+bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t pacing_delay, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
{
if (tcp_bblogging_on(bbr->rc_tp)) {
union tcp_log_stackspecific log;
bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
log.u_bbr.flex2 = del_by;
log.u_bbr.flex3 = prev_delay;
log.u_bbr.flex4 = line;
@@ -5207,7 +5200,7 @@ bbr_process_timers(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, uint8_t
left = bbr->r_ctl.rc_timer_exp - cts;
ret = -3;
bbr_log_to_processing(bbr, cts, ret, left, hpts_calling);
- tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(left));
+ tcp_hpts_insert(tp, left, NULL);
return (1);
}
bbr->rc_tmr_stopped = 0;
@@ -5256,7 +5249,7 @@ bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts)
else
time_since_send = 0;
if (bbr->r_ctl.rc_last_delay_val > time_since_send) {
- /* Cut down our slot time */
+ /* Cut down our pacing delay */
bbr->r_ctl.rc_last_delay_val -= time_since_send;
} else {
bbr->r_ctl.rc_last_delay_val = 0;
@@ -5890,7 +5883,7 @@ bbr_log_output(struct tcp_bbr *bbr, struct tcpcb *tp, struct tcpopt *to, int32_t
* sequence 1 for 10 bytes. In such an example the r_start would be
* 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
* This means that r_end is actually the first sequence for the next
- * slot (11).
+ * sendmap entry (11).
*
*/
INP_WLOCK_ASSERT(tptoinpcb(tp));
@@ -7863,7 +7856,7 @@ nothing_left:
/* tcp_close will kill the inp pre-log the Reset */
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
BBR_STAT_INC(bbr_dropped_af_data);
return (1);
}
@@ -8763,7 +8756,7 @@ bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->iss) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
@@ -8965,7 +8958,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->snd_una) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (tp->t_flags & TF_FASTOPEN) {
@@ -8977,7 +8970,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
} else if (thflags & TH_SYN) {
/* non-initial SYN is ignored */
@@ -9010,7 +9003,7 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if (SEQ_LT(th->th_seq, tp->irs)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
@@ -9288,7 +9281,7 @@ bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9385,7 +9378,7 @@ bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9405,7 +9398,7 @@ close_now:
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
KMOD_TCPSTAT_INC(tcps_rcvafterclose);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
+ ctf_do_dropwithreset(m, tp, th, *tlen);
return (1);
}
if (sbavail(&so->so_snd) == 0)
@@ -9535,7 +9528,7 @@ bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9637,7 +9630,7 @@ bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9739,7 +9732,7 @@ bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -9848,7 +9841,7 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -11510,7 +11503,7 @@ bbr_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
(SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
if (tiwin > bbr->r_ctl.rc_high_rwnd)
@@ -11858,7 +11851,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
struct bbr_sendmap *rsm = NULL;
int32_t tso, mtu;
struct tcpopt to;
- int32_t slot = 0;
+ int32_t pacing_delay = 0;
struct inpcb *inp;
struct sockbuf *sb;
bool hpts_calling;
@@ -11988,8 +11981,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
delay_calc -= bbr->r_ctl.rc_last_delay_val;
else {
/*
- * We are early setup to adjust
- * our slot time.
+ * We are early setup to adjust our pacing delay.
*/
uint64_t merged_val;
@@ -12106,7 +12098,7 @@ again:
#endif
error = 0;
tso = 0;
- slot = 0;
+ pacing_delay = 0;
mtu = 0;
sendwin = min(tp->snd_wnd, tp->snd_cwnd);
sb_offset = tp->snd_max - tp->snd_una;
@@ -12128,7 +12120,7 @@ recheck_resend:
tot_len = tp->t_maxseg;
if (hpts_calling)
/* Retry in a ms */
- slot = 1001;
+ pacing_delay = 1001;
goto just_return_nolock;
}
TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next);
@@ -12701,9 +12693,9 @@ just_return:
SOCK_SENDBUF_UNLOCK(so);
just_return_nolock:
if (tot_len)
- slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
+ pacing_delay = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
if (bbr->rc_no_pacing)
- slot = 0;
+ pacing_delay = 0;
if (tot_len == 0) {
if ((ctf_outstanding(tp) + min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) >=
tp->snd_wnd) {
@@ -12753,7 +12745,7 @@ just_return_nolock:
/* Dont update the time if we did not send */
bbr->r_ctl.rc_last_delay_val = 0;
bbr->rc_output_starts_timer = 1;
- bbr_start_hpts_timer(bbr, tp, cts, 9, slot, tot_len);
+ bbr_start_hpts_timer(bbr, tp, cts, 9, pacing_delay, tot_len);
bbr_log_type_just_return(bbr, cts, tot_len, hpts_calling, app_limited, p_maxseg, len);
if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
/* Make sure snd_nxt is drug up */
@@ -12789,7 +12781,7 @@ send:
flags &= ~TH_FIN;
if ((len == 0) && ((tp->t_flags & TF_ACKNOW) == 0)) {
/* Lets not send this */
- slot = 0;
+ pacing_delay = 0;
goto just_return;
}
}
@@ -13055,7 +13047,7 @@ send:
/*
* We have outstanding data, don't send a fin by itself!.
*/
- slot = 0;
+ pacing_delay = 0;
goto just_return;
}
/*
@@ -13765,7 +13757,7 @@ nomore:
if (tp->snd_cwnd < maxseg)
tp->snd_cwnd = maxseg;
}
- slot = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
+ pacing_delay = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
BBR_STAT_INC(bbr_saw_enobuf);
if (bbr->bbr_hdrw_pacing)
counter_u64_add(bbr_hdwr_pacing_enobuf, 1);
@@ -13814,18 +13806,18 @@ nomore:
}
/*
* Nuke all other things that can interfere
- * with slot
+ * with pacing delay
*/
if ((tot_len + len) && (len >= tp->t_maxseg)) {
- slot = bbr_get_pacing_delay(bbr,
+ pacing_delay = bbr_get_pacing_delay(bbr,
bbr->r_ctl.rc_bbr_hptsi_gain,
(tot_len + len), cts, 0);
- if (slot < bbr_error_base_paceout)
- slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
+ if (pacing_delay < bbr_error_base_paceout)
+ pacing_delay = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
} else
- slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
+ pacing_delay = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
bbr->rc_output_starts_timer = 1;
- bbr_start_hpts_timer(bbr, tp, cts, 10, slot,
+ bbr_start_hpts_timer(bbr, tp, cts, 10, pacing_delay,
tot_len);
return (error);
}
@@ -13843,9 +13835,9 @@ nomore:
}
/* FALLTHROUGH */
default:
- slot = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
+ pacing_delay = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
bbr->rc_output_starts_timer = 1;
- bbr_start_hpts_timer(bbr, tp, cts, 11, slot, 0);
+ bbr_start_hpts_timer(bbr, tp, cts, 11, pacing_delay, 0);
return (error);
}
#ifdef STATS
@@ -13983,12 +13975,12 @@ skip_again:
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
if (((flags & (TH_RST | TH_SYN | TH_FIN)) == 0) && tot_len) {
/*
- * Calculate/Re-Calculate the hptsi slot in usecs based on
+ * Calculate/Re-Calculate the hptsi timeout in usecs based on
* what we have sent so far
*/
- slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
+ pacing_delay = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
if (bbr->rc_no_pacing)
- slot = 0;
+ pacing_delay = 0;
}
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
enobufs:
@@ -14001,8 +13993,8 @@ enobufs:
(more_to_rxt ||
((bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts)) != NULL))) {
/* Rack cheats and shotguns out all rxt's 1ms apart */
- if (slot > 1000)
- slot = 1000;
+ if (pacing_delay > 1000)
+ pacing_delay = 1000;
}
if (bbr->bbr_hdrw_pacing && (bbr->hw_pacing_set == 0)) {
/*
@@ -14016,7 +14008,7 @@ enobufs:
tcp_bbr_tso_size_check(bbr, cts);
}
}
- bbr_start_hpts_timer(bbr, tp, cts, 12, slot, tot_len);
+ bbr_start_hpts_timer(bbr, tp, cts, 12, pacing_delay, tot_len);
if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
/* Make sure snd_nxt is drug up */
tp->snd_nxt = tp->snd_max;
@@ -14134,8 +14126,7 @@ bbr_switch_failed(struct tcpcb *tp)
}
} else
toval = HPTS_USECS_PER_SLOT;
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, toval, &diag);
bbr_log_hpts_diag(bbr, cts, &diag);
}
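Most of the bbr.c churn is the mechanical slot to pacing_delay rename, but it rides on the HPTS API change visible in the hunks above: tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(x), __LINE__, &diag) becomes tcp_hpts_insert(tp, x, &diag), so callers now pass microseconds directly and the wheel does its own rounding. A sketch of the conversion callers used to perform; the real HPTS_USEC_TO_SLOTS macro may round differently:

#include <stdint.h>

#define	HPTS_USECS_PER_SLOT	10	/* wheel granularity, in usecs */

/* Old-style caller-side conversion, assuming round-up semantics. */
static inline uint32_t
usecs_to_slots(uint32_t usecs)
{
	return ((usecs + HPTS_USECS_PER_SLOT - 1) / HPTS_USECS_PER_SLOT);
}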
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index d6bbfeb886d9..c7962b57a69e 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -77,8 +77,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -252,11 +250,11 @@ static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the co
static int32_t rack_persist_min = 250000; /* 250usec */
static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */
static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */
-static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */
+static uint32_t rack_max_reduce = 10; /* Percent we can reduce pacing delay by */
static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
-static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */
+static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost pacing delay using time_between */
static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
@@ -280,7 +278,7 @@ static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
-static int32_t rack_slot_reduction = 4;
+static int32_t rack_pacing_delay_reduction = 4;
static int32_t rack_wma_divisor = 8; /* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
@@ -480,7 +478,7 @@ rack_log_alt_to_to_cancel(struct tcp_rack *rack,
uint16_t flex7, uint8_t mod);
static void
-rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
+rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay,
uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
@@ -1109,7 +1107,7 @@ rack_init_sysctls(void)
SYSCTL_ADD_S32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_pacing),
OID_AUTO, "burst_reduces", CTLFLAG_RW,
- &rack_slot_reduction, 4,
+ &rack_pacing_delay_reduction, 4,
"When doing only burst mitigation what is the reduce divisor");
SYSCTL_ADD_S32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_sysctl_root),
@@ -1401,7 +1399,7 @@ rack_init_sysctls(void)
SYSCTL_CHILDREN(rack_timers),
OID_AUTO, "hpts_max_reduce", CTLFLAG_RW,
&rack_max_reduce, 10,
- "Max percentage we will reduce slot by for pacing when we are behind");
+ "Max percentage we will reduce pacing delay by for pacing when we are behind");
SYSCTL_ADD_U32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_timers),
OID_AUTO, "persmin", CTLFLAG_RW,
@@ -2702,7 +2700,7 @@ rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t
}
static void
-rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
+rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which)
{
if (tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
@@ -2712,7 +2710,7 @@ rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot
log.u_bbr.flex1 = rack->rc_tp->t_srtt;
log.u_bbr.flex2 = to;
log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
- log.u_bbr.flex4 = slot;
+ log.u_bbr.flex4 = pacing_delay;
log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot;
log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
log.u_bbr.flex7 = rack->rc_in_persist;
@@ -3036,14 +3034,14 @@ rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,
}
static void
-rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line)
+rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay, uint32_t cts, struct timeval *tv, int line)
{
if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
if (rack->rack_no_prr)
log.u_bbr.flex2 = 0;
else
@@ -3141,7 +3139,7 @@ rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg
}
static void
-rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
+rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t pacing_delay,
uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
{
if (tcp_bblogging_on(rack->rc_tp)) {
@@ -3150,7 +3148,7 @@ rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, ui
memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
log.u_bbr.flex4 = reason;
if (rack->rack_no_prr)
@@ -6484,7 +6482,7 @@ rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
log.u_bbr.flex2 = diag->p_cur_slot;
log.u_bbr.flex3 = diag->slot_req;
log.u_bbr.flex4 = diag->inp_hptsslot;
- log.u_bbr.flex5 = diag->slot_remaining;
+ log.u_bbr.flex5 = diag->time_remaining;
log.u_bbr.flex6 = diag->need_new_to;
log.u_bbr.flex7 = diag->p_hpts_active;
log.u_bbr.flex8 = diag->p_on_min_sleep;
@@ -6499,9 +6497,6 @@ rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
log.u_bbr.rttProp = diag->wheel_cts;
log.u_bbr.timeStamp = cts;
log.u_bbr.delRate = diag->maxslots;
- log.u_bbr.cur_del_rate = diag->p_curtick;
- log.u_bbr.cur_del_rate <<= 32;
- log.u_bbr.cur_del_rate |= diag->p_lasttick;
TCP_LOG_EVENTP(rack->rc_tp, NULL,
&rack->rc_inp->inp_socket->so_rcv,
&rack->rc_inp->inp_socket->so_snd,
@@ -6534,14 +6529,14 @@ rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uin
static void
rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
- int32_t slot, uint32_t tot_len_this_send, int sup_rack)
+ int32_t usecs, uint32_t tot_len_this_send, int sup_rack)
{
struct hpts_diag diag;
struct inpcb *inp = tptoinpcb(tp);
struct timeval tv;
uint32_t delayed_ack = 0;
uint32_t hpts_timeout;
- uint32_t entry_slot = slot;
+ uint32_t entry_usecs = usecs;
uint8_t stopped;
uint32_t left = 0;
uint32_t us_cts;
@@ -6562,7 +6557,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
rack->r_ctl.rc_hpts_flags = 0;
us_cts = tcp_get_usecs(&tv);
/* Now early/late accounting */
- rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
+ rack_log_pacing_delay_calc(rack, entry_usecs, usecs, 0, 0, 0, 26, __LINE__, NULL, 0);
if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
/*
* We have a early carry over set,
@@ -6573,7 +6568,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* penalize the next timer for being awoke
* by an ack aka the rc_agg_early (non-paced mode).
*/
- slot += rack->r_ctl.rc_agg_early;
+ usecs += rack->r_ctl.rc_agg_early;
rack->r_early = 0;
rack->r_ctl.rc_agg_early = 0;
}
@@ -6585,29 +6580,29 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* really depends on what
* the current pacing time is.
*/
- if (rack->r_ctl.rc_agg_delayed >= slot) {
+ if (rack->r_ctl.rc_agg_delayed >= usecs) {
/*
* We can't compensate for it all.
* And we have to have some time
* on the clock. We always have a min
- * 10 slots (10 x 10 i.e. 100 usecs).
+ * 10 HPTS timer units (10 x 10 i.e. 100 usecs).
*/
- if (slot <= HPTS_USECS_PER_SLOT) {
+ if (usecs <= HPTS_USECS_PER_SLOT) {
/* We gain delay */
- rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - slot);
- slot = HPTS_USECS_PER_SLOT;
+ rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - usecs);
+ usecs = HPTS_USECS_PER_SLOT;
} else {
/* We take off some */
- rack->r_ctl.rc_agg_delayed -= (slot - HPTS_USECS_PER_SLOT);
- slot = HPTS_USECS_PER_SLOT;
+ rack->r_ctl.rc_agg_delayed -= (usecs - HPTS_USECS_PER_SLOT);
+ usecs = HPTS_USECS_PER_SLOT;
}
} else {
- slot -= rack->r_ctl.rc_agg_delayed;
+ usecs -= rack->r_ctl.rc_agg_delayed;
rack->r_ctl.rc_agg_delayed = 0;
/* Make sure we have 100 useconds at minimum */
- if (slot < HPTS_USECS_PER_SLOT) {
- rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - slot;
- slot = HPTS_USECS_PER_SLOT;
+ if (usecs < HPTS_USECS_PER_SLOT) {
+ rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - usecs;
+ usecs = HPTS_USECS_PER_SLOT;
}
if (rack->r_ctl.rc_agg_delayed == 0)
rack->r_late = 0;
@@ -6616,17 +6611,17 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
/* r_use_hpts_min is on and so is DGP */
uint32_t max_red;
- max_red = (slot * rack->r_ctl.max_reduction) / 100;
+ max_red = (usecs * rack->r_ctl.max_reduction) / 100;
if (max_red >= rack->r_ctl.rc_agg_delayed) {
- slot -= rack->r_ctl.rc_agg_delayed;
+ usecs -= rack->r_ctl.rc_agg_delayed;
rack->r_ctl.rc_agg_delayed = 0;
} else {
- slot -= max_red;
+ usecs -= max_red;
rack->r_ctl.rc_agg_delayed -= max_red;
}
}
if ((rack->r_use_hpts_min == 1) &&
- (slot > 0) &&
+ (usecs > 0) &&
(rack->dgp_on == 1)) {
/*
* We are enforcing a min pacing timer
@@ -6635,8 +6630,8 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
uint32_t min;
min = get_hpts_min_sleep_time();
- if (min > slot) {
- slot = min;
+ if (min > usecs) {
+ usecs = min;
}
}
hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
@@ -6654,7 +6649,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* wheel, we resort to a keep-alive timer if its configured.
*/
if ((hpts_timeout == 0) &&
- (slot == 0)) {
+ (usecs == 0)) {
if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
(tp->t_state <= TCPS_CLOSING)) {
/*
@@ -6711,10 +6706,10 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
hpts_timeout = 0x7ffffffe;
rack->r_ctl.rc_timer_exp = cts + hpts_timeout;
}
- rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
+ rack_log_pacing_delay_calc(rack, entry_usecs, usecs, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0);
if ((rack->gp_ready == 0) &&
(rack->use_fixed_rate == 0) &&
- (hpts_timeout < slot) &&
+ (hpts_timeout < usecs) &&
(rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
/*
* We have no good estimate yet for the
@@ -6724,7 +6719,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* pace that long since we know the calculation
* so far is not accurate.
*/
- slot = hpts_timeout;
+ usecs = hpts_timeout;
}
/**
* Turn off all the flags for queuing by default. The
@@ -6756,11 +6751,11 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* so LRO can call into us.
*/
tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY);
- if (slot) {
+ if (usecs) {
rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
- rack->r_ctl.rc_last_output_to = us_cts + slot;
+ rack->r_ctl.rc_last_output_to = us_cts + usecs;
/*
- * A pacing timer (slot) is being set, in
+ * A pacing timer (in microseconds) is being set, in
* such a case we cannot send (we are blocked by
* the timer). So let's tell LRO that it should not
* wake us unless there is a SACK. Note this only
@@ -6801,20 +6796,18 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
}
if ((rack->use_rack_rr) &&
(rack->r_rr_config < 2) &&
- ((hpts_timeout) && (hpts_timeout < slot))) {
+ ((hpts_timeout) && (hpts_timeout < usecs))) {
/*
* Arrange for the hpts to kick back in after the
* t-o if the t-o does not cause a send.
*/
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, hpts_timeout, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &tv);
- rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
+ rack_log_to_start(rack, cts, hpts_timeout, usecs, 0);
} else {
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, usecs, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &tv);
- rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
+ rack_log_to_start(rack, cts, hpts_timeout, usecs, 1);
}
} else if (hpts_timeout) {
/*
@@ -6826,22 +6819,21 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* at the start of this block) are good enough.
*/
rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(hpts_timeout),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, hpts_timeout, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &tv);
- rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
+ rack_log_to_start(rack, cts, hpts_timeout, usecs, 0);
} else {
/* No timer starting */
#ifdef INVARIANTS
if (SEQ_GT(tp->snd_max, tp->snd_una)) {
- panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?",
- tp, rack, tot_len_this_send, cts, slot, hpts_timeout);
+ panic("tp:%p rack:%p tlts:%d cts:%u usecs:%u pto:%u -- no timer started?",
+ tp, rack, tot_len_this_send, cts, usecs, hpts_timeout);
}
#endif
}
rack->rc_tmr_stopped = 0;
- if (slot)
- rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv, __LINE__);
+ if (usecs)
+ rack_log_type_bbrsnd(rack, tot_len_this_send, usecs, us_cts, &tv, __LINE__);
}
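The block above is the late-timer compensation: accumulated lateness (rc_agg_delayed) is burned against the requested pacing delay while the timer keeps at least one HPTS tick on the clock. A minimal userspace sketch of that clamping, assuming the 10 usec tick from the comment; the names and the numbers in main() are illustrative, and the r_late flag handling is omitted:

    #include <stdint.h>
    #include <stdio.h>

    #define HPTS_USECS_PER_SLOT 10	/* one HPTS tick, in microseconds */

    /*
     * Shrink the requested delay by past lateness, but keep at least
     * one tick on the clock and carry any unabsorbed remainder.
     */
    static uint32_t
    compensate_late(uint32_t usecs, uint32_t *agg_delayed)
    {
        if (*agg_delayed >= usecs) {
            if (usecs <= HPTS_USECS_PER_SLOT)
                *agg_delayed += HPTS_USECS_PER_SLOT - usecs;
            else
                *agg_delayed -= usecs - HPTS_USECS_PER_SLOT;
            return (HPTS_USECS_PER_SLOT);
        }
        usecs -= *agg_delayed;
        *agg_delayed = 0;
        if (usecs < HPTS_USECS_PER_SLOT) {
            *agg_delayed = HPTS_USECS_PER_SLOT - usecs;
            usecs = HPTS_USECS_PER_SLOT;
        }
        return (usecs);
    }

    int
    main(void)
    {
        uint32_t owed = 250, timer;

        timer = compensate_late(1000, &owed);	/* -> 750, owed 0 */
        printf("next timer %u usec, still owed %u\n", timer, owed);
        return (0);
    }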
static void
@@ -8018,7 +8010,7 @@ rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8
rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
ret = -3;
left = rack->r_ctl.rc_timer_exp - cts;
- tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left));
+ tcp_hpts_insert(tp, left, NULL);
rack_log_to_processing(rack, cts, ret, left);
return (1);
}
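The mechanical change repeated through these hunks is the HPTS insertion API: callers previously converted time to wheel slots themselves (tcp_hpts_insert_diag() fed by HPTS_USEC_TO_SLOTS() or HPTS_MS_TO_SLOTS()), while the new three-argument tcp_hpts_insert() takes the timeout in microseconds plus an optional struct hpts_diag pointer. A sketch of what the caller-side conversion used to round away; the macro body here is an illustrative reconstruction, not the exact in-tree definition:

    #include <stdint.h>
    #include <stdio.h>

    #define HPTS_USECS_PER_SLOT 10
    /* Illustrative round-up conversion the callers applied themselves. */
    #define HPTS_USEC_TO_SLOTS(x) \
        (((x) + HPTS_USECS_PER_SLOT - 1) / HPTS_USECS_PER_SLOT)

    int
    main(void)
    {
        uint32_t left = 4735;	/* usec until rc_timer_exp */

        /* Old style: the pacer only ever saw whole slots. */
        printf("old: slot offset %u\n", HPTS_USEC_TO_SLOTS(left));
        /* New style: tcp_hpts_insert(tp, left, NULL) gets the raw
         * microsecond count and does any rounding internally. */
        printf("new: %u usec\n", left);
        return (0);
    }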
@@ -12038,7 +12030,7 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
/* tcp_close will kill the inp pre-log the Reset */
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
}
@@ -12876,7 +12868,7 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->iss) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) {
@@ -13090,7 +13082,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
(SEQ_LEQ(th->th_ack, tp->snd_una) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (tp->t_flags & TF_FASTOPEN) {
@@ -13103,7 +13095,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
} else if (thflags & TH_SYN) {
/* non-initial SYN is ignored */
@@ -13137,7 +13129,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if (SEQ_LT(th->th_seq, tp->irs)) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return (1);
}
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
@@ -13400,7 +13392,7 @@ rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (sbavail(&so->so_snd)) {
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13496,7 +13488,7 @@ rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13518,7 +13510,7 @@ rack_check_data_after_close(struct mbuf *m,
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
tp = tcp_close(tp);
KMOD_TCPSTAT_INC(tcps_rcvafterclose);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen));
+ ctf_do_dropwithreset(m, tp, th, *tlen);
return (1);
}
if (sbavail(&so->so_snd) == 0)
@@ -13646,7 +13638,7 @@ rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13747,7 +13739,7 @@ rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13849,7 +13841,7 @@ rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -13953,7 +13945,7 @@ rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_progress_timeout_check(tp, true)) {
rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
tp, tick, PROGRESS_DROP, __LINE__);
- ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset_conn(m, tp, th, tlen);
return (1);
}
}
@@ -14379,8 +14371,7 @@ rack_switch_failed(struct tcpcb *tp)
}
} else
toval = HPTS_USECS_PER_SLOT;
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, toval, &diag);
rack_log_hpts_diag(rack, cts, &diag, &tv);
}
@@ -14975,8 +14966,7 @@ rack_init(struct tcpcb *tp, void **ptr)
if (tov) {
struct hpts_diag diag;
- (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(tov),
- __LINE__, &diag);
+ tcp_hpts_insert(tp, tov, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time);
}
}
@@ -16369,7 +16359,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
struct rack_sendmap *rsm;
int32_t prev_state = 0;
int no_output = 0;
- int slot_remaining = 0;
+ int time_remaining = 0;
#ifdef TCP_ACCOUNTING
int ack_val_set = 0xf;
#endif
@@ -16418,7 +16408,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
* could be, if a sack is present, we want to be awoken and
* so should process the packets.
*/
- slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
+ time_remaining = rack->r_ctl.rc_last_output_to - us_cts;
if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
no_output = 1;
} else {
@@ -16438,7 +16428,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
(*ts_ptr == TCP_LRO_TS_OPTION)))
no_output = 1;
}
- if ((no_output == 1) && (slot_remaining < tcp_min_hptsi_time)) {
+ if ((no_output == 1) && (time_remaining < tcp_min_hptsi_time)) {
/*
* It is unrealistic to think we can pace in less than
* the minimum granularity of the pacer (def:250usec). So
@@ -16653,7 +16643,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) &&
(SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) {
tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT);
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
#ifdef TCP_ACCOUNTING
sched_unpin();
#endif
@@ -16921,10 +16911,10 @@ do_output_now:
(tcp_in_hpts(rack->rc_tp) == 0)) {
/*
* We are not in hpts and we had a pacing timer up. Use
- * the remaining time (slot_remaining) to restart the timer.
+ * the remaining time (time_remaining) to restart the timer.
*/
- KASSERT ((slot_remaining != 0), ("slot remaining is zero for rack:%p tp:%p", rack, tp));
- rack_start_hpts_timer(rack, tp, cts, slot_remaining, 0, 0);
+ KASSERT ((time_remaining != 0), ("time remaining is zero for rack:%p tp:%p", rack, tp));
+ rack_start_hpts_timer(rack, tp, cts, time_remaining, 0, 0);
rack_free_trim(rack);
}
/* Clear the flag, it may have been cleared by output but we may not have */
@@ -17104,7 +17094,7 @@ check_it:
}
static void
-rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot,
+rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay,
uint64_t bw_est, uint64_t bw, uint64_t len_time, int method,
int line, struct rack_sendmap *rsm, uint8_t quality)
{
@@ -17127,7 +17117,7 @@ rack_log_pacing_delay_calc (struct tcp_rack *rack, uint32_t len, uint32_t slot,
}
}
memset(&log, 0, sizeof(log));
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
log.u_bbr.flex2 = len;
log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs;
log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs;
@@ -17286,25 +17276,25 @@ rack_arrive_at_discounted_rate(struct tcp_rack *rack, uint64_t window_input, uin
}
static int32_t
-pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
+pace_to_fill_cwnd(struct tcp_rack *rack, int32_t pacing_delay, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced)
{
uint64_t lentim, fill_bw;
rack->r_via_fill_cw = 0;
if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use)
- return (slot);
+ return (pacing_delay);
if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd)
- return (slot);
+ return (pacing_delay);
if (rack->r_ctl.rc_last_us_rtt == 0)
- return (slot);
+ return (pacing_delay);
if (rack->rc_pace_fill_if_rttin_range &&
(rack->r_ctl.rc_last_us_rtt >=
(get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) {
/* The rtt is huge, N * smallest, lets not fill */
- return (slot);
+ return (pacing_delay);
}
if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap)
- return (slot);
+ return (pacing_delay);
/*
* First let's calculate the b/w based on the last us-rtt
* and the smallest send window.
@@ -17370,7 +17360,7 @@ at_lt_bw:
if (non_paced)
*rate_wanted = fill_bw;
if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted))
- return (slot);
+ return (pacing_delay);
rack->r_via_fill_cw = 1;
if (rack->r_rack_hw_rate_caps &&
(rack->r_ctl.crte != NULL)) {
@@ -17425,19 +17415,19 @@ at_lt_bw:
lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC;
lentim /= fill_bw;
*rate_wanted = fill_bw;
- if (non_paced || (lentim < slot)) {
- rack_log_pacing_delay_calc(rack, len, slot, fill_bw,
+ if (non_paced || (lentim < pacing_delay)) {
+ rack_log_pacing_delay_calc(rack, len, pacing_delay, fill_bw,
0, lentim, 12, __LINE__, NULL, 0);
return ((int32_t)lentim);
} else
- return (slot);
+ return (pacing_delay);
}
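The delay returned by pace_to_fill_cwnd() comes from lentim = len * HPTS_USEC_IN_SEC / fill_bw, the wire time for len bytes at the fill bandwidth, and it is adopted only when it undercuts the caller's delay. A worked example with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define HPTS_USEC_IN_SEC 1000000ULL	/* microseconds per second */

    int
    main(void)
    {
        uint64_t fill_bw = 12500000;	/* 100 Mbit/s in bytes/sec */
        uint64_t len = 43800;		/* 30 segments of 1460 bytes */
        uint64_t lentim = len * HPTS_USEC_IN_SEC / fill_bw;

        /* Prints 3504: pace the burst over ~3.5 ms. */
        printf("%ju bytes at %ju B/s -> %ju usec\n",
            (uintmax_t)len, (uintmax_t)fill_bw, (uintmax_t)lentim);
        return (0);
    }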
static int32_t
rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line)
{
uint64_t srtt;
- int32_t slot = 0;
+ int32_t pacing_delay = 0;
int can_start_hw_pacing = 1;
int err;
int pace_one;
@@ -17485,25 +17475,25 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, str
* cwnd. Which in that case we are just waiting for
* a ACK.
*/
- slot = len / tr_perms;
+ pacing_delay = len / tr_perms;
/* Now do we reduce the time so we don't run dry? */
- if (slot && rack_slot_reduction) {
- reduce = (slot / rack_slot_reduction);
- if (reduce < slot) {
- slot -= reduce;
+ if (pacing_delay && rack_pacing_delay_reduction) {
+ reduce = (pacing_delay / rack_pacing_delay_reduction);
+ if (reduce < pacing_delay) {
+ pacing_delay -= reduce;
} else
- slot = 0;
+ pacing_delay = 0;
} else
reduce = 0;
- slot *= HPTS_USEC_IN_MSEC;
+ pacing_delay *= HPTS_USEC_IN_MSEC;
if (rack->rc_pace_to_cwnd) {
uint64_t rate_wanted = 0;
- slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1);
+ pacing_delay = pace_to_fill_cwnd(rack, pacing_delay, len, segsiz, NULL, &rate_wanted, 1);
rack->rc_ack_can_sendout_data = 1;
- rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
+ rack_log_pacing_delay_calc(rack, len, pacing_delay, rate_wanted, 0, 0, 14, __LINE__, NULL, 0);
} else
- rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
+ rack_log_pacing_delay_calc(rack, len, pacing_delay, tr_perms, reduce, 0, 7, __LINE__, NULL, 0);
/*******************************************************/
/* RRS: We insert non-paced call to stats here for len */
/*******************************************************/
@@ -17577,7 +17567,7 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, str
segs *= oh;
lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC;
res = lentim / rate_wanted;
- slot = (uint32_t)res;
+ pacing_delay = (uint32_t)res;
if (rack_hw_rate_min &&
(rate_wanted < rack_hw_rate_min)) {
can_start_hw_pacing = 0;
@@ -17637,7 +17627,7 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, str
* We want to pace at our rate *or* faster to
* fill the cwnd to the max if its not full.
*/
- slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0);
+ pacing_delay = pace_to_fill_cwnd(rack, pacing_delay, (len+segs), segsiz, &capped, &rate_wanted, 0);
/* Re-check to make sure we are not exceeding our max b/w */
if ((rack->r_ctl.crte != NULL) &&
(tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) {
@@ -17788,15 +17778,15 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, str
srtt = rack->rc_tp->t_srtt;
else
srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */
- if (srtt < (uint64_t)slot) {
- rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
- slot = srtt;
+ if (srtt < (uint64_t)pacing_delay) {
+ rack_log_pacing_delay_calc(rack, srtt, pacing_delay, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0);
+ pacing_delay = srtt;
}
}
/*******************************************************************/
/* RRS: We insert paced call to stats here for len and rate_wanted */
/*******************************************************************/
- rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
+ rack_log_pacing_delay_calc(rack, len, pacing_delay, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
}
if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) {
/*
@@ -17813,9 +17803,9 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, str
hw_boost_delay = rack_enobuf_hw_max;
else if (hw_boost_delay < rack_enobuf_hw_min)
hw_boost_delay = rack_enobuf_hw_min;
- slot += hw_boost_delay;
+ pacing_delay += hw_boost_delay;
}
- return (slot);
+ return (pacing_delay);
}
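rack_get_pacing_delay() finishes by padding the delay after recent hardware ENOBUFS, clamping the boost between rack_enobuf_hw_min and rack_enobuf_hw_max. A sketch of the clamp; the bounds and the per-event boost below are assumptions, and the real raw boost is derived from rack_enobuf_hw_boost_mult and the rate entry's rs_num_enobufs:

    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t enobuf_hw_min = 10000;	/* assumed, usec */
    static const uint32_t enobuf_hw_max = 50000;	/* assumed, usec */

    /* Pad the pacing delay in proportion to recent ENOBUFS events. */
    static uint32_t
    enobuf_boost(uint32_t pacing_delay, uint32_t events, uint32_t per_event)
    {
        uint32_t hw_boost_delay = events * per_event;

        if (hw_boost_delay > enobuf_hw_max)
            hw_boost_delay = enobuf_hw_max;
        else if (hw_boost_delay < enobuf_hw_min)
            hw_boost_delay = enobuf_hw_min;
        return (pacing_delay + hw_boost_delay);
    }

    int
    main(void)
    {
        /* 3 ENOBUFS at 12 ms each -> clamped into [10 ms, 50 ms]. */
        printf("%u usec\n", enobuf_boost(2000, 3, 12000));	/* 38000 */
        return (0);
    }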
static void
@@ -18484,7 +18474,7 @@ rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendma
struct tcpopt to;
u_char opt[TCP_MAXOLEN];
uint32_t hdrlen, optlen;
- int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0;
+ int32_t pacing_delay, segsiz, max_val, tso = 0, error = 0, ulen = 0;
uint16_t flags;
uint32_t if_hw_tsomaxsegcount = 0, startseq;
uint32_t if_hw_tsomaxsegsize;
@@ -18690,9 +18680,9 @@ rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendma
}
if (rack->r_ctl.crte != NULL) {
/* See if we can send via the hw queue */
- slot = rack_check_queue_level(rack, tp, tv, cts, len, segsiz);
+ pacing_delay = rack_check_queue_level(rack, tp, tv, cts, len, segsiz);
/* If there is nothing in queue (no pacing time) we can send via the hw queue */
- if (slot == 0)
+ if (pacing_delay == 0)
ip_sendflag = 0;
}
tcp_set_flags(th, flags);
@@ -18957,20 +18947,20 @@ rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendma
rack_log_queue_level(tp, rack, len, tv, cts);
} else
tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
- slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
+ pacing_delay = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
if (rack->rc_enobuf < 0x7f)
rack->rc_enobuf++;
- if (slot < (10 * HPTS_USEC_IN_MSEC))
- slot = 10 * HPTS_USEC_IN_MSEC;
+ if (pacing_delay < (10 * HPTS_USEC_IN_MSEC))
+ pacing_delay = 10 * HPTS_USEC_IN_MSEC;
if (rack->r_ctl.crte != NULL) {
counter_u64_add(rack_saw_enobuf_hw, 1);
tcp_rl_log_enobuf(rack->r_ctl.crte);
}
counter_u64_add(rack_saw_enobuf, 1);
} else {
- slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__);
+ pacing_delay = rack_get_pacing_delay(rack, tp, len, NULL, segsiz, __LINE__);
}
- rack_start_hpts_timer(rack, tp, cts, slot, len, 0);
+ rack_start_hpts_timer(rack, tp, cts, pacing_delay, len, 0);
#ifdef TCP_ACCOUNTING
crtsc = get_cyclecount();
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
@@ -19073,7 +19063,7 @@ rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
#ifdef TCP_ACCOUNTING
int cnt_thru = 1;
#endif
- int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
+ int32_t pacing_delay, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0;
uint16_t flags;
uint32_t s_soff;
uint32_t if_hw_tsomaxsegcount = 0, startseq;
@@ -19521,8 +19511,8 @@ again:
}
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
counter_u64_add(rack_fto_send, 1);
- slot = rack_get_pacing_delay(rack, tp, *tot_len, NULL, segsiz, __LINE__);
- rack_start_hpts_timer(rack, tp, cts, slot, *tot_len, 0);
+ pacing_delay = rack_get_pacing_delay(rack, tp, *tot_len, NULL, segsiz, __LINE__);
+ rack_start_hpts_timer(rack, tp, cts, pacing_delay, *tot_len, 0);
#ifdef TCP_ACCOUNTING
crtsc = get_cyclecount();
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
@@ -19709,7 +19699,7 @@ rack_output(struct tcpcb *tp)
struct rack_sendmap *rsm = NULL;
int32_t tso, mtu;
struct tcpopt to;
- int32_t slot = 0;
+ int32_t pacing_delay = 0;
int32_t sup_rack = 0;
uint32_t cts, ms_cts, delayed, early;
uint32_t add_flag = RACK_SENT_SP;
@@ -20072,7 +20062,7 @@ again:
if (rsm == NULL) {
if (hpts_calling)
/* Retry in a ms */
- slot = (1 * HPTS_USEC_IN_MSEC);
+ pacing_delay = (1 * HPTS_USEC_IN_MSEC);
so = inp->inp_socket;
sb = &so->so_snd;
goto just_return_nolock;
@@ -20879,7 +20869,7 @@ just_return_nolock:
}
if (tot_len_this_send > 0) {
rack->r_ctl.fsb.recwin = recwin;
- slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__);
+ pacing_delay = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz, __LINE__);
if ((error == 0) &&
rack_use_rfo &&
((flags & (TH_SYN|TH_FIN)) == 0) &&
@@ -21062,8 +21052,8 @@ just_return_nolock:
/* Yes lets make sure to move to persist before timer-start */
rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
}
- rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack);
- rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use);
+ rack_start_hpts_timer(rack, tp, cts, pacing_delay, tot_len_this_send, sup_rack);
+ rack_log_type_just_return(rack, cts, tot_len_this_send, pacing_delay, hpts_calling, app_limited, cwnd_to_use);
}
#ifdef NETFLIX_SHARED_CWND
if ((sbavail(sb) == 0) &&
@@ -21102,8 +21092,8 @@ send:
* we come around to again, the flag will be clear.
*/
check_done = 1;
- slot = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz);
- if (slot) {
+ pacing_delay = rack_check_queue_level(rack, tp, &tv, cts, len, segsiz);
+ if (pacing_delay) {
rack->r_ctl.rc_agg_delayed = 0;
rack->r_ctl.rc_agg_early = 0;
rack->r_early = 0;
@@ -22360,11 +22350,11 @@ nomore:
rack_log_queue_level(tp, rack, len, &tv, cts);
} else
tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
- slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
+ pacing_delay = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
if (rack->rc_enobuf < 0x7f)
rack->rc_enobuf++;
- if (slot < (10 * HPTS_USEC_IN_MSEC))
- slot = 10 * HPTS_USEC_IN_MSEC;
+ if (pacing_delay < (10 * HPTS_USEC_IN_MSEC))
+ pacing_delay = 10 * HPTS_USEC_IN_MSEC;
if (rack->r_ctl.crte != NULL) {
counter_u64_add(rack_saw_enobuf_hw, 1);
tcp_rl_log_enobuf(rack->r_ctl.crte);
@@ -22391,8 +22381,8 @@ nomore:
goto again;
}
}
- slot = 10 * HPTS_USEC_IN_MSEC;
- rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
+ pacing_delay = 10 * HPTS_USEC_IN_MSEC;
+ rack_start_hpts_timer(rack, tp, cts, pacing_delay, 0, 0);
#ifdef TCP_ACCOUNTING
crtsc = get_cyclecount();
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
@@ -22414,8 +22404,8 @@ nomore:
}
/* FALLTHROUGH */
default:
- slot = 10 * HPTS_USEC_IN_MSEC;
- rack_start_hpts_timer(rack, tp, cts, slot, 0, 0);
+ pacing_delay = 10 * HPTS_USEC_IN_MSEC;
+ rack_start_hpts_timer(rack, tp, cts, pacing_delay, 0, 0);
#ifdef TCP_ACCOUNTING
crtsc = get_cyclecount();
if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
@@ -22458,18 +22448,18 @@ enobufs:
/*
* We don't send again after sending a RST.
*/
- slot = 0;
+ pacing_delay = 0;
sendalot = 0;
if (error == 0)
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
- } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) {
+ } else if ((pacing_delay == 0) && (sendalot == 0) && tot_len_this_send) {
/*
* Get our pacing rate, if an error
* occurred in sending (ENOBUF) we would
* hit the else if with pacing_delay preset. Other
* errors return.
*/
- slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__);
+ pacing_delay = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__);
}
/* We have sent clear the flag */
rack->r_ent_rec_ns = 0;
@@ -22501,7 +22491,7 @@ enobufs:
*/
tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
}
- if (slot) {
+ if (pacing_delay) {
/* set the rack tcb into the slot N */
if ((error == 0) &&
rack_use_rfo &&
@@ -22566,7 +22556,7 @@ skip_all_send:
/* Assure when we leave that snd_nxt will point to top */
if (SEQ_GT(tp->snd_max, tp->snd_nxt))
tp->snd_nxt = tp->snd_max;
- rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0);
+ rack_start_hpts_timer(rack, tp, cts, pacing_delay, tot_len_this_send, 0);
#ifdef TCP_ACCOUNTING
crtsc = get_cyclecount() - ts_val;
if (tot_len_this_send) {
diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.c b/sys/netinet/tcp_stacks/rack_bbr_common.c
index fb013d3d17f0..4a0a5fc118f6 100644
--- a/sys/netinet/tcp_stacks/rack_bbr_common.c
+++ b/sys/netinet/tcp_stacks/rack_bbr_common.c
@@ -76,8 +76,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
@@ -507,9 +505,9 @@ ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
- int32_t rstreason, int32_t tlen)
+ int32_t tlen)
{
- tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ tcp_dropwithreset(m, th, tp, tlen);
if (tp != NULL)
INP_WUNLOCK(tptoinpcb(tp));
}
@@ -670,7 +668,7 @@ ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th, int32_t
(SEQ_GT(tp->snd_una, th->th_ack) ||
SEQ_GT(th->th_ack, tp->snd_max))) {
*ret_val = 1;
- ctf_do_dropwithreset(m, tp, th, BANDLIM_TCP_RST, tlen);
+ ctf_do_dropwithreset(m, tp, th, tlen);
return;
} else
*ret_val = 0;
@@ -864,10 +862,10 @@ ctf_calc_rwin(struct socket *so, struct tcpcb *tp)
void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
- int32_t rstreason, int32_t tlen)
+ int32_t tlen)
{
- tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ tcp_dropwithreset(m, th, tp, tlen);
tp = tcp_drop(tp, ETIMEDOUT);
if (tp)
INP_WUNLOCK(tptoinpcb(tp));
diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.h b/sys/netinet/tcp_stacks/rack_bbr_common.h
index 6a8a056d89b0..cd33cb8ce50b 100644
--- a/sys/netinet/tcp_stacks/rack_bbr_common.h
+++ b/sys/netinet/tcp_stacks/rack_bbr_common.h
@@ -101,7 +101,7 @@ ctf_do_dropafterack(struct mbuf *m, struct tcpcb *tp,
void
ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp,
- struct tcphdr *th, int32_t rstreason, int32_t tlen);
+ struct tcphdr *th, int32_t tlen);
void
ctf_do_drop(struct mbuf *m, struct tcpcb *tp);
@@ -125,7 +125,7 @@ ctf_calc_rwin(struct socket *so, struct tcpcb *tp);
void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
- int32_t rstreason, int32_t tlen);
+ int32_t tlen);
uint32_t
ctf_fixed_maxseg(struct tcpcb *tp);
diff --git a/sys/netinet/tcp_stacks/rack_pcm.c b/sys/netinet/tcp_stacks/rack_pcm.c
index 759bfda98357..1a51097f627c 100644
--- a/sys/netinet/tcp_stacks/rack_pcm.c
+++ b/sys/netinet/tcp_stacks/rack_pcm.c
@@ -78,8 +78,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
diff --git a/sys/netinet/tcp_stacks/tailq_hash.c b/sys/netinet/tcp_stacks/tailq_hash.c
index 5ba3e7cd36c0..ff01640524b6 100644
--- a/sys/netinet/tcp_stacks/tailq_hash.c
+++ b/sys/netinet/tcp_stacks/tailq_hash.c
@@ -51,8 +51,6 @@
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
-#include <netinet/ip_icmp.h> /* required for icmp_var.h */
-#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 26e7e53d540c..c817c79881d6 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -82,6 +82,7 @@
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
+#include <netinet/icmp_var.h>
#ifdef INET6
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
@@ -644,14 +645,14 @@ out:
static int
sysctl_net_inet_default_tcp_functions(SYSCTL_HANDLER_ARGS)
{
- int error = ENOENT;
struct tcp_function_set fs;
struct tcp_function_block *blk;
+ int error;
- memset(&fs, 0, sizeof(fs));
+ memset(&fs, 0, sizeof(struct tcp_function_set));
rw_rlock(&tcp_function_lock);
blk = find_tcp_fb_locked(V_tcp_func_set_ptr, NULL);
- if (blk) {
+ if (blk != NULL) {
/* Found him */
strcpy(fs.function_set_name, blk->tfb_tcp_block_name);
fs.pcbcnt = blk->tfb_refcnt;
@@ -2147,38 +2148,57 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
}
/*
- * Send a challenge ack (no data, no SACK option), but not more than
- * V_tcp_ack_war_cnt per V_tcp_ack_war_time_window (per TCP connection).
+ * Check that no more than V_tcp_ack_war_cnt challenge ACKs per
+ * V_tcp_ack_war_time_window are sent. *epoch_end is the end of the current
+ * epoch and is updated if the current epoch ended in the past. *ack_cnt is
+ * the counter used during the current epoch; it might be reset and
+ * incremented.
+ * The function returns true if a challenge ACK should be sent.
*/
-void
-tcp_send_challenge_ack(struct tcpcb *tp, struct tcphdr *th, struct mbuf *m)
+bool
+tcp_challenge_ack_check(sbintime_t *epoch_end, uint32_t *ack_cnt)
{
sbintime_t now;
- bool send_challenge_ack;
+
+ /*
+ * The sending of a challenge ACK could be triggered by a blind attacker
+ * to detect an existing TCP connection. To mitigate that, also
+ * increment the global counter, which would have been incremented
+ * had the attacker guessed wrongly.
+ */
+ (void)badport_bandlim(BANDLIM_TCP_RST);
if (V_tcp_ack_war_time_window == 0 || V_tcp_ack_war_cnt == 0) {
/* ACK war protection is disabled. */
- send_challenge_ack = true;
+ return (true);
} else {
/* Start a new epoch if the previous one is already over. */
now = getsbinuptime();
- if (tp->t_challenge_ack_end < now) {
- tp->t_challenge_ack_cnt = 0;
- tp->t_challenge_ack_end = now +
- V_tcp_ack_war_time_window * SBT_1MS;
+ if (*epoch_end < now) {
+ *ack_cnt = 0;
+ *epoch_end = now + V_tcp_ack_war_time_window * SBT_1MS;
}
/*
* Send a challenge ACK if fewer than tcp_ack_war_cnt have been
* sent in the current epoch.
*/
- if (tp->t_challenge_ack_cnt < V_tcp_ack_war_cnt) {
- send_challenge_ack = true;
- tp->t_challenge_ack_cnt++;
+ if (*ack_cnt < V_tcp_ack_war_cnt) {
+ (*ack_cnt)++;
+ return (true);
} else {
- send_challenge_ack = false;
+ return (false);
}
}
- if (send_challenge_ack) {
+}
+
+/*
+ * Send a challenge ack (no data, no SACK option), but not more than
+ * V_tcp_ack_war_cnt per V_tcp_ack_war_time_window (per TCP connection).
+ */
+void
+tcp_send_challenge_ack(struct tcpcb *tp, struct tcphdr *th, struct mbuf *m)
+{
+ if (tcp_challenge_ack_check(&tp->t_challenge_ack_end,
+ &tp->t_challenge_ack_cnt)) {
tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt,
tp->snd_nxt, TH_ACK);
tp->last_ack_sent = tp->rcv_nxt;
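tcp_challenge_ack_check() is factored out so the same rate limit can run against the syncache's per-entry counters as well as the tcpcb's (see syncache_send_challenge_ack() further down). A userspace sketch of the epoch accounting under assumed sysctl values; the badport_bandlim() side effect and the limit-disabled case are left out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t sbintime_t;	/* stand-in for the kernel type */

    static const uint32_t ack_war_cnt = 5;		/* V_tcp_ack_war_cnt */
    static const sbintime_t ack_war_window = 1000;	/* V_tcp_ack_war_time_window */

    /* Caller owns *epoch_end/*ack_cnt (tcpcb or syncache entry). */
    static bool
    challenge_ack_ok(sbintime_t now, sbintime_t *epoch_end, uint32_t *ack_cnt)
    {
        if (*epoch_end < now) {		/* previous epoch over */
            *ack_cnt = 0;
            *epoch_end = now + ack_war_window;
        }
        if (*ack_cnt < ack_war_cnt) {
            (*ack_cnt)++;
            return (true);
        }
        return (false);
    }

    int
    main(void)
    {
        sbintime_t epoch_end = 0;
        uint32_t cnt = 0;

        for (int i = 0; i < 8; i++)	/* burst of 8: only 5 pass */
            printf("ack %d: %s\n", i,
                challenge_ack_ok(100, &epoch_end, &cnt) ? "send" : "drop");
        return (0);
    }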
@@ -3197,7 +3217,7 @@ tcp6_next_pmtu(const struct icmp6_hdr *icmp6)
* small, set to the min.
*/
if (mtu < IPV6_MMTU)
- mtu = IPV6_MMTU - 8; /* XXXNP: what is the adjustment for? */
+ mtu = IPV6_MMTU;
return (mtu);
}
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 80e6b53d10df..f842a5678fa1 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -102,15 +102,15 @@
#include <security/mac/mac_framework.h>
-VNET_DEFINE_STATIC(int, tcp_syncookies) = 1;
+VNET_DEFINE_STATIC(bool, tcp_syncookies) = true;
#define V_tcp_syncookies VNET(tcp_syncookies)
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
+SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_syncookies), 0,
"Use TCP SYN cookies if the syncache overflows");
-VNET_DEFINE_STATIC(int, tcp_syncookiesonly) = 0;
+VNET_DEFINE_STATIC(bool, tcp_syncookiesonly) = false;
#define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
+SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_VNET | CTLFLAG_RW,
&VNET_NAME(tcp_syncookiesonly), 0,
"Use only TCP SYN cookies");
@@ -122,6 +122,7 @@ static void syncache_drop(struct syncache *, struct syncache_head *);
static void syncache_free(struct syncache *);
static void syncache_insert(struct syncache *, struct syncache_head *);
static int syncache_respond(struct syncache *, const struct mbuf *, int);
+static void syncache_send_challenge_ack(struct syncache *, struct mbuf *);
static struct socket *syncache_socket(struct syncache *, struct socket *,
struct mbuf *m);
static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
@@ -553,9 +554,8 @@ syncache_timer(void *xsch)
static inline bool
syncache_cookiesonly(void)
{
-
- return (V_tcp_syncookies && (V_tcp_syncache.paused ||
- V_tcp_syncookiesonly));
+ return ((V_tcp_syncookies && V_tcp_syncache.paused) ||
+ V_tcp_syncookiesonly);
}
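The rewritten predicate is a behavior fix, not just a cleanup: before, syncookies_only was ignored whenever syncookies was disabled. A quick truth-table check of the two expressions:

    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Enumerate (syncookies, paused, syncookiesonly). */
        for (int i = 0; i < 8; i++) {
            bool sc = i & 4, paused = i & 2, only = i & 1;
            bool before = sc && (paused || only);
            bool after = (sc && paused) || only;

            if (before != after)	/* differs only when !sc && only */
                printf("sc=%d paused=%d only=%d: old=%d new=%d\n",
                    sc, paused, only, before, after);
        }
        return (0);
    }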
/*
@@ -695,13 +695,7 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th, struct mbuf *m,
"sending challenge ACK\n",
s, __func__,
th->th_seq, sc->sc_irs + 1, sc->sc_wnd);
- if (syncache_respond(sc, m, TH_ACK) == 0) {
- TCPSTAT_INC(tcps_sndacks);
- TCPSTAT_INC(tcps_sndtotal);
- } else {
- syncache_drop(sc, sch);
- TCPSTAT_INC(tcps_sc_dropped);
- }
+ syncache_send_challenge_ack(sc, m);
}
} else {
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
@@ -964,6 +958,10 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
if (sc->sc_rxmits > 1)
tp->snd_cwnd = 1;
+ /* Copy over the challenge ACK state. */
+ tp->t_challenge_ack_end = sc->sc_challenge_ack_end;
+ tp->t_challenge_ack_cnt = sc->sc_challenge_ack_cnt;
+
#ifdef TCP_OFFLOAD
/*
* Allow a TOE driver to install its hooks. Note that we hold the
@@ -1083,40 +1081,48 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
#endif
if (sc == NULL) {
- /*
- * There is no syncache entry, so see if this ACK is
- * a returning syncookie. To do this, first:
- * A. Check if syncookies are used in case of syncache
- * overflows
- * B. See if this socket has had a syncache entry dropped in
- * the recent past. We don't want to accept a bogus
- * syncookie if we've never received a SYN or accept it
- * twice.
- * C. check that the syncookie is valid. If it is, then
- * cobble up a fake syncache entry, and return.
- */
- if (locked && !V_tcp_syncookies) {
- SCH_UNLOCK(sch);
- TCPSTAT_INC(tcps_sc_spurcookie);
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
- log(LOG_DEBUG, "%s; %s: Spurious ACK, "
- "segment rejected (syncookies disabled)\n",
- s, __func__);
- goto failed;
- }
- if (locked && !V_tcp_syncookiesonly &&
- sch->sch_last_overflow < time_uptime - SYNCOOKIE_LIFETIME) {
+ if (locked) {
+ /*
+ * The syncache is currently in use (neither disabled,
+ * nor paused), but no entry was found.
+ */
+ if (!V_tcp_syncookies) {
+ /*
+ * Since no syncookies are used in case of
+ * a bucket overflow, don't even check for
+ * a valid syncookie.
+ */
+ SCH_UNLOCK(sch);
+ TCPSTAT_INC(tcps_sc_spurcookie);
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Spurious ACK, "
+ "segment rejected "
+ "(syncookies disabled)\n",
+ s, __func__);
+ goto failed;
+ }
+ if (sch->sch_last_overflow <
+ time_uptime - SYNCOOKIE_LIFETIME) {
+ /*
+ * Since the bucket did not overflow recently,
+ * don't even check for a valid syncookie.
+ */
+ SCH_UNLOCK(sch);
+ TCPSTAT_INC(tcps_sc_spurcookie);
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Spurious ACK, "
+ "segment rejected "
+ "(no syncache entry)\n",
+ s, __func__);
+ goto failed;
+ }
SCH_UNLOCK(sch);
- TCPSTAT_INC(tcps_sc_spurcookie);
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
- log(LOG_DEBUG, "%s; %s: Spurious ACK, "
- "segment rejected (no syncache entry)\n",
- s, __func__);
- goto failed;
}
- if (locked)
- SCH_UNLOCK(sch);
bzero(&scs, sizeof(scs));
+ /*
+ * Now check if the syncookie is valid. If it is, create an
+ * on-stack syncache entry.
+ */
if (syncookie_expand(inc, sch, &scs, th, to, *lsop, port)) {
sc = &scs;
TCPSTAT_INC(tcps_sc_recvcookie);
@@ -1195,7 +1201,6 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
*/
if (sc->sc_flags & SCF_TIMESTAMP && to->to_flags & TOF_TS &&
TSTMP_LT(to->to_tsval, sc->sc_tsreflect)) {
- SCH_UNLOCK(sch);
if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
log(LOG_DEBUG,
"%s; %s: SEG.TSval %u < TS.Recent %u, "
@@ -1203,6 +1208,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
to->to_tsval, sc->sc_tsreflect);
free(s, M_TCPLOG);
}
+ SCH_UNLOCK(sch);
return (-1); /* Do not send RST */
}
@@ -1251,6 +1257,38 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
return (-1); /* Do not send RST */
}
}
+
+ /*
+ * SEG.SEQ validation:
+ * The SEG.SEQ must be in the window starting at our
+ * initial receive sequence number + 1.
+ */
+ if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
+ SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, "
+ "sending challenge ACK\n",
+ s, __func__, th->th_seq, sc->sc_irs + 1);
+ syncache_send_challenge_ack(sc, m);
+ SCH_UNLOCK(sch);
+ free(s, M_TCPLOG);
+ return (-1); /* Do not send RST */
+ }
+
+ /*
+ * SEG.ACK validation:
+ * SEG.ACK must match our initial send sequence number + 1.
+ */
+ if (th->th_ack != sc->sc_iss + 1) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, "
+ "segment rejected\n",
+ s, __func__, th->th_ack, sc->sc_iss + 1);
+ SCH_UNLOCK(sch);
+ free(s, M_TCPLOG);
+ return (0); /* Do send RST, do not free sc. */
+ }
+
TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
sch->sch_length--;
#ifdef TCP_OFFLOAD
@@ -1263,38 +1301,14 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
SCH_UNLOCK(sch);
}
- /*
- * Segment validation:
- * ACK must match our initial sequence number + 1 (the SYN|ACK).
- */
- if (th->th_ack != sc->sc_iss + 1) {
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
- log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
- "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
- goto failed;
- }
-
- /*
- * The SEQ must fall in the window starting at the received
- * initial receive sequence number + 1 (the SYN).
- */
- if (SEQ_LEQ(th->th_seq, sc->sc_irs) ||
- SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
- if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
- log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
- "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
- goto failed;
- }
-
*lsop = syncache_socket(sc, *lsop, m);
if (__predict_false(*lsop == NULL)) {
TCPSTAT_INC(tcps_sc_aborted);
TCPSTATES_DEC(TCPS_SYN_RECEIVED);
- } else
+ } else if (sc != &scs)
TCPSTAT_INC(tcps_sc_completed);
-/* how do we find the inp for the new socket? */
if (sc != &scs)
syncache_free(sc);
return (1);
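Both relocated checks now run under the bucket lock before the entry is unhooked. The SEG.SEQ test accepts sequence numbers in (IRS, IRS + wnd] using the wraparound-safe comparisons; a standalone sketch with the macros written out per their tcp_seq.h semantics and arbitrary test numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t tcp_seq;

    /* Wraparound-safe comparisons, as in netinet/tcp_seq.h. */
    #define SEQ_LEQ(a, b) ((int32_t)((a) - (b)) <= 0)
    #define SEQ_GT(a, b)  ((int32_t)((a) - (b)) > 0)

    /* true if seq passes the check, i.e. falls inside (irs, irs + wnd]. */
    static bool
    seq_in_window(tcp_seq seq, tcp_seq irs, uint16_t wnd)
    {
        return (!(SEQ_LEQ(seq, irs) || SEQ_GT(seq, irs + wnd)));
    }

    int
    main(void)
    {
        tcp_seq irs = 0xfffffff0;	/* near the wrap point on purpose */

        printf("%d %d %d\n",
            seq_in_window(irs + 1, irs, 512),	/* 1: lowest acceptable */
            seq_in_window(irs, irs, 512),	/* 0: the SYN itself */
            seq_in_window(irs + 513, irs, 512));	/* 0: past the window */
        return (0);
    }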
@@ -1669,7 +1683,7 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
sc->sc_tsoff = tcp_new_ts_offset(inc);
}
if ((to->to_flags & TOF_SCALE) && (V_tcp_do_rfc1323 != 3)) {
- int wscale = 0;
+ u_int wscale = 0;
/*
* Pick the smallest possible scaling factor that
@@ -1719,13 +1733,13 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
if (V_tcp_do_ecn && (tp->t_flags2 & TF2_CANNOT_DO_ECN) == 0)
sc->sc_flags |= tcp_ecn_syncache_add(tcp_get_flags(th), iptos);
- if (V_tcp_syncookies)
+ if (V_tcp_syncookies || V_tcp_syncookiesonly)
sc->sc_iss = syncookie_generate(sch, sc);
else
sc->sc_iss = arc4random();
#ifdef INET6
if (autoflowlabel) {
- if (V_tcp_syncookies)
+ if (V_tcp_syncookies || V_tcp_syncookiesonly)
sc->sc_flowlabel = sc->sc_iss;
else
sc->sc_flowlabel = ip6_randomflowlabel();
@@ -2047,6 +2061,18 @@ syncache_respond(struct syncache *sc, const struct mbuf *m0, int flags)
return (error);
}
+static void
+syncache_send_challenge_ack(struct syncache *sc, struct mbuf *m)
+{
+ if (tcp_challenge_ack_check(&sc->sc_challenge_ack_end,
+ &sc->sc_challenge_ack_cnt)) {
+ if (syncache_respond(sc, m, TH_ACK) == 0) {
+ TCPSTAT_INC(tcps_sndacks);
+ TCPSTAT_INC(tcps_sndtotal);
+ }
+ }
+}
+
/*
* The purpose of syncookies is to handle spoofed SYN flooding DoS attacks
* that exceed the capacity of the syncache by avoiding the storage of any
@@ -2265,7 +2291,7 @@ syncookie_expand(struct in_conninfo *inc, const struct syncache_head *sch,
uint32_t hash;
uint8_t *secbits;
tcp_seq ack, seq;
- int wnd, wscale = 0;
+ int wnd;
union syncookie cookie;
/*
@@ -2316,12 +2342,14 @@ syncookie_expand(struct in_conninfo *inc, const struct syncache_head *sch,
sc->sc_peer_mss = tcp_sc_msstab[cookie.flags.mss_idx];
- /* We can simply recompute receive window scale we sent earlier. */
- while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < sb_max)
- wscale++;
-
/* Only use wscale if it was enabled in the original SYN. */
if (cookie.flags.wscale_idx > 0) {
+ u_int wscale = 0;
+
+ /* Recompute the receive window scale that was sent earlier. */
+ while (wscale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << wscale) < sb_max)
+ wscale++;
sc->sc_requested_r_scale = wscale;
sc->sc_requested_s_scale = tcp_sc_wstab[cookie.flags.wscale_idx];
sc->sc_flags |= SCF_WINSCALE;
diff --git a/sys/netinet/tcp_syncache.h b/sys/netinet/tcp_syncache.h
index 55e062e35a54..37f6ff3d6ca9 100644
--- a/sys/netinet/tcp_syncache.h
+++ b/sys/netinet/tcp_syncache.h
@@ -50,7 +50,7 @@ int syncache_pcblist(struct sysctl_req *);
struct syncache {
TAILQ_ENTRY(syncache) sc_hash;
- struct in_conninfo sc_inc; /* addresses */
+ struct in_conninfo sc_inc; /* addresses */
int sc_rxttime; /* retransmit time */
u_int16_t sc_rxmits; /* retransmit counter */
u_int16_t sc_port; /* remote UDP encaps port */
@@ -59,7 +59,7 @@ struct syncache {
u_int32_t sc_flowlabel; /* IPv6 flowlabel */
tcp_seq sc_irs; /* seq from peer */
tcp_seq sc_iss; /* our ISS */
- struct mbuf *sc_ipopts; /* source route */
+ struct mbuf *sc_ipopts; /* source route */
u_int16_t sc_peer_mss; /* peer's MSS */
u_int16_t sc_wnd; /* advertised window */
u_int8_t sc_ip_ttl; /* TTL / Hop Limit */
@@ -67,6 +67,8 @@ struct syncache {
u_int8_t sc_requested_s_scale:4,
sc_requested_r_scale:4;
u_int16_t sc_flags;
+ u_int32_t sc_challenge_ack_cnt; /* chall. ACKs sent in epoch */
+ sbintime_t sc_challenge_ack_end; /* End of chall. ack epoch */
#if defined(TCP_OFFLOAD)
struct toedev *sc_tod; /* entry added by this TOE */
void *sc_todctx; /* TOE driver context */
diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c
index c095fc8f7765..ce63fcf9ffc0 100644
--- a/sys/netinet/tcp_timewait.c
+++ b/sys/netinet/tcp_timewait.c
@@ -101,7 +101,7 @@ sysctl_net_inet_tcp_nolocaltimewait(SYSCTL_HANDLER_ARGS)
if (error == 0 && req->newptr) {
V_nolocaltimewait = new;
gone_in(16, "net.inet.tcp.nolocaltimewait is obsolete."
- " Use net.inet.tcp.local_msl instead.\n");
+ " Use net.inet.tcp.msl_local instead.\n");
}
return (error);
}
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index b90f65e83cb1..c3be95c80798 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -1379,8 +1379,7 @@ int tcp_reass(struct tcpcb *, struct tcphdr *, tcp_seq *, int *,
void tcp_reass_global_init(void);
void tcp_reass_flush(struct tcpcb *);
void tcp_dooptions(struct tcpopt *, u_char *, int, int);
-void tcp_dropwithreset(struct mbuf *, struct tcphdr *,
- struct tcpcb *, int, int);
+void tcp_dropwithreset(struct mbuf *, struct tcphdr *, struct tcpcb *, int);
void tcp_pulloutofband(struct socket *,
struct tcphdr *, struct mbuf *, int);
void tcp_xmit_timer(struct tcpcb *, int);
@@ -1463,6 +1462,7 @@ int tcp_default_output(struct tcpcb *);
void tcp_state_change(struct tcpcb *, int);
void tcp_respond(struct tcpcb *, void *,
struct tcphdr *, struct mbuf *, tcp_seq, tcp_seq, uint16_t);
+bool tcp_challenge_ack_check(sbintime_t *, uint32_t *);
void tcp_send_challenge_ack(struct tcpcb *, struct tcphdr *, struct mbuf *);
bool tcp_twcheck(struct inpcb *, struct tcpopt *, struct tcphdr *,
struct mbuf *, int);
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index df8f293f9426..cea8a916679b 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -223,16 +223,18 @@ VNET_SYSUNINIT(udp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, udp_destroy, NULL);
* udp_append() will convert to a sockaddr_in6 before passing the address
* into the socket code.
*
- * In the normal case udp_append() will return 0, indicating that you
- * must unlock the inp. However if a tunneling protocol is in place we increment
- * the inpcb refcnt and unlock the inp, on return from the tunneling protocol we
- * then decrement the reference count. If the inp_rele returns 1, indicating the
- * inp is gone, we return that to the caller to tell them *not* to unlock
- * the inp. In the case of multi-cast this will cause the distribution
- * to stop (though most tunneling protocols known currently do *not* use
- * multicast).
+ * In the normal case udp_append() will return 'false', indicating that you
+ * must unlock the inpcb. However if a tunneling protocol is in place we
+ * increment the inpcb refcnt and unlock the inpcb, on return from the tunneling
+ * protocol we then decrement the reference count. If in_pcbrele_rlocked()
+ * returns 'true', indicating the inpcb is gone, we return that to the caller
+ * to tell them *not* to unlock the inpcb. In the case of multicast this will
+ * cause the distribution to stop (though most tunneling protocols known
+ * currently do *not* use multicast).
+ *
+ * The mbuf is always consumed.
*/
-static int
+static bool
udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
struct sockaddr_in *udp_in)
{
@@ -255,15 +257,16 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
in_pcbref(inp);
INP_RUNLOCK(inp);
- filtered = (*up->u_tun_func)(n, off, inp, (struct sockaddr *)&udp_in[0],
- up->u_tun_ctx);
+ filtered = (*up->u_tun_func)(n, off, inp,
+ (struct sockaddr *)&udp_in[0], up->u_tun_ctx);
INP_RLOCK(inp);
- if (in_pcbrele_rlocked(inp))
- return (1);
- if (filtered) {
- INP_RUNLOCK(inp);
- return (1);
+ if (in_pcbrele_rlocked(inp)) {
+ if (!filtered)
+ m_freem(n);
+ return (true);
}
+ if (filtered)
+ return (false);
}
off += sizeof(struct udphdr);
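The tunnel branch above is the usual inpcb pattern: take a reference, drop the read lock across the callback, relock, and see whether the pcb died in the interim; the fix is freeing the mbuf when the pcb is gone but the callback did not consume it. A schematic with stub primitives (not the real inpcb API); returning true means the caller must not unlock:

    #include <stdbool.h>
    #include <stdio.h>

    struct pcb { int refs; bool dead; };

    static void pcb_ref(struct pcb *p)    { p->refs++; }
    static void pcb_unlock(struct pcb *p) { (void)p; }
    static void pcb_lock(struct pcb *p)   { (void)p; }
    /* Drop a ref under the lock; true means the pcb was freed. */
    static bool pcb_rele(struct pcb *p)   { return (--p->refs == 0 && p->dead); }

    static bool tun_consumes(void)        { return (true); }

    static bool
    deliver_via_tunnel(struct pcb *p, bool (*tun)(void))
    {
        bool filtered;

        pcb_ref(p);
        pcb_unlock(p);		/* callback runs without the pcb lock */
        filtered = tun();	/* true if it consumed the packet */
        pcb_lock(p);
        if (pcb_rele(p)) {	/* pcb went away while unlocked */
            if (!filtered)
                printf("free the unconsumed packet here\n");
            return (true);	/* caller must NOT unlock */
        }
        if (filtered)
            return (false);	/* tunnel ate the packet; caller unlocks */
        printf("deliver to the socket as usual\n");
        return (false);
    }

    int
    main(void)
    {
        struct pcb p = { .refs = 1, .dead = false };

        if (!deliver_via_tunnel(&p, tun_consumes))
            printf("caller unlocks\n");
        return (0);
    }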
@@ -273,18 +276,18 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
if (IPSEC_ENABLED(ipv4) &&
IPSEC_CHECK_POLICY(ipv4, n, inp) != 0) {
m_freem(n);
- return (0);
+ return (false);
}
if (up->u_flags & UF_ESPINUDP) {/* IPSec UDP encaps. */
if (IPSEC_ENABLED(ipv4) &&
UDPENCAP_INPUT(ipv4, n, off, AF_INET) != 0)
- return (0); /* Consumed. */
+ return (false);
}
#endif /* IPSEC */
#ifdef MAC
if (mac_inpcb_check_deliver(inp, n) != 0) {
m_freem(n);
- return (0);
+ return (false);
}
#endif /* MAC */
if (inp->inp_flags & INP_CONTROLOPTS ||
@@ -330,7 +333,7 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
UDPSTAT_INC(udps_fullsock);
} else
sorwakeup_locked(so);
- return (0);
+ return (false);
}
static bool
@@ -448,7 +451,7 @@ udp_multi_input(struct mbuf *m, int proto, struct sockaddr_in *udp_in)
/*
* No matching pcb found; discard datagram. (No need
* to send an ICMP Port Unreachable for a broadcast
- * or multicast datgram.)
+ * or multicast datagram.)
*/
UDPSTAT_INC(udps_noport);
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)))
@@ -654,7 +657,11 @@ udp_input(struct mbuf **mp, int *offp, int proto)
else
UDP_PROBE(receive, NULL, NULL, ip, NULL, uh);
UDPSTAT_INC(udps_noport);
- if (m->m_flags & (M_BCAST | M_MCAST)) {
+ if (m->m_flags & M_MCAST) {
+ UDPSTAT_INC(udps_noportmcast);
+ goto badunlocked;
+ }
+ if (m->m_flags & M_BCAST) {
UDPSTAT_INC(udps_noportbcast);
goto badunlocked;
}
@@ -695,7 +702,7 @@ udp_input(struct mbuf **mp, int *offp, int proto)
UDPLITE_PROBE(receive, NULL, inp, ip, inp, uh);
else
UDP_PROBE(receive, NULL, inp, ip, inp, uh);
- if (udp_append(inp, ip, m, iphlen, udp_in) == 0)
+ if (!udp_append(inp, ip, m, iphlen, udp_in))
INP_RUNLOCK(inp);
return (IPPROTO_DONE);
diff --git a/sys/netinet6/icmp6.c b/sys/netinet6/icmp6.c
index d89515d7eda5..f98381499b2d 100644
--- a/sys/netinet6/icmp6.c
+++ b/sys/netinet6/icmp6.c
@@ -159,6 +159,7 @@ static int ni6_addrs(struct icmp6_nodeinfo *, struct mbuf *,
static int ni6_store_addrs(struct icmp6_nodeinfo *, struct icmp6_nodeinfo *,
struct ifnet *, int);
static int icmp6_notify_error(struct mbuf **, int, int);
+static void icmp6_mtudisc_update(struct ip6ctlparam *ip6cp);
/*
* Kernel module interface for updating icmp6stat. The argument is an index
@@ -1115,7 +1116,7 @@ icmp6_notify_error(struct mbuf **mp, int off, int icmp6len)
if (icmp6type == ICMP6_PACKET_TOO_BIG) {
notifymtu = ntohl(icmp6->icmp6_mtu);
ip6cp.ip6c_cmdarg = (void *)&notifymtu;
- icmp6_mtudisc_update(&ip6cp, 1); /*XXX*/
+ icmp6_mtudisc_update(&ip6cp);
}
if (ip6_ctlprotox[nxt] != NULL)
@@ -1130,47 +1131,18 @@ icmp6_notify_error(struct mbuf **mp, int off, int icmp6len)
return (-1);
}
-void
-icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated)
+static void
+icmp6_mtudisc_update(struct ip6ctlparam *ip6cp)
{
struct in6_addr *dst = &ip6cp->ip6c_finaldst->sin6_addr;
struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;
- struct mbuf *m = ip6cp->ip6c_m; /* will be necessary for scope issue */
+ struct mbuf *m = ip6cp->ip6c_m;
u_int mtu = ntohl(icmp6->icmp6_mtu);
struct in_conninfo inc;
uint32_t max_mtu;
-#if 0
- /*
- * RFC2460 section 5, last paragraph.
- * even though minimum link MTU for IPv6 is IPV6_MMTU,
- * we may see ICMPv6 too big with mtu < IPV6_MMTU
- * due to packet translator in the middle.
- * see ip6_output() and ip6_getpmtu() "alwaysfrag" case for
- * special handling.
- */
if (mtu < IPV6_MMTU)
return;
-#endif
-
- /*
- * we reject ICMPv6 too big with abnormally small value.
- * XXX what is the good definition of "abnormally small"?
- */
- if (mtu < sizeof(struct ip6_hdr) + sizeof(struct ip6_frag) + 8)
- return;
-
- if (!validated)
- return;
-
- /*
- * In case the suggested mtu is less than IPV6_MMTU, we
- * only need to remember that it was for above mentioned
- * "alwaysfrag" case.
- * Try to be as close to the spec as possible.
- */
- if (mtu < IPV6_MMTU)
- mtu = IPV6_MMTU - 8;
bzero(&inc, sizeof(inc));
inc.inc_fibnum = M_GETFIB(m);
diff --git a/sys/netinet6/in6.c b/sys/netinet6/in6.c
index ce0655408a28..b98703bdfbfe 100644
--- a/sys/netinet6/in6.c
+++ b/sys/netinet6/in6.c
@@ -1235,11 +1235,20 @@ in6_addifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *i
int carp_attached = 0;
int error;
- /* Check if this interface is a bridge member */
- if (ifp->if_bridge && bridge_member_ifaddrs_p &&
- !bridge_member_ifaddrs_p()) {
- error = EINVAL;
- goto out;
+ /*
+ * Check if bridge wants to allow adding addrs to member interfaces.
+ */
+ if (ifp->if_bridge != NULL && ifp->if_type != IFT_GIF &&
+ bridge_member_ifaddrs_p != NULL) {
+ if (bridge_member_ifaddrs_p()) {
+ if_printf(ifp, "WARNING: Assigning an IP address to "
+ "an interface which is also a bridge member is "
+ "deprecated and will be unsupported in a future "
+ "release.\n");
+ } else {
+ error = EINVAL;
+ goto out;
+ }
}
/*
@@ -1286,8 +1295,8 @@ in6_addifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra, struct in6_ifaddr *i
*/
bzero(&pr0, sizeof(pr0));
pr0.ndpr_ifp = ifp;
- pr0.ndpr_plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr,
- NULL);
+ pr0.ndpr_plen = ia->ia_plen =
+ in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL);
if (pr0.ndpr_plen == 128) {
/* we don't need to install a host route. */
goto aifaddr_out;
@@ -1481,16 +1490,16 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
* positive reference.
*/
remove_lle = 0;
- if (ia->ia6_ndpr == NULL) {
- nd6log((LOG_NOTICE,
- "in6_unlink_ifa: autoconf'ed address "
- "%s has no prefix\n", ip6_sprintf(ip6buf, IA6_IN6(ia))));
- } else {
+ if (ia->ia6_ndpr != NULL) {
ia->ia6_ndpr->ndpr_addrcnt--;
/* Do not delete lles within prefix if refcount != 0 */
if (ia->ia6_ndpr->ndpr_addrcnt == 0)
remove_lle = 1;
ia->ia6_ndpr = NULL;
+ } else if (ia->ia_plen < 128) {
+ nd6log((LOG_NOTICE,
+ "in6_unlink_ifa: autoconf'ed address "
+ "%s has no prefix\n", ip6_sprintf(ip6buf, IA6_IN6(ia))));
}
nd6_rem_ifa_lle(ia, remove_lle);
@@ -2618,6 +2627,8 @@ in6_domifdetach(struct ifnet *ifp, void *aux)
{
struct in6_ifextra *ext = (struct in6_ifextra *)aux;
+ MPASS(ifp->if_afdata[AF_INET6] == NULL);
+
mld_domifdetach(ifp);
scope6_ifdetach(ext->scope6_id);
nd6_ifdetach(ifp, ext->nd_ifinfo);
diff --git a/sys/netinet6/in6.h b/sys/netinet6/in6.h
index 1ca846ebf514..a7fe03b9c3d7 100644
--- a/sys/netinet6/in6.h
+++ b/sys/netinet6/in6.h
@@ -358,11 +358,11 @@ extern const struct in6_addr in6addr_linklocal_allv2routers;
#define IFA6_IS_DEPRECATED(a) \
((a)->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME && \
- (u_int32_t)((time_uptime - (a)->ia6_updatetime)) > \
+ (u_int32_t)((time_uptime - (a)->ia6_updatetime)) >= \
(a)->ia6_lifetime.ia6t_pltime)
#define IFA6_IS_INVALID(a) \
((a)->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME && \
- (u_int32_t)((time_uptime - (a)->ia6_updatetime)) > \
+ (u_int32_t)((time_uptime - (a)->ia6_updatetime)) >= \
(a)->ia6_lifetime.ia6t_vltime)
#endif /* _KERNEL */
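The switch from '>' to '>=' only matters at the exact boundary: an address whose lifetime is 600 s is now deprecated (or invalidated) the second the elapsed time reaches 600, not one second later. A boundary check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ND6_INFINITE_LIFETIME 0xffffffffU

    static bool
    deprecated_old(uint32_t elapsed, uint32_t pltime)
    {
        return (pltime != ND6_INFINITE_LIFETIME && elapsed > pltime);
    }

    static bool
    deprecated_new(uint32_t elapsed, uint32_t pltime)
    {
        return (pltime != ND6_INFINITE_LIFETIME && elapsed >= pltime);
    }

    int
    main(void)
    {
        /* Only the exact boundary differs: 599 -> 0/0, 600 -> 0/1. */
        printf("599: old=%d new=%d\n", deprecated_old(599, 600),
            deprecated_new(599, 600));
        printf("600: old=%d new=%d\n", deprecated_old(600, 600),
            deprecated_new(600, 600));
        return (0);
    }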
@@ -609,6 +609,8 @@ struct ip6_mtuinfo {
/* IPV6CTL_RTMINEXPIRE 26 deprecated */
/* IPV6CTL_RTMAXCACHE 27 deprecated */
+#define IPV6CTL_STABLEADDR_NETIFSRC 30 /* semantically opaque addresses (RFC7217) hash algo netif parameter src */
+#define IPV6CTL_STABLEADDR_MAXRETRIES 31 /* semantically opaque addresses (RFC7217) max DAD retries */
#define IPV6CTL_USETEMPADDR 32 /* use temporary addresses (RFC3041) */
#define IPV6CTL_TEMPPLTIME 33 /* preferred lifetime for tmpaddrs */
#define IPV6CTL_TEMPVLTIME 34 /* valid lifetime for tmpaddrs */
@@ -617,6 +619,7 @@ struct ip6_mtuinfo {
#define IPV6CTL_PREFER_TEMPADDR 37 /* prefer temporary addr as src */
#define IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */
#define IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */
+#define IPV6CTL_USESTABLEADDR 40 /* use semantically opaque addresses (RFC7217) */
#define IPV6CTL_MAXFRAGS 41 /* max fragments */
#if 0
diff --git a/sys/netinet6/in6_fib_algo.c b/sys/netinet6/in6_fib_algo.c
index 10ffe7ab0265..ef5cfc6d5ef6 100644
--- a/sys/netinet6/in6_fib_algo.c
+++ b/sys/netinet6/in6_fib_algo.c
@@ -351,7 +351,7 @@ struct fib_lookup_module flm_radix6 = {
};
static void
-fib6_algo_init(void)
+fib6_algo_init(void *dummy __unused)
{
fib_module_register(&flm_radix6_lockless);
diff --git a/sys/netinet6/in6_ifattach.c b/sys/netinet6/in6_ifattach.c
index f284f7fa5ffc..090ba610460b 100644
--- a/sys/netinet6/in6_ifattach.c
+++ b/sys/netinet6/in6_ifattach.c
@@ -33,6 +33,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/counter.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
@@ -70,6 +71,9 @@
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>
+#include <crypto/sha2/sha256.h>
+#include <machine/atomic.h>
+
#ifdef IP6_AUTO_LINKLOCAL
VNET_DEFINE(int, ip6_auto_linklocal) = IP6_AUTO_LINKLOCAL;
#else
@@ -79,11 +83,12 @@ VNET_DEFINE(int, ip6_auto_linklocal) = 1; /* enabled by default */
VNET_DEFINE(struct callout, in6_tmpaddrtimer_ch);
#define V_in6_tmpaddrtimer_ch VNET(in6_tmpaddrtimer_ch)
+VNET_DEFINE(int, ip6_stableaddr_netifsource) = IP6_STABLEADDR_NETIFSRC_NAME; /* Use interface name by default */
+
VNET_DECLARE(struct inpcbinfo, ripcbinfo);
#define V_ripcbinfo VNET(ripcbinfo)
static int get_rand_ifid(struct ifnet *, struct in6_addr *);
-static int get_ifid(struct ifnet *, struct ifnet *, struct in6_addr *);
static int in6_ifattach_linklocal(struct ifnet *, struct ifnet *);
static int in6_ifattach_loopback(struct ifnet *);
static void in6_purgemaddrs(struct ifnet *);
@@ -99,6 +104,9 @@ static void in6_purgemaddrs(struct ifnet *);
#define IFID_LOCAL(in6) (!EUI64_LOCAL(in6))
#define IFID_UNIVERSAL(in6) (!EUI64_UNIVERSAL(in6))
+#define HMAC_IPAD 0x36
+#define HMAC_OPAD 0x5C
+
/*
* Generate a last-resort interface identifier, when the machine has no
* IEEE802/EUI64 address sources.
@@ -148,22 +156,14 @@ get_rand_ifid(struct ifnet *ifp, struct in6_addr *in6)
}
-/*
- * Get interface identifier for the specified interface.
- * XXX assumes single sockaddr_dl (AF_LINK address) per an interface
- *
- * in6 - upper 64bits are preserved
+/*
+ * Get the interface's link-level sockaddr.
*/
-int
-in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
+static struct sockaddr_dl *
+get_interface_link_level(struct ifnet *ifp)
{
struct ifaddr *ifa;
struct sockaddr_dl *sdl;
- u_int8_t *addr;
- size_t addrlen;
- static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
- static u_int8_t allone[8] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
NET_EPOCH_ASSERT();
@@ -176,14 +176,30 @@ in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
if (sdl->sdl_alen == 0)
continue;
- goto found;
+ return sdl;
}
- return -1;
+ return NULL;
+}
+
+/*
+ * Get the hardware (link-layer) address for the interface.
+ */
+static uint8_t *
+in6_get_interface_hwaddr(struct ifnet *ifp, size_t *len)
+{
+ struct sockaddr_dl *sdl;
+ u_int8_t *addr;
+ static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ static u_int8_t allone[8] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ sdl = get_interface_link_level(ifp);
+ if (sdl == NULL)
+ return (NULL);
-found:
addr = LLADDR(sdl);
- addrlen = sdl->sdl_alen;
+ *len = sdl->sdl_alen;
/* get EUI64 */
switch (ifp->if_type) {
@@ -194,36 +210,21 @@ found:
case IFT_IEEE1394:
/* IEEE802/EUI64 cases - what others? */
/* IEEE1394 uses 16byte length address starting with EUI64 */
- if (addrlen > 8)
- addrlen = 8;
+ if (*len > 8)
+ *len = 8;
/* look at IEEE802/EUI64 only */
- if (addrlen != 8 && addrlen != 6)
- return -1;
+ if (*len != 8 && *len != 6)
+ return (NULL);
/*
* check for invalid MAC address - on bsdi, we see it a lot
* since wildboar configures all-zero MAC on pccard before
* card insertion.
*/
- if (bcmp(addr, allzero, addrlen) == 0)
- return -1;
- if (bcmp(addr, allone, addrlen) == 0)
- return -1;
-
- /* make EUI64 address */
- if (addrlen == 8)
- bcopy(addr, &in6->s6_addr[8], 8);
- else if (addrlen == 6) {
- in6->s6_addr[8] = addr[0];
- in6->s6_addr[9] = addr[1];
- in6->s6_addr[10] = addr[2];
- in6->s6_addr[11] = 0xff;
- in6->s6_addr[12] = 0xfe;
- in6->s6_addr[13] = addr[3];
- in6->s6_addr[14] = addr[4];
- in6->s6_addr[15] = addr[5];
- }
+ if (memcmp(addr, allzero, *len) == 0 || memcmp(addr, allone, *len) == 0)
+ return (NULL);
+
break;
case IFT_GIF:
@@ -234,16 +235,51 @@ found:
* identifier source (can be renumbered).
* we don't do this.
*/
- return -1;
+ return (NULL);
case IFT_INFINIBAND:
- if (addrlen != 20)
- return -1;
- bcopy(addr + 12, &in6->s6_addr[8], 8);
+ if (*len != 20)
+ return (NULL);
+ *len = 8;
+ addr += 12;
break;
default:
+ return (NULL);
+ }
+
+ return addr;
+}
+
+/*
+ * Get interface identifier for the specified interface.
+ * XXX assumes a single sockaddr_dl (AF_LINK address) per interface
+ *
+ * in6 - upper 64bits are preserved
+ */
+int
+in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
+{
+ size_t hwaddr_len;
+ uint8_t *hwaddr;
+ static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ hwaddr = in6_get_interface_hwaddr(ifp, &hwaddr_len);
+ if (hwaddr == NULL || (hwaddr_len != 6 && hwaddr_len != 8))
return -1;
+
+ /* make EUI64 address */
+ if (hwaddr_len == 8)
+ memcpy(&in6->s6_addr[8], hwaddr, 8);
+ else if (hwaddr_len == 6) {
+ in6->s6_addr[8] = hwaddr[0];
+ in6->s6_addr[9] = hwaddr[1];
+ in6->s6_addr[10] = hwaddr[2];
+ in6->s6_addr[11] = 0xff;
+ in6->s6_addr[12] = 0xfe;
+ in6->s6_addr[13] = hwaddr[3];
+ in6->s6_addr[14] = hwaddr[4];
+ in6->s6_addr[15] = hwaddr[5];
}
/* sanity check: g bit must not indicate "group" */
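For reference, the EUI-64 construction relocated above expands a 6-byte MAC by splitting it around the bytes 0xff 0xfe, while an 8-byte address is copied verbatim. A minimal userspace sketch of the mapping (the kernel additionally rejects all-zero/all-one addresses and checks the u/g bits):

#include <stdint.h>
#include <string.h>

/* Modified EUI-64 expansion: hw is a 6- or 8-byte link-layer address. */
static void
mac_to_eui64(const uint8_t *hw, size_t len, uint8_t iid[8])
{
	if (len == 8) {
		memcpy(iid, hw, 8);
	} else {		/* len == 6 */
		iid[0] = hw[0]; iid[1] = hw[1]; iid[2] = hw[2];
		iid[3] = 0xff;  iid[4] = 0xfe;
		iid[5] = hw[3]; iid[6] = hw[4]; iid[7] = hw[5];
	}
}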
@@ -265,21 +301,175 @@ found:
}
/*
+ * Validate generated interface id to make sure it does not fall in any reserved range:
+ *
+ * https://www.iana.org/assignments/ipv6-interface-ids/ipv6-interface-ids.xhtml
+ */
+static bool
+validate_ifid(uint8_t *iid)
+{
+ static uint8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ static uint8_t reserved_eth[5] = { 0x02, 0x00, 0x5E, 0xFF, 0xFE };
+ static uint8_t reserved_anycast[7] = { 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ /* Subnet-Router Anycast (RFC 4291)*/
+ if (memcmp(iid, allzero, 8) == 0)
+ return (false);
+
+ /*
+ * Reserved IPv6 Interface Identifiers corresponding to the IANA Ethernet Block (RFC 4291)
+ * and
+ * Proxy Mobile IPv6 (RFC 6543)
+ */
+ if (memcmp(iid, reserved_eth, 5) == 0)
+ return (false);
+
+ /* Reserved Subnet Anycast Addresses (RFC 2526) */
+ if (memcmp(iid, reserved_anycast, 7) == 0 && iid[7] >= 0x80)
+ return (false);
+
+ return (true);
+}
+
+/*
+ * Get interface identifier for the specified interface, according to
+ * RFC 7217 Stable and Opaque IDs with SLAAC, using HMAC-SHA256 digest.
+ *
+ * in6 - upper 64bits are preserved
+ */
+bool
+in6_get_stableifid(struct ifnet *ifp, struct in6_addr *in6, int prefixlen)
+{
+ struct sockaddr_dl *sdl;
+ const uint8_t *netiface;
+ size_t netiface_len, hostuuid_len;
+ char hostuuid[HOSTUUIDLEN + 1];
+ uint8_t hmac_key[SHA256_BLOCK_LENGTH],
+ hk_ipad[SHA256_BLOCK_LENGTH], hk_opad[SHA256_BLOCK_LENGTH];
+ uint64_t dad_failures;
+ SHA256_CTX ctxt;
+
+ switch (V_ip6_stableaddr_netifsource) {
+ case IP6_STABLEADDR_NETIFSRC_ID:
+ sdl = get_interface_link_level(ifp);
+ if (sdl == NULL)
+ return (false);
+ netiface = (uint8_t *)&LLINDEX(sdl);
+ netiface_len = sizeof(u_short); /* real return type of LLINDEX */
+ break;
+
+ case IP6_STABLEADDR_NETIFSRC_MAC:
+ netiface = in6_get_interface_hwaddr(ifp, &netiface_len);
+ if (netiface == NULL)
+ return (false);
+ break;
+
+ case IP6_STABLEADDR_NETIFSRC_NAME:
+ default:
+ netiface = (const uint8_t *)if_name(ifp);
+ netiface_len = strlen((const char *)netiface);
+ break;
+ }
+
+ /* Use hostuuid as constant "secret" key */
+ getcredhostuuid(curthread->td_ucred, hostuuid, sizeof(hostuuid));
+ if (strncmp(hostuuid, DEFAULT_HOSTUUID, sizeof(hostuuid)) == 0) {
+ /* If hostuuid is not set, use a random value. */
+ arc4rand(hostuuid, HOSTUUIDLEN, 0);
+ hostuuid[HOSTUUIDLEN] = '\0';
+ }
+ hostuuid_len = strlen(hostuuid);
+
+ dad_failures = atomic_load_int(&DAD_FAILURES(ifp));
+
+ /*
+ * RFC 7217 section 7: stop generating addresses once the number of
+ * DAD failures exceeds the configured maximum of retries.
+ */
+ if (dad_failures > V_ip6_stableaddr_maxretries)
+ return (false);
+
+ /*
+ * Use hostuuid as basis for HMAC key
+ */
+ memset(hmac_key, 0, sizeof(hmac_key));
+ if (hostuuid_len <= SHA256_BLOCK_LENGTH) {
+ /* copy to hmac key variable, zero padded */
+ memcpy(hmac_key, hostuuid, hostuuid_len);
+ } else {
+ /* if longer than block length, use hash of the value, zero padded */
+ SHA256_Init(&ctxt);
+ SHA256_Update(&ctxt, hostuuid, hostuuid_len);
+ SHA256_Final(hmac_key, &ctxt);
+ }
+ /* XOR key with ipad and opad values */
+ for (uint16_t i = 0; i < sizeof(hmac_key); i++) {
+ hk_ipad[i] = hmac_key[i] ^ HMAC_IPAD;
+ hk_opad[i] = hmac_key[i] ^ HMAC_OPAD;
+ }
+
+ /*
+ * Generate the interface id in a loop, factoring an offset into the hash input.
+ * This is necessary because, if the generated interface id happens to be invalid,
+ * we want to force the hash function to produce a different one; otherwise we
+ * would loop forever retrying the same invalid interface id.
+ *
+ * The offset is a uint8_t counter, so iteration is bounded by UINT8_MAX. This is
+ * a safety measure; in practice the loop runs only once or twice.
+ */
+ for (uint8_t offset = 0; offset < UINT8_MAX; offset++) {
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+
+ /* Calculate inner hash */
+ SHA256_Init(&ctxt);
+ SHA256_Update(&ctxt, hk_ipad, sizeof(hk_ipad));
+ SHA256_Update(&ctxt, in6->s6_addr, prefixlen / 8);
+ SHA256_Update(&ctxt, netiface, netiface_len);
+ SHA256_Update(&ctxt, (uint8_t *)&dad_failures, 8);
+ SHA256_Update(&ctxt, hostuuid, hostuuid_len);
+ SHA256_Update(&ctxt, &offset, 1);
+ SHA256_Final(digest, &ctxt);
+
+ /* Calculate outer hash */
+ SHA256_Init(&ctxt);
+ SHA256_Update(&ctxt, hk_opad, sizeof(hk_opad));
+ SHA256_Update(&ctxt, digest, sizeof(digest));
+ SHA256_Final(digest, &ctxt);
+
+ if (validate_ifid(digest)) {
+ /* assumes sizeof(digest) > sizeof(ifid) */
+ memcpy(&in6->s6_addr[8], digest, 8);
+
+ return (true);
+ }
+ }
+
+ return (false);
+}
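The function above open-codes HMAC-SHA256 via the ipad/opad construction and feeds it the RFC 7217 F() inputs: prefix, Net_Iface, DAD counter, the secret key (hostuuid), and the retry offset. A userspace sketch of the same computation using OpenSSL's HMAC(); the names are illustrative, and the caller is assumed to keep the total input under sizeof(msg):

#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

/*
 * RFC 7217 F(): HMAC-SHA256 over Prefix | Net_Iface | DAD_Counter |
 * secret_key | offset, truncated to 64 bits, mirroring the input order
 * used by in6_get_stableifid() above.
 */
static void
rfc7217_iid(const uint8_t *prefix, size_t prefix_len,
    const uint8_t *netiface, size_t netiface_len,
    uint64_t dad_counter, const uint8_t *key, size_t key_len,
    uint8_t offset, uint8_t iid[8])
{
	uint8_t msg[192], digest[32];
	unsigned int dlen = sizeof(digest);
	size_t off = 0;

	memcpy(msg + off, prefix, prefix_len); off += prefix_len;
	memcpy(msg + off, netiface, netiface_len); off += netiface_len;
	memcpy(msg + off, &dad_counter, sizeof(dad_counter));
	off += sizeof(dad_counter);
	memcpy(msg + off, key, key_len); off += key_len;
	msg[off++] = offset;

	HMAC(EVP_sha256(), key, (int)key_len, msg, off, digest, &dlen);
	memcpy(iid, digest, 8);		/* low 64 bits become the IID */
}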
+
+/*
* Get interface identifier for the specified interface. If it is not
* available on ifp0, borrow interface identifier from other information
* sources.
*
* altifp - secondary EUI64 source
*/
-static int
-get_ifid(struct ifnet *ifp0, struct ifnet *altifp,
+int
+in6_get_ifid(struct ifnet *ifp0, struct ifnet *altifp,
struct in6_addr *in6)
{
struct ifnet *ifp;
NET_EPOCH_ASSERT();
- /* first, try to get it from the interface itself */
+ /* first, try to get it from the interface itself, with stable algorithm, if configured */
+ if ((ND_IFINFO(ifp0)->flags & ND6_IFF_STABLEADDR) && in6_get_stableifid(ifp0, in6, 64)) {
+ nd6log((LOG_DEBUG, "%s: got interface identifier from itself (stable private)\n",
+ if_name(ifp0)));
+ goto success;
+ }
+
+ /* then/otherwise try to get it from the interface itself */
if (in6_get_hw_ifid(ifp0, in6) == 0) {
nd6log((LOG_DEBUG, "%s: got interface identifier from itself\n",
if_name(ifp0)));
@@ -356,7 +546,7 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct ifnet *altifp)
ifra.ifra_addr.sin6_addr.s6_addr32[3] = htonl(1);
} else {
NET_EPOCH_ENTER(et);
- error = get_ifid(ifp, altifp, &ifra.ifra_addr.sin6_addr);
+ error = in6_get_ifid(ifp, altifp, &ifra.ifra_addr.sin6_addr);
NET_EPOCH_EXIT(et);
if (error != 0) {
nd6log((LOG_ERR,
diff --git a/sys/netinet6/in6_ifattach.h b/sys/netinet6/in6_ifattach.h
index 897926e90078..75b2ca4fa018 100644
--- a/sys/netinet6/in6_ifattach.h
+++ b/sys/netinet6/in6_ifattach.h
@@ -39,8 +39,11 @@ void in6_ifattach(struct ifnet *, struct ifnet *);
void in6_ifattach_destroy(void);
void in6_ifdetach(struct ifnet *);
void in6_ifdetach_destroy(struct ifnet *);
+int in6_get_tmpifid(struct ifnet *, u_int8_t *, const u_int8_t *, int);
+bool in6_get_stableifid(struct ifnet *, struct in6_addr *, int);
void in6_tmpaddrtimer(void *);
int in6_get_hw_ifid(struct ifnet *, struct in6_addr *);
+int in6_get_ifid(struct ifnet *, struct ifnet *, struct in6_addr *);
int in6_nigroup(struct ifnet *, const char *, int, struct in6_addr *);
int in6_nigroup_oldmcprefix(struct ifnet *, const char *, int, struct in6_addr *);
#endif /* _KERNEL */
diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c
index b289d4eeb0a2..f567b42b42ca 100644
--- a/sys/netinet6/in6_proto.c
+++ b/sys/netinet6/in6_proto.c
@@ -167,6 +167,7 @@ VNET_DEFINE(int, ip6_rr_prune) = 5; /* router renumbering prefix
* walk list every 5 sec. */
VNET_DEFINE(int, ip6_mcast_pmtu) = 0; /* enable pMTU discovery for multicast? */
VNET_DEFINE(int, ip6_v6only) = 1;
+VNET_DEFINE(u_int, ip6_stableaddr_maxretries) = IP6_IDGEN_RETRIES;
#ifdef IPSTEALTH
VNET_DEFINE(int, ip6stealth) = 0;
@@ -313,6 +314,15 @@ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RR_PRUNE, rr_prune,
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USETEMPADDR, use_tempaddr,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_use_tempaddr), 0,
"Create RFC3041 temporary addresses for autoconfigured addresses");
+SYSCTL_BOOL(_net_inet6_ip6, IPV6CTL_USESTABLEADDR, use_stableaddr,
+ CTLFLAG_VNET | CTLFLAG_RWTUN, &VNET_NAME(ip6_use_stableaddr), 0,
+ "Create RFC7217 semantically opaque address for autoconfigured addresses (default for new interfaces)");
+SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_STABLEADDR_MAXRETRIES, stableaddr_maxretries,
+ CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_stableaddr_maxretries), IP6_IDGEN_RETRIES,
+ "RFC7217 semantically opaque address DAD max retries");
+SYSCTL_INT(_net_inet6_ip6, IPV6CTL_STABLEADDR_NETIFSRC, stableaddr_netifsource,
+ CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_stableaddr_netifsource), IP6_STABLEADDR_NETIFSRC_NAME,
+ "RFC7217 semantically opaque address Net_Iface source (0 - name, 1 - ID, 2 - MAC addr)");
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_TEMPPLTIME, temppltime,
CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
NULL, 0, sysctl_ip6_temppltime, "I",
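Because use_stableaddr is CTLFLAG_RWTUN, it can be preset from loader.conf(5) (it is fetched via TUNABLE_BOOL_FETCH in ip6_input.c below) or flipped at runtime. A sketch of toggling it from userland with sysctlbyname(3), assuming the knob names added above:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdbool.h>
#include <err.h>

int
main(void)
{
	bool on = true;

	/* Enable RFC 7217 address generation for newly attached interfaces. */
	if (sysctlbyname("net.inet6.ip6.use_stableaddr", NULL, NULL,
	    &on, sizeof(on)) == -1)
		err(1, "sysctlbyname");
	return (0);
}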
diff --git a/sys/netinet6/in6_src.c b/sys/netinet6/in6_src.c
index dd6864482b3c..3e55c6e5fc05 100644
--- a/sys/netinet6/in6_src.c
+++ b/sys/netinet6/in6_src.c
@@ -132,8 +132,8 @@ static int in6_selectif(struct sockaddr_in6 *, struct ip6_pktopts *,
struct ip6_moptions *, struct ifnet **,
struct ifnet *, u_int);
static int in6_selectsrc(uint32_t, struct sockaddr_in6 *,
- struct ip6_pktopts *, struct inpcb *, struct ucred *,
- struct ifnet **, struct in6_addr *);
+ struct ip6_pktopts *, struct ip6_moptions *, struct inpcb *,
+ struct ucred *, struct ifnet **, struct in6_addr *);
static struct in6_addrpolicy *lookup_addrsel_policy(struct sockaddr_in6 *);
@@ -173,8 +173,8 @@ static struct in6_addrpolicy *match_addrsel_policy(struct sockaddr_in6 *);
static int
in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
- struct ip6_pktopts *opts, struct inpcb *inp, struct ucred *cred,
- struct ifnet **ifpp, struct in6_addr *srcp)
+ struct ip6_pktopts *opts, struct ip6_moptions *mopts, struct inpcb *inp,
+ struct ucred *cred, struct ifnet **ifpp, struct in6_addr *srcp)
{
struct rm_priotracker in6_ifa_tracker;
struct in6_addr dst, tmp;
@@ -186,7 +186,6 @@ in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
u_int32_t odstzone;
int prefer_tempaddr;
int error;
- struct ip6_moptions *mopts;
NET_EPOCH_ASSERT();
KASSERT(srcp != NULL, ("%s: srcp is NULL", __func__));
@@ -205,13 +204,6 @@ in6_selectsrc(uint32_t fibnum, struct sockaddr_in6 *dstsock,
*ifpp = NULL;
}
- if (inp != NULL) {
- INP_LOCK_ASSERT(inp);
- mopts = inp->in6p_moptions;
- } else {
- mopts = NULL;
- }
-
/*
* If the source address is explicitly specified by the caller,
* check if the requested source address is indeed a unicast address
@@ -552,10 +544,13 @@ in6_selectsrc_socket(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
uint32_t fibnum;
int error;
+ INP_LOCK_ASSERT(inp);
+
fibnum = inp->inp_inc.inc_fibnum;
retifp = NULL;
- error = in6_selectsrc(fibnum, dstsock, opts, inp, cred, &retifp, srcp);
+ error = in6_selectsrc(fibnum, dstsock, opts, inp->in6p_moptions,
+ inp, cred, &retifp, srcp);
if (error != 0)
return (error);
@@ -583,7 +578,7 @@ in6_selectsrc_socket(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
* Stores selected address to @srcp.
* Returns 0 on success.
*
- * Used by non-socket based consumers (ND code mostly)
+ * Used by non-socket based consumers
*/
int
in6_selectsrc_addr(uint32_t fibnum, const struct in6_addr *dst,
@@ -602,13 +597,42 @@ in6_selectsrc_addr(uint32_t fibnum, const struct in6_addr *dst,
dst_sa.sin6_scope_id = scopeid;
sa6_embedscope(&dst_sa, 0);
- error = in6_selectsrc(fibnum, &dst_sa, NULL, NULL, NULL, &retifp, srcp);
+ error = in6_selectsrc(fibnum, &dst_sa, NULL, NULL,
+ NULL, NULL, &retifp, srcp);
if (hlim != NULL)
*hlim = in6_selecthlim(NULL, retifp);
return (error);
}
+/*
+ * Select source address based on @fibnum, @dst and @mopts.
+ * Stores selected address to @srcp.
+ * Returns 0 on success.
+ *
+ * Used by non-socket based consumers (ND code mostly)
+ */
+int
+in6_selectsrc_nbr(uint32_t fibnum, const struct in6_addr *dst,
+ struct ip6_moptions *mopts, struct ifnet *ifp, struct in6_addr *srcp)
+{
+ struct sockaddr_in6 dst_sa;
+ struct ifnet *retifp;
+ int error;
+
+ retifp = ifp;
+ bzero(&dst_sa, sizeof(dst_sa));
+ dst_sa.sin6_family = AF_INET6;
+ dst_sa.sin6_len = sizeof(dst_sa);
+ dst_sa.sin6_addr = *dst;
+ dst_sa.sin6_scope_id = ntohs(in6_getscope(dst));
+ sa6_embedscope(&dst_sa, 0);
+
+ error = in6_selectsrc(fibnum, &dst_sa, NULL, mopts,
+ NULL, NULL, &retifp, srcp);
+ return (error);
+}
+
static struct nhop_object *
cache_route(uint32_t fibnum, const struct sockaddr_in6 *dst, struct route_in6 *ro,
uint32_t flowid)
diff --git a/sys/netinet6/in6_var.h b/sys/netinet6/in6_var.h
index e5ab83e6a2a1..1414cc71388d 100644
--- a/sys/netinet6/in6_var.h
+++ b/sys/netinet6/in6_var.h
@@ -106,9 +106,11 @@ struct in6_ifextra {
struct scope6_id *scope6_id;
struct lltable *lltable;
struct mld_ifsoftc *mld_ifinfo;
+ u_int dad_failures; /* DAD failures when using RFC 7217 stable addresses */
};
#define LLTABLE6(ifp) (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->lltable)
+#define DAD_FAILURES(ifp) (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->dad_failures)
#ifdef _KERNEL
diff --git a/sys/netinet6/ip6_input.c b/sys/netinet6/ip6_input.c
index 45fd23ea6c21..99dad1e7c309 100644
--- a/sys/netinet6/ip6_input.c
+++ b/sys/netinet6/ip6_input.c
@@ -235,6 +235,7 @@ ip6_vnet_init(void *arg __unused)
&V_ip6_auto_linklocal);
TUNABLE_INT_FETCH("net.inet6.ip6.accept_rtadv", &V_ip6_accept_rtadv);
TUNABLE_INT_FETCH("net.inet6.ip6.no_radr", &V_ip6_no_radr);
+ TUNABLE_BOOL_FETCH("net.inet6.ip6.use_stableaddr", &V_ip6_use_stableaddr);
CK_STAILQ_INIT(&V_in6_ifaddrhead);
V_in6_ifaddrhashtbl = hashinit(IN6ADDR_NHASH, M_IFADDR,
@@ -1197,8 +1198,8 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
{
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
-#ifdef SO_TIMESTAMP
- if ((inp->inp_socket->so_options & SO_TIMESTAMP) != 0) {
+#if defined(SO_TIMESTAMP) && defined(SO_BINTIME)
+ if ((inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) != 0) {
union {
struct timeval tv;
struct bintime bt;
@@ -1206,47 +1207,66 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
} t;
struct bintime boottimebin, bt1;
struct timespec ts1;
+ int ts_clock;
bool stamped;
+ ts_clock = inp->inp_socket->so_ts_clock;
stamped = false;
- switch (inp->inp_socket->so_ts_clock) {
- case SO_TS_REALTIME_MICRO:
+
+ /*
+ * Handle BINTIME first. We create the same output options
+ * for both SO_BINTIME and the case where SO_TIMESTAMP is
+ * set with the timestamp clock set to SO_TS_BINTIME.
+ */
+ if ((inp->inp_socket->so_options & SO_BINTIME) != 0 ||
+ ts_clock == SO_TS_BINTIME) {
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP)) {
mbuf_tstmp2timespec(m, &ts1);
- timespec2bintime(&ts1, &bt1);
+ timespec2bintime(&ts1, &t.bt);
getboottimebin(&boottimebin);
- bintime_add(&bt1, &boottimebin);
- bintime2timeval(&bt1, &t.tv);
+ bintime_add(&t.bt, &boottimebin);
} else {
- microtime(&t.tv);
+ bintime(&t.bt);
}
- *mp = sbcreatecontrol(&t.tv, sizeof(t.tv),
- SCM_TIMESTAMP, SOL_SOCKET, M_NOWAIT);
+ *mp = sbcreatecontrol(&t.bt, sizeof(t.bt), SCM_BINTIME,
+ SOL_SOCKET, M_NOWAIT);
if (*mp != NULL) {
mp = &(*mp)->m_next;
stamped = true;
}
- break;
- case SO_TS_BINTIME:
+ /*
+ * Suppress other timestamps if SO_TIMESTAMP is not
+ * set.
+ */
+ if ((inp->inp_socket->so_options & SO_TIMESTAMP) == 0)
+ ts_clock = SO_TS_BINTIME;
+ }
+
+ switch (ts_clock) {
+ case SO_TS_REALTIME_MICRO:
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP)) {
mbuf_tstmp2timespec(m, &ts1);
- timespec2bintime(&ts1, &t.bt);
+ timespec2bintime(&ts1, &bt1);
getboottimebin(&boottimebin);
- bintime_add(&t.bt, &boottimebin);
+ bintime_add(&bt1, &boottimebin);
+ bintime2timeval(&bt1, &t.tv);
} else {
- bintime(&t.bt);
+ microtime(&t.tv);
}
- *mp = sbcreatecontrol(&t.bt, sizeof(t.bt), SCM_BINTIME,
- SOL_SOCKET, M_NOWAIT);
+ *mp = sbcreatecontrol(&t.tv, sizeof(t.tv),
+ SCM_TIMESTAMP, SOL_SOCKET, M_NOWAIT);
if (*mp != NULL) {
mp = &(*mp)->m_next;
stamped = true;
}
break;
+ case SO_TS_BINTIME:
+ break;
+
case SO_TS_REALTIME:
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP)) {
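After this reordering, SCM_BINTIME control data is produced first whenever SO_BINTIME is set (or SO_TIMESTAMP with the SO_TS_BINTIME clock), and the other timestamp formats are suppressed unless SO_TIMESTAMP is also set. A sketch of a consumer pulling the bintime out of the control-message chain, assuming an already bound UDP socket:

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <string.h>

static ssize_t
recv_with_bintime(int fd, void *buf, size_t len, struct bintime *bt)
{
	char cbuf[CMSG_SPACE(sizeof(struct bintime))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	int on = 1;
	ssize_t n;

	/* Ask for SCM_BINTIME on each received datagram. */
	(void)setsockopt(fd, SOL_SOCKET, SO_BINTIME, &on, sizeof(on));
	if ((n = recvmsg(fd, &msg, 0)) == -1)
		return (-1);
	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c != NULL;
	    c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_BINTIME)
			memcpy(bt, CMSG_DATA(c), sizeof(*bt));
	return (n);
}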
diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c
index ed71c58fffbe..6299ce6e146b 100644
--- a/sys/netinet6/ip6_output.c
+++ b/sys/netinet6/ip6_output.c
@@ -145,11 +145,10 @@ static int ip6_insertfraghdr(struct mbuf *, struct mbuf *, int,
struct ip6_frag **);
static int ip6_insert_jumboopt(struct ip6_exthdrs *, u_int32_t);
static int ip6_splithdr(struct mbuf *, struct ip6_exthdrs *);
-static int ip6_getpmtu(struct route_in6 *, int,
- struct ifnet *, const struct in6_addr *, u_long *, int *, u_int,
- u_int);
-static int ip6_calcmtu(struct ifnet *, const struct in6_addr *, u_long,
- u_long *, int *, u_int);
+static void ip6_getpmtu(struct route_in6 *, int,
+ struct ifnet *, const struct in6_addr *, u_long *, u_int, u_int);
+static void ip6_calcmtu(struct ifnet *, const struct in6_addr *, u_long,
+ u_long *, u_int);
static int ip6_getpmtu_ctl(u_int, const struct in6_addr *, u_long *);
static int copypktopts(struct ip6_pktopts *, struct ip6_pktopts *, int);
@@ -418,7 +417,7 @@ ip6_output(struct mbuf *m0, struct ip6_pktopts *opt,
int vlan_pcp = -1;
struct in6_ifaddr *ia = NULL;
u_long mtu;
- int alwaysfrag, dontfrag;
+ int dontfrag;
u_int32_t optlen, plen = 0, unfragpartlen;
struct ip6_exthdrs exthdrs;
struct in6_addr src0, dst0;
@@ -939,12 +938,10 @@ nonh6lookup:
*ifpp = ifp;
/* Determine path MTU. */
- if ((error = ip6_getpmtu(ro_pmtu, ro != ro_pmtu, ifp, &ip6->ip6_dst,
- &mtu, &alwaysfrag, fibnum, *nexthdrp)) != 0)
- goto bad;
- KASSERT(mtu > 0, ("%s:%d: mtu %ld, ro_pmtu %p ro %p ifp %p "
- "alwaysfrag %d fibnum %u\n", __func__, __LINE__, mtu, ro_pmtu, ro,
- ifp, alwaysfrag, fibnum));
+ ip6_getpmtu(ro_pmtu, ro != ro_pmtu, ifp, &ip6->ip6_dst, &mtu, fibnum,
+ *nexthdrp);
+ KASSERT(mtu > 0, ("%s:%d: mtu %ld, ro_pmtu %p ro %p ifp %p fibnum %u",
+ __func__, __LINE__, mtu, ro_pmtu, ro, ifp, fibnum));
/*
* The caller of this function may specify to use the minimum MTU
@@ -1121,20 +1118,13 @@ passout:
* Send the packet to the outgoing interface.
* If necessary, do IPv6 fragmentation before sending.
*
- * The logic here is rather complex:
- * 1: normal case (dontfrag == 0, alwaysfrag == 0)
+ * 1: normal case (dontfrag == 0)
* 1-a: send as is if tlen <= path mtu
* 1-b: fragment if tlen > path mtu
*
* 2: if user asks us not to fragment (dontfrag == 1)
* 2-a: send as is if tlen <= interface mtu
* 2-b: error if tlen > interface mtu
- *
- * 3: if we always need to attach fragment header (alwaysfrag == 1)
- * always fragment
- *
- * 4: if dontfrag == 1 && alwaysfrag == 1
- * error, as we cannot handle this conflicting request.
*/
sw_csum = m->m_pkthdr.csum_flags;
if (!hdrsplit) {
@@ -1157,14 +1147,9 @@ passout:
dontfrag = 1;
else
dontfrag = 0;
- if (dontfrag && alwaysfrag) { /* Case 4. */
- /* Conflicting request - can't transmit. */
- error = EMSGSIZE;
- goto bad;
- }
if (dontfrag && tlen > IN6_LINKMTU(ifp) && !tso) { /* Case 2-b. */
/*
- * Even if the DONTFRAG option is specified, we cannot send the
+ * If the DONTFRAG option is specified, we cannot send the
* packet when the data length is larger than the MTU of the
* outgoing interface.
* Notify the error by sending IPV6_PATHMTU ancillary data if
@@ -1178,7 +1163,7 @@ passout:
}
/* Transmit packet without fragmentation. */
- if (dontfrag || (!alwaysfrag && tlen <= mtu)) { /* Cases 1-a and 2-a. */
+ if (dontfrag || tlen <= mtu) { /* Cases 1-a and 2-a. */
struct in6_ifaddr *ia6;
ip6 = mtod(m, struct ip6_hdr *);
@@ -1194,7 +1179,7 @@ passout:
goto done;
}
- /* Try to fragment the packet. Cases 1-b and 3. */
+ /* Try to fragment the packet. Case 1-b. */
if (mtu < IPV6_MMTU) {
/* Path MTU cannot be less than IPV6_MMTU. */
error = EMSGSIZE;
@@ -1478,9 +1463,10 @@ ip6_getpmtu_ctl(u_int fibnum, const struct in6_addr *dst, u_long *mtup)
NET_EPOCH_ENTER(et);
nh = fib6_lookup(fibnum, &kdst, scopeid, NHR_NONE, 0);
- if (nh != NULL)
- error = ip6_calcmtu(nh->nh_ifp, dst, nh->nh_mtu, mtup, NULL, 0);
- else
+ if (nh != NULL) {
+ ip6_calcmtu(nh->nh_ifp, dst, nh->nh_mtu, mtup, 0);
+ error = 0;
+ } else
error = EHOSTUNREACH;
NET_EPOCH_EXIT(et);
@@ -1494,13 +1480,12 @@ ip6_getpmtu_ctl(u_int fibnum, const struct in6_addr *dst, u_long *mtup)
* inside @ro_pmtu to avoid subsequent route lookups after packet
* filter processing.
*
- * Stores mtu and always-frag value into @mtup and @alwaysfragp.
- * Returns 0 on success.
+ * Stores mtu into @mtup.
*/
-static int
+static void
ip6_getpmtu(struct route_in6 *ro_pmtu, int do_lookup,
struct ifnet *ifp, const struct in6_addr *dst, u_long *mtup,
- int *alwaysfragp, u_int fibnum, u_int proto)
+ u_int fibnum, u_int proto)
{
struct nhop_object *nh;
struct in6_addr kdst;
@@ -1544,65 +1529,41 @@ ip6_getpmtu(struct route_in6 *ro_pmtu, int do_lookup,
if (ro_pmtu != NULL && ro_pmtu->ro_nh != NULL)
mtu = ro_pmtu->ro_nh->nh_mtu;
- return (ip6_calcmtu(ifp, dst, mtu, mtup, alwaysfragp, proto));
+ ip6_calcmtu(ifp, dst, mtu, mtup, proto);
}
/*
* Calculate MTU based on transmit @ifp, route mtu @rt_mtu and
* hostcache data for @dst.
- * Stores mtu and always-frag value into @mtup and @alwaysfragp.
- *
- * Returns 0 on success.
+ * Stores mtu into @mtup.
*/
-static int
+static void
ip6_calcmtu(struct ifnet *ifp, const struct in6_addr *dst, u_long rt_mtu,
- u_long *mtup, int *alwaysfragp, u_int proto)
+ u_long *mtup, u_int proto)
{
u_long mtu = 0;
- int alwaysfrag = 0;
- int error = 0;
if (rt_mtu > 0) {
- u_int32_t ifmtu;
- struct in_conninfo inc;
-
- bzero(&inc, sizeof(inc));
- inc.inc_flags |= INC_ISIPV6;
- inc.inc6_faddr = *dst;
-
- ifmtu = IN6_LINKMTU(ifp);
+ /* Skip the hostcache if the protocol handles PMTU changes. */
+ if (proto != IPPROTO_TCP && proto != IPPROTO_SCTP) {
+ struct in_conninfo inc = {
+ .inc_flags = INC_ISIPV6,
+ .inc6_faddr = *dst,
+ };
- /* TCP is known to react to pmtu changes so skip hc */
- if (proto != IPPROTO_TCP)
mtu = tcp_hc_getmtu(&inc);
+ }
if (mtu)
mtu = min(mtu, rt_mtu);
else
mtu = rt_mtu;
- if (mtu == 0)
- mtu = ifmtu;
- else if (mtu < IPV6_MMTU) {
- /*
- * RFC2460 section 5, last paragraph:
- * if we record ICMPv6 too big message with
- * mtu < IPV6_MMTU, transmit packets sized IPV6_MMTU
- * or smaller, with framgent header attached.
- * (fragment header is needed regardless from the
- * packet size, for translators to identify packets)
- */
- alwaysfrag = 1;
- mtu = IPV6_MMTU;
- }
- } else if (ifp) {
+ }
+
+ if (mtu == 0)
mtu = IN6_LINKMTU(ifp);
- } else
- error = EHOSTUNREACH; /* XXX */
*mtup = mtu;
- if (alwaysfragp)
- *alwaysfragp = alwaysfrag;
- return (error);
}
/*
diff --git a/sys/netinet6/ip6_var.h b/sys/netinet6/ip6_var.h
index 12b00d4f9934..db1631736c4a 100644
--- a/sys/netinet6/ip6_var.h
+++ b/sys/netinet6/ip6_var.h
@@ -338,8 +338,20 @@ VNET_DECLARE(int, ip6_use_tempaddr); /* Whether to use temporary addresses */
VNET_DECLARE(int, ip6_prefer_tempaddr); /* Whether to prefer temporary
* addresses in the source address
* selection */
+VNET_DECLARE(bool, ip6_use_stableaddr); /* Whether to use stable address generation (RFC 7217) */
#define V_ip6_use_tempaddr VNET(ip6_use_tempaddr)
#define V_ip6_prefer_tempaddr VNET(ip6_prefer_tempaddr)
+#define V_ip6_use_stableaddr VNET(ip6_use_stableaddr)
+
+#define IP6_IDGEN_RETRIES 3 /* RFC 7217 section 7 default max retries */
+VNET_DECLARE(u_int, ip6_stableaddr_maxretries);
+#define V_ip6_stableaddr_maxretries VNET(ip6_stableaddr_maxretries)
+
+#define IP6_STABLEADDR_NETIFSRC_NAME 0
+#define IP6_STABLEADDR_NETIFSRC_ID 1
+#define IP6_STABLEADDR_NETIFSRC_MAC 2
+VNET_DECLARE(int, ip6_stableaddr_netifsource);
+#define V_ip6_stableaddr_netifsource VNET(ip6_stableaddr_netifsource)
VNET_DECLARE(int, ip6_use_defzone); /* Whether to use the default scope
* zone when unspecified */
@@ -428,6 +440,8 @@ int in6_selectsrc_socket(struct sockaddr_in6 *, struct ip6_pktopts *,
struct inpcb *, struct ucred *, int, struct in6_addr *, int *);
int in6_selectsrc_addr(uint32_t, const struct in6_addr *,
uint32_t, struct ifnet *, struct in6_addr *, int *);
+int in6_selectsrc_nbr(uint32_t, const struct in6_addr *,
+ struct ip6_moptions *, struct ifnet *, struct in6_addr *);
int in6_selectroute(struct sockaddr_in6 *, struct ip6_pktopts *,
struct ip6_moptions *, struct route_in6 *, struct ifnet **,
struct nhop_object **, u_int, uint32_t);
diff --git a/sys/netinet6/nd6.c b/sys/netinet6/nd6.c
index 8480e7fc90e3..00df5efcef92 100644
--- a/sys/netinet6/nd6.c
+++ b/sys/netinet6/nd6.c
@@ -324,6 +324,11 @@ nd6_ifattach(struct ifnet *ifp)
/* XXX: we cannot call nd6_setmtu since ifp is not fully initialized */
nd6_setmtu0(ifp, nd);
+ /* Default to the stable address algorithm; skip the loopback interface. */
+ if (V_ip6_use_stableaddr && !(ifp->if_flags & IFF_LOOPBACK))
+ nd->flags |= ND6_IFF_STABLEADDR;
+
return nd;
}
diff --git a/sys/netinet6/nd6.h b/sys/netinet6/nd6.h
index 9cb2571da58b..e484c709e29a 100644
--- a/sys/netinet6/nd6.h
+++ b/sys/netinet6/nd6.h
@@ -89,6 +89,7 @@ struct nd_ifinfo {
#define ND6_IFF_NO_RADR 0x40
#define ND6_IFF_NO_PREFER_IFACE 0x80 /* XXX: not related to ND. */
#define ND6_IFF_NO_DAD 0x100
+#define ND6_IFF_STABLEADDR 0x800
#ifdef EXPERIMENTAL
/* XXX: not related to ND. */
#define ND6_IFF_IPV6_ONLY 0x200 /* draft-ietf-6man-ipv6only-flag */
@@ -170,6 +171,10 @@ struct in6_ndifreq {
#define NDPRF_ONLINK 0x1
#define NDPRF_DETACHED 0x2
+/* ND6 NA output flags */
+#define ND6_NA_OPT_LLA 0x01
+#define ND6_NA_CARP_MASTER 0x02
+
/* protocol constants */
#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
diff --git a/sys/netinet6/nd6_nbr.c b/sys/netinet6/nd6_nbr.c
index 640348a1d198..29151b29a071 100644
--- a/sys/netinet6/nd6_nbr.c
+++ b/sys/netinet6/nd6_nbr.c
@@ -38,6 +38,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/counter.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
@@ -76,6 +77,8 @@
#include <netinet/ip_carp.h>
#include <netinet6/send.h>
+#include <machine/atomic.h>
+
#define SDL(s) ((struct sockaddr_dl *)s)
struct dadq;
@@ -244,10 +247,9 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
* In implementation, we add target link-layer address by default.
* We do not add one in MUST NOT cases.
*/
- if (!IN6_IS_ADDR_MULTICAST(&daddr6))
- tlladdr = 0;
- else
- tlladdr = 1;
+ tlladdr = 0;
+ if (IN6_IS_ADDR_MULTICAST(&daddr6))
+ tlladdr |= ND6_NA_OPT_LLA;
/*
* Target address (taddr6) must be either:
@@ -256,9 +258,11 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
* (3) "tentative" address on which DAD is being performed.
*/
/* (1) and (3) check. */
- if (ifp->if_carp)
+ if (ifp->if_carp) {
ifa = (*carp_iamatch6_p)(ifp, &taddr6);
- else
+ if (ifa != NULL)
+ tlladdr |= ND6_NA_CARP_MASTER;
+ } else
ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6);
/* (2) check. */
@@ -322,32 +326,28 @@ nd6_ns_input(struct mbuf *m, int off, int icmp6len)
}
/*
+ * If the Target Address is either an anycast address or a unicast
+ * address for which the node is providing proxy service, or the Target
+ * Link-Layer Address option is not included, the Override flag SHOULD
+ * be set to zero. Otherwise, the Override flag SHOULD be set to one.
+ */
+ if (anycast == 0 && proxy == 0 && (tlladdr & ND6_NA_OPT_LLA) != 0)
+ rflag |= ND_NA_FLAG_OVERRIDE;
+ /*
* If the source address is unspecified address, entries must not
* be created or updated.
- * It looks that sender is performing DAD. Output NA toward
- * all-node multicast address, to tell the sender that I'm using
- * the address.
+ * It looks that sender is performing DAD. nd6_na_output() will
+ * send NA toward all-node multicast address, to tell the sender
+ * that I'm using the address.
* S bit ("solicited") must be zero.
*/
- if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
- struct in6_addr in6_all;
-
- in6_all = in6addr_linklocal_allnodes;
- if (in6_setscope(&in6_all, ifp, NULL) != 0)
- goto bad;
- nd6_na_output_fib(ifp, &in6_all, &taddr6,
- ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) |
- rflag, tlladdr, proxy ? (struct sockaddr *)&proxydl : NULL,
- M_GETFIB(m));
- goto freeit;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
+ nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen,
+ ND_NEIGHBOR_SOLICIT, 0);
+ rflag |= ND_NA_FLAG_SOLICITED;
}
- nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen,
- ND_NEIGHBOR_SOLICIT, 0);
-
- nd6_na_output_fib(ifp, &saddr6, &taddr6,
- ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) |
- rflag | ND_NA_FLAG_SOLICITED, tlladdr,
+ nd6_na_output_fib(ifp, &saddr6, &taddr6, rflag, tlladdr,
proxy ? (struct sockaddr *)&proxydl : NULL, M_GETFIB(m));
freeit:
if (ifa != NULL)
@@ -439,13 +439,6 @@ nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *saddr6,
return;
M_SETFIB(m, fibnum);
- if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) {
- m->m_flags |= M_MCAST;
- im6o.im6o_multicast_ifp = ifp;
- im6o.im6o_multicast_hlim = 255;
- im6o.im6o_multicast_loop = 0;
- }
-
icmp6len = sizeof(*nd_ns);
m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len;
m->m_data += max_linkhdr; /* or M_ALIGN() equivalent? */
@@ -470,6 +463,12 @@ nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *saddr6,
if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0)
goto bad;
}
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ m->m_flags |= M_MCAST;
+ im6o.im6o_multicast_ifp = ifp;
+ im6o.im6o_multicast_hlim = 255;
+ im6o.im6o_multicast_loop = 0;
+ }
if (nonce == NULL) {
char ip6buf[INET6_ADDRSTRLEN];
struct ifaddr *ifa = NULL;
@@ -491,20 +490,16 @@ nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *saddr6,
ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, saddr6);
if (ifa == NULL) {
int error;
- struct in6_addr dst6, src6;
- uint32_t scopeid;
- in6_splitscope(&ip6->ip6_dst, &dst6, &scopeid);
- error = in6_selectsrc_addr(fibnum, &dst6,
- scopeid, ifp, &src6, NULL);
+ error = in6_selectsrc_nbr(fibnum, &ip6->ip6_dst, &im6o,
+ ifp, &ip6->ip6_src);
if (error) {
nd6log((LOG_DEBUG, "%s: source can't be "
"determined: dst=%s, error=%d\n", __func__,
- ip6_sprintf(ip6buf, &dst6),
+ ip6_sprintf(ip6buf, &ip6->ip6_dst),
error));
goto bad;
}
- ip6->ip6_src = src6;
} else
ip6->ip6_src = *saddr6;
@@ -967,7 +962,9 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len)
* - proxy advertisement delay rule (RFC2461 7.2.8, last paragraph, SHOULD)
* - anycast advertisement delay rule (RFC2461 7.2.7, SHOULD)
*
- * tlladdr - 1 if include target link-layer address
+ * tlladdr:
+ * - ND6_NA_OPT_LLA (0x01): include the target link-layer address option
+ * - ND6_NA_CARP_MASTER (0x02): the target address is a CARP master
* sdl0 - sockaddr_dl (= proxy NA) or NULL
*/
static void
@@ -980,8 +977,7 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
struct ip6_hdr *ip6;
struct nd_neighbor_advert *nd_na;
struct ip6_moptions im6o;
- struct in6_addr daddr6, dst6, src6;
- uint32_t scopeid;
+ struct in6_addr daddr6;
NET_EPOCH_ASSERT();
@@ -1005,13 +1001,6 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
return;
M_SETFIB(m, fibnum);
- if (IN6_IS_ADDR_MULTICAST(&daddr6)) {
- m->m_flags |= M_MCAST;
- im6o.im6o_multicast_ifp = ifp;
- im6o.im6o_multicast_hlim = 255;
- im6o.im6o_multicast_loop = 0;
- }
-
icmp6len = sizeof(*nd_na);
m->m_pkthdr.len = m->m_len = sizeof(struct ip6_hdr) + icmp6len;
m->m_data += max_linkhdr; /* or M_ALIGN() equivalent? */
@@ -1023,26 +1012,24 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
ip6->ip6_vfc |= IPV6_VERSION;
ip6->ip6_nxt = IPPROTO_ICMPV6;
ip6->ip6_hlim = 255;
+
if (IN6_IS_ADDR_UNSPECIFIED(&daddr6)) {
/* reply to DAD */
- daddr6.s6_addr16[0] = IPV6_ADDR_INT16_MLL;
- daddr6.s6_addr16[1] = 0;
- daddr6.s6_addr32[1] = 0;
- daddr6.s6_addr32[2] = 0;
- daddr6.s6_addr32[3] = IPV6_ADDR_INT32_ONE;
+ daddr6 = in6addr_linklocal_allnodes;
if (in6_setscope(&daddr6, ifp, NULL))
goto bad;
flags &= ~ND_NA_FLAG_SOLICITED;
}
- ip6->ip6_dst = daddr6;
+ if (IN6_IS_ADDR_MULTICAST(&daddr6)) {
+ m->m_flags |= M_MCAST;
+ im6o.im6o_multicast_ifp = ifp;
+ im6o.im6o_multicast_hlim = 255;
+ im6o.im6o_multicast_loop = 0;
+ }
- /*
- * Select a source whose scope is the same as that of the dest.
- */
- in6_splitscope(&daddr6, &dst6, &scopeid);
- error = in6_selectsrc_addr(fibnum, &dst6,
- scopeid, ifp, &src6, NULL);
+ ip6->ip6_dst = daddr6;
+ error = in6_selectsrc_nbr(fibnum, &daddr6, &im6o, ifp, &ip6->ip6_src);
if (error) {
char ip6buf[INET6_ADDRSTRLEN];
nd6log((LOG_DEBUG, "nd6_na_output: source can't be "
@@ -1050,7 +1037,6 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
ip6_sprintf(ip6buf, &daddr6), error));
goto bad;
}
- ip6->ip6_src = src6;
nd_na = (struct nd_neighbor_advert *)(ip6 + 1);
nd_na->nd_na_type = ND_NEIGHBOR_ADVERT;
nd_na->nd_na_code = 0;
@@ -1058,20 +1044,24 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
in6_clearscope(&nd_na->nd_na_target); /* XXX */
/*
+ * If we respond from CARP address, we need to prepare mac address
+ * for carp_output().
+ */
+ if (ifp->if_carp && (tlladdr & ND6_NA_CARP_MASTER))
+ mac = (*carp_macmatch6_p)(ifp, m, taddr6);
+ /*
* "tlladdr" indicates NS's condition for adding tlladdr or not.
* see nd6_ns_input() for details.
* Basically, if NS packet is sent to unicast/anycast addr,
* target lladdr option SHOULD NOT be included.
*/
- if (tlladdr) {
+ if (tlladdr & ND6_NA_OPT_LLA) {
/*
* sdl0 != NULL indicates proxy NA. If we do proxy, use
* lladdr in sdl0. If we are not proxying (sending NA for
* my address) use lladdr configured for the interface.
*/
if (sdl0 == NULL) {
- if (ifp->if_carp)
- mac = (*carp_macmatch6_p)(ifp, m, taddr6);
if (mac == NULL)
mac = nd6_ifptomac(ifp);
} else if (sdl0->sa_family == AF_LINK) {
@@ -1081,7 +1071,7 @@ nd6_na_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6_0,
mac = LLADDR(sdl);
}
}
- if (tlladdr && mac) {
+ if ((tlladdr & ND6_NA_OPT_LLA) && mac != NULL) {
int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen;
struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_na + 1);
@@ -1466,9 +1456,14 @@ nd6_dad_timer(void *arg)
* No duplicate address found. Check IFDISABLED flag
* again in case that it is changed between the
* beginning of this function and here.
+ *
+ * Reset DAD failures counter if using stable addresses.
*/
- if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) == 0)
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) == 0) {
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY))
+ atomic_store_int(&DAD_FAILURES(ifp), 0);
+ }
nd6log((LOG_DEBUG,
"%s: DAD complete for %s - no duplicates found\n",
@@ -1497,20 +1492,39 @@ nd6_dad_duplicated(struct ifaddr *ifa, struct dadq *dp)
struct ifnet *ifp;
char ip6buf[INET6_ADDRSTRLEN];
+ ifp = ifa->ifa_ifp;
+
log(LOG_ERR, "%s: DAD detected duplicate IPv6 address %s: "
"NS in/out/loopback=%d/%d/%d, NA in=%d\n",
- if_name(ifa->ifa_ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ if_name(ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
dp->dad_ns_icount, dp->dad_ns_ocount, dp->dad_ns_lcount,
dp->dad_na_icount);
ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
ia->ia6_flags |= IN6_IFF_DUPLICATED;
- ifp = ifa->ifa_ifp;
log(LOG_ERR, "%s: DAD complete for %s - duplicate found\n",
if_name(ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr));
- log(LOG_ERR, "%s: manual intervention required\n",
- if_name(ifp));
+
+ /*
+ * For RFC 7217 stable addresses, increment failure counter here if we still have retries.
+ * More addresses will be generated as long as retries are not exhausted.
+ */
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) && !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
+ u_int dad_failures = atomic_load_int(&DAD_FAILURES(ifp));
+
+ if (dad_failures <= V_ip6_stableaddr_maxretries) {
+ atomic_add_int(&DAD_FAILURES(ifp), 1);
+ /* if retries exhausted, output an informative error message */
+ if (dad_failures == V_ip6_stableaddr_maxretries)
+ log(LOG_ERR, "%s: manual intervention required, consider disabling \"stableaddr\" on the interface"
+ " or checking hostuuid for uniqueness\n",
+ if_name(ifp));
+ }
+ } else {
+ log(LOG_ERR, "%s: manual intervention required\n",
+ if_name(ifp));
+ }
/*
* If the address is a link-local address formed from an interface
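The accounting above implements a bounded-retry rule: the per-interface counter is bumped on each duplicate until the configured maximum is exceeded, at which point address generation stops. A compact illustration of the same rule using C11 atomics in place of the kernel's atomic(9) operations (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true if another stable address may still be generated. */
static bool
dad_failure_allows_retry(atomic_uint *failures, unsigned maxretries)
{
	unsigned n = atomic_load(failures);

	if (n > maxretries)
		return (false);		/* retries already exhausted */
	atomic_fetch_add(failures, 1);
	return (n < maxretries);	/* n == maxretries was the last try */
}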
diff --git a/sys/netinet6/nd6_rtr.c b/sys/netinet6/nd6_rtr.c
index b9af0a78a584..10f0342f2bc4 100644
--- a/sys/netinet6/nd6_rtr.c
+++ b/sys/netinet6/nd6_rtr.c
@@ -74,6 +74,8 @@
#include <netinet/icmp6.h>
#include <netinet6/scope6_var.h>
+#include <machine/atomic.h>
+
static struct nd_defrouter *defrtrlist_update(struct nd_defrouter *);
static int prelist_update(struct nd_prefixctl *, struct nd_defrouter *,
struct mbuf *, int);
@@ -92,6 +94,7 @@ VNET_DEFINE(int, nd6_defifindex);
#define V_nd6_defifp VNET(nd6_defifp)
VNET_DEFINE(int, ip6_use_tempaddr) = 0;
+VNET_DEFINE(bool, ip6_use_stableaddr) = false;
VNET_DEFINE(int, ip6_desync_factor);
VNET_DEFINE(uint32_t, ip6_temp_max_desync_factor) = TEMP_MAX_DESYNC_FACTOR_BASE;
@@ -1182,9 +1185,9 @@ in6_ifadd(struct nd_prefixctl *pr, int mcast)
struct ifnet *ifp = pr->ndpr_ifp;
struct ifaddr *ifa;
struct in6_aliasreq ifra;
- struct in6_ifaddr *ia, *ib;
+ struct in6_ifaddr *ia = NULL, *ib = NULL;
int error, plen0;
- struct in6_addr mask;
+ struct in6_addr *ifid_addr = NULL, mask, newaddr;
int prefixlen = pr->ndpr_plen;
int updateflags;
char ip6buf[INET6_ADDRSTRLEN];
@@ -1210,37 +1213,69 @@ in6_ifadd(struct nd_prefixctl *pr, int mcast)
* (4) it is easier to manage when an interface has addresses
* with the same interface identifier, than to have multiple addresses
* with different interface identifiers.
+ *
+ * If using stable privacy generation, generate a new address with
+ * the algorithm specified in RFC 7217 section 5
*/
- ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0); /* 0 is OK? */
- if (ifa)
- ib = (struct in6_ifaddr *)ifa;
- else
- return NULL;
-
- /* prefixlen + ifidlen must be equal to 128 */
- plen0 = in6_mask2len(&ib->ia_prefixmask.sin6_addr, NULL);
- if (prefixlen != plen0) {
- ifa_free(ifa);
- nd6log((LOG_INFO,
- "%s: wrong prefixlen for %s (prefix=%d ifid=%d)\n",
- __func__, if_name(ifp), prefixlen, 128 - plen0));
- return NULL;
- }
/* make ifaddr */
in6_prepare_ifra(&ifra, &pr->ndpr_prefix.sin6_addr, &mask);
+ if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR) {
+ memcpy(&newaddr, &pr->ndpr_prefix.sin6_addr, sizeof(pr->ndpr_prefix.sin6_addr));
+
+ if (!in6_get_stableifid(ifp, &newaddr, prefixlen))
+ return NULL;
+ } else {
+ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0); /* 0 is OK? */
+ if (ifa) {
+ ib = (struct in6_ifaddr *)ifa;
+ ifid_addr = &ib->ia_addr.sin6_addr;
+
+ /* prefixlen + ifidlen must be equal to 128 */
+ plen0 = in6_mask2len(&ib->ia_prefixmask.sin6_addr, NULL);
+ if (prefixlen != plen0) {
+ ifa_free(ifa);
+ ifid_addr = NULL;
+ nd6log((LOG_DEBUG,
+ "%s: wrong prefixlen for %s (prefix=%d ifid=%d)\n",
+ __func__, if_name(ifp), prefixlen, 128 - plen0));
+ }
+ }
+
+ /* No suitable LL address, get the ifid directly */
+ if (ifid_addr == NULL) {
+ ifa = ifa_alloc(sizeof(struct in6_ifaddr), M_NOWAIT);
+ if (ifa != NULL) {
+ ib = (struct in6_ifaddr *)ifa;
+ ifid_addr = &ib->ia_addr.sin6_addr;
+ if (in6_get_ifid(ifp, NULL, ifid_addr) != 0) {
+ nd6log((LOG_DEBUG,
+ "%s: failed to get ifid for %s\n",
+ __func__, if_name(ifp)));
+ ifa_free(ifa);
+ ifid_addr = NULL;
+ }
+ }
+ }
+
+ if (ifid_addr == NULL) {
+ nd6log((LOG_INFO,
+ "%s: could not determine ifid for %s\n",
+ __func__, if_name(ifp)));
+ return NULL;
+ }
+
+ memcpy(&newaddr, &ib->ia_addr.sin6_addr, sizeof(ib->ia_addr.sin6_addr));
+ ifa_free(ifa);
+ }
+
IN6_MASK_ADDR(&ifra.ifra_addr.sin6_addr, &mask);
/* interface ID */
- ifra.ifra_addr.sin6_addr.s6_addr32[0] |=
- (ib->ia_addr.sin6_addr.s6_addr32[0] & ~mask.s6_addr32[0]);
- ifra.ifra_addr.sin6_addr.s6_addr32[1] |=
- (ib->ia_addr.sin6_addr.s6_addr32[1] & ~mask.s6_addr32[1]);
- ifra.ifra_addr.sin6_addr.s6_addr32[2] |=
- (ib->ia_addr.sin6_addr.s6_addr32[2] & ~mask.s6_addr32[2]);
- ifra.ifra_addr.sin6_addr.s6_addr32[3] |=
- (ib->ia_addr.sin6_addr.s6_addr32[3] & ~mask.s6_addr32[3]);
- ifa_free(ifa);
+ ifra.ifra_addr.sin6_addr.s6_addr32[0] |= (newaddr.s6_addr32[0] & ~mask.s6_addr32[0]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[1] |= (newaddr.s6_addr32[1] & ~mask.s6_addr32[1]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] |= (newaddr.s6_addr32[2] & ~mask.s6_addr32[2]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] |= (newaddr.s6_addr32[3] & ~mask.s6_addr32[3]);
/* lifetimes. */
ifra.ifra_lifetime.ia6t_vltime = pr->ndpr_vltime;
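For the common /64 case, the mask-and-or merge performed above amounts to taking the upper half from the advertised prefix and the lower half from the chosen identifier; a sketch (the kernel form handles arbitrary prefix masks):

#include <netinet/in.h>
#include <string.h>

static void
merge_prefix_iid(const struct in6_addr *prefix, const struct in6_addr *iid,
    struct in6_addr *out)
{
	memcpy(&out->s6_addr[0], &prefix->s6_addr[0], 8);	/* prefix */
	memcpy(&out->s6_addr[8], &iid->s6_addr[8], 8);		/* interface id */
}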
@@ -1471,6 +1506,7 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
int auth;
struct in6_addrlifetime lt6_tmp;
char ip6buf[INET6_ADDRSTRLEN];
+ bool has_temporary = false;
NET_EPOCH_ASSERT();
@@ -1616,9 +1652,6 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
if (ifa6->ia6_ndpr != pr)
continue;
- if (ia6_match == NULL) /* remember the first one */
- ia6_match = ifa6;
-
/*
* An already autoconfigured address matched. Now that we
* are sure there is at least one matched address, we can
@@ -1678,6 +1711,13 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
if ((ifa6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
u_int32_t maxvltime, maxpltime;
+ /*
+ * If stable addresses (RFC 7217) are enabled, mark that a temporary address has
+ * been found, to avoid generating unneeded extra ones.
+ */
+ if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR)
+ has_temporary = true;
+
if (V_ip6_temp_valid_lifetime >
(u_int32_t)((time_uptime - ifa6->ia6_createtime) +
V_ip6_desync_factor)) {
@@ -1706,6 +1746,24 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
}
ifa6->ia6_lifetime = lt6_tmp;
ifa6->ia6_updatetime = time_uptime;
+
+ /*
+ * If using stable addresses (RFC 7217) and we still have retries to perform, ignore
+ * addresses already marked as duplicated, since a new one will be generated.
+ * Also ignore addresses marked as temporary, since their generation is orthogonal to
+ * opaque stable ones.
+ *
+ * There is a small race condition: the DAD failure counter could be incremented
+ * between here and when a new address is generated, but in that case the
+ * generation will fail and no further retries will happen.
+ */
+ if (ND_IFINFO(ifp)->flags & ND6_IFF_STABLEADDR &&
+ atomic_load_int(&DAD_FAILURES(ifp)) <= V_ip6_stableaddr_maxretries &&
+ ifa6->ia6_flags & (IN6_IFF_DUPLICATED | IN6_IFF_TEMPORARY))
+ continue;
+
+ if (ia6_match == NULL) /* remember the first one */
+ ia6_match = ifa6;
}
if (ia6_match == NULL && new->ndpr_vltime) {
int ifidlen;
@@ -1756,8 +1814,11 @@ prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
* immediately together with a new set of temporary
* addresses. Thus, we specify 1 as the 2nd arg of
* in6_tmpifadd().
+ *
+ * Skip this if a temporary address has been marked as
+ * found (happens only if stable addresses (RFC 7217) are in use).
*/
- if (V_ip6_use_tempaddr) {
+ if (V_ip6_use_tempaddr && !has_temporary) {
int e;
if ((e = in6_tmpifadd(ia6, 1, 1)) != 0) {
nd6log((LOG_NOTICE, "%s: failed to "
diff --git a/sys/netinet6/udp6_usrreq.c b/sys/netinet6/udp6_usrreq.c
index 0027cf3bd230..1a32365f5d1d 100644
--- a/sys/netinet6/udp6_usrreq.c
+++ b/sys/netinet6/udp6_usrreq.c
@@ -341,7 +341,7 @@ udp6_multi_input(struct mbuf *m, int off, int proto,
/*
* No matching pcb found; discard datagram. (No need
* to send an ICMP Port Unreachable for a broadcast
- * or multicast datgram.)
+ * or multicast datagram.)
*/
UDPSTAT_INC(udps_noport);
UDPSTAT_INC(udps_noportmcast);
diff --git a/sys/netipsec/xform_ipcomp.c b/sys/netipsec/xform_ipcomp.c
index 737d4a50098a..05a01b75e0bb 100644
--- a/sys/netipsec/xform_ipcomp.c
+++ b/sys/netipsec/xform_ipcomp.c
@@ -750,7 +750,7 @@ static struct xformsw ipcomp_xformsw = {
};
static void
-ipcomp_attach(void)
+ipcomp_attach(void *dummy __unused)
{
#ifdef INET
@@ -763,7 +763,7 @@ ipcomp_attach(void)
}
static void
-ipcomp_detach(void)
+ipcomp_detach(void *dummy __unused)
{
#ifdef INET
diff --git a/sys/netlink/netlink_io.c b/sys/netlink/netlink_io.c
index e7908d6f3a44..2391d8ea752c 100644
--- a/sys/netlink/netlink_io.c
+++ b/sys/netlink/netlink_io.c
@@ -216,16 +216,17 @@ nl_send(struct nl_writer *nw, struct nlpcb *nlp)
hdr->nlmsg_len);
}
- if (nlp->nl_linux && linux_netlink_p != NULL &&
- __predict_false(!linux_netlink_p->msgs_to_linux(nw, nlp))) {
+ if (nlp->nl_linux && linux_netlink_p != NULL) {
+ nb = linux_netlink_p->msgs_to_linux(nw->buf, nlp);
nl_buf_free(nw->buf);
nw->buf = NULL;
- return (false);
+ if (nb == NULL)
+ return (false);
+ } else {
+ nb = nw->buf;
+ nw->buf = NULL;
}
- nb = nw->buf;
- nw->buf = NULL;
-
SOCK_RECVBUF_LOCK(so);
if (!nw->ignore_limit && __predict_false(sb->sb_hiwat <= sb->sb_ccc)) {
SOCK_RECVBUF_UNLOCK(so);
diff --git a/sys/netlink/netlink_linux.h b/sys/netlink/netlink_linux.h
index d4c451d470b2..794065692901 100644
--- a/sys/netlink/netlink_linux.h
+++ b/sys/netlink/netlink_linux.h
@@ -37,7 +37,7 @@ struct nlpcb;
struct nl_pstate;
struct nl_writer;
-typedef bool msgs_to_linux_cb_t(struct nl_writer *nw, struct nlpcb *nlp);
+typedef struct nl_buf * msgs_to_linux_cb_t(struct nl_buf *, struct nlpcb *);
typedef int msg_from_linux_cb_t(int netlink_family, struct nlmsghdr **hdr,
struct nl_pstate *npt);
diff --git a/sys/netlink/netlink_message_writer.h b/sys/netlink/netlink_message_writer.h
index 83f925e8d93d..ad2099a4d636 100644
--- a/sys/netlink/netlink_message_writer.h
+++ b/sys/netlink/netlink_message_writer.h
@@ -284,6 +284,12 @@ nlattr_add_s64(struct nl_writer *nw, uint16_t attrtype, int64_t value)
}
static inline bool
+nlattr_add_time_t(struct nl_writer *nw, uint16_t attrtype, time_t value)
+{
+ return (nlattr_add(nw, attrtype, sizeof(time_t), &value));
+}
+
+static inline bool
nlattr_add_flag(struct nl_writer *nw, uint16_t attrtype)
{
return (nlattr_add(nw, attrtype, 0, NULL));
diff --git a/sys/netlink/netlink_snl.h b/sys/netlink/netlink_snl.h
index 586716776bc5..57f7e1e29d08 100644
--- a/sys/netlink/netlink_snl.h
+++ b/sys/netlink/netlink_snl.h
@@ -631,6 +631,17 @@ snl_attr_get_int64(struct snl_state *ss, struct nlattr *nla, const void *arg,
}
static inline bool
+snl_attr_get_time_t(struct snl_state *ss __unused, struct nlattr *nla,
+ const void *arg __unused, void *target)
+{
+ if (NLA_DATA_LEN(nla) == sizeof(time_t)) {
+ memcpy(target, NLA_DATA_CONST(nla), sizeof(time_t));
+ return (true);
+ }
+ return (false);
+}
+
+static inline bool
snl_attr_get_string(struct snl_state *ss __unused, struct nlattr *nla,
const void *arg __unused, void *target)
{
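The new getter slots into snl(3) attribute-parser tables like the existing ones; a sketch with a hypothetical attribute id and target structure:

#include <sys/types.h>
#include <stddef.h>
#include <netlink/netlink_snl.h>

#define MY_ATTR_CREATED 1	/* hypothetical attribute id */

struct my_obj {
	time_t created;
};

static const struct snl_attr_parser ap_my_obj[] = {
	{ .type = MY_ATTR_CREATED, .off = offsetof(struct my_obj, created),
	  .cb = snl_attr_get_time_t },
};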
@@ -1057,14 +1068,14 @@ snl_init_writer(struct snl_state *ss, struct snl_writer *nw)
{
nw->size = SNL_WRITER_BUFFER_SIZE;
nw->base = (char *)snl_allocz(ss, nw->size);
- if (nw->base == NULL) {
+ if (__predict_false(nw->base == NULL)) {
nw->error = true;
nw->size = 0;
- }
+ } else
+ nw->error = false;
nw->offset = 0;
nw->hdr = NULL;
- nw->error = false;
nw->ss = ss;
}
diff --git a/sys/netlink/route/iface.c b/sys/netlink/route/iface.c
index 8b871576d0b2..9beb80792af4 100644
--- a/sys/netlink/route/iface.c
+++ b/sys/netlink/route/iface.c
@@ -403,6 +403,7 @@ static const struct nlattr_parser nla_p_linfo[] = {
NL_DECLARE_ATTR_PARSER(linfo_parser, nla_p_linfo);
static const struct nlattr_parser nla_p_if[] = {
+ { .type = IFLA_ADDRESS, .off = _OUT(ifla_address), .cb = nlattr_get_nla },
{ .type = IFLA_IFNAME, .off = _OUT(ifla_ifname), .cb = nlattr_get_string },
{ .type = IFLA_MTU, .off = _OUT(ifla_mtu), .cb = nlattr_get_uint32 },
{ .type = IFLA_LINK, .off = _OUT(ifla_link), .cb = nlattr_get_uint32 },
diff --git a/sys/netlink/route/iface_drivers.c b/sys/netlink/route/iface_drivers.c
index 4bf913d9c978..4f1540740ead 100644
--- a/sys/netlink/route/iface_drivers.c
+++ b/sys/netlink/route/iface_drivers.c
@@ -82,26 +82,55 @@ _nl_modify_ifp_generic(struct ifnet *ifp, struct nl_parsed_link *lattrs,
}
}
- if ((lattrs->ifi_change & IFF_UP) && (lattrs->ifi_flags & IFF_UP) == 0) {
- /* Request to down the interface */
- if_down(ifp);
+ if ((lattrs->ifi_change & IFF_UP) != 0 || lattrs->ifi_change == 0) {
+ /* Request to up or down the interface */
+ if (lattrs->ifi_flags & IFF_UP)
+ if_up(ifp);
+ else
+ if_down(ifp);
}
if (lattrs->ifla_mtu > 0) {
if (nlp_has_priv(npt->nlp, PRIV_NET_SETIFMTU)) {
struct ifreq ifr = { .ifr_mtu = lattrs->ifla_mtu };
- error = ifhwioctl(SIOCSIFMTU, ifp, (char *)&ifr, curthread);
+ error = ifhwioctl(SIOCSIFMTU, ifp, (char *)&ifr,
+ curthread);
+ if (error != 0) {
+ nlmsg_report_err_msg(npt, "Failed to set mtu");
+ return (error);
+ }
} else {
nlmsg_report_err_msg(npt, "Not enough privileges to set mtu");
return (EPERM);
}
}
- if (lattrs->ifi_change & IFF_PROMISC) {
- error = ifpromisc(ifp, lattrs->ifi_flags & IFF_PROMISC);
- if (error != 0) {
- nlmsg_report_err_msg(npt, "unable to set promisc");
- return (error);
+ if ((lattrs->ifi_change & IFF_PROMISC) != 0 ||
+ lattrs->ifi_change == 0)
+ /*
+ * When asking for IFF_PROMISC, set permanent flag instead
+ * (IFF_PPROMISC) as we have no way of doing promiscuity
+ * reference counting through ifpromisc(). Every call to this
+ * function either sets or unsets IFF_PROMISC, and ifi_change
+ * is usually set to 0xFFFFFFFF.
+ */
+ if_setppromisc(ifp, (lattrs->ifi_flags & IFF_PROMISC) != 0);
+
+ if (lattrs->ifla_address != NULL) {
+ if (nlp_has_priv(npt->nlp, PRIV_NET_SETIFMAC)) {
+ error = if_setlladdr(ifp,
+ NLA_DATA(lattrs->ifla_address),
+ NLA_DATA_LEN(lattrs->ifla_address));
+ if (error != 0) {
+ nlmsg_report_err_msg(npt,
+ "setting IFLA_ADDRESS failed with error code: %d",
+ error);
+ return (error);
+ }
+ } else {
+ nlmsg_report_err_msg(npt,
+ "Not enough privileges to set IFLA_ADDRESS");
+ return (EPERM);
}
}
diff --git a/sys/netlink/route/route_var.h b/sys/netlink/route/route_var.h
index b84b34461e35..41f110038b54 100644
--- a/sys/netlink/route/route_var.h
+++ b/sys/netlink/route/route_var.h
@@ -69,6 +69,7 @@ struct nl_parsed_link {
char *ifla_cloner;
char *ifla_ifalias;
struct nlattr *ifla_idata;
+ struct nlattr *ifla_address;
unsigned short ifi_type;
int ifi_index;
uint32_t ifla_link;
diff --git a/sys/netpfil/ipfw/ip_dn_io.c b/sys/netpfil/ipfw/ip_dn_io.c
index 03116cb0641c..3a8de2b2bfee 100644
--- a/sys/netpfil/ipfw/ip_dn_io.c
+++ b/sys/netpfil/ipfw/ip_dn_io.c
@@ -43,6 +43,7 @@
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
+#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>
@@ -70,6 +71,9 @@
#endif
#include <netpfil/ipfw/dn_sched.h>
+SDT_PROVIDER_DEFINE(dummynet);
+SDT_PROBE_DEFINE2(dummynet, , , drop, "struct mbuf *", "struct dn_queue *");
+
/*
* We keep a private variable for the simulation time, but we could
* probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
@@ -545,6 +549,7 @@ dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop)
drop:
V_dn_cfg.io_pkt_drop++;
+ SDT_PROBE2(dummynet, , , drop, m, q);
q->ni.drops++;
ni->drops++;
FREE_PKT(m);
@@ -1001,6 +1006,7 @@ done:
dropit:
V_dn_cfg.io_pkt_drop++;
+ SDT_PROBE2(dummynet, , , drop, m, q);
DN_BH_WUNLOCK();
if (m)
FREE_PKT(m);
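With the provider compiled in, dummynet drops become observable from userland without further instrumentation; a one-liner along these lines (a sketch, assuming the probe as defined above) fires on each drop and prints the mbuf and queue pointers:

dtrace -n 'dummynet:::drop { printf("m=%p q=%p", arg0, arg1); }'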
diff --git a/sys/netpfil/ipfw/ip_dummynet.c b/sys/netpfil/ipfw/ip_dummynet.c
index b3f52322425f..d522f9da0fbe 100644
--- a/sys/netpfil/ipfw/ip_dummynet.c
+++ b/sys/netpfil/ipfw/ip_dummynet.c
@@ -1150,7 +1150,7 @@ copy_data_helper(void *_o, void *_arg)
return 0; /* not a pipe */
/* see if the object is within one of our ranges */
- for (;r < lim; r += 2) {
+ for (; r < lim; r += 2) {
if (n < r[0] || n > r[1])
continue;
/* Found a valid entry, copy and we are done */
@@ -1183,7 +1183,7 @@ copy_data_helper(void *_o, void *_arg)
if (n >= DN_MAX_ID)
return 0;
/* see if the object is within one of our ranges */
- for (;r < lim; r += 2) {
+ for (; r < lim; r += 2) {
if (n < r[0] || n > r[1])
continue;
if (copy_flowset(a, fs, 0))
diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 3f810533b7fc..d15d7760d7f1 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -67,6 +67,7 @@
#include <net/route/nhop.h>
#include <net/pfil.h>
#include <net/vnet.h>
+#include <net/if_gif.h>
#include <net/if_pfsync.h>
#include <netpfil/pf/pf_mtag.h>
@@ -1757,6 +1758,12 @@ do { \
PULLUP_TO(hlen, ulp, struct ip);
break;
+ case IPPROTO_ETHERIP: /* RFC 3378 */
+ PULLUP_LEN(hlen, ulp,
+ sizeof(struct etherip_header) +
+ sizeof(struct ether_header));
+ break;
+
case IPPROTO_PFSYNC:
PULLUP_TO(hlen, ulp, struct pfsync_header);
break;
@@ -3571,11 +3578,9 @@ sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS)
/*
* Stuff that must be initialised only on boot or module load
*/
-static int
-ipfw_init(void)
+static void
+ipfw_init(void *dummy __unused)
{
- int error = 0;
-
/*
* Only print out this stuff the first time around,
* when called from the sysinit code.
@@ -3620,14 +3625,13 @@ ipfw_init(void)
ipfw_init_sopt_handler();
ipfw_init_obj_rewriter();
ipfw_iface_init();
- return (error);
}
/*
* Called for the removal of the last instance only on module unload.
*/
static void
-ipfw_destroy(void)
+ipfw_destroy(void *dummy __unused)
{
ipfw_iface_destroy();
diff --git a/sys/netpfil/ipfw/ip_fw_nat.c b/sys/netpfil/ipfw/ip_fw_nat.c
index 1e2ff1bca290..8bd27f6885ab 100644
--- a/sys/netpfil/ipfw/ip_fw_nat.c
+++ b/sys/netpfil/ipfw/ip_fw_nat.c
@@ -999,9 +999,11 @@ ipfw_nat_del(struct sockopt *sopt)
{
struct cfg_nat *ptr;
struct ip_fw_chain *chain = &V_layer3_chain;
- int i;
+ int error, i;
- sooptcopyin(sopt, &i, sizeof i, sizeof i);
+ error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
+ if (error != 0)
+ return (error);
/* XXX validate i */
IPFW_UH_WLOCK(chain);
ptr = lookup_nat(&chain->nat, i);
@@ -1104,7 +1106,7 @@ ipfw_nat_get_log(struct sockopt *sopt)
{
uint8_t *data;
struct cfg_nat *ptr;
- int i, size;
+ int error, i, size;
struct ip_fw_chain *chain;
IPFW_RLOCK_TRACKER;
@@ -1134,9 +1136,9 @@ ipfw_nat_get_log(struct sockopt *sopt)
i += LIBALIAS_BUF_SIZE;
}
IPFW_RUNLOCK(chain);
- sooptcopyout(sopt, data, size);
+ error = sooptcopyout(sopt, data, size);
free(data, M_IPFW);
- return(0);
+ return (error);
}
static int
@@ -1166,7 +1168,7 @@ vnet_ipfw_nat_uninit(const void *arg __unused)
}
static void
-ipfw_nat_init(void)
+ipfw_nat_init(void *dummy __unused)
{
/* init ipfw hooks */
@@ -1183,7 +1185,7 @@ ipfw_nat_init(void)
}
static void
-ipfw_nat_destroy(void)
+ipfw_nat_destroy(void *dummy __unused)
{
EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_event_tag);
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index e34c08c8c4db..66bc99df2afa 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -123,8 +123,8 @@ union inet_template {
sizeof(struct pfsync_header) + \
sizeof(struct pfsync_subheader) )
-static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
- struct pfsync_state_peer *);
+static int pfsync_upd_tcp(struct pf_kstate *, struct pf_state_peer_export *,
+ struct pf_state_peer_export *);
static int pfsync_in_clr(struct mbuf *, int, int, int, int);
static int pfsync_in_ins(struct mbuf *, int, int, int, int);
static int pfsync_in_iack(struct mbuf *, int, int, int, int);
@@ -153,6 +153,8 @@ static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = {
pfsync_in_eof, /* PFSYNC_ACT_EOF */
pfsync_in_ins, /* PFSYNC_ACT_INS_1400 */
pfsync_in_upd, /* PFSYNC_ACT_UPD_1400 */
+ pfsync_in_ins, /* PFSYNC_ACT_INS_1500 */
+ pfsync_in_upd, /* PFSYNC_ACT_UPD_1500 */
};
struct pfsync_q {
@@ -165,9 +167,11 @@ struct pfsync_q {
enum pfsync_q_id {
PFSYNC_Q_INS_1301,
PFSYNC_Q_INS_1400,
+ PFSYNC_Q_INS_1500,
PFSYNC_Q_IACK,
PFSYNC_Q_UPD_1301,
PFSYNC_Q_UPD_1400,
+ PFSYNC_Q_UPD_1500,
PFSYNC_Q_UPD_C,
PFSYNC_Q_DEL_C,
PFSYNC_Q_COUNT,
@@ -176,6 +180,7 @@ enum pfsync_q_id {
/* Functions for building messages for given queue */
static void pfsync_out_state_1301(struct pf_kstate *, void *);
static void pfsync_out_state_1400(struct pf_kstate *, void *);
+static void pfsync_out_state_1500(struct pf_kstate *, void *);
static void pfsync_out_iack(struct pf_kstate *, void *);
static void pfsync_out_upd_c(struct pf_kstate *, void *);
static void pfsync_out_del_c(struct pf_kstate *, void *);
@@ -184,9 +189,11 @@ static void pfsync_out_del_c(struct pf_kstate *, void *);
static struct pfsync_q pfsync_qs[] = {
{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 },
{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 },
+ { pfsync_out_state_1500, sizeof(struct pfsync_state_1500), PFSYNC_ACT_INS_1500 },
{ pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 },
{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 },
+ { pfsync_out_state_1500, sizeof(struct pfsync_state_1500), PFSYNC_ACT_UPD_1500 },
{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
{ pfsync_out_del_c, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
};
@@ -195,9 +202,11 @@ static struct pfsync_q pfsync_qs[] = {
static u_int8_t pfsync_qid_sstate[] = {
PFSYNC_S_INS, /* PFSYNC_Q_INS_1301 */
PFSYNC_S_INS, /* PFSYNC_Q_INS_1400 */
+ PFSYNC_S_INS, /* PFSYNC_Q_INS_1500 */
PFSYNC_S_IACK, /* PFSYNC_Q_IACK */
PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1301 */
PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1400 */
+ PFSYNC_S_UPD, /* PFSYNC_Q_UPD_1500 */
PFSYNC_S_UPD_C, /* PFSYNC_Q_UPD_C */
PFSYNC_S_DEL_C, /* PFSYNC_Q_DEL_C */
};
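pfsync_qs[] and pfsync_qid_sstate[] above are parallel tables indexed by enum pfsync_q_id, so each new message version (here 1500) must add exactly one entry to each. A compile-time guard along these lines (hypothetical, not in the patch) would catch a missed entry:

	_Static_assert(nitems(pfsync_qs) == PFSYNC_Q_COUNT,
	    "pfsync_qs must have one entry per pfsync_q_id");
	_Static_assert(nitems(pfsync_qid_sstate) == PFSYNC_Q_COUNT,
	    "pfsync_qid_sstate must have one entry per pfsync_q_id");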
@@ -330,7 +339,7 @@ SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW,
static int pfsync_clone_create(struct if_clone *, int, caddr_t);
static void pfsync_clone_destroy(struct ifnet *);
-static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
+static int pfsync_alloc_scrub_memory(struct pf_state_peer_export *,
struct pf_state_peer *);
static int pfsyncoutput(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
@@ -502,7 +511,7 @@ pfsync_clone_destroy(struct ifnet *ifp)
}
static int
-pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
+pfsync_alloc_scrub_memory(struct pf_state_peer_export *s,
struct pf_state_peer *d)
{
if (s->scrub.scrub_flag && d->scrub == NULL) {
@@ -525,13 +534,15 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
struct pf_kstate *st = NULL;
struct pf_state_key *skw = NULL, *sks = NULL;
struct pf_krule *r = NULL;
- struct pfi_kkif *kif;
+ struct pfi_kkif *kif, *orig_kif;
struct pfi_kkif *rt_kif = NULL;
struct pf_kpooladdr *rpool_first;
int error;
+ int n = 0;
sa_family_t rt_af = 0;
uint8_t rt = 0;
- int n = 0;
+ sa_family_t wire_af, stack_af;
+ u_int8_t wire_proto, stack_proto;
PF_RULES_RASSERT();
@@ -542,7 +553,11 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
return (EINVAL);
}
- if ((kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) {
+ /*
+ * Check interfaces early on, before allocating memory etc.,
+ * because there is a high chance there will be many more such states.
+ */
+ if ((kif = orig_kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) {
if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("%s: unknown interface: %s\n", __func__,
sp->pfs_1301.ifname);
@@ -552,6 +567,30 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
}
/*
+ * States created with floating interface policy can be synchronized to
+ * hosts with different interfaces, because they are bound to V_pfi_all.
+ * But s->orig_kif still points to a real interface. Don't abort
+ * importing the state if orig_kif does not exist on the importing host
+ * but the state is not interface-bound.
+ */
+ if (msg_version == PFSYNC_MSG_VERSION_1500) {
+ orig_kif = pfi_kkif_find(sp->pfs_1500.orig_ifname);
+ if (orig_kif == NULL) {
+ if (kif == V_pfi_all) {
+ orig_kif = kif;
+ } else {
+ if (V_pf_status.debug >= PF_DEBUG_MISC)
+ printf("%s: unknown original interface:"
+ " %s\n", __func__,
+ sp->pfs_1500.orig_ifname);
+ if (flags & PFSYNC_SI_IOCTL)
+ return (EINVAL);
+ return (0); /* skip this state */
+ }
+ }
+ }
+
+ /*
* If the ruleset checksums match or the state is coming from the ioctl,
* it's safe to associate the state with the rule of that number.
*/
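The fallback just added reads as a small decision function (hypothetical helper, shown only to make the policy explicit): a missing orig_kif is fatal only for interface-bound states, while floating states may substitute the wildcard kif.

	static struct pfi_kkif *
	resolve_orig_kif(struct pfi_kkif *kif, struct pfi_kkif *found)
	{
		if (found != NULL)
			return (found);
		/* Floating states are bound to V_pfi_all; accept the wildcard. */
		if (kif == V_pfi_all)
			return (kif);
		return (NULL);	/* interface-bound: caller skips the state */
	}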
@@ -565,10 +604,6 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
} else
r = &V_pf_default_rule;
- /*
- * Check routing interface early on. Do it before allocating memory etc.
- * because there is a high chance there will be a lot more such states.
- */
switch (msg_version) {
case PFSYNC_MSG_VERSION_1301:
/*
@@ -605,7 +640,8 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
rt_kif = rpool_first->kif;
/*
* Guess the AF of the route address, FreeBSD 13 does
- * not support af-to so it should be safe.
+ * not support af-to nor prefer-ipv6-nexthop
+ * so it should be safe.
*/
rt_af = r->af;
} else if (!PF_AZERO(&sp->pfs_1301.rt_addr, sp->pfs_1301.af)) {
@@ -618,10 +654,12 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
"because of different ruleset", __func__);
return ((flags & PFSYNC_SI_IOCTL) ? EINVAL : 0);
}
+ wire_af = stack_af = sp->pfs_1301.af;
+ wire_proto = stack_proto = sp->pfs_1301.proto;
break;
case PFSYNC_MSG_VERSION_1400:
/*
- * On FreeBSD 14 and above we're not taking any chances.
+ * On FreeBSD 14 we're not taking any chances.
* We use the information synced to us.
*/
if (sp->pfs_1400.rt) {
@@ -634,11 +672,35 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
}
rt = sp->pfs_1400.rt;
/*
- * Guess the AF of the route address, FreeBSD 13 does
- * not support af-to so it should be safe.
+ * Guess the AF of the route address, FreeBSD 14 does
+ * not support af-to nor prefer-ipv6-nexthop
+ * so it should be safe.
*/
rt_af = sp->pfs_1400.af;
}
+ wire_af = stack_af = sp->pfs_1400.af;
+ wire_proto = stack_proto = sp->pfs_1400.proto;
+ break;
+ case PFSYNC_MSG_VERSION_1500:
+ /*
+ * On FreeBSD 15 and above we're not taking any chances.
+ * We use the information synced to us.
+ */
+ if (sp->pfs_1500.rt) {
+ rt_kif = pfi_kkif_find(sp->pfs_1500.rt_ifname);
+ if (rt_kif == NULL) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ "%s: unknown route interface: %s",
+ __func__, sp->pfs_1500.rt_ifname);
+ return ((flags & PFSYNC_SI_IOCTL) ? EINVAL : 0);
+ }
+ rt = sp->pfs_1500.rt;
+ rt_af = sp->pfs_1500.rt_af;
+ }
+ wire_af = sp->pfs_1500.wire_af;
+ stack_af = sp->pfs_1500.stack_af;
+ wire_proto = sp->pfs_1500.wire_proto;
+ stack_proto = sp->pfs_1500.stack_proto;
break;
}
@@ -665,8 +727,9 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
ks = &sp->pfs_1301.key[PF_SK_STACK];
#endif
- if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) ||
- PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) ||
+ if (wire_af != stack_af ||
+ PF_ANEQ(&kw->addr[0], &ks->addr[0], wire_af) ||
+ PF_ANEQ(&kw->addr[1], &ks->addr[1], wire_af) ||
kw->port[0] != ks->port[0] ||
kw->port[1] != ks->port[1]) {
sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
@@ -685,36 +748,19 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
skw->addr[1] = kw->addr[1];
skw->port[0] = kw->port[0];
skw->port[1] = kw->port[1];
- skw->proto = sp->pfs_1301.proto;
- skw->af = sp->pfs_1301.af;
+ skw->proto = wire_proto;
+ skw->af = wire_af;
if (sks != skw) {
sks->addr[0] = ks->addr[0];
sks->addr[1] = ks->addr[1];
sks->port[0] = ks->port[0];
sks->port[1] = ks->port[1];
- sks->proto = sp->pfs_1301.proto;
- sks->af = sp->pfs_1301.af;
+ sks->proto = stack_proto;
+ sks->af = stack_af;
}
/* copy to state */
- bcopy(&sp->pfs_1301.rt_addr, &st->act.rt_addr, sizeof(st->act.rt_addr));
st->creation = (time_uptime - ntohl(sp->pfs_1301.creation)) * 1000;
- st->expire = pf_get_uptime();
- if (sp->pfs_1301.expire) {
- uint32_t timeout;
-
- timeout = r->timeout[sp->pfs_1301.timeout];
- if (!timeout)
- timeout = V_pf_default_rule.timeout[sp->pfs_1301.timeout];
-
- /* sp->expire may have been adaptively scaled by export. */
- st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000;
- }
-
- st->direction = sp->pfs_1301.direction;
- st->act.log = sp->pfs_1301.log;
- st->timeout = sp->pfs_1301.timeout;
-
st->act.rt = rt;
st->act.rt_kif = rt_kif;
st->act.rt_af = rt_af;
@@ -722,6 +768,12 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
switch (msg_version) {
case PFSYNC_MSG_VERSION_1301:
st->state_flags = sp->pfs_1301.state_flags;
+ st->direction = sp->pfs_1301.direction;
+ st->act.log = sp->pfs_1301.log;
+ st->timeout = sp->pfs_1301.timeout;
+ if (rt)
+ bcopy(&sp->pfs_1301.rt_addr, &st->act.rt_addr,
+ sizeof(st->act.rt_addr));
/*
* In FreeBSD 13 pfsync lacks many attributes. Copy them
* from the rule if possible. If rule can't be matched
@@ -760,6 +812,9 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
break;
case PFSYNC_MSG_VERSION_1400:
st->state_flags = ntohs(sp->pfs_1400.state_flags);
+ st->direction = sp->pfs_1400.direction;
+ st->act.log = sp->pfs_1400.log;
+ st->timeout = sp->pfs_1400.timeout;
st->act.qid = ntohs(sp->pfs_1400.qid);
st->act.pqid = ntohs(sp->pfs_1400.pqid);
st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe);
@@ -770,12 +825,47 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
st->act.max_mss = ntohs(sp->pfs_1400.max_mss);
st->act.set_prio[0] = sp->pfs_1400.set_prio[0];
st->act.set_prio[1] = sp->pfs_1400.set_prio[1];
+ if (rt)
+ bcopy(&sp->pfs_1400.rt_addr, &st->act.rt_addr,
+ sizeof(st->act.rt_addr));
+ break;
+ case PFSYNC_MSG_VERSION_1500:
+ st->state_flags = ntohs(sp->pfs_1500.state_flags);
+ st->direction = sp->pfs_1500.direction;
+ st->act.log = sp->pfs_1500.log;
+ st->timeout = sp->pfs_1500.timeout;
+ st->act.qid = ntohs(sp->pfs_1500.qid);
+ st->act.pqid = ntohs(sp->pfs_1500.pqid);
+ st->act.dnpipe = ntohs(sp->pfs_1500.dnpipe);
+ st->act.dnrpipe = ntohs(sp->pfs_1500.dnrpipe);
+ st->act.rtableid = ntohl(sp->pfs_1500.rtableid);
+ st->act.min_ttl = sp->pfs_1500.min_ttl;
+ st->act.set_tos = sp->pfs_1500.set_tos;
+ st->act.max_mss = ntohs(sp->pfs_1500.max_mss);
+ st->act.set_prio[0] = sp->pfs_1500.set_prio[0];
+ st->act.set_prio[1] = sp->pfs_1500.set_prio[1];
+ if (rt)
+ bcopy(&sp->pfs_1500.rt_addr, &st->act.rt_addr,
+ sizeof(st->act.rt_addr));
+ if (sp->pfs_1500.tagname[0] != 0)
+ st->tag = pf_tagname2tag(sp->pfs_1500.tagname);
break;
default:
panic("%s: Unsupported pfsync_msg_version %d",
__func__, msg_version);
}
+ st->expire = pf_get_uptime();
+ if (sp->pfs_1301.expire) {
+ uint32_t timeout;
+
+ timeout = r->timeout[st->timeout];
+ if (!timeout)
+ timeout = V_pf_default_rule.timeout[st->timeout];
+
+ /* sp->expire may have been adaptively scaled by export. */
+ st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000;
+ }
+
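/*
 * Worked example of the rescaling above (illustrative numbers): with a
 * configured timeout of 60 s and a synced expire of 45 s remaining, the
 * state has been idle for 60 - 45 = 15 s, so st->expire is backdated by
 * 15 * 1000 ms. Computing this after the per-version switch lets it use
 * st->timeout no matter which message version supplied the value.
 */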
if (! (st->act.rtableid == -1 ||
(st->act.rtableid >= 0 && st->act.rtableid < rt_numfibs)))
goto cleanup;
@@ -795,7 +885,7 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
if (!(flags & PFSYNC_SI_IOCTL))
st->state_flags |= PFSTATE_NOSYNC;
- if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
+ if ((error = pf_state_insert(kif, orig_kif, skw, sks, st)) != 0)
goto cleanup_state;
/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
@@ -1087,23 +1177,29 @@ pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action)
struct mbuf *mp;
union pfsync_state_union *sa, *sp;
int i, offp, total_len, msg_version, msg_len;
+ u_int8_t timeout, direction;
+ sa_family_t af;
switch (action) {
case PFSYNC_ACT_INS_1301:
msg_len = sizeof(struct pfsync_state_1301);
- total_len = msg_len * count;
msg_version = PFSYNC_MSG_VERSION_1301;
break;
case PFSYNC_ACT_INS_1400:
msg_len = sizeof(struct pfsync_state_1400);
- total_len = msg_len * count;
msg_version = PFSYNC_MSG_VERSION_1400;
break;
+ case PFSYNC_ACT_INS_1500:
+ msg_len = sizeof(struct pfsync_state_1500);
+ msg_version = PFSYNC_MSG_VERSION_1500;
+ break;
default:
V_pfsyncstats.pfsyncs_badver++;
return (-1);
}
+ total_len = msg_len * count;
+
mp = m_pulldown(m, offset, total_len, &offp);
if (mp == NULL) {
V_pfsyncstats.pfsyncs_badlen++;
@@ -1114,13 +1210,26 @@ pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action)
for (i = 0; i < count; i++) {
sp = (union pfsync_state_union *)((char *)sa + msg_len * i);
+ switch (msg_version) {
+ case PFSYNC_MSG_VERSION_1301:
+ case PFSYNC_MSG_VERSION_1400:
+ af = sp->pfs_1301.af;
+ timeout = sp->pfs_1301.timeout;
+ direction = sp->pfs_1301.direction;
+ break;
+ case PFSYNC_MSG_VERSION_1500:
+ af = sp->pfs_1500.wire_af;
+ timeout = sp->pfs_1500.timeout;
+ direction = sp->pfs_1500.direction;
+ break;
+ }
+
/* Check for invalid values. */
- if (sp->pfs_1301.timeout >= PFTM_MAX ||
+ if (timeout >= PFTM_MAX ||
sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST ||
- sp->pfs_1301.direction > PF_OUT ||
- (sp->pfs_1301.af != AF_INET &&
- sp->pfs_1301.af != AF_INET6)) {
+ direction > PF_OUT ||
+ (af != AF_INET && af != AF_INET6)) {
if (V_pf_status.debug >= PF_DEBUG_MISC)
printf("%s: invalid value\n", __func__);
V_pfsyncstats.pfsyncs_badval++;
@@ -1172,8 +1281,8 @@ pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action)
}
static int
-pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
- struct pfsync_state_peer *dst)
+pfsync_upd_tcp(struct pf_kstate *st, struct pf_state_peer_export *src,
+ struct pf_state_peer_export *dst)
{
int sync = 0;
@@ -1213,23 +1322,28 @@ pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
struct pf_kstate *st;
struct mbuf *mp;
int sync, offp, i, total_len, msg_len, msg_version;
+ u_int8_t timeout;
switch (action) {
case PFSYNC_ACT_UPD_1301:
msg_len = sizeof(struct pfsync_state_1301);
- total_len = msg_len * count;
msg_version = PFSYNC_MSG_VERSION_1301;
break;
case PFSYNC_ACT_UPD_1400:
msg_len = sizeof(struct pfsync_state_1400);
- total_len = msg_len * count;
msg_version = PFSYNC_MSG_VERSION_1400;
break;
+ case PFSYNC_ACT_UPD_1500:
+ msg_len = sizeof(struct pfsync_state_1500);
+ msg_version = PFSYNC_MSG_VERSION_1500;
+ break;
default:
V_pfsyncstats.pfsyncs_badact++;
return (-1);
}
+ total_len = msg_len * count;
+
mp = m_pulldown(m, offset, total_len, &offp);
if (mp == NULL) {
V_pfsyncstats.pfsyncs_badlen++;
@@ -1240,8 +1354,18 @@ pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
for (i = 0; i < count; i++) {
sp = (union pfsync_state_union *)((char *)sa + msg_len * i);
+ switch (msg_version) {
+ case PFSYNC_MSG_VERSION_1301:
+ case PFSYNC_MSG_VERSION_1400:
+ timeout = sp->pfs_1301.timeout;
+ break;
+ case PFSYNC_MSG_VERSION_1500:
+ timeout = sp->pfs_1500.timeout;
+ break;
+ }
+
/* check for invalid values */
- if (sp->pfs_1301.timeout >= PFTM_MAX ||
+ if (timeout >= PFTM_MAX ||
sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) {
if (V_pf_status.debug >= PF_DEBUG_MISC) {
@@ -1286,7 +1410,7 @@ pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst);
pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
st->expire = pf_get_uptime();
- st->timeout = sp->pfs_1301.timeout;
+ st->timeout = timeout;
}
st->pfsync_time = time_uptime;
@@ -1741,16 +1865,16 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE)
return (EINVAL);
- data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK);
+ data = malloc(ifr->ifr_cap_nv.length, M_PF, M_WAITOK);
if ((error = copyin(ifr->ifr_cap_nv.buffer, data,
ifr->ifr_cap_nv.length)) != 0) {
- free(data, M_TEMP);
+ free(data, M_PF);
return (error);
}
if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) {
- free(data, M_TEMP);
+ free(data, M_PF);
return (EINVAL);
}
@@ -1758,7 +1882,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
pfsync_nvstatus_to_kstatus(nvl, &status);
nvlist_destroy(nvl);
- free(data, M_TEMP);
+ free(data, M_PF);
error = pfsync_kstatus_to_softc(&status, sc);
return (error);
@@ -1787,6 +1911,14 @@ pfsync_out_state_1400(struct pf_kstate *st, void *buf)
}
static void
+pfsync_out_state_1500(struct pf_kstate *st, void *buf)
+{
+ union pfsync_state_union *sp = buf;
+
+ pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1500);
+}
+
+static void
pfsync_out_iack(struct pf_kstate *st, void *buf)
{
struct pfsync_ins_ack *iack = buf;
@@ -2453,6 +2585,8 @@ pfsync_sstate_to_qid(u_int8_t sync_state)
return PFSYNC_Q_INS_1301;
case PFSYNC_MSG_VERSION_1400:
return PFSYNC_Q_INS_1400;
+ case PFSYNC_MSG_VERSION_1500:
+ return PFSYNC_Q_INS_1500;
}
break;
case PFSYNC_S_IACK:
@@ -2463,6 +2597,8 @@ pfsync_sstate_to_qid(u_int8_t sync_state)
return PFSYNC_Q_UPD_1301;
case PFSYNC_MSG_VERSION_1400:
return PFSYNC_Q_UPD_1400;
+ case PFSYNC_MSG_VERSION_1500:
+ return PFSYNC_Q_UPD_1500;
}
break;
case PFSYNC_S_UPD_C:
@@ -3019,6 +3155,7 @@ pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc)
break;
case PFSYNC_MSG_VERSION_1301:
case PFSYNC_MSG_VERSION_1400:
+ case PFSYNC_MSG_VERSION_1500:
sc->sc_version = status->version;
break;
default:
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 3fa7789efcfe..d6fc24a23fe9 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -344,10 +344,12 @@ static int pf_test_eth_rule(int, struct pfi_kkif *,
struct mbuf **);
static int pf_test_rule(struct pf_krule **, struct pf_kstate **,
struct pf_pdesc *, struct pf_krule **,
- struct pf_kruleset **, u_short *, struct inpcb *);
+ struct pf_kruleset **, u_short *, struct inpcb *,
+ struct pf_krule_slist *);
static int pf_create_state(struct pf_krule *,
struct pf_test_ctx *,
- struct pf_kstate **, u_int16_t, u_int16_t);
+ struct pf_kstate **, u_int16_t, u_int16_t,
+ struct pf_krule_slist *match_rules);
static int pf_state_key_addr_setup(struct pf_pdesc *,
struct pf_state_key_cmp *, int);
static int pf_tcp_track_full(struct pf_kstate *,
@@ -393,7 +395,7 @@ static bool pf_src_connlimit(struct pf_kstate *);
static int pf_match_rcvif(struct mbuf *, struct pf_krule *);
static void pf_counters_inc(int, struct pf_pdesc *,
struct pf_kstate *, struct pf_krule *,
- struct pf_krule *);
+ struct pf_krule *, struct pf_krule_slist *);
static void pf_log_matches(struct pf_pdesc *, struct pf_krule *,
struct pf_krule *, struct pf_kruleset *,
struct pf_krule_slist *);
@@ -489,26 +491,30 @@ BOUND_IFACE(struct pf_kstate *st, struct pf_pdesc *pd)
counter_u64_add(s->anchor->states_cur, 1); \
counter_u64_add(s->anchor->states_tot, 1); \
} \
- if (s->nat_rule != NULL) { \
- counter_u64_add(s->nat_rule->states_cur, 1);\
- counter_u64_add(s->nat_rule->states_tot, 1);\
+ if (s->nat_rule != NULL && s->nat_rule != s->rule) { \
+ counter_u64_add(s->nat_rule->states_cur, 1); \
+ counter_u64_add(s->nat_rule->states_tot, 1); \
} \
SLIST_FOREACH(mrm, &s->match_rules, entry) { \
- counter_u64_add(mrm->r->states_cur, 1); \
- counter_u64_add(mrm->r->states_tot, 1); \
+ if (s->nat_rule != mrm->r) { \
+ counter_u64_add(mrm->r->states_cur, 1); \
+ counter_u64_add(mrm->r->states_tot, 1); \
+ } \
} \
} while (0)
#define STATE_DEC_COUNTERS(s) \
do { \
struct pf_krule_item *mrm; \
- if (s->nat_rule != NULL) \
- counter_u64_add(s->nat_rule->states_cur, -1);\
- if (s->anchor != NULL) \
- counter_u64_add(s->anchor->states_cur, -1); \
counter_u64_add(s->rule->states_cur, -1); \
+ if (s->anchor != NULL) \
+ counter_u64_add(s->anchor->states_cur, -1); \
+ if (s->nat_rule != NULL && s->nat_rule != s->rule) \
+ counter_u64_add(s->nat_rule->states_cur, -1); \
SLIST_FOREACH(mrm, &s->match_rules, entry) \
- counter_u64_add(mrm->r->states_cur, -1); \
+ if (s->nat_rule != mrm->r) { \
+ counter_u64_add(mrm->r->states_cur, -1);\
+ } \
} while (0)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
@@ -1347,6 +1353,8 @@ pf_cleanup(void)
uma_zdestroy(V_pf_state_z);
uma_zdestroy(V_pf_state_key_z);
uma_zdestroy(V_pf_udp_mapping_z);
+ uma_zdestroy(V_pf_anchor_z);
+ uma_zdestroy(V_pf_eth_anchor_z);
}
static int
@@ -1665,7 +1673,6 @@ pf_state_key_addr_setup(struct pf_pdesc *pd,
#ifdef INET6
struct nd_neighbor_solicit nd;
struct pf_addr *target;
- u_short action, reason;
if (pd->af == AF_INET || pd->proto != IPPROTO_ICMPV6)
goto copy;
@@ -1674,7 +1681,8 @@ pf_state_key_addr_setup(struct pf_pdesc *pd,
case ND_NEIGHBOR_SOLICIT:
if (multi)
return (-1);
- if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), &action, &reason, pd->af))
+ if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), NULL,
+ pd->af))
return (-1);
target = (struct pf_addr *)&nd.nd_ns_target;
daddr = target;
@@ -1682,7 +1690,8 @@ pf_state_key_addr_setup(struct pf_pdesc *pd,
case ND_NEIGHBOR_ADVERT:
if (multi)
return (-1);
- if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), &action, &reason, pd->af))
+ if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), NULL,
+ pd->af))
return (-1);
target = (struct pf_addr *)&nd.nd_ns_target;
saddr = target;
@@ -2067,6 +2076,44 @@ pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir)
return (false);
}
+void
+pf_state_peer_hton(const struct pf_state_peer *s, struct pf_state_peer_export *d)
+{
+ d->seqlo = htonl(s->seqlo);
+ d->seqhi = htonl(s->seqhi);
+ d->seqdiff = htonl(s->seqdiff);
+ d->max_win = htons(s->max_win);
+ d->mss = htons(s->mss);
+ d->state = s->state;
+ d->wscale = s->wscale;
+ if (s->scrub) {
+ d->scrub.pfss_flags = htons(
+ s->scrub->pfss_flags & PFSS_TIMESTAMP);
+ d->scrub.pfss_ttl = (s)->scrub->pfss_ttl;
+ d->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);
+ d->scrub.scrub_flag = PF_SCRUB_FLAG_VALID;
+ }
+}
+
+void
+pf_state_peer_ntoh(const struct pf_state_peer_export *s, struct pf_state_peer *d)
+{
+ d->seqlo = ntohl(s->seqlo);
+ d->seqhi = ntohl(s->seqhi);
+ d->seqdiff = ntohl(s->seqdiff);
+ d->max_win = ntohs(s->max_win);
+ d->mss = ntohs(s->mss);
+ d->state = s->state;
+ d->wscale = s->wscale;
+ if (s->scrub.scrub_flag == PF_SCRUB_FLAG_VALID &&
+ d->scrub != NULL) {
+ d->scrub->pfss_flags = ntohs(s->scrub.pfss_flags) &
+ PFSS_TIMESTAMP;
+ d->scrub->pfss_ttl = s->scrub.pfss_ttl;
+ d->scrub->pfss_ts_mod = ntohl(s->scrub.pfss_ts_mod);
+ }
+}
+
struct pf_udp_mapping *
pf_udp_mapping_create(sa_family_t af, struct pf_addr *src_addr, uint16_t src_port,
struct pf_addr *nat_addr, uint16_t nat_port)
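The export/import pair above must mirror each other field for field: a round trip through network byte order has to be the identity. A self-contained userland check of the same discipline (hypothetical struct names):

	#include <arpa/inet.h>
	#include <assert.h>
	#include <stdint.h>

	struct peer      { uint32_t seqlo; uint16_t max_win; };
	struct peer_wire { uint32_t seqlo; uint16_t max_win; };

	static void
	peer_hton(const struct peer *s, struct peer_wire *d)
	{
		d->seqlo = htonl(s->seqlo);
		d->max_win = htons(s->max_win);
	}

	static void
	peer_ntoh(const struct peer_wire *s, struct peer *d)
	{
		d->seqlo = ntohl(s->seqlo);
		d->max_win = ntohs(s->max_win);
	}

	int
	main(void)
	{
		struct peer a = { 0x01020304, 0x0506 }, b;
		struct peer_wire w;

		peer_hton(&a, &w);	/* export: host -> network order */
		peer_ntoh(&w, &b);	/* import: network -> host order */
		assert(a.seqlo == b.seqlo && a.max_win == b.max_win);
		return (0);
	}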
@@ -2793,7 +2840,7 @@ pf_remove_state(struct pf_kstate *s)
s->key[PF_SK_WIRE]->port[0],
s->src.seqhi, s->src.seqlo + 1,
TH_RST|TH_ACK, 0, 0, 0, M_SKIP_FIREWALL, s->tag, 0,
- s->act.rtableid);
+ s->act.rtableid, NULL);
}
LIST_REMOVE(s, entry);
@@ -2828,20 +2875,24 @@ pf_alloc_state(int flags)
return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
}
+static __inline void
+pf_free_match_rules(struct pf_krule_slist *match_rules)
+{
+ struct pf_krule_item *ri;
+
+ while ((ri = SLIST_FIRST(match_rules))) {
+ SLIST_REMOVE_HEAD(match_rules, entry);
+ free(ri, M_PF_RULE_ITEM);
+ }
+}
+
void
pf_free_state(struct pf_kstate *cur)
{
- struct pf_krule_item *ri;
-
KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
cur->timeout));
- while ((ri = SLIST_FIRST(&cur->match_rules))) {
- SLIST_REMOVE_HEAD(&cur->match_rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
+ pf_free_match_rules(&(cur->match_rules));
pf_normalize_tcp_cleanup(cur);
uma_zfree(V_pf_state_z, cur);
pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
@@ -3313,7 +3364,7 @@ pf_change_ap(struct pf_pdesc *pd, struct pf_addr *a, u_int16_t *p,
u_int16_t po;
uint8_t u = pd->virtual_proto == IPPROTO_UDP;
- MPASS(pd->pcksum);
+ MPASS(pd->pcksum != NULL);
if (pd->af == AF_INET) {
MPASS(pd->ip_sum);
}
@@ -3592,6 +3643,18 @@ pf_translate_af(struct pf_pdesc *pd)
pd->src = (struct pf_addr *)&ip4->ip_src;
pd->dst = (struct pf_addr *)&ip4->ip_dst;
pd->off = sizeof(struct ip);
+ if (pd->m->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
+ pd->m->m_pkthdr.csum_flags &= ~CSUM_TCP_IPV6;
+ pd->m->m_pkthdr.csum_flags |= CSUM_TCP;
+ }
+ if (pd->m->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
+ pd->m->m_pkthdr.csum_flags &= ~CSUM_UDP_IPV6;
+ pd->m->m_pkthdr.csum_flags |= CSUM_UDP;
+ }
+ if (pd->m->m_pkthdr.csum_flags & CSUM_SCTP_IPV6) {
+ pd->m->m_pkthdr.csum_flags &= ~CSUM_SCTP_IPV6;
+ pd->m->m_pkthdr.csum_flags |= CSUM_SCTP;
+ }
break;
case AF_INET6:
ip6 = mtod(pd->m, struct ip6_hdr *);
@@ -3609,6 +3672,18 @@ pf_translate_af(struct pf_pdesc *pd)
pd->src = (struct pf_addr *)&ip6->ip6_src;
pd->dst = (struct pf_addr *)&ip6->ip6_dst;
pd->off = sizeof(struct ip6_hdr);
+ if (pd->m->m_pkthdr.csum_flags & CSUM_TCP) {
+ pd->m->m_pkthdr.csum_flags &= ~CSUM_TCP;
+ pd->m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
+ }
+ if (pd->m->m_pkthdr.csum_flags & CSUM_UDP) {
+ pd->m->m_pkthdr.csum_flags &= ~CSUM_UDP;
+ pd->m->m_pkthdr.csum_flags |= CSUM_UDP_IPV6;
+ }
+ if (pd->m->m_pkthdr.csum_flags & CSUM_SCTP) {
+ pd->m->m_pkthdr.csum_flags &= ~CSUM_SCTP;
+ pd->m->m_pkthdr.csum_flags |= CSUM_SCTP_IPV6;
+ }
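/*
 * All six flag swaps above follow one pattern: when af-to rewrites the
 * IP header, a pending checksum-offload request must be re-expressed in
 * the new family's flag, or the driver would checksum against the wrong
 * pseudo-header. A hypothetical helper capturing the move (sketch, not
 * part of the patch):
 */
static int
xlate_csum_flags(int flags, int from, int to)
{
	if (flags & from) {
		flags &= ~from;
		flags |= to;	/* e.g. from CSUM_TCP to CSUM_TCP_IPV6 */
	}
	return (flags);
}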
/*
* If we're dealing with a reassembled packet we need to adjust
@@ -3979,7 +4054,7 @@ pf_modulate_sack(struct pf_pdesc *pd, struct tcphdr *th,
optsoff = pd->off + sizeof(struct tcphdr);
#define TCPOLEN_MINSACK (TCPOLEN_SACK + 2)
if (olen < TCPOLEN_MINSACK ||
- !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af))
+ !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, pd->af))
return (0);
eoh = opts + olen;
@@ -4015,7 +4090,7 @@ pf_build_tcp(const struct pf_krule *r, sa_family_t af,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, u_int sack,
- int rtableid)
+ int rtableid, u_short *reason)
{
struct mbuf *m;
int len, tlen;
@@ -4055,13 +4130,16 @@ pf_build_tcp(const struct pf_krule *r, sa_family_t af,
}
m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m == NULL)
+ if (m == NULL) {
+ REASON_SET(reason, PFRES_MEMORY);
return (NULL);
+ }
#ifdef MAC
mac_netinet_firewall_send(m);
#endif
if ((pf_mtag = pf_get_mtag(m)) == NULL) {
+ REASON_SET(reason, PFRES_MEMORY);
m_freem(m);
return (NULL);
}
@@ -4281,13 +4359,14 @@ pf_send_tcp(const struct pf_krule *r, sa_family_t af,
const struct pf_addr *saddr, const struct pf_addr *daddr,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
- int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid)
+ int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid,
+ u_short *reason)
{
struct pf_send_entry *pfse;
struct mbuf *m;
m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, tcp_flags,
- win, mss, ttl, mbuf_flags, mtag_tag, mtag_flags, 0, rtableid);
+ win, mss, ttl, mbuf_flags, mtag_tag, mtag_flags, 0, rtableid, reason);
if (m == NULL)
return;
@@ -4295,6 +4374,7 @@ pf_send_tcp(const struct pf_krule *r, sa_family_t af,
pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
if (pfse == NULL) {
m_freem(m);
+ REASON_SET(reason, PFRES_MEMORY);
return;
}
@@ -4356,9 +4436,10 @@ pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
if (tcp_get_flags(th) & TH_FIN)
ack++;
pf_send_tcp(r, pd->af, pd->dst,
- pd->src, th->th_dport, th->th_sport,
- ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
- r->return_ttl, M_SKIP_FIREWALL, 0, 0, rtableid);
+ pd->src, th->th_dport, th->th_sport,
+ ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
+ r->return_ttl, M_SKIP_FIREWALL, 0, 0, rtableid,
+ reason);
}
} else if (pd->proto == IPPROTO_SCTP &&
(r->rule_flag & PFRULE_RETURN)) {
@@ -4409,7 +4490,8 @@ pf_icmp_to_bandlim(uint8_t type)
static void
pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_kstate *s,
- struct pf_state_peer *src, struct pf_state_peer *dst)
+ struct pf_state_peer *src, struct pf_state_peer *dst,
+ u_short *reason)
{
/*
* We are sending challenge ACK as a response to SYN packet, which
@@ -4423,7 +4505,7 @@ pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_kstate *s,
pf_send_tcp(s->rule, pd->af, pd->dst, pd->src,
pd->hdr.tcp.th_dport, pd->hdr.tcp.th_sport, dst->seqlo,
src->seqlo, TH_ACK, 0, 0, s->rule->return_ttl, 0, 0, 0,
- s->rule->rtableid);
+ s->rule->rtableid, reason);
}
static void
@@ -4668,7 +4750,8 @@ pf_tag_packet(struct pf_pdesc *pd, int tag)
} while (0)
enum pf_test_status
-pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
+pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r,
+ struct pf_krule_slist *match_rules)
{
enum pf_test_status rv;
@@ -4686,7 +4769,7 @@ pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
struct pf_kanchor *child;
rv = PF_TEST_OK;
RB_FOREACH(child, pf_kanchor_node, &r->anchor->children) {
- rv = pf_match_rule(ctx, &child->ruleset);
+ rv = pf_match_rule(ctx, &child->ruleset, match_rules);
if ((rv == PF_TEST_QUICK) || (rv == PF_TEST_FAIL)) {
/*
* we either hit a rule with quick action
@@ -4697,7 +4780,7 @@ pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
}
}
} else {
- rv = pf_match_rule(ctx, &r->anchor->ruleset);
+ rv = pf_match_rule(ctx, &r->anchor->ruleset, match_rules);
/*
 * Unless errors occurred, stop iff any rule matched
* within quick anchors.
@@ -5042,7 +5125,7 @@ pf_get_wscale(struct pf_pdesc *pd)
olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
if (olen < TCPOLEN_WINDOW || !pf_pull_hdr(pd->m,
- pd->off + sizeof(struct tcphdr), opts, olen, NULL, NULL, pd->af))
+ pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af))
return (0);
opt = opts;
@@ -5067,7 +5150,7 @@ pf_get_mss(struct pf_pdesc *pd)
olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
if (olen < TCPOLEN_MAXSEG || !pf_pull_hdr(pd->m,
- pd->off + sizeof(struct tcphdr), opts, olen, NULL, NULL, pd->af))
+ pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af))
return (0);
opt = opts;
@@ -5546,9 +5629,10 @@ pf_rule_apply_nat(struct pf_test_ctx *ctx, struct pf_krule *r)
}
enum pf_test_status
-pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
+pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset,
+ struct pf_krule_slist *match_rules)
{
- struct pf_krule_item *ri;
+ struct pf_krule_item *ri, *rt;
struct pf_krule *r;
struct pf_krule *save_a;
struct pf_kruleset *save_aruleset;
@@ -5561,6 +5645,9 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
*ctx->rm = ctx->pd->related_rule;
break;
}
+ PF_TEST_ATTRIB(r->rule_flag & PFRULE_EXPIRED,
+ TAILQ_NEXT(r, entries));
+ /* Don't count expired rule evaluations. */
pf_counter_u64_add(&r->evaluations, 1);
PF_TEST_ATTRIB(pfi_kkif_match(r->kif, pd->kif) == r->ifnot,
r->skip[PF_SKIP_IFP]);
@@ -5664,6 +5751,21 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
if (r->tag)
ctx->tag = r->tag;
if (r->anchor == NULL) {
+
+ if (r->rule_flag & PFRULE_ONCE) {
+ uint32_t rule_flag;
+
+ rule_flag = r->rule_flag;
+ if ((rule_flag & PFRULE_EXPIRED) == 0 &&
+ atomic_cmpset_int(&r->rule_flag, rule_flag,
+ rule_flag | PFRULE_EXPIRED)) {
+ r->exptime = time_uptime;
+ } else {
+ r = TAILQ_NEXT(r, entries);
+ continue;
+ }
+ }
+
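/*
 * The once-rule logic above leans on atomic_cmpset_int() so that exactly
 * one packet wins the unexpired-to-expired transition; all racing
 * callers observe PFRULE_EXPIRED and skip the rule. A userland analog
 * with C11 atomics (illustrative flag value, not the kernel API):
 */
#include <stdatomic.h>
#include <stdbool.h>

#define RULE_EXPIRED	0x00000400	/* stand-in for PFRULE_EXPIRED */

static bool
claim_once(_Atomic unsigned int *flags)
{
	unsigned int old = atomic_load(flags);

	if (old & RULE_EXPIRED)
		return (false);		/* already fired: skip the rule */
	/* Only one racing caller wins the old -> old|EXPIRED transition. */
	return (atomic_compare_exchange_strong(flags, &old,
	    old | RULE_EXPIRED));
}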
if (r->action == PF_MATCH) {
/*
* Apply translations before increasing counters,
@@ -5687,11 +5789,14 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
return (PF_TEST_FAIL);
}
ri->r = r;
- SLIST_INSERT_HEAD(&ctx->rules, ri, entry);
- pf_counter_u64_critical_enter();
- pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
- pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
- pf_counter_u64_critical_exit();
+
+ if (SLIST_EMPTY(match_rules)) {
+ SLIST_INSERT_HEAD(match_rules, ri, entry);
+ } else {
+ SLIST_INSERT_AFTER(rt, ri, entry);
+ }
+ rt = ri;
+
pf_rule_to_actions(r, &pd->act);
if (r->log)
PFLOG_PACKET(r->action, PFRES_MATCH, r,
@@ -5715,7 +5820,7 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
ctx->arsm = ctx->aruleset;
}
if (pd->act.log & PF_LOG_MATCHES)
- pf_log_matches(pd, r, ctx->a, ruleset, &ctx->rules);
+ pf_log_matches(pd, r, ctx->a, ruleset, match_rules);
if (r->quick) {
ctx->test_status = PF_TEST_QUICK;
break;
@@ -5732,7 +5837,7 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
* Note: we don't need to restore if we are not going
* to continue with ruleset evaluation.
*/
- if (pf_step_into_anchor(ctx, r) != PF_TEST_OK) {
+ if (pf_step_into_anchor(ctx, r, match_rules) != PF_TEST_OK) {
break;
}
ctx->a = save_a;
@@ -5741,17 +5846,18 @@ pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
r = TAILQ_NEXT(r, entries);
}
+
return (ctx->test_status);
}
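pf_match_rule() now appends matched rules to the caller-supplied list while preserving evaluation order, which SLIST cannot do in O(1) without remembering the tail; that is what the rt cursor above is for. A self-contained userland demo of the same idiom:

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		int val;
		SLIST_ENTRY(item) entry;
	};
	SLIST_HEAD(item_list, item);

	int
	main(void)
	{
		struct item_list list = SLIST_HEAD_INITIALIZER(list);
		struct item *it, *tail = NULL;

		for (int i = 0; i < 3; i++) {
			if ((it = calloc(1, sizeof(*it))) == NULL)
				return (1);
			it->val = i;
			if (SLIST_EMPTY(&list))
				SLIST_INSERT_HEAD(&list, it, entry);
			else
				SLIST_INSERT_AFTER(tail, it, entry);
			tail = it;	/* remember the tail for O(1) append */
		}
		SLIST_FOREACH(it, &list, entry)
			printf("%d\n", it->val);	/* 0 1 2: match order kept */
		return (0);
	}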
static int
pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
struct pf_pdesc *pd, struct pf_krule **am,
- struct pf_kruleset **rsm, u_short *reason, struct inpcb *inp)
+ struct pf_kruleset **rsm, u_short *reason, struct inpcb *inp,
+ struct pf_krule_slist *match_rules)
{
struct pf_krule *r = NULL;
struct pf_kruleset *ruleset = NULL;
- struct pf_krule_item *ri;
struct pf_test_ctx ctx;
u_short transerror;
int action = PF_PASS;
@@ -5768,7 +5874,6 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
ctx.rsm = rsm;
ctx.th = &pd->hdr.tcp;
ctx.reason = *reason;
- SLIST_INIT(&ctx.rules);
pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
@@ -5860,44 +5965,49 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
ctx.nat_pool = &(ctx.nr->rdr);
}
- ruleset = &pf_main_ruleset;
- rv = pf_match_rule(&ctx, ruleset);
- if (rv == PF_TEST_FAIL) {
- /*
- * Reason has been set in pf_match_rule() already.
- */
- goto cleanup;
- }
-
- r = *ctx.rm; /* matching rule */
- ctx.a = *ctx.am; /* rule that defines an anchor containing 'r' */
- ruleset = *ctx.rsm; /* ruleset of the anchor defined by the rule 'a' */
- ctx.aruleset = ctx.arsm; /* ruleset of the 'a' rule itself */
+ if (ctx.nr && ctx.nr->natpass) {
+ r = ctx.nr;
+ ruleset = *ctx.rsm;
+ } else {
+ ruleset = &pf_main_ruleset;
+ rv = pf_match_rule(&ctx, ruleset, match_rules);
+ if (rv == PF_TEST_FAIL) {
+ /*
+ * Reason has been set in pf_match_rule() already.
+ */
+ goto cleanup;
+ }
- REASON_SET(&ctx.reason, PFRES_MATCH);
+ r = *ctx.rm; /* matching rule */
+ ctx.a = *ctx.am; /* rule that defines an anchor containing 'r' */
+ ruleset = *ctx.rsm; /* ruleset of the anchor defined by the rule 'a' */
+ ctx.aruleset = ctx.arsm; /* ruleset of the 'a' rule itself */
- /* apply actions for last matching pass/block rule */
- pf_rule_to_actions(r, &pd->act);
- transerror = pf_rule_apply_nat(&ctx, r);
- switch (transerror) {
- case PFRES_MATCH:
- /* Translation action found in rule and applied successfully */
- case PFRES_MAX:
- /* No translation action found in rule */
- break;
- default:
- /* Translation action found in rule but failed to apply */
- REASON_SET(&ctx.reason, transerror);
- goto cleanup;
+ /* apply actions for last matching pass/block rule */
+ pf_rule_to_actions(r, &pd->act);
+ transerror = pf_rule_apply_nat(&ctx, r);
+ switch (transerror) {
+ case PFRES_MATCH:
+ /* Translation action found in rule and applied successfully */
+ case PFRES_MAX:
+ /* No translation action found in rule */
+ break;
+ default:
+ /* Translation action found in rule but failed to apply */
+ REASON_SET(&ctx.reason, transerror);
+ goto cleanup;
+ }
}
+ REASON_SET(&ctx.reason, PFRES_MATCH);
+
if (r->log) {
if (ctx.rewrite)
m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
PFLOG_PACKET(r->action, ctx.reason, r, ctx.a, ruleset, pd, 1, NULL);
}
if (pd->act.log & PF_LOG_MATCHES)
- pf_log_matches(pd, r, ctx.a, ruleset, &ctx.rules);
+ pf_log_matches(pd, r, ctx.a, ruleset, match_rules);
if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
(r->action == PF_DROP) &&
((r->rule_flag & PFRULE_RETURNRST) ||
@@ -5920,7 +6030,9 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
if (r->rt) {
/*
* Set act.rt here instead of in pf_rule_to_actions() because
- * it is applied only from the last pass rule.
+ * it is applied only from the last pass rule. For rules
+ * with the prefer-ipv6-nexthop option act.rt_af is a hint
+ * about the AF of the forwarded packet and might be changed.
*/
pd->act.rt = r->rt;
if (r->rt == PF_REPLYTO)
@@ -5940,7 +6052,8 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
(pd->flags & PFDESC_TCP_NORM)))) {
bool nat64;
- action = pf_create_state(r, &ctx, sm, bproto_sum, bip_sum);
+ action = pf_create_state(r, &ctx, sm, bproto_sum, bip_sum,
+ match_rules);
ctx.sk = ctx.nk = NULL;
if (action != PF_PASS) {
pf_udp_mapping_release(ctx.udp_mapping);
@@ -5986,11 +6099,6 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
action = PF_AFRT;
}
} else {
- while ((ri = SLIST_FIRST(&ctx.rules))) {
- SLIST_REMOVE_HEAD(&ctx.rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
uma_zfree(V_pf_state_key_z, ctx.sk);
uma_zfree(V_pf_state_key_z, ctx.nk);
ctx.sk = ctx.nk = NULL;
@@ -6018,11 +6126,6 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
return (action);
cleanup:
- while ((ri = SLIST_FIRST(&ctx.rules))) {
- SLIST_REMOVE_HEAD(&ctx.rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
uma_zfree(V_pf_state_key_z, ctx.sk);
uma_zfree(V_pf_state_key_z, ctx.nk);
pf_udp_mapping_release(ctx.udp_mapping);
@@ -6033,7 +6136,8 @@ cleanup:
static int
pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
- struct pf_kstate **sm, u_int16_t bproto_sum, u_int16_t bip_sum)
+ struct pf_kstate **sm, u_int16_t bproto_sum, u_int16_t bip_sum,
+ struct pf_krule_slist *match_rules)
{
struct pf_pdesc *pd = ctx->pd;
struct pf_kstate *s = NULL;
@@ -6047,7 +6151,6 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
struct tcphdr *th = &pd->hdr.tcp;
u_int16_t mss = V_tcp_mssdflt;
u_short sn_reason;
- struct pf_krule_item *ri;
/* check maximums */
if (r->max_states &&
@@ -6099,7 +6202,7 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
s->rule = r;
s->nat_rule = ctx->nr;
s->anchor = ctx->a;
- memcpy(&s->match_rules, &ctx->rules, sizeof(s->match_rules));
+ s->match_rules = *match_rules;
memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions));
if (pd->act.allow_opts)
@@ -6253,7 +6356,7 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, s->src.mss, 0, M_SKIP_FIREWALL, 0, 0,
- pd->act.rtableid);
+ pd->act.rtableid, &ctx->reason);
REASON_SET(&ctx->reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
}
@@ -6263,11 +6366,6 @@ pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
return (PF_PASS);
csfailed:
- while ((ri = SLIST_FIRST(&ctx->rules))) {
- SLIST_REMOVE_HEAD(&ctx->rules, entry);
- free(ri, M_PF_RULE_ITEM);
- }
-
uma_zfree(V_pf_state_key_z, ctx->sk);
uma_zfree(V_pf_state_key_z, ctx->nk);
@@ -6701,8 +6799,12 @@ pf_tcp_track_full(struct pf_kstate *state, struct pf_pdesc *pd,
(ackskew <= (MAXACKWINDOW << sws)) &&
/* Acking not more than one window forward */
((tcp_get_flags(th) & TH_RST) == 0 || orig_seq == src->seqlo ||
- (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
+ (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
/* Require an exact/+1 sequence match on resets when possible */
+ (SEQ_GEQ(orig_seq, src->seqlo - (dst->max_win << dws)) &&
+ SEQ_LEQ(orig_seq, src->seqlo + 1) && ackskew == 0 &&
+ (tcp_get_flags(th) & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)))) {
+ /* Allow resets to match sequence window if ack is perfect match */
if (dst->scrub || src->scrub) {
if (pf_normalize_tcp_stateful(pd, reason, th,
@@ -6843,7 +6945,7 @@ pf_tcp_track_full(struct pf_kstate *state, struct pf_pdesc *pd,
th->th_sport, ntohl(th->th_ack), 0,
TH_RST, 0, 0,
state->rule->return_ttl, M_SKIP_FIREWALL,
- 0, 0, state->act.rtableid);
+ 0, 0, state->act.rtableid, reason);
src->seqlo = 0;
src->seqhi = 1;
src->max_win = 1;
@@ -6968,7 +7070,8 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
state->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, state->src.mss, 0,
- M_SKIP_FIREWALL, 0, 0, state->act.rtableid);
+ M_SKIP_FIREWALL, 0, 0, state->act.rtableid,
+ reason);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if ((tcp_get_flags(th) & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
@@ -7001,7 +7104,8 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
state->dst.seqhi, 0, TH_SYN, 0,
state->src.mss, 0,
state->orig_kif->pfik_ifp == V_loif ? M_LOOP : 0,
- state->tag, 0, state->act.rtableid);
+ state->tag, 0, state->act.rtableid,
+ reason);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) !=
@@ -7016,13 +7120,15 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ntohl(th->th_seq) + 1,
TH_ACK, state->src.max_win, 0, 0, 0,
- state->tag, 0, state->act.rtableid);
+ state->tag, 0, state->act.rtableid,
+ reason);
pf_send_tcp(state->rule, pd->af,
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
state->src.seqhi + 1, state->src.seqlo + 1,
TH_ACK, state->dst.max_win, 0, 0,
- M_SKIP_FIREWALL, 0, 0, state->act.rtableid);
+ M_SKIP_FIREWALL, 0, 0, state->act.rtableid,
+ reason);
state->src.seqdiff = state->dst.seqhi -
state->src.seqlo;
state->dst.seqdiff = state->src.seqhi -
@@ -7122,7 +7228,7 @@ pf_test_state(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
* ACK enables all parties (firewall and peers)
* to get in sync again.
*/
- pf_send_challenge_ack(pd, *state, src, dst);
+ pf_send_challenge_ack(pd, *state, src, dst, reason);
return (PF_DROP);
}
}
@@ -7417,6 +7523,7 @@ static void
pf_sctp_multihome_delayed(struct pf_pdesc *pd, struct pfi_kkif *kif,
struct pf_kstate *s, int action)
{
+ struct pf_krule_slist match_rules;
struct pf_sctp_multihome_job *j, *tmp;
struct pf_sctp_source *i;
int ret;
@@ -7464,8 +7571,14 @@ again:
if (s->rule->rule_flag & PFRULE_ALLOW_RELATED) {
j->pd.related_rule = s->rule;
}
+ SLIST_INIT(&match_rules);
ret = pf_test_rule(&r, &sm,
- &j->pd, &ra, &rs, &reason, NULL);
+ &j->pd, &ra, &rs, &reason, NULL, &match_rules);
+ /*
+ * Nothing to do about match rules; the processed
+ * packet has already increased the counters.
+ */
+ pf_free_match_rules(&match_rules);
PF_RULES_RUNLOCK();
SDT_PROBE4(pf, sctp, multihome, test, kif, r, j->pd.m, ret);
if (ret != PF_DROP && sm != NULL) {
@@ -7526,6 +7639,7 @@ again:
nj->pd.m = j->pd.m;
nj->op = j->op;
+ MPASS(nj->pd.pcksum);
TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, nj, next);
}
PF_SCTP_ENDPOINTS_UNLOCK();
@@ -7593,7 +7707,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
while (off < len) {
struct sctp_paramhdr h;
- if (!pf_pull_hdr(pd->m, start + off, &h, sizeof(h), NULL, NULL,
+ if (!pf_pull_hdr(pd->m, start + off, &h, sizeof(h), NULL,
pd->af))
return (PF_DROP);
@@ -7613,7 +7727,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
return (PF_DROP);
if (!pf_pull_hdr(pd->m, start + off + sizeof(h), &t, sizeof(t),
- NULL, NULL, pd->af))
+ NULL, pd->af))
return (PF_DROP);
if (in_nullhost(t))
@@ -7645,6 +7759,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
job->pd.m = pd->m;
job->op = op;
+ MPASS(job->pd.pcksum);
TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
break;
}
@@ -7657,7 +7772,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
return (PF_DROP);
if (!pf_pull_hdr(pd->m, start + off + sizeof(h), &t, sizeof(t),
- NULL, NULL, pd->af))
+ NULL, pd->af))
return (PF_DROP);
if (memcmp(&t, &pd->src->v6, sizeof(t)) == 0)
break;
@@ -7678,6 +7793,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
job->pd.m = pd->m;
job->op = op;
+ MPASS(job->pd.pcksum);
TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
break;
}
@@ -7687,7 +7803,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
struct sctp_asconf_paramhdr ah;
if (!pf_pull_hdr(pd->m, start + off, &ah, sizeof(ah),
- NULL, NULL, pd->af))
+ NULL, pd->af))
return (PF_DROP);
ret = pf_multihome_scan(start + off + sizeof(ah),
@@ -7702,7 +7818,7 @@ pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
struct sctp_asconf_paramhdr ah;
if (!pf_pull_hdr(pd->m, start + off, &ah, sizeof(ah),
- NULL, NULL, pd->af))
+ NULL, pd->af))
return (PF_DROP);
ret = pf_multihome_scan(start + off + sizeof(ah),
ntohs(ah.ph.param_length) - sizeof(ah), pd,
@@ -7984,7 +8100,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
ipoff2 = pd->off + ICMP_MINLEN;
if (!pf_pull_hdr(pd->m, ipoff2, &h2, sizeof(h2),
- NULL, reason, pd2.af)) {
+ reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short "
"(ip)");
@@ -8005,6 +8121,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
return (PF_DROP);
pd2.tot_len = ntohs(h2.ip_len);
+ pd2.ttl = h2.ip_ttl;
pd2.src = (struct pf_addr *)&h2.ip_src;
pd2.dst = (struct pf_addr *)&h2.ip_dst;
pd2.ip_sum = &h2.ip_sum;
@@ -8015,7 +8132,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
ipoff2 = pd->off + sizeof(struct icmp6_hdr);
if (!pf_pull_hdr(pd->m, ipoff2, &h2_6, sizeof(h2_6),
- NULL, reason, pd2.af)) {
+ reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short "
"(ip6)");
@@ -8027,6 +8144,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
pd2.tot_len = ntohs(h2_6.ip6_plen) +
sizeof(struct ip6_hdr);
+ pd2.ttl = h2_6.ip6_hlim;
pd2.src = (struct pf_addr *)&h2_6.ip6_src;
pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
pd2.ip_sum = NULL;
@@ -8067,7 +8185,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
* expected. Don't access any TCP header fields after
* th_seq, an ackskew test is not possible.
*/
- if (!pf_pull_hdr(pd->m, pd2.off, th, 8, NULL, reason,
+ if (!pf_pull_hdr(pd->m, pd2.off, th, 8, reason,
pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short "
@@ -8263,7 +8381,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
int action;
if (!pf_pull_hdr(pd->m, pd2.off, uh, sizeof(*uh),
- NULL, reason, pd2.af)) {
+ reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short "
"(udp)");
@@ -8394,7 +8512,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
int copyback = 0;
int action;
- if (! pf_pull_hdr(pd->m, pd2.off, sh, sizeof(*sh), NULL, reason,
+ if (! pf_pull_hdr(pd->m, pd2.off, sh, sizeof(*sh), reason,
pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short "
@@ -8550,7 +8668,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
}
if (!pf_pull_hdr(pd->m, pd2.off, iih, ICMP_MINLEN,
- NULL, reason, pd2.af)) {
+ reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short i"
"(icmp)");
@@ -8670,7 +8788,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
}
if (!pf_pull_hdr(pd->m, pd2.off, iih,
- sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
+ sizeof(struct icmp6_hdr), reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
"pf: ICMP error message too short "
"(icmp6)");
@@ -8785,6 +8903,11 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
default: {
int action;
+ /*
+ * Placeholder value, so future calls to pf_change_ap()
+ * don't try to update a NULL checksum pointer.
+ */
+ pd->pcksum = &pd->sctp_dummy_sum;
key.af = pd2.af;
key.proto = pd2.proto;
pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
@@ -8847,7 +8970,7 @@ pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
*/
void *
pf_pull_hdr(const struct mbuf *m, int off, void *p, int len,
- u_short *actionp, u_short *reasonp, sa_family_t af)
+ u_short *reasonp, sa_family_t af)
{
int iplen = 0;
switch (af) {
@@ -8857,12 +8980,7 @@ pf_pull_hdr(const struct mbuf *m, int off, void *p, int len,
u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
if (fragoff) {
- if (fragoff >= len)
- ACTION_SET(actionp, PF_PASS);
- else {
- ACTION_SET(actionp, PF_DROP);
- REASON_SET(reasonp, PFRES_FRAG);
- }
+ REASON_SET(reasonp, PFRES_FRAG);
return (NULL);
}
iplen = ntohs(h->ip_len);
@@ -8879,7 +8997,6 @@ pf_pull_hdr(const struct mbuf *m, int off, void *p, int len,
#endif /* INET6 */
}
if (m->m_pkthdr.len < off + len || iplen < off + len) {
- ACTION_SET(actionp, PF_DROP);
REASON_SET(reasonp, PFRES_SHORT);
return (NULL);
}
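With actionp gone, pf_pull_hdr() reports failure only through *reasonp and its NULL return, and every caller maps NULL to PF_DROP itself. A userland analog of the simplified contract (illustrative names and reason codes):

	#include <stddef.h>
	#include <string.h>

	#define REASON_SHORT	1

	static void *
	pull_hdr(const unsigned char *pkt, size_t pktlen, size_t off,
	    void *p, size_t len, int *reasonp)
	{
		if (pktlen < off + len) {
			*reasonp = REASON_SHORT;	/* caller decides to drop */
			return (NULL);
		}
		memcpy(p, pkt + off, len);
		return (p);
	}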
@@ -8934,9 +9051,10 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
{
struct mbuf *m0, *m1, *md;
- struct route ro;
- const struct sockaddr *gw = &ro.ro_dst;
- struct sockaddr_in *dst;
+ struct route_in6 ro;
+ union sockaddr_union rt_gw;
+ const union sockaddr_union *gw = (const union sockaddr_union *)&ro.ro_dst;
+ union sockaddr_union *dst;
struct ip *ip;
struct ifnet *ifp = NULL;
int error = 0;
@@ -9031,10 +9149,35 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
ip = mtod(m0, struct ip *);
bzero(&ro, sizeof(ro));
- dst = (struct sockaddr_in *)&ro.ro_dst;
- dst->sin_family = AF_INET;
- dst->sin_len = sizeof(struct sockaddr_in);
- dst->sin_addr.s_addr = pd->act.rt_addr.v4.s_addr;
+ dst = (union sockaddr_union *)&ro.ro_dst;
+ dst->sin.sin_family = AF_INET;
+ dst->sin.sin_len = sizeof(struct sockaddr_in);
+ dst->sin.sin_addr = ip->ip_dst;
+ if (ifp) { /* Only needed in forward direction and route-to */
+ bzero(&rt_gw, sizeof(rt_gw));
+ ro.ro_flags |= RT_HAS_GW;
+ gw = &rt_gw;
+ switch (pd->act.rt_af) {
+#ifdef INET
+ case AF_INET:
+ rt_gw.sin.sin_family = AF_INET;
+ rt_gw.sin.sin_len = sizeof(struct sockaddr_in);
+ rt_gw.sin.sin_addr.s_addr = pd->act.rt_addr.v4.s_addr;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ rt_gw.sin6.sin6_family = AF_INET6;
+ rt_gw.sin6.sin6_len = sizeof(struct sockaddr_in6);
+ pf_addrcpy((struct pf_addr *)&rt_gw.sin6.sin6_addr,
+ &pd->act.rt_addr, AF_INET6);
+ break;
+#endif /* INET6 */
+ default:
+ /* Normal af-to without route-to */
+ break;
+ }
+ }
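/*
 * The gateway now lives in a union sockaddr_union instead of a plain
 * sockaddr_in: with prefer-ipv6-nexthop, a packet on the v4 path may be
 * forwarded to an IPv6 nexthop. A minimal sketch of the storage pattern
 * (hypothetical union and function names; sin_len is the BSD sockaddr
 * length field):
 */
#include <netinet/in.h>
#include <string.h>

union gw_union {
	struct sockaddr_in	sin;
	struct sockaddr_in6	sin6;
};

static void
gw_set_v4(union gw_union *gw, struct in_addr a)
{
	memset(gw, 0, sizeof(*gw));
	gw->sin.sin_family = AF_INET;
	gw->sin.sin_len = sizeof(struct sockaddr_in);
	gw->sin.sin_addr = a;
}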
if (pd->dir == PF_IN) {
if (ip->ip_ttl <= IPTTLDEC) {
@@ -9058,34 +9201,19 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
/* Use the gateway if needed. */
if (nh->nh_flags & NHF_GATEWAY) {
- gw = &nh->gw_sa;
+ gw = (const union sockaddr_union *)&nh->gw_sa;
ro.ro_flags |= RT_HAS_GW;
} else {
- dst->sin_addr = ip->ip_dst;
+ dst->sin.sin_addr = ip->ip_dst;
}
-
- /*
- * Bind to the correct interface if we're
- * if-bound. We don't know which interface
- * that will be until here, so we've inserted
- * the state on V_pf_all. Fix that now.
- */
- if (s->kif == V_pfi_all && ifp != NULL &&
- r->rule_flag & PFRULE_IFBOUND)
- s->kif = ifp->if_pf_kif;
}
}
-
- if (r->rule_flag & PFRULE_IFBOUND &&
- pd->act.rt == PF_REPLYTO &&
- s->kif == V_pfi_all) {
- s->kif = pd->act.rt_kif;
- s->orig_kif = oifp->if_pf_kif;
- }
-
PF_STATE_UNLOCK(s);
}
+ /* The family must have been set either from rt_af or by fib4_lookup */
+ KASSERT(gw->sin.sin_family != 0,
+     ("%s: gw address family undetermined", __func__));
+
if (ifp == NULL) {
m0 = pd->m;
pd->m = NULL;
@@ -9094,28 +9222,55 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
goto bad;
}
- if (r->rt == PF_DUPTO)
+ /*
+ * Bind to the correct interface if we're if-bound. We don't know which
+ * interface that will be until here, so we've inserted the state
+ * on V_pf_all. Fix that now.
+ */
+ if (s != NULL && s->kif == V_pfi_all && r->rule_flag & PFRULE_IFBOUND) {
+ /* Verify that we're here because of BOUND_IFACE */
+ MPASS(r->rt == PF_REPLYTO || (pd->af != pd->naf && s->direction == PF_IN));
+ s->kif = ifp->if_pf_kif;
+ if (pd->act.rt == PF_REPLYTO) {
+ s->orig_kif = oifp->if_pf_kif;
+ }
+ }
+
+ if (r->rt == PF_DUPTO || (pd->af != pd->naf && s->direction == PF_IN))
skip_test = true;
- if (pd->dir == PF_IN && !skip_test) {
- if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp,
- &pd->act) != PF_PASS) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
- goto bad;
- } else if (m0 == NULL) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
- goto done;
- }
- if (m0->m_len < sizeof(struct ip)) {
- DPFPRINTF(PF_DEBUG_URGENT,
- "%s: m0->m_len < sizeof(struct ip)", __func__);
- SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
- action = PF_DROP;
- goto bad;
+ if (pd->dir == PF_IN) {
+ if (skip_test) {
+ struct pfi_kkif *out_kif = (struct pfi_kkif *)ifp->if_pf_kif;
+ MPASS(s != NULL);
+ pf_counter_u64_critical_enter();
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_bytes[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], pd->tot_len);
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_packets[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], 1);
+ pf_counter_u64_critical_exit();
+ } else {
+ if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp,
+ &pd->act) != PF_PASS) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
+ goto bad;
+ } else if (m0 == NULL) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
+ goto done;
+ }
+ if (m0->m_len < sizeof(struct ip)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ "%s: m0->m_len < sizeof(struct ip)", __func__);
+ SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
+ action = PF_DROP;
+ goto bad;
+ }
+ ip = mtod(m0, struct ip *);
}
- ip = mtod(m0, struct ip *);
}
if (ifp->if_flags & IFF_LOOPBACK)
@@ -9170,9 +9325,11 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
m_clrprotoflags(m0); /* Avoid confusing lower layers. */
md = m0;
- error = pf_dummynet_route(pd, s, r, ifp, gw, &md);
+ error = pf_dummynet_route(pd, s, r, ifp,
+ (const struct sockaddr *)gw, &md);
if (md != NULL) {
- error = (*ifp->if_output)(ifp, md, gw, &ro);
+ error = (*ifp->if_output)(ifp, md,
+ (const struct sockaddr *)gw, (struct route *)&ro);
SDT_PROBE2(pf, ip, route_to, output, ifp, error);
}
goto done;
@@ -9213,9 +9370,11 @@ pf_route(struct pf_krule *r, struct ifnet *oifp,
md = m0;
pd->pf_mtag = pf_find_mtag(md);
error = pf_dummynet_route(pd, s, r, ifp,
- gw, &md);
+ (const struct sockaddr *)gw, &md);
if (md != NULL) {
- error = (*ifp->if_output)(ifp, md, gw, &ro);
+ error = (*ifp->if_output)(ifp, md,
+ (const struct sockaddr *)gw,
+ (struct route *)&ro);
SDT_PROBE2(pf, ip, route_to, output, ifp, error);
}
} else
@@ -9370,26 +9529,8 @@ pf_route6(struct pf_krule *r, struct ifnet *oifp,
sizeof(dst.sin6_addr));
else
dst.sin6_addr = ip6->ip6_dst;
-
- /*
- * Bind to the correct interface if we're
- * if-bound. We don't know which interface
- * that will be until here, so we've inserted
- * the state on V_pf_all. Fix that now.
- */
- if (s->kif == V_pfi_all && ifp != NULL &&
- r->rule_flag & PFRULE_IFBOUND)
- s->kif = ifp->if_pf_kif;
}
}
-
- if (r->rule_flag & PFRULE_IFBOUND &&
- pd->act.rt == PF_REPLYTO &&
- s->kif == V_pfi_all) {
- s->kif = pd->act.rt_kif;
- s->orig_kif = oifp->if_pf_kif;
- }
-
PF_STATE_UNLOCK(s);
}
@@ -9411,29 +9552,56 @@ pf_route6(struct pf_krule *r, struct ifnet *oifp,
goto bad;
}
- if (r->rt == PF_DUPTO)
+ /*
+ * Bind to the correct interface if we're if-bound. We don't know which
+ * interface that will be until here, so we've inserted the state
+ * on V_pf_all. Fix that now.
+ */
+ if (s != NULL && s->kif == V_pfi_all && r->rule_flag & PFRULE_IFBOUND) {
+ /* Verify that we're here because of BOUND_IFACE */
+ MPASS(r->rt == PF_REPLYTO || (pd->af != pd->naf && s->direction == PF_IN));
+ s->kif = ifp->if_pf_kif;
+ if (pd->act.rt == PF_REPLYTO) {
+ s->orig_kif = oifp->if_pf_kif;
+ }
+ }
+
+ if (r->rt == PF_DUPTO || (pd->af != pd->naf && s->direction == PF_IN))
skip_test = true;
- if (pd->dir == PF_IN && !skip_test) {
- if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT,
- ifp, &m0, inp, &pd->act) != PF_PASS) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
- goto bad;
- } else if (m0 == NULL) {
- action = PF_DROP;
- SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
- goto done;
- }
- if (m0->m_len < sizeof(struct ip6_hdr)) {
- DPFPRINTF(PF_DEBUG_URGENT,
- "%s: m0->m_len < sizeof(struct ip6_hdr)",
- __func__);
- action = PF_DROP;
- SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
- goto bad;
+ if (pd->dir == PF_IN) {
+ if (skip_test) {
+ struct pfi_kkif *out_kif = (struct pfi_kkif *)ifp->if_pf_kif;
+ MPASS(s != NULL);
+ pf_counter_u64_critical_enter();
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_bytes[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], pd->tot_len);
+ pf_counter_u64_add_protected(
+ &out_kif->pfik_packets[pd->naf == AF_INET6][1]
+ [action != PF_PASS && action != PF_AFRT], 1);
+ pf_counter_u64_critical_exit();
+ } else {
+ if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT,
+ ifp, &m0, inp, &pd->act) != PF_PASS) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
+ goto bad;
+ } else if (m0 == NULL) {
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
+ goto done;
+ }
+ if (m0->m_len < sizeof(struct ip6_hdr)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ "%s: m0->m_len < sizeof(struct ip6_hdr)",
+ __func__);
+ action = PF_DROP;
+ SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
+ goto bad;
+ }
+ ip6 = mtod(m0, struct ip6_hdr *);
}
- ip6 = mtod(m0, struct ip6_hdr *);
}
if (ifp->if_flags & IFF_LOOPBACK)
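The hunk above consolidates the if-bound fix-up: states are first inserted against the catch-all kif (V_pfi_all) because the outgoing interface is unknown at insertion time, and are rebound here once it is known. A minimal standalone sketch of that defer-then-rebind pattern; the struct and names are illustrative, not the pf data structures:

    #include <stddef.h>

    struct kif;                          /* opaque interface handle */

    struct state {
        struct kif *kif;                 /* catch_all until the real owner is known */
        struct kif *orig_kif;
    };

    static struct kif *catch_all;        /* stand-in for V_pfi_all */

    static void
    rebind_if_needed(struct state *s, struct kif *real, struct kif *orig)
    {
        /* Only states still parked on the catch-all handle are rebound. */
        if (s->kif == catch_all && real != NULL) {
            s->kif = real;
            if (orig != NULL)
                s->orig_kif = orig;
        }
    }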
@@ -9652,6 +9820,7 @@ pf_pdesc_to_dnflow(const struct pf_pdesc *pd, const struct pf_krule *r,
const struct pf_kstate *s, struct ip_fw_args *dnflow)
{
int dndir = r->direction;
+ sa_family_t af = pd->naf;
if (s && dndir == PF_INOUT) {
dndir = s->direction;
@@ -9692,20 +9861,46 @@ pf_pdesc_to_dnflow(const struct pf_pdesc *pd, const struct pf_krule *r,
dnflow->f_id.proto = pd->proto;
dnflow->f_id.extra = dnflow->rule.info;
- switch (pd->naf) {
+ if (s)
+ af = s->key[PF_SK_STACK]->af;
+
+ switch (af) {
case AF_INET:
dnflow->f_id.addr_type = 4;
- dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr);
- dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr);
+ if (s) {
+ dnflow->f_id.src_ip = htonl(
+ s->key[PF_SK_STACK]->addr[pd->sidx].v4.s_addr);
+ dnflow->f_id.dst_ip = htonl(
+ s->key[PF_SK_STACK]->addr[pd->didx].v4.s_addr);
+ } else {
+ dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr);
+ dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr);
+ }
break;
case AF_INET6:
- dnflow->flags |= IPFW_ARGS_IP6;
dnflow->f_id.addr_type = 6;
- dnflow->f_id.src_ip6 = pd->src->v6;
- dnflow->f_id.dst_ip6 = pd->dst->v6;
+
+ if (s) {
+ dnflow->f_id.src_ip6 =
+ s->key[PF_SK_STACK]->addr[pd->sidx].v6;
+ dnflow->f_id.dst_ip6 =
+ s->key[PF_SK_STACK]->addr[pd->didx].v6;
+ } else {
+ dnflow->f_id.src_ip6 = pd->src->v6;
+ dnflow->f_id.dst_ip6 = pd->dst->v6;
+ }
break;
}
+ /*
+ * Separate this out, because while we pass the pre-NAT addresses to
+ * dummynet, we want the post-NAT address family in case of NAT64.
+ * Dummynet may call ip_output/ip6_output itself, and we need it to
+ * call the correct one.
+ */
+ if (pd->naf == AF_INET6)
+ dnflow->flags |= IPFW_ARGS_IP6;
+
return (true);
}
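The comment above captures the NAT64 subtlety in pf_pdesc_to_dnflow(): the recorded addresses come from the pre-NAT state key, while IPFW_ARGS_IP6 follows the post-NAT family so dummynet re-injects via the right output routine. A reduced, self-contained model of that split; the enum, struct, and function names are invented for illustration:

    #include <stdbool.h>

    enum af { AF4 = 4, AF6 = 6 };        /* stand-ins for AF_INET/AF_INET6 */

    struct dnflow {
        enum af addr_type;               /* family of the recorded addresses */
        bool    ipv6_output;             /* which ip*_output dummynet must call */
    };

    static void
    fill_dnflow(struct dnflow *f, enum af stack_af, enum af post_nat_af)
    {
        f->addr_type = stack_af;               /* pre-NAT: matches stored addrs */
        f->ipv6_output = (post_nat_af == AF6); /* post-NAT: re-injection path */
    }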
@@ -9922,9 +10117,12 @@ pf_walk_header(struct pf_pdesc *pd, struct ip *h, u_short *reason)
pd->proto = h->ip_p;
/* IGMP packets have router alert options, allow them */
if (pd->proto == IPPROTO_IGMP) {
- /* According to RFC 1112 ttl must be set to 1. */
- if ((h->ip_ttl != 1) ||
- !IN_MULTICAST(ntohl(h->ip_dst.s_addr))) {
+ /*
+ * According to RFC 1112, the TTL must be set to 1 in all IGMP
+ * packets sent to 224.0.0.1.
+ */
+ if ((h->ip_ttl != 1) &&
+ (h->ip_dst.s_addr == INADDR_ALLHOSTS_GROUP)) {
DPFPRINTF(PF_DEBUG_MISC, "Invalid IGMP");
REASON_SET(reason, PFRES_IPOPTIONS);
return (PF_DROP);
@@ -9942,7 +10140,7 @@ pf_walk_header(struct pf_pdesc *pd, struct ip *h, u_short *reason)
end < pd->off + sizeof(ext))
return (PF_PASS);
if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
- NULL, reason, AF_INET)) {
+ reason, AF_INET)) {
DPFPRINTF(PF_DEBUG_MISC, "IP short exthdr");
return (PF_DROP);
}
@@ -9968,7 +10166,7 @@ pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end,
while (off < end) {
if (!pf_pull_hdr(pd->m, off, &opt.ip6o_type,
- sizeof(opt.ip6o_type), NULL, reason, AF_INET6)) {
+ sizeof(opt.ip6o_type), reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short opt type");
return (PF_DROP);
}
@@ -9976,7 +10174,7 @@ pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end,
off++;
continue;
}
- if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt), NULL,
+ if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt),
reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short opt");
return (PF_DROP);
@@ -10001,7 +10199,7 @@ pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end,
REASON_SET(reason, PFRES_IPOPTIONS);
return (PF_DROP);
}
- if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo), NULL,
+ if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo),
reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short jumbo");
return (PF_DROP);
@@ -10050,7 +10248,7 @@ pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
break;
case IPPROTO_HOPOPTS:
if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
- NULL, reason, AF_INET6)) {
+ reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short exthdr");
return (PF_DROP);
}
@@ -10077,7 +10275,7 @@ pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
return (PF_DROP);
}
if (!pf_pull_hdr(pd->m, pd->off, &frag, sizeof(frag),
- NULL, reason, AF_INET6)) {
+ reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short fragment");
return (PF_DROP);
}
@@ -10105,7 +10303,7 @@ pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
return (PF_PASS);
}
if (!pf_pull_hdr(pd->m, pd->off, &rthdr, sizeof(rthdr),
- NULL, reason, AF_INET6)) {
+ reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short rthdr");
return (PF_DROP);
}
@@ -10126,7 +10324,7 @@ pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
case IPPROTO_AH:
case IPPROTO_DSTOPTS:
if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
- NULL, reason, AF_INET6)) {
+ reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC, "IPv6 short exthdr");
return (PF_DROP);
}
@@ -10159,7 +10357,7 @@ pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
return (PF_PASS);
}
if (!pf_pull_hdr(pd->m, pd->off, &icmp6, sizeof(icmp6),
- NULL, reason, AF_INET6)) {
+ reason, AF_INET6)) {
DPFPRINTF(PF_DEBUG_MISC,
"IPv6 short icmp6hdr");
return (PF_DROP);
@@ -10253,28 +10451,28 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
__func__);
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
h = mtod(pd->m, struct ip *);
if (pd->m->m_pkthdr.len < ntohs(h->ip_len)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
if (pf_normalize_ip(reason, pd) != PF_PASS) {
/* We do IP header normalization and packet reassembly here */
*m0 = pd->m;
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
*m0 = pd->m;
h = mtod(pd->m, struct ip *);
if (pf_walk_header(pd, h, reason) != PF_PASS) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
pd->src = (struct pf_addr *)&h->ip_src;
@@ -10304,7 +10502,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
", pullup failed", __func__);
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
h = mtod(pd->m, struct ip6_hdr *);
@@ -10312,7 +10510,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
sizeof(struct ip6_hdr) + ntohs(h->ip6_plen)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
/*
@@ -10321,12 +10519,12 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
*/
if (htons(h->ip6_plen) == 0) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
if (pf_walk_header6(pd, h, reason) != PF_PASS) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
h = mtod(pd->m, struct ip6_hdr *);
@@ -10348,13 +10546,13 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
PF_PASS) {
*m0 = pd->m;
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
*m0 = pd->m;
if (pd->m == NULL) {
/* packet sits in reassembly queue, no error */
*action = PF_PASS;
- return (-1);
+ return (PF_DROP);
}
/* Update pointers into the packet. */
@@ -10366,7 +10564,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
if (pf_walk_header6(pd, h, reason) != PF_PASS) {
*action = PF_DROP;
- return (-1);
+ return (PF_DROP);
}
if (m_tag_find(pd->m, PACKET_TAG_PF_REASSEMBLED, NULL) != NULL) {
@@ -10392,11 +10590,11 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
case IPPROTO_TCP: {
struct tcphdr *th = &pd->hdr.tcp;
- if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th), action,
+ if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th),
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = sizeof(*th);
pd->p_len = pd->tot_len - pd->off - (th->th_off << 2);
@@ -10408,11 +10606,11 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
case IPPROTO_UDP: {
struct udphdr *uh = &pd->hdr.udp;
- if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh), action,
+ if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh),
reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = sizeof(*uh);
if (uh->uh_dport == 0 ||
@@ -10420,7 +10618,7 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
ntohs(uh->uh_ulen) < sizeof(struct udphdr)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->sport = &uh->uh_sport;
pd->dport = &uh->uh_dport;
@@ -10429,10 +10627,10 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
}
case IPPROTO_SCTP: {
if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.sctp, sizeof(pd->hdr.sctp),
- action, reason, af)) {
+ reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = sizeof(pd->hdr.sctp);
pd->p_len = pd->tot_len - pd->off;
@@ -10442,27 +10640,31 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
if (pd->hdr.sctp.src_port == 0 || pd->hdr.sctp.dest_port == 0) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
- }
- if (pf_scan_sctp(pd) != PF_PASS) {
- *action = PF_DROP;
- REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
+
/*
* Placeholder. The SCTP checksum is 32-bits, but
* pf_test_state() expects to update a 16-bit checksum.
* Provide a dummy value which we'll subsequently ignore.
+ * Do this before pf_scan_sctp() so any jobs we enqueue
+ * have a pcksum set.
*/
pd->pcksum = &pd->sctp_dummy_sum;
+
+ if (pf_scan_sctp(pd) != PF_PASS) {
+ *action = PF_DROP;
+ REASON_SET(reason, PFRES_SHORT);
+ return (PF_DROP);
+ }
break;
}
case IPPROTO_ICMP: {
if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp, ICMP_MINLEN,
- action, reason, af)) {
+ reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->pcksum = &pd->hdr.icmp.icmp_cksum;
pd->hdrlen = ICMP_MINLEN;
@@ -10473,10 +10675,10 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
size_t icmp_hlen = sizeof(struct icmp6_hdr);
if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen,
- action, reason, af)) {
+ reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
/* ICMP headers we look further into to match state */
switch (pd->hdr.icmp6.icmp6_type) {
@@ -10499,16 +10701,23 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
}
if (icmp_hlen > sizeof(struct icmp6_hdr) &&
!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen,
- action, reason, af)) {
+ reason, af)) {
*action = PF_DROP;
REASON_SET(reason, PFRES_SHORT);
- return (-1);
+ return (PF_DROP);
}
pd->hdrlen = icmp_hlen;
pd->pcksum = &pd->hdr.icmp6.icmp6_cksum;
break;
}
#endif /* INET6 */
+ default:
+ /*
+ * Placeholder value, so future calls to pf_change_ap() don't
+ * try to update a NULL checksum pointer.
+ */
+ pd->pcksum = &pd->sctp_dummy_sum;
+ break;
}
if (pd->sport)
@@ -10516,111 +10725,175 @@ pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
if (pd->dport)
pd->odport = pd->ndport = *pd->dport;
- return (0);
+ MPASS(pd->pcksum != NULL);
+
+ return (PF_PASS);
+}
+
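The new default case above gives every protocol a valid checksum target. A small sketch of why the placeholder is cheaper than a NULL test at each update site; the struct and helpers are illustrative, not the pf API:

    #include <stddef.h>
    #include <stdint.h>

    struct desc {
        uint16_t *pcksum;                /* always valid after setup */
        uint16_t  dummy_sum;             /* scratch target, ignored afterwards */
    };

    static void
    setup(struct desc *d, uint16_t *real_cksum)
    {
        d->pcksum = (real_cksum != NULL) ? real_cksum : &d->dummy_sum;
    }

    static void
    apply_delta(struct desc *d, uint16_t delta)
    {
        *d->pcksum += delta;             /* no per-call NULL test needed */
    }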
+static __inline void
+pf_rule_counters_inc(struct pf_pdesc *pd, struct pf_krule *r, int dir_out,
+ int op_pass, sa_family_t af, struct pf_addr *src_host,
+ struct pf_addr *dst_host)
+{
+ pf_counter_u64_add_protected(&(r->packets[dir_out]), 1);
+ pf_counter_u64_add_protected(&(r->bytes[dir_out]), pd->tot_len);
+ pf_update_timestamp(r);
+
+ if (r->src.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(r->src.addr.p.tbl, src_host, af,
+ pd->tot_len, dir_out, op_pass, r->src.neg);
+ if (r->dst.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(r->dst.addr.p.tbl, dst_host, af,
+ pd->tot_len, dir_out, op_pass, r->dst.neg);
}
static void
-pf_counters_inc(int action, struct pf_pdesc *pd,
- struct pf_kstate *s, struct pf_krule *r, struct pf_krule *a)
+pf_counters_inc(int action, struct pf_pdesc *pd, struct pf_kstate *s,
+ struct pf_krule *r, struct pf_krule *a, struct pf_krule_slist *match_rules)
{
- struct pf_krule *tr;
- int dir = pd->dir;
- int dirndx;
+ struct pf_krule_slist *mr = match_rules;
+ struct pf_krule_item *ri;
+ struct pf_krule *nr = NULL;
+ struct pf_addr *src_host = pd->src;
+ struct pf_addr *dst_host = pd->dst;
+ struct pf_state_key *key;
+ int dir_out = (pd->dir == PF_OUT);
+ int op_r_pass = (r->action == PF_PASS);
+ int op_pass = (action == PF_PASS || action == PF_AFRT);
+ int s_dir_in, s_dir_out, s_dir_rev;
+ sa_family_t af = pd->af;
pf_counter_u64_critical_enter();
+
+ /*
+ * Set the AF for the interface counters; it will later be
+ * overwritten for the rule and state counters with the value from
+ * the proper state key.
+ */
+ if (action == PF_AFRT) {
+ MPASS(s != NULL);
+ if (s->direction == PF_OUT && dir_out)
+ af = pd->naf;
+ }
+
pf_counter_u64_add_protected(
- &pd->kif->pfik_bytes[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS],
+ &pd->kif->pfik_bytes[af == AF_INET6][dir_out][!op_pass],
pd->tot_len);
pf_counter_u64_add_protected(
- &pd->kif->pfik_packets[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS],
+ &pd->kif->pfik_packets[af == AF_INET6][dir_out][!op_pass],
1);
- if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) {
- dirndx = (dir == PF_OUT);
- pf_counter_u64_add_protected(&r->packets[dirndx], 1);
- pf_counter_u64_add_protected(&r->bytes[dirndx], pd->tot_len);
- pf_update_timestamp(r);
+ /* If the rule has failed to apply, don't increase its counters */
+ if (!(op_pass || r->action == PF_DROP)) {
+ pf_counter_u64_critical_exit();
+ return;
+ }
- if (a != NULL) {
- pf_counter_u64_add_protected(&a->packets[dirndx], 1);
- pf_counter_u64_add_protected(&a->bytes[dirndx], pd->tot_len);
+ if (s != NULL) {
+ PF_STATE_LOCK_ASSERT(s);
+ mr = &(s->match_rules);
+
+ /*
+ * For af-to on the inbound direction we can determine the
+ * direction of the passing packet only by checking the direction
+ * of the AF translation. The af-to in "in" direction covers both
+ * the inbound and the outbound side of state tracking, so pd->dir
+ * is always PF_IN. We set dir_out and s_dir_rev so that packets
+ * are counted as if the state were outbound, because pfctl -ss
+ * shows the state with "->", as if it were outbound.
+ */
+ if (action == PF_AFRT && s->direction == PF_IN) {
+ dir_out = (pd->naf == s->rule->naf);
+ s_dir_in = 1;
+ s_dir_out = 0;
+ s_dir_rev = (pd->naf == s->rule->af);
+ } else {
+ dir_out = (pd->dir == PF_OUT);
+ s_dir_in = (s->direction == PF_IN);
+ s_dir_out = (s->direction == PF_OUT);
+ s_dir_rev = (pd->dir != s->direction);
}
- if (s != NULL) {
- struct pf_krule_item *ri;
- if (s->nat_rule != NULL) {
- pf_counter_u64_add_protected(&s->nat_rule->packets[dirndx],
+ /* pd->tot_len is problematic with af-to rules. Sure, we can
+ * agree that it's the post-af-to packet length that was
+ * forwarded through a state, but what about tables which match
+ * on pre-af-to addresses? We don't have access to the original
+ * packet length anymore.
+ */
+ s->packets[s_dir_rev]++;
+ s->bytes[s_dir_rev] += pd->tot_len;
+
+ /*
+ * Source nodes are accessed unlocked here. But since we are
+ * operating with stateful tracking and the state is locked,
+ * those SNs could not have been freed.
+ */
+ for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
+ if (s->sns[sn_type] != NULL) {
+ counter_u64_add(
+ s->sns[sn_type]->packets[dir_out],
1);
- pf_counter_u64_add_protected(&s->nat_rule->bytes[dirndx],
+ counter_u64_add(
+ s->sns[sn_type]->bytes[dir_out],
pd->tot_len);
}
- /*
- * Source nodes are accessed unlocked here.
- * But since we are operating with stateful tracking
- * and the state is locked, those SNs could not have
- * been freed.
- */
- for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
- if (s->sns[sn_type] != NULL) {
- counter_u64_add(
- s->sns[sn_type]->packets[dirndx],
- 1);
- counter_u64_add(
- s->sns[sn_type]->bytes[dirndx],
- pd->tot_len);
- }
- }
- dirndx = (dir == s->direction) ? 0 : 1;
- s->packets[dirndx]++;
- s->bytes[dirndx] += pd->tot_len;
-
- SLIST_FOREACH(ri, &s->match_rules, entry) {
- pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1);
- pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd->tot_len);
+ }
- if (ri->r->src.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(ri->r->src.addr.p.tbl,
- (s == NULL) ? pd->src :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_OUT)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, ri->r->src.neg);
- if (ri->r->dst.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(ri->r->dst.addr.p.tbl,
- (s == NULL) ? pd->dst :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_IN)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, ri->r->dst.neg);
+ /* Start with pre-NAT addresses */
+ key = s->key[(s->direction == PF_OUT)];
+ src_host = &(key->addr[s_dir_out]);
+ dst_host = &(key->addr[s_dir_in]);
+ af = key->af;
+ if (s->nat_rule) {
+ /* Old-style NAT rules */
+ if (s->nat_rule->action == PF_NAT ||
+ s->nat_rule->action == PF_RDR ||
+ s->nat_rule->action == PF_BINAT) {
+ nr = s->nat_rule;
+ pf_rule_counters_inc(pd, s->nat_rule, dir_out,
+ op_r_pass, af, src_host, dst_host);
+ /* Use post-NAT addresses from now on */
+ key = s->key[s_dir_in];
+ src_host = &(key->addr[s_dir_out]);
+ dst_host = &(key->addr[s_dir_in]);
+ af = key->af;
}
}
+ }
- tr = r;
- if (s != NULL && s->nat_rule != NULL &&
- r == &V_pf_default_rule)
- tr = s->nat_rule;
-
- if (tr->src.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(tr->src.addr.p.tbl,
- (s == NULL) ? pd->src :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_OUT)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, tr->src.neg);
- if (tr->dst.addr.type == PF_ADDR_TABLE)
- pfr_update_stats(tr->dst.addr.p.tbl,
- (s == NULL) ? pd->dst :
- &s->key[(s->direction == PF_IN)]->
- addr[(s->direction == PF_IN)],
- pd->af, pd->tot_len, dir == PF_OUT,
- r->action == PF_PASS, tr->dst.neg);
+ SLIST_FOREACH(ri, mr, entry) {
+ pf_rule_counters_inc(pd, ri->r, dir_out, op_r_pass, af,
+ src_host, dst_host);
+ if (s && s->nat_rule == ri->r) {
+ /* Use post-NAT addresses after a match NAT rule */
+ key = s->key[s_dir_in];
+ src_host = &(key->addr[s_dir_out]);
+ dst_host = &(key->addr[s_dir_in]);
+ af = key->af;
+ }
+ }
+
+ if (s == NULL) {
+ pf_free_match_rules(mr);
}
+
+ if (a != NULL) {
+ pf_rule_counters_inc(pd, a, dir_out, op_r_pass, af,
+ src_host, dst_host);
+ }
+
+ if (r != nr) {
+ pf_rule_counters_inc(pd, r, dir_out, op_r_pass, af,
+ src_host, dst_host);
+ }
+
pf_counter_u64_critical_exit();
}
+
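The refactored pf_counters_inc() keeps pf's three-axis interface counters: address family, direction, and pass/block. A compact model of that indexing, mirroring the pfik_bytes[af == AF_INET6][dir_out][!op_pass] expressions above; the struct and helper are illustrative:

    #include <stdint.h>

    struct kif_counters {
        /* [is_ipv6][is_outbound][is_blocked] */
        uint64_t bytes[2][2][2];
        uint64_t packets[2][2][2];
    };

    static void
    account(struct kif_counters *c, int is_ipv6, int is_out, int passed,
        uint64_t len)
    {
        c->bytes[is_ipv6][is_out][!passed] += len;
        c->packets[is_ipv6][is_out][!passed] += 1;
    }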
static void
pf_log_matches(struct pf_pdesc *pd, struct pf_krule *rm,
struct pf_krule *am, struct pf_kruleset *ruleset,
- struct pf_krule_slist *matchrules)
+ struct pf_krule_slist *match_rules)
{
struct pf_krule_item *ri;
@@ -10628,7 +10901,7 @@ pf_log_matches(struct pf_pdesc *pd, struct pf_krule *rm,
if (rm->log & PF_LOG_MATCHES)
return;
- SLIST_FOREACH(ri, matchrules, entry)
+ SLIST_FOREACH(ri, match_rules, entry)
if (ri->r->log & PF_LOG_MATCHES)
PFLOG_PACKET(rm->action, PFRES_MATCH, rm, am,
ruleset, pd, 1, ri->r);
@@ -10645,6 +10918,8 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
struct pf_krule *a = NULL, *r = &V_pf_default_rule;
struct pf_kstate *s = NULL;
struct pf_kruleset *ruleset = NULL;
+ struct pf_krule_item *ri;
+ struct pf_krule_slist match_rules;
struct pf_pdesc pd;
int use_2nd_queue = 0;
uint16_t tag;
@@ -10681,6 +10956,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
}
pf_init_pdesc(&pd, *m0);
+ SLIST_INIT(&match_rules);
if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_MTAG_FLAG_ROUTE_TO)) {
pd.pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
@@ -10713,7 +10989,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
PF_RULES_RLOCK();
if (pf_setup_pdesc(af, dir, &pd, m0, &action, &reason,
- kif, default_actions) == -1) {
+ kif, default_actions) != PF_PASS) {
if (action != PF_PASS)
pd.act.log |= PF_LOG_FORCE;
goto done;
@@ -10777,7 +11053,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
action = PF_DROP;
else
action = pf_test_rule(&r, &s, &pd, &a,
- &ruleset, &reason, inp);
+ &ruleset, &reason, inp, &match_rules);
if (action != PF_PASS)
REASON_SET(&reason, PFRES_FRAG);
break;
@@ -10786,7 +11062,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
/* Respond to SYN with a syncookie. */
if ((tcp_get_flags(&pd.hdr.tcp) & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
pd.dir == PF_IN && pf_synflood_check(&pd)) {
- pf_syncookie_send(&pd);
+ pf_syncookie_send(&pd, &reason);
action = PF_DROP;
break;
}
@@ -10810,7 +11086,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
pd.dir == PF_IN) {
struct mbuf *msyn;
- msyn = pf_syncookie_recreate_syn(&pd);
+ msyn = pf_syncookie_recreate_syn(&pd, &reason);
if (msyn == NULL) {
action = PF_DROP;
break;
@@ -10835,7 +11111,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
break;
} else {
action = pf_test_rule(&r, &s, &pd,
- &a, &ruleset, &reason, inp);
+ &a, &ruleset, &reason, inp, &match_rules);
}
}
break;
@@ -10856,7 +11132,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
a = s->anchor;
} else if (s == NULL) {
action = pf_test_rule(&r, &s,
- &pd, &a, &ruleset, &reason, inp);
+ &pd, &a, &ruleset, &reason, inp, &match_rules);
}
break;
@@ -10884,7 +11160,7 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
a = s->anchor;
} else if (s == NULL)
action = pf_test_rule(&r, &s, &pd,
- &a, &ruleset, &reason, inp);
+ &a, &ruleset, &reason, inp, &match_rules);
break;
}
@@ -10893,8 +11169,11 @@ pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0
done:
PF_RULES_RUNLOCK();
- if (pd.m == NULL)
+ /* If the packet sits in the reassembly queue, return without error. */
+ if (pd.m == NULL) {
+ pf_free_match_rules(&match_rules);
goto eat_pkt;
+ }
if (s)
memcpy(&pd.act, &s->act, sizeof(s->act));
@@ -10991,6 +11270,8 @@ done:
(dir == PF_IN) ? PF_DIVERT_MTAG_DIR_IN :
PF_DIVERT_MTAG_DIR_OUT;
+ pf_counters_inc(action, &pd, s, r, a, &match_rules);
+
if (s)
PF_STATE_UNLOCK(s);
@@ -11032,7 +11313,6 @@ done:
if (pd.act.log) {
struct pf_krule *lr;
- struct pf_krule_item *ri;
if (s != NULL && s->nat_rule != NULL &&
s->nat_rule->log & PF_LOG_ALL)
@@ -11051,7 +11331,7 @@ done:
}
}
- pf_counters_inc(action, &pd, s, r, a);
+ pf_counters_inc(action, &pd, s, r, a, &match_rules);
switch (action) {
case PF_SYNPROXY_DROP:
diff --git a/sys/netpfil/pf/pf.h b/sys/netpfil/pf/pf.h
index 51b3fd6390e1..bcd66fd17d5d 100644
--- a/sys/netpfil/pf/pf.h
+++ b/sys/netpfil/pf/pf.h
@@ -131,6 +131,7 @@ enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
#define PF_POOL_TYPEMASK 0x0f
#define PF_POOL_STICKYADDR 0x20
#define PF_POOL_ENDPI 0x40
+#define PF_POOL_IPV6NH 0x80
#define PF_WSCALE_FLAG 0x80
#define PF_WSCALE_MASK 0x0f
@@ -246,6 +247,12 @@ enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
#define SCNT_SRC_NODE_REMOVALS 2
#define SCNT_MAX 3
+/* fragment counters */
+#define NCNT_FRAG_SEARCH 0
+#define NCNT_FRAG_INSERT 1
+#define NCNT_FRAG_REMOVALS 2
+#define NCNT_MAX 3
+
#define PF_TABLE_NAME_SIZE 32
#define PF_QNAME_SIZE 64
@@ -630,6 +637,8 @@ struct pf_rule {
#define PFRULE_PFLOW 0x00040000
#define PFRULE_ALLOW_RELATED 0x00080000
#define PFRULE_AFTO 0x00200000 /* af-to rule */
+#define PFRULE_ONCE 0x00400000 /* one shot rule */
+#define PFRULE_EXPIRED 0x00800000 /* one shot rule hit by pkt */
#ifdef _KERNEL
#define PFRULE_REFS 0x0080 /* rule has references */
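PFRULE_ONCE and PFRULE_EXPIRED describe one-shot rules: the first packet that hits such a rule marks it expired, so later packets no longer match it. A hedged sketch of one plausible flag handshake; the helper and atomics are invented for illustration, and the actual pf match path is more involved:

    #include <stdatomic.h>

    #define RULE_ONCE    0x00400000u
    #define RULE_EXPIRED 0x00800000u

    struct rule { _Atomic unsigned int flags; };

    static int
    rule_usable(struct rule *r)
    {
        unsigned int f = atomic_load(&r->flags);

        if (f & RULE_EXPIRED)
            return (0);                  /* already consumed by a packet */
        if (f & RULE_ONCE)
            atomic_fetch_or(&r->flags, RULE_EXPIRED);
        return (1);
    }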
diff --git a/sys/netpfil/pf/pf_if.c b/sys/netpfil/pf/pf_if.c
index e2200c15c704..f3be036ef745 100644
--- a/sys/netpfil/pf/pf_if.c
+++ b/sys/netpfil/pf/pf_if.c
@@ -655,8 +655,10 @@ pfi_kkif_update(struct pfi_kkif *kif)
/* again for all groups kif is member of */
if (kif->pfik_ifp != NULL) {
CK_STAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
- pfi_kkif_update((struct pfi_kkif *)
- ifgl->ifgl_group->ifg_pf_kif);
+ if (ifgl->ifgl_group->ifg_pf_kif) {
+ pfi_kkif_update((struct pfi_kkif *)
+ ifgl->ifgl_group->ifg_pf_kif);
+ }
}
}
diff --git a/sys/netpfil/pf/pf_ioctl.c b/sys/netpfil/pf/pf_ioctl.c
index b6f5d74b5b42..a4557f139ae5 100644
--- a/sys/netpfil/pf/pf_ioctl.c
+++ b/sys/netpfil/pf/pf_ioctl.c
@@ -116,7 +116,6 @@ static int pf_rollback_altq(u_int32_t);
static int pf_commit_altq(u_int32_t);
static int pf_enable_altq(struct pf_altq *);
static int pf_disable_altq(struct pf_altq *);
-static uint16_t pf_qname2qid(const char *);
static void pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int pf_begin_rules(u_int32_t *, int, const char *);
@@ -187,6 +186,7 @@ VNET_DEFINE(uma_zone_t, pf_tag_z);
#define V_pf_tag_z VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
+MALLOC_DEFINE(M_PF, "pf", "pf(4)");
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
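The new M_PF malloc type replaces the generic M_TEMP used throughout the ioctl paths below, so pf's allocations are attributed to their own bucket in vmstat -m. The malloc(9) pattern, in a self-contained sketch with an illustrative type name:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>

    static MALLOC_DEFINE(M_EXAMPLE, "example", "example module allocations");

    static void *
    example_alloc(size_t len)
    {
        /* Shows up under "example" in vmstat -m instead of a shared bucket. */
        return (malloc(len, M_EXAMPLE, M_WAITOK | M_ZERO));
    }

    static void
    example_free(void *p)
    {
        free(p, M_EXAMPLE);
    }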
@@ -213,8 +213,7 @@ static void pf_init_tagset(struct pf_tagset *, unsigned int *,
static void pf_cleanup_tagset(struct pf_tagset *);
static uint16_t tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t tag2hashindex(const struct pf_tagset *, uint16_t);
-static u_int16_t tagname2tag(struct pf_tagset *, const char *);
-static u_int16_t pf_tagname2tag(const char *);
+static u_int16_t tagname2tag(struct pf_tagset *, const char *, bool);
static void tag_unref(struct pf_tagset *, u_int16_t);
struct cdev *pf_dev;
@@ -260,7 +259,7 @@ static void dehook_pf_eth(void);
static void dehook_pf(void);
static int shutdown_pf(void);
static int pf_load(void);
-static void pf_unload(void);
+static void pf_unload(void *);
static struct cdevsw pf_cdevsw = {
.d_ioctl = pfioctl,
@@ -285,6 +284,7 @@ int pf_end_threads;
struct proc *pf_purge_proc;
VNET_DEFINE(struct rmlock, pf_rules_lock);
+VNET_DEFINE(struct rmlock, pf_tags_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define V_pf_ioctl_lock VNET(pf_ioctl_lock)
struct sx pf_end_lock;
@@ -420,6 +420,8 @@ pfattach_vnet(void)
pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
for (int i = 0; i < SCNT_MAX; i++)
V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
+ for (int i = 0; i < NCNT_MAX; i++)
+ V_pf_status.ncounters[i] = counter_u64_alloc(M_WAITOK);
if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
INTR_MPSAFE, &V_pf_swi_cookie) != 0)
@@ -684,19 +686,50 @@ tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
}
static u_int16_t
-tagname2tag(struct pf_tagset *ts, const char *tagname)
+tagname2tag(struct pf_tagset *ts, const char *tagname, bool add_new)
{
struct pf_tagname *tag;
u_int32_t index;
u_int16_t new_tagid;
- PF_RULES_WASSERT();
+ PF_TAGS_RLOCK_TRACKER;
+
+ PF_TAGS_RLOCK();
index = tagname2hashindex(ts, tagname);
TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
if (strcmp(tagname, tag->name) == 0) {
tag->ref++;
- return (tag->tag);
+ new_tagid = tag->tag;
+ PF_TAGS_RUNLOCK();
+ return (new_tagid);
+ }
+
+ /*
+ * When used for pfsync with queues we must not create new entries.
+ * Pf tags can be created just fine by this function, but queues
+ * require additional configuration. If they are missing on the target
+ * system we just ignore them.
+ */
+ if (add_new == false) {
+ printf("%s: Not creating a new tag\n", __func__);
+ PF_TAGS_RUNLOCK();
+ return (0);
+ }
+
+ /*
+ * If a new entry must be created do it under a write lock.
+ * But first search again, somebody could have created the tag
+ * between unlocking the read lock and locking the write lock.
+ */
+ PF_TAGS_RUNLOCK();
+ PF_TAGS_WLOCK();
+ TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
+ if (strcmp(tagname, tag->name) == 0) {
+ tag->ref++;
+ new_tagid = tag->tag;
+ PF_TAGS_WUNLOCK();
+ return (new_tagid);
}
/*
@@ -713,16 +746,20 @@ tagname2tag(struct pf_tagset *ts, const char *tagname)
* to rounding of the number of bits in the vector up to a multiple
* of the vector word size at declaration/allocation time.
*/
- if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
+ if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) {
+ PF_TAGS_WUNLOCK();
return (0);
+ }
/* Mark the tag as in use. Bits are 0-based for BIT_CLR() */
BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
/* allocate and fill new struct pf_tagname */
tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
- if (tag == NULL)
+ if (tag == NULL) {
+ PF_TAGS_WUNLOCK();
return (0);
+ }
strlcpy(tag->name, tagname, sizeof(tag->name));
tag->tag = new_tagid;
tag->ref = 1;
@@ -734,7 +771,29 @@ tagname2tag(struct pf_tagset *ts, const char *tagname)
index = tag2hashindex(ts, new_tagid);
TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
- return (tag->tag);
+ PF_TAGS_WUNLOCK();
+ return (new_tagid);
+}
+
+static char *
+tag2tagname(struct pf_tagset *ts, u_int16_t tag)
+{
+ struct pf_tagname *t;
+ uint16_t index;
+
+ PF_TAGS_RLOCK_TRACKER;
+
+ PF_TAGS_RLOCK();
+
+ index = tag2hashindex(ts, tag);
+ TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
+ if (tag == t->tag) {
+ PF_TAGS_RUNLOCK();
+ return (t->name);
+ }
+
+ PF_TAGS_RUNLOCK();
+ return (NULL);
}
static void
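tagname2tag() now follows a classic lookup-then-upgrade pattern: search under the read lock, and on a miss retake the lock as a writer and search again, because another thread may have inserted the same name between the two lock acquisitions. A generic userspace sketch of the same pattern with pthreads; the list and names are illustrative:

    #include <pthread.h>
    #include <string.h>

    struct entry { const char *name; int id; struct entry *next; };

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct entry *head;

    static struct entry *
    find(const char *name)
    {
        for (struct entry *e = head; e != NULL; e = e->next)
            if (strcmp(e->name, name) == 0)
                return (e);
        return (NULL);
    }

    int
    lookup_or_insert(const char *name, struct entry *fresh)
    {
        struct entry *e;

        pthread_rwlock_rdlock(&lock);
        e = find(name);
        pthread_rwlock_unlock(&lock);
        if (e != NULL)
            return (e->id);

        pthread_rwlock_wrlock(&lock);
        if ((e = find(name)) == NULL) {  /* re-check: lost a race? */
            fresh->name = name;
            fresh->next = head;
            head = fresh;
            e = fresh;
        }
        pthread_rwlock_unlock(&lock);
        return (e->id);
    }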
@@ -743,7 +802,7 @@ tag_unref(struct pf_tagset *ts, u_int16_t tag)
struct pf_tagname *t;
uint16_t index;
- PF_RULES_WASSERT();
+ PF_TAGS_WLOCK();
index = tag2hashindex(ts, tag);
TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
@@ -760,12 +819,20 @@ tag_unref(struct pf_tagset *ts, u_int16_t tag)
}
break;
}
+
+ PF_TAGS_WUNLOCK();
}
-static uint16_t
+uint16_t
pf_tagname2tag(const char *tagname)
{
- return (tagname2tag(&V_pf_tags, tagname));
+ return (tagname2tag(&V_pf_tags, tagname, true));
+}
+
+static const char *
+pf_tag2tagname(uint16_t tag)
+{
+ return (tag2tagname(&V_pf_tags, tag));
}
static int
@@ -896,10 +963,16 @@ pf_commit_eth(uint32_t ticket, const char *anchor)
}
#ifdef ALTQ
-static uint16_t
-pf_qname2qid(const char *qname)
+uint16_t
+pf_qname2qid(const char *qname, bool add_new)
{
- return (tagname2tag(&V_pf_qids, qname));
+ return (tagname2tag(&V_pf_qids, qname, add_new));
+}
+
+static const char *
+pf_qid2qname(uint16_t qid)
+{
+ return (tag2tagname(&V_pf_qids, qid));
}
static void
@@ -1148,7 +1221,7 @@ pf_altq_ifnet_event(struct ifnet *ifp, int remove)
}
bcopy(a1, a2, sizeof(struct pf_altq));
- if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
+ if ((a2->qid = pf_qname2qid(a2->qname, true)) == 0) {
error = EBUSY;
free(a2, M_PFALTQ);
break;
@@ -1181,18 +1254,18 @@ pf_rule_tree_alloc(int flags)
{
struct pf_krule_global *tree;
- tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
+ tree = malloc(sizeof(struct pf_krule_global), M_PF, flags);
if (tree == NULL)
return (NULL);
RB_INIT(tree);
return (tree);
}
-static void
+void
pf_rule_tree_free(struct pf_krule_global *tree)
{
- free(tree, M_TEMP);
+ free(tree, M_PF);
}
static int
@@ -1211,7 +1284,7 @@ pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
return (ENOMEM);
rs = pf_find_or_create_kruleset(anchor);
if (rs == NULL) {
- free(tree, M_TEMP);
+ pf_rule_tree_free(tree);
return (EINVAL);
}
pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
@@ -1432,7 +1505,7 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
rs->rules[rs_num].inactive.rcount = 0;
rs->rules[rs_num].inactive.open = 0;
pf_remove_if_empty_kruleset(rs);
- free(old_tree, M_TEMP);
+ pf_rule_tree_free(old_tree);
return (0);
}
@@ -1603,7 +1676,7 @@ pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
SATU32(q->pq_u.hfsc_opts.x)
-
+
ASSIGN_OPT_SATU32(rtsc_m1);
ASSIGN_OPT(rtsc_d);
ASSIGN_OPT_SATU32(rtsc_m2);
@@ -1617,7 +1690,7 @@ pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
ASSIGN_OPT_SATU32(ulsc_m2);
ASSIGN_OPT(flags);
-
+
#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
} else
@@ -1725,7 +1798,7 @@ pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
ASSIGN_OPT(ulsc_m2);
ASSIGN_OPT(flags);
-
+
#undef ASSIGN_OPT
} else
COPY(pq_u);
@@ -1757,7 +1830,7 @@ pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
ASSIGN(qid);
break;
}
- default:
+ default:
panic("%s: unhandled struct pfioc_altq version", __func__);
break;
}
@@ -2188,11 +2261,11 @@ pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
#ifdef ALTQ
/* set queue IDs */
if (rule->qname[0] != 0) {
- if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
+ if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
ERROUT(EBUSY);
else if (rule->pqname[0] != 0) {
if ((rule->pqid =
- pf_qname2qid(rule->pqname)) == 0)
+ pf_qname2qid(rule->pqname, true)) == 0)
ERROUT(EBUSY);
} else
rule->pqid = rule->qid;
@@ -2276,6 +2349,7 @@ pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
rule->route.cur = TAILQ_FIRST(&rule->route.list);
+ rule->route.ipv6_nexthop_af = AF_INET6;
TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
rule, entries);
ruleset->rules[rs_num].inactive.rcount++;
@@ -2506,6 +2580,8 @@ pf_ioctl_clear_status(void)
pf_counter_u64_zero(&V_pf_status.fcounters[i]);
for (int i = 0; i < SCNT_MAX; i++)
counter_u64_zero(V_pf_status.scounters[i]);
+ for (int i = 0; i < NCNT_MAX; i++)
+ counter_u64_zero(V_pf_status.ncounters[i]);
for (int i = 0; i < KLCNT_MAX; i++)
counter_u64_zero(V_pf_status.lcounters[i]);
V_pf_status.since = time_uptime;
@@ -3308,7 +3384,7 @@ DIOCGETETHRULE_error:
#ifdef ALTQ
/* set queue IDs */
if (rule->qname[0] != 0) {
- if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
+ if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
error = EBUSY;
else
rule->qid = rule->qid;
@@ -3859,11 +3935,11 @@ DIOCGETRULENV_error:
/* set queue IDs */
if (newrule->qname[0] != 0) {
if ((newrule->qid =
- pf_qname2qid(newrule->qname)) == 0)
+ pf_qname2qid(newrule->qname, true)) == 0)
error = EBUSY;
else if (newrule->pqname[0] != 0) {
if ((newrule->pqid =
- pf_qname2qid(newrule->pqname)) == 0)
+ pf_qname2qid(newrule->pqname, true)) == 0)
error = EBUSY;
} else
newrule->pqid = newrule->qid;
@@ -4076,7 +4152,7 @@ DIOCCHANGERULE_error:
out = ps->ps_states;
pstore = mallocarray(slice_count,
- sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
+ sizeof(struct pfsync_state_1301), M_PF, M_WAITOK | M_ZERO);
nr = 0;
for (i = 0; i <= V_pf_hashmask; i++) {
@@ -4098,10 +4174,10 @@ DIOCGETSTATES_retry:
if (count > slice_count) {
PF_HASHROW_UNLOCK(ih);
- free(pstore, M_TEMP);
+ free(pstore, M_PF);
slice_count = count * 2;
pstore = mallocarray(slice_count,
- sizeof(struct pfsync_state_1301), M_TEMP,
+ sizeof(struct pfsync_state_1301), M_PF,
M_WAITOK | M_ZERO);
goto DIOCGETSTATES_retry;
}
@@ -4123,13 +4199,15 @@ DIOCGETSTATES_retry:
PF_HASHROW_UNLOCK(ih);
error = copyout(pstore, out,
sizeof(struct pfsync_state_1301) * count);
- if (error)
+ if (error) {
+ free(pstore, M_PF);
goto fail;
+ }
out = ps->ps_states + nr;
}
DIOCGETSTATES_full:
ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
- free(pstore, M_TEMP);
+ free(pstore, M_PF);
break;
}
@@ -4155,7 +4233,7 @@ DIOCGETSTATES_full:
out = ps->ps_states;
pstore = mallocarray(slice_count,
- sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
+ sizeof(struct pf_state_export), M_PF, M_WAITOK | M_ZERO);
nr = 0;
for (i = 0; i <= V_pf_hashmask; i++) {
@@ -4177,10 +4255,10 @@ DIOCGETSTATESV2_retry:
if (count > slice_count) {
PF_HASHROW_UNLOCK(ih);
- free(pstore, M_TEMP);
+ free(pstore, M_PF);
slice_count = count * 2;
pstore = mallocarray(slice_count,
- sizeof(struct pf_state_export), M_TEMP,
+ sizeof(struct pf_state_export), M_PF,
M_WAITOK | M_ZERO);
goto DIOCGETSTATESV2_retry;
}
@@ -4201,13 +4279,15 @@ DIOCGETSTATESV2_retry:
PF_HASHROW_UNLOCK(ih);
error = copyout(pstore, out,
sizeof(struct pf_state_export) * count);
- if (error)
+ if (error) {
+ free(pstore, M_PF);
goto fail;
+ }
out = ps->ps_states + nr;
}
DIOCGETSTATESV2_full:
ps->ps_len = nr * sizeof(struct pf_state_export);
- free(pstore, M_TEMP);
+ free(pstore, M_PF);
break;
}
@@ -4390,7 +4470,7 @@ DIOCGETSTATESV2_full:
* copy the necessary fields
*/
if (altq->qname[0] != 0) {
- if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
+ if ((altq->qid = pf_qname2qid(altq->qname, true)) == 0) {
PF_RULES_WUNLOCK();
error = EBUSY;
free(altq, M_PFALTQ);
@@ -4737,17 +4817,17 @@ DIOCCHANGEADDR_error:
totlen = io->pfrio_size * sizeof(struct pfr_table);
pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfrts, totlen);
if (error) {
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
goto fail;
}
PF_RULES_WLOCK();
error = pfr_add_tables(pfrts, io->pfrio_size,
&io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_WUNLOCK();
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
break;
}
@@ -4769,17 +4849,17 @@ DIOCCHANGEADDR_error:
totlen = io->pfrio_size * sizeof(struct pfr_table);
pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfrts, totlen);
if (error) {
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
goto fail;
}
PF_RULES_WLOCK();
error = pfr_del_tables(pfrts, io->pfrio_size,
&io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_WUNLOCK();
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
break;
}
@@ -4805,7 +4885,7 @@ DIOCCHANGEADDR_error:
totlen = io->pfrio_size * sizeof(struct pfr_table);
pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
- M_TEMP, M_NOWAIT | M_ZERO);
+ M_PF, M_NOWAIT | M_ZERO);
if (pfrts == NULL) {
error = ENOMEM;
PF_RULES_RUNLOCK();
@@ -4816,7 +4896,7 @@ DIOCCHANGEADDR_error:
PF_RULES_RUNLOCK();
if (error == 0)
error = copyout(pfrts, io->pfrio_buffer, totlen);
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
break;
}
@@ -4843,7 +4923,7 @@ DIOCCHANGEADDR_error:
totlen = io->pfrio_size * sizeof(struct pfr_tstats);
pfrtstats = mallocarray(io->pfrio_size,
- sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
+ sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
if (pfrtstats == NULL) {
error = ENOMEM;
PF_RULES_RUNLOCK();
@@ -4856,7 +4936,7 @@ DIOCCHANGEADDR_error:
PF_TABLE_STATS_UNLOCK();
if (error == 0)
error = copyout(pfrtstats, io->pfrio_buffer, totlen);
- free(pfrtstats, M_TEMP);
+ free(pfrtstats, M_PF);
break;
}
@@ -4881,10 +4961,10 @@ DIOCCHANGEADDR_error:
totlen = io->pfrio_size * sizeof(struct pfr_table);
pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfrts, totlen);
if (error) {
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
goto fail;
}
@@ -4894,7 +4974,7 @@ DIOCCHANGEADDR_error:
&io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_RUNLOCK();
PF_TABLE_STATS_UNLOCK();
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
break;
}
@@ -4922,10 +5002,10 @@ DIOCCHANGEADDR_error:
totlen = io->pfrio_size * sizeof(struct pfr_table);
pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfrts, totlen);
if (error) {
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -4933,7 +5013,7 @@ DIOCCHANGEADDR_error:
io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
&io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_WUNLOCK();
- free(pfrts, M_TEMP);
+ free(pfrts, M_PF);
break;
}
@@ -4968,10 +5048,10 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_addr);
pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfras, totlen);
if (error) {
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -4982,7 +5062,7 @@ DIOCCHANGEADDR_error:
PF_RULES_WUNLOCK();
if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
error = copyout(pfras, io->pfrio_buffer, totlen);
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5003,10 +5083,10 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_addr);
pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfras, totlen);
if (error) {
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5016,7 +5096,7 @@ DIOCCHANGEADDR_error:
PF_RULES_WUNLOCK();
if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
error = copyout(pfras, io->pfrio_buffer, totlen);
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5040,11 +5120,11 @@ DIOCCHANGEADDR_error:
goto fail;
}
totlen = count * sizeof(struct pfr_addr);
- pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
+ pfras = mallocarray(count, sizeof(struct pfr_addr), M_PF,
M_WAITOK);
error = copyin(io->pfrio_buffer, pfras, totlen);
if (error) {
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5055,7 +5135,7 @@ DIOCCHANGEADDR_error:
PF_RULES_WUNLOCK();
if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
error = copyout(pfras, io->pfrio_buffer, totlen);
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5076,14 +5156,14 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_addr);
pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
- M_TEMP, M_WAITOK | M_ZERO);
+ M_PF, M_WAITOK | M_ZERO);
PF_RULES_RLOCK();
error = pfr_get_addrs(&io->pfrio_table, pfras,
&io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_RUNLOCK();
if (error == 0)
error = copyout(pfras, io->pfrio_buffer, totlen);
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5104,14 +5184,14 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_astats);
pfrastats = mallocarray(io->pfrio_size,
- sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
+ sizeof(struct pfr_astats), M_PF, M_WAITOK | M_ZERO);
PF_RULES_RLOCK();
error = pfr_get_astats(&io->pfrio_table, pfrastats,
&io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_RUNLOCK();
if (error == 0)
error = copyout(pfrastats, io->pfrio_buffer, totlen);
- free(pfrastats, M_TEMP);
+ free(pfrastats, M_PF);
break;
}
@@ -5132,10 +5212,10 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_addr);
pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfras, totlen);
if (error) {
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5145,7 +5225,7 @@ DIOCCHANGEADDR_error:
PF_RULES_WUNLOCK();
if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
error = copyout(pfras, io->pfrio_buffer, totlen);
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5166,10 +5246,10 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_addr);
pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfras, totlen);
if (error) {
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
goto fail;
}
PF_RULES_RLOCK();
@@ -5179,7 +5259,7 @@ DIOCCHANGEADDR_error:
PF_RULES_RUNLOCK();
if (error == 0)
error = copyout(pfras, io->pfrio_buffer, totlen);
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5200,10 +5280,10 @@ DIOCCHANGEADDR_error:
}
totlen = io->pfrio_size * sizeof(struct pfr_addr);
pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->pfrio_buffer, pfras, totlen);
if (error) {
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5211,7 +5291,7 @@ DIOCCHANGEADDR_error:
io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
PF_RULES_WUNLOCK();
- free(pfras, M_TEMP);
+ free(pfras, M_PF);
break;
}
@@ -5249,10 +5329,10 @@ DIOCCHANGEADDR_error:
}
totlen = sizeof(struct pfioc_trans_e) * io->size;
ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->array, ioes, totlen);
if (error) {
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5262,7 +5342,7 @@ DIOCCHANGEADDR_error:
case PF_RULESET_ETH:
if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
break;
@@ -5270,13 +5350,13 @@ DIOCCHANGEADDR_error:
case PF_RULESET_ALTQ:
if (ioe->anchor[0]) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EINVAL;
goto fail;
}
if ((error = pf_begin_altq(&ioe->ticket))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
break;
@@ -5291,7 +5371,7 @@ DIOCCHANGEADDR_error:
if ((error = pfr_ina_begin(&table,
&ioe->ticket, NULL, 0))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
break;
@@ -5300,7 +5380,7 @@ DIOCCHANGEADDR_error:
if ((error = pf_begin_rules(&ioe->ticket,
ioe->rs_num, ioe->anchor))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
break;
@@ -5308,7 +5388,7 @@ DIOCCHANGEADDR_error:
}
PF_RULES_WUNLOCK();
error = copyout(ioes, io->array, totlen);
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
break;
}
@@ -5330,10 +5410,10 @@ DIOCCHANGEADDR_error:
}
totlen = sizeof(struct pfioc_trans_e) * io->size;
ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->array, ioes, totlen);
if (error) {
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5344,7 +5424,7 @@ DIOCCHANGEADDR_error:
if ((error = pf_rollback_eth(ioe->ticket,
ioe->anchor))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5352,13 +5432,13 @@ DIOCCHANGEADDR_error:
case PF_RULESET_ALTQ:
if (ioe->anchor[0]) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EINVAL;
goto fail;
}
if ((error = pf_rollback_altq(ioe->ticket))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5373,7 +5453,7 @@ DIOCCHANGEADDR_error:
if ((error = pfr_ina_rollback(&table,
ioe->ticket, NULL, 0))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5382,14 +5462,14 @@ DIOCCHANGEADDR_error:
if ((error = pf_rollback_rules(ioe->ticket,
ioe->rs_num, ioe->anchor))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
}
}
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
break;
}
@@ -5415,10 +5495,10 @@ DIOCCHANGEADDR_error:
totlen = sizeof(struct pfioc_trans_e) * io->size;
ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
- M_TEMP, M_WAITOK);
+ M_PF, M_WAITOK);
error = copyin(io->array, ioes, totlen);
if (error) {
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail;
}
PF_RULES_WLOCK();
@@ -5431,7 +5511,7 @@ DIOCCHANGEADDR_error:
if (ers == NULL || ioe->ticket == 0 ||
ioe->ticket != ers->inactive.ticket) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EINVAL;
goto fail;
}
@@ -5440,14 +5520,14 @@ DIOCCHANGEADDR_error:
case PF_RULESET_ALTQ:
if (ioe->anchor[0]) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EINVAL;
goto fail;
}
if (!V_altqs_inactive_open || ioe->ticket !=
V_ticket_altqs_inactive) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EBUSY;
goto fail;
}
@@ -5458,7 +5538,7 @@ DIOCCHANGEADDR_error:
if (rs == NULL || !rs->topen || ioe->ticket !=
rs->tticket) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EBUSY;
goto fail;
}
@@ -5467,7 +5547,7 @@ DIOCCHANGEADDR_error:
if (ioe->rs_num < 0 || ioe->rs_num >=
PF_RULESET_MAX) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EINVAL;
goto fail;
}
@@ -5477,7 +5557,7 @@ DIOCCHANGEADDR_error:
rs->rules[ioe->rs_num].inactive.ticket !=
ioe->ticket) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
error = EBUSY;
goto fail;
}
@@ -5490,7 +5570,7 @@ DIOCCHANGEADDR_error:
case PF_RULESET_ETH:
if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5498,7 +5578,7 @@ DIOCCHANGEADDR_error:
case PF_RULESET_ALTQ:
if ((error = pf_commit_altq(ioe->ticket))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5513,7 +5593,7 @@ DIOCCHANGEADDR_error:
if ((error = pfr_ina_commit(&table,
ioe->ticket, NULL, NULL, 0))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5522,7 +5602,7 @@ DIOCCHANGEADDR_error:
if ((error = pf_commit_rules(ioe->ticket,
ioe->rs_num, ioe->anchor))) {
PF_RULES_WUNLOCK();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
goto fail; /* really bad */
}
break;
@@ -5536,7 +5616,7 @@ DIOCCHANGEADDR_error:
else
dehook_pf_eth();
- free(ioes, M_TEMP);
+ free(ioes, M_PF);
break;
}
@@ -5565,7 +5645,7 @@ DIOCCHANGEADDR_error:
nr = 0;
- p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
+ p = pstore = malloc(psn->psn_len, M_PF, M_WAITOK | M_ZERO);
for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
i++, sh++) {
PF_HASHROW_LOCK(sh);
@@ -5584,11 +5664,11 @@ DIOCCHANGEADDR_error:
error = copyout(pstore, psn->psn_src_nodes,
sizeof(struct pf_src_node) * nr);
if (error) {
- free(pstore, M_TEMP);
+ free(pstore, M_PF);
goto fail;
}
psn->psn_len = sizeof(struct pf_src_node) * nr;
- free(pstore, M_TEMP);
+ free(pstore, M_PF);
break;
}
@@ -5655,13 +5735,13 @@ DIOCCHANGEADDR_error:
bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
- M_TEMP, M_WAITOK | M_ZERO);
+ M_PF, M_WAITOK | M_ZERO);
PF_RULES_RLOCK();
pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
PF_RULES_RUNLOCK();
error = copyout(ifstore, io->pfiio_buffer, bufsiz);
- free(ifstore, M_TEMP);
+ free(ifstore, M_PF);
break;
}
@@ -5713,6 +5793,7 @@ fail:
void
pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
{
+ const char *tagname;
bzero(sp, sizeof(union pfsync_state_union));
/* copy from state key */
@@ -5724,8 +5805,6 @@ pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_
sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
- sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
- sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
/* copy from state */
strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
@@ -5737,16 +5816,31 @@ pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_
else
sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
- sp->pfs_1301.direction = st->direction;
- sp->pfs_1301.log = st->act.log;
- sp->pfs_1301.timeout = st->timeout;
-
switch (msg_version) {
case PFSYNC_MSG_VERSION_1301:
sp->pfs_1301.state_flags = st->state_flags;
+ sp->pfs_1301.direction = st->direction;
+ sp->pfs_1301.log = st->act.log;
+ sp->pfs_1301.timeout = st->timeout;
+ sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
+ sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
+ /*
+ * XXX Why do we bother pfsyncing source node information if source
+ * nodes are not synced? Showing users that there is source tracking
+ * when there is none seems useless.
+ */
+ if (st->sns[PF_SN_LIMIT] != NULL)
+ sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
+ if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
+ sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
break;
case PFSYNC_MSG_VERSION_1400:
sp->pfs_1400.state_flags = htons(st->state_flags);
+ sp->pfs_1400.direction = st->direction;
+ sp->pfs_1400.log = st->act.log;
+ sp->pfs_1400.timeout = st->timeout;
+ sp->pfs_1400.proto = st->key[PF_SK_WIRE]->proto;
+ sp->pfs_1400.af = st->key[PF_SK_WIRE]->af;
sp->pfs_1400.qid = htons(st->act.qid);
sp->pfs_1400.pqid = htons(st->act.pqid);
sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
@@ -5762,22 +5856,53 @@ pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_
strlcpy(sp->pfs_1400.rt_ifname,
st->act.rt_kif->pfik_name,
sizeof(sp->pfs_1400.rt_ifname));
+ /*
+ * XXX Why do we bother pfsyncing source node information if source
+ * nodes are not synced? Showing users that there is source tracking
+ * when there is none seems useless.
+ */
+ if (st->sns[PF_SN_LIMIT] != NULL)
+ sp->pfs_1400.sync_flags |= PFSYNC_FLAG_SRCNODE;
+ if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
+ sp->pfs_1400.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
+ break;
+ case PFSYNC_MSG_VERSION_1500:
+ sp->pfs_1500.state_flags = htons(st->state_flags);
+ sp->pfs_1500.direction = st->direction;
+ sp->pfs_1500.log = st->act.log;
+ sp->pfs_1500.timeout = st->timeout;
+ sp->pfs_1500.wire_proto = st->key[PF_SK_WIRE]->proto;
+ sp->pfs_1500.wire_af = st->key[PF_SK_WIRE]->af;
+ sp->pfs_1500.stack_proto = st->key[PF_SK_STACK]->proto;
+ sp->pfs_1500.stack_af = st->key[PF_SK_STACK]->af;
+ sp->pfs_1500.qid = htons(st->act.qid);
+ sp->pfs_1500.pqid = htons(st->act.pqid);
+ sp->pfs_1500.dnpipe = htons(st->act.dnpipe);
+ sp->pfs_1500.dnrpipe = htons(st->act.dnrpipe);
+ sp->pfs_1500.rtableid = htonl(st->act.rtableid);
+ sp->pfs_1500.min_ttl = st->act.min_ttl;
+ sp->pfs_1500.set_tos = st->act.set_tos;
+ sp->pfs_1500.max_mss = htons(st->act.max_mss);
+ sp->pfs_1500.set_prio[0] = st->act.set_prio[0];
+ sp->pfs_1500.set_prio[1] = st->act.set_prio[1];
+ sp->pfs_1500.rt = st->act.rt;
+ sp->pfs_1500.rt_af = st->act.rt_af;
+ if (st->act.rt_kif)
+ strlcpy(sp->pfs_1500.rt_ifname,
+ st->act.rt_kif->pfik_name,
+ sizeof(sp->pfs_1500.rt_ifname));
+ strlcpy(sp->pfs_1500.orig_ifname,
+ st->orig_kif->pfik_name,
+ sizeof(sp->pfs_1500.orig_ifname));
+ if ((tagname = pf_tag2tagname(st->tag)) != NULL)
+ strlcpy(sp->pfs_1500.tagname, tagname,
+ sizeof(sp->pfs_1500.tagname));
break;
default:
panic("%s: Unsupported pfsync_msg_version %d",
__func__, msg_version);
}
- /*
- * XXX Why do we bother pfsyncing source node information if source
- * nodes are not synced? Showing users that there is source tracking
- * when there is none seems useless.
- */
- if (st->sns[PF_SN_LIMIT] != NULL)
- sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
- if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
- sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
-
sp->pfs_1301.id = st->id;
sp->pfs_1301.creatorid = st->creatorid;
pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
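pfsync_state_export() now fills direction, log, timeout, and the protocol/AF fields per message version instead of once up front, because the 1500 format carries both halves of the state key (wire and stack) while older formats carry only one. A minimal model of that version-dispatch idea; the struct layouts are invented for illustration, not the pfsync wire format:

    #include <stdint.h>

    struct wire_v1400 { uint8_t dir; uint8_t proto; uint8_t af; };
    struct wire_v1500 {
        uint8_t dir;
        uint8_t wire_proto, wire_af;
        uint8_t stack_proto, stack_af;
    };

    union wire { struct wire_v1400 v1400; struct wire_v1500 v1500; };

    static int
    export(union wire *w, int version, uint8_t dir,
        uint8_t wproto, uint8_t waf, uint8_t sproto, uint8_t saf)
    {
        switch (version) {
        case 1400:                       /* older peers: wire key only */
            w->v1400.dir = dir;
            w->v1400.proto = wproto;
            w->v1400.af = waf;
            return (0);
        case 1500:                       /* newer peers: both key halves */
            w->v1500.dir = dir;
            w->v1500.wire_proto = wproto;
            w->v1500.wire_af = waf;
            w->v1500.stack_proto = sproto;
            w->v1500.stack_af = saf;
            return (0);
        default:
            return (-1);                 /* unknown version: refuse */
        }
    }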
@@ -6444,19 +6569,14 @@ shutdown_pf(void)
int error = 0;
u_int32_t t[5];
char nn = '\0';
- struct pf_kanchor *anchor;
- struct pf_keth_anchor *eth_anchor;
+ struct pf_kanchor *anchor, *tmp_anchor;
+ struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
int rs_num;
do {
/* Unlink rules of all user defined anchors */
- RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
- /* Wildcard based anchors may not have a respective
- * explicit anchor rule or they may be left empty
- * without rules. It leads to anchor.refcnt=0, and the
- * rest of the logic does not expect it. */
- if (anchor->refcnt == 0)
- anchor->refcnt = 1;
+ RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
+ tmp_anchor) {
for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
if ((error = pf_begin_rules(&t[rs_num], rs_num,
anchor->path)) != 0) {
@@ -6474,14 +6594,8 @@ shutdown_pf(void)
}
/* Unlink rules of all user defined ether anchors */
- RB_FOREACH(eth_anchor, pf_keth_anchor_global,
- &V_pf_keth_anchors) {
- /* Wildcard based anchors may not have a respective
- * explicit anchor rule or they may be left empty
- * without rules. It leads to anchor.refcnt=0, and the
- * rest of the logic does not expect it. */
- if (eth_anchor->refcnt == 0)
- eth_anchor->refcnt = 1;
+ RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
+ &V_pf_keth_anchors, tmp_eth_anchor) {
if ((error = pf_begin_eth(&t[0], eth_anchor->path))
!= 0) {
DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
@@ -6552,6 +6666,11 @@ shutdown_pf(void)
pf_kill_srcnodes(NULL);
+ for (int i = 0; i < PF_RULESET_MAX; i++) {
+ pf_rule_tree_free(pf_main_ruleset.rules[i].active.tree);
+ pf_rule_tree_free(pf_main_ruleset.rules[i].inactive.tree);
+ }
+
/* status does not use malloced mem so no need to cleanup */
/* fingerprints and interfaces have their own cleanup code */
} while(0);
@@ -6838,6 +6957,7 @@ pf_load_vnet(void)
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
+ rm_init_flags(&V_pf_tags_lock, "pf tags and queues", RM_RECURSE);
sx_init(&V_pf_ioctl_lock, "pf ioctl");
pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
@@ -6954,13 +7074,15 @@ pf_unload_vnet(void)
pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
for (int i = 0; i < SCNT_MAX; i++)
counter_u64_free(V_pf_status.scounters[i]);
+ for (int i = 0; i < NCNT_MAX; i++)
+ counter_u64_free(V_pf_status.ncounters[i]);
rm_destroy(&V_pf_rules_lock);
sx_destroy(&V_pf_ioctl_lock);
}
static void
-pf_unload(void)
+pf_unload(void *dummy __unused)
{
sx_xlock(&pf_end_lock);
@@ -6987,7 +7109,7 @@ vnet_pf_init(void *unused __unused)
pf_load_vnet();
}
-VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
+VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
vnet_pf_init, NULL);
static void
@@ -6995,7 +7117,7 @@ vnet_pf_uninit(const void *unused __unused)
{
pf_unload_vnet();
-}
+}
SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
vnet_pf_uninit, NULL);
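
Note: the shutdown_pf() hunks above replace the refcnt-bumping workaround with RB_FOREACH_SAFE, which tolerates the current anchor being unlinked while the tree is walked. A minimal sketch of the pattern follows; the node type, tree name, and M_TEMP usage are hypothetical, not taken from the pf sources.

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/tree.h>

struct node {
	RB_ENTRY(node)	entry;
	int		key;
};

static int
node_cmp(struct node *a, struct node *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(node_tree, node);
RB_GENERATE_STATIC(node_tree, node, entry, node_cmp);

static void
node_tree_drain(struct node_tree *head)
{
	struct node *n, *tmp;

	/*
	 * The _SAFE variant fetches the successor into 'tmp' before the
	 * body runs, so unlinking and freeing 'n' does not derail the walk.
	 */
	RB_FOREACH_SAFE(n, node_tree, head, tmp) {
		RB_REMOVE(node_tree, head, n);
		free(n, M_TEMP);
	}
}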
diff --git a/sys/netpfil/pf/pf_lb.c b/sys/netpfil/pf/pf_lb.c
index bc9e1dc72902..fb1b121d0bc0 100644
--- a/sys/netpfil/pf/pf_lb.c
+++ b/sys/netpfil/pf/pf_lb.c
@@ -73,7 +73,7 @@ VNET_DEFINE_STATIC(int, pf_rdr_srcport_rewrite_tries) = 16;
static uint64_t pf_hash(struct pf_addr *, struct pf_addr *,
struct pf_poolhashkey *, sa_family_t);
-struct pf_krule *pf_match_translation(int, struct pf_test_ctx *);
+static struct pf_krule *pf_match_translation(int, struct pf_test_ctx *);
static enum pf_test_status pf_step_into_translation_anchor(int, struct pf_test_ctx *,
struct pf_krule *);
static int pf_get_sport(struct pf_pdesc *, struct pf_krule *,
@@ -273,7 +273,7 @@ pf_step_into_translation_anchor(int rs_num, struct pf_test_ctx *ctx, struct pf_k
return (rv);
}
-struct pf_krule *
+static struct pf_krule *
pf_match_translation(int rs_num, struct pf_test_ctx *ctx)
{
enum pf_test_status rv;
@@ -545,11 +545,18 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
uint64_t hashidx;
int cnt;
sa_family_t wanted_af;
+ u_int8_t pool_type;
+ bool prefer_ipv6_nexthop = rpool->opts & PF_POOL_IPV6NH;
KASSERT(saf != 0, ("%s: saf == 0", __func__));
KASSERT(naf != NULL, ("%s: naf = NULL", __func__));
KASSERT((*naf) != 0, ("%s: *naf = 0", __func__));
+ /*
+ * The given (*naf) is a hint about the AF of the forwarded packet.
+ * It may be changed if prefer_ipv6_nexthop is enabled and the
+ * combination of nexthop AF and packet AF allows for it.
+ */
wanted_af = (*naf);
mtx_lock(&rpool->mtx);
@@ -594,19 +601,38 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
} else {
raddr = &rpool->cur->addr.v.a.addr;
rmask = &rpool->cur->addr.v.a.mask;
- /*
- * For single addresses check their address family. Unless they
- * have none, which happens when addresses are added with
- * the old ioctl mechanism. In such case trust that the address
- * has the proper AF.
- */
- if (rpool->cur->af && rpool->cur->af != wanted_af) {
- reason = PFRES_MAPFAILED;
- goto done_pool_mtx;
+ }
+
+ /*
+ * For pools with a single host and the prefer-ipv6-nexthop option
+ * we can return a pool address of any AF, unless the forwarded packet
+ * is IPv6, in which case the pool address must be IPv6 as well.
+ * Without prefer-ipv6-nexthop we can return a pool address only of
+ * the wanted AF, unless the pool address's AF is unknown, which
+ * happens when the pool was set up with the old ioctl mechanism.
+ *
+ * Round-robin pools have their own logic for retrying next addresses.
+ */
+ pool_type = rpool->opts & PF_POOL_TYPEMASK;
+ if (pool_type == PF_POOL_NONE || pool_type == PF_POOL_BITMASK ||
+ ((pool_type == PF_POOL_RANDOM || pool_type == PF_POOL_SRCHASH) &&
+ rpool->cur->addr.type != PF_ADDR_TABLE &&
+ rpool->cur->addr.type != PF_ADDR_DYNIFTL)) {
+ if (prefer_ipv6_nexthop) {
+ if (rpool->cur->af == AF_INET && (*naf) == AF_INET6) {
+ reason = PFRES_MAPFAILED;
+ goto done_pool_mtx;
+ }
+ wanted_af = rpool->cur->af;
+ } else {
+ if (rpool->cur->af != 0 && rpool->cur->af != (*naf)) {
+ reason = PFRES_MAPFAILED;
+ goto done_pool_mtx;
+ }
}
}
- switch (rpool->opts & PF_POOL_TYPEMASK) {
+ switch (pool_type) {
case PF_POOL_NONE:
pf_addrcpy(naddr, raddr, wanted_af);
break;
@@ -631,10 +657,22 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
else
rpool->tblidx = (int)arc4random_uniform(cnt);
memset(&rpool->counter, 0, sizeof(rpool->counter));
+ if (prefer_ipv6_nexthop)
+ wanted_af = AF_INET6;
+ retry_other_af_random:
if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter,
wanted_af, pf_islinklocal, false)) {
- reason = PFRES_MAPFAILED;
- goto done_pool_mtx; /* unsupported */
+ /* Retry with IPv4 nexthop for IPv4 traffic */
+ if (prefer_ipv6_nexthop &&
+ wanted_af == AF_INET6 &&
+ (*naf) == AF_INET) {
+ wanted_af = AF_INET;
+ goto retry_other_af_random;
+ } else {
+ /* no hosts in wanted AF */
+ reason = PFRES_MAPFAILED;
+ goto done_pool_mtx;
+ }
}
pf_addrcpy(naddr, &rpool->counter, wanted_af);
} else if (init_addr != NULL && PF_AZERO(init_addr,
@@ -702,10 +740,22 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
else
rpool->tblidx = (int)(hashidx % cnt);
memset(&rpool->counter, 0, sizeof(rpool->counter));
+ if (prefer_ipv6_nexthop)
+ wanted_af = AF_INET6;
+ retry_other_af_srchash:
if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter,
wanted_af, pf_islinklocal, false)) {
- reason = PFRES_MAPFAILED;
- goto done_pool_mtx; /* unsupported */
+ /* Retry with IPv4 nexthop for IPv4 traffic */
+ if (prefer_ipv6_nexthop &&
+ wanted_af == AF_INET6 &&
+ (*naf) == AF_INET) {
+ wanted_af = AF_INET;
+ goto retry_other_af_srchash;
+ } else {
+ /* no hosts in wanted AF */
+ reason = PFRES_MAPFAILED;
+ goto done_pool_mtx;
+ }
}
pf_addrcpy(naddr, &rpool->counter, wanted_af);
} else {
@@ -718,6 +768,9 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
{
struct pf_kpooladdr *acur = rpool->cur;
+ retry_other_af_rr:
+ if (prefer_ipv6_nexthop)
+ wanted_af = rpool->ipv6_nexthop_af;
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
if (!pfr_pool_get(rpool->cur->addr.p.tbl,
&rpool->tblidx, &rpool->counter, wanted_af,
@@ -728,46 +781,55 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
&rpool->tblidx, &rpool->counter, wanted_af,
pf_islinklocal, true))
goto get_addr;
- } else if (pf_match_addr(0, raddr, rmask, &rpool->counter,
- wanted_af))
+ } else if (rpool->cur->af == wanted_af &&
+ pf_match_addr(0, raddr, rmask, &rpool->counter, wanted_af))
goto get_addr;
-
+ if (prefer_ipv6_nexthop &&
+ (*naf) == AF_INET && wanted_af == AF_INET6) {
+ /* Reset table index when changing wanted AF. */
+ rpool->tblidx = -1;
+ rpool->ipv6_nexthop_af = AF_INET;
+ goto retry_other_af_rr;
+ }
try_next:
+ /* Reset prefer-ipv6-nexthop search to IPv6 when iterating pools. */
+ rpool->ipv6_nexthop_af = AF_INET6;
if (TAILQ_NEXT(rpool->cur, entries) == NULL)
rpool->cur = TAILQ_FIRST(&rpool->list);
else
rpool->cur = TAILQ_NEXT(rpool->cur, entries);
+ try_next_ipv6_nexthop_rr:
+ /* Reset table index when iterating pools or changing wanted AF. */
rpool->tblidx = -1;
+ if (prefer_ipv6_nexthop)
+ wanted_af = rpool->ipv6_nexthop_af;
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
- if (pfr_pool_get(rpool->cur->addr.p.tbl,
+ if (!pfr_pool_get(rpool->cur->addr.p.tbl,
&rpool->tblidx, &rpool->counter, wanted_af, NULL,
- true)) {
- /* table contains no address of type 'wanted_af' */
- if (rpool->cur != acur)
- goto try_next;
- reason = PFRES_MAPFAILED;
- goto done_pool_mtx;
- }
+ true))
+ goto get_addr;
} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
- if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
- &rpool->tblidx, &rpool->counter, wanted_af,
- pf_islinklocal, true)) {
- /* interface has no address of type 'wanted_af' */
- if (rpool->cur != acur)
- goto try_next;
- reason = PFRES_MAPFAILED;
- goto done_pool_mtx;
- }
+ if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
+ &rpool->tblidx, &rpool->counter, wanted_af, pf_islinklocal,
+ true))
+ goto get_addr;
} else {
- raddr = &rpool->cur->addr.v.a.addr;
- rmask = &rpool->cur->addr.v.a.mask;
- if (rpool->cur->af && rpool->cur->af != wanted_af) {
- reason = PFRES_MAPFAILED;
- goto done_pool_mtx;
+ if (rpool->cur->af == wanted_af) {
+ raddr = &rpool->cur->addr.v.a.addr;
+ rmask = &rpool->cur->addr.v.a.mask;
+ pf_addrcpy(&rpool->counter, raddr, wanted_af);
+ goto get_addr;
}
- pf_addrcpy(&rpool->counter, raddr, wanted_af);
}
-
+ if (prefer_ipv6_nexthop &&
+ (*naf) == AF_INET && wanted_af == AF_INET6) {
+ rpool->ipv6_nexthop_af = AF_INET;
+ goto try_next_ipv6_nexthop_rr;
+ }
+ if (rpool->cur != acur)
+ goto try_next;
+ reason = PFRES_MAPFAILED;
+ goto done_pool_mtx;
get_addr:
pf_addrcpy(naddr, &rpool->counter, wanted_af);
if (init_addr != NULL && PF_AZERO(init_addr, wanted_af))
@@ -777,9 +839,16 @@ pf_map_addr(sa_family_t saf, struct pf_krule *r, struct pf_addr *saddr,
}
}
+ if (wanted_af == 0) {
+ reason = PFRES_MAPFAILED;
+ goto done_pool_mtx;
+ }
+
if (nkif)
*nkif = rpool->cur->kif;
+ (*naf) = wanted_af;
+
done_pool_mtx:
mtx_unlock(&rpool->mtx);
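
Note: the retry labels added above all follow the same shape. With prefer-ipv6-nexthop set, pf_map_addr() first asks the pool for an IPv6 nexthop and, if the forwarded packet is IPv4, falls back to an IPv4 nexthop before giving up. A condensed sketch of that control flow; pool_has_host() is a hypothetical stand-in for the pfr_pool_get() calls.

#include <stdbool.h>
#include <sys/socket.h>

static bool pool_has_host(sa_family_t);	/* hypothetical lookup */

static bool
pick_nexthop_af(bool prefer_ipv6_nexthop, sa_family_t packet_af,
    sa_family_t *wanted_af)
{
	*wanted_af = prefer_ipv6_nexthop ? AF_INET6 : packet_af;
	for (;;) {
		if (pool_has_host(*wanted_af))
			return (true);
		/* Retry with an IPv4 nexthop for IPv4 traffic. */
		if (prefer_ipv6_nexthop && *wanted_af == AF_INET6 &&
		    packet_af == AF_INET) {
			*wanted_af = AF_INET;
			continue;
		}
		/* No host in any usable AF: maps to PFRES_MAPFAILED. */
		return (false);
	}
}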
diff --git a/sys/netpfil/pf/pf_nl.c b/sys/netpfil/pf/pf_nl.c
index 09754359ec2d..082b9b565153 100644
--- a/sys/netpfil/pf/pf_nl.c
+++ b/sys/netpfil/pf/pf_nl.c
@@ -118,7 +118,7 @@ dump_state_peer(struct nl_writer *nw, int attr, const struct pf_state_peer *peer
nlattr_add_u16(nw, PF_STP_PFSS_FLAGS, pfss_flags);
nlattr_add_u32(nw, PF_STP_PFSS_TS_MOD, sc->pfss_ts_mod);
nlattr_add_u8(nw, PF_STP_PFSS_TTL, sc->pfss_ttl);
- nlattr_add_u8(nw, PF_STP_SCRUB_FLAG, PFSYNC_SCRUB_FLAG_VALID);
+ nlattr_add_u8(nw, PF_STP_SCRUB_FLAG, PF_SCRUB_FLAG_VALID);
}
nlattr_set_len(nw, off);
@@ -763,6 +763,8 @@ static const struct nlattr_parser nla_p_rule[] = {
{ .type = PF_RT_RCV_IFNOT, .off = _OUT(rcvifnot), .cb = nlattr_get_bool },
{ .type = PF_RT_PKTRATE, .off = _OUT(pktrate), .arg = &threshold_parser, .cb = nlattr_get_nested },
{ .type = PF_RT_MAX_PKT_SIZE, .off = _OUT(max_pkt_size), .cb = nlattr_get_uint16 },
+ { .type = PF_RT_TYPE_2, .off = _OUT(type), .cb = nlattr_get_uint16 },
+ { .type = PF_RT_CODE_2, .off = _OUT(code), .cb = nlattr_get_uint16 },
};
NL_DECLARE_ATTR_PARSER(rule_parser, nla_p_rule);
#undef _OUT
@@ -984,8 +986,12 @@ pf_handle_getrule(struct nlmsghdr *hdr, struct nl_pstate *npt)
nlattr_add_u8(nw, PF_RT_AF, rule->af);
nlattr_add_u8(nw, PF_RT_NAF, rule->naf);
nlattr_add_u8(nw, PF_RT_PROTO, rule->proto);
+
nlattr_add_u8(nw, PF_RT_TYPE, rule->type);
nlattr_add_u8(nw, PF_RT_CODE, rule->code);
+ nlattr_add_u16(nw, PF_RT_TYPE_2, rule->type);
+ nlattr_add_u16(nw, PF_RT_CODE_2, rule->code);
+
nlattr_add_u8(nw, PF_RT_FLAGS, rule->flags);
nlattr_add_u8(nw, PF_RT_FLAGSET, rule->flagset);
nlattr_add_u8(nw, PF_RT_MIN_TTL, rule->min_ttl);
@@ -1019,6 +1025,7 @@ pf_handle_getrule(struct nlmsghdr *hdr, struct nl_pstate *npt)
nlattr_add_u64(nw, PF_RT_SRC_NODES_NAT, counter_u64_fetch(rule->src_nodes[PF_SN_NAT]));
nlattr_add_u64(nw, PF_RT_SRC_NODES_ROUTE, counter_u64_fetch(rule->src_nodes[PF_SN_ROUTE]));
nlattr_add_pf_threshold(nw, PF_RT_PKTRATE, &rule->pktrate);
+ nlattr_add_time_t(nw, PF_RT_EXPTIME, time_second - (time_uptime - rule->exptime));
error = pf_kanchor_copyout(ruleset, rule, anchor_call, sizeof(anchor_call));
MPASS(error == 0);
@@ -1228,6 +1235,9 @@ pf_handle_get_status(struct nlmsghdr *hdr, struct nl_pstate *npt)
V_pf_status.fcounters);
nlattr_add_counters(nw, PF_GS_SCOUNTERS, SCNT_MAX, pf_fcounter,
V_pf_status.scounters);
+ nlattr_add_counters(nw, PF_GS_NCOUNTERS, NCNT_MAX, pf_fcounter,
+ V_pf_status.ncounters);
+ nlattr_add_u64(nw, PF_GS_FRAGMENTS, pf_normalize_get_frag_count());
pfi_update_status(V_pf_status.ifname, &s);
nlattr_add_u64_array(nw, PF_GS_BCOUNTERS, 2 * 2, (uint64_t *)s.bcounters);
@@ -1945,7 +1955,7 @@ pf_handle_get_tstats(struct nlmsghdr *hdr, struct nl_pstate *npt)
n = pfr_table_count(&attrs.pfrio_table, attrs.pfrio_flags);
pfrtstats = mallocarray(n,
- sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
+ sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
error = pfr_get_tstats(&attrs.pfrio_table, pfrtstats,
&n, attrs.pfrio_flags | PFR_FLAG_USERIOCTL);
@@ -1997,7 +2007,7 @@ pf_handle_get_tstats(struct nlmsghdr *hdr, struct nl_pstate *npt)
}
}
}
- free(pfrtstats, M_TEMP);
+ free(pfrtstats, M_PF);
if (!nlmsg_end_dump(npt->nw, error, hdr)) {
NL_LOG(LOG_DEBUG, "Unable to finalize the dump");
diff --git a/sys/netpfil/pf/pf_nl.h b/sys/netpfil/pf/pf_nl.h
index 87daac393821..c46c8f2b2592 100644
--- a/sys/netpfil/pf/pf_nl.h
+++ b/sys/netpfil/pf/pf_nl.h
@@ -283,6 +283,9 @@ enum pf_rule_type_t {
PF_RT_SRC_NODES_ROUTE = 81, /* u64 */
PF_RT_PKTRATE = 82, /* nested, pf_threshold_type_t */
PF_RT_MAX_PKT_SIZE = 83, /* u16 */
+ PF_RT_TYPE_2 = 84, /* u16 */
+ PF_RT_CODE_2 = 85, /* u16 */
+ PF_RT_EXPTIME = 86, /* time_t */
};
enum pf_addrule_type_t {
@@ -350,6 +353,8 @@ enum pf_get_status_types_t {
PF_GS_CHKSUM = 14, /* byte array */
PF_GS_PCOUNTERS = 15, /* u64 array */
PF_GS_BCOUNTERS = 16, /* u64 array */
+ PF_GS_NCOUNTERS = 17, /* nested */
+ PF_GS_FRAGMENTS = 18, /* u64 */
};
enum pf_natlook_types_t {
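
Note: PF_RT_TYPE_2/PF_RT_CODE_2 widen the ICMP type and code attributes from u8 to u16 without breaking old consumers: the kernel emits the legacy attributes alongside the wide ones, and a new consumer prefers the _2 variant when present. A sketch of that preference, under the assumption of a hypothetical parsed-attribute struct (not the actual pf_nl.c parser state):

#include <stdbool.h>
#include <stdint.h>

struct parsed_rule {			/* hypothetical decode result */
	bool		has_type_2;	/* PF_RT_TYPE_2 was present */
	uint16_t	type_2;		/* value of PF_RT_TYPE_2 */
	uint8_t		type;		/* legacy PF_RT_TYPE */
};

static uint16_t
rule_icmp_type(const struct parsed_rule *r)
{
	/* Prefer the 16-bit attribute; fall back to the legacy 8-bit one. */
	return (r->has_type_2 ? r->type_2 : r->type);
}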
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index a684d778ab42..53010222dd07 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -211,6 +211,12 @@ pf_normalize_cleanup(void)
mtx_destroy(&V_pf_frag_mtx);
}
+uint64_t
+pf_normalize_get_frag_count(void)
+{
+ return (uma_zone_get_cur(V_pf_frent_z));
+}
+
static int
pf_frnode_compare(struct pf_frnode *a, struct pf_frnode *b)
{
@@ -314,6 +320,7 @@ pf_free_fragment(struct pf_fragment *frag)
/* Free all fragment entries */
while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ counter_u64_add(V_pf_status.ncounters[NCNT_FRAG_REMOVALS], 1);
m_freem(frent->fe_m);
uma_zfree(V_pf_frent_z, frent);
@@ -331,6 +338,7 @@ pf_find_fragment(struct pf_frnode *key, uint32_t id)
PF_FRAG_ASSERT();
frnode = RB_FIND(pf_frnode_tree, &V_pf_frnode_tree, key);
+ counter_u64_add(V_pf_status.ncounters[NCNT_FRAG_SEARCH], 1);
if (frnode == NULL)
return (NULL);
MPASS(frnode->fn_fragments >= 1);
@@ -438,6 +446,7 @@ pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
("overlapping fragment"));
TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
}
+ counter_u64_add(V_pf_status.ncounters[NCNT_FRAG_INSERT], 1);
if (frag->fr_firstoff[index] == NULL) {
KASSERT(prev == NULL || pf_frent_index(prev) < index,
@@ -496,6 +505,7 @@ pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
}
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ counter_u64_add(V_pf_status.ncounters[NCNT_FRAG_REMOVALS], 1);
KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
frag->fr_entries[index]--;
@@ -768,6 +778,7 @@ pf_join_fragment(struct pf_fragment *frag)
frent = TAILQ_FIRST(&frag->fr_queue);
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ counter_u64_add(V_pf_status.ncounters[NCNT_FRAG_REMOVALS], 1);
m = frent->fe_m;
if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
@@ -775,6 +786,7 @@ pf_join_fragment(struct pf_fragment *frag)
uma_zfree(V_pf_frent_z, frent);
while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ counter_u64_add(V_pf_status.ncounters[NCNT_FRAG_REMOVALS], 1);
m2 = frent->fe_m;
/* Strip off ip header. */
@@ -1354,7 +1366,7 @@ pf_normalize_ip6(int off, u_short *reason,
pf_rule_to_actions(r, &pd->act);
}
- if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
+ if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), reason, AF_INET6))
return (PF_DROP);
/* Offset now points to data portion. */
@@ -1542,7 +1554,7 @@ pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
olen = (th->th_off << 2) - sizeof(*th);
if (olen < TCPOLEN_TIMESTAMP || !pf_pull_hdr(pd->m,
- pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af))
+ pd->off + sizeof(*th), opts, olen, NULL, pd->af))
return (0);
opt = opts;
@@ -1645,7 +1657,7 @@ pf_normalize_tcp_stateful(struct pf_pdesc *pd,
if (olen >= TCPOLEN_TIMESTAMP &&
((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
(dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
- pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af)) {
+ pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, olen, NULL, pd->af)) {
/* Modulate the timestamps. Can be used for NAT detection, OS
* uptime determination or reboot detection.
*/
@@ -1975,7 +1987,7 @@ pf_normalize_mss(struct pf_pdesc *pd)
olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
optsoff = pd->off + sizeof(struct tcphdr);
if (olen < TCPOLEN_MAXSEG ||
- !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af))
+ !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, pd->af))
return (0);
opt = opts;
@@ -2009,7 +2021,7 @@ pf_scan_sctp(struct pf_pdesc *pd)
int ret;
while (pd->off + chunk_off < pd->tot_len) {
- if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch), NULL,
+ if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch),
NULL, pd->af))
return (PF_DROP);
@@ -2026,7 +2038,7 @@ pf_scan_sctp(struct pf_pdesc *pd)
struct sctp_init_chunk init;
if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
- sizeof(init), NULL, NULL, pd->af))
+ sizeof(init), NULL, pd->af))
return (PF_DROP);
/*
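
Note: the NCNT_FRAG_* increments sprinkled through pf_norm.c use the counter(9) per-CPU counters that pf_unload_vnet() now frees. A minimal sketch of that life cycle; the counter name is hypothetical.

#include <sys/param.h>
#include <sys/counter.h>
#include <sys/malloc.h>

static counter_u64_t frag_demo_cnt;	/* hypothetical counter */

static void
frag_demo_init(void)
{
	frag_demo_cnt = counter_u64_alloc(M_WAITOK);
}

static void
frag_demo_hot_path(void)
{
	counter_u64_add(frag_demo_cnt, 1);	/* per-CPU, no lock */
}

static uint64_t
frag_demo_export(void)
{
	return (counter_u64_fetch(frag_demo_cnt));	/* sums all CPUs */
}

static void
frag_demo_fini(void)
{
	counter_u64_free(frag_demo_cnt);
}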
diff --git a/sys/netpfil/pf/pf_nv.c b/sys/netpfil/pf/pf_nv.c
index 89486928e6e1..2f484e2dabc6 100644
--- a/sys/netpfil/pf/pf_nv.c
+++ b/sys/netpfil/pf/pf_nv.c
@@ -505,6 +505,7 @@ int
pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule *rule)
{
int error = 0;
+ uint8_t tmp;
#define ERROUT(x) ERROUT_FUNCTION(errout, x)
@@ -610,8 +611,10 @@ pf_nvrule_to_krule(const nvlist_t *nvl, struct pf_krule *rule)
PFNV_CHK(pf_nvuint8(nvl, "keep_state", &rule->keep_state));
PFNV_CHK(pf_nvuint8(nvl, "af", &rule->af));
PFNV_CHK(pf_nvuint8(nvl, "proto", &rule->proto));
- PFNV_CHK(pf_nvuint8(nvl, "type", &rule->type));
- PFNV_CHK(pf_nvuint8(nvl, "code", &rule->code));
+ PFNV_CHK(pf_nvuint8(nvl, "type", &tmp));
+ rule->type = tmp;
+ PFNV_CHK(pf_nvuint8(nvl, "code", &tmp));
+ rule->code = tmp;
PFNV_CHK(pf_nvuint8(nvl, "flags", &rule->flags));
PFNV_CHK(pf_nvuint8(nvl, "flagset", &rule->flagset));
PFNV_CHK(pf_nvuint8(nvl, "min_ttl", &rule->min_ttl));
diff --git a/sys/netpfil/pf/pf_osfp.c b/sys/netpfil/pf/pf_osfp.c
index 150626c5f3fb..8c041d45eae8 100644
--- a/sys/netpfil/pf/pf_osfp.c
+++ b/sys/netpfil/pf/pf_osfp.c
@@ -82,7 +82,7 @@ pf_osfp_fingerprint(struct pf_pdesc *pd, const struct tcphdr *tcp)
ip6 = mtod(pd->m, struct ip6_hdr *);
break;
}
- if (!pf_pull_hdr(pd->m, pd->off, hdr, tcp->th_off << 2, NULL, NULL,
+ if (!pf_pull_hdr(pd->m, pd->off, hdr, tcp->th_off << 2, NULL,
pd->af)) return (NULL);
return (pf_osfp_fingerprint_hdr(ip, ip6, (struct tcphdr *)hdr));
diff --git a/sys/netpfil/pf/pf_ruleset.c b/sys/netpfil/pf/pf_ruleset.c
index 039908a53126..4e16eaa76f9d 100644
--- a/sys/netpfil/pf/pf_ruleset.c
+++ b/sys/netpfil/pf/pf_ruleset.c
@@ -59,8 +59,8 @@
#error "Kernel only file. Please use sbin/pfctl/pf_ruleset.c instead."
#endif
-#define rs_malloc(x) malloc(x, M_TEMP, M_NOWAIT|M_ZERO)
-#define rs_free(x) free(x, M_TEMP)
+#define rs_malloc(x) malloc(x, M_PF, M_NOWAIT|M_ZERO)
+#define rs_free(x) free(x, M_PF)
VNET_DEFINE(struct pf_kanchor_global, pf_anchors);
VNET_DEFINE(struct pf_kanchor, pf_main_anchor);
@@ -346,6 +346,12 @@ pf_remove_if_empty_kruleset(struct pf_kruleset *ruleset)
!TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
ruleset->rules[i].inactive.open)
return;
+ for (int i = 0; i < PF_RULESET_MAX; i++) {
+ pf_rule_tree_free(ruleset->rules[i].active.tree);
+ ruleset->rules[i].active.tree = NULL;
+ pf_rule_tree_free(ruleset->rules[i].inactive.tree);
+ ruleset->rules[i].inactive.tree = NULL;
+ }
RB_REMOVE(pf_kanchor_global, &V_pf_anchors, ruleset->anchor);
if ((parent = ruleset->anchor->parent) != NULL)
RB_REMOVE(pf_kanchor_node, &parent->children,
diff --git a/sys/netpfil/pf/pf_syncookies.c b/sys/netpfil/pf/pf_syncookies.c
index 4a935bc65767..d11551ffb6ae 100644
--- a/sys/netpfil/pf/pf_syncookies.c
+++ b/sys/netpfil/pf/pf_syncookies.c
@@ -287,7 +287,7 @@ pf_synflood_check(struct pf_pdesc *pd)
}
void
-pf_syncookie_send(struct pf_pdesc *pd)
+pf_syncookie_send(struct pf_pdesc *pd, u_short *reason)
{
uint16_t mss;
uint32_t iss;
@@ -297,7 +297,7 @@ pf_syncookie_send(struct pf_pdesc *pd)
pf_send_tcp(NULL, pd->af, pd->dst, pd->src, *pd->dport, *pd->sport,
iss, ntohl(pd->hdr.tcp.th_seq) + 1, TH_SYN|TH_ACK, 0, mss,
0, M_SKIP_FIREWALL | (pd->m->m_flags & M_LOOP), 0, 0,
- pd->act.rtableid);
+ pd->act.rtableid, reason);
counter_u64_add(V_pf_status.lcounters[KLCNT_SYNCOOKIES_SENT], 1);
/* XXX Maybe only in adaptive mode? */
atomic_add_64(&V_pf_status.syncookies_inflight[V_pf_syncookie_status.oddeven],
@@ -495,7 +495,7 @@ pf_syncookie_generate(struct pf_pdesc *pd, uint16_t mss)
}
struct mbuf *
-pf_syncookie_recreate_syn(struct pf_pdesc *pd)
+pf_syncookie_recreate_syn(struct pf_pdesc *pd, u_short *reason)
{
uint8_t wscale;
uint16_t mss;
@@ -516,5 +516,5 @@ pf_syncookie_recreate_syn(struct pf_pdesc *pd)
return (pf_build_tcp(NULL, pd->af, pd->src, pd->dst, *pd->sport,
*pd->dport, seq, 0, TH_SYN, wscale, mss, pd->ttl,
(pd->m->m_flags & M_LOOP), 0, PF_MTAG_FLAG_SYNCOOKIE_RECREATED,
- cookie.flags.sack_ok, pd->act.rtableid));
+ cookie.flags.sack_ok, pd->act.rtableid, reason));
}
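
Note: pf_syncookie_send() and pf_syncookie_recreate_syn() now thread a u_short *reason through to pf_send_tcp()/pf_build_tcp(), so failures inside the TCP builders can be attributed to a PFRES_* counter instead of vanishing silently. A sketch of the convention; build_reply() is hypothetical, not a pf function.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/pfvar.h>	/* PFRES_MEMORY */

static struct mbuf *
build_reply(int len, u_short *reason)
{
	struct mbuf *m;

	m = m_getm2(NULL, len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		*reason = PFRES_MEMORY;	/* caller bumps the matching counter */
	return (m);
}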
diff --git a/sys/nfs/nfs_diskless.c b/sys/nfs/nfs_diskless.c
index 42cfee63d184..0f0cf80feeec 100644
--- a/sys/nfs/nfs_diskless.c
+++ b/sys/nfs/nfs_diskless.c
@@ -428,7 +428,7 @@ decode_nfshandle(char *ev, u_char *fh, int maxfh)
#if !defined(BOOTP_NFSROOT)
static void
-nfs_rootconf(void)
+nfs_rootconf(void *dummy __unused)
{
nfs_setup_diskless();
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 796b1719b8ba..01bf4c7e90a8 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -297,7 +297,7 @@ static u_int moea64_clear_bit(vm_page_t, uint64_t);
static void moea64_kremove(vm_offset_t);
static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
vm_paddr_t pa, vm_size_t sz);
-static void moea64_pmap_init_qpages(void);
+static void moea64_pmap_init_qpages(void *);
static void moea64_remove_locked(pmap_t, vm_offset_t,
vm_offset_t, struct pvo_dlist *);
@@ -1284,7 +1284,7 @@ moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
}
static void
-moea64_pmap_init_qpages(void)
+moea64_pmap_init_qpages(void *dummy __unused)
{
struct pcpu *pc;
int i;
diff --git a/sys/powerpc/conf/GENERIC64 b/sys/powerpc/conf/GENERIC64
index 85711c8fc3ff..91c91d58d058 100644
--- a/sys/powerpc/conf/GENERIC64
+++ b/sys/powerpc/conf/GENERIC64
@@ -234,9 +234,9 @@ device wlan # 802.11 support
options IEEE80211_SUPPORT_MESH # enable 802.11s draft support
options IEEE80211_DEBUG # enable debug msgs
device wlan_wep # 802.11 WEP support
+device wlan_tkip # 802.11 TKIP support
device wlan_ccmp # 802.11 CCMP support
device wlan_gcmp # 802.11 GCMP support
-device wlan_tkip # 802.11 TKIP support
device wlan_amrr # AMRR transmit rate control algorithm
device ath # Atheros CardBus/PCI NICs
device ath_hal # Atheros CardBus/PCI chip support
diff --git a/sys/powerpc/conf/GENERIC64LE b/sys/powerpc/conf/GENERIC64LE
index a56feb6574a4..5fb9715de655 100644
--- a/sys/powerpc/conf/GENERIC64LE
+++ b/sys/powerpc/conf/GENERIC64LE
@@ -230,9 +230,9 @@ device wlan # 802.11 support
options IEEE80211_SUPPORT_MESH # enable 802.11s draft support
options IEEE80211_DEBUG # enable debug msgs
device wlan_wep # 802.11 WEP support
+device wlan_tkip # 802.11 TKIP support
device wlan_ccmp # 802.11 CCMP support
device wlan_gcmp # 802.11 GCMP support
-device wlan_tkip # 802.11 TKIP support
device wlan_amrr # AMRR transmit rate control algorithm
device ath # Atheros CardBus/PCI NICs
device ath_hal # Atheros CardBus/PCI chip support
diff --git a/sys/powerpc/cpufreq/pmcr.c b/sys/powerpc/cpufreq/pmcr.c
index dd489b607606..6ae0777a8ac7 100644
--- a/sys/powerpc/cpufreq/pmcr.c
+++ b/sys/powerpc/cpufreq/pmcr.c
@@ -40,7 +40,8 @@ static int pstate_ids[256];
static int pstate_freqs[256];
static int npstates;
-static void parse_pstates(void)
+static void
+parse_pstates(void *dummy __unused)
{
phandle_t node;
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 015a283e2de7..b2d7549e5bd0 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -1137,7 +1137,38 @@ atomic_thread_fence_seq_cst(void)
#define atomic_cmpset_short atomic_cmpset_16
#define atomic_fcmpset_char atomic_fcmpset_8
#define atomic_fcmpset_short atomic_fcmpset_16
-#endif
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+#else
+
+static __inline void
+atomic_set_short(volatile u_short *p, u_short bit)
+{
+ u_short v;
+
+ v = atomic_load_short(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v | bit))
+ break;
+ }
+}
+
+static __inline void
+atomic_clear_short(volatile u_short *p, u_short bit)
+{
+ u_short v;
+
+ v = atomic_load_short(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v & ~bit))
+ break;
+ }
+}
+
+#define atomic_set_16 atomic_set_short
+#define atomic_clear_16 atomic_clear_short
+
+#endif /* ISA_206_ATOMICS */
/* These need sys/_atomic_subword.h on non-ISA-2.06-atomic platforms. */
ATOMIC_CMPSET_ACQ_REL(char);
diff --git a/sys/powerpc/mpc85xx/mpc85xx_gpio.c b/sys/powerpc/mpc85xx/mpc85xx_gpio.c
index cb96d768adef..7353ed7bac7b 100644
--- a/sys/powerpc/mpc85xx/mpc85xx_gpio.c
+++ b/sys/powerpc/mpc85xx/mpc85xx_gpio.c
@@ -228,12 +228,13 @@ mpc85xx_gpio_attach(device_t dev)
OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
mpc85xx_gpio_detach(dev);
return (ENOMEM);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
index 65f90aa4affa..65a07c7ebc39 100644
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -648,6 +648,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
sgsize = roundup2(sgsize, dmat->alignment);
+ sgsize = MIN(sgsize, buflen);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
sgsize);
}
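
Note: both busdma changes in this commit (here and in the RISC-V bounce code below) add the same clamp: rounding sgsize up to the tag's alignment can push it past the bytes actually remaining in the buffer, and the bounce page would then copy past the end. An illustrative sketch with made-up numbers:

#include <sys/param.h>
#include <machine/bus.h>

static bus_size_t
bounce_sgsize(bus_addr_t curaddr, bus_size_t buflen, bus_size_t align)
{
	bus_size_t sgsize;

	sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
	/* E.g. buflen = 100, align = 256: roundup2() yields 256... */
	sgsize = roundup2(sgsize, align);
	/* ...so clamp back to the 100 bytes that actually remain. */
	return (MIN(sgsize, buflen));
}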
diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h
index 74ffc171b028..c90cb02c482c 100644
--- a/sys/riscv/include/atomic.h
+++ b/sys/riscv/include/atomic.h
@@ -656,4 +656,7 @@ atomic_thread_fence_seq_cst(void)
#include <sys/_atomic_subword.h>
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+
#endif /* _MACHINE_ATOMIC_H_ */
diff --git a/sys/riscv/include/ieeefp.h b/sys/riscv/include/ieeefp.h
index 03a96e8a000f..84b554a04c65 100644
--- a/sys/riscv/include/ieeefp.h
+++ b/sys/riscv/include/ieeefp.h
@@ -5,4 +5,9 @@
/* TODO */
typedef int fp_except_t;
+__BEGIN_DECLS
+extern fp_except_t fpgetmask(void);
+extern fp_except_t fpsetmask(fp_except_t);
+__END_DECLS
+
#endif /* _MACHINE_IEEEFP_H_ */
diff --git a/sys/riscv/include/vmm.h b/sys/riscv/include/vmm.h
index 1221521be368..bc00474ed0fd 100644
--- a/sys/riscv/include/vmm.h
+++ b/sys/riscv/include/vmm.h
@@ -49,6 +49,7 @@ enum vm_suspend_how {
VM_SUSPEND_RESET,
VM_SUSPEND_POWEROFF,
VM_SUSPEND_HALT,
+ VM_SUSPEND_DESTROY,
VM_SUSPEND_LAST
};
@@ -122,6 +123,29 @@ struct vm_eventinfo {
int *iptr; /* reqidle cookie */
};
+#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \
+ ret_type vmmops_##opname args
+
+DECLARE_VMMOPS_FUNC(int, modinit, (void));
+DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
+DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
+DECLARE_VMMOPS_FUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
+ uint64_t gla, int prot, uint64_t *gpa, int *is_fault));
+DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
+ struct vm_eventinfo *info));
+DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
+DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+ int vcpu_id));
+DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
+DECLARE_VMMOPS_FUNC(int, exception, (void *vcpui, uint64_t scause));
+DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
+DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
+DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
+DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
+DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
+ vm_offset_t max));
+DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
+
int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
@@ -211,7 +235,6 @@ vcpu_should_yield(struct vcpu *vcpu)
void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
struct vm_mem *vm_mem(struct vm *vm);
enum vm_reg_name vm_segment_name(int seg_encoding);
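
Note: DECLARE_VMMOPS_FUNC centralizes the vmmops_* prototypes that riscv.h used to declare through DEFINE_VMMOPS_IFUNC (removed below). The macro is a thin prototype generator; for one representative entry:

/* This invocation ... */
DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));

/* ... expands to the plain prototype (semicolon supplied by the caller): */
int vmmops_getreg(void *vcpui, int num, uint64_t *retval);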
diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c
index f652f08bf5dc..9d9556fc72f9 100644
--- a/sys/riscv/riscv/busdma_bounce.c
+++ b/sys/riscv/riscv/busdma_bounce.c
@@ -672,6 +672,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
map->pagesneeded != 0 &&
addr_needs_bounce(dmat, curaddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, buflen);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
sgsize);
} else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
diff --git a/sys/riscv/sifive/sifive_gpio.c b/sys/riscv/sifive/sifive_gpio.c
index ef68d2b39da3..98bff2f72082 100644
--- a/sys/riscv/sifive/sifive_gpio.c
+++ b/sys/riscv/sifive/sifive_gpio.c
@@ -157,13 +157,14 @@ sfgpio_attach(device_t dev)
sc->gpio_pins[i].gp_name[GPIOMAXNAME - 1] = '\0';
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Cannot attach gpiobus\n");
error = ENXIO;
goto fail;
}
+ bus_attach_children(dev);
return (0);
fail:
diff --git a/sys/riscv/starfive/jh7110_gpio.c b/sys/riscv/starfive/jh7110_gpio.c
index 452a3306b4a1..1ed7d9f42259 100644
--- a/sys/riscv/starfive/jh7110_gpio.c
+++ b/sys/riscv/starfive/jh7110_gpio.c
@@ -321,13 +321,14 @@ jh7110_gpio_attach(device_t dev)
JH7110_GPIO_WRITE(sc, GPIOE_1, 0);
JH7110_GPIO_WRITE(sc, GPIOEN, 1);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Cannot attach gpiobus\n");
jh7110_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/riscv/starfive/jh7110_pcie.c b/sys/riscv/starfive/jh7110_pcie.c
index 2d0a4be69b2c..5181252ab2dc 100644
--- a/sys/riscv/starfive/jh7110_pcie.c
+++ b/sys/riscv/starfive/jh7110_pcie.c
@@ -483,6 +483,16 @@ jh7110_pcie_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
}
static void
+jh7110_pcie_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+}
+
+static void
+jh7110_pcie_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+}
+
+static void
jh7110_pcie_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
struct jh7110_pcie_softc *sc;
@@ -1008,6 +1018,8 @@ static device_method_t jh7110_pcie_methods[] = {
/* Interrupt controller interface */
DEVMETHOD(pic_enable_intr, jh7110_pcie_msi_enable_intr),
DEVMETHOD(pic_disable_intr, jh7110_pcie_msi_disable_intr),
+ DEVMETHOD(pic_post_filter, jh7110_pcie_msi_post_filter),
+ DEVMETHOD(pic_post_ithread, jh7110_pcie_msi_post_ithread),
DEVMETHOD(pic_pre_ithread, jh7110_pcie_msi_pre_ithread),
/* OFW bus interface */
diff --git a/sys/riscv/vmm/riscv.h b/sys/riscv/vmm/riscv.h
index 870d0d6c5cd1..917a333520ed 100644
--- a/sys/riscv/vmm/riscv.h
+++ b/sys/riscv/vmm/riscv.h
@@ -122,29 +122,6 @@ struct hyptrap {
uint64_t htinst;
};
-#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
- ret_type vmmops_##opname args;
-
-DEFINE_VMMOPS_IFUNC(int, modinit, (void))
-DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
-DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
- uint64_t gla, int prot, uint64_t *gpa, int *is_fault))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
- struct vm_eventinfo *info))
-DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
-DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
- int vcpu_id))
-DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
-DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t scause))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
-DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
- vm_offset_t max))
-DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-
#define dprintf(fmt, ...)
struct hypctx *riscv_get_active_vcpu(void);
diff --git a/sys/riscv/vmm/vmm.c b/sys/riscv/vmm/vmm.c
index 7528ef6e4698..790dcc576507 100644
--- a/sys/riscv/vmm/vmm.c
+++ b/sys/riscv/vmm/vmm.c
@@ -92,7 +92,6 @@ struct vcpu {
struct fpreg *guestfpu; /* (a,i) guest fpu state */
};
-#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
#define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
@@ -121,7 +120,6 @@ struct vm {
bool dying; /* (o) is dying */
volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
- struct vmspace *vmspace; /* (o) guest's address space */
struct vm_mem mem; /* (i) [m+v] guest memory */
char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
struct vcpu **vcpu; /* (i) guest vcpus */
@@ -174,6 +172,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy)
vmm_stat_free(vcpu->stats);
fpu_save_area_free(vcpu->guestfpu);
vcpu_lock_destroy(vcpu);
+ free(vcpu, M_VMM);
}
}
@@ -285,7 +284,7 @@ vm_init(struct vm *vm, bool create)
{
int i;
- vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+ vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
MPASS(vm->cookie != NULL);
CPU_ZERO(&vm->active_cpus);
@@ -362,7 +361,7 @@ int
vm_create(const char *name, struct vm **retvm)
{
struct vm *vm;
- struct vmspace *vmspace;
+ int error;
/*
* If vmm.ko could not be successfully initialized then don't attempt
@@ -374,14 +373,13 @@ vm_create(const char *name, struct vm **retvm)
if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
return (EINVAL);
- vmspace = vmmops_vmspace_alloc(0, 1ul << 39);
- if (vmspace == NULL)
- return (ENOMEM);
-
vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
+ error = vm_mem_init(&vm->mem, 0, 1ul << 39);
+ if (error != 0) {
+ free(vm, M_VMM);
+ return (error);
+ }
strcpy(vm->name, name);
- vm->vmspace = vmspace;
- vm_mem_init(&vm->mem);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->sockets = 1;
@@ -450,11 +448,6 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_mem_destroy(vm);
- vmmops_vmspace_free(vm->vmspace);
- vm->vmspace = NULL;
-
- for (i = 0; i < vm->maxcpus; i++)
- free(vm->vcpu[i], M_VMM);
free(vm->vcpu, M_VMM);
sx_destroy(&vm->vcpus_init_lock);
}
@@ -760,12 +753,6 @@ vcpu_notify_event(struct vcpu *vcpu)
vcpu_unlock(vcpu);
}
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
- return (vm->vmspace);
-}
-
struct vm_mem *
vm_mem(struct vm *vm)
{
@@ -1036,10 +1023,14 @@ vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
static int
vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
{
+ struct vm *vm;
+ vm = vcpu->vm;
vcpu_lock(vcpu);
-
while (1) {
+ if (vm->suspend)
+ break;
+
if (aplic_check_pending(vcpu->cookie))
break;
@@ -1080,7 +1071,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
vm = vcpu->vm;
vme = &vcpu->exitinfo;
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
addr = (vme->htval << 2) & ~(PAGE_SIZE - 1);
dprintf("%s: %lx\n", __func__, addr);
@@ -1103,7 +1094,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
if (pmap_fault(pmap, addr, ftype))
return (0);
- map = &vm->vmspace->vm_map;
+ map = &vm_vmspace(vm)->vm_map;
rv = vm_fault(map, addr, ftype, VM_FAULT_NORMAL, NULL);
if (rv != KERN_SUCCESS) {
printf("%s: vm_fault failed, addr %lx, ftype %d, err %d\n",
@@ -1185,7 +1176,7 @@ vm_run(struct vcpu *vcpu)
if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
return (EINVAL);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
vme = &vcpu->exitinfo;
evinfo.rptr = NULL;
evinfo.sptr = &vm->suspend;
diff --git a/sys/rpc/auth.h b/sys/rpc/auth.h
index 33c33ffd594d..648fb99a3a27 100644
--- a/sys/rpc/auth.h
+++ b/sys/rpc/auth.h
@@ -354,6 +354,10 @@ __END_DECLS
#define RPCSEC_GSS 6 /* RPCSEC_GSS */
#define AUTH_TLS 7 /* Initiate RPC-over-TLS */
+/* RFC 5531's prescribed limits for variable-length arrays. */
+#define AUTH_SYS_MAX_HOSTNAME 255
+#define AUTH_SYS_MAX_GROUPS 16 /* Supplementary groups. */
+
/*
* Pseudo auth flavors for RPCSEC_GSS.
*/
diff --git a/sys/rpc/authunix_prot.c b/sys/rpc/authunix_prot.c
index b107d5541c50..ff4c12c3f52e 100644
--- a/sys/rpc/authunix_prot.c
+++ b/sys/rpc/authunix_prot.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* authunix_prot.c
* XDR for UNIX style authentication parameters for RPC
@@ -40,8 +39,7 @@
#include <sys/param.h>
#include <sys/jail.h>
-#include <sys/kernel.h>
-#include <sys/systm.h>
+#include <sys/libkern.h>
#include <sys/ucred.h>
#include <rpc/types.h>
@@ -50,9 +48,6 @@
#include <rpc/rpc_com.h>
-/* gids compose part of a credential; there may not be more than 16 of them */
-#define NGRPS 16
-
/*
* XDR for unix authentication parameters.
*/
@@ -60,25 +55,23 @@ bool_t
xdr_authunix_parms(XDR *xdrs, uint32_t *time, struct xucred *cred)
{
uint32_t namelen;
- uint32_t ngroups, i;
+ uint32_t supp_ngroups, i;
uint32_t junk;
char hostbuf[MAXHOSTNAMELEN];
+ if (xdrs->x_op == XDR_FREE)
+ /* This function does not allocate auxiliary memory. */
+ return (TRUE);
+
if (xdrs->x_op == XDR_ENCODE) {
- /*
- * Restrict name length to 255 according to RFC 1057.
- */
getcredhostname(NULL, hostbuf, sizeof(hostbuf));
namelen = strlen(hostbuf);
- if (namelen > 255)
- namelen = 255;
- } else {
+ if (namelen > AUTH_SYS_MAX_HOSTNAME)
+ namelen = AUTH_SYS_MAX_HOSTNAME;
+ } else
namelen = 0;
- }
- junk = 0;
- if (!xdr_uint32_t(xdrs, time)
- || !xdr_uint32_t(xdrs, &namelen))
+ if (!xdr_uint32_t(xdrs, time) || !xdr_uint32_t(xdrs, &namelen))
return (FALSE);
/*
@@ -88,43 +81,65 @@ xdr_authunix_parms(XDR *xdrs, uint32_t *time, struct xucred *cred)
if (!xdr_opaque(xdrs, hostbuf, namelen))
return (FALSE);
} else {
+ if (namelen > AUTH_SYS_MAX_HOSTNAME)
+ return (FALSE);
xdr_setpos(xdrs, xdr_getpos(xdrs) + RNDUP(namelen));
}
if (!xdr_uint32_t(xdrs, &cred->cr_uid))
return (FALSE);
+
+ /*
+ * Safety check: The protocol needs at least one group (access to
+ * 'cr_gid', decrement of 'cr_ngroups' below).
+ */
+ if (xdrs->x_op == XDR_ENCODE && cred->cr_ngroups == 0)
+ return (FALSE);
if (!xdr_uint32_t(xdrs, &cred->cr_gid))
return (FALSE);
if (xdrs->x_op == XDR_ENCODE) {
/*
- * Note that this is a `struct xucred`, which maintains its
- * historical layout of preserving the egid in cr_ngroups and
- * cr_groups[0] == egid.
+ * Note that this is a 'struct xucred', which still has the
+ * historical layout where the effective GID is in cr_groups[0]
+ * and is accounted in 'cr_ngroups'. We subtract 1 to obtain
+ * the number of "supplementary" groups, passed in the AUTH_SYS
+ * credentials variable-length array called gids[] in RFC 5531.
*/
- ngroups = cred->cr_ngroups - 1;
- if (ngroups > NGRPS)
- ngroups = NGRPS;
+ MPASS(cred->cr_ngroups <= XU_NGROUPS);
+ supp_ngroups = cred->cr_ngroups - 1;
+ if (supp_ngroups > AUTH_SYS_MAX_GROUPS)
+ /* With current values, this should never execute. */
+ supp_ngroups = AUTH_SYS_MAX_GROUPS;
}
- if (!xdr_uint32_t(xdrs, &ngroups))
+ if (!xdr_uint32_t(xdrs, &supp_ngroups))
return (FALSE);
- for (i = 0; i < ngroups; i++) {
- if (i < ngroups_max) {
- if (!xdr_uint32_t(xdrs, &cred->cr_groups[i + 1]))
- return (FALSE);
- } else {
- if (!xdr_uint32_t(xdrs, &junk))
- return (FALSE);
- }
- }
- if (xdrs->x_op == XDR_DECODE) {
- if (ngroups > ngroups_max)
- cred->cr_ngroups = ngroups_max + 1;
- else
- cred->cr_ngroups = ngroups + 1;
- }
+ /*
+ * Because we cannot store more than XU_NGROUPS in total (16 at the
+ * time of this writing), for now we choose to be strict with respect
+ * to RFC 5531's maximum number of supplementary groups
+ * (AUTH_SYS_MAX_GROUPS). That would also be an accidental DoS
+ * prevention measure, were the request handling code not trying to
+ * reassemble the message in full without any size limit. Although
+ * AUTH_SYS_MAX_GROUPS and XU_NGROUPS are equal,
+ * since the latter includes the "effective" GID, we cannot store the
+ * last group of a message with exactly AUTH_SYS_MAX_GROUPS
+ * supplementary groups. We accept such messages so as not to violate
+ * the protocol, silently dropping the last group on the floor.
+ */
+
+ if (xdrs->x_op != XDR_ENCODE && supp_ngroups > AUTH_SYS_MAX_GROUPS)
+ return (FALSE);
+
+ junk = 0;
+ for (i = 0; i < supp_ngroups; ++i)
+ if (!xdr_uint32_t(xdrs, i < XU_NGROUPS - 1 ?
+ &cred->cr_sgroups[i] : &junk))
+ return (FALSE);
+
+ if (xdrs->x_op != XDR_ENCODE)
+ cred->cr_ngroups = MIN(supp_ngroups + 1, XU_NGROUPS);
return (TRUE);
}
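
Note: for reference, the AUTH_SYS credential body that xdr_authunix_parms() walks is laid out as follows per RFC 5531 (all scalar fields are 4-byte XDR units; this restates the RFC, it is not kernel code):

/*
 *	u32	stamp;			arbitrary id ("time" above)
 *	u32	machinename_len;	<= 255 (AUTH_SYS_MAX_HOSTNAME)
 *	opaque	machinename[];		padded to a 4-byte boundary
 *	u32	uid;
 *	u32	gid;			effective GID
 *	u32	gids_len;		<= 16 (AUTH_SYS_MAX_GROUPS)
 *	u32	gids[gids_len];		supplementary groups
 */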
diff --git a/sys/rpc/svc_auth_unix.c b/sys/rpc/svc_auth_unix.c
index 963f4f272964..aa0fc585865f 100644
--- a/sys/rpc/svc_auth_unix.c
+++ b/sys/rpc/svc_auth_unix.c
@@ -41,18 +41,12 @@
*/
#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/systm.h>
#include <sys/ucred.h>
#include <rpc/rpc.h>
#include <rpc/rpc_com.h>
-#define MAX_MACHINE_NAME 255
-#define NGRPS 16
-
/*
* Unix longhand authenticator
*/
@@ -62,11 +56,8 @@ _svcauth_unix(struct svc_req *rqst, struct rpc_msg *msg)
enum auth_stat stat;
XDR xdrs;
int32_t *buf;
- uint32_t time;
struct xucred *xcr;
- u_int auth_len;
- size_t str_len, gid_len;
- u_int i;
+ uint32_t auth_len, time;
xcr = rqst->rq_clntcred;
auth_len = (u_int)msg->rm_call.cb_cred.oa_length;
@@ -74,51 +65,58 @@ _svcauth_unix(struct svc_req *rqst, struct rpc_msg *msg)
XDR_DECODE);
buf = XDR_INLINE(&xdrs, auth_len);
if (buf != NULL) {
+ /* 'time', 'str_len', UID, GID and 'supp_ngroups'. */
+ const uint32_t min_len = 5 * BYTES_PER_XDR_UNIT;
+ uint32_t str_len, supp_ngroups;
+
+ if (auth_len < min_len)
+ goto badcred;
time = IXDR_GET_UINT32(buf);
- str_len = (size_t)IXDR_GET_UINT32(buf);
- if (str_len > MAX_MACHINE_NAME) {
- stat = AUTH_BADCRED;
- goto done;
- }
+ str_len = IXDR_GET_UINT32(buf);
+ if (str_len > AUTH_SYS_MAX_HOSTNAME)
+ goto badcred;
str_len = RNDUP(str_len);
+ /*
+ * Recheck message length now that we know the value of
+ * 'str_len' (and that it won't cause an overflow in additions
+ * below) to protect access to the credentials part.
+ */
+ if (auth_len < min_len + str_len)
+ goto badcred;
buf += str_len / sizeof (int32_t);
xcr->cr_uid = IXDR_GET_UINT32(buf);
xcr->cr_gid = IXDR_GET_UINT32(buf);
- gid_len = (size_t)IXDR_GET_UINT32(buf);
- if (gid_len > NGRPS) {
- stat = AUTH_BADCRED;
- goto done;
- }
- for (i = 0; i < gid_len; i++) {
- /*
- * Note that this is a `struct xucred`, which maintains
- * its historical layout of preserving the egid in
- * cr_ngroups and cr_groups[0] == egid.
- */
- if (i + 1 < XU_NGROUPS)
- xcr->cr_groups[i + 1] = IXDR_GET_INT32(buf);
- else
- buf++;
- }
- if (gid_len + 1 > XU_NGROUPS)
- xcr->cr_ngroups = XU_NGROUPS;
- else
- xcr->cr_ngroups = gid_len + 1;
+ supp_ngroups = IXDR_GET_UINT32(buf);
+ /*
+ * See the herald comment before a similar test at the end of
+ * xdr_authunix_parms() for why we strictly respect RFC 5531 and
+ * why we may have to drop the last supplementary group when
+ * there are AUTH_SYS_MAX_GROUPS of them.
+ */
+ if (supp_ngroups > AUTH_SYS_MAX_GROUPS)
+ goto badcred;
+ /*
+ * Final message length check, as we now know how much we will
+ * read in total.
+ */
+ if (auth_len < min_len + str_len +
+ supp_ngroups * BYTES_PER_XDR_UNIT)
+ goto badcred;
/*
- * five is the smallest unix credentials structure -
- * timestamp, hostname len (0), uid, gid, and gids len (0).
+ * Note that 'xcr' is a 'struct xucred', which still has the
+ * historical layout where the effective GID is in cr_groups[0]
+ * and is accounted in 'cr_ngroups'.
*/
- if ((5 + gid_len) * BYTES_PER_XDR_UNIT + str_len > auth_len) {
- (void) printf("bad auth_len gid %ld str %ld auth %u\n",
- (long)gid_len, (long)str_len, auth_len);
- stat = AUTH_BADCRED;
- goto done;
+ for (uint32_t i = 0; i < supp_ngroups; ++i) {
+ if (i < XU_NGROUPS - 1)
+ xcr->cr_sgroups[i] = IXDR_GET_INT32(buf);
+ else
+ buf++;
}
- } else if (! xdr_authunix_parms(&xdrs, &time, xcr)) {
- stat = AUTH_BADCRED;
- goto done;
- }
+ xcr->cr_ngroups = MIN(supp_ngroups + 1, XU_NGROUPS);
+ } else if (!xdr_authunix_parms(&xdrs, &time, xcr))
+ goto badcred;
rqst->rq_verf = _null_auth;
stat = AUTH_OK;
@@ -126,6 +124,10 @@ done:
XDR_DESTROY(&xdrs);
return (stat);
+
+badcred:
+ stat = AUTH_BADCRED;
+ goto done;
}
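
Note: the decoder above validates auth_len in three stages, rechecking after each variable-length field becomes known. A worked example with a made-up message, hostname "host" (so str_len = RNDUP(4) = 4) and supp_ngroups = 3, with BYTES_PER_XDR_UNIT = 4:

/*
 *	stage 1: auth_len >= 5 * 4			= 20 (fixed fields)
 *	stage 2: auth_len >= 5 * 4 + 4			= 24 (+ padded name)
 *	stage 3: auth_len >= 5 * 4 + 4 + 3 * 4		= 36 (+ gids array)
 */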
diff --git a/sys/security/audit/audit.c b/sys/security/audit/audit.c
index 7ec50d990d4e..876776e5f62e 100644
--- a/sys/security/audit/audit.c
+++ b/sys/security/audit/audit.c
@@ -329,7 +329,7 @@ audit_record_dtor(void *mem, int size, void *arg)
* call into the BSM assembly code to initialize it.
*/
static void
-audit_init(void)
+audit_init(void *dummy __unused)
{
audit_trail_enabled = 0;
diff --git a/sys/security/audit/audit_syscalls.c b/sys/security/audit/audit_syscalls.c
index 40b2fb3d1c9f..f50c627b7515 100644
--- a/sys/security/audit/audit_syscalls.c
+++ b/sys/security/audit/audit_syscalls.c
@@ -54,6 +54,29 @@
#ifdef AUDIT
+static int
+audit_priv_check_cred(struct ucred *cred, int priv)
+{
+ int error;
+
+ error = priv_check_cred(cred, priv);
+ if (error == EPERM && jailed(cred)) {
+ /*
+ * The audit system calls historically returned ENOSYS when
+ * invoked from within a jail, and some userspace applications
+ * handle that case specially. Thus, convert the error here.
+ */
+ error = ENOSYS;
+ }
+ return (error);
+}
+
+static int
+audit_priv_check(struct thread *td, int priv)
+{
+ return (audit_priv_check_cred(td->td_ucred, priv));
+}
+
/*
* System call to allow a user space application to submit a BSM audit record
* to the kernel for inclusion in the audit log. This function does little
@@ -592,9 +615,7 @@ sys_getauid(struct thread *td, struct getauid_args *uap)
{
int error;
- if (jailed(td->td_ucred))
- return (ENOSYS);
- error = priv_check(td, PRIV_AUDIT_GETAUDIT);
+ error = audit_priv_check(td, PRIV_AUDIT_GETAUDIT);
if (error)
return (error);
return (copyout(&td->td_ucred->cr_audit.ai_auid, uap->auid,
@@ -609,8 +630,6 @@ sys_setauid(struct thread *td, struct setauid_args *uap)
au_id_t id;
int error;
- if (jailed(td->td_ucred))
- return (ENOSYS);
error = copyin(uap->auid, &id, sizeof(id));
if (error)
return (error);
@@ -624,7 +643,7 @@ sys_setauid(struct thread *td, struct setauid_args *uap)
if (error)
goto fail;
#endif
- error = priv_check_cred(oldcred, PRIV_AUDIT_SETAUDIT);
+ error = audit_priv_check_cred(oldcred, PRIV_AUDIT_SETAUDIT);
if (error)
goto fail;
newcred->cr_audit.ai_auid = id;
@@ -650,9 +669,7 @@ sys_getaudit(struct thread *td, struct getaudit_args *uap)
int error;
cred = td->td_ucred;
- if (jailed(cred))
- return (ENOSYS);
- error = priv_check(td, PRIV_AUDIT_GETAUDIT);
+ error = audit_priv_check(td, PRIV_AUDIT_GETAUDIT);
if (error)
return (error);
if (cred->cr_audit.ai_termid.at_type == AU_IPv6)
@@ -674,8 +691,6 @@ sys_setaudit(struct thread *td, struct setaudit_args *uap)
struct auditinfo ai;
int error;
- if (jailed(td->td_ucred))
- return (ENOSYS);
error = copyin(uap->auditinfo, &ai, sizeof(ai));
if (error)
return (error);
@@ -689,7 +704,7 @@ sys_setaudit(struct thread *td, struct setaudit_args *uap)
if (error)
goto fail;
#endif
- error = priv_check_cred(oldcred, PRIV_AUDIT_SETAUDIT);
+ error = audit_priv_check_cred(oldcred, PRIV_AUDIT_SETAUDIT);
if (error)
goto fail;
bzero(&newcred->cr_audit, sizeof(newcred->cr_audit));
@@ -715,11 +730,9 @@ sys_getaudit_addr(struct thread *td, struct getaudit_addr_args *uap)
{
int error;
- if (jailed(td->td_ucred))
- return (ENOSYS);
if (uap->length < sizeof(*uap->auditinfo_addr))
return (EOVERFLOW);
- error = priv_check(td, PRIV_AUDIT_GETAUDIT);
+ error = audit_priv_check(td, PRIV_AUDIT_GETAUDIT);
if (error)
return (error);
return (copyout(&td->td_ucred->cr_audit, uap->auditinfo_addr,
@@ -734,8 +747,6 @@ sys_setaudit_addr(struct thread *td, struct setaudit_addr_args *uap)
struct auditinfo_addr aia;
int error;
- if (jailed(td->td_ucred))
- return (ENOSYS);
error = copyin(uap->auditinfo_addr, &aia, sizeof(aia));
if (error)
return (error);
@@ -752,7 +763,7 @@ sys_setaudit_addr(struct thread *td, struct setaudit_addr_args *uap)
if (error)
goto fail;
#endif
- error = priv_check_cred(oldcred, PRIV_AUDIT_SETAUDIT);
+ error = audit_priv_check_cred(oldcred, PRIV_AUDIT_SETAUDIT);
if (error)
goto fail;
newcred->cr_audit = aia;
diff --git a/sys/security/mac/mac_framework.c b/sys/security/mac/mac_framework.c
index d742b5dcbc3a..b0776160cc74 100644
--- a/sys/security/mac/mac_framework.c
+++ b/sys/security/mac/mac_framework.c
@@ -320,7 +320,7 @@ mac_policy_xlock_assert(void)
* Initialize the MAC subsystem, including appropriate SMP locks.
*/
static void
-mac_init(void)
+mac_init(void *dummy __unused)
{
LIST_INIT(&mac_static_policy_list);
@@ -340,7 +340,7 @@ mac_init(void)
* kernel, or loaded before the kernel startup.
*/
static void
-mac_late_init(void)
+mac_late_init(void *dummy __unused)
{
mac_late = 1;
diff --git a/sys/security/mac_bsdextended/mac_bsdextended.c b/sys/security/mac_bsdextended/mac_bsdextended.c
index 8a6549214380..bf95c008e2f2 100644
--- a/sys/security/mac_bsdextended/mac_bsdextended.c
+++ b/sys/security/mac_bsdextended/mac_bsdextended.c
@@ -246,7 +246,9 @@ ugidfw_rulecheck(struct mac_bsdextended_rule *rule,
}
if (rule->mbr_subject.mbs_flags & MBS_GID_DEFINED) {
- match = ((cred->cr_rgid <= rule->mbr_subject.mbs_gid_max &&
+ match = ((cred->cr_gid <= rule->mbr_subject.mbs_gid_max &&
+ cred->cr_gid >= rule->mbr_subject.mbs_gid_min) ||
+ (cred->cr_rgid <= rule->mbr_subject.mbs_gid_max &&
cred->cr_rgid >= rule->mbr_subject.mbs_gid_min) ||
(cred->cr_svgid <= rule->mbr_subject.mbs_gid_max &&
cred->cr_svgid >= rule->mbr_subject.mbs_gid_min));
diff --git a/sys/security/mac_do/mac_do.c b/sys/security/mac_do/mac_do.c
index 8856be5fa1a3..2bcff7bba973 100644
--- a/sys/security/mac_do/mac_do.c
+++ b/sys/security/mac_do/mac_do.c
@@ -44,7 +44,7 @@ SYSCTL_INT(_security_mac_do, OID_AUTO, print_parse_error, CTLFLAG_RWTUN,
&print_parse_error, 0, "Print parse errors on setting rules "
"(via sysctl(8)).");
-static MALLOC_DEFINE(M_DO, "do_rule", "Rules for mac_do");
+static MALLOC_DEFINE(M_MAC_DO, "mac_do", "mac_do(4) security module");
#define MAC_RULE_STRING_LEN 1024
@@ -319,17 +319,17 @@ toast_rules(struct rules *const rules)
struct rule *rule, *rule_next;
STAILQ_FOREACH_SAFE(rule, head, r_entries, rule_next) {
- free(rule->uids, M_DO);
- free(rule->gids, M_DO);
- free(rule, M_DO);
+ free(rule->uids, M_MAC_DO);
+ free(rule->gids, M_MAC_DO);
+ free(rule, M_MAC_DO);
}
- free(rules, M_DO);
+ free(rules, M_MAC_DO);
}
static struct rules *
alloc_rules(void)
{
- struct rules *const rules = malloc(sizeof(*rules), M_DO, M_WAITOK);
+ struct rules *const rules = malloc(sizeof(*rules), M_MAC_DO, M_WAITOK);
_Static_assert(MAC_RULE_STRING_LEN > 0, "MAC_RULE_STRING_LEN <= 0!");
rules->string[0] = 0;
@@ -433,7 +433,7 @@ static void
make_parse_error(struct parse_error **const parse_error, const size_t pos,
const char *const fmt, ...)
{
- struct parse_error *const err = malloc(sizeof(*err), M_DO, M_WAITOK);
+ struct parse_error *const err = malloc(sizeof(*err), M_MAC_DO, M_WAITOK);
va_list ap;
err->pos = pos;
@@ -448,7 +448,7 @@ make_parse_error(struct parse_error **const parse_error, const size_t pos,
static void
free_parse_error(struct parse_error *const parse_error)
{
- free(parse_error, M_DO);
+ free(parse_error, M_MAC_DO);
}
static int
@@ -733,7 +733,7 @@ parse_target_clause(char *to, struct rule *const rule,
"Too many target clauses of type '%s'.", to_type);
return (EOVERFLOW);
}
- ie = malloc(sizeof(*ie), M_DO, M_WAITOK);
+ ie = malloc(sizeof(*ie), M_MAC_DO, M_WAITOK);
ie->spec = is;
STAILQ_INSERT_TAIL(list, ie, ie_entries);
check_type_and_id_spec(type, &is);
@@ -784,7 +784,7 @@ pour_list_into_rule(const id_type_t type, struct id_list *const list,
STAILQ_FOREACH_SAFE(ie, list, ie_entries, ie_next) {
MPASS(idx < *nb);
array[idx] = ie->spec;
- free(ie, M_DO);
+ free(ie, M_MAC_DO);
++idx;
}
MPASS(idx == *nb);
@@ -874,7 +874,7 @@ parse_single_rule(char *rule, struct rules *const rules,
STAILQ_INIT(&gid_list);
/* Freed when the 'struct rules' container is freed. */
- new = malloc(sizeof(*new), M_DO, M_WAITOK | M_ZERO);
+ new = malloc(sizeof(*new), M_MAC_DO, M_WAITOK | M_ZERO);
from_type = strsep_noblanks(&rule, "=");
MPASS(from_type != NULL); /* Because 'rule' was not NULL. */
@@ -933,7 +933,7 @@ parse_single_rule(char *rule, struct rules *const rules,
} while (to_list != NULL);
if (new->uids_nb != 0) {
- new->uids = malloc(sizeof(*new->uids) * new->uids_nb, M_DO,
+ new->uids = malloc(sizeof(*new->uids) * new->uids_nb, M_MAC_DO,
M_WAITOK);
error = pour_list_into_rule(IT_UID, &uid_list, new->uids,
&new->uids_nb, parse_error);
@@ -949,7 +949,7 @@ parse_single_rule(char *rule, struct rules *const rules,
}
if (new->gids_nb != 0) {
- new->gids = malloc(sizeof(*new->gids) * new->gids_nb, M_DO,
+ new->gids = malloc(sizeof(*new->gids) * new->gids_nb, M_MAC_DO,
M_WAITOK);
error = pour_list_into_rule(IT_GID, &gid_list, new->gids,
&new->gids_nb, parse_error);
@@ -969,13 +969,13 @@ parse_single_rule(char *rule, struct rules *const rules,
return (0);
einval:
- free(new->gids, M_DO);
- free(new->uids, M_DO);
- free(new, M_DO);
+ free(new->gids, M_MAC_DO);
+ free(new->uids, M_MAC_DO);
+ free(new, M_MAC_DO);
STAILQ_FOREACH_SAFE(ie, &gid_list, ie_entries, ie_next)
- free(ie, M_DO);
+ free(ie, M_MAC_DO);
STAILQ_FOREACH_SAFE(ie, &uid_list, ie_entries, ie_next)
- free(ie, M_DO);
+ free(ie, M_MAC_DO);
MPASS(*parse_error != NULL);
return (EINVAL);
}
@@ -1028,7 +1028,7 @@ parse_rules(const char *const string, struct rules **const rulesp,
bcopy(string, rules->string, len + 1);
MPASS(rules->string[len] == '\0'); /* Catch some races. */
- copy = malloc(len + 1, M_DO, M_WAITOK);
+ copy = malloc(len + 1, M_MAC_DO, M_WAITOK);
bcopy(string, copy, len + 1);
MPASS(copy[len] == '\0'); /* Catch some races. */
@@ -1046,7 +1046,7 @@ parse_rules(const char *const string, struct rules **const rulesp,
*rulesp = rules;
out:
- free(copy, M_DO);
+ free(copy, M_MAC_DO);
return (error);
}
@@ -1226,7 +1226,7 @@ parse_and_set_rules(struct prison *const pr, const char *rules_string,
static int
mac_do_sysctl_rules(SYSCTL_HANDLER_ARGS)
{
- char *const buf = malloc(MAC_RULE_STRING_LEN, M_DO, M_WAITOK);
+ char *const buf = malloc(MAC_RULE_STRING_LEN, M_MAC_DO, M_WAITOK);
struct prison *const td_pr = req->td->td_ucred->cr_prison;
struct prison *pr;
struct rules *rules;
@@ -1250,7 +1250,7 @@ mac_do_sysctl_rules(SYSCTL_HANDLER_ARGS)
free_parse_error(parse_error);
}
out:
- free(buf, M_DO);
+ free(buf, M_MAC_DO);
return (error);
}
@@ -1573,7 +1573,7 @@ set_data_header(void *const data, const size_t size, const int priv,
static void *
alloc_data(void *const data, const size_t size)
{
- struct mac_do_data_header *const hdr = realloc(data, size, M_DO,
+ struct mac_do_data_header *const hdr = realloc(data, size, M_MAC_DO,
M_WAITOK);
MPASS(size >= sizeof(struct mac_do_data_header));
@@ -1602,7 +1602,7 @@ alloc_data(void *const data, const size_t size)
static void
dealloc_thread_osd(void *const value)
{
- free(value, M_DO);
+ free(value, M_MAC_DO);
}
/*
@@ -1650,7 +1650,7 @@ rule_grant_supplementary_groups(const struct rule *const rule,
const bool current_has_supp = (gid_flags & MDF_CURRENT) != 0 &&
(gid_flags & MDF_SUPP_MASK) != 0;
id_nb_t rule_idx = 0;
- int old_idx = 1, new_idx = 1;
+ int old_idx = 0, new_idx = 0;
if ((gid_flags & MDF_ANY_SUPP) != 0 &&
(gid_flags & MDF_MAY_REJ_SUPP) == 0)
@@ -1992,6 +1992,10 @@ check_proc(void)
/*
* Only grant privileges if requested by the right executable.
*
+ * As MAC/do configuration is per-jail, in order to avoid confused
+ * deputy situations in chroots (privileged or unprivileged), make sure
+ * to check the path from the current jail's root.
+ *
* XXXOC: We may want to base this check on a tunable path and/or
* a specific MAC label. Going even further, e.g., envisioning to
* completely replace the path check with the latter, we would need to
@@ -2003,7 +2007,7 @@ check_proc(void)
* setting a MAC label per file (perhaps via additions to mtree(1)). So
* this probably isn't going to happen overnight, if ever.
*/
- if (vn_fullpath(curproc->p_textvp, &path, &to_free) != 0)
+ if (vn_fullpath_jail(curproc->p_textvp, &path, &to_free) != 0)
return (EPERM);
error = strcmp(path, "/usr/bin/mdo") == 0 ? 0 : EPERM;
free(to_free, M_TEMP);
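
The M_DO -> M_MAC_DO hunks above are a mechanical rename of the module's malloc(9) type tag. A minimal sketch of how such a tag is declared and used, assuming a MALLOC_DEFINE in an unshown part of mac_do.c (the description strings here are illustrative):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/* Hypothetical definition; the real one lives outside these hunks. */
MALLOC_DEFINE(M_MAC_DO, "mac_do", "MAC/do rules and parse buffers");

static void
example_alloc(void)
{
        char *buf;

        /* M_WAITOK may sleep, so this cannot run in interrupt context. */
        buf = malloc(128, M_MAC_DO, M_WAITOK | M_ZERO);
        /* ... use buf ... */
        free(buf, M_MAC_DO);    /* free(9) must name the same type tag */
}

Every allocation and free must agree on the tag, which is why the rename touches each call site in one sweep.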
diff --git a/sys/sys/_atomic_subword.h b/sys/sys/_atomic_subword.h
index dee5a3bed871..284e2bfa340f 100644
--- a/sys/sys/_atomic_subword.h
+++ b/sys/sys/_atomic_subword.h
@@ -205,4 +205,32 @@ atomic_load_acq_16(const volatile uint16_t *p)
#undef _ATOMIC_BYTE_SHIFT
#undef _ATOMIC_HWORD_SHIFT
+#ifndef atomic_set_16
+static __inline void
+atomic_set_16(volatile uint16_t *p, uint16_t bit)
+{
+ uint16_t v;
+
+ v = atomic_load_16(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v | bit))
+ break;
+ }
+}
+#endif
+
+#ifndef atomic_clear_16
+static __inline void
+atomic_clear_16(volatile uint16_t *p, uint16_t bit)
+{
+ uint16_t v;
+
+ v = atomic_load_16(p);
+ for (;;) {
+ if (atomic_fcmpset_16(p, &v, v & ~bit))
+ break;
+ }
+}
+#endif
+
#endif /* _SYS__ATOMIC_SUBWORD_H_ */
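
The new atomic_set_16()/atomic_clear_16() fallbacks follow the standard fcmpset retry pattern: on failure, atomic_fcmpset_16() stores the observed value back into v, so the loop retries without an explicit reload. A short usage sketch against a hypothetical driver status word (struct and flag names are illustrative):

#include <sys/types.h>
#include <machine/atomic.h>

#define SC_RUNNING      0x0001
#define SC_ERROR        0x0002

struct demo_softc {
        volatile uint16_t sc_status;    /* touched from several CPUs */
};

static void
demo_mark_running(struct demo_softc *sc)
{
        atomic_set_16(&sc->sc_status, SC_RUNNING);      /* lock-free OR */
}

static void
demo_clear_error(struct demo_softc *sc)
{
        atomic_clear_16(&sc->sc_status, SC_ERROR);      /* lock-free AND-NOT */
}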
diff --git a/sys/sys/bus.h b/sys/sys/bus.h
index dda27f4737b2..e7ce152160f8 100644
--- a/sys/sys/bus.h
+++ b/sys/sys/bus.h
@@ -78,7 +78,7 @@ typedef enum device_property_type {
* The strings are placed one after the other, separated by NUL characters.
* Fields should be added after the last one and order maintained for compatibility
*/
-#define BUS_USER_BUFFER (3*1024)
+#define BUS_USER_BUFFER (3 * 1024)
struct u_device {
uintptr_t dv_handle;
uintptr_t dv_parent;
@@ -247,8 +247,8 @@ typedef struct devclass *devclass_t;
* and may use regular mutexes. However, it is prohibited from
* sleeping on a sleep queue.
*/
-typedef int driver_filter_t(void*);
-typedef void driver_intr_t(void*);
+typedef int driver_filter_t(void *);
+typedef void driver_intr_t(void *);
/**
* @brief Interrupt type bits.
@@ -476,18 +476,18 @@ int bus_generic_resume(device_t dev);
int bus_generic_resume_child(device_t dev, device_t child);
int bus_generic_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags,
- driver_filter_t *filter, driver_intr_t *intr,
+ driver_filter_t *filter, driver_intr_t *intr,
void *arg, void **cookiep);
struct resource *
- bus_generic_rl_alloc_resource (device_t, device_t, int, int *,
- rman_res_t, rman_res_t, rman_res_t, u_int);
-void bus_generic_rl_delete_resource (device_t, device_t, int, int);
-int bus_generic_rl_get_resource (device_t, device_t, int, int, rman_res_t *,
- rman_res_t *);
-int bus_generic_rl_set_resource (device_t, device_t, int, int, rman_res_t,
- rman_res_t);
-int bus_generic_rl_release_resource (device_t, device_t, struct resource *);
+ bus_generic_rl_alloc_resource(device_t, device_t, int, int *,
+ rman_res_t, rman_res_t, rman_res_t, u_int);
+void bus_generic_rl_delete_resource(device_t, device_t, int, int);
+int bus_generic_rl_get_resource(device_t, device_t, int, int, rman_res_t *,
+ rman_res_t *);
+int bus_generic_rl_set_resource(device_t, device_t, int, int, rman_res_t,
+ rman_res_t);
+int bus_generic_rl_release_resource(device_t, device_t, struct resource *);
struct resource *
bus_generic_rman_alloc_resource(device_t dev, device_t child, int type,
int *rid, rman_res_t start,
@@ -562,7 +562,7 @@ int bus_get_domain(device_t dev, int *domain);
int bus_release_resource(device_t dev, struct resource *r);
int bus_free_resource(device_t dev, int type, struct resource *r);
int bus_setup_intr(device_t dev, struct resource *r, int flags,
- driver_filter_t filter, driver_intr_t handler,
+ driver_filter_t filter, driver_intr_t handler,
void *arg, void **cookiep);
int bus_teardown_intr(device_t dev, struct resource *r, void *cookie);
int bus_suspend_intr(device_t dev, struct resource *r);
@@ -687,9 +687,9 @@ int device_probe_child(device_t bus, device_t dev);
int device_quiesce(device_t dev);
void device_quiet(device_t dev);
void device_quiet_children(device_t dev);
-void device_set_desc(device_t dev, const char* desc);
-void device_set_descf(device_t dev, const char* fmt, ...) __printflike(2, 3);
-void device_set_desc_copy(device_t dev, const char* desc);
+void device_set_desc(device_t dev, const char *desc);
+void device_set_descf(device_t dev, const char *fmt, ...) __printflike(2, 3);
+void device_set_desc_copy(device_t dev, const char *desc);
int device_set_devclass(device_t dev, const char *classname);
int device_set_devclass_fixed(device_t dev, const char *classname);
bool device_is_devclass_fixed(device_t dev);
@@ -887,7 +887,8 @@ DECLARE_MODULE(_name##_##busname, _name##_##busname##_mod, \
*/
#define __BUS_ACCESSOR(varp, var, ivarp, ivar, type) \
\
-static __inline type varp ## _get_ ## var(device_t dev) \
+static __inline type \
+varp ## _get_ ## var(device_t dev) \
{ \
uintptr_t v; \
int e __diagused; \
@@ -899,7 +900,8 @@ static __inline type varp ## _get_ ## var(device_t dev) \
return ((type) v); \
} \
\
-static __inline void varp ## _set_ ## var(device_t dev, type t) \
+static __inline void \
+varp ## _set_ ## var(device_t dev, type t) \
{ \
uintptr_t v = (uintptr_t) t; \
int e __diagused; \
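
For reference, the reformatted __BUS_ACCESSOR now emits accessors in the standard two-line function style. A rough expansion for the pci(4) vendor ivar (simplified: the middle of the generated body, including the BUS_READ_IVAR call, is elided from this hunk):

/* __BUS_ACCESSOR(pci, vendor, PCI, VENDOR, uint16_t) expands roughly to: */
static __inline uint16_t
pci_get_vendor(device_t dev)
{
        uintptr_t v;
        int e __diagused;

        e = BUS_READ_IVAR(device_get_parent(dev), dev, PCI_IVAR_VENDOR, &v);
        return ((uint16_t)v);
}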
diff --git a/sys/sys/conf.h b/sys/sys/conf.h
index 1646aa108701..a830c9d4c622 100644
--- a/sys/sys/conf.h
+++ b/sys/sys/conf.h
@@ -159,6 +159,7 @@ typedef int dumper_hdr_t(struct dumperinfo *di, struct kerneldumpheader *kdh);
#define GID_RT_PRIO 47
#define GID_ID_PRIO 48
#define GID_DIALER 68
+#define GID_U2F 116
#define GID_NOGROUP 65533
#define GID_NOBODY 65534
diff --git a/sys/sys/cpu.h b/sys/sys/cpu.h
index b6a0094f0c51..5bb55679a05b 100644
--- a/sys/sys/cpu.h
+++ b/sys/sys/cpu.h
@@ -40,25 +40,31 @@
#define CPU_IVAR_CPUID_SIZE 3
#define CPU_IVAR_CPUID 4
-static __inline struct pcpu *cpu_get_pcpu(device_t dev)
+static __inline struct pcpu *
+cpu_get_pcpu(device_t dev)
{
uintptr_t v = 0;
+
BUS_READ_IVAR(device_get_parent(dev), dev, CPU_IVAR_PCPU, &v);
return ((struct pcpu *)v);
}
-static __inline int32_t cpu_get_nominal_mhz(device_t dev)
+static __inline int32_t
+cpu_get_nominal_mhz(device_t dev)
{
uintptr_t v = 0;
+
if (BUS_READ_IVAR(device_get_parent(dev), dev,
CPU_IVAR_NOMINAL_MHZ, &v) != 0)
return (-1);
return ((int32_t)v);
}
-static __inline const uint32_t *cpu_get_cpuid(device_t dev, size_t *count)
+static __inline const uint32_t *
+cpu_get_cpuid(device_t dev, size_t *count)
{
uintptr_t v = 0;
+
if (BUS_READ_IVAR(device_get_parent(dev), dev,
CPU_IVAR_CPUID_SIZE, &v) != 0)
return (NULL);
@@ -124,10 +130,10 @@ TAILQ_HEAD(cf_level_lst, cf_level);
* state. It is probably a bug to not combine this with "info only"
*/
#define CPUFREQ_TYPE_MASK 0xffff
-#define CPUFREQ_TYPE_RELATIVE (1<<0)
-#define CPUFREQ_TYPE_ABSOLUTE (1<<1)
-#define CPUFREQ_FLAG_INFO_ONLY (1<<16)
-#define CPUFREQ_FLAG_UNCACHED (1<<17)
+#define CPUFREQ_TYPE_RELATIVE (1 << 0)
+#define CPUFREQ_TYPE_ABSOLUTE (1 << 1)
+#define CPUFREQ_FLAG_INFO_ONLY (1 << 16)
+#define CPUFREQ_FLAG_UNCACHED (1 << 17)
/*
* When setting a level, the caller indicates the priority of this request.
@@ -162,7 +168,7 @@ int cpufreq_settings_changed(device_t dev);
* The new level and the result of the change (0 is success) is passed in.
* If the driver wishes to revoke the change from cpufreq_pre_change, it
* stores a non-zero error code in the result parameter and the change will
- * not be made. If the post-change eventhandler gets a non-zero result,
+ * not be made. If the post-change eventhandler gets a non-zero result,
* no change was made and the previous level remains in effect. If a change
* is revoked, the post-change eventhandler is still called with the error
* value supplied by the revoking driver. This gives listeners who cached
diff --git a/sys/sys/efi.h b/sys/sys/efi.h
index 89c8b15519de..f82c733898b4 100644
--- a/sys/sys/efi.h
+++ b/sys/sys/efi.h
@@ -40,9 +40,9 @@
{0xf2fd1544,0x9794,0x4a2c,{0x99,0x2e,0xe5,0xbb,0xcf,0x20,0xe3,0x94}}
#define EFI_TABLE_ESRT \
{0xb122a263,0x3661,0x4f68,{0x99,0x29,0x78,0xf8,0xb0,0xd6,0x21,0x80}}
-#define EFI_PROPERTIES_TABLE \
+#define EFI_PROPERTIES_TABLE \
{0x880aaca3,0x4adc,0x4a04,{0x90,0x79,0xb7,0x47,0x34,0x08,0x25,0xe5}}
-#define EFI_MEMORY_ATTRIBUTES_TABLE \
+#define EFI_MEMORY_ATTRIBUTES_TABLE \
{0xdcfa911d,0x26eb,0x469f,{0xa2,0x20,0x38,0xb7,0xdc,0x46,0x12,0x20}}
#define LINUX_EFI_MEMRESERVE_TABLE \
{0x888eb0c6,0x8ede,0x4ff5,{0xa8,0xf0,0x9a,0xee,0x5c,0xb9,0x77,0xc2}}
@@ -54,7 +54,7 @@ enum efi_reset {
};
typedef uint16_t efi_char;
-typedef unsigned long efi_status;
+typedef unsigned long efi_status;
/*
* This type-puns to a struct uuid, but all the EDK2 headers use this variation,
@@ -62,10 +62,10 @@ typedef unsigned long efi_status;
* can use EDK2 definitions both places.
*/
typedef struct efi_guid {
- uint32_t Data1;
- uint16_t Data2;
- uint16_t Data3;
- uint8_t Data4[8];
+ uint32_t Data1;
+ uint16_t Data2;
+ uint16_t Data3;
+ uint8_t Data4[8];
} efi_guid_t; /* Type puns with GUID and EFI_GUID */
struct efi_cfgtbl {
@@ -293,100 +293,99 @@ struct efi_ops {
extern const struct efi_ops *active_efi_ops;
/* Public MI EFI functions */
-static inline int efi_rt_ok(void)
+static inline int
+efi_rt_ok(void)
{
-
if (active_efi_ops->rt_ok == NULL)
return (ENXIO);
return (active_efi_ops->rt_ok());
}
-static inline int efi_get_table(efi_guid_t *guid, void **ptr)
+static inline int
+efi_get_table(efi_guid_t *guid, void **ptr)
{
-
if (active_efi_ops->get_table == NULL)
return (ENXIO);
return (active_efi_ops->get_table(guid, ptr));
}
-static inline int efi_copy_table(efi_guid_t *guid, void **buf,
- size_t buf_len, size_t *table_len)
+static inline int
+efi_copy_table(efi_guid_t *guid, void **buf, size_t buf_len, size_t *table_len)
{
-
if (active_efi_ops->copy_table == NULL)
return (ENXIO);
return (active_efi_ops->copy_table(guid, buf, buf_len, table_len));
}
-static inline int efi_get_time(struct efi_tm *tm)
+static inline int
+efi_get_time(struct efi_tm *tm)
{
-
if (active_efi_ops->get_time == NULL)
return (ENXIO);
return (active_efi_ops->get_time(tm));
}
-static inline int efi_get_time_capabilities(struct efi_tmcap *tmcap)
+static inline int
+efi_get_time_capabilities(struct efi_tmcap *tmcap)
{
-
if (active_efi_ops->get_time_capabilities == NULL)
return (ENXIO);
return (active_efi_ops->get_time_capabilities(tmcap));
}
-static inline int efi_reset_system(enum efi_reset type)
+static inline int
+efi_reset_system(enum efi_reset type)
{
-
if (active_efi_ops->reset_system == NULL)
return (ENXIO);
return (active_efi_ops->reset_system(type));
}
-static inline int efi_set_time(struct efi_tm *tm)
+static inline int
+efi_set_time(struct efi_tm *tm)
{
-
if (active_efi_ops->set_time == NULL)
return (ENXIO);
return (active_efi_ops->set_time(tm));
}
-static inline int efi_get_waketime(uint8_t *enabled, uint8_t *pending,
- struct efi_tm *tm)
+static inline int
+efi_get_waketime(uint8_t *enabled, uint8_t *pending, struct efi_tm *tm)
{
if (active_efi_ops->get_waketime == NULL)
return (ENXIO);
return (active_efi_ops->get_waketime(enabled, pending, tm));
}
-static inline int efi_set_waketime(uint8_t enable, struct efi_tm *tm)
+static inline int
+efi_set_waketime(uint8_t enable, struct efi_tm *tm)
{
if (active_efi_ops->set_waketime == NULL)
return (ENXIO);
return (active_efi_ops->set_waketime(enable, tm));
}
-static inline int efi_var_get(uint16_t *name, efi_guid_t *vendor,
- uint32_t *attrib, size_t *datasize, void *data)
+static inline int
+efi_var_get(uint16_t *name, efi_guid_t *vendor, uint32_t *attrib,
+ size_t *datasize, void *data)
{
-
if (active_efi_ops->var_get == NULL)
return (ENXIO);
return (active_efi_ops->var_get(name, vendor, attrib, datasize, data));
}
-static inline int efi_var_nextname(size_t *namesize, uint16_t *name,
- efi_guid_t *vendor)
+static inline int
+efi_var_nextname(size_t *namesize, uint16_t *name, efi_guid_t *vendor)
{
-
if (active_efi_ops->var_nextname == NULL)
return (ENXIO);
return (active_efi_ops->var_nextname(namesize, name, vendor));
}
-static inline int efi_var_set(uint16_t *name, efi_guid_t *vendor,
- uint32_t attrib, size_t datasize, void *data)
+static inline int
+efi_var_set(uint16_t *name, efi_guid_t *vendor, uint32_t attrib,
+ size_t datasize, void *data)
{
-
if (active_efi_ops->var_set == NULL)
return (ENXIO);
return (active_efi_ops->var_set(name, vendor, attrib, datasize, data));
diff --git a/sys/sys/event.h b/sys/sys/event.h
index 1b30e4292de8..084eaafcbdc0 100644
--- a/sys/sys/event.h
+++ b/sys/sys/event.h
@@ -45,7 +45,9 @@
#define EVFILT_USER (-11) /* User events */
#define EVFILT_SENDFILE (-12) /* attached to sendfile requests */
#define EVFILT_EMPTY (-13) /* empty send socket buf */
-#define EVFILT_SYSCOUNT 13
+#define EVFILT_JAIL (-14) /* attached to struct prison */
+#define EVFILT_JAILDESC (-15) /* attached to jail descriptors */
+#define EVFILT_SYSCOUNT 15
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define EV_SET(kevp_, a, b, c, d, e, f) do { \
@@ -205,10 +207,18 @@ struct freebsd11_kevent32 {
#define NOTE_PDATAMASK 0x000fffff /* mask for pid */
/* additional flags for EVFILT_PROC */
-#define NOTE_TRACK 0x00000001 /* follow across forks */
+#define NOTE_TRACK 0x00000001 /* follow across fork/create */
#define NOTE_TRACKERR 0x00000002 /* could not track child */
#define NOTE_CHILD 0x00000004 /* am a child process */
+/* data/hint flags for EVFILT_JAIL and EVFILT_JAILDESC */
+#define NOTE_JAIL_CHILD 0x80000000 /* child jail was created */
+#define NOTE_JAIL_SET 0x40000000 /* jail was modified */
+#define NOTE_JAIL_ATTACH 0x20000000 /* jail was attached to */
+#define NOTE_JAIL_REMOVE 0x10000000 /* jail was removed */
+#define NOTE_JAIL_MULTI 0x08000000 /* multiple child or attach */
+#define NOTE_JAIL_CTRLMASK 0xf0000000 /* mask for hint bits */
+
/* additional flags for EVFILT_TIMER */
#define NOTE_SECONDS 0x00000001 /* data is seconds */
#define NOTE_MSECONDS 0x00000002 /* data is milliseconds */
@@ -309,6 +319,7 @@ struct knote {
struct proc *p_proc; /* proc pointer */
struct kaiocb *p_aio; /* AIO job pointer */
struct aioliojob *p_lio; /* LIO job pointer */
+ struct prison *p_prison; /* prison pointer */
void *p_v; /* generic other pointer */
} kn_ptr;
const struct filterops *kn_fop;
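
The new EVFILT_JAIL filter and NOTE_JAIL_* hints let kqueue(2) watch a prison directly. A speculative userspace sketch, assuming the jail ID serves as the ident (by analogy with EVFILT_PROC and pids) and that the delivered hints land in fflags:

#include <sys/types.h>
#include <sys/event.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
        struct kevent ev;
        int jid = 1;            /* hypothetical jail ID to watch */
        int kq = kqueue();

        if (kq == -1)
                err(1, "kqueue");
        EV_SET(&ev, jid, EVFILT_JAIL, EV_ADD, NOTE_JAIL_SET |
            NOTE_JAIL_ATTACH | NOTE_JAIL_REMOVE, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent register");
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
                err(1, "kevent wait");
        printf("jail %d: fflags %#x\n", (int)ev.ident, ev.fflags);
        return (0);
}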
diff --git a/sys/sys/eventhandler.h b/sys/sys/eventhandler.h
index c0d9811dd1b9..29a16b393b52 100644
--- a/sys/sys/eventhandler.h
+++ b/sys/sys/eventhandler.h
@@ -33,6 +33,7 @@
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
+#include <sys/power.h>
#include <sys/queue.h>
#ifdef VIMAGE
@@ -201,7 +202,7 @@ EVENTHANDLER_DECLARE(shutdown_post_sync, shutdown_fn); /* after fs sync */
EVENTHANDLER_DECLARE(shutdown_final, shutdown_fn);
/* Power state change events */
-typedef void (*power_change_fn)(void *);
+typedef void (*power_change_fn)(void *, enum power_stype stype);
EVENTHANDLER_DECLARE(power_resume, power_change_fn);
EVENTHANDLER_DECLARE(power_suspend, power_change_fn);
EVENTHANDLER_DECLARE(power_suspend_early, power_change_fn);
diff --git a/sys/sys/exterrvar.h b/sys/sys/exterrvar.h
index 7bf1d264ff5e..6783a0d2d84f 100644
--- a/sys/sys/exterrvar.h
+++ b/sys/sys/exterrvar.h
@@ -31,7 +31,7 @@
#error "Specify error category before including sys/exterrvar.h"
#endif
-#ifdef BLOAT_KERNEL_WITH_EXTERR
+#ifdef EXTERR_STRINGS
#define SET_ERROR_MSG(mmsg) (mmsg)
#else
#define SET_ERROR_MSG(mmsg) NULL
diff --git a/sys/sys/file.h b/sys/sys/file.h
index 63313926c4f0..c44fd0f28929 100644
--- a/sys/sys/file.h
+++ b/sys/sys/file.h
@@ -72,6 +72,7 @@ struct nameidata;
#define DTYPE_EVENTFD 13 /* eventfd */
#define DTYPE_TIMERFD 14 /* timerfd */
#define DTYPE_INOTIFY 15 /* inotify descriptor */
+#define DTYPE_JAILDESC 16 /* jail descriptor */
#ifdef _KERNEL
@@ -92,6 +93,8 @@ void foffset_lock_pair(struct file *fp1, off_t *off1p, struct file *fp2,
void foffset_lock_uio(struct file *fp, struct uio *uio, int flags);
void foffset_unlock(struct file *fp, off_t val, int flags);
void foffset_unlock_uio(struct file *fp, struct uio *uio, int flags);
+void fsetfl_lock(struct file *fp);
+void fsetfl_unlock(struct file *fp);
static inline off_t
foffset_get(struct file *fp)
@@ -196,7 +199,7 @@ struct file {
struct vnode *f_vnode; /* NULL or applicable vnode */
struct ucred *f_cred; /* associated credentials. */
short f_type; /* descriptor type */
- short f_vnread_flags; /* (f) Sleep lock for f_offset */
+ short f_vflags; /* (f) Sleep lock flags for members */
/*
* DTYPE_VNODE specific fields.
*/
@@ -219,8 +222,10 @@ struct file {
#define f_cdevpriv f_vnun.fvn_cdevpriv
#define f_advice f_vnun.fvn_advice
-#define FOFFSET_LOCKED 0x1
-#define FOFFSET_LOCK_WAITING 0x2
+#define FILE_V_FOFFSET_LOCKED 0x0001
+#define FILE_V_FOFFSET_LOCK_WAITING 0x0002
+#define FILE_V_SETFL_LOCKED 0x0004
+#define FILE_V_SETFL_LOCK_WAITING 0x0008
#endif /* __BSD_VISIBLE */
#endif /* _KERNEL || _WANT_FILE */
diff --git a/sys/sys/imgact_elf.h b/sys/sys/imgact_elf.h
index 2845a9dbc1e2..9e2a233248b4 100644
--- a/sys/sys/imgact_elf.h
+++ b/sys/sys/imgact_elf.h
@@ -86,7 +86,7 @@ typedef struct {
struct sysentvec *sysvec;
const char *interp_newpath;
int flags;
- Elf_Brandnote *brand_note;
+ const Elf_Brandnote *brand_note;
bool (*header_supported)(const struct image_params *,
const int32_t *, const uint32_t *);
/* High 8 bits of flags is private to the ABI */
@@ -111,9 +111,9 @@ struct sseg_closure {
size_t size; /* Total size of all writable segments. */
};
-bool __elfN(brand_inuse)(Elf_Brandinfo *entry);
-int __elfN(insert_brand_entry)(Elf_Brandinfo *entry);
-int __elfN(remove_brand_entry)(Elf_Brandinfo *entry);
+bool __elfN(brand_inuse)(const Elf_Brandinfo *entry);
+int __elfN(insert_brand_entry)(const Elf_Brandinfo *entry);
+int __elfN(remove_brand_entry)(const Elf_Brandinfo *entry);
int __elfN(freebsd_fixup)(uintptr_t *, struct image_params *);
int __elfN(coredump)(struct thread *, struct coredump_writer *, off_t, int);
size_t __elfN(populate_note)(int, void *, void *, size_t, void **);
diff --git a/sys/sys/jail.h b/sys/sys/jail.h
index d2655c52e832..e6a13e6719dd 100644
--- a/sys/sys/jail.h
+++ b/sys/sys/jail.h
@@ -99,8 +99,12 @@ enum prison_state {
#define JAIL_UPDATE 0x02 /* Update parameters of existing jail */
#define JAIL_ATTACH 0x04 /* Attach to jail upon creation */
#define JAIL_DYING 0x08 /* Allow getting a dying jail */
-#define JAIL_SET_MASK 0x0f /* JAIL_DYING is deprecated/ignored here */
-#define JAIL_GET_MASK 0x08
+#define JAIL_USE_DESC 0x10 /* Get/set jail in descriptor */
+#define JAIL_AT_DESC 0x20 /* Find/add jail under descriptor */
+#define JAIL_GET_DESC 0x40 /* Return a new jail descriptor */
+#define JAIL_OWN_DESC 0x80 /* Return a new owning jail descriptor */
+#define JAIL_SET_MASK 0xff /* JAIL_DYING is deprecated/ignored here */
+#define JAIL_GET_MASK 0xf8
#define JAIL_SYS_DISABLE 0
#define JAIL_SYS_NEW 1
@@ -115,7 +119,9 @@ int jail(struct jail *);
int jail_set(struct iovec *, unsigned int, int);
int jail_get(struct iovec *, unsigned int, int);
int jail_attach(int);
+int jail_attach_jd(int);
int jail_remove(int);
+int jail_remove_jd(int);
__END_DECLS
#else /* _KERNEL */
@@ -144,6 +150,8 @@ MALLOC_DECLARE(M_PRISON);
#define JAIL_META_PRIVATE "meta"
#define JAIL_META_SHARED "env"
+struct jaildesc;
+struct knlist;
struct racct;
struct prison_racct;
@@ -189,7 +197,9 @@ struct prison {
struct vnode *pr_root; /* (c) vnode to rdir */
struct prison_ip *pr_addrs[PR_FAMILY_MAX]; /* (p,n) IPs of jail */
struct prison_racct *pr_prison_racct; /* (c) racct jail proxy */
- void *pr_sparep[3];
+ struct knlist *pr_klist; /* (m) attached knotes */
+ LIST_HEAD(, jaildesc) pr_descs; /* (a) attached descriptors */
+ void *pr_sparep;
int pr_childcount; /* (a) number of child jails */
int pr_childmax; /* (p) maximum child jails */
unsigned pr_allow; /* (p) PR_ALLOW_* flags */
@@ -261,6 +271,7 @@ struct prison_racct {
#define PR_ALLOW_SETTIME 0x00100000
#define PR_ALLOW_ROUTING 0x00200000
#define PR_ALLOW_UNPRIV_PARENT_TAMPER 0x00400000
+#define PR_ALLOW_SETAUDIT 0x00800000
/*
* PR_ALLOW_PRISON0 are the allow flags that we apply by default to prison0,
@@ -268,7 +279,7 @@ struct prison_racct {
* build time. PR_ALLOW_ALL_STATIC should contain any bit above that we expect
* to be used on the system, while PR_ALLOW_PRISON0 will be some subset of that.
*/
-#define PR_ALLOW_ALL_STATIC 0x007f87ff
+#define PR_ALLOW_ALL_STATIC 0x00ff87ff
#define PR_ALLOW_PRISON0 \
(PR_ALLOW_ALL_STATIC & ~(PR_ALLOW_UNPRIV_PARENT_TAMPER))
@@ -425,10 +436,11 @@ SYSCTL_DECL(_security_jail_param);
/*
* Kernel support functions for jail().
*/
-struct ucred;
+struct knote;
struct mount;
struct sockaddr;
struct statfs;
+struct ucred;
struct vfsconf;
/*
@@ -463,6 +475,7 @@ void prison_proc_free(struct prison *);
void prison_proc_link(struct prison *, struct proc *);
void prison_proc_unlink(struct prison *, struct proc *);
void prison_proc_iterate(struct prison *, void (*)(struct proc *, void *), void *);
+void prison_remove(struct prison *);
void prison_set_allow(struct ucred *cred, unsigned flag, int enable);
bool prison_ischild(struct prison *, struct prison *);
bool prison_isalive(const struct prison *);
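
JAIL_GET_DESC and JAIL_OWN_DESC suggest jail_set(2) can now hand back a jail descriptor alongside (or instead of) a jid. A speculative sketch of the flag usage; how the new fd is actually returned is not visible in this header diff, and per the jaildesc.h comments below, closing an owning descriptor removes the jail:

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/uio.h>

static int
create_owned_jail(void)
{
        struct iovec iov[2];

        iov[0].iov_base = __DECONST(char *, "name");
        iov[0].iov_len = sizeof("name");
        iov[1].iov_base = __DECONST(char *, "demo");
        iov[1].iov_len = sizeof("demo");
        return (jail_set(iov, 2, JAIL_CREATE | JAIL_OWN_DESC));
}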
diff --git a/sys/sys/jaildesc.h b/sys/sys/jaildesc.h
new file mode 100644
index 000000000000..fda270d62e70
--- /dev/null
+++ b/sys/sys/jaildesc.h
@@ -0,0 +1,87 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 James Gritton.
+ * All rights reserved.
+ *
+ * This software was developed at the University of Cambridge Computer
+ * Laboratory with support from a grant from Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _SYS_JAILDESC_H_
+#define _SYS_JAILDESC_H_
+
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+#include <sys/selinfo.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <sys/_types.h>
+
+struct prison;
+
+/*-
+ * struct jaildesc describes a jail descriptor, which points to a struct
+ * prison. struct prison in turn has a linked list of struct jaildesc.
+ *
+ * Locking key:
+ * (c) set on creation, remains unchanged
+ * (d) jd_lock
+ * (p) jd_prison->pr_mtx
+ */
+struct jaildesc {
+ LIST_ENTRY(jaildesc) jd_list; /* (d,p) this prison's descs */
+ struct prison *jd_prison; /* (d) the prison */
+ struct mtx jd_lock;
+ struct selinfo jd_selinfo; /* (d) event notification */
+ unsigned jd_flags; /* (d) JDF_* flags */
+};
+
+/*
+ * Locking macros for the jaildesc.
+ */
+#define JAILDESC_LOCK_DESTROY(jd) mtx_destroy(&(jd)->jd_lock)
+#define JAILDESC_LOCK_INIT(jd) mtx_init(&(jd)->jd_lock, "jaildesc", \
+ NULL, MTX_DEF)
+#define JAILDESC_LOCK(jd) mtx_lock(&(jd)->jd_lock)
+#define JAILDESC_UNLOCK(jd) mtx_unlock(&(jd)->jd_lock)
+
+/*
+ * Flags for the jd_flags field
+ */
+#define JDF_SELECTED 0x00000001 /* issue selwakeup() */
+#define JDF_REMOVED 0x00000002 /* jail was removed */
+#define JDF_OWNING 0x00000004 /* closing descriptor removes jail */
+
+int jaildesc_find(struct thread *td, int fd, struct prison **prp,
+ struct ucred **ucredp);
+int jaildesc_alloc(struct thread *td, struct file **fpp, int *fdp, int owning);
+void jaildesc_set_prison(struct file *jd, struct prison *pr);
+void jaildesc_prison_cleanup(struct prison *pr);
+void jaildesc_knote(struct prison *pr, long hint);
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_JAILDESC_H_ */
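
The locking key above maps directly onto the JAILDESC_LOCK()/JAILDESC_UNLOCK() macros. A minimal in-kernel sketch honoring the (d) annotation on jd_flags:

/*
 * jd_flags is keyed (d), so it may only be read or written while
 * holding jd_lock.
 */
static bool
jaildesc_was_removed(struct jaildesc *jd)
{
        bool removed;

        JAILDESC_LOCK(jd);
        removed = (jd->jd_flags & JDF_REMOVED) != 0;
        JAILDESC_UNLOCK(jd);
        return (removed);
}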
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
index 380099092107..417afd4dbbe4 100644
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -249,15 +249,8 @@ struct sysinit_tslog {
const void *data;
const char *name;
};
-static inline void
-sysinit_tslog_shim(const void *data)
-{
- const struct sysinit_tslog *x = data;
-
- TSRAW(curthread, TS_ENTER, "SYSINIT", x->name);
- (x->func)(x->data);
- TSRAW(curthread, TS_EXIT, "SYSINIT", x->name);
-}
+void sysinit_tslog_shim(const void *);
+
#define C_SYSINIT(uniquifier, subsystem, order, func, ident) \
static struct sysinit_tslog uniquifier ## _sys_init_tslog = { \
func, \
@@ -322,7 +315,7 @@ void sysinit_add(struct sysinit **set, struct sysinit **set_end);
* int
* please avoid using for new tunables!
*/
-extern void tunable_int_init(void *);
+extern void tunable_int_init(const void *);
struct tunable_int {
const char *path;
int *var;
@@ -341,7 +334,7 @@ struct tunable_int {
/*
* long
*/
-extern void tunable_long_init(void *);
+extern void tunable_long_init(const void *);
struct tunable_long {
const char *path;
long *var;
@@ -360,7 +353,7 @@ struct tunable_long {
/*
* unsigned long
*/
-extern void tunable_ulong_init(void *);
+extern void tunable_ulong_init(const void *);
struct tunable_ulong {
const char *path;
unsigned long *var;
@@ -379,7 +372,7 @@ struct tunable_ulong {
/*
* int64_t
*/
-extern void tunable_int64_init(void *);
+extern void tunable_int64_init(const void *);
struct tunable_int64 {
const char *path;
int64_t *var;
@@ -398,7 +391,7 @@ struct tunable_int64 {
/*
* uint64_t
*/
-extern void tunable_uint64_init(void *);
+extern void tunable_uint64_init(const void *);
struct tunable_uint64 {
const char *path;
uint64_t *var;
@@ -417,7 +410,7 @@ struct tunable_uint64 {
/*
* quad
*/
-extern void tunable_quad_init(void *);
+extern void tunable_quad_init(const void *);
struct tunable_quad {
const char *path;
quad_t *var;
@@ -436,7 +429,7 @@ struct tunable_quad {
/*
* bool
*/
-extern void tunable_bool_init(void *);
+extern void tunable_bool_init(const void *);
struct tunable_bool {
const char *path;
bool *var;
@@ -452,7 +445,7 @@ struct tunable_bool {
#define TUNABLE_BOOL_FETCH(path, var) getenv_bool((path), (var))
-extern void tunable_str_init(void *);
+extern void tunable_str_init(const void *);
struct tunable_str {
const char *path;
char *var;
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index f6480b173a5c..18f85192f6c3 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -1007,6 +1007,7 @@ struct mntarg *mount_argsu(struct mntarg *ma, const char *name, const void *val,
void statfs_scale_blocks(struct statfs *sf, long max_size);
struct vfsconf *vfs_byname(const char *);
struct vfsconf *vfs_byname_kld(const char *, struct thread *td, int *);
+void vfs_unref_vfsconf(struct vfsconf *vfsp);
void vfs_mount_destroy(struct mount *);
void vfs_event_signal(fsid_t *, u_int32_t, intptr_t);
void vfs_freeopts(struct vfsoptlist *opts);
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 56c03a1b0be9..4f6b45d78a88 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -68,9 +68,9 @@
*/
#define MTX_UNOWNED 0x00000000 /* Cookie for free mutex */
#define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */
-#define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */
+#define MTX_WAITERS 0x00000002 /* lock has waiters (for MTX_DEF only) */
#define MTX_DESTROYED 0x00000004 /* lock destroyed */
-#define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)
+#define MTX_FLAGMASK (MTX_RECURSED | MTX_WAITERS | MTX_DESTROYED)
/*
* Prototypes
@@ -91,7 +91,7 @@
void _mtx_init(volatile uintptr_t *c, const char *name, const char *type,
int opts);
void _mtx_destroy(volatile uintptr_t *c);
-void mtx_sysinit(void *arg);
+void mtx_sysinit(const void *arg);
int _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
int line);
@@ -217,14 +217,10 @@ void _thread_lock(struct thread *);
#define _mtx_obtain_lock_fetch(mp, vp, tid) \
atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))
-/* Try to release mtx_lock if it is unrecursed and uncontested. */
+/* Try to release mtx_lock if it is unrecursed and without waiters. */
#define _mtx_release_lock(mp, tid) \
atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
-/* Release mtx_lock quickly, assuming we own it. */
-#define _mtx_release_lock_quick(mp) \
- atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
-
#define _mtx_release_lock_fetch(mp, vp) \
atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)
@@ -246,10 +242,10 @@ void _thread_lock(struct thread *);
})
/*
- * Lock a spin mutex. For spinlocks, we handle recursion inline (it
- * turns out that function calls can be significantly expensive on
- * some architectures). Since spin locks are not _too_ common,
- * inlining this code is not too big a deal.
+ * Lock a spin mutex.
+ *
+ * FIXME: spinlock_enter is a function call, defeating the point of inlining in
+ * this.
*/
#ifdef SMP
#define __mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({ \
@@ -317,10 +313,10 @@ void _thread_lock(struct thread *);
})
/*
- * Unlock a spin mutex. For spinlocks, we can handle everything
- * inline, as it's pretty simple and a function call would be too
- * expensive (at least on some architectures). Since spin locks are
- * not _too_ common, inlining this code is not too big a deal.
+ * Unlock a spin mutex.
+ *
+ * FIXME: spinlock_exit is a function call, defeating the point of inlining in
+ * this.
*
* Since we always perform a spinlock_enter() when attempting to acquire a
* spin lock, we need to always perform a matching spinlock_exit() when
@@ -332,7 +328,7 @@ void _thread_lock(struct thread *);
(mp)->mtx_recurse--; \
else { \
LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp); \
- _mtx_release_lock_quick((mp)); \
+ atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED); \
} \
spinlock_exit(); \
})
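
MTX_WAITERS names what the former MTX_CONTESTED bit actually tracks: whether any thread is blocked on the lock, and therefore whether release may take the one-store fast path. An illustrative sketch of that decision (not the kernel's real unlock code, which lives in kern_mutex.c):

/*
 * If the lock word is exactly our tid (MTX_WAITERS clear), a single
 * release-store drops the lock; otherwise the owner must take the
 * slow path and wake a blocked thread.
 */
static __inline bool
demo_try_fast_unlock(struct mtx *m, uintptr_t tid)
{
        return (atomic_cmpset_rel_ptr(&m->mtx_lock, tid, MTX_UNOWNED) != 0);
}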
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 72061bd8134e..8a71693cff3d 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -74,7 +74,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1500058
+#define __FreeBSD_version 1600001
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
diff --git a/sys/sys/pciio.h b/sys/sys/pciio.h
index 6467e82b1b3d..64c0b32cb8e2 100644
--- a/sys/sys/pciio.h
+++ b/sys/sys/pciio.h
@@ -77,6 +77,9 @@ struct pci_conf {
u_int8_t pc_revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_long pd_unit; /* device unit number */
+ int pd_numa_domain; /* device NUMA domain */
+ size_t pc_reported_len;/* length of PCI data reported */
+ char pc_spare[64]; /* space for future fields */
};
struct pci_match_conf {
@@ -165,7 +168,6 @@ struct pci_bar_ioreq {
#define PCIIO_BAR_MMAP_RW 0x04
#define PCIIO_BAR_MMAP_ACTIVATE 0x08
-#define PCIOCGETCONF _IOWR('p', 5, struct pci_conf_io)
#define PCIOCREAD _IOWR('p', 2, struct pci_io)
#define PCIOCWRITE _IOWR('p', 3, struct pci_io)
#define PCIOCATTACHED _IOWR('p', 4, struct pci_io)
@@ -173,5 +175,6 @@ struct pci_bar_ioreq {
#define PCIOCLISTVPD _IOWR('p', 7, struct pci_list_vpd_io)
#define PCIOCBARMMAP _IOWR('p', 8, struct pci_bar_mmap)
#define PCIOCBARIO _IOWR('p', 9, struct pci_bar_ioreq)
+#define PCIOCGETCONF _IOWR('p', 10, struct pci_conf_io)
#endif /* !_SYS_PCIIO_H_ */
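
The PCIOCGETCONF renumbering pairs with the struct pci_conf growth above: the records are reached through a pointer inside struct pci_conf_io, so the sizeof() that _IOWR() folds into the command word does not change, and bumping the command number is what keeps binaries built against the old layout from being handed the new one. A hypothetical new-ABI consumer using pc_reported_len to probe for the added fields:

#include <sys/pciio.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical: trust only the bytes the kernel says it filled in. */
static void
print_numa_domain(const struct pci_conf *pc)
{
        if (pc->pc_reported_len >= offsetof(struct pci_conf,
            pd_numa_domain) + sizeof(pc->pd_numa_domain))
                printf("NUMA domain: %d\n", pc->pd_numa_domain);
}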
diff --git a/sys/sys/power.h b/sys/sys/power.h
index 9afa55dd403a..33ace400bfd2 100644
--- a/sys/sys/power.h
+++ b/sys/sys/power.h
@@ -3,6 +3,10 @@
*
* Copyright (c) 2001 Mitsuru IWASAKI
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +32,10 @@
#ifndef _SYS_POWER_H_
#define _SYS_POWER_H_
+#ifdef _KERNEL
#include <sys/_eventhandler.h>
+#include <sys/types.h>
/* Power management system type */
#define POWER_PM_TYPE_ACPI 0x01
@@ -38,13 +44,55 @@
/* Commands for Power management function */
#define POWER_CMD_SUSPEND 0x00
-/* Sleep state */
+/*
+ * Sleep state.
+ *
+ * These are high-level sleep states that the system can enter. They map to
+ * a specific generic sleep type (enum power_stype).
+ */
#define POWER_SLEEP_STATE_STANDBY 0x00
#define POWER_SLEEP_STATE_SUSPEND 0x01
#define POWER_SLEEP_STATE_HIBERNATE 0x02
-typedef int (*power_pm_fn_t)(u_long, void*, ...);
-extern int power_pm_register(u_int, power_pm_fn_t, void *);
+/*
+ * Sleep type.
+ *
+ * These are the specific generic methods of entering a sleep state. E.g.
+ * POWER_SLEEP_STATE_SUSPEND could be set to enter either suspend-to-RAM (which
+ * is S3 on ACPI systems), or suspend-to-idle (S0ix on ACPI systems). This
+ * would be done through the kern.power.suspend sysctl.
+ */
+enum power_stype {
+ POWER_STYPE_AWAKE,
+ POWER_STYPE_STANDBY,
+ POWER_STYPE_SUSPEND_TO_MEM,
+ POWER_STYPE_SUSPEND_TO_IDLE,
+ POWER_STYPE_HIBERNATE,
+ POWER_STYPE_POWEROFF,
+ POWER_STYPE_COUNT,
+ POWER_STYPE_UNKNOWN,
+};
+
+static const char * const power_stype_names[POWER_STYPE_COUNT] = {
+ [POWER_STYPE_AWAKE] = "awake",
+ [POWER_STYPE_STANDBY] = "standby",
+ [POWER_STYPE_SUSPEND_TO_MEM] = "s2mem",
+ [POWER_STYPE_SUSPEND_TO_IDLE] = "s2idle",
+ [POWER_STYPE_HIBERNATE] = "hibernate",
+ [POWER_STYPE_POWEROFF] = "poweroff",
+};
+
+extern enum power_stype power_standby_stype;
+extern enum power_stype power_suspend_stype;
+extern enum power_stype power_hibernate_stype;
+
+extern enum power_stype power_name_to_stype(const char *_name);
+extern const char *power_stype_to_name(enum power_stype _stype);
+
+typedef int (*power_pm_fn_t)(u_long _cmd, void *_arg, enum power_stype _stype);
+extern int power_pm_register(u_int _pm_type, power_pm_fn_t _pm_fn,
+ void *_pm_arg,
+ bool _pm_supported[static POWER_STYPE_COUNT]);
extern u_int power_pm_get_type(void);
extern void power_pm_suspend(int);
@@ -60,4 +108,5 @@ extern void power_profile_set_state(int);
typedef void (*power_profile_change_hook)(void *, int);
EVENTHANDLER_DECLARE(power_profile_change, power_profile_change_hook);
+#endif /* _KERNEL */
#endif /* !_SYS_POWER_H_ */
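
Since power_stype_names[] is indexed by enum power_stype, name-to-type lookup reduces to a table walk. A sketch of how power_name_to_stype() could be implemented over it (the committed implementation is not part of this diff):

#include <string.h>

enum power_stype
power_name_to_stype(const char *name)
{
        enum power_stype stype;

        for (stype = 0; stype < POWER_STYPE_COUNT; stype++)
                if (power_stype_names[stype] != NULL &&
                    strcmp(power_stype_names[stype], name) == 0)
                        return (stype);
        return (POWER_STYPE_UNKNOWN);
}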
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 9140cee56885..8c0729d3ec66 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -741,7 +741,7 @@ struct proc {
reaper which spawned
our subtree. */
uint64_t p_elf_flags; /* (x) ELF flags */
- void *p_elf_brandinfo; /* (x) Elf_Brandinfo, NULL for
+ const void *p_elf_brandinfo; /* (x) Elf_Brandinfo, NULL for
non ELF binaries. */
sbintime_t p_umtx_min_timeout;
/* End area that is copied on creation. */
diff --git a/sys/sys/random.h b/sys/sys/random.h
index 5abf762cd200..2a68f0c99b6d 100644
--- a/sys/sys/random.h
+++ b/sys/sys/random.h
@@ -142,9 +142,6 @@ random_harvest_direct(const void *entropy, u_int size, enum random_entropy_sourc
random_harvest_direct_(entropy, size, origin);
}
-void random_harvest_register_source(enum random_entropy_source);
-void random_harvest_deregister_source(enum random_entropy_source);
-
#if defined(RANDOM_ENABLE_UMA)
#define random_harvest_fast_uma(a, b, c) random_harvest_fast(a, b, c)
#else /* !defined(RANDOM_ENABLE_UMA) */
diff --git a/sys/sys/rmlock.h b/sys/sys/rmlock.h
index 664356998438..eae7342527e3 100644
--- a/sys/sys/rmlock.h
+++ b/sys/sys/rmlock.h
@@ -52,7 +52,7 @@ void rm_init(struct rmlock *rm, const char *name);
void rm_init_flags(struct rmlock *rm, const char *name, int opts);
void rm_destroy(struct rmlock *rm);
int rm_wowned(const struct rmlock *rm);
-void rm_sysinit(void *arg);
+void rm_sysinit(const void *arg);
void _rm_wlock_debug(struct rmlock *rm, const char *file, int line);
void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line);
diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h
index 0ebe90e09bed..929f78c1d204 100644
--- a/sys/sys/rwlock.h
+++ b/sys/sys/rwlock.h
@@ -128,7 +128,7 @@
*/
void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts);
void _rw_destroy(volatile uintptr_t *c);
-void rw_sysinit(void *arg);
+void rw_sysinit(const void *arg);
int _rw_wowned(const volatile uintptr_t *c);
void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line);
int __rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
diff --git a/sys/sys/sockbuf.h b/sys/sys/sockbuf.h
index b4593f38f592..739723754b7d 100644
--- a/sys/sys/sockbuf.h
+++ b/sys/sys/sockbuf.h
@@ -62,7 +62,7 @@
#include <sys/_sx.h>
#include <sys/_task.h>
-#define SB_MAX (2*1024*1024) /* default for max chars in sockbuf */
+#define SB_MAX (8*1024*1024) /* default for max chars in sockbuf */
struct ktls_session;
struct mbuf;
diff --git a/sys/sys/socket.h b/sys/sys/socket.h
index cdd4fa3b4b89..cf1d95da6168 100644
--- a/sys/sys/socket.h
+++ b/sys/sys/socket.h
@@ -396,6 +396,7 @@ struct sockproto {
#define PF_NETLINK AF_NETLINK
#define PF_INET_SDP AF_INET_SDP
#define PF_INET6_SDP AF_INET6_SDP
+#define PF_HYPERV AF_HYPERV
#define PF_DIVERT AF_DIVERT
#define PF_IPFWLOG AF_IPFWLOG
diff --git a/sys/sys/sockopt.h b/sys/sys/sockopt.h
index bfe12d8510d7..d2b0ff5ed2c8 100644
--- a/sys/sys/sockopt.h
+++ b/sys/sys/sockopt.h
@@ -57,8 +57,10 @@ struct sockopt {
int sosetopt(struct socket *so, struct sockopt *sopt);
int sogetopt(struct socket *so, struct sockopt *sopt);
-int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen);
-int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len);
+int __result_use_check sooptcopyin(struct sockopt *sopt, void *buf, size_t len,
+ size_t minlen);
+int __result_use_check sooptcopyout(struct sockopt *sopt, const void *buf,
+ size_t len);
int soopt_getm(struct sockopt *sopt, struct mbuf **mp);
int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m);
int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m);
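
With __result_use_check, ignoring a sooptcopyin()/sooptcopyout() return now draws a compiler warning; a short or faulting copy must abort option processing. Typical checked use (the handler shape is illustrative):

static int
demo_setopt(struct sockopt *sopt)
{
        int error, optval;

        /* Reject short input rather than reading an undefined value. */
        error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
        if (error != 0)
                return (error);
        /* ... validate and apply optval ... */
        return (0);
}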
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index deb277decc75..d28cae9d01e5 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -99,7 +99,7 @@
* Function prototypes. Routines that start with an underscore are not part
* of the public interface and are wrapped with a macro.
*/
-void sx_sysinit(void *arg);
+void sx_sysinit(const void *arg);
#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0)
void sx_init_flags(struct sx *sx, const char *description, int opts);
void sx_destroy(struct sx *sx);
diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h
index d703a11fda01..cff27b8be316 100644
--- a/sys/sys/syscall.h
+++ b/sys/sys/syscall.h
@@ -85,8 +85,8 @@
/* 76 is obsolete vhangup */
/* 77 is obsolete vlimit */
#define SYS_mincore 78
-#define SYS_getgroups 79
-#define SYS_setgroups 80
+#define SYS_freebsd14_getgroups 79
+#define SYS_freebsd14_setgroups 80
#define SYS_getpgrp 81
#define SYS_setpgid 82
#define SYS_setitimer 83
@@ -533,4 +533,8 @@
#define SYS_exterrctl 592
#define SYS_inotify_add_watch_at 593
#define SYS_inotify_rm_watch 594
-#define SYS_MAXSYSCALL 595
+#define SYS_getgroups 595
+#define SYS_setgroups 596
+#define SYS_jail_attach_jd 597
+#define SYS_jail_remove_jd 598
+#define SYS_MAXSYSCALL 599
diff --git a/sys/sys/syscall.mk b/sys/sys/syscall.mk
index b7ded62cacb4..443dbadcfbff 100644
--- a/sys/sys/syscall.mk
+++ b/sys/sys/syscall.mk
@@ -65,8 +65,8 @@ MIASM = \
mprotect.o \
madvise.o \
mincore.o \
- getgroups.o \
- setgroups.o \
+ freebsd14_getgroups.o \
+ freebsd14_setgroups.o \
getpgrp.o \
setpgid.o \
setitimer.o \
@@ -436,4 +436,8 @@ MIASM = \
setcred.o \
exterrctl.o \
inotify_add_watch_at.o \
- inotify_rm_watch.o
+ inotify_rm_watch.o \
+ getgroups.o \
+ setgroups.o \
+ jail_attach_jd.o \
+ jail_remove_jd.o
diff --git a/sys/sys/sysent.h b/sys/sys/sysent.h
index 1714fa5a7416..6de391dcc03e 100644
--- a/sys/sys/sysent.h
+++ b/sys/sys/sysent.h
@@ -343,8 +343,7 @@ void exec_free_abi_mappings(struct proc *p);
void exec_onexec_old(struct thread *td);
#define INIT_SYSENTVEC(name, sv) \
- SYSINIT(name, SI_SUB_EXEC, SI_ORDER_ANY, \
- (sysinit_cfunc_t)exec_sysvec_init, sv);
+ SYSINIT(name, SI_SUB_EXEC, SI_ORDER_ANY, exec_sysvec_init, sv)
#endif /* _KERNEL */
diff --git a/sys/sys/sysproto.h b/sys/sys/sysproto.h
index 8d666f9c8ee9..8dda4b4533ea 100644
--- a/sys/sys/sysproto.h
+++ b/sys/sys/sysproto.h
@@ -273,14 +273,6 @@ struct mincore_args {
char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)];
char vec_l_[PADL_(char *)]; char * vec; char vec_r_[PADR_(char *)];
};
-struct getgroups_args {
- char gidsetsize_l_[PADL_(int)]; int gidsetsize; char gidsetsize_r_[PADR_(int)];
- char gidset_l_[PADL_(gid_t *)]; gid_t * gidset; char gidset_r_[PADR_(gid_t *)];
-};
-struct setgroups_args {
- char gidsetsize_l_[PADL_(int)]; int gidsetsize; char gidsetsize_r_[PADR_(int)];
- char gidset_l_[PADL_(const gid_t *)]; const gid_t * gidset; char gidset_r_[PADR_(const gid_t *)];
-};
struct getpgrp_args {
syscallarg_t dummy;
};
@@ -1901,6 +1893,20 @@ struct inotify_rm_watch_args {
char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
char wd_l_[PADL_(int)]; int wd; char wd_r_[PADR_(int)];
};
+struct getgroups_args {
+ char gidsetsize_l_[PADL_(int)]; int gidsetsize; char gidsetsize_r_[PADR_(int)];
+ char gidset_l_[PADL_(gid_t *)]; gid_t * gidset; char gidset_r_[PADR_(gid_t *)];
+};
+struct setgroups_args {
+ char gidsetsize_l_[PADL_(int)]; int gidsetsize; char gidsetsize_r_[PADR_(int)];
+ char gidset_l_[PADL_(const gid_t *)]; const gid_t * gidset; char gidset_r_[PADR_(const gid_t *)];
+};
+struct jail_attach_jd_args {
+ char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
+};
+struct jail_remove_jd_args {
+ char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
+};
int sys__exit(struct thread *, struct _exit_args *);
int sys_fork(struct thread *, struct fork_args *);
int sys_read(struct thread *, struct read_args *);
@@ -1957,8 +1963,6 @@ int sys_munmap(struct thread *, struct munmap_args *);
int sys_mprotect(struct thread *, struct mprotect_args *);
int sys_madvise(struct thread *, struct madvise_args *);
int sys_mincore(struct thread *, struct mincore_args *);
-int sys_getgroups(struct thread *, struct getgroups_args *);
-int sys_setgroups(struct thread *, struct setgroups_args *);
int sys_getpgrp(struct thread *, struct getpgrp_args *);
int sys_setpgid(struct thread *, struct setpgid_args *);
int sys_setitimer(struct thread *, struct setitimer_args *);
@@ -2305,6 +2309,10 @@ int sys_setcred(struct thread *, struct setcred_args *);
int sys_exterrctl(struct thread *, struct exterrctl_args *);
int sys_inotify_add_watch_at(struct thread *, struct inotify_add_watch_at_args *);
int sys_inotify_rm_watch(struct thread *, struct inotify_rm_watch_args *);
+int sys_getgroups(struct thread *, struct getgroups_args *);
+int sys_setgroups(struct thread *, struct setgroups_args *);
+int sys_jail_attach_jd(struct thread *, struct jail_attach_jd_args *);
+int sys_jail_remove_jd(struct thread *, struct jail_remove_jd_args *);
#ifdef COMPAT_43
@@ -2799,6 +2807,16 @@ int freebsd13_swapoff(struct thread *, struct freebsd13_swapoff_args *);
#ifdef COMPAT_FREEBSD14
+struct freebsd14_getgroups_args {
+ char gidsetsize_l_[PADL_(int)]; int gidsetsize; char gidsetsize_r_[PADR_(int)];
+ char gidset_l_[PADL_(gid_t *)]; gid_t * gidset; char gidset_r_[PADR_(gid_t *)];
+};
+struct freebsd14_setgroups_args {
+ char gidsetsize_l_[PADL_(int)]; int gidsetsize; char gidsetsize_r_[PADR_(int)];
+ char gidset_l_[PADL_(const gid_t *)]; const gid_t * gidset; char gidset_r_[PADR_(const gid_t *)];
+};
+int freebsd14_getgroups(struct thread *, struct freebsd14_getgroups_args *);
+int freebsd14_setgroups(struct thread *, struct freebsd14_setgroups_args *);
#endif /* COMPAT_FREEBSD14 */
@@ -2873,8 +2891,8 @@ int freebsd13_swapoff(struct thread *, struct freebsd13_swapoff_args *);
#define SYS_AUE_mprotect AUE_MPROTECT
#define SYS_AUE_madvise AUE_MADVISE
#define SYS_AUE_mincore AUE_MINCORE
-#define SYS_AUE_getgroups AUE_GETGROUPS
-#define SYS_AUE_setgroups AUE_SETGROUPS
+#define SYS_AUE_freebsd14_getgroups AUE_GETGROUPS
+#define SYS_AUE_freebsd14_setgroups AUE_SETGROUPS
#define SYS_AUE_getpgrp AUE_GETPGRP
#define SYS_AUE_setpgid AUE_SETPGRP
#define SYS_AUE_setitimer AUE_SETITIMER
@@ -3289,6 +3307,10 @@ int freebsd13_swapoff(struct thread *, struct freebsd13_swapoff_args *);
#define SYS_AUE_exterrctl AUE_NULL
#define SYS_AUE_inotify_add_watch_at AUE_INOTIFY
#define SYS_AUE_inotify_rm_watch AUE_INOTIFY
+#define SYS_AUE_getgroups AUE_GETGROUPS
+#define SYS_AUE_setgroups AUE_SETGROUPS
+#define SYS_AUE_jail_attach_jd AUE_JAIL_ATTACH
+#define SYS_AUE_jail_remove_jd AUE_JAIL_REMOVE
#undef PAD_
#undef PADL_
diff --git a/sys/sys/tree.h b/sys/sys/tree.h
index c11bccfb387c..194ad505b038 100644
--- a/sys/sys/tree.h
+++ b/sys/sys/tree.h
@@ -334,10 +334,13 @@ struct { \
#define _RB_L ((__uintptr_t)1)
#define _RB_R ((__uintptr_t)2)
#define _RB_LR ((__uintptr_t)3)
-#define _RB_BITS(elm) (*(__uintptr_t *)&elm)
+#define _RB_BITS(elm) ((__uintptr_t)elm)
#define _RB_BITSUP(elm, field) _RB_BITS(_RB_UP(elm, field))
-#define _RB_PTR(elm) (__typeof(elm)) \
- ((__uintptr_t)elm & ~_RB_LR)
+#define _RB_PTR_OP(elm, op, dir) ((__typeof(elm)) \
+ ((__uintptr_t)(elm) op (dir)))
+#define _RB_PTR(elm) _RB_PTR_OP((elm), &, ~_RB_LR)
+#define _RB_MOD_OR(elm, dir) ((elm) = _RB_PTR_OP((elm), |, (dir)))
+#define _RB_MOD_XOR(elm, dir) ((elm) = _RB_PTR_OP((elm), ^, (dir)))
#define RB_PARENT(elm, field) _RB_PTR(_RB_UP(elm, field))
#define RB_LEFT(elm, field) _RB_LINK(elm, _RB_L, field)
@@ -346,8 +349,8 @@ struct { \
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
#define RB_SET_PARENT(dst, src, field) do { \
- _RB_BITSUP(dst, field) = (__uintptr_t)src | \
- (_RB_BITSUP(dst, field) & _RB_LR); \
+ _RB_UP(dst, field) = (__typeof(src))((__uintptr_t)src | \
+ (_RB_BITSUP(dst, field) & _RB_LR)); \
} while (/*CONSTCOND*/ 0)
#define RB_SET(elm, parent, field) do { \
@@ -546,12 +549,12 @@ name##_RB_INSERT_COLOR(struct name *head, \
elmdir = RB_RIGHT(parent, field) == elm ? _RB_R : _RB_L; \
if (_RB_BITS(gpar) & elmdir) { \
/* shorten the parent-elm edge to rebalance */ \
- _RB_BITSUP(parent, field) ^= elmdir; \
+ _RB_MOD_XOR(_RB_UP(parent, field), elmdir); \
return (NULL); \
} \
sibdir = elmdir ^ _RB_LR; \
/* the other edge must change length */ \
- _RB_BITSUP(parent, field) ^= sibdir; \
+ _RB_MOD_XOR(_RB_UP(parent, field), sibdir); \
if ((_RB_BITS(gpar) & _RB_LR) == 0) { \
/* both edges now short, retry from parent */ \
child = elm; \
@@ -583,11 +586,14 @@ name##_RB_INSERT_COLOR(struct name *head, \
RB_ROTATE(elm, child, elmdir, field); \
child_up = _RB_UP(child, field); \
if (_RB_BITS(child_up) & sibdir) \
- _RB_BITSUP(parent, field) ^= elmdir; \
+ _RB_MOD_XOR(_RB_UP(parent, field), \
+ elmdir); \
if (_RB_BITS(child_up) & elmdir) \
- _RB_BITSUP(elm, field) ^= _RB_LR; \
+ _RB_MOD_XOR(_RB_UP(elm, field), \
+ _RB_LR); \
else \
- _RB_BITSUP(elm, field) ^= elmdir; \
+ _RB_MOD_XOR(_RB_UP(elm, field), \
+ elmdir); \
/* if child is a leaf, don't augment elm, \
* since it is restored to be a leaf again. */ \
if ((_RB_BITS(child_up) & _RB_LR) == 0) \
@@ -656,7 +662,7 @@ name##_RB_REMOVE_COLOR(struct name *head, \
/* the rank of the tree rooted at elm shrank */ \
gpar = _RB_UP(parent, field); \
elmdir = RB_RIGHT(parent, field) == elm ? _RB_R : _RB_L; \
- _RB_BITS(gpar) ^= elmdir; \
+ _RB_MOD_XOR(gpar, elmdir); \
if (_RB_BITS(gpar) & elmdir) { \
/* lengthen the parent-elm edge to rebalance */ \
_RB_UP(parent, field) = gpar; \
@@ -664,7 +670,7 @@ name##_RB_REMOVE_COLOR(struct name *head, \
} \
if (_RB_BITS(gpar) & _RB_LR) { \
/* shorten other edge, retry from parent */ \
- _RB_BITS(gpar) ^= _RB_LR; \
+ _RB_MOD_XOR(gpar, _RB_LR); \
_RB_UP(parent, field) = gpar; \
gpar = _RB_PTR(gpar); \
continue; \
@@ -672,7 +678,7 @@ name##_RB_REMOVE_COLOR(struct name *head, \
sibdir = elmdir ^ _RB_LR; \
sib = _RB_LINK(parent, sibdir, field); \
up = _RB_UP(sib, field); \
- _RB_BITS(up) ^= _RB_LR; \
+ _RB_MOD_XOR(up, _RB_LR); \
if ((_RB_BITS(up) & _RB_LR) == 0) { \
/* shorten edges descending from sib, retry */ \
_RB_UP(sib, field) = up; \
@@ -703,24 +709,29 @@ name##_RB_REMOVE_COLOR(struct name *head, \
/* elm is a 1-child. First rotate at elm. */ \
RB_ROTATE(sib, elm, sibdir, field); \
up = _RB_UP(elm, field); \
- _RB_BITSUP(parent, field) ^= \
- (_RB_BITS(up) & elmdir) ? _RB_LR : elmdir; \
- _RB_BITSUP(sib, field) ^= \
- (_RB_BITS(up) & sibdir) ? _RB_LR : sibdir; \
- _RB_BITSUP(elm, field) |= _RB_LR; \
+ _RB_MOD_XOR(_RB_UP(parent, field), \
+ (_RB_BITS(up) & elmdir) ? _RB_LR : elmdir); \
+ _RB_MOD_XOR(_RB_UP(sib, field), \
+ (_RB_BITS(up) & sibdir) ? _RB_LR : sibdir); \
+ _RB_MOD_OR(_RB_UP(elm, field), _RB_LR); \
} else { \
if ((_RB_BITS(up) & elmdir) == 0 && \
RB_STRICT_HST && elm != NULL) { \
/* if parent does not become a leaf, \
do not demote parent yet. */ \
- _RB_BITSUP(parent, field) ^= sibdir; \
- _RB_BITSUP(sib, field) ^= _RB_LR; \
+ _RB_MOD_XOR(_RB_UP(parent, field), \
+ sibdir); \
+ _RB_MOD_XOR(_RB_UP(sib, field), \
+ _RB_LR); \
} else if ((_RB_BITS(up) & elmdir) == 0) { \
/* demote parent. */ \
- _RB_BITSUP(parent, field) ^= elmdir; \
- _RB_BITSUP(sib, field) ^= sibdir; \
+ _RB_MOD_XOR(_RB_UP(parent, field), \
+ elmdir); \
+ _RB_MOD_XOR(_RB_UP(sib, field), \
+ sibdir); \
} else \
- _RB_BITSUP(sib, field) ^= sibdir; \
+ _RB_MOD_XOR(_RB_UP(sib, field), \
+ sibdir); \
elm = sib; \
} \
\
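
The tree.h rework retires _RB_BITS(elm) as an assignable lvalue, which type-punned a pointer object through *(__uintptr_t *)&elm, in favor of explicit read-modify-write macros. The apparent payoff is avoiding the aliasing-hostile write and letting the operand be any expression, not just an addressable variable. The two idioms side by side, on a hypothetical node pointer:

struct demo_node *up;

/* Before: in-place XOR through a type-punned lvalue. */
*(__uintptr_t *)&up ^= _RB_L;

/* After (_RB_MOD_XOR): compute as integer, cast back, assign. */
up = (struct demo_node *)((__uintptr_t)up ^ _RB_L);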
diff --git a/sys/sys/ttycom.h b/sys/sys/ttycom.h
index d7ddc66b09fb..43e8b98a5bc4 100644
--- a/sys/sys/ttycom.h
+++ b/sys/sys/ttycom.h
@@ -69,8 +69,8 @@
/* 89-91 conflicts: tun and tap */
#define TIOCTIMESTAMP _IOR('t', 89, struct timeval) /* enable/get timestamp
* of last input event */
-#define TIOCMGDTRWAIT _IOR('t', 90, int) /* modem: get wait on close */
-#define TIOCMSDTRWAIT _IOW('t', 91, int) /* modem: set wait on close */
+/* TIOCMGDTRWAIT _IOR('t', 90, int) * was modem: get wait on close */
+/* TIOCMSDTRWAIT _IOW('t', 91, int) * was modem: set wait on close */
/* 92-93 tun and tap */
/* 94-97 conflicts: tun and tap */
#define TIOCDRAIN _IO('t', 94) /* wait till output drained */
diff --git a/sys/sys/types.h b/sys/sys/types.h
index fd375139a092..8311c1901b7e 100644
--- a/sys/sys/types.h
+++ b/sys/sys/types.h
@@ -297,9 +297,11 @@ typedef struct vm_page *vm_page_t;
#if defined(_KERNEL) || defined(_STANDALONE)
#if !defined(__bool_true_false_are_defined) && !defined(__cplusplus)
#define __bool_true_false_are_defined 1
+#if __STDC_VERSION__ < 202311L
#define false 0
#define true 1
typedef _Bool bool;
+#endif /* __STDC_VERSION__ < 202311L */
#endif /* !__bool_true_false_are_defined && !__cplusplus */
#endif /* KERNEL || _STANDALONE */
diff --git a/sys/sys/ucred.h b/sys/sys/ucred.h
index 9c1d8545af34..254f58841993 100644
--- a/sys/sys/ucred.h
+++ b/sys/sys/ucred.h
@@ -112,15 +112,21 @@ struct xucred {
short cr_ngroups; /* number of groups (incl. cr_gid). */
union {
/*
- * Special little hack to avoid needing a cr_gid macro, which
- * would cause problems if one were to use it with struct ucred
- * which also has a cr_groups member.
+ * The effective GID has been the first element of cr_groups[]
+ * for historical reasons. It should be accessed using the
+ * 'cr_gid' identifier. Supplementary groups should be accessed
+ * using cr_sgroups[]. Note that 'cr_ngroups' currently
+ * includes the effective GID.
+ *
+ * XXXOC: On the next API change (requires versioning), please
+ * replace this union with a true unaliased field 'cr_gid' and
+ * make sure that cr_groups[]/'cr_ngroups' only account for
+ * supplementary groups.
*/
struct {
gid_t cr_gid; /* effective group id */
gid_t cr_sgroups[XU_NGROUPS - 1];
};
-
gid_t cr_groups[XU_NGROUPS]; /* groups */
};
union {
diff --git a/sys/sys/unistd.h b/sys/sys/unistd.h
index 7ab2f021e408..5743dc1c8033 100644
--- a/sys/sys/unistd.h
+++ b/sys/sys/unistd.h
@@ -216,6 +216,15 @@
#define CLOSE_RANGE_CLOEXEC (1<<2)
#define CLOSE_RANGE_CLOFORK (1<<3)
+/*
+ * copy_file_range flags visible to user space.
+ * High order 8 bits reserved for kernel flags.
+ * Allocate from bit 23 down, to try and avoid conflicts with
+ * future Linux flags.
+ */
+#define COPY_FILE_RANGE_CLONE 0x00800000 /* Require cloning. */
+#define COPY_FILE_RANGE_USERFLAGS (COPY_FILE_RANGE_CLONE)
+
#endif /* __BSD_VISIBLE */
#endif /* !_SYS_UNISTD_H_ */
diff --git a/sys/sys/user.h b/sys/sys/user.h
index 103236b6ed1b..1704bc089d85 100644
--- a/sys/sys/user.h
+++ b/sys/sys/user.h
@@ -266,6 +266,7 @@ struct user {
#define KF_TYPE_EVENTFD 13
#define KF_TYPE_TIMERFD 14
#define KF_TYPE_INOTIFY 15
+#define KF_TYPE_JAILDESC 16
#define KF_TYPE_UNKNOWN 255
#define KF_VTYPE_VNON 0
@@ -453,6 +454,9 @@ struct kinfo_file {
uint64_t kf_timerfd_addr;
} kf_timerfd;
struct {
+ int32_t kf_jid;
+ } kf_jail;
+ struct {
uint64_t kf_kqueue_addr;
int32_t kf_kqueue_count;
int32_t kf_kqueue_state;
@@ -613,7 +617,8 @@ struct kinfo_vmobject {
} kvo_type_spec; /* Type-specific union */
uint64_t kvo_me; /* Uniq handle for anon obj */
uint64_t kvo_laundry; /* Number of laundry pages. */
- uint64_t _kvo_qspare[5];
+ uint64_t kvo_wired; /* Number of wired pages. */
+ uint64_t _kvo_qspare[4];
uint32_t kvo_swapped; /* Number of swapped pages */
uint32_t kvo_flags;
uint32_t _kvo_ispare[6];
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 8080e9edd8c3..0bf438a1b821 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -67,6 +67,11 @@ __enum_uint8_decl(vtype) {
VLASTTYPE = VMARKER,
};
+/*
+ * We frequently need to test if something is a device node.
+ */
+#define VTYPE_ISDEV(vtype) ((vtype) == VCHR || (vtype) == VBLK)
+
__enum_uint8_decl(vstate) {
VSTATE_UNINITIALIZED,
VSTATE_CONSTRUCTED,
@@ -199,6 +204,8 @@ struct vnode {
int v_seqc_users; /* i modifications pending */
};
+#define VN_ISDEV(vp) VTYPE_ISDEV((vp)->v_type)
+
#ifndef DEBUG_LOCKS
#ifdef _LP64
/*
@@ -309,6 +316,8 @@ struct vattr {
long va_spare; /* remain quad aligned */
};
+#define VATTR_ISDEV(vap) VTYPE_ISDEV((vap)->va_type)
+
/*
* Flags for va_vaflags.
*/
@@ -397,21 +406,8 @@ struct vattr {
*/
#define VLKTIMEOUT (hz / 20 + 1)
-/* copy_file_range flags */
-#define COPY_FILE_RANGE_KFLAGS 0xff000000
-
-/*
- * copy_file_range flags visible to user space.
- * Allocate high bits first, to try and avoid conflicting with Linux.
- */
-#define COPY_FILE_RANGE_CLONE 0x00800000 /* Require cloning. */
-#define COPY_FILE_RANGE_USERFLAGS (COPY_FILE_RANGE_CLONE)
-
#ifdef _KERNEL
-/* copy_file_range flags only usable in the kernel */
-#define COPY_FILE_RANGE_TIMEO1SEC 0x01000000 /* Return after 1sec. */
-
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_VNODE);
#endif
@@ -455,16 +451,12 @@ extern int vttoif_tab[];
#define VS_SKIP_UNMOUNT 0x0001 /* vfs_write_suspend: fail if the
filesystem is being unmounted */
-#define VREF(vp) vref(vp)
-
#ifdef DIAGNOSTIC
#define VATTR_NULL(vap) vattr_null(vap)
#else
#define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
#endif /* DIAGNOSTIC */
-#define NULLVP ((struct vnode *)NULL)
-
/*
* Global vnode data.
*/
@@ -598,11 +590,6 @@ void assert_vop_unlocked(struct vnode *vp, const char *str);
#endif /* INVARIANTS */
-/*
- * This call works for vnodes in the kernel.
- */
-#define VCALL(c) ((c)->a_desc->vdesc_call(c))
-
#define DOINGASYNC(vp) \
(((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 && \
((curthread->td_pflags & TDP_SYNCIO) == 0))
@@ -634,6 +621,10 @@ typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int);
#define VN_OPEN_INVFS 0x00000008
#define VN_OPEN_WANTIOCTLCAPS 0x00000010
+/* copy_file_range kernel flags */
+#define COPY_FILE_RANGE_KFLAGS 0xff000000
+#define COPY_FILE_RANGE_TIMEO1SEC 0x01000000 /* Return after 1sec. */
+
/*
* Public vnode manipulation functions.
*/
@@ -723,6 +714,7 @@ int speedup_syncer(void);
int vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen);
int vn_getcwd(char *buf, char **retbuf, size_t *buflen);
int vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf);
+int vn_fullpath_jail(struct vnode *vp, char **retbuf, char **freebuf);
int vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf);
int vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp,
const char *hdrl_name, size_t hrdl_name_length, char **retbuf,
diff --git a/sys/sys/watchdog.h b/sys/sys/watchdog.h
index 4a16b18509f5..8401d343a6b7 100644
--- a/sys/sys/watchdog.h
+++ b/sys/sys/watchdog.h
@@ -32,15 +32,16 @@
#define _SYS_WATCHDOG_H
#include <sys/ioccom.h>
+#include <sys/_types.h>
#define _PATH_WATCHDOG "fido"
-#define WDIOCPATPAT _IOW('W', 42, u_int) /* pat the watchdog */
-#define WDIOC_SETTIMEOUT _IOW('W', 43, int) /* set/reset the timer */
-#define WDIOC_GETTIMEOUT _IOR('W', 44, int) /* get total timeout */
-#define WDIOC_GETTIMELEFT _IOR('W', 45, int) /* get time left */
-#define WDIOC_GETPRETIMEOUT _IOR('W', 46, int) /* get the pre-timeout */
-#define WDIOC_SETPRETIMEOUT _IOW('W', 47, int) /* set the pre-timeout */
+#define WDIOC_PATPAT _IOW('W', 52, sbintime_t) /* pat the watchdog */
+#define WDIOC_SETTIMEOUT _IOW('W', 53, sbintime_t) /* set/reset the timer */
+#define WDIOC_GETTIMEOUT _IOR('W', 54, sbintime_t) /* get total timeout */
+#define WDIOC_GETTIMELEFT _IOR('W', 55, sbintime_t) /* get time left */
+#define WDIOC_GETPRETIMEOUT _IOR('W', 56, sbintime_t) /* get the pre-timeout */
+#define WDIOC_SETPRETIMEOUT _IOW('W', 57, sbintime_t) /* set the pre-timeout */
/* set the action when a pre-timeout occurs see: WD_SOFT_* */
#define WDIOC_SETPRETIMEOUTACT _IOW('W', 48, int)
@@ -48,6 +49,8 @@
#define WDIOC_SETSOFT _IOW('W', 49, int)
#define WDIOC_SETSOFTTIMEOUTACT _IOW('W', 50, int)
+#define WDIOC_CONTROL _IOW('W', 51, int) /* configure watchdog */
+
#define WD_ACTIVE 0x8000000
/*
* Watchdog reset, timeout set to value in WD_INTERVAL field.
@@ -93,6 +96,11 @@
#define WD_TO_64SEC 36
#define WD_TO_128SEC 37
+/* Control options for WDIOC_CONTROL */
+#define WD_CTRL_DISABLE 0x00000000
+#define WD_CTRL_ENABLE 0x00000001
+#define WD_CTRL_RESET 0x00000002
+
/* action on pre-timeout trigger */
#define WD_SOFT_PANIC 0x01 /* panic */
#define WD_SOFT_DDB 0x02 /* enter debugger */
@@ -105,11 +113,16 @@
#include <sys/_eventhandler.h>
typedef void (*watchdog_fn)(void *, u_int, int *);
+typedef void (*watchdog_sbt_fn)(void *, sbintime_t, sbintime_t *, int *);
EVENTHANDLER_DECLARE(watchdog_list, watchdog_fn);
+EVENTHANDLER_DECLARE(watchdog_sbt_list, watchdog_sbt_fn);
u_int wdog_kern_last_timeout(void);
int wdog_kern_pat(u_int utim);
+sbintime_t wdog_kern_last_timeout_sbt(void);
+int wdog_kern_pat_sbt(sbintime_t utim);
+int wdog_control(int ctrl);
/*
* The following function pointer is used to attach a software watchdog
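
The renumbered ioctls move the watchdog interface from the old
exponent-based u_int encoding (WD_ACTIVE | WD_TO_*SEC) to absolute
sbintime_t timeouts. A minimal userland sketch, assuming the device node is
/dev/fido (from _PATH_WATCHDOG), that SBT_1S from <sys/time.h> scales
seconds to sbintime_t, and that WDIOC_PATPAT takes the same sbintime_t
timeout value:

	#include <sys/ioctl.h>
	#include <sys/time.h>
	#include <sys/watchdog.h>
	#include <err.h>
	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		sbintime_t timeout = 10 * SBT_1S;	/* 10 seconds */
		int ctrl = WD_CTRL_ENABLE;
		int fd;

		fd = open("/dev/" _PATH_WATCHDOG, O_RDWR);
		if (fd < 0)
			err(1, "open");
		if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) != 0)
			err(1, "WDIOC_SETTIMEOUT");
		if (ioctl(fd, WDIOC_CONTROL, &ctrl) != 0)
			err(1, "WDIOC_CONTROL");
		for (;;) {
			/* Pat well inside the timeout window. */
			if (ioctl(fd, WDIOC_PATPAT, &timeout) != 0)
				err(1, "WDIOC_PATPAT");
			sleep(5);
		}
	}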
diff --git a/sys/tests/ktest.h b/sys/tests/ktest.h
index c767aa31e8e5..75d7a75e2fff 100644
--- a/sys/tests/ktest.h
+++ b/sys/tests/ktest.h
@@ -57,6 +57,8 @@ struct ktest_test_info {
ktest_parse_t parse;
};
+#define KTEST_FUNC(X) static int __ktest_##X(struct ktest_test_context *ctx)
+
struct ktest_module_info {
const char *name;
const struct ktest_test_info *tests;
@@ -64,6 +66,8 @@ struct ktest_module_info {
void *module_ptr;
};
+#define KTEST_INFO(X) { "test_" #X, "Test " #X, __ktest_##X, NULL }
+
int ktest_default_modevent(module_t mod, int type, void *arg);
bool ktest_start_msg(struct ktest_test_context *ctx);
@@ -84,6 +88,9 @@ void ktest_end_msg(struct ktest_test_context *ctx);
#define KTEST_LOG(_ctx, _fmt, ...) \
KTEST_LOG_LEVEL(_ctx, LOG_DEBUG, _fmt, ## __VA_ARGS__)
+#define KTEST_ERR(_ctx, _fmt, ...) \
+ KTEST_LOG_LEVEL(_ctx, LOG_ERR, _fmt, ## __VA_ARGS__)
+
#define KTEST_MAX_BUF 512
#define KTEST_MODULE_DECLARE(_n, _t) \
@@ -104,6 +111,9 @@ MODULE_VERSION(ktest_##_n, 1); \
MODULE_DEPEND(ktest_##_n, ktestmod, 1, 1, 1); \
MODULE_DEPEND(ktest_##_n, netlink, 1, 1, 1); \
+#define KTEST_MODULE_DEPEND(_n, _d) \
+MODULE_DEPEND(ktest_##_n, _d, 1, 1, 1); \
+
#endif /* _KERNEL */
/* genetlink definitions */
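
KTEST_FUNC and KTEST_INFO shrink the per-test boilerplate: the first
declares a test function with the expected signature, the second builds its
ktest_test_info entry. A sketch of a module using them, assuming a
hypothetical module named "example" and the usual module headers:

	#include <sys/param.h>
	#include <sys/module.h>
	#include <tests/ktest.h>

	KTEST_FUNC(smoke)
	{
		KTEST_LOG(ctx, "smoke test running");
		return (0);
	}

	static const struct ktest_test_info tests[] = {
		KTEST_INFO(smoke),
	};
	KTEST_MODULE_DECLARE(example, tests);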
diff --git a/sys/tools/amd64_ia32_vdso.sh b/sys/tools/amd64_ia32_vdso.sh
index 85d2299b45d0..e5865639d398 100644
--- a/sys/tools/amd64_ia32_vdso.sh
+++ b/sys/tools/amd64_ia32_vdso.sh
@@ -58,7 +58,7 @@ then
exit 1
fi
-${CC} ${DEBUG} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
+${CC} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
-o elf-vdso32.so.o -I. -I"${S}" -include opt_global.h \
-DVDSO_NAME=elf_vdso32_so_1 -DVDSO_FILE=\"elf-vdso32.so.1\" \
"${S}"/tools/vdso_wrap.S
diff --git a/sys/tools/amd64_vdso.sh b/sys/tools/amd64_vdso.sh
index 2a83ae874ab7..ed91ddc8abb5 100644
--- a/sys/tools/amd64_vdso.sh
+++ b/sys/tools/amd64_vdso.sh
@@ -67,7 +67,7 @@ then
exit 1
fi
-${CC} ${DEBUG} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
+${CC} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
-o elf-vdso.so.o -I. -I"${S}" -include opt_global.h \
-DVDSO_NAME=elf_vdso_so_1 -DVDSO_FILE=\"elf-vdso.so.1\" \
"${S}"/tools/vdso_wrap.S
diff --git a/sys/tools/gdb/README.txt b/sys/tools/gdb/README.txt
new file mode 100644
index 000000000000..8c31565ddc42
--- /dev/null
+++ b/sys/tools/gdb/README.txt
@@ -0,0 +1,21 @@
+This directory contains Python scripts that can be loaded by GDB to help debug
+FreeBSD kernel crashes.
+
+Add new commands and functions in their own files. Functions with general
+utility should be added to freebsd.py. sys/tools/kernel-gdb.py is installed
+into the kernel debug directory (typically /usr/lib/debug/boot/kernel). It will
+be automatically loaded by kgdb when opening a vmcore, so if you add new GDB
+commands or functions, that script should be updated to import them, and you
+should document them here.
+
+To provide some rudimentary testing, selftest.py tries to exercise all of the
+commands and functions defined here. To use it, run selftest.sh to panic the
+system. Then, create a kernel dump or attach to the panicked kernel, and invoke
+the script with "python import selftest" in (k)gdb.
+
+Commands:
+acttrace Display a backtrace for all on-CPU threads
+
+Functions:
+$PCPU(<field>[, <cpuid>]) Display the value of a PCPU/DPCPU field
+$V(<variable>[, <vnet>]) Display the value of a VNET variable
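
Example (k)gdb usage, taken from the commands exercised by selftest.py:

	(kgdb) acttrace
	(kgdb) p $PCPU("curthread", 0)
	(kgdb) p $V("tcbinfo", vnet0)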
diff --git a/sys/tools/gdb/acttrace.py b/sys/tools/gdb/acttrace.py
new file mode 100644
index 000000000000..147effbbddf1
--- /dev/null
+++ b/sys/tools/gdb/acttrace.py
@@ -0,0 +1,48 @@
+#
+# Copyright (c) 2022 The FreeBSD Foundation
+#
+# This software was developed by Mark Johnston under sponsorship from the
+# FreeBSD Foundation.
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+from freebsd import *
+from pcpu import *
+
+class acttrace(gdb.Command):
+ """
+ Register an acttrace command with gdb.
+
+ When run, acttrace prints the stack trace of all threads that were on-CPU
+ at the time of the panic.
+ """
+ def __init__(self):
+ super(acttrace, self).__init__("acttrace", gdb.COMMAND_USER)
+
+ def invoke(self, arg, from_tty):
+ # Save the current thread so that we can switch back after.
+ curthread = gdb.selected_thread()
+
+ for pcpu in pcpu_foreach():
+ td = pcpu['pc_curthread']
+ tid = td['td_tid']
+
+ gdb_thread = tid_to_gdb_thread(tid)
+ if gdb_thread is None:
+ raise gdb.error(f"failed to find GDB thread with TID {tid}")
+ else:
+ gdb_thread.switch()
+
+ p = td['td_proc']
+ print("Tracing command {} pid {} tid {} (CPU {})".format(
+ p['p_comm'], p['p_pid'], td['td_tid'], pcpu['pc_cpuid']))
+ gdb.execute("bt")
+ print()
+
+ curthread.switch()
+
+
+# Instantiating the class registers the command with gdb; the instance
+# itself is not otherwise used.
+acttrace()
diff --git a/sys/tools/gdb/freebsd.py b/sys/tools/gdb/freebsd.py
new file mode 100644
index 000000000000..81ea60373348
--- /dev/null
+++ b/sys/tools/gdb/freebsd.py
@@ -0,0 +1,75 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+
+def symval(name):
+ sym = gdb.lookup_global_symbol(name)
+ if sym is None:
+ sym = gdb.lookup_static_symbol(name)
+ if sym is None:
+ raise gdb.GdbError(f"Symbol '{name}' not found")
+ return sym.value()
+
+
+def _queue_foreach(head, field, headf, nextf):
+ elm = head[headf]
+ while elm != 0:
+ yield elm
+ elm = elm[field][nextf]
+
+
+def list_foreach(head, field):
+ """sys/queue.h-style iterator."""
+ return _queue_foreach(head, field, "lh_first", "le_next")
+
+
+def tailq_foreach(head, field):
+ """sys/queue.h-style iterator."""
+ return _queue_foreach(head, field, "tqh_first", "tqe_next")
+
+
+def linker_file_foreach():
+ """Iterate over loaded linker files."""
+ return tailq_foreach(symval("linker_files"), "link")
+
+
+def pcpu_foreach():
+ mp_maxid = symval("mp_maxid")
+ cpuid_to_pcpu = symval("cpuid_to_pcpu")
+
+ cpu = 0
+ while cpu <= mp_maxid:
+ pcpu = cpuid_to_pcpu[cpu]
+ if pcpu:
+ yield pcpu
+ cpu = cpu + 1
+
+
+def tid_to_gdb_thread(tid):
+ """Convert a FreeBSD kernel thread ID to a gdb inferior thread."""
+ for thread in gdb.inferiors()[0].threads():
+ if thread.ptid[2] == tid:
+ return thread
+ else:
+ return None
+
+
+def tdfind(tid, pid=-1):
+ """Convert a FreeBSD kernel thread ID to a struct thread pointer."""
+ td = tdfind.cached_threads.get(int(tid))
+ if td:
+ return td
+
+ for p in list_foreach(symval("allproc"), "p_list"):
+ if pid != -1 and pid != p['p_pid']:
+ continue
+ for td in tailq_foreach(p['p_threads'], "td_plist"):
+ ntid = td['td_tid']
+ tdfind.cached_threads[int(ntid)] = td
+ if ntid == tid:
+ return td
+tdfind.cached_threads = dict()
diff --git a/sys/tools/gdb/pcpu.py b/sys/tools/gdb/pcpu.py
new file mode 100644
index 000000000000..aadc4b2d42df
--- /dev/null
+++ b/sys/tools/gdb/pcpu.py
@@ -0,0 +1,77 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+from freebsd import *
+
+class pcpu(gdb.Function):
+ """
+ Register a function to look up PCPU and DPCPU variables by name.
+
+ To look up the value of the PCPU field foo on CPU n, use
+ $PCPU("foo", n). This works for DPCPU fields too. If the CPU ID is
+ omitted, and the currently selected thread is on-CPU, that CPU is
+ used, otherwise an error is raised.
+ """
+ def __init__(self):
+ super(pcpu, self).__init__("PCPU")
+
+ def invoke(self, field, cpuid=-1):
+ if cpuid == -1:
+ cpuid = tdfind(gdb.selected_thread().ptid[2])['td_oncpu']
+ if cpuid == -1:
+ raise gdb.error("Currently selected thread is off-CPU")
+ if cpuid < 0 or cpuid > symval("mp_maxid"):
+ raise gdb.error(f"Currently selected on invalid CPU {cpuid}")
+ pcpu = symval("cpuid_to_pcpu")[cpuid]
+
+ # Are we dealing with a PCPU or DPCPU field?
+ field = field.string()
+ for f in gdb.lookup_type("struct pcpu").fields():
+ if f.name == "pc_" + field:
+ return pcpu["pc_" + field]
+
+ def uintptr_t(val):
+ return val.cast(gdb.lookup_type("uintptr_t"))
+
+ # We're dealing with a DPCPU field. This is handled similarly
+ # to VNET symbols, see vnet.py for comments.
+ pcpu_base = pcpu['pc_dynamic']
+ pcpu_entry = symval("pcpu_entry_" + field)
+ pcpu_entry_addr = uintptr_t(pcpu_entry.address)
+
+ for lf in linker_file_foreach():
+ block = gdb.block_for_pc(lf['ops']['cls']['methods'][0]['func'])
+ elf_file_t = gdb.lookup_type("elf_file_t", block).target()
+ ef = lf.cast(elf_file_t)
+
+ file_type = lf['ops']['cls']['name'].string()
+ if file_type == "elf64":
+ start = uintptr_t(ef['pcpu_start'])
+ if start == 0:
+ continue
+ end = uintptr_t(ef['pcpu_stop'])
+ base = uintptr_t(ef['pcpu_base'])
+ elif file_type == "elf64_obj":
+ for i in range(ef['nprogtab']):
+ pe = ef['progtab'][i]
+ if pe['name'].string() == "set_pcpu":
+ start = uintptr_t(pe['origaddr'])
+ end = start + uintptr_t(pe['size'])
+ base = uintptr_t(pe['addr'])
+ break
+ else:
+ continue
+ else:
+ path = lf['pathname'].string()
+ raise gdb.error(f"{path} has unexpected linker file type {file_type}")
+
+ if pcpu_entry_addr >= start and pcpu_entry_addr < end:
+ obj = gdb.Value(pcpu_base + pcpu_entry_addr - start + base)
+ return obj.cast(pcpu_entry.type.pointer()).dereference()
+
+# Register with gdb.
+pcpu()
diff --git a/sys/tools/gdb/selftest.py b/sys/tools/gdb/selftest.py
new file mode 100644
index 000000000000..41e9211c4bb3
--- /dev/null
+++ b/sys/tools/gdb/selftest.py
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+
+cmds = ["acttrace",
+ "p $V(\"tcbinfo\")",
+ "p $V(\"tcbinfo\", vnet0)",
+ "p $V(\"pf_status\")",
+ "p $V(\"pf_status\", \"gdbselftest\")",
+ "p $PCPU(\"curthread\")",
+ "p $PCPU(\"curthread\", 0)",
+ "p/x $PCPU(\"hardclocktime\", 1)",
+ "p $PCPU(\"pqbatch\")[0][0]",
+ "p $PCPU(\"ss\", 1)",
+ ]
+
+for cmd in cmds:
+    try:
+        print(f"Running command: '{cmd}'")
+        gdb.execute(cmd)
+    except gdb.error as e:
+        print(f"Command '{cmd}' failed: {e}")
+        break
+else:
+    # We didn't hit any unexpected errors. This isn't as good as actually
+    # verifying the output, but it's better than nothing.
+    print("Everything seems OK")
diff --git a/sys/tools/gdb/selftest.sh b/sys/tools/gdb/selftest.sh
new file mode 100644
index 000000000000..252fae14af17
--- /dev/null
+++ b/sys/tools/gdb/selftest.sh
@@ -0,0 +1,23 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+set -e
+
+n=$(sysctl -n hw.ncpu)
+if [ $n -lt 2 ]; then
+ echo "This test requires at least 2 CPUs"
+ exit 1
+fi
+
+# Set up some things expected by selftest.py.
+kldload -n pf siftr
+pfctl -e || true
+jail -c name=gdbselftest vnet persist
+
+echo "I'm about to panic your system, ctrl-C now if that's not what you want."
+sleep 10
+sysctl debug.debugger_on_panic=0
+sysctl debug.kdb.panic=1
diff --git a/sys/tools/gdb/vnet.py b/sys/tools/gdb/vnet.py
new file mode 100644
index 000000000000..36b4d512a3eb
--- /dev/null
+++ b/sys/tools/gdb/vnet.py
@@ -0,0 +1,100 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import gdb
+import traceback
+from freebsd import *
+
+class vnet(gdb.Function):
+ """
+ Register a function to look up VNET variables by name.
+
+ To look at the value of a VNET variable V_foo, print $V("foo"). The
+ currently selected thread's VNET is used by default, but can be optionally
+ specified as a second parameter, e.g., $V("foo", <vnet>), where <vnet> is a
+ pointer to a struct vnet (e.g., vnet0 or allprison.tqh_first->pr_vnet) or a
+ string naming a jail.
+ """
+ def __init__(self):
+ super(vnet, self).__init__("V")
+
+ def invoke(self, sym, vnet=None):
+ sym = sym.string()
+ if sym.startswith("V_"):
+ sym = sym[len("V_"):]
+ if gdb.lookup_symbol("sysctl___kern_features_vimage")[0] is None:
+ return symval(sym)
+
+ # Look up the VNET's base address.
+ if vnet is None:
+ vnet = tdfind(gdb.selected_thread().ptid[2])['td_vnet']
+ if not vnet:
+ # If curthread->td_vnet == NULL, vnet0 is the current vnet.
+ vnet = symval("vnet0")
+ elif vnet.type.is_string_like:
+ vnet = vnet.string()
+ for prison in tailq_foreach(symval("allprison"), "pr_list"):
+ if prison['pr_name'].string() == vnet:
+ vnet = prison['pr_vnet']
+ break
+ else:
+ raise gdb.error(f"No prison named {vnet}")
+
+ def uintptr_t(val):
+ return val.cast(gdb.lookup_type("uintptr_t"))
+
+ # Now the tricky part: compute the address of the symbol relative
+ # to the selected VNET. In the compiled kernel this is done at
+ # load time by applying a magic transformation to relocations
+ # against symbols in the vnet linker set. Here we have to apply
+ # the transformation manually.
+ vnet_data_base = vnet['vnet_data_base']
+ vnet_entry = symval("vnet_entry_" + sym)
+ vnet_entry_addr = uintptr_t(vnet_entry.address)
+
+ # First, which kernel module does the symbol belong to?
+ for lf in linker_file_foreach():
+ # Find the bounds of this linker file's VNET linker set. The
+ # struct containing the bounds depends on the type of the linker
+ # file, and unfortunately both are called elf_file_t. So we use a
+ # PC value from the compilation unit (either link_elf.c or
+ # link_elf_obj.c) to disambiguate.
+ block = gdb.block_for_pc(lf['ops']['cls']['methods'][0]['func'])
+ elf_file_t = gdb.lookup_type("elf_file_t", block).target()
+ ef = lf.cast(elf_file_t)
+
+ file_type = lf['ops']['cls']['name'].string()
+ if file_type == "elf64":
+ start = uintptr_t(ef['vnet_start'])
+ if start == 0:
+ # This linker file doesn't have a VNET linker set.
+ continue
+ end = uintptr_t(ef['vnet_stop'])
+ base = uintptr_t(ef['vnet_base'])
+ elif file_type == "elf64_obj":
+ for i in range(ef['nprogtab']):
+ pe = ef['progtab'][i]
+ if pe['name'].string() == "set_vnet":
+ start = uintptr_t(pe['origaddr'])
+ end = start + uintptr_t(pe['size'])
+ base = uintptr_t(pe['addr'])
+ break
+ else:
+ # This linker file doesn't have a VNET linker set.
+ continue
+ else:
+ path = lf['pathname'].string()
+ raise gdb.error(f"{path} has unexpected linker file type {file_type}")
+
+ if vnet_entry_addr >= start and vnet_entry_addr < end:
+ # The symbol belongs to this linker file, so compute the final
+ # address.
+ obj = gdb.Value(vnet_data_base + vnet_entry_addr - start + base)
+ return obj.cast(vnet_entry.type.pointer()).dereference()
+
+
+# Register with gdb.
+vnet()
diff --git a/sys/tools/kernel-gdb.py b/sys/tools/kernel-gdb.py
new file mode 100644
index 000000000000..8a41ef6efab1
--- /dev/null
+++ b/sys/tools/kernel-gdb.py
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2025 Mark Johnston <markj@FreeBSD.org>
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), "gdb"))
+
+# Import FreeBSD kernel debugging commands and modules below.
+import acttrace
+import pcpu
+import vnet
diff --git a/sys/tools/makeobjops.awk b/sys/tools/makeobjops.awk
index 5ea658c5a3b3..522fb04ec4d1 100644
--- a/sys/tools/makeobjops.awk
+++ b/sys/tools/makeobjops.awk
@@ -315,7 +315,7 @@ function handle_method (static, doc)
printh("\t" join(";\n\t", arguments, num_arguments) ";");
}
else {
- prototype = "static __inline " ret " " umname "(";
+ prototype = "static __inline " ret "\n" umname "(";
printh(format_line(prototype argument_list ")",
line_width, length(prototype)));
}
@@ -327,7 +327,7 @@ function handle_method (static, doc)
firstvar = "((kobj_t)" firstvar ")";
if (prolog != "")
printh(prolog);
- printh("\tKOBJOPLOOKUP(" firstvar "->ops," mname ");");
+ printh("\tKOBJOPLOOKUP(" firstvar "->ops, " mname ");");
rceq = (ret != "void") ? "rc = " : "";
printh("\t" rceq "((" mname "_t *) _m)(" varname_list ");");
if (epilog != "")
diff --git a/sys/tools/vnode_if.awk b/sys/tools/vnode_if.awk
index 74b11e6cb27d..8e39cc2da3da 100644
--- a/sys/tools/vnode_if.awk
+++ b/sys/tools/vnode_if.awk
@@ -324,6 +324,10 @@ while ((getline < srcfile) > 0) {
printh("extern struct vnodeop_desc " name "_desc;");
printh("");
+ printh("SDT_PROBE_DECLARE(vfs, vop, " name ", entry);\n");
+ printh("SDT_PROBE_DECLARE(vfs, vop, " name ", return);\n");
+ printh("");
+
# Print out function prototypes.
printh("int " uname "_AP(struct " name "_args *);");
printh("int " uname "_APV(const struct vop_vector *vop, struct " name "_args *);");
@@ -341,10 +345,11 @@ while ((getline < srcfile) > 0) {
printh("\ta.a_" args[i] " = " args[i] ";");
if (can_inline(name)) {
printh("\n#if !defined(INVARIANTS) && !defined(KTR)");
- printh("\tif (!SDT_PROBES_ENABLED())");
- printh("\t\treturn (" args[0]"->v_op->"name"(&a));");
- printh("\telse");
- printh("\t\treturn (" uname "_APV("args[0]"->v_op, &a));");
+ printh("\tint rc;")
+ printh("\tSDT_PROBE2(vfs, vop, " name ", entry, a.a_" args[0] ", &a);");
+ printh("\trc = " args[0]"->v_op->"name"(&a);");
+ printh("\tSDT_PROBE3(vfs, vop, " name ", return, a.a_" args[0] ", &a, rc);");
+ printh("\treturn (rc);")
printh("#else");
}
printh("\treturn (" uname "_APV("args[0]"->v_op, &a));");
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index 970536a13aa5..f47cfd08f75a 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -653,8 +653,8 @@ done:
for (i = 0; i < UFS_NDADDR; i++)
if (newblks[i] != DIP(ip, i_db[i]))
panic("ffs_truncate2: blkno %d newblks %jd != i_db %jd",
- i, (intmax_t)newblks[UFS_NDADDR + level],
- (intmax_t)DIP(ip, i_ib[level]));
+ i, (intmax_t)newblks[i],
+ (intmax_t)DIP(ip, i_db[i]));
BO_LOCK(bo);
if (length == 0 &&
(fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) &&
diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c
index 9db0bee0d66d..ef0b2ff4f788 100644
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -281,7 +281,7 @@ ffs_rawread_main(struct vnode *vp,
if (error != 0)
break;
- if (resid > bp->b_bufsize) { /* Setup fist readahead */
+ if (resid > bp->b_bufsize) { /* Setup first readahead */
if (rawreadahead != 0)
nbp = uma_zalloc(ffsraw_pbuf_zone,
M_NOWAIT);
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index b5212ce5875f..297c8257bd22 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -2148,7 +2148,7 @@ retry_flush:
#ifdef QUOTA
UFS_LOCK(ump);
for (i = 0; i < MAXQUOTAS; i++) {
- if (ump->um_quotas[i] != NULLVP)
+ if (ump->um_quotas[i] != NULL)
morework = 1;
}
UFS_UNLOCK(ump);
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index 75f5fe716c31..de6b32795698 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1112,7 +1112,7 @@ ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
ump->um_bptrtodb = fs->fs_fsbtodb;
ump->um_seqinc = fs->fs_frag;
for (i = 0; i < MAXQUOTAS; i++)
- ump->um_quotas[i] = NULLVP;
+ ump->um_quotas[i] = NULL;
#ifdef UFS_EXTATTR
ufs_extattr_uepm_init(&ump->um_extattr);
#endif
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index 897a21032907..c7e2b3f4b8e6 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -1550,7 +1550,7 @@ ffs_openextattr(
} */ *ap)
{
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
@@ -1572,7 +1572,7 @@ ffs_closeextattr(
struct vnode *vp;
vp = ap->a_vp;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (ap->a_commit && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
return (EROFS);
@@ -1610,7 +1610,7 @@ ffs_deleteextattr(
vp = ap->a_vp;
ip = VTOI(vp);
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (strlen(ap->a_name) == 0)
return (EINVAL);
@@ -1688,7 +1688,7 @@ ffs_getextattr(
ip = VTOI(ap->a_vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1738,7 +1738,7 @@ ffs_listextattr(
ip = VTOI(ap->a_vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1803,7 +1803,7 @@ ffs_setextattr(
ip = VTOI(vp);
fs = ITOFS(ip);
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (strlen(ap->a_name) == 0)
return (EINVAL);
diff --git a/sys/ufs/ufs/ufs_lookup.c b/sys/ufs/ufs/ufs_lookup.c
index 9221aa63184d..419ec3de8572 100644
--- a/sys/ufs/ufs/ufs_lookup.c
+++ b/sys/ufs/ufs/ufs_lookup.c
@@ -597,7 +597,7 @@ found:
return (error);
}
if (dp->i_number == ino) {
- VREF(vdp);
+ vref(vdp);
*vpp = vdp;
vput(tdp);
return (0);
@@ -707,7 +707,7 @@ found:
*vpp = tdp;
} else if (dp->i_number == ino) {
- VREF(vdp); /* we want ourself, ie "." */
+ vref(vdp); /* we want ourself, ie "." */
/*
* When we lookup "." we still can be asked to lock it
* differently.
diff --git a/sys/ufs/ufs/ufs_quota.c b/sys/ufs/ufs/ufs_quota.c
index ac125902b74b..55c4bf98e502 100644
--- a/sys/ufs/ufs/ufs_quota.c
+++ b/sys/ufs/ufs/ufs_quota.c
@@ -467,7 +467,7 @@ chkdquot(struct inode *ip)
UFS_LOCK(ump);
for (i = 0; i < MAXQUOTAS; i++) {
- if (ump->um_quotas[i] == NULLVP ||
+ if (ump->um_quotas[i] == NULL ||
(ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
continue;
if (ip->i_dquot[i] == NODQUOT) {
@@ -594,12 +594,12 @@ quotaon(struct thread *td, struct mount *mp, int type, void *fname,
ump->um_cred[type] = crhold(td->td_ucred);
ump->um_btime[type] = MAX_DQ_TIME;
ump->um_itime[type] = MAX_IQ_TIME;
- if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
+ if (dqget(NULL, 0, ump, type, &dq) == 0) {
if (dq->dq_btime > 0)
ump->um_btime[type] = dq->dq_btime;
if (dq->dq_itime > 0)
ump->um_itime[type] = dq->dq_itime;
- dqrele(NULLVP, dq);
+ dqrele(NULL, dq);
}
/*
* Search vnodes associated with this mount point,
@@ -655,7 +655,7 @@ quotaoff1(struct thread *td, struct mount *mp, int type)
UFS_LOCK(ump);
KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
("quotaoff1: flags are invalid"));
- if ((qvp = ump->um_quotas[type]) == NULLVP) {
+ if ((qvp = ump->um_quotas[type]) == NULL) {
UFS_UNLOCK(ump);
return (0);
}
@@ -692,7 +692,7 @@ again:
* access to the closed vnode from dqget/dqsync
*/
UFS_LOCK(ump);
- ump->um_quotas[type] = NULLVP;
+ ump->um_quotas[type] = NULL;
ump->um_cred[type] = NOCRED;
UFS_UNLOCK(ump);
@@ -750,7 +750,7 @@ quotaoff_inchange(struct thread *td, struct mount *mp, int type)
UFS_LOCK(ump);
ump->um_qflags[type] &= ~QTF_CLOSING;
for (i = 0; i < MAXQUOTAS; i++)
- if (ump->um_quotas[i] != NULLVP)
+ if (ump->um_quotas[i] != NULL)
break;
if (i == MAXQUOTAS) {
MNT_ILOCK(mp);
@@ -820,11 +820,11 @@ _getquota(struct thread *td, struct mount *mp, uint64_t id, int type,
}
dq = NODQUOT;
- error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq);
+ error = dqget(NULL, id, VFSTOUFS(mp), type, &dq);
if (error)
return (error);
*dqb = dq->dq_dqb;
- dqrele(NULLVP, dq);
+ dqrele(NULL, dq);
return (error);
}
@@ -850,7 +850,7 @@ _setquota(struct thread *td, struct mount *mp, uint64_t id, int type,
ndq = NODQUOT;
ump = VFSTOUFS(mp);
- error = dqget(NULLVP, id, ump, type, &ndq);
+ error = dqget(NULL, id, ump, type, &ndq);
if (error)
return (error);
dq = ndq;
@@ -887,7 +887,7 @@ _setquota(struct thread *td, struct mount *mp, uint64_t id, int type,
dq->dq_flags &= ~DQ_FAKE;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
- dqrele(NULLVP, dq);
+ dqrele(NULL, dq);
return (0);
}
@@ -913,7 +913,7 @@ _setuse(struct thread *td, struct mount *mp, uint64_t id, int type,
ump = VFSTOUFS(mp);
ndq = NODQUOT;
- error = dqget(NULLVP, id, ump, type, &ndq);
+ error = dqget(NULL, id, ump, type, &ndq);
if (error)
return (error);
dq = ndq;
@@ -937,7 +937,7 @@ _setuse(struct thread *td, struct mount *mp, uint64_t id, int type,
dq->dq_flags &= ~DQ_INODS;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
- dqrele(NULLVP, dq);
+ dqrele(NULL, dq);
return (0);
}
@@ -1038,7 +1038,7 @@ getquotasize(struct thread *td, struct mount *mp, uint64_t id, int type,
int bitsize;
UFS_LOCK(ump);
- if (ump->um_quotas[type] == NULLVP ||
+ if (ump->um_quotas[type] == NULL ||
(ump->um_qflags[type] & QTF_CLOSING)) {
UFS_UNLOCK(ump);
return (EINVAL);
@@ -1067,7 +1067,7 @@ qsync(struct mount *mp)
* If not, simply return.
*/
for (i = 0; i < MAXQUOTAS; i++)
- if (ump->um_quotas[i] != NULLVP)
+ if (ump->um_quotas[i] != NULL)
break;
if (i == MAXQUOTAS)
return (0);
@@ -1114,7 +1114,7 @@ qsyncvp(struct vnode *vp)
* If not, simply return.
*/
for (i = 0; i < MAXQUOTAS; i++)
- if (ump->um_quotas[i] != NULLVP)
+ if (ump->um_quotas[i] != NULL)
break;
if (i == MAXQUOTAS)
return (0);
@@ -1278,10 +1278,10 @@ dqget(struct vnode *vp, uint64_t id, struct ufsmount *ump, int type,
struct uio auio;
int dqvplocked, error;
- if (vp != NULLVP)
+ if (vp != NULL)
ASSERT_VOP_ELOCKED(vp, "dqget");
- if (vp != NULLVP && *dqp != NODQUOT) {
+ if (vp != NULL && *dqp != NODQUOT) {
return (0);
}
@@ -1293,7 +1293,7 @@ dqget(struct vnode *vp, uint64_t id, struct ufsmount *ump, int type,
UFS_LOCK(ump);
dqvp = ump->um_quotas[type];
- if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
+ if (dqvp == NULL || (ump->um_qflags[type] & QTF_CLOSING)) {
*dqp = NODQUOT;
UFS_UNLOCK(ump);
return (EINVAL);
@@ -1561,7 +1561,7 @@ dqsync(struct vnode *vp, struct dquot *dq)
if ((ump = dq->dq_ump) == NULL)
return (0);
UFS_LOCK(ump);
- if ((dqvp = ump->um_quotas[dq->dq_type]) == NULLVP) {
+ if ((dqvp = ump->um_quotas[dq->dq_type]) == NULL) {
if (vp == NULL) {
UFS_UNLOCK(ump);
return (0);
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index ffc993aef9fc..736c5a66267e 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -156,30 +156,30 @@ ufs_itimes_locked(struct vnode *vp)
if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0)
return;
- if ((vp->v_type == VBLK || vp->v_type == VCHR) && !DOINGSOFTDEP(vp))
+ if (VN_ISDEV(vp) && !DOINGSOFTDEP(vp))
UFS_INODE_SET_FLAG(ip, IN_LAZYMOD);
else if (((vp->v_mount->mnt_kern_flag &
- (MNTK_SUSPENDED | MNTK_SUSPEND)) == 0) ||
- (ip->i_flag & (IN_CHANGE | IN_UPDATE)))
+ (MNTK_SUSPENDED | MNTK_SUSPEND)) == 0) ||
+ (ip->i_flag & (IN_CHANGE | IN_UPDATE)) != 0)
UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
- else if (ip->i_flag & IN_ACCESS)
+ else if ((ip->i_flag & IN_ACCESS) != 0)
UFS_INODE_SET_FLAG(ip, IN_LAZYACCESS);
vfs_timestamp(&ts);
- if (ip->i_flag & IN_ACCESS) {
+ if ((ip->i_flag & IN_ACCESS) != 0) {
DIP_SET(ip, i_atime, ts.tv_sec);
DIP_SET(ip, i_atimensec, ts.tv_nsec);
}
- if (ip->i_flag & IN_UPDATE) {
+ if ((ip->i_flag & IN_UPDATE) != 0) {
DIP_SET(ip, i_mtime, ts.tv_sec);
DIP_SET(ip, i_mtimensec, ts.tv_nsec);
}
- if (ip->i_flag & IN_CHANGE) {
+ if ((ip->i_flag & IN_CHANGE) != 0) {
DIP_SET(ip, i_ctime, ts.tv_sec);
DIP_SET(ip, i_ctimensec, ts.tv_nsec);
DIP_SET(ip, i_modrev, DIP(ip, i_modrev) + 1);
}
- out:
+out:
ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE);
}
@@ -319,7 +319,7 @@ ufs_open(struct vop_open_args *ap)
struct vnode *vp = ap->a_vp;
struct inode *ip;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
ip = VTOI(vp);
@@ -540,7 +540,7 @@ ufs_stat(struct vop_stat_args *ap)
sb->st_uid = ip->i_uid;
sb->st_gid = ip->i_gid;
if (I_IS_UFS1(ip)) {
- sb->st_rdev = ip->i_din1->di_rdev;
+ sb->st_rdev = VN_ISDEV(vp) ? ip->i_din1->di_rdev : NODEV;
sb->st_size = ip->i_din1->di_size;
sb->st_mtim.tv_sec = ip->i_din1->di_mtime;
sb->st_mtim.tv_nsec = ip->i_din1->di_mtimensec;
@@ -551,7 +551,7 @@ ufs_stat(struct vop_stat_args *ap)
sb->st_blocks = dbtob((uint64_t)ip->i_din1->di_blocks) / S_BLKSIZE;
sb->st_filerev = ip->i_din1->di_modrev;
} else {
- sb->st_rdev = ip->i_din2->di_rdev;
+ sb->st_rdev = VN_ISDEV(vp) ? ip->i_din2->di_rdev : NODEV;
sb->st_size = ip->i_din2->di_size;
sb->st_mtim.tv_sec = ip->i_din2->di_mtime;
sb->st_mtim.tv_nsec = ip->i_din2->di_mtimensec;
@@ -603,7 +603,7 @@ ufs_getattr(
vap->va_uid = ip->i_uid;
vap->va_gid = ip->i_gid;
if (I_IS_UFS1(ip)) {
- vap->va_rdev = ip->i_din1->di_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->i_din1->di_rdev : NODEV;
vap->va_size = ip->i_din1->di_size;
vap->va_mtime.tv_sec = ip->i_din1->di_mtime;
vap->va_mtime.tv_nsec = ip->i_din1->di_mtimensec;
@@ -612,7 +612,7 @@ ufs_getattr(
vap->va_bytes = dbtob((uint64_t)ip->i_din1->di_blocks);
vap->va_filerev = ip->i_din1->di_modrev;
} else {
- vap->va_rdev = ip->i_din2->di_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->i_din2->di_rdev : NODEV;
vap->va_size = ip->i_din2->di_size;
vap->va_mtime.tv_sec = ip->i_din2->di_mtime;
vap->va_mtime.tv_nsec = ip->i_din2->di_mtimensec;
@@ -2592,8 +2592,12 @@ ufs_print(
printf("\tnlink=%d, effnlink=%d, size=%jd", ip->i_nlink,
ip->i_effnlink, (intmax_t)ip->i_size);
- if (I_IS_UFS2(ip))
- printf(", extsize %d", ip->i_din2->di_extsize);
+ if (I_IS_UFS2(ip)) {
+ if (ip->i_din2 == NULL)
+ printf(", dinode=NULL (fields omitted)");
+ else
+ printf(", extsize=%d", ip->i_din2->di_extsize);
+ }
printf("\n\tgeneration=%jx, uid=%d, gid=%d, flags=0x%b\n",
(uintmax_t)ip->i_gen, ip->i_uid, ip->i_gid,
(uint32_t)ip->i_flags, PRINT_INODE_FLAGS);
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 5189f7405400..b80b5cc781f7 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -4009,20 +4009,15 @@ restart:
/*
* Use the keg's policy if upper layers haven't already specified a
* domain (as happens with first-touch zones).
- *
- * To avoid races we run the iterator with the keg lock held, but that
- * means that we cannot allow the vm_domainset layer to sleep. Thus,
- * clear M_WAITOK and handle low memory conditions locally.
*/
rr = rdomain == UMA_ANYDOMAIN;
+ aflags = flags;
if (rr) {
- aflags = (flags & ~M_WAITOK) | M_NOWAIT;
- vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
- &aflags);
- } else {
- aflags = flags;
+ if (vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
+ &aflags) != 0)
+ return (NULL);
+ } else
domain = rdomain;
- }
for (;;) {
slab = keg_fetch_free_slab(keg, domain, rr, flags);
@@ -4052,13 +4047,8 @@ restart:
if ((flags & M_WAITOK) == 0)
break;
vm_wait_domain(domain);
- } else if (vm_domainset_iter_policy(&di, &domain) != 0) {
- if ((flags & M_WAITOK) != 0) {
- vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
- goto restart;
- }
+ } else if (vm_domainset_iter_policy(&di, &domain) != 0)
break;
- }
}
/*
@@ -5244,9 +5234,10 @@ uma_prealloc(uma_zone_t zone, int items)
KEG_GET(zone, keg);
slabs = howmany(items, keg->uk_ipers);
while (slabs-- > 0) {
- aflags = M_NOWAIT;
- vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
- &aflags);
+ aflags = M_WAITOK;
+ if (vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
+ &aflags) != 0)
+ panic("%s: Domainset is empty", __func__);
for (;;) {
slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
aflags);
@@ -5264,7 +5255,8 @@ uma_prealloc(uma_zone_t zone, int items)
break;
}
if (vm_domainset_iter_policy(&di, &domain) != 0)
- vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
+ panic("%s: Cannot allocate from any domain",
+ __func__);
}
}
}
diff --git a/sys/vm/vm_domainset.c b/sys/vm/vm_domainset.c
index b44bdb96b0d4..9fa17da954f7 100644
--- a/sys/vm/vm_domainset.c
+++ b/sys/vm/vm_domainset.c
@@ -58,6 +58,9 @@
static int vm_domainset_default_stride = 64;
+static bool vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain);
+
+
/*
* Determine which policy is to be used for this allocation.
*/
@@ -93,28 +96,15 @@ vm_domainset_iter_init(struct vm_domainset_iter *di, struct domainset *ds,
pindex += (((uintptr_t)obj) / sizeof(*obj));
di->di_offset = pindex;
}
- /* Skip domains below min on the first pass. */
- di->di_minskip = true;
}
static void
vm_domainset_iter_rr(struct vm_domainset_iter *di, int *domain)
{
+ /* Grab the next domain in 'ds_order'. */
*domain = di->di_domain->ds_order[
- ++(*di->di_iter) % di->di_domain->ds_cnt];
-}
-
-static void
-vm_domainset_iter_prefer(struct vm_domainset_iter *di, int *domain)
-{
- int d;
-
- do {
- d = di->di_domain->ds_order[
- ++(*di->di_iter) % di->di_domain->ds_cnt];
- } while (d == di->di_domain->ds_prefer);
- *domain = d;
+ (*di->di_iter)++ % di->di_domain->ds_cnt];
}
static void
@@ -127,79 +117,144 @@ vm_domainset_iter_interleave(struct vm_domainset_iter *di, int *domain)
*domain = di->di_domain->ds_order[d];
}
-static void
-vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
+/*
+ * Internal function determining the current phase's first candidate domain.
+ *
+ * Returns whether there is an eligible domain; the domain itself is
+ * returned through '*domain'. '*domain' can be modified even if there is
+ * no eligible domain.
+ *
+ * See the herald comment of vm_domainset_iter_first() below about phases.
+ */
+static bool
+vm_domainset_iter_phase_first(struct vm_domainset_iter *di, int *domain)
{
-
- KASSERT(di->di_n > 0, ("%s: Invalid n %d", __func__, di->di_n));
switch (di->di_policy) {
case DOMAINSET_POLICY_FIRSTTOUCH:
- /*
- * To prevent impossible allocations we convert an invalid
- * first-touch to round-robin.
- */
- /* FALLTHROUGH */
- case DOMAINSET_POLICY_INTERLEAVE:
- /* FALLTHROUGH */
+ *domain = PCPU_GET(domain);
+ break;
case DOMAINSET_POLICY_ROUNDROBIN:
vm_domainset_iter_rr(di, domain);
break;
case DOMAINSET_POLICY_PREFER:
- vm_domainset_iter_prefer(di, domain);
+ *domain = di->di_domain->ds_prefer;
+ break;
+ case DOMAINSET_POLICY_INTERLEAVE:
+ vm_domainset_iter_interleave(di, domain);
break;
default:
panic("%s: Unknown policy %d", __func__, di->di_policy);
}
KASSERT(*domain < vm_ndomains,
("%s: Invalid domain %d", __func__, *domain));
+
+ /*
+ * Has the policy's start domain already been visited?
+ */
+ if (!DOMAINSET_ISSET(*domain, &di->di_remain_mask))
+ return (vm_domainset_iter_next(di, domain));
+
+ DOMAINSET_CLR(*domain, &di->di_remain_mask);
+
+ /* Does it have enough free pages (phase 1)? */
+ if (di->di_minskip && vm_page_count_min_domain(*domain)) {
+ /* Mark the domain as eligible for phase 2. */
+ DOMAINSET_SET(*domain, &di->di_min_mask);
+ return (vm_domainset_iter_next(di, domain));
+ }
+
+ return (true);
}
-static void
+/*
+ * Resets an iterator to point to the first candidate domain.
+ *
+ * Returns whether there is an eligible domain to start with. '*domain' may be
+ * modified even if there is none.
+ *
+ * There must have been one call to vm_domainset_iter_init() before.
+ *
+ * This function must be called at least once before calling
+ * vm_domainset_iter_next(). Note that functions wrapping
+ * vm_domainset_iter_init() usually do that themselves.
+ *
+ * This function may be called again to reset the iterator to the policy's first
+ * candidate domain. After each reset, the iterator will visit the same domains
+ * as in the previous iteration minus those on which vm_domainset_iter_ignore()
+ * has been called. Note that the first candidate domain may change at each
+ * reset (at time of this writing, only on the DOMAINSET_POLICY_ROUNDROBIN
+ * policy).
+ *
+ * Domains which have a number of free pages over 'v_free_min' are always
+ * visited first (this is called "phase 1" in comments, "phase 2" being the
+ * examination of the remaining domains; no domains are ever visited twice).
+ */
+static bool
vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain)
{
+ /* Initialize the mask of domains to visit. */
+ DOMAINSET_COPY(&di->di_valid_mask, &di->di_remain_mask);
+ /*
+ * No candidate domains for phase 2 at start. This will be filled by
+ * phase 1.
+ */
+ DOMAINSET_ZERO(&di->di_min_mask);
+ /* Skip domains below 'v_free_min' on phase 1. */
+ di->di_minskip = true;
- switch (di->di_policy) {
- case DOMAINSET_POLICY_FIRSTTOUCH:
- *domain = PCPU_GET(domain);
- if (DOMAINSET_ISSET(*domain, &di->di_valid_mask)) {
- /*
- * Add an extra iteration because we will visit the
- * current domain a second time in the rr iterator.
- */
- di->di_n = di->di_domain->ds_cnt + 1;
- break;
- }
- /*
- * To prevent impossible allocations we convert an invalid
- * first-touch to round-robin.
- */
- /* FALLTHROUGH */
- case DOMAINSET_POLICY_ROUNDROBIN:
- di->di_n = di->di_domain->ds_cnt;
+ return (vm_domainset_iter_phase_first(di, domain));
+}
+
+/*
+ * Advances the iterator to the next candidate domain.
+ *
+ * Returns whether there was another domain to visit. '*domain' may be modified
+ * even if there is none.
+ *
+ * vm_domainset_iter_first() must have been called at least once before using
+ * this function (see its herald comment for more details on iterators).
+ */
+static bool
+vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
+{
+ /* Loop while domains remain to be visited in the current phase. */
+ while (!DOMAINSET_EMPTY(&di->di_remain_mask)) {
+ /* Grab the next domain in 'ds_order'. */
vm_domainset_iter_rr(di, domain);
- break;
- case DOMAINSET_POLICY_PREFER:
- *domain = di->di_domain->ds_prefer;
- di->di_n = di->di_domain->ds_cnt;
- break;
- case DOMAINSET_POLICY_INTERLEAVE:
- vm_domainset_iter_interleave(di, domain);
- di->di_n = di->di_domain->ds_cnt;
- break;
- default:
- panic("%s: Unknown policy %d", __func__, di->di_policy);
+ KASSERT(*domain < vm_ndomains,
+ ("%s: Invalid domain %d", __func__, *domain));
+
+ if (DOMAINSET_ISSET(*domain, &di->di_remain_mask)) {
+ DOMAINSET_CLR(*domain, &di->di_remain_mask);
+ if (!di->di_minskip || !vm_page_count_min_domain(*domain))
+ return (true);
+ DOMAINSET_SET(*domain, &di->di_min_mask);
+ }
}
- KASSERT(di->di_n > 0, ("%s: Invalid n %d", __func__, di->di_n));
- KASSERT(*domain < vm_ndomains,
- ("%s: Invalid domain %d", __func__, *domain));
+
+ /*
+ * If phase 1 (skip low memory domains) is over, start phase 2 (consider
+ * low memory domains).
+ */
+ if (di->di_minskip) {
+ di->di_minskip = false;
+ /* Browse domains that were under 'v_free_min'. */
+ DOMAINSET_COPY(&di->di_min_mask, &di->di_remain_mask);
+ return (vm_domainset_iter_phase_first(di, domain));
+ }
+
+ return (false);
}
-void
+int
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
- vm_pindex_t pindex, int *domain, int *req, struct pctrie_iter *pages)
+ vm_pindex_t pindex, int *domain, int *req)
{
struct domainset_ref *dr;
+ di->di_flags = *req;
+ *req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
+ VM_ALLOC_NOWAIT;
+
/*
* Object policy takes precedence over thread policy. The policies
* are immutable and unsynchronized. Updates can race but pointer
@@ -209,36 +264,21 @@ vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
dr = &obj->domain;
else
dr = &curthread->td_domain;
+
vm_domainset_iter_init(di, dr->dr_policy, &dr->dr_iter, obj, pindex);
- di->di_flags = *req;
- *req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
- VM_ALLOC_NOWAIT;
- vm_domainset_iter_first(di, domain);
- if (vm_page_count_min_domain(*domain))
- vm_domainset_iter_page(di, obj, domain, pages);
+ /*
+ * XXXOC: Shouldn't we just panic on 'false' if VM_ALLOC_WAITOK was
+ * passed?
+ */
+ return (vm_domainset_iter_first(di, domain) ? 0 : ENOMEM);
}
int
vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
int *domain, struct pctrie_iter *pages)
{
- if (__predict_false(DOMAINSET_EMPTY(&di->di_valid_mask)))
- return (ENOMEM);
-
- /* If there are more domains to visit we run the iterator. */
- while (--di->di_n != 0) {
- vm_domainset_iter_next(di, domain);
- if (DOMAINSET_ISSET(*domain, &di->di_valid_mask) &&
- (!di->di_minskip || !vm_page_count_min_domain(*domain)))
- return (0);
- }
-
- /* If we skipped domains below min restart the search. */
- if (di->di_minskip) {
- di->di_minskip = false;
- vm_domainset_iter_first(di, domain);
+ if (vm_domainset_iter_next(di, domain))
return (0);
- }
/* If we visited all domains and this was a NOWAIT we return error. */
if ((di->di_flags & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) == 0)
@@ -257,61 +297,43 @@ vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
return (ENOMEM);
/* Restart the search. */
- vm_domainset_iter_first(di, domain);
-
- return (0);
+ /* XXXOC: Shouldn't we just panic on 'false'? */
+ return (vm_domainset_iter_first(di, domain) ? 0 : ENOMEM);
}
-static void
+static int
_vm_domainset_iter_policy_init(struct vm_domainset_iter *di, int *domain,
int *flags)
{
-
di->di_flags = *flags;
*flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
- vm_domainset_iter_first(di, domain);
- if (vm_page_count_min_domain(*domain))
- vm_domainset_iter_policy(di, domain);
+ /* XXXOC: Shouldn't we just panic on 'false' if M_WAITOK was passed? */
+ return (vm_domainset_iter_first(di, domain) ? 0 : ENOMEM);
}
-void
+int
vm_domainset_iter_policy_init(struct vm_domainset_iter *di,
struct domainset *ds, int *domain, int *flags)
{
vm_domainset_iter_init(di, ds, &curthread->td_domain.dr_iter, NULL, 0);
- _vm_domainset_iter_policy_init(di, domain, flags);
+ return (_vm_domainset_iter_policy_init(di, domain, flags));
}
-void
+int
vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *di,
struct domainset_ref *dr, int *domain, int *flags)
{
vm_domainset_iter_init(di, dr->dr_policy, &dr->dr_iter, NULL, 0);
- _vm_domainset_iter_policy_init(di, domain, flags);
+ return (_vm_domainset_iter_policy_init(di, domain, flags));
}
int
vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
{
- if (DOMAINSET_EMPTY(&di->di_valid_mask))
- return (ENOMEM);
-
- /* If there are more domains to visit we run the iterator. */
- while (--di->di_n != 0) {
- vm_domainset_iter_next(di, domain);
- if (DOMAINSET_ISSET(*domain, &di->di_valid_mask) &&
- (!di->di_minskip || !vm_page_count_min_domain(*domain)))
- return (0);
- }
-
- /* If we skipped domains below min restart the search. */
- if (di->di_minskip) {
- di->di_minskip = false;
- vm_domainset_iter_first(di, domain);
+ if (vm_domainset_iter_next(di, domain))
return (0);
- }
/* If we visited all domains and this was a NOWAIT we return error. */
if ((di->di_flags & M_WAITOK) == 0)
@@ -321,9 +343,8 @@ vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
vm_wait_doms(&di->di_valid_mask, 0);
/* Restart the search. */
- vm_domainset_iter_first(di, domain);
-
- return (0);
+ /* XXXOC: Shouldn't we just panic on 'false'? */
+ return (vm_domainset_iter_first(di, domain) ? 0 : ENOMEM);
}
void
@@ -345,12 +366,12 @@ vm_domainset_iter_page(struct vm_domainset_iter *di, struct vm_object *obj,
return (EJUSTRETURN);
}
-void
+int
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
- vm_pindex_t pindex, int *domain, int *flags, struct pctrie_iter *pages)
+ vm_pindex_t pindex, int *domain, int *flags)
{
-
*domain = 0;
+ return (0);
}
int
@@ -360,20 +381,20 @@ vm_domainset_iter_policy(struct vm_domainset_iter *di, int *domain)
return (EJUSTRETURN);
}
-void
+int
vm_domainset_iter_policy_init(struct vm_domainset_iter *di,
struct domainset *ds, int *domain, int *flags)
{
-
*domain = 0;
+ return (0);
}
-void
+int
vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *di,
struct domainset_ref *dr, int *domain, int *flags)
{
-
*domain = 0;
+ return (0);
}
void
diff --git a/sys/vm/vm_domainset.h b/sys/vm/vm_domainset.h
index 0d325a642f40..ef86c8ccb5e4 100644
--- a/sys/vm/vm_domainset.h
+++ b/sys/vm/vm_domainset.h
@@ -33,23 +33,26 @@ struct pctrie_iter;
struct vm_domainset_iter {
struct domainset *di_domain;
unsigned int *di_iter;
+ /* Initialized from 'di_domain', initial value after reset. */
domainset_t di_valid_mask;
+ /* Domains to browse in the current phase. */
+ domainset_t di_remain_mask;
+ /* Domains skipped in phase 1 because under 'v_free_min'. */
+ domainset_t di_min_mask;
vm_pindex_t di_offset;
int di_flags;
uint16_t di_policy;
- domainid_t di_n;
bool di_minskip;
};
int vm_domainset_iter_page(struct vm_domainset_iter *, struct vm_object *,
int *, struct pctrie_iter *);
-void vm_domainset_iter_page_init(struct vm_domainset_iter *,
- struct vm_object *, vm_pindex_t, int *, int *,
- struct pctrie_iter *);
+int vm_domainset_iter_page_init(struct vm_domainset_iter *,
+ struct vm_object *, vm_pindex_t, int *, int *);
int vm_domainset_iter_policy(struct vm_domainset_iter *, int *);
-void vm_domainset_iter_policy_init(struct vm_domainset_iter *,
+int vm_domainset_iter_policy_init(struct vm_domainset_iter *,
struct domainset *, int *, int *);
-void vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *,
+int vm_domainset_iter_policy_ref_init(struct vm_domainset_iter *,
struct domainset_ref *, int *, int *);
void vm_domainset_iter_ignore(struct vm_domainset_iter *, int);
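
The iterator init functions now return an error instead of silently
starting on an empty domainset, and a single vm_domainset_iter_policy()
call advances through both phases. A consumer sketch under those
assumptions, with alloc_from_domain() standing in as a hypothetical
per-domain allocation helper (compare the uma_core.c hunks above):

	static void *
	try_alloc_any_domain(int flags)
	{
		struct vm_domainset_iter di;
		void *item;
		int domain;

		if (vm_domainset_iter_policy_ref_init(&di,
		    &curthread->td_domain, &domain, &flags) != 0)
			return (NULL);	/* Empty domainset. */
		do {
			/* Hypothetical per-domain allocation helper. */
			item = alloc_from_domain(domain, flags);
		} while (item == NULL &&
		    vm_domainset_iter_policy(&di, &domain) == 0);
		return (item);
	}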
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 93ec6014c27d..1fd6518cf4ed 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -91,6 +91,8 @@ void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
vm_ooffset_t *);
int vm_fault_disable_pagefaults(void);
void vm_fault_enable_pagefaults(int save);
+int vm_fault_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
+ vm_prot_t prot, vm_page_t *ma, int max_count, int *ppages_count);
int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
vm_prot_t prot, vm_page_t *ma, int max_count);
int vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 3e57e8d4f1d0..2e150b368d71 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -71,11 +71,9 @@
* Page fault handling module.
*/
-#include <sys/cdefs.h>
#include "opt_ktrace.h"
#include "opt_vm.h"
-#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@@ -204,7 +202,10 @@ vm_fault_page_release(vm_page_t *mp)
* pageout while optimizing fault restarts.
*/
vm_page_deactivate(m);
- vm_page_xunbusy(m);
+ if (vm_page_xbusied(m))
+ vm_page_xunbusy(m);
+ else
+ vm_page_sunbusy(m);
*mp = NULL;
}
}
@@ -260,6 +261,12 @@ vm_fault_unlock_vp(struct faultstate *fs)
}
}
+static bool
+vm_fault_might_be_cow(struct faultstate *fs)
+{
+ return (fs->object != fs->first_object);
+}
+
static void
vm_fault_deallocate(struct faultstate *fs)
{
@@ -267,7 +274,7 @@ vm_fault_deallocate(struct faultstate *fs)
vm_fault_page_release(&fs->m_cow);
vm_fault_page_release(&fs->m);
vm_object_pip_wakeup(fs->object);
- if (fs->object != fs->first_object) {
+ if (vm_fault_might_be_cow(fs)) {
VM_OBJECT_WLOCK(fs->first_object);
vm_fault_page_free(&fs->first_m);
VM_OBJECT_WUNLOCK(fs->first_object);
@@ -329,6 +336,13 @@ vm_fault_dirty(struct faultstate *fs, vm_page_t m)
}
+static bool
+vm_fault_is_read(const struct faultstate *fs)
+{
+ return ((fs->prot & VM_PROT_WRITE) == 0 &&
+ (fs->fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) == 0);
+}
+
/*
* Unlocks fs.first_object and fs.map on success.
*/
@@ -694,21 +708,18 @@ _Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif
/*
- * vm_fault_trap:
- *
- * Handle a page fault occurring at the given address,
- * requiring the given permissions, in the map specified.
- * If successful, the page is inserted into the
- * associated physical map.
+ * vm_fault_trap:
*
- * NOTE: the given address should be truncated to the
- * proper page address.
+ * Helper for the page fault trap handlers, wrapping vm_fault().
+ * Issues ktrace(2) tracepoints for the faults.
*
- * KERN_SUCCESS is returned if the page fault is handled; otherwise,
- * a standard error specifying why the fault is fatal is returned.
+ * If a fault cannot be handled successfully by satisfying the
+ * required mapping, and the faulted instruction cannot be restarted,
+ * the signal number and si_code values are returned for trapsignal()
+ * to deliver.
*
- * The map in question must be referenced, and remains so.
- * Caller may hold no locks.
+ * Returns Mach error codes, but callers should only check for
+ * KERN_SUCCESS.
*/
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
@@ -1002,12 +1013,22 @@ vm_fault_relookup(struct faultstate *fs)
return (KERN_SUCCESS);
}
+static bool
+vm_fault_can_cow_rename(struct faultstate *fs)
+{
+ return (
+ /* Only one shadow object and no other refs. */
+ fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
+ /* No other ways to look the object up. */
+ fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0);
+}
+
static void
vm_fault_cow(struct faultstate *fs)
{
- bool is_first_object_locked;
+ bool is_first_object_locked, rename_cow;
- KASSERT(fs->object != fs->first_object,
+ KASSERT(vm_fault_might_be_cow(fs),
("source and target COW objects are identical"));
/*
@@ -1019,21 +1040,29 @@ vm_fault_cow(struct faultstate *fs)
* object so that it will go out to swap when needed.
*/
is_first_object_locked = false;
- if (
- /*
- * Only one shadow object and no other refs.
- */
- fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
- /*
- * No other ways to look the object up
- */
- fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
- /*
- * We don't chase down the shadow chain and we can acquire locks.
- */
- (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
- fs->object == fs->first_object->backing_object &&
- VM_OBJECT_TRYWLOCK(fs->object)) {
+ rename_cow = false;
+
+ if (vm_fault_can_cow_rename(fs) && vm_page_xbusied(fs->m)) {
+ /*
+ * Check that we don't chase down the shadow chain and
+ * we can acquire locks. Recheck the conditions for
+ * rename after the shadow chain is stable after the
+ * object locking.
+ */
+ is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object);
+ if (is_first_object_locked &&
+ fs->object == fs->first_object->backing_object) {
+ if (VM_OBJECT_TRYWLOCK(fs->object)) {
+ rename_cow = vm_fault_can_cow_rename(fs);
+ if (!rename_cow)
+ VM_OBJECT_WUNLOCK(fs->object);
+ }
+ }
+ }
+
+ if (rename_cow) {
+ vm_page_assert_xbusied(fs->m);
+
/*
* Remove but keep xbusy for replace. fs->m is moved into
* fs->first_object and left busy while fs->first_m is
@@ -1090,8 +1119,12 @@ vm_fault_cow(struct faultstate *fs)
* address space. If OBJ_ONEMAPPING is set after the check,
* removing mappings will at worse trigger some unnecessary page
* faults.
+ *
+ * In the fs->m shared busy case, the xbusy state of
+ * fs->first_m prevents new mappings of fs->m from
+ * being created because a parallel fault on this
+ * shadow chain should wait for xbusy on fs->first_m.
*/
- vm_page_assert_xbusied(fs->m_cow);
if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
pmap_remove_all(fs->m_cow);
}
@@ -1171,7 +1204,7 @@ vm_fault_zerofill(struct faultstate *fs)
* If there's no object left, fill the page in the top
* object with zeros.
*/
- if (fs->object != fs->first_object) {
+ if (vm_fault_might_be_cow(fs)) {
vm_object_pip_wakeup(fs->object);
fs->object = fs->first_object;
fs->pindex = fs->first_pindex;
@@ -1420,14 +1453,13 @@ vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
* and we could end up trying to pagein and pageout the same page
* simultaneously.
*
- * We can theoretically allow the busy case on a read fault if the page
- * is marked valid, but since such pages are typically already pmap'd,
- * putting that special case in might be more effort then it is worth.
- * We cannot under any circumstances mess around with a shared busied
- * page except, perhaps, to pmap it.
+ * We allow the busy case on a read fault if the page is valid. We
+ * cannot under any circumstances mess around with a shared busied
+ * page except, perhaps, to pmap it. This is controlled by the
+ * VM_ALLOC_SBUSY bit in the allocflags argument.
*/
static void
-vm_fault_busy_sleep(struct faultstate *fs)
+vm_fault_busy_sleep(struct faultstate *fs, int allocflags)
{
/*
* Reference the page before unlocking and
@@ -1435,13 +1467,13 @@ vm_fault_busy_sleep(struct faultstate *fs)
* likely to reclaim it.
*/
vm_page_aflag_set(fs->m, PGA_REFERENCED);
- if (fs->object != fs->first_object) {
+ if (vm_fault_might_be_cow(fs)) {
vm_fault_page_release(&fs->first_m);
vm_object_pip_wakeup(fs->first_object);
}
vm_object_pip_wakeup(fs->object);
vm_fault_unlock_map(fs);
- if (!vm_page_busy_sleep(fs->m, "vmpfw", 0))
+ if (!vm_page_busy_sleep(fs->m, "vmpfw", allocflags))
VM_OBJECT_UNLOCK(fs->object);
VM_CNT_INC(v_intrans);
vm_object_deallocate(fs->first_object);
@@ -1487,8 +1519,53 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
vm_page_iter_init(&pages, fs->object);
fs->m = vm_radix_iter_lookup(&pages, fs->pindex);
if (fs->m != NULL) {
+ /*
+ * If the found page is valid, will be either shadowed
+ * or mapped read-only, and will not be renamed for
+ * COW, then busy it in shared mode. This allows
+ * other faults needing this page to proceed in
+ * parallel.
+ *
+ * Unlocked check for validity, rechecked after busy
+ * is obtained.
+ */
+ if (vm_page_all_valid(fs->m) &&
+ /*
+ * No write permissions for the new fs->m mapping,
+ * or the first object has only one mapping, so
+ * other writeable COW mappings of fs->m cannot
+ * appear under us.
+ */
+ (vm_fault_is_read(fs) || vm_fault_might_be_cow(fs)) &&
+ /*
+ * fs->m cannot be renamed from object to
+ * first_object. These conditions will be
+ * re-checked with proper synchronization in
+ * vm_fault_cow().
+ */
+ (!vm_fault_can_cow_rename(fs) ||
+ fs->object != fs->first_object->backing_object)) {
+ if (!vm_page_trysbusy(fs->m)) {
+ vm_fault_busy_sleep(fs, VM_ALLOC_SBUSY);
+ return (FAULT_RESTART);
+ }
+
+ /*
+ * Now make sure that racily checked
+ * conditions are still valid.
+ */
+ if (__predict_true(vm_page_all_valid(fs->m) &&
+ (vm_fault_is_read(fs) ||
+ vm_fault_might_be_cow(fs)))) {
+ VM_OBJECT_UNLOCK(fs->object);
+ return (FAULT_SOFT);
+ }
+
+ vm_page_sunbusy(fs->m);
+ }
+
if (!vm_page_tryxbusy(fs->m)) {
- vm_fault_busy_sleep(fs);
+ vm_fault_busy_sleep(fs, 0);
return (FAULT_RESTART);
}
@@ -1546,6 +1623,27 @@ vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
return (res);
}
+/*
+ * vm_fault:
+ *
+ * Handle a page fault occurring at the given address, requiring the
+ * given permissions, in the map specified. If successful, the page
+ * is inserted into the associated physical map, and optionally
+ * referenced and returned in *m_hold.
+ *
+ * The given address should be truncated to the proper page address.
+ *
+ * KERN_SUCCESS is returned if the page fault is handled; otherwise, a
+ * Mach error specifying why the fault is fatal is returned.
+ *
+ * The map in question must be kept alive: either it is the map of the
+ * current process, or the owner process's hold count has been
+ * incremented to prevent exit().
+ *
+ * If the thread-private TDP_NOFAULTING flag is set, any fault results
+ * in an immediate protection failure. Otherwise the fault is
+ * processed; the caller must not hold any locks.
+ */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int fault_flags, vm_page_t *m_hold)
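
For reference, a hypothetical caller following the contract documented
above (map and va are assumed set up elsewhere; vm_mmap_to_errno() is
the usual translator from Mach status codes to errno values):

	vm_page_t m;
	int rv;

	rv = vm_fault(map, trunc_page(va), VM_PROT_READ, VM_FAULT_NORMAL,
	    &m);
	if (rv != KERN_SUCCESS)
		return (vm_mmap_to_errno(rv));
	/* Because m_hold was non-NULL, m was wired for the caller. */
	vm_page_unwire(m, PQ_ACTIVE);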
@@ -1701,10 +1799,15 @@ RetryFault:
found:
/*
- * A valid page has been found and exclusively busied. The
- * object lock must no longer be held.
+ * A valid page has been found and busied, in either shared or
+ * exclusive mode. The object lock must no longer be held.
+ *
+ * Regardless of the busy state of fs.m, fs.first_m is always
+ * exclusively busied after the first iteration of the loop
+ * calling vm_fault_object(). This is an ordering point for
+ * the parallel faults occurring on the same page.
*/
- vm_page_assert_xbusied(fs.m);
+ vm_page_assert_busied(fs.m);
VM_OBJECT_ASSERT_UNLOCKED(fs.object);
/*
@@ -1712,7 +1815,7 @@ found:
* top-level object, we have to copy it into a new page owned by the
* top-level object.
*/
- if (fs.object != fs.first_object) {
+ if (vm_fault_might_be_cow(&fs)) {
/*
* We only really need to copy if we want to write it.
*/
@@ -1773,7 +1876,7 @@ found:
* Page must be completely valid or it is not fit to
* map into user space. vm_pager_get_pages() ensures this.
*/
- vm_page_assert_xbusied(fs.m);
+ vm_page_assert_busied(fs.m);
KASSERT(vm_page_all_valid(fs.m),
("vm_fault: page %p partially invalid", fs.m));
@@ -1805,7 +1908,13 @@ found:
(*fs.m_hold) = fs.m;
vm_page_wire(fs.m);
}
- vm_page_xunbusy(fs.m);
+
+ KASSERT(fs.first_object == fs.object || vm_page_xbusied(fs.first_m),
+ ("first_m must be xbusy"));
+ if (vm_page_xbusied(fs.m))
+ vm_page_xunbusy(fs.m);
+ else
+ vm_page_sunbusy(fs.m);
fs.m = NULL;
/*
@@ -1995,32 +2104,43 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
}
/*
- * Hold each of the physical pages that are mapped by the specified range of
- * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
- * and allow the specified types of access, "prot". If all of the implied
- * pages are successfully held, then the number of held pages is returned
- * together with pointers to those pages in the array "ma". However, if any
- * of the pages cannot be held, -1 is returned.
+ * Hold each of the physical pages that are mapped by the specified
+ * range of virtual addresses, ["addr", "addr" + "len"), if those
+ * mappings are valid and allow the specified types of access, "prot".
+ * If all of the implied pages are successfully held, zero is
+ * returned, the number of held pages is assigned to *ppages_count,
+ * and pointers to those pages are stored in the array "ma".
+ *
+ * However, if any of the pages cannot be held, an error is returned,
+ * and no pages are held.
+ * Error values:
+ * ENOMEM - the address range is not valid in the map
+ * EINVAL - the provided vm_page array is too small to hold all pages
+ * EAGAIN - a page was not mapped, and the thread is in nofaulting mode
+ * EFAULT - a page with requested permissions cannot be mapped
+ * (more detailed result from vm_fault() is lost)
*/
int
-vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
- vm_prot_t prot, vm_page_t *ma, int max_count)
+vm_fault_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
+ vm_prot_t prot, vm_page_t *ma, int max_count, int *ppages_count)
{
vm_offset_t end, va;
vm_page_t *mp;
- int count;
+ int count, error;
boolean_t pmap_failed;
- if (len == 0)
+ if (len == 0) {
+ *ppages_count = 0;
return (0);
+ }
end = round_page(addr + len);
addr = trunc_page(addr);
if (!vm_map_range_valid(map, addr, end))
- return (-1);
+ return (ENOMEM);
if (atop(end - addr) > max_count)
- panic("vm_fault_quick_hold_pages: count > max_count");
+ return (EINVAL);
count = atop(end - addr);
/*
@@ -2062,19 +2182,49 @@ vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
* the proper behaviour explicitly.
*/
if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
- (curthread->td_pflags & TDP_NOFAULTING) != 0)
- goto error;
- for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
+ (curthread->td_pflags & TDP_NOFAULTING) != 0) {
+ error = EAGAIN;
+ goto fail;
+ }
+ for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
if (*mp == NULL && vm_fault(map, va, prot,
- VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
- goto error;
+ VM_FAULT_NORMAL, mp) != KERN_SUCCESS) {
+ error = EFAULT;
+ goto fail;
+ }
+ }
}
- return (count);
-error:
+ *ppages_count = count;
+ return (0);
+fail:
for (mp = ma; mp < ma + count; mp++)
if (*mp != NULL)
vm_page_unwire(*mp, PQ_INACTIVE);
- return (-1);
+ return (error);
+}
+
+/*
+ * Hold each of the physical pages that are mapped by the specified range of
+ * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
+ * and allow the specified types of access, "prot". If all of the implied
+ * pages are successfully held, then the number of held pages is returned
+ * together with pointers to those pages in the array "ma". However, if any
+ * of the pages cannot be held, -1 is returned.
+ */
+int
+vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
+ vm_prot_t prot, vm_page_t *ma, int max_count)
+{
+ int error, pages_count;
+
+ error = vm_fault_hold_pages(map, addr, len, prot, ma,
+ max_count, &pages_count);
+ if (error != 0) {
+ if (error == EINVAL)
+ panic("vm_fault_quick_hold_pages: count > max_count");
+ return (-1);
+ }
+ return (pages_count);
}
/*
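
A sketch of how a kernel caller might consume the new errno-style
contract of vm_fault_hold_pages(), as opposed to the -1 convention
kept by the wrapper above (the surrounding code is hypothetical):

	vm_page_t ma[16];
	int error, held;

	error = vm_fault_hold_pages(map, addr, len, VM_PROT_READ, ma,
	    nitems(ma), &held);
	switch (error) {
	case 0:
		/* 'held' pages are wired; drop with vm_page_unwire(). */
		break;
	case ENOMEM:	/* the range is not valid in the map */
	case EINVAL:	/* the range implies more than nitems(ma) pages */
	case EAGAIN:	/* TDP_NOFAULTING is set and a page was unmapped */
	case EFAULT:	/* vm_fault() failed; the detailed cause is lost */
		break;
	}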
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 94df2c2f9a9e..e0f1807a1b32 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -453,7 +453,7 @@ vm_thread_stack_create(struct domainset *ds, int pages)
obj = vm_thread_kstack_size_to_obj(pages);
if (vm_ndomains > 1)
obj->domain.dr_policy = ds;
- vm_domainset_iter_page_init(&di, obj, 0, &domain, &req, NULL);
+ vm_domainset_iter_page_init(&di, obj, 0, &domain, &req);
do {
/*
* Get a kernel virtual address for this thread's kstack.
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index e7d7b6726d2c..ac327aa37b72 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -323,7 +323,9 @@ kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
start_segind = -1;
- vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
+ if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags) != 0)
+ return (NULL);
+
do {
addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
memattr);
@@ -417,7 +419,9 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
start_segind = -1;
- vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
+ if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags))
+ return (NULL);
+
do {
addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
alignment, boundary, memattr);
@@ -517,7 +521,9 @@ kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
void *addr;
int domain;
- vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
+ if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags) != 0)
+ return (NULL);
+
do {
addr = kmem_malloc_domain(domain, size, flags);
if (addr != NULL)
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index fef28bb883e4..fee50f49c844 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -96,7 +96,7 @@ struct vmmeter __read_mostly vm_cnt = {
u_long __exclusive_cache_line vm_user_wire_count;
static void
-vmcounter_startup(void)
+vmcounter_startup(void *dummy __unused)
{
counter_u64_t *cnt = (counter_u64_t *)&vm_cnt;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 6d9ea8bf9d93..5b4517d2bf0c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -2522,15 +2522,13 @@ vm_object_list_handler(struct sysctl_req *req, bool swap_only)
continue;
}
mtx_unlock(&vm_object_list_mtx);
+
+ memset(kvo, 0, sizeof(*kvo));
kvo->kvo_size = ptoa(obj->size);
kvo->kvo_resident = obj->resident_page_count;
kvo->kvo_ref_count = obj->ref_count;
kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count);
kvo->kvo_memattr = obj->memattr;
- kvo->kvo_active = 0;
- kvo->kvo_inactive = 0;
- kvo->kvo_laundry = 0;
- kvo->kvo_flags = 0;
if (!swap_only) {
vm_page_iter_init(&pages, obj);
VM_RADIX_FOREACH(m, &pages) {
@@ -2549,12 +2547,12 @@ vm_object_list_handler(struct sysctl_req *req, bool swap_only)
kvo->kvo_inactive++;
else if (vm_page_in_laundry(m))
kvo->kvo_laundry++;
+
+ if (vm_page_wired(m))
+ kvo->kvo_wired++;
}
}
- kvo->kvo_vn_fileid = 0;
- kvo->kvo_vn_fsid = 0;
- kvo->kvo_vn_fsid_freebsd11 = 0;
freepath = NULL;
fullpath = "";
vp = NULL;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 20e9fc1fcdcd..e58fae5f0090 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -235,9 +235,7 @@ extern struct mtx vm_object_list_mtx; /* lock for object list and count */
extern struct vm_object kernel_object_store;
-/* kernel and kmem are aliased for backwards KPI compat. */
#define kernel_object (&kernel_object_store)
-#define kmem_object (&kernel_object_store)
#define VM_OBJECT_ASSERT_LOCKED(object) \
rw_assert(&(object)->lock, RA_LOCKED)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index abad5efb8a79..16878604fa11 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2015,8 +2015,9 @@ vm_page_alloc_iter(vm_object_t object, vm_pindex_t pindex, int req,
vm_page_t m;
int domain;
- vm_domainset_iter_page_init(&di, object, pindex, &domain, &req,
- pages);
+ if (vm_domainset_iter_page_init(&di, object, pindex, &domain, &req) != 0)
+ return (NULL);
+
do {
m = vm_page_alloc_domain_iter(object, pindex, domain, req,
pages);
@@ -2268,7 +2269,9 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
start_segind = -1;
- vm_domainset_iter_page_init(&di, object, pindex, &domain, &req, NULL);
+ if (vm_domainset_iter_page_init(&di, object, pindex, &domain, &req) != 0)
+ return (NULL);
+
do {
m = vm_page_alloc_contig_domain(object, pindex, domain, req,
npages, low, high, alignment, boundary, memattr);
@@ -2596,7 +2599,9 @@ vm_page_alloc_noobj(int req)
vm_page_t m;
int domain;
- vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req, NULL);
+ if (vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req) != 0)
+ return (NULL);
+
do {
m = vm_page_alloc_noobj_domain(domain, req);
if (m != NULL)
@@ -2615,7 +2620,9 @@ vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
vm_page_t m;
int domain;
- vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req, NULL);
+ if (vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req) != 0)
+ return (NULL);
+
do {
m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
high, alignment, boundary, memattr);
@@ -3334,7 +3341,9 @@ vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
ret = ERANGE;
- vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req, NULL);
+ if (vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req) != 0)
+ return (ret);
+
do {
status = vm_page_reclaim_contig_domain(domain, req, npages, low,
high, alignment, boundary);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 3f1be78342c9..418a9cff8abf 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -120,7 +120,7 @@
/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
-static void vm_pageout_init(void);
+static void vm_pageout_init(void *);
static int vm_pageout_clean(vm_page_t m, int *numpagedout);
static int vm_pageout_cluster(vm_page_t m);
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
@@ -2333,7 +2333,7 @@ vm_pageout_init_domain(int domain)
}
static void
-vm_pageout_init(void)
+vm_pageout_init(void *dummy __unused)
{
u_long freecount;
int i;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index de8a6c52c08f..244aa31ea703 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -901,8 +901,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
int error, before, after, rbehind, rahead, poff, i;
int bytecount, secmask;
- KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
- ("%s does not support devices", __func__));
+ KASSERT(!VN_ISDEV(vp), ("%s does not support devices", __func__));
if (VN_IS_DOOMED(vp))
return (VM_PAGER_BAD);
diff --git a/sys/x86/acpica/acpi_apm.c b/sys/x86/acpica/acpi_apm.c
index be161cd6171b..8e5785cf0ed6 100644
--- a/sys/x86/acpica/acpi_apm.c
+++ b/sys/x86/acpica/acpi_apm.c
@@ -235,7 +235,7 @@ apmdtor(void *data)
acpi_sc = clone->acpi_sc;
/* We are about to lose a reference so check if suspend should occur */
- if (acpi_sc->acpi_next_sstate != 0 &&
+ if (acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE &&
clone->notify_status != APM_EV_ACKED)
acpi_AckSleepState(clone, 0);
@@ -283,10 +283,10 @@ apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
case APMIO_SUSPEND:
if ((flag & FWRITE) == 0)
return (EPERM);
- if (acpi_sc->acpi_next_sstate == 0) {
- if (acpi_sc->acpi_suspend_sx != ACPI_STATE_S5) {
+ if (acpi_sc->acpi_next_stype == POWER_STYPE_AWAKE) {
+ if (power_suspend_stype != POWER_STYPE_POWEROFF) {
error = acpi_ReqSleepState(acpi_sc,
- acpi_sc->acpi_suspend_sx);
+ power_suspend_stype);
} else {
printf(
"power off via apm suspend not supported\n");
@@ -298,10 +298,10 @@ apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
case APMIO_STANDBY:
if ((flag & FWRITE) == 0)
return (EPERM);
- if (acpi_sc->acpi_next_sstate == 0) {
- if (acpi_sc->acpi_standby_sx != ACPI_STATE_S5) {
+ if (acpi_sc->acpi_next_stype == POWER_STYPE_AWAKE) {
+ if (power_standby_stype != POWER_STYPE_POWEROFF) {
error = acpi_ReqSleepState(acpi_sc,
- acpi_sc->acpi_standby_sx);
+ power_standby_stype);
} else {
printf(
"power off via apm standby not supported\n");
@@ -313,10 +313,11 @@ apmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
case APMIO_NEXTEVENT:
printf("apm nextevent start\n");
ACPI_LOCK(acpi);
- if (acpi_sc->acpi_next_sstate != 0 && clone->notify_status ==
- APM_EV_NONE) {
+ if (acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE &&
+ clone->notify_status == APM_EV_NONE) {
ev_info = (struct apm_event_info *)addr;
- if (acpi_sc->acpi_next_sstate <= ACPI_STATE_S3)
+ /* XXX Check this. */
+ if (acpi_sc->acpi_next_stype == POWER_STYPE_STANDBY)
ev_info->type = PMEV_STANDBYREQ;
else
ev_info->type = PMEV_SUSPENDREQ;
@@ -392,7 +393,7 @@ apmpoll(struct cdev *dev, int events, struct thread *td)
revents = 0;
devfs_get_cdevpriv((void **)&clone);
ACPI_LOCK(acpi);
- if (clone->acpi_sc->acpi_next_sstate)
+ if (clone->acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE)
revents |= events & (POLLIN | POLLRDNORM);
else
selrecord(td, &clone->sel_read);
@@ -433,7 +434,7 @@ apmreadfilt(struct knote *kn, long hint)
ACPI_LOCK(acpi);
clone = kn->kn_hook;
- sleeping = clone->acpi_sc->acpi_next_sstate ? 1 : 0;
+ sleeping = clone->acpi_sc->acpi_next_stype != POWER_STYPE_AWAKE;
ACPI_UNLOCK(acpi);
return (sleeping);
}
diff --git a/sys/x86/include/apicreg.h b/sys/x86/include/apicreg.h
index d610d7f11a1c..1252647fbab3 100644
--- a/sys/x86/include/apicreg.h
+++ b/sys/x86/include/apicreg.h
@@ -296,6 +296,8 @@ typedef struct IOAPIC ioapic_t;
/* constants relating to APIC ID registers */
#define APIC_ID_MASK 0xff000000
#define APIC_ID_SHIFT 24
+#define APIC_EXT_ID_MASK 0x00fe0000
+#define APIC_EXT_ID_SHIFT 17
#define APIC_ID_CLUSTER 0xf0
#define APIC_ID_CLUSTER_ID 0x0f
#define APIC_MAX_CLUSTER 0xe
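
The new pair of constants describes the extended APIC ID field at bits
23:17 of the ID register on parts that implement it. A sketch of
extracting the two fields (how, or whether, they combine into a single
ID is hardware-specific):

	uint32_t id, xid;

	id = (idreg & APIC_ID_MASK) >> APIC_ID_SHIFT;
	xid = (idreg & APIC_EXT_ID_MASK) >> APIC_EXT_ID_SHIFT;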
diff --git a/sys/x86/include/mca.h b/sys/x86/include/mca.h
index 183480625f6d..553b5d765f17 100644
--- a/sys/x86/include/mca.h
+++ b/sys/x86/include/mca.h
@@ -44,6 +44,31 @@ struct mca_record {
int mr_cpu;
};
+enum mca_stat_types {
+ MCA_T_NONE = 0,
+ MCA_T_UNCLASSIFIED,
+ MCA_T_UCODE_ROM_PARITY,
+ MCA_T_EXTERNAL,
+ MCA_T_FRC,
+ MCA_T_INTERNAL_PARITY,
+ MCA_T_SMM_HANDLER,
+ MCA_T_INTERNAL_TIMER,
+ MCA_T_GENERIC_IO,
+ MCA_T_INTERNAL,
+ MCA_T_MEMORY,
+ MCA_T_TLB,
+ MCA_T_MEMCONTROLLER_GEN,
+ MCA_T_MEMCONTROLLER_RD,
+ MCA_T_MEMCONTROLLER_WR,
+ MCA_T_MEMCONTROLLER_AC,
+ MCA_T_MEMCONTROLLER_MS,
+ MCA_T_MEMCONTROLLER_OTHER,
+ MCA_T_CACHE,
+ MCA_T_BUS,
+ MCA_T_UNKNOWN,
+ MCA_T_COUNT /* Must stay last */
+};
+
#ifdef _KERNEL
void cmc_intr(void);
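
The enum above sizes the per-type event counters that mca.c (below)
exports as the opaque hw.mca.stats array; CTLFLAG_SKIP hides it from
sysctl listings, but it is still readable by name. A hypothetical
userland reader:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t stats[64];	/* at least MCA_T_COUNT entries */
	size_t len = sizeof(stats);

	if (sysctlbyname("hw.mca.stats", stats, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	for (size_t i = 0; i < len / sizeof(stats[0]); i++)
		printf("type %zu: %ju events\n", i, (uintmax_t)stats[i]);
	return (0);
}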
diff --git a/sys/x86/include/ucode.h b/sys/x86/include/ucode.h
index 0338d48a0832..ea7cb07669a4 100644
--- a/sys/x86/include/ucode.h
+++ b/sys/x86/include/ucode.h
@@ -62,12 +62,14 @@ struct ucode_intel_extsig_table {
} entries[0];
};
+typedef enum { SAFE, UNSAFE, EARLY } ucode_load_how;
+
const void *ucode_amd_find(const char *path, uint32_t signature,
- uint32_t revision, const uint8_t *fw_data, size_t fw_size,
+ uint32_t *revision, const uint8_t *fw_data, size_t fw_size,
size_t *selected_sizep);
-int ucode_intel_load(const void *data, bool unsafe,
+int ucode_intel_load(const void *data, ucode_load_how how,
uint64_t *nrevp, uint64_t *orevp);
-int ucode_amd_load(const void *data, bool unsafe,
+int ucode_amd_load(const void *data, ucode_load_how how,
uint64_t *nrevp, uint64_t *orevp);
size_t ucode_load_bsp(uintptr_t free);
void ucode_load_ap(int cpu);
diff --git a/sys/x86/iommu/amd_intrmap.c b/sys/x86/iommu/amd_intrmap.c
index a4c1a7836268..f8900fe0561f 100644
--- a/sys/x86/iommu/amd_intrmap.c
+++ b/sys/x86/iommu/amd_intrmap.c
@@ -112,6 +112,8 @@ amdiommu_map_msi_intr(device_t src, u_int cpu, u_int vector,
{
struct amdiommu_ctx *ctx;
struct amdiommu_unit *unit;
+ device_t requester;
+ int error __diagused;
uint16_t rid;
bool is_iommu;
@@ -180,7 +182,8 @@ amdiommu_map_msi_intr(device_t src, u_int cpu, u_int vector,
*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
}
- iommu_get_requester(src, &rid);
+ error = iommu_get_requester(src, &requester, &rid);
+ MPASS(error == 0);
AMDIOMMU_LOCK(unit);
amdiommu_qi_invalidate_ir_locked(unit, rid);
AMDIOMMU_UNLOCK(unit);
@@ -220,6 +223,7 @@ static struct amdiommu_ctx *
amdiommu_ir_find(device_t src, uint16_t *ridp, bool *is_iommu)
{
devclass_t src_class;
+ device_t requester;
struct amdiommu_unit *unit;
struct amdiommu_ctx *ctx;
uint32_t edte;
@@ -251,7 +255,8 @@ amdiommu_ir_find(device_t src, uint16_t *ridp, bool *is_iommu)
error = amdiommu_find_unit(src, &unit, &rid, &dte, &edte,
bootverbose);
if (error == 0) {
- iommu_get_requester(src, &rid);
+ error = iommu_get_requester(src, &requester, &rid);
+ MPASS(error == 0);
ctx = amdiommu_get_ctx_for_dev(unit, src,
rid, 0, false /* XXXKIB */, false, dte, edte);
}
@@ -266,6 +271,8 @@ amdiommu_ir_free_irte(struct amdiommu_ctx *ctx, device_t src,
u_int cookie)
{
struct amdiommu_unit *unit;
+ device_t requester;
+ int error __diagused;
uint16_t rid;
MPASS(ctx != NULL);
@@ -291,7 +298,8 @@ amdiommu_ir_free_irte(struct amdiommu_ctx *ctx, device_t src,
atomic_thread_fence_rel();
bzero(irte, sizeof(*irte));
}
- iommu_get_requester(src, &rid);
+ error = iommu_get_requester(src, &requester, &rid);
+ MPASS(error == 0);
AMDIOMMU_LOCK(unit);
amdiommu_qi_invalidate_ir_locked(unit, rid);
AMDIOMMU_UNLOCK(unit);
diff --git a/sys/x86/iommu/intel_intrmap.c b/sys/x86/iommu/intel_intrmap.c
index 06e41523624b..f12a0c9bae9b 100644
--- a/sys/x86/iommu/intel_intrmap.c
+++ b/sys/x86/iommu/intel_intrmap.c
@@ -234,6 +234,8 @@ dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
devclass_t src_class;
struct dmar_unit *unit;
+ device_t requester;
+ int error __diagused;
/*
* We need to determine if the interrupt source generates FSB
@@ -253,8 +255,10 @@ dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
unit = dmar_find_hpet(src, rid);
} else {
unit = dmar_find(src, bootverbose);
- if (unit != NULL && rid != NULL)
- iommu_get_requester(src, rid);
+ if (unit != NULL && rid != NULL) {
+ error = iommu_get_requester(src, &requester, rid);
+ MPASS(error == 0);
+ }
}
return (unit);
}
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
index 040174113104..e86279aa9c98 100644
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -726,6 +726,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
map->pagesneeded != 0 &&
must_bounce(dmat, curaddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
+ sgsize = MIN(sgsize, buflen);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
sgsize);
}
diff --git a/sys/x86/x86/identcpu.c b/sys/x86/x86/identcpu.c
index 4d64eaf78b29..7661c82f4394 100644
--- a/sys/x86/x86/identcpu.c
+++ b/sys/x86/x86/identcpu.c
@@ -2613,7 +2613,7 @@ print_vmx_info(void)
"\020EPT#VE" /* EPT-violation #VE */
"\021XSAVES" /* Enable XSAVES/XRSTORS */
);
- printf("\n Exit Controls=0x%b", mask,
+ printf("\n Exit Controls=0x%b", exit,
"\020"
"\003DR" /* Save debug controls */
/* Ignore Host address-space size */
@@ -2625,7 +2625,7 @@ print_vmx_info(void)
"\026EFER-LD" /* Load MSR_EFER */
"\027PTMR-SV" /* Save VMX-preemption timer value */
);
- printf("\n Entry Controls=0x%b", mask,
+ printf("\n Entry Controls=0x%b", entry,
"\020"
"\003DR" /* Save debug controls */
/* Ignore IA-32e mode guest */
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index 4ba49469d3a2..735efe307215 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -46,9 +46,11 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
+#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
@@ -124,6 +126,22 @@ SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
&workaround_erratum383, 0,
"Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");
+#ifdef DIAGNOSTIC
+static uint64_t fake_status;
+SYSCTL_U64(_hw_mca, OID_AUTO, fake_status, CTLFLAG_RW,
+ &fake_status, 0,
+ "Insert artificial MCA with given status (testing purpose only)");
+static int fake_bank;
+SYSCTL_INT(_hw_mca, OID_AUTO, fake_bank, CTLFLAG_RW,
+ &fake_bank, 0,
+ "Bank to use for artificial MCAs (testing purpose only)");
+#endif
+
+static bool mca_uselog = false;
+SYSCTL_BOOL(_hw_mca, OID_AUTO, uselog, CTLFLAG_RWTUN, &mca_uselog, 0,
+ "Should the system send non-fatal machine check errors to the log "
+ "(instead of the console)?");
+
static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
@@ -131,8 +149,44 @@ static STAILQ_HEAD(, mca_internal) mca_pending;
static int mca_ticks = 300;
static struct taskqueue *mca_tq;
static struct task mca_resize_task;
+static struct task mca_postscan_task;
static struct timeout_task mca_scan_task;
static struct mtx mca_lock;
+static bool mca_startup_done = false;
+
+/* Static buffer to compose messages while in an interrupt context. */
+static char mca_msg_buf[1024];
+static struct mtx mca_msg_buf_lock;
+
+/* Statistics on MCA events by type, updated with the mca_lock held. */
+static uint64_t mca_stats[MCA_T_COUNT];
+SYSCTL_OPAQUE(_hw_mca, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_SKIP,
+ mca_stats, MCA_T_COUNT * sizeof(mca_stats[0]),
+ "S", "Array of MCA events by type");
+
+/* Variables to track and control message rate limiting. */
+static struct timeval mca_last_log_time;
+static struct timeval mca_log_interval;
+static int mca_log_skipped;
+
+static int
+sysctl_mca_log_interval(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ u_int val;
+
+ val = mca_log_interval.tv_sec;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ mca_log_interval.tv_sec = val;
+ return (0);
+}
+SYSCTL_PROC(_hw_mca, OID_AUTO, log_interval,
+ CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, &mca_log_interval, 0,
+ sysctl_mca_log_interval, "IU",
+ "Minimum number of seconds between logging correctable MCAs"
+ " (0 = no limit)");
static unsigned int
mca_ia32_ctl_reg(int bank)
@@ -356,21 +410,27 @@ mca_error_request(uint16_t mca_error)
}
static const char *
-mca_error_mmtype(uint16_t mca_error)
+mca_error_mmtype(uint16_t mca_error, enum mca_stat_types *event_type)
{
switch ((mca_error & 0x70) >> 4) {
case 0x0:
+ *event_type = MCA_T_MEMCONTROLLER_GEN;
return ("GEN");
case 0x1:
+ *event_type = MCA_T_MEMCONTROLLER_RD;
return ("RD");
case 0x2:
+ *event_type = MCA_T_MEMCONTROLLER_WR;
return ("WR");
case 0x3:
+ *event_type = MCA_T_MEMCONTROLLER_AC;
return ("AC");
case 0x4:
+ *event_type = MCA_T_MEMCONTROLLER_MS;
return ("MS");
}
+ *event_type = MCA_T_MEMCONTROLLER_OTHER;
return ("???");
}
@@ -423,87 +483,111 @@ mca_mute(const struct mca_record *rec)
/* Dump details about a single machine check. */
static void
-mca_log(const struct mca_record *rec)
+mca_log(enum scan_mode mode, const struct mca_record *rec, bool fatal)
{
+ int error, numskipped;
uint16_t mca_error;
+ enum mca_stat_types event_type;
+ struct sbuf sb;
+ bool uncor, using_shared_buf;
if (mca_mute(rec))
return;
- if (!log_corrected && (rec->mr_status & MC_STATUS_UC) == 0 &&
- (!tes_supported(rec->mr_mcg_cap) ||
+ uncor = (rec->mr_status & MC_STATUS_UC) != 0;
+
+ if (!log_corrected && !uncor && (!tes_supported(rec->mr_mcg_cap) ||
((rec->mr_status & MC_STATUS_TES_STATUS) >> 53) != 0x2))
return;
- printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
+ /* Try to use an allocated buffer when not in an interrupt context. */
+ if (mode == POLLED && sbuf_new(&sb, NULL, 512, SBUF_AUTOEXTEND) != NULL)
+ using_shared_buf = false;
+ else {
+ using_shared_buf = true;
+ mtx_lock_spin(&mca_msg_buf_lock);
+ sbuf_new(&sb, mca_msg_buf, sizeof(mca_msg_buf), SBUF_FIXEDLEN);
+ }
+
+ sbuf_printf(&sb, "MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
(long long)rec->mr_status);
- printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
+ sbuf_printf(&sb, "MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
(long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
- printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
- rec->mr_cpu_id, rec->mr_apic_id);
- printf("MCA: CPU %d ", rec->mr_cpu);
+ sbuf_printf(&sb, "MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n",
+ cpu_vendor, rec->mr_cpu_id, rec->mr_apic_id);
+ sbuf_printf(&sb, "MCA: CPU %d ", rec->mr_cpu);
if (rec->mr_status & MC_STATUS_UC)
- printf("UNCOR ");
+ sbuf_printf(&sb, "UNCOR ");
else {
- printf("COR ");
+ sbuf_printf(&sb, "COR ");
if (cmci_supported(rec->mr_mcg_cap))
- printf("(%lld) ", ((long long)rec->mr_status &
+ sbuf_printf(&sb, "(%lld) ", ((long long)rec->mr_status &
MC_STATUS_COR_COUNT) >> 38);
if (tes_supported(rec->mr_mcg_cap)) {
switch ((rec->mr_status & MC_STATUS_TES_STATUS) >> 53) {
case 0x1:
- printf("(Green) ");
+ sbuf_printf(&sb, "(Green) ");
break;
case 0x2:
- printf("(Yellow) ");
+ sbuf_printf(&sb, "(Yellow) ");
break;
}
}
}
if (rec->mr_status & MC_STATUS_EN)
- printf("EN ");
+ sbuf_printf(&sb, "EN ");
if (rec->mr_status & MC_STATUS_PCC)
- printf("PCC ");
+ sbuf_printf(&sb, "PCC ");
if (ser_supported(rec->mr_mcg_cap)) {
if (rec->mr_status & MC_STATUS_S)
- printf("S ");
+ sbuf_printf(&sb, "S ");
if (rec->mr_status & MC_STATUS_AR)
- printf("AR ");
+ sbuf_printf(&sb, "AR ");
}
if (rec->mr_status & MC_STATUS_OVER)
- printf("OVER ");
+ sbuf_printf(&sb, "OVER ");
mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
+ event_type = MCA_T_COUNT;
switch (mca_error) {
/* Simple error codes. */
case 0x0000:
- printf("no error");
+ sbuf_printf(&sb, "no error");
+ event_type = MCA_T_NONE;
break;
case 0x0001:
- printf("unclassified error");
+ sbuf_printf(&sb, "unclassified error");
+ event_type = MCA_T_UNCLASSIFIED;
break;
case 0x0002:
- printf("ucode ROM parity error");
+ sbuf_printf(&sb, "ucode ROM parity error");
+ event_type = MCA_T_UCODE_ROM_PARITY;
break;
case 0x0003:
- printf("external error");
+ sbuf_printf(&sb, "external error");
+ event_type = MCA_T_EXTERNAL;
break;
case 0x0004:
- printf("FRC error");
+ sbuf_printf(&sb, "FRC error");
+ event_type = MCA_T_FRC;
break;
case 0x0005:
- printf("internal parity error");
+ sbuf_printf(&sb, "internal parity error");
+ event_type = MCA_T_INTERNAL_PARITY;
break;
case 0x0006:
- printf("SMM handler code access violation");
+ sbuf_printf(&sb, "SMM handler code access violation");
+ event_type = MCA_T_SMM_HANDLER;
break;
case 0x0400:
- printf("internal timer error");
+ sbuf_printf(&sb, "internal timer error");
+ event_type = MCA_T_INTERNAL_TIMER;
break;
case 0x0e0b:
- printf("generic I/O error");
+ sbuf_printf(&sb, "generic I/O error");
+ event_type = MCA_T_GENERIC_IO;
if (rec->mr_cpu_vendor_id == CPU_VENDOR_INTEL &&
(rec->mr_status & MC_STATUS_MISCV)) {
- printf(" (pci%d:%d:%d:%d)",
+ sbuf_printf(&sb, " (pci%d:%d:%d:%d)",
(int)((rec->mr_misc & MC_MISC_PCIE_SEG) >> 32),
(int)((rec->mr_misc & MC_MISC_PCIE_BUS) >> 24),
(int)((rec->mr_misc & MC_MISC_PCIE_SLOT) >> 19),
@@ -512,7 +596,9 @@ mca_log(const struct mca_record *rec)
break;
default:
if ((mca_error & 0xfc00) == 0x0400) {
- printf("internal error %x", mca_error & 0x03ff);
+ sbuf_printf(&sb, "internal error %x",
+ mca_error & 0x03ff);
+ event_type = MCA_T_INTERNAL;
break;
}
@@ -520,101 +606,168 @@ mca_log(const struct mca_record *rec)
/* Memory hierarchy error. */
if ((mca_error & 0xeffc) == 0x000c) {
- printf("%s memory error", mca_error_level(mca_error));
+ sbuf_printf(&sb, "%s memory error",
+ mca_error_level(mca_error));
+ event_type = MCA_T_MEMORY;
break;
}
/* TLB error. */
if ((mca_error & 0xeff0) == 0x0010) {
- printf("%sTLB %s error", mca_error_ttype(mca_error),
+ sbuf_printf(&sb, "%sTLB %s error",
+ mca_error_ttype(mca_error),
mca_error_level(mca_error));
+ event_type = MCA_T_TLB;
break;
}
/* Memory controller error. */
if ((mca_error & 0xef80) == 0x0080) {
- printf("%s channel ", mca_error_mmtype(mca_error));
+ sbuf_printf(&sb, "%s channel ",
+ mca_error_mmtype(mca_error, &event_type));
if ((mca_error & 0x000f) != 0x000f)
- printf("%d", mca_error & 0x000f);
+ sbuf_printf(&sb, "%d", mca_error & 0x000f);
else
- printf("??");
- printf(" memory error");
+ sbuf_printf(&sb, "??");
+ sbuf_printf(&sb, " memory error");
break;
}
/* Cache error. */
if ((mca_error & 0xef00) == 0x0100) {
- printf("%sCACHE %s %s error",
+ sbuf_printf(&sb, "%sCACHE %s %s error",
mca_error_ttype(mca_error),
mca_error_level(mca_error),
mca_error_request(mca_error));
+ event_type = MCA_T_CACHE;
break;
}
/* Extended memory error. */
if ((mca_error & 0xef80) == 0x0280) {
- printf("%s channel ", mca_error_mmtype(mca_error));
+ sbuf_printf(&sb, "%s channel ",
+ mca_error_mmtype(mca_error, &event_type));
if ((mca_error & 0x000f) != 0x000f)
- printf("%d", mca_error & 0x000f);
+ sbuf_printf(&sb, "%d", mca_error & 0x000f);
else
- printf("??");
- printf(" extended memory error");
+ sbuf_printf(&sb, "??");
+ sbuf_printf(&sb, " extended memory error");
break;
}
/* Bus and/or Interconnect error. */
if ((mca_error & 0xe800) == 0x0800) {
- printf("BUS%s ", mca_error_level(mca_error));
+ sbuf_printf(&sb, "BUS%s ", mca_error_level(mca_error));
+ event_type = MCA_T_BUS;
switch ((mca_error & 0x0600) >> 9) {
case 0:
- printf("Source");
+ sbuf_printf(&sb, "Source");
break;
case 1:
- printf("Responder");
+ sbuf_printf(&sb, "Responder");
break;
case 2:
- printf("Observer");
+ sbuf_printf(&sb, "Observer");
break;
default:
- printf("???");
+ sbuf_printf(&sb, "???");
break;
}
- printf(" %s ", mca_error_request(mca_error));
+ sbuf_printf(&sb, " %s ", mca_error_request(mca_error));
switch ((mca_error & 0x000c) >> 2) {
case 0:
- printf("Memory");
+ sbuf_printf(&sb, "Memory");
break;
case 2:
- printf("I/O");
+ sbuf_printf(&sb, "I/O");
break;
case 3:
- printf("Other");
+ sbuf_printf(&sb, "Other");
break;
default:
- printf("???");
+ sbuf_printf(&sb, "???");
break;
}
if (mca_error & 0x0100)
- printf(" timed out");
+ sbuf_printf(&sb, " timed out");
break;
}
- printf("unknown error %x", mca_error);
+ sbuf_printf(&sb, "unknown error %x", mca_error);
+ event_type = MCA_T_UNKNOWN;
break;
}
- printf("\n");
+ sbuf_printf(&sb, "\n");
if (rec->mr_status & MC_STATUS_ADDRV) {
- printf("MCA: Address 0x%llx", (long long)rec->mr_addr);
+ sbuf_printf(&sb, "MCA: Address 0x%llx",
+ (long long)rec->mr_addr);
if (ser_supported(rec->mr_mcg_cap) &&
(rec->mr_status & MC_STATUS_MISCV)) {
- printf(" (Mode: %s, LSB: %d)",
+ sbuf_printf(&sb, " (Mode: %s, LSB: %d)",
mca_addres_mode(rec->mr_misc),
(int)(rec->mr_misc & MC_MISC_RA_LSB));
}
- printf("\n");
+ sbuf_printf(&sb, "\n");
}
if (rec->mr_status & MC_STATUS_MISCV)
- printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
+ sbuf_printf(&sb, "MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
+
+ if (event_type < 0 || event_type >= MCA_T_COUNT) {
+ KASSERT(0, ("%s: invalid event type (%d)", __func__,
+ event_type));
+ event_type = MCA_T_UNKNOWN;
+ }
+ numskipped = 0;
+ if (!fatal && !uncor) {
+ /*
+ * Update statistics and check the rate limit for
+ * correctable errors. The rate limit is only applied
+ * after the system records a reasonable number of errors
+ * of the same type. The goal is to reduce the impact of
+ * the system seeing and attempting to log a burst of
+ * similar errors, which (especially when printed to the
+ * console) can be expensive.
+ */
+ mtx_lock_spin(&mca_lock);
+ mca_stats[event_type]++;
+ if (mca_log_interval.tv_sec > 0 && mca_stats[event_type] > 50 &&
+ ratecheck(&mca_last_log_time, &mca_log_interval) == 0) {
+ mca_log_skipped++;
+ mtx_unlock_spin(&mca_lock);
+ goto done;
+ }
+ numskipped = mca_log_skipped;
+ mca_log_skipped = 0;
+ mtx_unlock_spin(&mca_lock);
+ }
+
+ error = sbuf_finish(&sb);
+ if (fatal || !mca_uselog) {
+ if (numskipped > 0)
+ printf("MCA: %d events skipped due to rate limit\n",
+ numskipped);
+ if (error)
+ printf("MCA: error logging message (sbuf error %d)\n",
+ error);
+ else
+ sbuf_putbuf(&sb);
+ } else {
+ if (numskipped > 0)
+ log(LOG_ERR,
+ "MCA: %d events skipped due to rate limit\n",
+ numskipped);
+ if (error)
+ log(LOG_ERR,
+ "MCA: error logging message (sbuf error %d)\n",
+ error);
+ else
+ log(uncor ? LOG_CRIT : LOG_ERR, "%s", sbuf_data(&sb));
+ }
+
+done:
+ sbuf_delete(&sb);
+ if (using_shared_buf)
+ mtx_unlock_spin(&mca_msg_buf_lock);
}
static bool
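
The rate limiting added to mca_log() is the standard ratecheck(9)
idiom: ratecheck() returns non-zero when at least the minimum interval
has elapsed since *lasttime, updating it as a side effect. A
stripped-down kernel-style sketch of the same shape (names are
stand-ins, not the patch's):

static struct timeval lasttime;
static struct timeval mininterval = { .tv_sec = 10, .tv_usec = 0 };
static int skipped;

static bool
may_log_now(void)
{
	if (mininterval.tv_sec > 0 &&
	    ratecheck(&lasttime, &mininterval) == 0) {
		skipped++;	/* suppressed; reported later in bulk */
		return (false);
	}
	return (true);
}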
@@ -662,8 +815,24 @@ mca_check_status(enum scan_mode mode, uint64_t mcg_cap, int bank,
bool mce, recover;
status = rdmsr(mca_msr_ops.status(bank));
- if (!(status & MC_STATUS_VAL))
+ if (!(status & MC_STATUS_VAL)) {
+#ifdef DIAGNOSTIC
+ /*
+ * Check if we have a pending artificial event to generate.
+ * Note that this is potentially racy with the sysctl. The
+ * tradeoff is deemed acceptable given the test nature
+ * of the code.
+ */
+ if (fake_status && bank == fake_bank) {
+ status = fake_status;
+ fake_status = 0;
+ }
+ if (!(status & MC_STATUS_VAL))
+ return (0);
+#else
return (0);
+#endif
+ }
recover = *recoverablep;
mce = mca_is_mce(mcg_cap, status, &recover);
@@ -757,9 +926,9 @@ mca_record_entry(enum scan_mode mode, const struct mca_record *record)
mtx_lock_spin(&mca_lock);
rec = STAILQ_FIRST(&mca_freelist);
if (rec == NULL) {
- printf("MCA: Unable to allocate space for an event.\n");
- mca_log(record);
mtx_unlock_spin(&mca_lock);
+ printf("MCA: Unable to allocate space for an event.\n");
+ mca_log(mode, record, false);
return;
}
STAILQ_REMOVE_HEAD(&mca_freelist, link);
@@ -916,7 +1085,7 @@ mca_scan(enum scan_mode mode, bool *recoverablep)
if (*recoverablep)
mca_record_entry(mode, &rec);
else
- mca_log(&rec);
+ mca_log(mode, &rec, true);
}
#ifdef DEV_APIC
@@ -978,18 +1147,49 @@ static void
mca_process_records(enum scan_mode mode)
{
struct mca_internal *mca;
+ STAILQ_HEAD(, mca_internal) tmplist;
+
+ /*
+ * If in an interrupt context, defer the post-scan activities to a
+ * task queue.
+ */
+ if (mode != POLLED) {
+ if (mca_startup_done)
+ taskqueue_enqueue(mca_tq, &mca_postscan_task);
+ return;
+ }
+
+ /*
+ * Copy the pending list to the stack so we can drop the spin lock
+ * while we are emitting logs.
+ */
+ STAILQ_INIT(&tmplist);
+ mtx_lock_spin(&mca_lock);
+ STAILQ_SWAP(&mca_pending, &tmplist, mca_internal);
+ mtx_unlock_spin(&mca_lock);
+
+ STAILQ_FOREACH(mca, &tmplist, link)
+ mca_log(mode, &mca->rec, false);
mtx_lock_spin(&mca_lock);
- while ((mca = STAILQ_FIRST(&mca_pending)) != NULL) {
- STAILQ_REMOVE_HEAD(&mca_pending, link);
- mca_log(&mca->rec);
+ while ((mca = STAILQ_FIRST(&tmplist)) != NULL) {
+ STAILQ_REMOVE_HEAD(&tmplist, link);
mca_store_record(mca);
}
mtx_unlock_spin(&mca_lock);
- if (mode == POLLED)
- mca_resize_freelist();
- else if (!cold)
- taskqueue_enqueue(mca_tq, &mca_resize_task);
+ mca_resize_freelist();
+}
+
+/*
+ * Emit log entries and resize the free list. This is intended to be called
+ * from a task queue to handle work which does not need to be done (or cannot
+ * be done) in an interrupt context.
+ */
+static void
+mca_postscan(void *context __unused, int pending __unused)
+{
+
+ mca_process_records(POLLED);
}
/*
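
The rewritten mca_process_records() uses a common spin-lock drain
pattern: swap the shared producer list onto the stack while holding
the lock, then do the slow work (logging) locklessly. In outline
(struct, lock, and process() are stand-ins, not the patch's):

struct item {
	STAILQ_ENTRY(item) link;
};
static STAILQ_HEAD(, item) pending = STAILQ_HEAD_INITIALIZER(pending);
static struct mtx lock;

static void
drain(void)
{
	STAILQ_HEAD(, item) tmp;
	struct item *it;

	STAILQ_INIT(&tmp);
	mtx_lock_spin(&lock);
	STAILQ_SWAP(&pending, &tmp, item);
	mtx_unlock_spin(&lock);
	STAILQ_FOREACH(it, &tmp, link)
		process(it);	/* safe to be slow: no spin lock held */
}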
@@ -1060,7 +1260,7 @@ sysctl_mca_maxcount(SYSCTL_HANDLER_ARGS)
doresize = true;
}
mtx_unlock_spin(&mca_lock);
- if (doresize && !cold)
+ if (doresize && mca_startup_done)
taskqueue_enqueue(mca_tq, &mca_resize_task);
return (error);
}
@@ -1072,12 +1272,16 @@ mca_startup(void *dummy)
if (mca_banks <= 0)
return;
- /* CMCIs during boot may have claimed items from the freelist. */
- mca_resize_freelist();
-
taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");
taskqueue_enqueue_timeout_sbt(mca_tq, &mca_scan_task,
mca_ticks * SBT_1S, 0, C_PREL(1));
+ mca_startup_done = true;
+
+ /*
+ * CMCIs during boot may have recorded entries. Conduct the post-scan
+ * activities now.
+ */
+ mca_postscan(NULL, 0);
}
SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
@@ -1130,6 +1334,7 @@ mca_setup(uint64_t mcg_cap)
mca_banks = mcg_cap & MCG_CAP_COUNT;
mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
+ mtx_init(&mca_msg_buf_lock, "mca_msg_buf", NULL, MTX_SPIN);
STAILQ_INIT(&mca_records);
STAILQ_INIT(&mca_pending);
mca_tq = taskqueue_create_fast("mca", M_WAITOK,
@@ -1137,6 +1342,7 @@ mca_setup(uint64_t mcg_cap)
TIMEOUT_TASK_INIT(mca_tq, &mca_scan_task, 0, mca_scan_cpus, NULL);
STAILQ_INIT(&mca_freelist);
TASK_INIT(&mca_resize_task, 0, mca_resize, NULL);
+ TASK_INIT(&mca_postscan_task, 0, mca_postscan, NULL);
mca_resize_freelist();
SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
"count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
@@ -1540,6 +1746,9 @@ mca_intr(void)
panic("Unrecoverable machine check exception");
}
+ if (count)
+ mca_process_records(MCE);
+
/* Clear MCIP. */
wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}
diff --git a/sys/x86/x86/tsc.c b/sys/x86/x86/tsc.c
index a1a5d8140b14..3b873d9dae73 100644
--- a/sys/x86/x86/tsc.c
+++ b/sys/x86/x86/tsc.c
@@ -650,7 +650,7 @@ retry:
#endif /* SMP */
static void
-init_TSC_tc(void)
+init_TSC_tc(void *dummy __unused)
{
uint64_t max_freq;
int shift;
diff --git a/sys/x86/x86/ucode.c b/sys/x86/x86/ucode.c
index 0c153c0b656c..72133de211f8 100644
--- a/sys/x86/x86/ucode.c
+++ b/sys/x86/x86/ucode.c
@@ -40,6 +40,7 @@
#include <machine/atomic.h>
#include <machine/cpufunc.h>
+#include <machine/md_var.h>
#include <x86/specialreg.h>
#include <x86/ucode.h>
#include <x86/x86_smp.h>
@@ -58,7 +59,7 @@ static const void *ucode_amd_match(const uint8_t *data, size_t *len);
static struct ucode_ops {
const char *vendor;
- int (*load)(const void *, bool, uint64_t *, uint64_t *);
+ int (*load)(const void *, ucode_load_how how, uint64_t *, uint64_t *);
const void *(*match)(const uint8_t *, size_t *);
} loaders[] = {
{
@@ -83,6 +84,7 @@ enum {
NO_ERROR,
NO_MATCH,
VERIFICATION_FAILED,
+ LOAD_FAILED,
} ucode_error = NO_ERROR;
static uint64_t ucode_nrev, ucode_orev;
@@ -103,6 +105,9 @@ log_msg(void *arg __unused)
case VERIFICATION_FAILED:
printf("CPU microcode: microcode verification failed\n");
break;
+ case LOAD_FAILED:
+ printf("CPU microcode load failed. BIOS update advised\n");
+ break;
default:
break;
}
@@ -110,7 +115,8 @@ log_msg(void *arg __unused)
SYSINIT(ucode_log, SI_SUB_CPU, SI_ORDER_FIRST, log_msg, NULL);
int
-ucode_intel_load(const void *data, bool unsafe, uint64_t *nrevp, uint64_t *orevp)
+ucode_intel_load(const void *data, ucode_load_how how, uint64_t *nrevp,
+ uint64_t *orevp)
{
uint64_t nrev, orev;
uint32_t cpuid[4];
@@ -122,10 +128,23 @@ ucode_intel_load(const void *data, bool unsafe, uint64_t *nrevp, uint64_t *orevp
* undocumented errata applying to some Broadwell CPUs.
*/
wbinvd();
- if (unsafe)
+ switch (how) {
+ case SAFE:
wrmsr_safe(MSR_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)data);
- else
+ break;
+ case EARLY:
+#ifdef __amd64__
+ wrmsr_early_safe_start();
+ if (wrmsr_early_safe(MSR_BIOS_UPDT_TRIG,
+ (uint64_t)(uintptr_t)data) != 0)
+ ucode_error = LOAD_FAILED;
+ wrmsr_early_safe_end();
+ break;
+#endif
+ case UNSAFE:
wrmsr(MSR_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)data);
+ break;
+ }
wrmsr(MSR_BIOS_SIGN, 0);
/*
@@ -233,20 +252,31 @@ ucode_intel_match(const uint8_t *data, size_t *len)
}
int
-ucode_amd_load(const void *data, bool unsafe, uint64_t *nrevp, uint64_t *orevp)
+ucode_amd_load(const void *data, ucode_load_how how, uint64_t *nrevp,
+ uint64_t *orevp)
{
uint64_t nrev, orev;
uint32_t cpuid[4];
orev = rdmsr(MSR_BIOS_SIGN);
- /*
- * Perform update.
- */
- if (unsafe)
+ switch (how) {
+ case SAFE:
wrmsr_safe(MSR_K8_UCODE_UPDATE, (uint64_t)(uintptr_t)data);
- else
+ break;
+ case EARLY:
+#ifdef __amd64__
+ wrmsr_early_safe_start();
+ if (wrmsr_early_safe(MSR_K8_UCODE_UPDATE,
+ (uint64_t)(uintptr_t)data) != 0)
+ ucode_error = LOAD_FAILED;
+ wrmsr_early_safe_end();
+ break;
+#endif
+ case UNSAFE:
wrmsr(MSR_K8_UCODE_UPDATE, (uint64_t)(uintptr_t)data);
+ break;
+ }
/*
* Serialize instruction flow.
@@ -277,7 +307,8 @@ ucode_amd_match(const uint8_t *data, size_t *len)
signature = regs[0];
revision = rdmsr(MSR_BIOS_SIGN);
- return (ucode_amd_find("loader blob", signature, revision, data, *len, len));
+ return (ucode_amd_find("loader blob", signature, &revision, data, *len,
+ len));
}
/*
@@ -326,8 +357,8 @@ ucode_load_ap(int cpu)
return;
#endif
- if (ucode_data != NULL)
- (void)ucode_loader->load(ucode_data, false, NULL, NULL);
+ if (ucode_data != NULL && ucode_error != LOAD_FAILED)
+ (void)ucode_loader->load(ucode_data, UNSAFE, NULL, NULL);
}
static void *
@@ -414,7 +445,7 @@ ucode_load_bsp(uintptr_t free)
memcpy_early(addr, match, len);
match = addr;
- error = ucode_loader->load(match, false, &nrev, &orev);
+ error = ucode_loader->load(match, EARLY, &nrev, &orev);
if (error == 0) {
ucode_data = early_ucode_data = match;
ucode_nrev = nrev;
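
Taken together, the new ucode_load_how modes map onto three MSR write
strategies; note that on !amd64 the EARLY case has no #ifdef body and
falls through to UNSAFE. As a reading of the dispatch above
(illustrative only):

	/* SAFE: wrmsr_safe(), tolerates a fault; normal kernel context. */
	ucode_intel_load(data, SAFE, &nrev, &orev);
	/* EARLY: wrmsr_early_safe() on amd64, before trap handling is
	 * fully set up; a failure latches LOAD_FAILED instead of
	 * faulting. Falls through to UNSAFE on other platforms. */
	ucode_intel_load(data, EARLY, &nrev, &orev);
	/* UNSAFE: plain wrmsr(); used for APs with known-good data. */
	ucode_intel_load(data, UNSAFE, NULL, NULL);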
diff --git a/sys/x86/x86/ucode_subr.c b/sys/x86/x86/ucode_subr.c
index 9e128ad2bf04..53d7cfc06769 100644
--- a/sys/x86/x86/ucode_subr.c
+++ b/sys/x86/x86/ucode_subr.c
@@ -94,7 +94,7 @@ typedef struct container_header {
* source code.
*/
const void *
-ucode_amd_find(const char *path, uint32_t signature, uint32_t revision,
+ucode_amd_find(const char *path, uint32_t signature, uint32_t *revision,
const uint8_t *fw_data, size_t fw_size, size_t *selected_sizep)
{
const amd_10h_fw_header_t *fw_header;
@@ -112,7 +112,7 @@ ucode_amd_find(const char *path, uint32_t signature, uint32_t revision,
(signature >> 4) & 0x0f,
(signature >> 0) & 0x0f, (signature >> 20) & 0xff,
(signature >> 16) & 0x0f);
- WARNX(1, "microcode revision %#x", revision);
+ WARNX(1, "microcode revision %#x", *revision);
nextfile:
WARNX(1, "checking %s for update.", path);
@@ -212,9 +212,9 @@ nextfile:
fw_header->processor_rev_id, equiv_id);
continue; /* different cpu */
}
- if (fw_header->patch_id <= revision) {
+ if (fw_header->patch_id <= *revision) {
WARNX(1, "patch_id %x, revision %x",
- fw_header->patch_id, revision);
+ fw_header->patch_id, *revision);
continue; /* not newer revision */
}
if (fw_header->nb_dev_id != 0 || fw_header->sb_dev_id != 0) {
@@ -222,7 +222,7 @@ nextfile:
}
WARNX(3, "selecting revision: %x", fw_header->patch_id);
- revision = fw_header->patch_id;
+ *revision = fw_header->patch_id;
selected_fw = fw_header;
selected_size = section_header->size;
}
diff --git a/sys/x86/xen/xen_apic.c b/sys/x86/xen/xen_apic.c
index 994dc3e0804c..43a253cc2860 100644
--- a/sys/x86/xen/xen_apic.c
+++ b/sys/x86/xen/xen_apic.c
@@ -330,7 +330,7 @@ xen_cpu_ipi_init(int cpu)
}
static void
-xen_setup_cpus(void)
+xen_setup_cpus(void *dummy __unused)
{
uint32_t regs[4];
int i;